From 26a029d407be480d791972afb5975cf62c9360a6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 19 Apr 2024 02:47:55 +0200 Subject: Adding upstream version 124.0.1. Signed-off-by: Daniel Baumann --- dom/media/ADTSDecoder.cpp | 46 + dom/media/ADTSDecoder.h | 30 + dom/media/ADTSDemuxer.cpp | 818 + dom/media/ADTSDemuxer.h | 152 + dom/media/AsyncLogger.h | 305 + dom/media/AudibilityMonitor.h | 100 + dom/media/AudioBufferUtils.h | 220 + dom/media/AudioCaptureTrack.cpp | 97 + dom/media/AudioCaptureTrack.h | 39 + dom/media/AudioChannelFormat.cpp | 16 + dom/media/AudioChannelFormat.h | 253 + dom/media/AudioCompactor.cpp | 65 + dom/media/AudioCompactor.h | 128 + dom/media/AudioConfig.cpp | 385 + dom/media/AudioConfig.h | 274 + dom/media/AudioConverter.cpp | 485 + dom/media/AudioConverter.h | 277 + dom/media/AudioDeviceInfo.cpp | 165 + dom/media/AudioDeviceInfo.h | 59 + dom/media/AudioInputSource.cpp | 241 + dom/media/AudioInputSource.h | 141 + dom/media/AudioMixer.h | 112 + dom/media/AudioPacketizer.h | 174 + dom/media/AudioRingBuffer.cpp | 606 + dom/media/AudioRingBuffer.h | 135 + dom/media/AudioSampleFormat.h | 236 + dom/media/AudioSegment.cpp | 292 + dom/media/AudioSegment.h | 482 + dom/media/AudioStream.cpp | 756 + dom/media/AudioStream.h | 382 + dom/media/AudioStreamTrack.cpp | 55 + dom/media/AudioStreamTrack.h | 48 + dom/media/AudioTrack.cpp | 70 + dom/media/AudioTrack.h | 52 + dom/media/AudioTrackList.cpp | 32 + dom/media/AudioTrackList.h | 38 + .../BackgroundVideoDecodingPermissionObserver.cpp | 149 + .../BackgroundVideoDecodingPermissionObserver.h | 51 + dom/media/BaseMediaResource.cpp | 171 + dom/media/BaseMediaResource.h | 151 + dom/media/Benchmark.cpp | 395 + dom/media/Benchmark.h | 119 + dom/media/BitReader.cpp | 197 + dom/media/BitReader.h | 54 + dom/media/BitWriter.cpp | 104 + dom/media/BitWriter.h | 49 + dom/media/BufferMediaResource.h | 76 + dom/media/BufferReader.h | 335 + dom/media/ByteWriter.h | 61 + dom/media/CallbackThreadRegistry.cpp | 101 + dom/media/CallbackThreadRegistry.h | 60 + dom/media/CanvasCaptureMediaStream.cpp | 212 + dom/media/CanvasCaptureMediaStream.h | 132 + dom/media/ChannelMediaDecoder.cpp | 567 + dom/media/ChannelMediaDecoder.h | 169 + dom/media/ChannelMediaResource.cpp | 1057 + dom/media/ChannelMediaResource.h | 275 + dom/media/CloneableWithRangeMediaResource.cpp | 228 + dom/media/CloneableWithRangeMediaResource.h | 101 + dom/media/CrossGraphPort.cpp | 163 + dom/media/CrossGraphPort.h | 100 + dom/media/CubebInputStream.cpp | 178 + dom/media/CubebInputStream.h | 86 + dom/media/CubebUtils.cpp | 908 + dom/media/CubebUtils.h | 120 + dom/media/DOMMediaStream.cpp | 545 + dom/media/DOMMediaStream.h | 252 + dom/media/DecoderTraits.cpp | 309 + dom/media/DecoderTraits.h | 68 + dom/media/DeviceInputTrack.cpp | 639 + dom/media/DeviceInputTrack.h | 303 + dom/media/DriftCompensation.h | 137 + dom/media/EncoderTraits.cpp | 17 + dom/media/EncoderTraits.h | 23 + dom/media/ExternalEngineStateMachine.cpp | 1277 ++ dom/media/ExternalEngineStateMachine.h | 346 + dom/media/FileBlockCache.cpp | 506 + dom/media/FileBlockCache.h | 193 + dom/media/FileMediaResource.cpp | 223 + dom/media/FileMediaResource.h | 136 + dom/media/ForwardedInputTrack.cpp | 291 + dom/media/ForwardedInputTrack.h | 68 + dom/media/FrameStatistics.h | 196 + dom/media/GetUserMediaRequest.cpp | 127 + dom/media/GetUserMediaRequest.h | 93 + dom/media/GraphDriver.cpp | 1379 ++ dom/media/GraphDriver.h | 793 + dom/media/GraphRunner.cpp | 178 + dom/media/GraphRunner.h | 121 + dom/media/IdpSandbox.sys.mjs | 284 + 
dom/media/ImageToI420.cpp | 148 + dom/media/ImageToI420.h | 26 + dom/media/Intervals.h | 762 + dom/media/MPSCQueue.h | 132 + dom/media/MediaBlockCacheBase.h | 81 + dom/media/MediaCache.cpp | 2816 +++ dom/media/MediaCache.h | 557 + dom/media/MediaChannelStatistics.h | 89 + dom/media/MediaContainerType.cpp | 35 + dom/media/MediaContainerType.h | 51 + dom/media/MediaData.cpp | 602 + dom/media/MediaData.h | 762 + dom/media/MediaDataDemuxer.h | 213 + dom/media/MediaDecoder.cpp | 1698 ++ dom/media/MediaDecoder.h | 822 + dom/media/MediaDecoderOwner.h | 200 + dom/media/MediaDecoderStateMachine.cpp | 4870 ++++ dom/media/MediaDecoderStateMachine.h | 570 + dom/media/MediaDecoderStateMachineBase.cpp | 186 + dom/media/MediaDecoderStateMachineBase.h | 308 + dom/media/MediaDeviceInfo.cpp | 42 + dom/media/MediaDeviceInfo.h | 59 + dom/media/MediaDevices.cpp | 798 + dom/media/MediaDevices.h | 140 + dom/media/MediaEventSource.h | 594 + dom/media/MediaFormatReader.cpp | 3454 +++ dom/media/MediaFormatReader.h | 892 + dom/media/MediaInfo.cpp | 97 + dom/media/MediaInfo.h | 752 + dom/media/MediaMIMETypes.cpp | 267 + dom/media/MediaMIMETypes.h | 222 + dom/media/MediaManager.cpp | 4613 ++++ dom/media/MediaManager.h | 477 + dom/media/MediaMetadataManager.h | 97 + dom/media/MediaPlaybackDelayPolicy.cpp | 166 + dom/media/MediaPlaybackDelayPolicy.h | 84 + dom/media/MediaPromiseDefs.h | 19 + dom/media/MediaQueue.h | 277 + dom/media/MediaRecorder.cpp | 1893 ++ dom/media/MediaRecorder.h | 187 + dom/media/MediaResource.cpp | 425 + dom/media/MediaResource.h | 283 + dom/media/MediaResourceCallback.h | 66 + dom/media/MediaResult.h | 82 + dom/media/MediaSegment.h | 501 + dom/media/MediaShutdownManager.cpp | 202 + dom/media/MediaShutdownManager.h | 96 + dom/media/MediaSpan.h | 131 + dom/media/MediaStatistics.h | 78 + dom/media/MediaStreamError.cpp | 112 + dom/media/MediaStreamError.h | 113 + dom/media/MediaStreamTrack.cpp | 635 + dom/media/MediaStreamTrack.h | 638 + dom/media/MediaStreamWindowCapturer.cpp | 77 + dom/media/MediaStreamWindowCapturer.h | 51 + dom/media/MediaTimer.cpp | 192 + dom/media/MediaTimer.h | 161 + dom/media/MediaTrack.cpp | 34 + dom/media/MediaTrack.h | 79 + dom/media/MediaTrackGraph.cpp | 4318 ++++ dom/media/MediaTrackGraph.h | 1345 ++ dom/media/MediaTrackGraphImpl.h | 1171 + dom/media/MediaTrackList.cpp | 142 + dom/media/MediaTrackList.h | 111 + dom/media/MediaTrackListener.cpp | 95 + dom/media/MediaTrackListener.h | 184 + dom/media/MemoryBlockCache.cpp | 213 + dom/media/MemoryBlockCache.h | 87 + dom/media/Pacer.h | 164 + dom/media/PeerConnection.sys.mjs | 2020 ++ dom/media/PeerConnectionIdp.sys.mjs | 378 + dom/media/PrincipalChangeObserver.h | 27 + dom/media/PrincipalHandle.h | 62 + dom/media/QueueObject.cpp | 29 + dom/media/QueueObject.h | 32 + dom/media/ReaderProxy.cpp | 212 + dom/media/ReaderProxy.h | 119 + dom/media/SeekJob.cpp | 31 + dom/media/SeekJob.h | 32 + dom/media/SeekTarget.h | 90 + dom/media/SelfRef.h | 46 + dom/media/SharedBuffer.h | 116 + dom/media/TimeUnits.cpp | 432 + dom/media/TimeUnits.h | 364 + dom/media/Tracing.cpp | 86 + dom/media/Tracing.h | 102 + dom/media/UnderrunHandler.h | 22 + dom/media/UnderrunHandlerLinux.cpp | 78 + dom/media/UnderrunHandlerNoop.cpp | 14 + dom/media/VideoFrameContainer.cpp | 257 + dom/media/VideoFrameContainer.h | 149 + dom/media/VideoFrameConverter.h | 451 + dom/media/VideoLimits.h | 21 + dom/media/VideoOutput.h | 307 + dom/media/VideoPlaybackQuality.cpp | 36 + dom/media/VideoPlaybackQuality.h | 48 + dom/media/VideoSegment.cpp | 108 + 
dom/media/VideoSegment.h | 187 + dom/media/VideoStreamTrack.cpp | 89 + dom/media/VideoStreamTrack.h | 58 + dom/media/VideoTrack.cpp | 92 + dom/media/VideoTrack.h | 61 + dom/media/VideoTrackList.cpp | 80 + dom/media/VideoTrackList.h | 51 + dom/media/VideoUtils.cpp | 1252 + dom/media/VideoUtils.h | 586 + dom/media/WavDumper.h | 136 + dom/media/WebMSample.h | 22788 +++++++++++++++++++ dom/media/XiphExtradata.cpp | 77 + dom/media/XiphExtradata.h | 27 + dom/media/autoplay/AutoplayPolicy.cpp | 496 + dom/media/autoplay/AutoplayPolicy.h | 80 + dom/media/autoplay/GVAutoplayPermissionRequest.cpp | 236 + dom/media/autoplay/GVAutoplayPermissionRequest.h | 86 + dom/media/autoplay/GVAutoplayRequestStatusIPC.h | 23 + dom/media/autoplay/GVAutoplayRequestUtils.h | 25 + dom/media/autoplay/moz.build | 32 + dom/media/autoplay/nsIAutoplay.idl | 17 + dom/media/autoplay/test/browser/audio.ogg | Bin 0 -> 14290 bytes dom/media/autoplay/test/browser/browser.toml | 37 + ...wser_autoplay_policy_detection_click_to_play.js | 120 + ...play_policy_detection_global_and_site_sticky.js | 167 + ...wser_autoplay_policy_detection_global_sticky.js | 104 + .../browser/browser_autoplay_policy_play_twice.js | 54 + .../browser_autoplay_policy_request_permission.js | 269 + .../browser/browser_autoplay_policy_touchScroll.js | 103 + .../browser_autoplay_policy_user_gestures.js | 276 + .../browser_autoplay_policy_webRTC_permission.js | 67 + .../browser/browser_autoplay_policy_web_audio.js | 220 + .../browser_autoplay_policy_web_audio_with_gum.js | 173 + .../test/browser/browser_autoplay_videoDocument.js | 80 + dom/media/autoplay/test/browser/file_empty.html | 8 + .../test/browser/file_mediaplayback_frame.html | 21 + .../test/browser/file_nonAutoplayAudio.html | 7 + dom/media/autoplay/test/browser/file_video.html | 9 + dom/media/autoplay/test/browser/head.js | 149 + .../autoplay/test/mochitest/AutoplayTestUtils.js | 46 + .../file_autoplay_gv_play_request_frame.html | 24 + .../file_autoplay_gv_play_request_window.html | 65 + .../file_autoplay_policy_activation_frame.html | 32 + .../file_autoplay_policy_activation_window.html | 80 + .../file_autoplay_policy_eventdown_activation.html | 85 + .../file_autoplay_policy_key_blacklist.html | 147 + ...autoplay_policy_play_before_loadedmetadata.html | 63 + .../file_autoplay_policy_unmute_pauses.html | 65 + dom/media/autoplay/test/mochitest/mochitest.toml | 69 + .../autoplay/test/mochitest/test_autoplay.html | 36 + .../mochitest/test_autoplay_contentEditable.html | 67 + .../mochitest/test_autoplay_gv_play_request.html | 221 + .../test/mochitest/test_autoplay_policy.html | 174 + .../mochitest/test_autoplay_policy_activation.html | 180 + .../test_autoplay_policy_eventdown_activation.html | 55 + .../test_autoplay_policy_key_blacklist.html | 47 + .../mochitest/test_autoplay_policy_permission.html | 80 + ...autoplay_policy_play_before_loadedmetadata.html | 73 + .../test_autoplay_policy_unmute_pauses.html | 64 + ...autoplay_policy_web_audio_AudioParamStream.html | 170 + ...y_policy_web_audio_createMediaStreamSource.html | 118 + ...licy_web_audio_mediaElementAudioSourceNode.html | 104 + ..._notResumePageInvokedSuspendedAudioContext.html | 95 + .../test/mochitest/test_streams_autoplay.html | 47 + dom/media/benchmark/sample | 7 + dom/media/bridge/IPeerConnection.idl | 62 + dom/media/bridge/MediaModule.cpp | 16 + dom/media/bridge/components.conf | 25 + dom/media/bridge/moz.build | 36 + dom/media/components.conf | 74 + dom/media/doctor/DDLifetime.cpp | 33 + dom/media/doctor/DDLifetime.h | 72 + 
dom/media/doctor/DDLifetimes.cpp | 84 + dom/media/doctor/DDLifetimes.h | 130 + dom/media/doctor/DDLogCategory.cpp | 30 + dom/media/doctor/DDLogCategory.h | 41 + dom/media/doctor/DDLogMessage.cpp | 42 + dom/media/doctor/DDLogMessage.h | 48 + dom/media/doctor/DDLogObject.cpp | 22 + dom/media/doctor/DDLogObject.h | 62 + dom/media/doctor/DDLogUtils.cpp | 11 + dom/media/doctor/DDLogUtils.h | 33 + dom/media/doctor/DDLogValue.cpp | 120 + dom/media/doctor/DDLogValue.h | 43 + dom/media/doctor/DDLoggedTypeTraits.h | 104 + dom/media/doctor/DDMediaLog.cpp | 27 + dom/media/doctor/DDMediaLog.h | 42 + dom/media/doctor/DDMediaLogs.cpp | 667 + dom/media/doctor/DDMediaLogs.h | 193 + dom/media/doctor/DDMessageIndex.h | 26 + dom/media/doctor/DDTimeStamp.cpp | 20 + dom/media/doctor/DDTimeStamp.h | 24 + dom/media/doctor/DecoderDoctorDiagnostics.cpp | 1319 ++ dom/media/doctor/DecoderDoctorDiagnostics.h | 167 + dom/media/doctor/DecoderDoctorLogger.cpp | 176 + dom/media/doctor/DecoderDoctorLogger.h | 472 + dom/media/doctor/MultiWriterQueue.h | 523 + dom/media/doctor/RollingNumber.h | 163 + dom/media/doctor/moz.build | 40 + dom/media/doctor/test/browser/browser.toml | 7 + .../doctor/test/browser/browser_decoderDoctor.js | 356 + .../test/browser/browser_doctor_notification.js | 265 + .../doctor/test/gtest/TestMultiWriterQueue.cpp | 382 + dom/media/doctor/test/gtest/TestRollingNumber.cpp | 146 + dom/media/doctor/test/gtest/moz.build | 19 + dom/media/driftcontrol/AudioChunkList.cpp | 119 + dom/media/driftcontrol/AudioChunkList.h | 124 + dom/media/driftcontrol/AudioDriftCorrection.cpp | 178 + dom/media/driftcontrol/AudioDriftCorrection.h | 80 + dom/media/driftcontrol/AudioResampler.cpp | 108 + dom/media/driftcontrol/AudioResampler.h | 99 + dom/media/driftcontrol/DriftController.cpp | 237 + dom/media/driftcontrol/DriftController.h | 163 + dom/media/driftcontrol/DynamicResampler.cpp | 284 + dom/media/driftcontrol/DynamicResampler.h | 350 + .../driftcontrol/gtest/TestAudioChunkList.cpp | 226 + .../gtest/TestAudioDriftCorrection.cpp | 529 + .../driftcontrol/gtest/TestAudioResampler.cpp | 677 + .../driftcontrol/gtest/TestDriftController.cpp | 168 + .../driftcontrol/gtest/TestDynamicResampler.cpp | 722 + dom/media/driftcontrol/gtest/moz.build | 21 + dom/media/driftcontrol/moz.build | 30 + dom/media/driftcontrol/plot.py | 135 + dom/media/eme/CDMCaps.cpp | 112 + dom/media/eme/CDMCaps.h | 82 + dom/media/eme/CDMProxy.h | 325 + dom/media/eme/DecryptorProxyCallback.h | 54 + dom/media/eme/DetailedPromise.cpp | 87 + dom/media/eme/DetailedPromise.h | 104 + dom/media/eme/EMEUtils.cpp | 248 + dom/media/eme/EMEUtils.h | 109 + dom/media/eme/KeySystemConfig.cpp | 360 + dom/media/eme/KeySystemConfig.h | 189 + dom/media/eme/KeySystemNames.h | 48 + dom/media/eme/MediaEncryptedEvent.cpp | 106 + dom/media/eme/MediaEncryptedEvent.h | 60 + dom/media/eme/MediaKeyError.cpp | 27 + dom/media/eme/MediaKeyError.h | 33 + dom/media/eme/MediaKeyMessageEvent.cpp | 101 + dom/media/eme/MediaKeyMessageEvent.h | 64 + dom/media/eme/MediaKeySession.cpp | 622 + dom/media/eme/MediaKeySession.h | 142 + dom/media/eme/MediaKeyStatusMap.cpp | 99 + dom/media/eme/MediaKeyStatusMap.h | 94 + dom/media/eme/MediaKeySystemAccess.cpp | 1161 + dom/media/eme/MediaKeySystemAccess.h | 84 + dom/media/eme/MediaKeySystemAccessManager.cpp | 702 + dom/media/eme/MediaKeySystemAccessManager.h | 237 + .../eme/MediaKeySystemAccessPermissionRequest.cpp | 91 + .../eme/MediaKeySystemAccessPermissionRequest.h | 74 + dom/media/eme/MediaKeys.cpp | 847 + dom/media/eme/MediaKeys.h | 236 + 
dom/media/eme/clearkey/ArrayUtils.h | 22 + dom/media/eme/clearkey/BigEndian.h | 59 + dom/media/eme/clearkey/ClearKeyBase64.cpp | 90 + dom/media/eme/clearkey/ClearKeyBase64.h | 29 + .../eme/clearkey/ClearKeyDecryptionManager.cpp | 289 + dom/media/eme/clearkey/ClearKeyDecryptionManager.h | 111 + dom/media/eme/clearkey/ClearKeyPersistence.cpp | 153 + dom/media/eme/clearkey/ClearKeyPersistence.h | 64 + dom/media/eme/clearkey/ClearKeySession.cpp | 72 + dom/media/eme/clearkey/ClearKeySession.h | 56 + dom/media/eme/clearkey/ClearKeySessionManager.cpp | 713 + dom/media/eme/clearkey/ClearKeySessionManager.h | 138 + dom/media/eme/clearkey/ClearKeyStorage.cpp | 194 + dom/media/eme/clearkey/ClearKeyStorage.h | 42 + dom/media/eme/clearkey/ClearKeyUtils.cpp | 661 + dom/media/eme/clearkey/ClearKeyUtils.h | 107 + dom/media/eme/clearkey/RefCounted.h | 85 + dom/media/eme/clearkey/gtest/TestClearKeyUtils.cpp | 81 + dom/media/eme/clearkey/gtest/moz.build | 15 + dom/media/eme/clearkey/moz.build | 37 + .../eme/mediadrm/MediaDrmCDMCallbackProxy.cpp | 115 + dom/media/eme/mediadrm/MediaDrmCDMCallbackProxy.h | 63 + dom/media/eme/mediadrm/MediaDrmCDMProxy.cpp | 453 + dom/media/eme/mediadrm/MediaDrmCDMProxy.h | 186 + dom/media/eme/mediadrm/MediaDrmProxySupport.cpp | 272 + dom/media/eme/mediadrm/MediaDrmProxySupport.h | 68 + dom/media/eme/mediadrm/moz.build | 19 + dom/media/eme/mediafoundation/WMFCDMImpl.cpp | 151 + dom/media/eme/mediafoundation/WMFCDMImpl.h | 124 + dom/media/eme/mediafoundation/WMFCDMProxy.cpp | 414 + dom/media/eme/mediafoundation/WMFCDMProxy.h | 149 + .../eme/mediafoundation/WMFCDMProxyCallback.cpp | 72 + .../eme/mediafoundation/WMFCDMProxyCallback.h | 38 + dom/media/eme/mediafoundation/moz.build | 21 + dom/media/eme/moz.build | 56 + dom/media/encoder/ContainerWriter.h | 75 + dom/media/encoder/EncodedFrame.h | 64 + dom/media/encoder/MediaEncoder.cpp | 1131 + dom/media/encoder/MediaEncoder.h | 401 + dom/media/encoder/Muxer.cpp | 185 + dom/media/encoder/Muxer.h | 71 + dom/media/encoder/OpusTrackEncoder.cpp | 441 + dom/media/encoder/OpusTrackEncoder.h | 117 + dom/media/encoder/TrackEncoder.cpp | 822 + dom/media/encoder/TrackEncoder.h | 501 + dom/media/encoder/TrackMetadataBase.h | 76 + dom/media/encoder/VP8TrackEncoder.cpp | 721 + dom/media/encoder/VP8TrackEncoder.h | 168 + dom/media/encoder/moz.build | 42 + dom/media/fake-cdm/cdm-fake.cpp | 63 + dom/media/fake-cdm/cdm-test-decryptor.cpp | 433 + dom/media/fake-cdm/cdm-test-decryptor.h | 106 + dom/media/fake-cdm/cdm-test-output-protection.h | 126 + dom/media/fake-cdm/cdm-test-storage.cpp | 194 + dom/media/fake-cdm/cdm-test-storage.h | 45 + dom/media/fake-cdm/manifest.json | 9 + dom/media/fake-cdm/moz.build | 33 + dom/media/flac/FlacDecoder.cpp | 45 + dom/media/flac/FlacDecoder.h | 30 + dom/media/flac/FlacDemuxer.cpp | 1047 + dom/media/flac/FlacDemuxer.h | 112 + dom/media/flac/FlacFrameParser.cpp | 245 + dom/media/flac/FlacFrameParser.h | 72 + dom/media/flac/moz.build | 24 + dom/media/fuzz/FuzzMedia.cpp | 66 + dom/media/fuzz/moz.build | 29 + dom/media/gmp-plugin-openh264/fakeopenh264.info | 4 + .../gmp-plugin-openh264/gmp-fake-openh264.cpp | 406 + dom/media/gmp-plugin-openh264/moz.build | 25 + dom/media/gmp/CDMStorageIdProvider.cpp | 68 + dom/media/gmp/CDMStorageIdProvider.h | 41 + dom/media/gmp/ChromiumCDMAdapter.cpp | 307 + dom/media/gmp/ChromiumCDMAdapter.h | 81 + dom/media/gmp/ChromiumCDMCallback.h | 56 + dom/media/gmp/ChromiumCDMCallbackProxy.cpp | 159 + dom/media/gmp/ChromiumCDMCallbackProxy.h | 62 + dom/media/gmp/ChromiumCDMChild.cpp | 866 + 
dom/media/gmp/ChromiumCDMChild.h | 148 + dom/media/gmp/ChromiumCDMParent.cpp | 1371 ++ dom/media/gmp/ChromiumCDMParent.h | 232 + dom/media/gmp/ChromiumCDMProxy.cpp | 638 + dom/media/gmp/ChromiumCDMProxy.h | 136 + dom/media/gmp/DecryptJob.cpp | 46 + dom/media/gmp/DecryptJob.h | 36 + dom/media/gmp/GMPCallbackBase.h | 24 + dom/media/gmp/GMPChild.cpp | 798 + dom/media/gmp/GMPChild.h | 123 + dom/media/gmp/GMPContentChild.cpp | 131 + dom/media/gmp/GMPContentChild.h | 70 + dom/media/gmp/GMPContentParent.cpp | 223 + dom/media/gmp/GMPContentParent.h | 108 + dom/media/gmp/GMPCrashHelper.h | 37 + dom/media/gmp/GMPCrashHelperHolder.cpp | 31 + dom/media/gmp/GMPCrashHelperHolder.h | 63 + dom/media/gmp/GMPDiskStorage.cpp | 454 + dom/media/gmp/GMPLoader.cpp | 203 + dom/media/gmp/GMPLoader.h | 80 + dom/media/gmp/GMPLog.h | 62 + dom/media/gmp/GMPMemoryStorage.cpp | 75 + dom/media/gmp/GMPMessageUtils.h | 190 + dom/media/gmp/GMPNativeTypes.h | 18 + dom/media/gmp/GMPParent.cpp | 1372 ++ dom/media/gmp/GMPParent.h | 258 + dom/media/gmp/GMPPlatform.cpp | 296 + dom/media/gmp/GMPPlatform.h | 45 + dom/media/gmp/GMPProcessChild.cpp | 56 + dom/media/gmp/GMPProcessChild.h | 40 + dom/media/gmp/GMPProcessParent.cpp | 382 + dom/media/gmp/GMPProcessParent.h | 105 + dom/media/gmp/GMPSanitizedExports.h | 22 + dom/media/gmp/GMPService.cpp | 574 + dom/media/gmp/GMPService.h | 129 + dom/media/gmp/GMPServiceChild.cpp | 649 + dom/media/gmp/GMPServiceChild.h | 176 + dom/media/gmp/GMPServiceParent.cpp | 2095 ++ dom/media/gmp/GMPServiceParent.h | 300 + dom/media/gmp/GMPSharedMemManager.cpp | 89 + dom/media/gmp/GMPSharedMemManager.h | 78 + dom/media/gmp/GMPStorage.h | 39 + dom/media/gmp/GMPStorageChild.cpp | 244 + dom/media/gmp/GMPStorageChild.h | 94 + dom/media/gmp/GMPStorageParent.cpp | 194 + dom/media/gmp/GMPStorageParent.h | 47 + dom/media/gmp/GMPTimerChild.cpp | 59 + dom/media/gmp/GMPTimerChild.h | 45 + dom/media/gmp/GMPTimerParent.cpp | 105 + dom/media/gmp/GMPTimerParent.h | 56 + dom/media/gmp/GMPTypes.ipdlh | 116 + dom/media/gmp/GMPUtils.cpp | 228 + dom/media/gmp/GMPUtils.h | 83 + dom/media/gmp/GMPVideoDecoderChild.cpp | 210 + dom/media/gmp/GMPVideoDecoderChild.h | 75 + dom/media/gmp/GMPVideoDecoderParent.cpp | 461 + dom/media/gmp/GMPVideoDecoderParent.h | 103 + dom/media/gmp/GMPVideoDecoderProxy.h | 57 + dom/media/gmp/GMPVideoEncodedFrameImpl.cpp | 226 + dom/media/gmp/GMPVideoEncodedFrameImpl.h | 116 + dom/media/gmp/GMPVideoEncoderChild.cpp | 203 + dom/media/gmp/GMPVideoEncoderChild.h | 75 + dom/media/gmp/GMPVideoEncoderParent.cpp | 307 + dom/media/gmp/GMPVideoEncoderParent.h | 85 + dom/media/gmp/GMPVideoEncoderProxy.h | 56 + dom/media/gmp/GMPVideoHost.cpp | 94 + dom/media/gmp/GMPVideoHost.h | 54 + dom/media/gmp/GMPVideoPlaneImpl.cpp | 179 + dom/media/gmp/GMPVideoPlaneImpl.h | 61 + dom/media/gmp/GMPVideoi420FrameImpl.cpp | 328 + dom/media/gmp/GMPVideoi420FrameImpl.h | 80 + dom/media/gmp/PChromiumCDM.ipdl | 129 + dom/media/gmp/PGMP.ipdl | 89 + dom/media/gmp/PGMPContent.ipdl | 37 + dom/media/gmp/PGMPService.ipdl | 42 + dom/media/gmp/PGMPStorage.ipdl | 37 + dom/media/gmp/PGMPTimer.ipdl | 26 + dom/media/gmp/PGMPVideoDecoder.ipdl | 50 + dom/media/gmp/PGMPVideoEncoder.ipdl | 48 + dom/media/gmp/README.txt | 1 + dom/media/gmp/gmp-api/gmp-entrypoints.h | 73 + dom/media/gmp/gmp-api/gmp-errors.h | 59 + dom/media/gmp/gmp-api/gmp-platform.h | 102 + dom/media/gmp/gmp-api/gmp-storage.h | 110 + dom/media/gmp/gmp-api/gmp-video-codec.h | 302 + dom/media/gmp/gmp-api/gmp-video-decode.h | 125 + dom/media/gmp/gmp-api/gmp-video-encode.h | 133 + 
dom/media/gmp/gmp-api/gmp-video-frame-encoded.h | 92 + dom/media/gmp/gmp-api/gmp-video-frame-i420.h | 142 + dom/media/gmp/gmp-api/gmp-video-frame.h | 48 + dom/media/gmp/gmp-api/gmp-video-host.h | 53 + dom/media/gmp/gmp-api/gmp-video-plane.h | 94 + dom/media/gmp/moz.build | 149 + .../gmp/mozIGeckoMediaPluginChromeService.idl | 58 + dom/media/gmp/mozIGeckoMediaPluginService.idl | 131 + dom/media/gmp/rlz/OWNERS | 4 + dom/media/gmp/rlz/README.mozilla | 4 + dom/media/gmp/rlz/lib/assert.h | 14 + dom/media/gmp/rlz/lib/crc8.cc | 90 + dom/media/gmp/rlz/lib/crc8.h | 24 + dom/media/gmp/rlz/lib/machine_id.cc | 93 + dom/media/gmp/rlz/lib/machine_id.h | 33 + dom/media/gmp/rlz/lib/string_utils.cc | 34 + dom/media/gmp/rlz/lib/string_utils.h | 20 + dom/media/gmp/rlz/mac/lib/machine_id_mac.cc | 322 + dom/media/gmp/rlz/moz.build | 34 + dom/media/gmp/rlz/win/lib/machine_id_win.cc | 136 + dom/media/gmp/widevine-adapter/WidevineFileIO.cpp | 100 + dom/media/gmp/widevine-adapter/WidevineFileIO.h | 41 + dom/media/gmp/widevine-adapter/WidevineUtils.cpp | 61 + dom/media/gmp/widevine-adapter/WidevineUtils.h | 84 + .../gmp/widevine-adapter/WidevineVideoFrame.cpp | 142 + .../gmp/widevine-adapter/WidevineVideoFrame.h | 54 + .../widevine-adapter/content_decryption_module.h | 1359 ++ .../content_decryption_module_export.h | 38 + .../content_decryption_module_ext.h | 64 + .../content_decryption_module_proxy.h | 121 + dom/media/gmp/widevine-adapter/moz.build | 21 + dom/media/gtest/AudioGenerator.h | 64 + dom/media/gtest/AudioVerifier.h | 155 + dom/media/gtest/Cargo.toml | 8 + dom/media/gtest/GMPTestMonitor.h | 40 + dom/media/gtest/MockCubeb.cpp | 739 + dom/media/gtest/MockCubeb.h | 595 + dom/media/gtest/MockMediaResource.cpp | 91 + dom/media/gtest/MockMediaResource.h | 56 + dom/media/gtest/TestAudioBuffer.cpp | 46 + dom/media/gtest/TestAudioBuffers.cpp | 68 + dom/media/gtest/TestAudioCallbackDriver.cpp | 477 + dom/media/gtest/TestAudioCompactor.cpp | 131 + dom/media/gtest/TestAudioDecoderInputTrack.cpp | 454 + dom/media/gtest/TestAudioDeviceEnumerator.cpp | 272 + dom/media/gtest/TestAudioInputProcessing.cpp | 395 + dom/media/gtest/TestAudioInputSource.cpp | 269 + dom/media/gtest/TestAudioMixer.cpp | 177 + dom/media/gtest/TestAudioPacketizer.cpp | 163 + dom/media/gtest/TestAudioRingBuffer.cpp | 1287 ++ dom/media/gtest/TestAudioSegment.cpp | 470 + dom/media/gtest/TestAudioSinkWrapper.cpp | 148 + dom/media/gtest/TestAudioTrackEncoder.cpp | 298 + dom/media/gtest/TestAudioTrackGraph.cpp | 2726 +++ dom/media/gtest/TestBenchmarkStorage.cpp | 92 + dom/media/gtest/TestBitWriter.cpp | 117 + dom/media/gtest/TestBlankVideoDataCreator.cpp | 30 + dom/media/gtest/TestBufferReader.cpp | 53 + dom/media/gtest/TestCDMStorage.cpp | 1342 ++ dom/media/gtest/TestCubebInputStream.cpp | 187 + dom/media/gtest/TestDataMutex.cpp | 46 + dom/media/gtest/TestDecoderBenchmark.cpp | 66 + dom/media/gtest/TestDeviceInputTrack.cpp | 558 + dom/media/gtest/TestDriftCompensation.cpp | 86 + dom/media/gtest/TestGMPCrossOrigin.cpp | 212 + dom/media/gtest/TestGMPRemoveAndDelete.cpp | 472 + dom/media/gtest/TestGMPUtils.cpp | 84 + dom/media/gtest/TestGroupId.cpp | 302 + dom/media/gtest/TestIntervalSet.cpp | 819 + dom/media/gtest/TestKeyValueStorage.cpp | 109 + dom/media/gtest/TestMP3Demuxer.cpp | 579 + dom/media/gtest/TestMP4Demuxer.cpp | 613 + dom/media/gtest/TestMediaCodecsSupport.cpp | 239 + dom/media/gtest/TestMediaDataDecoder.cpp | 97 + dom/media/gtest/TestMediaDataEncoder.cpp | 775 + dom/media/gtest/TestMediaEventSource.cpp | 490 + 
dom/media/gtest/TestMediaMIMETypes.cpp | 284 + dom/media/gtest/TestMediaQueue.cpp | 288 + dom/media/gtest/TestMediaSpan.cpp | 110 + dom/media/gtest/TestMediaUtils.cpp | 240 + dom/media/gtest/TestMuxer.cpp | 212 + dom/media/gtest/TestOggWriter.cpp | 62 + dom/media/gtest/TestOpusParser.cpp | 24 + dom/media/gtest/TestPacer.cpp | 189 + dom/media/gtest/TestRTCStatsTimestampMaker.cpp | 115 + dom/media/gtest/TestRust.cpp | 10 + dom/media/gtest/TestTimeUnit.cpp | 295 + dom/media/gtest/TestVPXDecoding.cpp | 96 + dom/media/gtest/TestVideoFrameConverter.cpp | 508 + dom/media/gtest/TestVideoSegment.cpp | 44 + dom/media/gtest/TestVideoTrackEncoder.cpp | 1467 ++ dom/media/gtest/TestVideoUtils.cpp | 128 + dom/media/gtest/TestWebMBuffered.cpp | 234 + dom/media/gtest/TestWebMWriter.cpp | 388 + dom/media/gtest/YUVBufferGenerator.cpp | 144 + dom/media/gtest/YUVBufferGenerator.h | 32 + dom/media/gtest/dash_dashinit.mp4 | Bin 0 -> 80388 bytes dom/media/gtest/hello.rs | 6 + dom/media/gtest/id3v2header.mp3 | Bin 0 -> 191302 bytes dom/media/gtest/moz.build | 148 + dom/media/gtest/mp4_demuxer/TestInterval.cpp | 88 + dom/media/gtest/mp4_demuxer/TestMP4.cpp | 133 + dom/media/gtest/mp4_demuxer/TestParser.cpp | 1022 + dom/media/gtest/mp4_demuxer/moz.build | 66 + dom/media/gtest/mp4_demuxer/test_case_1156505.mp4 | Bin 0 -> 296 bytes dom/media/gtest/mp4_demuxer/test_case_1181213.mp4 | Bin 0 -> 2834 bytes dom/media/gtest/mp4_demuxer/test_case_1181215.mp4 | Bin 0 -> 3086 bytes dom/media/gtest/mp4_demuxer/test_case_1181223.mp4 | Bin 0 -> 2834 bytes dom/media/gtest/mp4_demuxer/test_case_1181719.mp4 | Bin 0 -> 3095 bytes dom/media/gtest/mp4_demuxer/test_case_1185230.mp4 | Bin 0 -> 3250 bytes dom/media/gtest/mp4_demuxer/test_case_1187067.mp4 | Bin 0 -> 2835 bytes dom/media/gtest/mp4_demuxer/test_case_1200326.mp4 | Bin 0 -> 1694 bytes dom/media/gtest/mp4_demuxer/test_case_1204580.mp4 | Bin 0 -> 5833 bytes dom/media/gtest/mp4_demuxer/test_case_1216748.mp4 | Bin 0 -> 296 bytes dom/media/gtest/mp4_demuxer/test_case_1296473.mp4 | Bin 0 -> 5995 bytes dom/media/gtest/mp4_demuxer/test_case_1296532.mp4 | Bin 0 -> 152132 bytes .../gtest/mp4_demuxer/test_case_1301065-harder.mp4 | Bin 0 -> 632 bytes .../gtest/mp4_demuxer/test_case_1301065-i64max.mp4 | Bin 0 -> 632 bytes .../gtest/mp4_demuxer/test_case_1301065-i64min.mp4 | Bin 0 -> 632 bytes .../gtest/mp4_demuxer/test_case_1301065-max-ez.mp4 | Bin 0 -> 632 bytes .../gtest/mp4_demuxer/test_case_1301065-max-ok.mp4 | Bin 0 -> 632 bytes .../gtest/mp4_demuxer/test_case_1301065-overfl.mp4 | Bin 0 -> 632 bytes .../gtest/mp4_demuxer/test_case_1301065-u32max.mp4 | Bin 0 -> 632 bytes .../gtest/mp4_demuxer/test_case_1301065-u64max.mp4 | Bin 0 -> 632 bytes dom/media/gtest/mp4_demuxer/test_case_1301065.mp4 | Bin 0 -> 632 bytes dom/media/gtest/mp4_demuxer/test_case_1329061.mov | Bin 0 -> 93681 bytes dom/media/gtest/mp4_demuxer/test_case_1351094.mp4 | Bin 0 -> 80388 bytes dom/media/gtest/mp4_demuxer/test_case_1388991.mp4 | Bin 0 -> 288821 bytes dom/media/gtest/mp4_demuxer/test_case_1389299.mp4 | Bin 0 -> 152132 bytes dom/media/gtest/mp4_demuxer/test_case_1389527.mp4 | Bin 0 -> 92225 bytes dom/media/gtest/mp4_demuxer/test_case_1395244.mp4 | Bin 0 -> 13651 bytes dom/media/gtest/mp4_demuxer/test_case_1410565.mp4 | Bin 0 -> 955656 bytes ...t_case_1513651-2-sample-description-entries.mp4 | Bin 0 -> 1100 bytes ...test_case_1519617-cenc-init-with-track_id-0.mp4 | Bin 0 -> 767 bytes .../test_case_1519617-track2-trafs-removed.mp4 | Bin 0 -> 282228 bytes .../test_case_1519617-video-has-track_id-0.mp4 | Bin 0 -> 
282024 bytes ...e-description-entires-with-identical-crypto.mp4 | Bin 0 -> 1119 bytes dom/media/gtest/negative_duration.mp4 | Bin 0 -> 684 bytes dom/media/gtest/noise.mp3 | Bin 0 -> 965257 bytes dom/media/gtest/noise_vbr.mp3 | Bin 0 -> 583679 bytes dom/media/gtest/short-zero-in-moov.mp4 | Bin 0 -> 13655 bytes dom/media/gtest/short-zero-inband.mov | Bin 0 -> 93641 bytes dom/media/gtest/small-shot-false-positive.mp3 | Bin 0 -> 6845 bytes dom/media/gtest/small-shot-partial-xing.mp3 | Bin 0 -> 6825 bytes dom/media/gtest/small-shot.mp3 | Bin 0 -> 6825 bytes dom/media/gtest/test.webm | Bin 0 -> 1980 bytes dom/media/gtest/test_InvalidElementId.webm | Bin 0 -> 1122 bytes dom/media/gtest/test_InvalidElementSize.webm | Bin 0 -> 1122 bytes .../gtest/test_InvalidLargeEBMLMaxIdLength.webm | Bin 0 -> 1122 bytes dom/media/gtest/test_InvalidLargeElementId.webm | Bin 0 -> 1129 bytes .../gtest/test_InvalidSmallEBMLMaxIdLength.webm | Bin 0 -> 1122 bytes .../gtest/test_ValidLargeEBMLMaxIdLength.webm | Bin 0 -> 1128 bytes .../gtest/test_ValidSmallEBMLMaxSizeLength.webm | Bin 0 -> 1116 bytes dom/media/gtest/test_case_1224361.vp8.ivf | Bin 0 -> 1497 bytes dom/media/gtest/test_case_1224363.vp8.ivf | Bin 0 -> 1388 bytes dom/media/gtest/test_case_1224369.vp8.ivf | Bin 0 -> 204 bytes dom/media/gtest/test_vbri.mp3 | Bin 0 -> 16519 bytes dom/media/hls/HLSDecoder.cpp | 310 + dom/media/hls/HLSDecoder.h | 79 + dom/media/hls/HLSDemuxer.cpp | 628 + dom/media/hls/HLSDemuxer.h | 137 + dom/media/hls/HLSUtils.cpp | 12 + dom/media/hls/HLSUtils.h | 21 + dom/media/hls/moz.build | 24 + dom/media/imagecapture/CaptureTask.cpp | 197 + dom/media/imagecapture/CaptureTask.h | 90 + dom/media/imagecapture/ImageCapture.cpp | 212 + dom/media/imagecapture/ImageCapture.h | 95 + dom/media/imagecapture/moz.build | 16 + dom/media/ipc/MFCDMChild.cpp | 449 + dom/media/ipc/MFCDMChild.h | 154 + dom/media/ipc/MFCDMParent.cpp | 1272 ++ dom/media/ipc/MFCDMParent.h | 168 + dom/media/ipc/MFCDMSerializers.h | 58 + dom/media/ipc/MFMediaEngineChild.cpp | 402 + dom/media/ipc/MFMediaEngineChild.h | 138 + dom/media/ipc/MFMediaEngineParent.cpp | 711 + dom/media/ipc/MFMediaEngineParent.h | 139 + dom/media/ipc/MFMediaEngineUtils.cpp | 199 + dom/media/ipc/MFMediaEngineUtils.h | 195 + dom/media/ipc/MediaIPCUtils.h | 376 + dom/media/ipc/PMFCDM.ipdl | 121 + dom/media/ipc/PMFMediaEngine.ipdl | 63 + dom/media/ipc/PMediaDecoderParams.ipdlh | 30 + dom/media/ipc/PRDD.ipdl | 128 + dom/media/ipc/PRemoteDecoder.ipdl | 80 + dom/media/ipc/PRemoteDecoderManager.ipdl | 70 + dom/media/ipc/RDDChild.cpp | 225 + dom/media/ipc/RDDChild.h | 86 + dom/media/ipc/RDDParent.cpp | 341 + dom/media/ipc/RDDParent.h | 85 + dom/media/ipc/RDDProcessHost.cpp | 299 + dom/media/ipc/RDDProcessHost.h | 160 + dom/media/ipc/RDDProcessImpl.cpp | 52 + dom/media/ipc/RDDProcessImpl.h | 39 + dom/media/ipc/RDDProcessManager.cpp | 413 + dom/media/ipc/RDDProcessManager.h | 128 + dom/media/ipc/RemoteAudioDecoder.cpp | 121 + dom/media/ipc/RemoteAudioDecoder.h | 53 + dom/media/ipc/RemoteDecodeUtils.cpp | 103 + dom/media/ipc/RemoteDecodeUtils.h | 31 + dom/media/ipc/RemoteDecoderChild.cpp | 315 + dom/media/ipc/RemoteDecoderChild.h | 90 + dom/media/ipc/RemoteDecoderManagerChild.cpp | 909 + dom/media/ipc/RemoteDecoderManagerChild.h | 153 + dom/media/ipc/RemoteDecoderManagerParent.cpp | 378 + dom/media/ipc/RemoteDecoderManagerParent.h | 101 + dom/media/ipc/RemoteDecoderModule.cpp | 84 + dom/media/ipc/RemoteDecoderModule.h | 52 + dom/media/ipc/RemoteDecoderParent.cpp | 228 + dom/media/ipc/RemoteDecoderParent.h | 74 + 
dom/media/ipc/RemoteImageHolder.cpp | 192 + dom/media/ipc/RemoteImageHolder.h | 71 + dom/media/ipc/RemoteMediaData.cpp | 370 + dom/media/ipc/RemoteMediaData.h | 394 + dom/media/ipc/RemoteMediaDataDecoder.cpp | 163 + dom/media/ipc/RemoteMediaDataDecoder.h | 68 + dom/media/ipc/RemoteVideoDecoder.cpp | 306 + dom/media/ipc/RemoteVideoDecoder.h | 80 + dom/media/ipc/ShmemRecycleAllocator.h | 60 + dom/media/ipc/moz.build | 107 + .../mediacapabilities/BenchmarkStorageChild.cpp | 36 + .../mediacapabilities/BenchmarkStorageChild.h | 28 + .../mediacapabilities/BenchmarkStorageParent.cpp | 130 + .../mediacapabilities/BenchmarkStorageParent.h | 43 + dom/media/mediacapabilities/DecoderBenchmark.cpp | 243 + dom/media/mediacapabilities/DecoderBenchmark.h | 77 + dom/media/mediacapabilities/KeyValueStorage.cpp | 234 + dom/media/mediacapabilities/KeyValueStorage.h | 48 + dom/media/mediacapabilities/MediaCapabilities.cpp | 656 + dom/media/mediacapabilities/MediaCapabilities.h | 104 + dom/media/mediacapabilities/PBenchmarkStorage.ipdl | 23 + dom/media/mediacapabilities/moz.build | 32 + dom/media/mediacontrol/AudioFocusManager.cpp | 134 + dom/media/mediacontrol/AudioFocusManager.h | 54 + dom/media/mediacontrol/ContentMediaController.cpp | 376 + dom/media/mediacontrol/ContentMediaController.h | 109 + .../mediacontrol/ContentPlaybackController.cpp | 210 + dom/media/mediacontrol/ContentPlaybackController.h | 73 + dom/media/mediacontrol/FetchImageHelper.cpp | 164 + dom/media/mediacontrol/FetchImageHelper.h | 82 + dom/media/mediacontrol/MediaControlIPC.h | 75 + dom/media/mediacontrol/MediaControlKeyManager.cpp | 228 + dom/media/mediacontrol/MediaControlKeyManager.h | 73 + dom/media/mediacontrol/MediaControlKeySource.cpp | 122 + dom/media/mediacontrol/MediaControlKeySource.h | 122 + dom/media/mediacontrol/MediaControlService.cpp | 540 + dom/media/mediacontrol/MediaControlService.h | 181 + dom/media/mediacontrol/MediaControlUtils.cpp | 26 + dom/media/mediacontrol/MediaControlUtils.h | 216 + dom/media/mediacontrol/MediaController.cpp | 561 + dom/media/mediacontrol/MediaController.h | 214 + dom/media/mediacontrol/MediaPlaybackStatus.cpp | 142 + dom/media/mediacontrol/MediaPlaybackStatus.h | 151 + dom/media/mediacontrol/MediaStatusManager.cpp | 482 + dom/media/mediacontrol/MediaStatusManager.h | 276 + dom/media/mediacontrol/PositionStateEvent.h | 60 + dom/media/mediacontrol/moz.build | 43 + dom/media/mediacontrol/tests/browser/browser.toml | 71 + .../browser/browser_audio_focus_management.js | 179 + ...ontrol_page_with_audible_and_inaudible_media.js | 94 + .../browser/browser_default_action_handler.js | 419 + ...wser_media_control_audio_focus_within_a_page.js | 355 + .../browser_media_control_before_media_starts.js | 205 + .../browser_media_control_captured_audio.js | 45 + .../browser/browser_media_control_keys_event.js | 62 + .../browser_media_control_main_controller.js | 338 + .../browser/browser_media_control_metadata.js | 413 + .../browser_media_control_non_eligible_media.js | 204 + .../browser_media_control_playback_state.js | 113 + .../browser_media_control_position_state.js | 147 + .../tests/browser/browser_media_control_seekto.js | 91 + .../browser/browser_media_control_stop_timer.js | 79 + .../browser_media_control_supported_keys.js | 127 + .../tests/browser/browser_nosrc_and_error_media.js | 102 + .../browser_only_control_non_real_time_media.js | 76 + ...ove_controllable_media_for_active_controller.js | 108 + .../browser/browser_resume_latest_paused_media.js | 189 + 
.../tests/browser/browser_seek_captured_audio.js | 59 + ...wser_stop_control_after_media_reaches_to_end.js | 108 + .../tests/browser/browser_suspend_inactive_tab.js | 131 + .../browser/file_audio_and_inaudible_media.html | 10 + .../mediacontrol/tests/browser/file_autoplay.html | 9 + .../tests/browser/file_empty_title.html | 9 + .../tests/browser/file_error_media.html | 9 + .../tests/browser/file_iframe_media.html | 94 + ...n_frame_with_multiple_child_session_frames.html | 11 + .../tests/browser/file_multiple_audible_media.html | 11 + .../tests/browser/file_muted_autoplay.html | 9 + .../tests/browser/file_no_src_media.html | 9 + .../tests/browser/file_non_autoplay.html | 11 + .../tests/browser/file_non_eligible_media.html | 14 + .../tests/browser/file_non_looping_media.html | 9 + dom/media/mediacontrol/tests/browser/head.js | 402 + .../tests/gtest/MediaKeyListenerTest.h | 39 + .../tests/gtest/TestAudioFocusManager.cpp | 163 + .../tests/gtest/TestMediaControlService.cpp | 64 + .../tests/gtest/TestMediaController.cpp | 204 + .../tests/gtest/TestMediaKeysEvent.cpp | 49 + .../tests/gtest/TestMediaKeysEventMac.mm | 145 + .../tests/gtest/TestMediaKeysEventMediaCenter.mm | 169 + dom/media/mediacontrol/tests/gtest/moz.build | 23 + dom/media/mediasession/MediaMetadata.cpp | 154 + dom/media/mediasession/MediaMetadata.h | 97 + dom/media/mediasession/MediaSession.cpp | 335 + dom/media/mediasession/MediaSession.h | 132 + dom/media/mediasession/MediaSessionIPCUtils.h | 102 + dom/media/mediasession/moz.build | 24 + .../mediasession/test/MediaSessionTestUtils.js | 30 + dom/media/mediasession/test/browser.toml | 9 + .../test/browser_active_mediasession_among_tabs.js | 198 + .../mediasession/test/crashtests/crashtests.list | 1 + .../test/crashtests/inactive-mediasession.html | 16 + .../mediasession/test/file_media_session.html | 30 + .../test/file_trigger_actionhanlder_frame.html | 40 + .../test/file_trigger_actionhanlder_window.html | 107 + dom/media/mediasession/test/mochitest.toml | 14 + .../mediasession/test/test_setactionhandler.html | 84 + .../test/test_trigger_actionhanlder.html | 56 + dom/media/mediasink/AudioDecoderInputTrack.cpp | 674 + dom/media/mediasink/AudioDecoderInputTrack.h | 234 + dom/media/mediasink/AudioSink.cpp | 649 + dom/media/mediasink/AudioSink.h | 180 + dom/media/mediasink/AudioSinkWrapper.cpp | 579 + dom/media/mediasink/AudioSinkWrapper.h | 170 + dom/media/mediasink/DecodedStream.cpp | 1196 + dom/media/mediasink/DecodedStream.h | 150 + dom/media/mediasink/MediaSink.h | 150 + dom/media/mediasink/VideoSink.cpp | 704 + dom/media/mediasink/VideoSink.h | 176 + dom/media/mediasink/moz.build | 26 + dom/media/mediasource/AsyncEventRunner.h | 32 + dom/media/mediasource/ContainerParser.cpp | 757 + dom/media/mediasource/ContainerParser.h | 97 + dom/media/mediasource/MediaSource.cpp | 704 + dom/media/mediasource/MediaSource.h | 182 + dom/media/mediasource/MediaSourceDecoder.cpp | 372 + dom/media/mediasource/MediaSourceDecoder.h | 101 + dom/media/mediasource/MediaSourceDemuxer.cpp | 549 + dom/media/mediasource/MediaSourceDemuxer.h | 170 + dom/media/mediasource/MediaSourceUtils.cpp | 49 + dom/media/mediasource/MediaSourceUtils.h | 20 + dom/media/mediasource/ResourceQueue.cpp | 204 + dom/media/mediasource/ResourceQueue.h | 88 + dom/media/mediasource/SourceBuffer.cpp | 783 + dom/media/mediasource/SourceBuffer.h | 210 + dom/media/mediasource/SourceBufferAttributes.h | 116 + dom/media/mediasource/SourceBufferList.cpp | 187 + dom/media/mediasource/SourceBufferList.h | 110 + 
dom/media/mediasource/SourceBufferResource.cpp | 144 + dom/media/mediasource/SourceBufferResource.h | 143 + dom/media/mediasource/SourceBufferTask.h | 126 + dom/media/mediasource/TrackBuffersManager.cpp | 3186 +++ dom/media/mediasource/TrackBuffersManager.h | 575 + .../mediasource/gtest/TestContainerParser.cpp | 148 + .../gtest/TestExtractAV1CodecDetails.cpp | 290 + .../gtest/TestExtractVPXCodecDetails.cpp | 141 + dom/media/mediasource/gtest/moz.build | 22 + dom/media/mediasource/moz.build | 45 + dom/media/mediasource/test/.eslintrc.js | 28 + dom/media/mediasource/test/1516754.webm | Bin 0 -> 1081344 bytes dom/media/mediasource/test/1516754.webm^headers^ | 1 + dom/media/mediasource/test/aac20-48000-64000-1.m4s | Bin 0 -> 24328 bytes .../test/aac20-48000-64000-1.m4s^headers^ | 1 + dom/media/mediasource/test/aac20-48000-64000-2.m4s | Bin 0 -> 24132 bytes .../test/aac20-48000-64000-2.m4s^headers^ | 1 + .../mediasource/test/aac20-48000-64000-init.mp4 | Bin 0 -> 1246 bytes .../test/aac20-48000-64000-init.mp4^headers^ | 1 + .../mediasource/test/aac51-48000-128000-1.m4s | Bin 0 -> 48979 bytes .../test/aac51-48000-128000-1.m4s^headers^ | 1 + .../mediasource/test/aac51-48000-128000-2.m4s | Bin 0 -> 47727 bytes .../test/aac51-48000-128000-2.m4s^headers^ | 1 + .../mediasource/test/aac51-48000-128000-init.mp4 | Bin 0 -> 634 bytes .../test/aac51-48000-128000-init.mp4^headers^ | 1 + dom/media/mediasource/test/avc3/init.mp4 | Bin 0 -> 687 bytes dom/media/mediasource/test/avc3/init.mp4^headers^ | 1 + dom/media/mediasource/test/avc3/segment1.m4s | Bin 0 -> 696869 bytes .../mediasource/test/avc3/segment1.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop1.m4s | Bin 0 -> 24424 bytes .../mediasource/test/bipbop/bipbop1.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop10.m4s | Bin 0 -> 18279 bytes .../mediasource/test/bipbop/bipbop10.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop11.m4s | Bin 0 -> 24607 bytes .../mediasource/test/bipbop/bipbop11.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop12.m4s | Bin 0 -> 22676 bytes .../mediasource/test/bipbop/bipbop12.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop13.m4s | Bin 0 -> 9847 bytes .../mediasource/test/bipbop/bipbop13.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop2.m4s | Bin 0 -> 22205 bytes .../mediasource/test/bipbop/bipbop2.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop2s.mp4 | Bin 0 -> 48024 bytes .../mediasource/test/bipbop/bipbop2s.mp4^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop3.m4s | Bin 0 -> 24013 bytes .../mediasource/test/bipbop/bipbop3.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop4.m4s | Bin 0 -> 23112 bytes .../mediasource/test/bipbop/bipbop4.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop5.m4s | Bin 0 -> 18367 bytes .../mediasource/test/bipbop/bipbop5.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop6.m4s | Bin 0 -> 24455 bytes .../mediasource/test/bipbop/bipbop6.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop7.m4s | Bin 0 -> 22442 bytes .../mediasource/test/bipbop/bipbop7.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop8.m4s | Bin 0 -> 24356 bytes .../mediasource/test/bipbop/bipbop8.m4s^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop9.m4s | Bin 0 -> 23252 bytes .../mediasource/test/bipbop/bipbop9.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_300-3s.webm | Bin 0 -> 79429 bytes .../test/bipbop/bipbop_300-3s.webm^headers^ | 1 + .../test/bipbop/bipbop_480_624kbps-video1.m4s | Bin 0 -> 
66806 bytes .../bipbop/bipbop_480_624kbps-video1.m4s^headers^ | 1 + .../test/bipbop/bipbop_480_624kbps-video2.m4s | Bin 0 -> 65292 bytes .../bipbop/bipbop_480_624kbps-video2.m4s^headers^ | 1 + .../test/bipbop/bipbop_480_624kbps-videoinit.mp4 | Bin 0 -> 1410 bytes .../bipbop_480_624kbps-videoinit.mp4^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio1.m4s | Bin 0 -> 694 bytes .../test/bipbop/bipbop_audio1.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio10.m4s | Bin 0 -> 879 bytes .../test/bipbop/bipbop_audio10.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio11.m4s | Bin 0 -> 208 bytes .../test/bipbop/bipbop_audio11.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio2.m4s | Bin 0 -> 750 bytes .../test/bipbop/bipbop_audio2.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio3.m4s | Bin 0 -> 724 bytes .../test/bipbop/bipbop_audio3.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio4.m4s | Bin 0 -> 806 bytes .../test/bipbop/bipbop_audio4.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio5.m4s | Bin 0 -> 822 bytes .../test/bipbop/bipbop_audio5.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio6.m4s | Bin 0 -> 833 bytes .../test/bipbop/bipbop_audio6.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio7.m4s | Bin 0 -> 888 bytes .../test/bipbop/bipbop_audio7.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio8.m4s | Bin 0 -> 829 bytes .../test/bipbop/bipbop_audio8.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audio9.m4s | Bin 0 -> 778 bytes .../test/bipbop/bipbop_audio9.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_audioinit.mp4 | Bin 0 -> 825 bytes .../test/bipbop/bipbop_audioinit.mp4^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbop_dash.mpd | 48 + .../test/bipbop/bipbop_offset_0.0-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.0-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.0-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.0-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.0-init.mp4 | Bin 0 -> 1441 bytes .../bipbop/bipbop_offset_0.0-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_0.1-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.1-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.1-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.1-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.1-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_0.1-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_0.2-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.2-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.2-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.2-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.2-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_0.2-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_0.3-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.3-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.3-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.3-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.3-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_0.3-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_0.4-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.4-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.4-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.4-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.4-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_0.4-init.mp4^headers^ | 1 + 
.../test/bipbop/bipbop_offset_0.5-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.5-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.5-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.5-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.5-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_0.5-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_0.6-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.6-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.6-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.6-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.6-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_0.6-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_0.7-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.7-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.7-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.7-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.7-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_0.7-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_0.8-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.8-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.8-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.8-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.8-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_0.8-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_0.9-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_0.9-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.9-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_0.9-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_0.9-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_0.9-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_1.0-1.m4s | Bin 0 -> 110108 bytes .../test/bipbop/bipbop_offset_1.0-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_1.0-2.m4s | Bin 0 -> 116079 bytes .../test/bipbop/bipbop_offset_1.0-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_1.0-init.mp4 | Bin 0 -> 1453 bytes .../bipbop/bipbop_offset_1.0-init.mp4^headers^ | 1 + .../test/bipbop/bipbop_offset_1.1-1.m4s | Bin 0 -> 143079 bytes .../test/bipbop/bipbop_offset_1.1-1.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_1.1-2.m4s | Bin 0 -> 137858 bytes .../test/bipbop/bipbop_offset_1.1-2.m4s^headers^ | 1 + .../test/bipbop/bipbop_offset_1.1-init.mp4 | Bin 0 -> 1336 bytes .../bipbop/bipbop_offset_1.1-init.mp4^headers^ | 1 + .../bipbop/bipbop_trailing_skip_box_video1.m4s | Bin 0 -> 1023860 bytes .../bipbop_trailing_skip_box_video1.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video1.m4s | Bin 0 -> 23860 bytes .../test/bipbop/bipbop_video1.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video10.m4s | Bin 0 -> 18109 bytes .../test/bipbop/bipbop_video10.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video11.m4s | Bin 0 -> 23969 bytes .../test/bipbop/bipbop_video11.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video12.m4s | Bin 0 -> 21937 bytes .../test/bipbop/bipbop_video12.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video13.m4s | Bin 0 -> 16265 bytes .../test/bipbop/bipbop_video13.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video2.m4s | Bin 0 -> 21595 bytes .../test/bipbop/bipbop_video2.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video3.m4s | Bin 0 -> 23429 bytes .../test/bipbop/bipbop_video3.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video4.m4s | Bin 0 -> 22446 bytes 
.../test/bipbop/bipbop_video4.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video5.m4s | Bin 0 -> 18191 bytes .../test/bipbop/bipbop_video5.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video6.m4s | Bin 0 -> 23773 bytes .../test/bipbop/bipbop_video6.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video7.m4s | Bin 0 -> 21749 bytes .../test/bipbop/bipbop_video7.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video8.m4s | Bin 0 -> 23608 bytes .../test/bipbop/bipbop_video8.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_video9.m4s | Bin 0 -> 22553 bytes .../test/bipbop/bipbop_video9.m4s^headers^ | 1 + .../mediasource/test/bipbop/bipbop_videoinit.mp4 | Bin 0 -> 887 bytes .../test/bipbop/bipbop_videoinit.mp4^headers^ | 1 + dom/media/mediasource/test/bipbop/bipbopinit.mp4 | Bin 0 -> 1395 bytes .../test/bipbop/bipbopinit.mp4^headers^ | 1 + dom/media/mediasource/test/bug1718709_high_res.mp4 | Bin 0 -> 1038283 bytes dom/media/mediasource/test/bug1718709_low_res.mp4 | Bin 0 -> 245318 bytes dom/media/mediasource/test/crashtests/1005366.html | 27 + dom/media/mediasource/test/crashtests/1059035.html | 26 + dom/media/mediasource/test/crashtests/926665.html | 26 + dom/media/mediasource/test/crashtests/931388.html | 17 + .../mediasource/test/crashtests/crashtests.list | 4 + dom/media/mediasource/test/flac/00001.m4s | Bin 0 -> 658125 bytes dom/media/mediasource/test/flac/00001.m4s^headers^ | 1 + dom/media/mediasource/test/flac/00002.m4s | Bin 0 -> 685567 bytes dom/media/mediasource/test/flac/00002.m4s^headers^ | 1 + dom/media/mediasource/test/flac/00003.m4s | Bin 0 -> 747868 bytes dom/media/mediasource/test/flac/00003.m4s^headers^ | 1 + dom/media/mediasource/test/flac/IS.mp4 | Bin 0 -> 608 bytes dom/media/mediasource/test/flac/IS.mp4^headers^ | 1 + dom/media/mediasource/test/init-trackid2.mp4 | Bin 0 -> 9108 bytes .../mediasource/test/init-trackid2.mp4^headers^ | 1 + dom/media/mediasource/test/init-trackid3.mp4 | Bin 0 -> 9108 bytes .../mediasource/test/init-trackid3.mp4^headers^ | 1 + dom/media/mediasource/test/mediasource.js | 235 + dom/media/mediasource/test/mochitest.toml | 272 + dom/media/mediasource/test/mochitest_compat.toml | 330 + dom/media/mediasource/test/seek.webm | Bin 0 -> 215529 bytes dom/media/mediasource/test/seek.webm^headers^ | 1 + dom/media/mediasource/test/seek_lowres.webm | Bin 0 -> 100749 bytes .../mediasource/test/seek_lowres.webm^headers^ | 1 + dom/media/mediasource/test/segment-2.0001.m4s | Bin 0 -> 34778 bytes .../mediasource/test/segment-2.0001.m4s^headers^ | 1 + dom/media/mediasource/test/segment-2.0002.m4s | Bin 0 -> 34653 bytes .../mediasource/test/segment-2.0002.m4s^headers^ | 1 + dom/media/mediasource/test/segment-3.0001.m4s | Bin 0 -> 34787 bytes .../mediasource/test/segment-3.0001.m4s^headers^ | 1 + dom/media/mediasource/test/segment-3.0002.m4s | Bin 0 -> 34640 bytes .../mediasource/test/segment-3.0002.m4s^headers^ | 1 + .../mediasource/test/tags_before_cluster.webm | Bin 0 -> 111714 bytes .../test/tags_before_cluster.webm^header^ | 1 + dom/media/mediasource/test/test_AVC3_mp4.html | 38 + .../test/test_AbortAfterPartialMediaSegment.html | 62 + .../test/test_AppendPartialInitSegment.html | 43 + .../mediasource/test/test_AudioChange_mp4.html | 49 + .../test/test_AudioChange_mp4_WebAudio.html | 55 + .../mediasource/test/test_AutoRevocation.html | 40 + dom/media/mediasource/test/test_BufferedSeek.html | 44 + .../mediasource/test/test_BufferedSeek_mp4.html | 43 + dom/media/mediasource/test/test_BufferingWait.html | 52 + 
.../mediasource/test/test_BufferingWait_mp4.html | 49 + dom/media/mediasource/test/test_ChangeType.html | 84 + .../test_ChangeWhileWaitingOnMissingData_mp4.html | 37 + .../test/test_DifferentStreamStartTimes.html | 54 + .../test/test_DrainOnMissingData_mp4.html | 49 + .../mediasource/test/test_DurationChange.html | 71 + .../mediasource/test/test_DurationUpdated.html | 48 + .../mediasource/test/test_DurationUpdated_mp4.html | 47 + dom/media/mediasource/test/test_EndOfStream.html | 29 + .../mediasource/test/test_EndOfStream_mp4.html | 29 + dom/media/mediasource/test/test_EndedEvent.html | 31 + dom/media/mediasource/test/test_Eviction_mp4.html | 63 + .../mediasource/test/test_ExperimentalAsync.html | 102 + .../mediasource/test/test_FrameSelection.html | 64 + .../mediasource/test/test_FrameSelection_mp4.html | 49 + .../mediasource/test/test_HEAAC_extradata.html | 89 + .../test/test_HaveMetadataUnbufferedSeek.html | 38 + .../test/test_HaveMetadataUnbufferedSeek_mp4.html | 42 + .../test/test_InputBufferIsCleared.html | 58 + dom/media/mediasource/test/test_LiveSeekable.html | 84 + .../mediasource/test/test_LoadedDataFired_mp4.html | 57 + .../mediasource/test/test_LoadedMetadataFired.html | 31 + .../test/test_LoadedMetadataFired_mp4.html | 31 + dom/media/mediasource/test/test_MediaSource.html | 92 + .../test/test_MediaSource_capture_gc.html | 72 + .../test/test_MediaSource_disabled.html | 31 + .../test/test_MediaSource_flac_mp4.html | 33 + .../test/test_MediaSource_hevc_mp4.html | 42 + .../test/test_MediaSource_memory_reporting.html | 47 + .../mediasource/test/test_MediaSource_mp4.html | 90 + .../test/test_MultipleInitSegments.html | 49 + .../test/test_MultipleInitSegments_mp4.html | 44 + .../mediasource/test/test_NoAudioLoopBackData.html | 78 + .../test/test_NoAudioLoopBackData_Muted.html | 79 + .../mediasource/test/test_NoVideoLoopBackData.html | 81 + dom/media/mediasource/test/test_OnEvents.html | 42 + dom/media/mediasource/test/test_PlayEvents.html | 115 + .../test/test_PlayEventsAutoPlaying.html | 58 + .../test/test_PlayEventsAutoPlaying2.html | 58 + .../mediasource/test/test_RemoveSourceBuffer.html | 52 + ...ution_change_should_not_cause_video_freeze.html | 49 + .../test/test_ResumeAfterClearing_mp4.html | 44 + ...mless_looping_shorter_audio_than_video_MSE.html | 59 + .../mediasource/test/test_SeekNoData_mp4.html | 57 + dom/media/mediasource/test/test_SeekToEnd_mp4.html | 54 + .../mediasource/test/test_SeekToLastFrame_mp4.html | 34 + dom/media/mediasource/test/test_SeekTwice_mp4.html | 45 + .../test_SeekableBeforeAndAfterEndOfStream.html | 54 + ...est_SeekableBeforeAndAfterEndOfStreamSplit.html | 60 + ...SeekableBeforeAndAfterEndOfStreamSplit_mp4.html | 60 + ...test_SeekableBeforeAndAfterEndOfStream_mp4.html | 55 + .../mediasource/test/test_SeekedEvent_mp4.html | 48 + dom/media/mediasource/test/test_Sequence_mp4.html | 37 + dom/media/mediasource/test/test_SetModeThrows.html | 34 + dom/media/mediasource/test/test_SplitAppend.html | 36 + .../mediasource/test/test_SplitAppendDelay.html | 38 + .../test/test_SplitAppendDelay_mp4.html | 39 + .../mediasource/test/test_SplitAppend_mp4.html | 38 + dom/media/mediasource/test/test_Threshold_mp4.html | 73 + .../mediasource/test/test_TimestampOffset_mp4.html | 76 + .../mediasource/test/test_TruncatedDuration.html | 55 + .../test/test_TruncatedDuration_mp4.html | 59 + .../test/test_WMFUnmatchedAudioDataTime.html | 32 + .../test/test_WaitingOnMissingData.html | 60 + .../test/test_WaitingOnMissingDataEnded_mp4.html | 47 + 
.../test/test_WaitingOnMissingData_mp4.html | 61 + .../test/test_WaitingToEndedTransition_mp4.html | 52 + .../test/test_WebMTagsBeforeCluster.html | 47 + .../mediasource/test/test_trackidchange_mp4.html | 32 + .../mediasource/test/whitenoise-he-aac-5s.mp4 | Bin 0 -> 27078 bytes .../mediasource/test/wmf_mismatchedaudiotime.mp4 | Bin 0 -> 48906 bytes dom/media/metrics.yaml | 98 + dom/media/moz.build | 415 + dom/media/mp3/MP3Decoder.cpp | 44 + dom/media/mp3/MP3Decoder.h | 29 + dom/media/mp3/MP3Demuxer.cpp | 883 + dom/media/mp3/MP3Demuxer.h | 187 + dom/media/mp3/MP3FrameParser.cpp | 818 + dom/media/mp3/MP3FrameParser.h | 374 + dom/media/mp3/moz.build | 22 + dom/media/mp4/Atom.h | 21 + dom/media/mp4/AtomType.h | 29 + dom/media/mp4/Box.cpp | 230 + dom/media/mp4/Box.h | 100 + dom/media/mp4/BufferStream.cpp | 59 + dom/media/mp4/BufferStream.h | 45 + dom/media/mp4/ByteStream.h | 41 + dom/media/mp4/DecoderData.cpp | 356 + dom/media/mp4/DecoderData.h | 76 + dom/media/mp4/MP4Decoder.cpp | 242 + dom/media/mp4/MP4Decoder.h | 57 + dom/media/mp4/MP4Demuxer.cpp | 646 + dom/media/mp4/MP4Demuxer.h | 52 + dom/media/mp4/MP4Interval.h | 137 + dom/media/mp4/MP4Metadata.cpp | 510 + dom/media/mp4/MP4Metadata.h | 116 + dom/media/mp4/MoofParser.cpp | 1288 ++ dom/media/mp4/MoofParser.h | 361 + dom/media/mp4/ResourceStream.cpp | 56 + dom/media/mp4/ResourceStream.h | 48 + dom/media/mp4/SampleIterator.cpp | 712 + dom/media/mp4/SampleIterator.h | 134 + dom/media/mp4/SinfParser.cpp | 96 + dom/media/mp4/SinfParser.h | 56 + dom/media/mp4/moz.build | 45 + dom/media/nsIAudioDeviceInfo.idl | 54 + dom/media/nsIDocumentActivity.h | 31 + dom/media/nsIMediaDevice.idl | 18 + dom/media/nsIMediaManager.idl | 42 + dom/media/ogg/OggCodecState.cpp | 1806 ++ dom/media/ogg/OggCodecState.h | 638 + dom/media/ogg/OggCodecStore.cpp | 31 + dom/media/ogg/OggCodecStore.h | 37 + dom/media/ogg/OggDecoder.cpp | 82 + dom/media/ogg/OggDecoder.h | 29 + dom/media/ogg/OggDemuxer.cpp | 2246 ++ dom/media/ogg/OggDemuxer.h | 375 + dom/media/ogg/OggRLBox.h | 30 + dom/media/ogg/OggRLBoxTypes.h | 17 + dom/media/ogg/OggWriter.cpp | 196 + dom/media/ogg/OggWriter.h | 55 + dom/media/ogg/OpusParser.cpp | 219 + dom/media/ogg/OpusParser.h | 48 + dom/media/ogg/moz.build | 32 + dom/media/platforms/AllocationPolicy.cpp | 225 + dom/media/platforms/AllocationPolicy.h | 183 + dom/media/platforms/MediaCodecsSupport.cpp | 331 + dom/media/platforms/MediaCodecsSupport.h | 225 + dom/media/platforms/MediaTelemetryConstants.h | 21 + dom/media/platforms/PDMFactory.cpp | 915 + dom/media/platforms/PDMFactory.h | 113 + dom/media/platforms/PEMFactory.cpp | 206 + dom/media/platforms/PEMFactory.h | 53 + dom/media/platforms/PlatformDecoderModule.cpp | 55 + dom/media/platforms/PlatformDecoderModule.h | 563 + dom/media/platforms/PlatformEncoderModule.cpp | 182 + dom/media/platforms/PlatformEncoderModule.h | 385 + dom/media/platforms/ReorderQueue.h | 28 + dom/media/platforms/SimpleMap.h | 55 + dom/media/platforms/agnostic/AOMDecoder.cpp | 1073 + dom/media/platforms/agnostic/AOMDecoder.h | 287 + .../platforms/agnostic/AgnosticDecoderModule.cpp | 185 + .../platforms/agnostic/AgnosticDecoderModule.h | 39 + .../platforms/agnostic/BlankDecoderModule.cpp | 146 + dom/media/platforms/agnostic/BlankDecoderModule.h | 68 + dom/media/platforms/agnostic/DAV1DDecoder.cpp | 403 + dom/media/platforms/agnostic/DAV1DDecoder.h | 71 + .../platforms/agnostic/DummyMediaDataDecoder.cpp | 80 + .../platforms/agnostic/DummyMediaDataDecoder.h | 68 + dom/media/platforms/agnostic/NullDecoderModule.cpp | 57 + 
dom/media/platforms/agnostic/TheoraDecoder.cpp | 269 + dom/media/platforms/agnostic/TheoraDecoder.h | 63 + dom/media/platforms/agnostic/VPXDecoder.cpp | 679 + dom/media/platforms/agnostic/VPXDecoder.h | 208 + dom/media/platforms/agnostic/bytestreams/Adts.cpp | 94 + dom/media/platforms/agnostic/bytestreams/Adts.h | 22 + .../platforms/agnostic/bytestreams/AnnexB.cpp | 542 + dom/media/platforms/agnostic/bytestreams/AnnexB.h | 89 + .../agnostic/bytestreams/ByteStreamsUtils.h | 70 + dom/media/platforms/agnostic/bytestreams/H264.cpp | 1333 ++ dom/media/platforms/agnostic/bytestreams/H264.h | 547 + dom/media/platforms/agnostic/bytestreams/H265.cpp | 1300 ++ dom/media/platforms/agnostic/bytestreams/H265.h | 356 + .../agnostic/bytestreams/gtest/TestByteStreams.cpp | 787 + .../platforms/agnostic/bytestreams/gtest/moz.build | 11 + dom/media/platforms/agnostic/bytestreams/moz.build | 38 + .../agnostic/eme/ChromiumCDMVideoDecoder.cpp | 156 + .../agnostic/eme/ChromiumCDMVideoDecoder.h | 54 + .../agnostic/eme/DecryptThroughputLimit.h | 103 + .../platforms/agnostic/eme/EMEDecoderModule.cpp | 481 + .../platforms/agnostic/eme/EMEDecoderModule.h | 79 + .../agnostic/eme/SamplesWaitingForKey.cpp | 79 + .../platforms/agnostic/eme/SamplesWaitingForKey.h | 68 + dom/media/platforms/agnostic/eme/moz.build | 22 + .../platforms/agnostic/gmp/GMPDecoderModule.cpp | 94 + .../platforms/agnostic/gmp/GMPDecoderModule.h | 58 + .../platforms/agnostic/gmp/GMPVideoDecoder.cpp | 489 + dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h | 129 + dom/media/platforms/agnostic/gmp/moz.build | 24 + dom/media/platforms/android/AndroidDataEncoder.cpp | 505 + dom/media/platforms/android/AndroidDataEncoder.h | 121 + .../platforms/android/AndroidDecoderModule.cpp | 329 + dom/media/platforms/android/AndroidDecoderModule.h | 75 + .../platforms/android/AndroidEncoderModule.cpp | 47 + dom/media/platforms/android/AndroidEncoderModule.h | 29 + dom/media/platforms/android/JavaCallbacksSupport.h | 73 + dom/media/platforms/android/RemoteDataDecoder.cpp | 1184 + dom/media/platforms/android/RemoteDataDecoder.h | 112 + dom/media/platforms/apple/AppleATDecoder.cpp | 665 + dom/media/platforms/apple/AppleATDecoder.h | 80 + dom/media/platforms/apple/AppleDecoderModule.cpp | 251 + dom/media/platforms/apple/AppleDecoderModule.h | 62 + dom/media/platforms/apple/AppleEncoderModule.cpp | 42 + dom/media/platforms/apple/AppleEncoderModule.h | 29 + dom/media/platforms/apple/AppleUtils.h | 88 + dom/media/platforms/apple/AppleVTDecoder.cpp | 765 + dom/media/platforms/apple/AppleVTDecoder.h | 145 + dom/media/platforms/apple/AppleVTEncoder.cpp | 786 + dom/media/platforms/apple/AppleVTEncoder.h | 90 + dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp | 499 + dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h | 61 + dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp | 334 + dom/media/platforms/ffmpeg/FFmpegDataDecoder.h | 91 + dom/media/platforms/ffmpeg/FFmpegDecoderModule.cpp | 13 + dom/media/platforms/ffmpeg/FFmpegDecoderModule.h | 135 + dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp | 49 + dom/media/platforms/ffmpeg/FFmpegEncoderModule.h | 45 + dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp | 385 + dom/media/platforms/ffmpeg/FFmpegLibWrapper.h | 229 + dom/media/platforms/ffmpeg/FFmpegLibs.h | 54 + dom/media/platforms/ffmpeg/FFmpegLog.h | 38 + dom/media/platforms/ffmpeg/FFmpegRDFTTypes.h | 34 + dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp | 243 + dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.h | 47 + dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp | 1828 ++ 
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h | 258 + dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp | 1155 + dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h | 110 + .../platforms/ffmpeg/FFmpegVideoFramePool.cpp | 414 + dom/media/platforms/ffmpeg/FFmpegVideoFramePool.h | 155 + dom/media/platforms/ffmpeg/README_mozilla | 11 + .../ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1 | 504 + .../ffmpeg/ffmpeg57/include/libavcodec/avcodec.h | 5418 +++++ .../ffmpeg/ffmpeg57/include/libavcodec/avfft.h | 118 + .../ffmpeg/ffmpeg57/include/libavcodec/vaapi.h | 189 + .../ffmpeg/ffmpeg57/include/libavcodec/vdpau.h | 253 + .../ffmpeg/ffmpeg57/include/libavcodec/version.h | 210 + .../ffmpeg/ffmpeg57/include/libavutil/attributes.h | 168 + .../ffmpeg/ffmpeg57/include/libavutil/avconfig.h | 7 + .../ffmpeg/ffmpeg57/include/libavutil/avutil.h | 343 + .../ffmpeg/ffmpeg57/include/libavutil/buffer.h | 274 + .../ffmpeg57/include/libavutil/channel_layout.h | 223 + .../ffmpeg/ffmpeg57/include/libavutil/common.h | 519 + .../ffmpeg/ffmpeg57/include/libavutil/cpu.h | 117 + .../ffmpeg/ffmpeg57/include/libavutil/dict.h | 198 + .../ffmpeg/ffmpeg57/include/libavutil/error.h | 126 + .../ffmpeg/ffmpeg57/include/libavutil/frame.h | 713 + .../ffmpeg/ffmpeg57/include/libavutil/intfloat.h | 77 + .../ffmpeg/ffmpeg57/include/libavutil/log.h | 359 + .../ffmpeg/ffmpeg57/include/libavutil/macros.h | 50 + .../ffmpeg57/include/libavutil/mathematics.h | 165 + .../ffmpeg/ffmpeg57/include/libavutil/mem.h | 406 + .../ffmpeg/ffmpeg57/include/libavutil/pixfmt.h | 469 + .../ffmpeg/ffmpeg57/include/libavutil/rational.h | 173 + .../ffmpeg/ffmpeg57/include/libavutil/samplefmt.h | 271 + .../ffmpeg/ffmpeg57/include/libavutil/version.h | 129 + dom/media/platforms/ffmpeg/ffmpeg57/moz.build | 37 + .../ffmpeg/ffmpeg58/include/COPYING.LGPLv2.1 | 504 + .../ffmpeg/ffmpeg58/include/libavcodec/avcodec.h | 4184 ++++ .../ffmpeg/ffmpeg58/include/libavcodec/avfft.h | 118 + .../ffmpeg/ffmpeg58/include/libavcodec/bsf.h | 325 + .../ffmpeg/ffmpeg58/include/libavcodec/codec.h | 480 + .../ffmpeg58/include/libavcodec/codec_desc.h | 128 + .../ffmpeg/ffmpeg58/include/libavcodec/codec_id.h | 629 + .../ffmpeg/ffmpeg58/include/libavcodec/codec_par.h | 234 + .../ffmpeg/ffmpeg58/include/libavcodec/packet.h | 774 + .../ffmpeg/ffmpeg58/include/libavcodec/vaapi.h | 86 + .../ffmpeg/ffmpeg58/include/libavcodec/vdpau.h | 176 + .../ffmpeg/ffmpeg58/include/libavcodec/version.h | 137 + .../ffmpeg/ffmpeg58/include/libavutil/attributes.h | 167 + .../ffmpeg/ffmpeg58/include/libavutil/avconfig.h | 6 + .../ffmpeg/ffmpeg58/include/libavutil/avutil.h | 365 + .../ffmpeg/ffmpeg58/include/libavutil/buffer.h | 291 + .../ffmpeg58/include/libavutil/channel_layout.h | 232 + .../ffmpeg/ffmpeg58/include/libavutil/common.h | 560 + .../ffmpeg/ffmpeg58/include/libavutil/cpu.h | 130 + .../ffmpeg/ffmpeg58/include/libavutil/dict.h | 200 + .../ffmpeg/ffmpeg58/include/libavutil/error.h | 126 + .../ffmpeg/ffmpeg58/include/libavutil/frame.h | 893 + .../ffmpeg/ffmpeg58/include/libavutil/hwcontext.h | 584 + .../ffmpeg58/include/libavutil/hwcontext_drm.h | 169 + .../ffmpeg58/include/libavutil/hwcontext_vaapi.h | 117 + .../ffmpeg/ffmpeg58/include/libavutil/intfloat.h | 77 + .../ffmpeg/ffmpeg58/include/libavutil/log.h | 362 + .../ffmpeg/ffmpeg58/include/libavutil/macros.h | 50 + .../ffmpeg58/include/libavutil/mathematics.h | 242 + .../ffmpeg/ffmpeg58/include/libavutil/mem.h | 700 + .../ffmpeg/ffmpeg58/include/libavutil/pixfmt.h | 529 + .../ffmpeg/ffmpeg58/include/libavutil/rational.h | 214 + 
.../ffmpeg/ffmpeg58/include/libavutil/samplefmt.h | 272 + .../ffmpeg/ffmpeg58/include/libavutil/version.h | 139 + dom/media/platforms/ffmpeg/ffmpeg58/moz.build | 45 + .../ffmpeg/ffmpeg59/include/COPYING.LGPLv2.1 | 504 + .../ffmpeg/ffmpeg59/include/libavcodec/avcodec.h | 3204 +++ .../ffmpeg/ffmpeg59/include/libavcodec/avfft.h | 119 + .../ffmpeg/ffmpeg59/include/libavcodec/bsf.h | 320 + .../ffmpeg/ffmpeg59/include/libavcodec/codec.h | 513 + .../ffmpeg59/include/libavcodec/codec_desc.h | 128 + .../ffmpeg/ffmpeg59/include/libavcodec/codec_id.h | 637 + .../ffmpeg/ffmpeg59/include/libavcodec/codec_par.h | 236 + .../ffmpeg/ffmpeg59/include/libavcodec/defs.h | 171 + .../ffmpeg/ffmpeg59/include/libavcodec/packet.h | 724 + .../ffmpeg/ffmpeg59/include/libavcodec/vdpau.h | 156 + .../ffmpeg/ffmpeg59/include/libavcodec/version.h | 67 + .../ffmpeg/ffmpeg59/include/libavutil/attributes.h | 173 + .../ffmpeg/ffmpeg59/include/libavutil/avconfig.h | 6 + .../ffmpeg/ffmpeg59/include/libavutil/avutil.h | 366 + .../ffmpeg/ffmpeg59/include/libavutil/buffer.h | 324 + .../ffmpeg59/include/libavutil/channel_layout.h | 270 + .../ffmpeg/ffmpeg59/include/libavutil/common.h | 590 + .../ffmpeg/ffmpeg59/include/libavutil/cpu.h | 138 + .../ffmpeg/ffmpeg59/include/libavutil/dict.h | 215 + .../ffmpeg/ffmpeg59/include/libavutil/error.h | 158 + .../ffmpeg/ffmpeg59/include/libavutil/frame.h | 927 + .../ffmpeg/ffmpeg59/include/libavutil/hwcontext.h | 601 + .../ffmpeg59/include/libavutil/hwcontext_drm.h | 169 + .../ffmpeg59/include/libavutil/hwcontext_vaapi.h | 117 + .../ffmpeg/ffmpeg59/include/libavutil/intfloat.h | 73 + .../ffmpeg/ffmpeg59/include/libavutil/log.h | 388 + .../ffmpeg/ffmpeg59/include/libavutil/macros.h | 87 + .../ffmpeg59/include/libavutil/mathematics.h | 247 + .../ffmpeg/ffmpeg59/include/libavutil/mem.h | 708 + .../ffmpeg/ffmpeg59/include/libavutil/pixfmt.h | 808 + .../ffmpeg/ffmpeg59/include/libavutil/rational.h | 221 + .../ffmpeg/ffmpeg59/include/libavutil/samplefmt.h | 276 + .../ffmpeg/ffmpeg59/include/libavutil/version.h | 118 + dom/media/platforms/ffmpeg/ffmpeg59/moz.build | 45 + .../ffmpeg/ffmpeg60/include/COPYING.LGPLv2.1 | 504 + .../ffmpeg/ffmpeg60/include/libavcodec/avcodec.h | 3230 +++ .../ffmpeg/ffmpeg60/include/libavcodec/avdct.h | 85 + .../ffmpeg/ffmpeg60/include/libavcodec/avfft.h | 119 + .../ffmpeg/ffmpeg60/include/libavcodec/bsf.h | 335 + .../ffmpeg/ffmpeg60/include/libavcodec/codec.h | 387 + .../ffmpeg60/include/libavcodec/codec_desc.h | 128 + .../ffmpeg/ffmpeg60/include/libavcodec/codec_id.h | 669 + .../ffmpeg/ffmpeg60/include/libavcodec/codec_par.h | 247 + .../ffmpeg/ffmpeg60/include/libavcodec/defs.h | 203 + .../ffmpeg/ffmpeg60/include/libavcodec/packet.h | 730 + .../ffmpeg/ffmpeg60/include/libavcodec/vdpau.h | 156 + .../ffmpeg/ffmpeg60/include/libavcodec/version.h | 45 + .../ffmpeg60/include/libavcodec/version_major.h | 52 + .../ffmpeg/ffmpeg60/include/libavutil/attributes.h | 173 + .../ffmpeg/ffmpeg60/include/libavutil/avconfig.h | 6 + .../ffmpeg/ffmpeg60/include/libavutil/avutil.h | 371 + .../ffmpeg/ffmpeg60/include/libavutil/buffer.h | 324 + .../ffmpeg60/include/libavutil/channel_layout.h | 842 + .../ffmpeg/ffmpeg60/include/libavutil/common.h | 589 + .../ffmpeg/ffmpeg60/include/libavutil/cpu.h | 150 + .../ffmpeg/ffmpeg60/include/libavutil/dict.h | 259 + .../ffmpeg/ffmpeg60/include/libavutil/error.h | 158 + .../ffmpeg/ffmpeg60/include/libavutil/frame.h | 960 + .../ffmpeg/ffmpeg60/include/libavutil/hwcontext.h | 606 + .../ffmpeg60/include/libavutil/hwcontext_drm.h | 169 + 
.../ffmpeg60/include/libavutil/hwcontext_vaapi.h | 117 + .../ffmpeg/ffmpeg60/include/libavutil/intfloat.h | 73 + .../ffmpeg/ffmpeg60/include/libavutil/log.h | 388 + .../ffmpeg/ffmpeg60/include/libavutil/macros.h | 87 + .../ffmpeg60/include/libavutil/mathematics.h | 249 + .../ffmpeg/ffmpeg60/include/libavutil/mem.h | 613 + .../ffmpeg/ffmpeg60/include/libavutil/pixfmt.h | 891 + .../ffmpeg/ffmpeg60/include/libavutil/rational.h | 222 + .../ffmpeg/ffmpeg60/include/libavutil/samplefmt.h | 274 + .../ffmpeg/ffmpeg60/include/libavutil/version.h | 122 + dom/media/platforms/ffmpeg/ffmpeg60/moz.build | 45 + .../platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp | 163 + .../platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h | 42 + dom/media/platforms/ffmpeg/ffvpx/moz.build | 56 + .../ffmpeg/libav53/include/COPYING.LGPLv2.1 | 504 + .../ffmpeg/libav53/include/libavcodec/avcodec.h | 4761 ++++ .../ffmpeg/libav53/include/libavcodec/avfft.h | 99 + .../ffmpeg/libav53/include/libavcodec/dxva2.h | 71 + .../libav53/include/libavcodec/old_codec_ids.h | 398 + .../ffmpeg/libav53/include/libavcodec/opt.h | 34 + .../ffmpeg/libav53/include/libavcodec/vaapi.h | 167 + .../ffmpeg/libav53/include/libavcodec/vda.h | 144 + .../ffmpeg/libav53/include/libavcodec/vdpau.h | 88 + .../ffmpeg/libav53/include/libavcodec/version.h | 126 + .../ffmpeg/libav53/include/libavcodec/xvmc.h | 151 + .../ffmpeg/libav53/include/libavutil/adler32.h | 43 + .../ffmpeg/libav53/include/libavutil/aes.h | 57 + .../ffmpeg/libav53/include/libavutil/attributes.h | 136 + .../ffmpeg/libav53/include/libavutil/audio_fifo.h | 146 + .../libav53/include/libavutil/audioconvert.h | 130 + .../ffmpeg/libav53/include/libavutil/avassert.h | 66 + .../ffmpeg/libav53/include/libavutil/avconfig.h | 6 + .../ffmpeg/libav53/include/libavutil/avstring.h | 175 + .../ffmpeg/libav53/include/libavutil/avutil.h | 326 + .../ffmpeg/libav53/include/libavutil/base64.h | 65 + .../ffmpeg/libav53/include/libavutil/blowfish.h | 77 + .../ffmpeg/libav53/include/libavutil/bprint.h | 169 + .../ffmpeg/libav53/include/libavutil/bswap.h | 109 + .../ffmpeg/libav53/include/libavutil/common.h | 398 + .../ffmpeg/libav53/include/libavutil/cpu.h | 56 + .../ffmpeg/libav53/include/libavutil/crc.h | 44 + .../ffmpeg/libav53/include/libavutil/dict.h | 121 + .../ffmpeg/libav53/include/libavutil/error.h | 81 + .../ffmpeg/libav53/include/libavutil/eval.h | 113 + .../ffmpeg/libav53/include/libavutil/fifo.h | 141 + .../ffmpeg/libav53/include/libavutil/file.h | 52 + .../ffmpeg/libav53/include/libavutil/imgutils.h | 138 + .../ffmpeg/libav53/include/libavutil/intfloat.h | 73 + .../libav53/include/libavutil/intfloat_readwrite.h | 40 + .../libav53/include/libavutil/intreadwrite.h | 522 + .../ffmpeg/libav53/include/libavutil/lfg.h | 62 + .../ffmpeg/libav53/include/libavutil/log.h | 172 + .../ffmpeg/libav53/include/libavutil/lzo.h | 77 + .../ffmpeg/libav53/include/libavutil/mathematics.h | 122 + .../ffmpeg/libav53/include/libavutil/md5.h | 46 + .../ffmpeg/libav53/include/libavutil/mem.h | 136 + .../libav53/include/libavutil/old_pix_fmts.h | 171 + .../ffmpeg/libav53/include/libavutil/opt.h | 591 + .../ffmpeg/libav53/include/libavutil/parseutils.h | 124 + .../ffmpeg/libav53/include/libavutil/pixdesc.h | 177 + .../ffmpeg/libav53/include/libavutil/pixfmt.h | 198 + .../ffmpeg/libav53/include/libavutil/random_seed.h | 44 + .../ffmpeg/libav53/include/libavutil/rational.h | 144 + .../ffmpeg/libav53/include/libavutil/samplefmt.h | 148 + .../ffmpeg/libav53/include/libavutil/sha.h | 66 + .../ffmpeg/libav53/include/libavutil/time.h | 41 + 
.../ffmpeg/libav53/include/libavutil/timecode.h | 140 + .../ffmpeg/libav53/include/libavutil/timestamp.h | 74 + .../ffmpeg/libav53/include/libavutil/version.h | 132 + .../ffmpeg/libav53/include/libavutil/xtea.h | 62 + dom/media/platforms/ffmpeg/libav53/moz.build | 29 + .../ffmpeg/libav54/include/COPYING.LGPLv2.1 | 504 + .../ffmpeg/libav54/include/libavcodec/avcodec.h | 4658 ++++ .../ffmpeg/libav54/include/libavcodec/avfft.h | 116 + .../ffmpeg/libav54/include/libavcodec/dxva2.h | 88 + .../libav54/include/libavcodec/old_codec_ids.h | 366 + .../ffmpeg/libav54/include/libavcodec/vaapi.h | 173 + .../ffmpeg/libav54/include/libavcodec/vda.h | 217 + .../ffmpeg/libav54/include/libavcodec/vdpau.h | 94 + .../ffmpeg/libav54/include/libavcodec/version.h | 95 + .../ffmpeg/libav54/include/libavcodec/xvmc.h | 168 + .../ffmpeg/libav54/include/libavutil/adler32.h | 43 + .../ffmpeg/libav54/include/libavutil/aes.h | 67 + .../ffmpeg/libav54/include/libavutil/attributes.h | 122 + .../ffmpeg/libav54/include/libavutil/audio_fifo.h | 146 + .../libav54/include/libavutil/audioconvert.h | 6 + .../ffmpeg/libav54/include/libavutil/avassert.h | 66 + .../ffmpeg/libav54/include/libavutil/avconfig.h | 6 + .../ffmpeg/libav54/include/libavutil/avstring.h | 191 + .../ffmpeg/libav54/include/libavutil/avutil.h | 275 + .../ffmpeg/libav54/include/libavutil/base64.h | 65 + .../ffmpeg/libav54/include/libavutil/blowfish.h | 76 + .../ffmpeg/libav54/include/libavutil/bswap.h | 109 + .../libav54/include/libavutil/channel_layout.h | 182 + .../ffmpeg/libav54/include/libavutil/common.h | 406 + .../ffmpeg/libav54/include/libavutil/cpu.h | 84 + .../ffmpeg/libav54/include/libavutil/crc.h | 74 + .../ffmpeg/libav54/include/libavutil/dict.h | 129 + .../ffmpeg/libav54/include/libavutil/error.h | 83 + .../ffmpeg/libav54/include/libavutil/eval.h | 113 + .../ffmpeg/libav54/include/libavutil/fifo.h | 131 + .../ffmpeg/libav54/include/libavutil/file.h | 54 + .../ffmpeg/libav54/include/libavutil/imgutils.h | 138 + .../ffmpeg/libav54/include/libavutil/intfloat.h | 77 + .../libav54/include/libavutil/intfloat_readwrite.h | 40 + .../libav54/include/libavutil/intreadwrite.h | 549 + .../ffmpeg/libav54/include/libavutil/lfg.h | 62 + .../ffmpeg/libav54/include/libavutil/log.h | 173 + .../ffmpeg/libav54/include/libavutil/lzo.h | 66 + .../ffmpeg/libav54/include/libavutil/mathematics.h | 111 + .../ffmpeg/libav54/include/libavutil/md5.h | 51 + .../ffmpeg/libav54/include/libavutil/mem.h | 183 + .../libav54/include/libavutil/old_pix_fmts.h | 128 + .../ffmpeg/libav54/include/libavutil/opt.h | 516 + .../ffmpeg/libav54/include/libavutil/parseutils.h | 124 + .../ffmpeg/libav54/include/libavutil/pixdesc.h | 223 + .../ffmpeg/libav54/include/libavutil/pixfmt.h | 268 + .../ffmpeg/libav54/include/libavutil/random_seed.h | 44 + .../ffmpeg/libav54/include/libavutil/rational.h | 155 + .../ffmpeg/libav54/include/libavutil/samplefmt.h | 220 + .../ffmpeg/libav54/include/libavutil/sha.h | 76 + .../ffmpeg/libav54/include/libavutil/time.h | 39 + .../ffmpeg/libav54/include/libavutil/version.h | 87 + .../ffmpeg/libav54/include/libavutil/xtea.h | 61 + dom/media/platforms/ffmpeg/libav54/moz.build | 29 + .../ffmpeg/libav55/include/COPYING.LGPLv2.1 | 504 + .../ffmpeg/libav55/include/libavcodec/avcodec.h | 4356 ++++ .../ffmpeg/libav55/include/libavcodec/avfft.h | 118 + .../ffmpeg/libav55/include/libavcodec/dxva2.h | 88 + .../ffmpeg/libav55/include/libavcodec/vaapi.h | 173 + .../ffmpeg/libav55/include/libavcodec/vda.h | 142 + .../ffmpeg/libav55/include/libavcodec/vdpau.h | 189 + 
.../ffmpeg/libav55/include/libavcodec/version.h | 127 + .../ffmpeg/libav55/include/libavcodec/xvmc.h | 174 + .../ffmpeg/libav55/include/libavutil/adler32.h | 43 + .../ffmpeg/libav55/include/libavutil/aes.h | 67 + .../ffmpeg/libav55/include/libavutil/attributes.h | 126 + .../ffmpeg/libav55/include/libavutil/audio_fifo.h | 146 + .../libav55/include/libavutil/audioconvert.h | 6 + .../ffmpeg/libav55/include/libavutil/avassert.h | 66 + .../ffmpeg/libav55/include/libavutil/avconfig.h | 6 + .../ffmpeg/libav55/include/libavutil/avstring.h | 226 + .../ffmpeg/libav55/include/libavutil/avutil.h | 284 + .../ffmpeg/libav55/include/libavutil/base64.h | 65 + .../ffmpeg/libav55/include/libavutil/blowfish.h | 76 + .../ffmpeg/libav55/include/libavutil/bswap.h | 111 + .../ffmpeg/libav55/include/libavutil/buffer.h | 267 + .../libav55/include/libavutil/channel_layout.h | 186 + .../ffmpeg/libav55/include/libavutil/common.h | 406 + .../ffmpeg/libav55/include/libavutil/cpu.h | 87 + .../ffmpeg/libav55/include/libavutil/crc.h | 74 + .../ffmpeg/libav55/include/libavutil/dict.h | 146 + .../libav55/include/libavutil/downmix_info.h | 114 + .../ffmpeg/libav55/include/libavutil/error.h | 82 + .../ffmpeg/libav55/include/libavutil/eval.h | 113 + .../ffmpeg/libav55/include/libavutil/fifo.h | 131 + .../ffmpeg/libav55/include/libavutil/file.h | 54 + .../ffmpeg/libav55/include/libavutil/frame.h | 552 + .../ffmpeg/libav55/include/libavutil/hmac.h | 95 + .../ffmpeg/libav55/include/libavutil/imgutils.h | 138 + .../ffmpeg/libav55/include/libavutil/intfloat.h | 77 + .../libav55/include/libavutil/intreadwrite.h | 549 + .../ffmpeg/libav55/include/libavutil/lfg.h | 62 + .../ffmpeg/libav55/include/libavutil/log.h | 262 + .../ffmpeg/libav55/include/libavutil/lzo.h | 66 + .../ffmpeg/libav55/include/libavutil/macros.h | 48 + .../ffmpeg/libav55/include/libavutil/mathematics.h | 111 + .../ffmpeg/libav55/include/libavutil/md5.h | 51 + .../ffmpeg/libav55/include/libavutil/mem.h | 265 + .../libav55/include/libavutil/old_pix_fmts.h | 134 + .../ffmpeg/libav55/include/libavutil/opt.h | 516 + .../ffmpeg/libav55/include/libavutil/parseutils.h | 124 + .../ffmpeg/libav55/include/libavutil/pixdesc.h | 276 + .../ffmpeg/libav55/include/libavutil/pixfmt.h | 283 + .../ffmpeg/libav55/include/libavutil/random_seed.h | 44 + .../ffmpeg/libav55/include/libavutil/rational.h | 155 + .../ffmpeg/libav55/include/libavutil/samplefmt.h | 220 + .../ffmpeg/libav55/include/libavutil/sha.h | 76 + .../ffmpeg/libav55/include/libavutil/stereo3d.h | 147 + .../ffmpeg/libav55/include/libavutil/time.h | 39 + .../ffmpeg/libav55/include/libavutil/version.h | 116 + .../ffmpeg/libav55/include/libavutil/xtea.h | 61 + dom/media/platforms/ffmpeg/libav55/moz.build | 35 + dom/media/platforms/ffmpeg/moz.build | 32 + dom/media/platforms/moz.build | 136 + dom/media/platforms/omx/OmxCoreLibLinker.cpp | 113 + dom/media/platforms/omx/OmxCoreLibLinker.h | 36 + dom/media/platforms/omx/OmxDataDecoder.cpp | 1012 + dom/media/platforms/omx/OmxDataDecoder.h | 224 + dom/media/platforms/omx/OmxDecoderModule.cpp | 59 + dom/media/platforms/omx/OmxDecoderModule.h | 33 + dom/media/platforms/omx/OmxFunctionList.h | 13 + dom/media/platforms/omx/OmxPlatformLayer.cpp | 307 + dom/media/platforms/omx/OmxPlatformLayer.h | 103 + dom/media/platforms/omx/OmxPromiseLayer.cpp | 355 + dom/media/platforms/omx/OmxPromiseLayer.h | 243 + dom/media/platforms/omx/PureOmxPlatformLayer.cpp | 405 + dom/media/platforms/omx/PureOmxPlatformLayer.h | 110 + dom/media/platforms/omx/moz.build | 36 + 
dom/media/platforms/wmf/DXVA2Manager.cpp | 1251 + dom/media/platforms/wmf/DXVA2Manager.h | 90 + dom/media/platforms/wmf/MFCDMExtra.h | 19 + dom/media/platforms/wmf/MFCDMProxy.cpp | 97 + dom/media/platforms/wmf/MFCDMProxy.h | 75 + dom/media/platforms/wmf/MFCDMSession.cpp | 318 + dom/media/platforms/wmf/MFCDMSession.h | 93 + .../platforms/wmf/MFContentProtectionManager.cpp | 171 + .../platforms/wmf/MFContentProtectionManager.h | 81 + .../platforms/wmf/MFMediaEngineAudioStream.cpp | 137 + dom/media/platforms/wmf/MFMediaEngineAudioStream.h | 51 + .../platforms/wmf/MFMediaEngineDecoderModule.cpp | 185 + .../platforms/wmf/MFMediaEngineDecoderModule.h | 47 + dom/media/platforms/wmf/MFMediaEngineExtension.cpp | 88 + dom/media/platforms/wmf/MFMediaEngineExtension.h | 49 + dom/media/platforms/wmf/MFMediaEngineExtra.h | 12 + dom/media/platforms/wmf/MFMediaEngineNotify.cpp | 25 + dom/media/platforms/wmf/MFMediaEngineNotify.h | 55 + dom/media/platforms/wmf/MFMediaEngineStream.cpp | 596 + dom/media/platforms/wmf/MFMediaEngineStream.h | 228 + .../platforms/wmf/MFMediaEngineVideoStream.cpp | 375 + dom/media/platforms/wmf/MFMediaEngineVideoStream.h | 107 + dom/media/platforms/wmf/MFMediaSource.cpp | 606 + dom/media/platforms/wmf/MFMediaSource.h | 188 + dom/media/platforms/wmf/MFPMPHostWrapper.cpp | 92 + dom/media/platforms/wmf/MFPMPHostWrapper.h | 44 + dom/media/platforms/wmf/MFTDecoder.cpp | 430 + dom/media/platforms/wmf/MFTDecoder.h | 132 + dom/media/platforms/wmf/MFTEncoder.cpp | 754 + dom/media/platforms/wmf/MFTEncoder.h | 144 + dom/media/platforms/wmf/WMF.h | 198 + dom/media/platforms/wmf/WMFAudioMFTManager.cpp | 315 + dom/media/platforms/wmf/WMFAudioMFTManager.h | 69 + dom/media/platforms/wmf/WMFDataEncoderUtils.h | 154 + dom/media/platforms/wmf/WMFDecoderModule.cpp | 492 + dom/media/platforms/wmf/WMFDecoderModule.h | 70 + dom/media/platforms/wmf/WMFEncoderModule.cpp | 32 + dom/media/platforms/wmf/WMFEncoderModule.h | 27 + dom/media/platforms/wmf/WMFMediaDataDecoder.cpp | 272 + dom/media/platforms/wmf/WMFMediaDataDecoder.h | 182 + dom/media/platforms/wmf/WMFMediaDataEncoder.h | 347 + dom/media/platforms/wmf/WMFUtils.cpp | 628 + dom/media/platforms/wmf/WMFUtils.h | 123 + dom/media/platforms/wmf/WMFVideoMFTManager.cpp | 1014 + dom/media/platforms/wmf/WMFVideoMFTManager.h | 133 + .../wmf/gtest/TestCanCreateMFTDecoder.cpp | 15 + dom/media/platforms/wmf/gtest/moz.build | 15 + dom/media/platforms/wmf/metrics.yaml | 88 + dom/media/platforms/wmf/moz.build | 85 + dom/media/platforms/wrappers/AudioTrimmer.cpp | 218 + dom/media/platforms/wrappers/AudioTrimmer.h | 55 + .../platforms/wrappers/MediaChangeMonitor.cpp | 1100 + dom/media/platforms/wrappers/MediaChangeMonitor.h | 147 + .../platforms/wrappers/MediaDataDecoderProxy.cpp | 149 + .../platforms/wrappers/MediaDataDecoderProxy.h | 64 + dom/media/systemservices/CamerasChild.cpp | 538 + dom/media/systemservices/CamerasChild.h | 263 + dom/media/systemservices/CamerasParent.cpp | 1335 ++ dom/media/systemservices/CamerasParent.h | 187 + dom/media/systemservices/CamerasTypes.cpp | 26 + dom/media/systemservices/CamerasTypes.h | 61 + dom/media/systemservices/MediaChild.cpp | 93 + dom/media/systemservices/MediaChild.h | 60 + dom/media/systemservices/MediaParent.cpp | 536 + dom/media/systemservices/MediaParent.h | 91 + .../systemservices/MediaSystemResourceClient.cpp | 67 + .../systemservices/MediaSystemResourceClient.h | 91 + .../systemservices/MediaSystemResourceManager.cpp | 358 + .../systemservices/MediaSystemResourceManager.h | 81 + 
.../MediaSystemResourceManagerChild.cpp | 42 + .../MediaSystemResourceManagerChild.h | 65 + .../MediaSystemResourceManagerParent.cpp | 75 + .../MediaSystemResourceManagerParent.h | 59 + .../MediaSystemResourceMessageUtils.h | 24 + .../systemservices/MediaSystemResourceService.cpp | 222 + .../systemservices/MediaSystemResourceService.h | 83 + .../systemservices/MediaSystemResourceTypes.h | 23 + dom/media/systemservices/MediaTaskUtils.h | 52 + dom/media/systemservices/MediaUtils.cpp | 126 + dom/media/systemservices/MediaUtils.h | 332 + dom/media/systemservices/OSXRunLoopSingleton.cpp | 41 + dom/media/systemservices/OSXRunLoopSingleton.h | 24 + dom/media/systemservices/PCameras.ipdl | 93 + dom/media/systemservices/PMedia.ipdl | 55 + .../PMediaSystemResourceManager.ipdl | 38 + dom/media/systemservices/ShmemPool.cpp | 99 + dom/media/systemservices/ShmemPool.h | 181 + dom/media/systemservices/VideoEngine.cpp | 229 + dom/media/systemservices/VideoEngine.h | 129 + dom/media/systemservices/VideoFrameUtils.cpp | 90 + dom/media/systemservices/VideoFrameUtils.h | 48 + .../android_video_capture/device_info_android.cc | 316 + .../android_video_capture/device_info_android.h | 73 + .../videoengine/CaptureCapabilityAndroid.java | 25 + .../webrtc/videoengine/VideoCaptureAndroid.java | 216 + .../videoengine/VideoCaptureDeviceInfoAndroid.java | 121 + .../android_video_capture/video_capture_android.cc | 275 + .../android_video_capture/video_capture_android.h | 47 + dom/media/systemservices/moz.build | 114 + .../objc_video_capture/device_info.h | 62 + .../objc_video_capture/device_info.mm | 179 + .../objc_video_capture/device_info_avfoundation.h | 72 + .../objc_video_capture/device_info_avfoundation.mm | 225 + .../objc_video_capture/device_info_objc.h | 38 + .../objc_video_capture/device_info_objc.mm | 172 + .../objc_video_capture/rtc_video_capture_objc.h | 41 + .../objc_video_capture/rtc_video_capture_objc.mm | 371 + .../objc_video_capture/video_capture.h | 42 + .../objc_video_capture/video_capture.mm | 108 + .../video_capture_avfoundation.h | 91 + .../video_capture_avfoundation.mm | 327 + .../video_engine/desktop_capture_impl.cc | 782 + .../video_engine/desktop_capture_impl.h | 249 + .../video_engine/desktop_device_info.cc | 488 + .../video_engine/desktop_device_info.h | 84 + .../video_engine/placeholder_device_info.cc | 60 + .../video_engine/placeholder_device_info.h | 45 + .../video_engine/platform_uithread.cc | 198 + .../video_engine/platform_uithread.h | 96 + .../systemservices/video_engine/tab_capturer.cc | 331 + .../systemservices/video_engine/tab_capturer.h | 88 + .../video_engine/video_capture_factory.cc | 230 + .../video_engine/video_capture_factory.h | 89 + dom/media/test/16bit_wave_extrametadata.wav | Bin 0 -> 97814 bytes .../test/16bit_wave_extrametadata.wav^headers^ | 1 + dom/media/test/320x240.ogv | Bin 0 -> 28942 bytes dom/media/test/320x240.ogv^headers^ | 1 + dom/media/test/448636.ogv | Bin 0 -> 7799 bytes dom/media/test/448636.ogv^headers^ | 1 + dom/media/test/A4.ogv | Bin 0 -> 94372 bytes dom/media/test/A4.ogv^headers^ | 1 + dom/media/test/TestPatternHDR.mp4 | Bin 0 -> 179294 bytes dom/media/test/VID_0001.ogg | Bin 0 -> 633435 bytes dom/media/test/VID_0001.ogg^headers^ | 1 + dom/media/test/adts.aac | Bin 0 -> 8537 bytes dom/media/test/adts.aac^headers^ | 1 + dom/media/test/allowed.sjs | 61 + dom/media/test/ambisonics.mp4 | Bin 0 -> 1053904 bytes dom/media/test/ambisonics.mp4^headers^ | 1 + dom/media/test/audio-gaps-short.ogg | Bin 0 -> 5233 bytes dom/media/test/audio-gaps-short.ogg^headers^ 
| 1 + dom/media/test/audio-gaps.ogg | Bin 0 -> 12306 bytes dom/media/test/audio-gaps.ogg^headers^ | 1 + dom/media/test/audio-overhang.ogg | Bin 0 -> 45463 bytes dom/media/test/audio-overhang.ogg^headers^ | 1 + dom/media/test/audio.wav | Bin 0 -> 1422 bytes dom/media/test/audio.wav^headers^ | 1 + dom/media/test/av1.mp4 | Bin 0 -> 13089 bytes dom/media/test/av1.mp4^headers^ | 1 + dom/media/test/background_video.js | 224 + dom/media/test/badtags.ogg | Bin 0 -> 5033 bytes dom/media/test/badtags.ogg^headers^ | 1 + .../test/bear-640x360-a_frag-cenc-key_rotation.mp4 | Bin 0 -> 80372 bytes .../test/bear-640x360-v_frag-cenc-key_rotation.mp4 | Bin 0 -> 280361 bytes dom/media/test/beta-phrasebook.ogg | Bin 0 -> 47411 bytes dom/media/test/beta-phrasebook.ogg^headers^ | 1 + dom/media/test/big-buck-bunny-cenc-avc3-1.m4s | Bin 0 -> 60041 bytes .../test/big-buck-bunny-cenc-avc3-1.m4s^headers^ | 1 + dom/media/test/big-buck-bunny-cenc-avc3-init.mp4 | Bin 0 -> 819 bytes .../big-buck-bunny-cenc-avc3-init.mp4^headers^ | 1 + dom/media/test/big-short.wav | Bin 0 -> 12366 bytes dom/media/test/big-short.wav^headers^ | 1 + dom/media/test/big.wav | Bin 0 -> 102444 bytes dom/media/test/big.wav^headers^ | 1 + dom/media/test/bipbop-cenc-audio-key1.xml | 28 + dom/media/test/bipbop-cenc-audio-key2.xml | 28 + dom/media/test/bipbop-cenc-audio1.m4s | Bin 0 -> 921 bytes dom/media/test/bipbop-cenc-audio1.m4s^headers^ | 1 + dom/media/test/bipbop-cenc-audio2.m4s | Bin 0 -> 565 bytes dom/media/test/bipbop-cenc-audio2.m4s^headers^ | 1 + dom/media/test/bipbop-cenc-audio3.m4s | Bin 0 -> 977 bytes dom/media/test/bipbop-cenc-audio3.m4s^headers^ | 1 + dom/media/test/bipbop-cenc-audioinit.mp4 | Bin 0 -> 1000 bytes dom/media/test/bipbop-cenc-audioinit.mp4^headers^ | 1 + dom/media/test/bipbop-cenc-video-10s.mp4 | Bin 0 -> 299914 bytes dom/media/test/bipbop-cenc-video-10s.mp4^headers^ | 1 + dom/media/test/bipbop-cenc-video-key1.xml | 28 + dom/media/test/bipbop-cenc-video-key2.xml | 28 + dom/media/test/bipbop-cenc-video1.m4s | Bin 0 -> 25211 bytes dom/media/test/bipbop-cenc-video1.m4s^headers^ | 1 + dom/media/test/bipbop-cenc-video2.m4s | Bin 0 -> 22934 bytes dom/media/test/bipbop-cenc-video2.m4s^headers^ | 1 + dom/media/test/bipbop-cenc-videoinit.mp4 | Bin 0 -> 1058 bytes dom/media/test/bipbop-cenc-videoinit.mp4^headers^ | 1 + dom/media/test/bipbop-cenc.sh | 29 + ...ipbop-clearkey-keyrotation-clear-lead-audio.mp4 | Bin 0 -> 8675 bytes ...arkey-keyrotation-clear-lead-audio.mp4^headers^ | 1 + ...ipbop-clearkey-keyrotation-clear-lead-video.mp4 | Bin 0 -> 278040 bytes ...arkey-keyrotation-clear-lead-video.mp4^headers^ | 1 + dom/media/test/bipbop-frag-cenc.xml | 57 + dom/media/test/bipbop-lateaudio.mp4 | Bin 0 -> 70404 bytes dom/media/test/bipbop-lateaudio.mp4^headers^ | 1 + dom/media/test/bipbop-no-edts.mp4 | Bin 0 -> 285681 bytes dom/media/test/bipbop.mp4 | Bin 0 -> 285765 bytes .../test/bipbop_225w_175kbps-cenc-audio-key1-1.m4s | Bin 0 -> 921 bytes ...bop_225w_175kbps-cenc-audio-key1-1.m4s^headers^ | 1 + .../test/bipbop_225w_175kbps-cenc-audio-key1-2.m4s | Bin 0 -> 565 bytes ...bop_225w_175kbps-cenc-audio-key1-2.m4s^headers^ | 1 + .../test/bipbop_225w_175kbps-cenc-audio-key1-3.m4s | Bin 0 -> 977 bytes ...bop_225w_175kbps-cenc-audio-key1-3.m4s^headers^ | 1 + .../test/bipbop_225w_175kbps-cenc-audio-key1-4.m4s | Bin 0 -> 389 bytes ...bop_225w_175kbps-cenc-audio-key1-4.m4s^headers^ | 1 + .../bipbop_225w_175kbps-cenc-audio-key1-init.mp4 | Bin 0 -> 1020 bytes ..._225w_175kbps-cenc-audio-key1-init.mp4^headers^ | 1 + 
.../test/bipbop_225w_175kbps-cenc-audio-key2-1.m4s | Bin 0 -> 921 bytes ...bop_225w_175kbps-cenc-audio-key2-1.m4s^headers^ | 1 + .../test/bipbop_225w_175kbps-cenc-audio-key2-2.m4s | Bin 0 -> 565 bytes ...bop_225w_175kbps-cenc-audio-key2-2.m4s^headers^ | 1 + .../test/bipbop_225w_175kbps-cenc-audio-key2-3.m4s | Bin 0 -> 977 bytes ...bop_225w_175kbps-cenc-audio-key2-3.m4s^headers^ | 1 + .../test/bipbop_225w_175kbps-cenc-audio-key2-4.m4s | Bin 0 -> 389 bytes ...bop_225w_175kbps-cenc-audio-key2-4.m4s^headers^ | 1 + .../bipbop_225w_175kbps-cenc-audio-key2-init.mp4 | Bin 0 -> 1020 bytes ..._225w_175kbps-cenc-audio-key2-init.mp4^headers^ | 1 + .../test/bipbop_225w_175kbps-cenc-video-key1-1.m4s | Bin 0 -> 37646 bytes ...bop_225w_175kbps-cenc-video-key1-1.m4s^headers^ | 1 + .../bipbop_225w_175kbps-cenc-video-key1-init.mp4 | Bin 0 -> 1086 bytes ..._225w_175kbps-cenc-video-key1-init.mp4^headers^ | 1 + .../test/bipbop_225w_175kbps-cenc-video-key2-1.m4s | Bin 0 -> 37646 bytes ...bop_225w_175kbps-cenc-video-key2-1.m4s^headers^ | 1 + .../bipbop_225w_175kbps-cenc-video-key2-init.mp4 | Bin 0 -> 1086 bytes ..._225w_175kbps-cenc-video-key2-init.mp4^headers^ | 1 + dom/media/test/bipbop_225w_175kbps.mp4 | Bin 0 -> 38713 bytes dom/media/test/bipbop_225w_175kbps.mp4^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-audio-key1-1.m4s | Bin 0 -> 921 bytes ...pbop_300_215kbps-cenc-audio-key1-1.m4s^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-audio-key1-2.m4s | Bin 0 -> 565 bytes ...pbop_300_215kbps-cenc-audio-key1-2.m4s^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-audio-key1-3.m4s | Bin 0 -> 977 bytes ...pbop_300_215kbps-cenc-audio-key1-3.m4s^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-audio-key1-4.m4s | Bin 0 -> 389 bytes ...pbop_300_215kbps-cenc-audio-key1-4.m4s^headers^ | 1 + .../bipbop_300_215kbps-cenc-audio-key1-init.mp4 | Bin 0 -> 874 bytes ...p_300_215kbps-cenc-audio-key1-init.mp4^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-audio-key2-1.m4s | Bin 0 -> 921 bytes ...pbop_300_215kbps-cenc-audio-key2-1.m4s^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-audio-key2-2.m4s | Bin 0 -> 565 bytes ...pbop_300_215kbps-cenc-audio-key2-2.m4s^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-audio-key2-3.m4s | Bin 0 -> 977 bytes ...pbop_300_215kbps-cenc-audio-key2-3.m4s^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-audio-key2-4.m4s | Bin 0 -> 389 bytes ...pbop_300_215kbps-cenc-audio-key2-4.m4s^headers^ | 1 + .../bipbop_300_215kbps-cenc-audio-key2-init.mp4 | Bin 0 -> 874 bytes ...p_300_215kbps-cenc-audio-key2-init.mp4^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-video-key1-1.m4s | Bin 0 -> 25211 bytes ...pbop_300_215kbps-cenc-video-key1-1.m4s^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-video-key1-2.m4s | Bin 0 -> 22938 bytes ...pbop_300_215kbps-cenc-video-key1-2.m4s^headers^ | 1 + .../bipbop_300_215kbps-cenc-video-key1-init.mp4 | Bin 0 -> 932 bytes ...p_300_215kbps-cenc-video-key1-init.mp4^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-video-key2-1.m4s | Bin 0 -> 25211 bytes ...pbop_300_215kbps-cenc-video-key2-1.m4s^headers^ | 1 + .../test/bipbop_300_215kbps-cenc-video-key2-2.m4s | Bin 0 -> 22938 bytes ...pbop_300_215kbps-cenc-video-key2-2.m4s^headers^ | 1 + .../bipbop_300_215kbps-cenc-video-key2-init.mp4 | Bin 0 -> 932 bytes ...p_300_215kbps-cenc-video-key2-init.mp4^headers^ | 1 + dom/media/test/bipbop_300_215kbps.mp4 | Bin 0 -> 48393 bytes .../bipbop_300wp_227kbps-cenc-audio-key1-1.m4s | Bin 0 -> 921 bytes ...op_300wp_227kbps-cenc-audio-key1-1.m4s^headers^ | 1 + 
.../bipbop_300wp_227kbps-cenc-audio-key1-2.m4s | Bin 0 -> 565 bytes ...op_300wp_227kbps-cenc-audio-key1-2.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-audio-key1-3.m4s | Bin 0 -> 977 bytes ...op_300wp_227kbps-cenc-audio-key1-3.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-audio-key1-4.m4s | Bin 0 -> 389 bytes ...op_300wp_227kbps-cenc-audio-key1-4.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-audio-key1-init.mp4 | Bin 0 -> 1020 bytes ...300wp_227kbps-cenc-audio-key1-init.mp4^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-audio-key2-1.m4s | Bin 0 -> 921 bytes ...op_300wp_227kbps-cenc-audio-key2-1.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-audio-key2-2.m4s | Bin 0 -> 565 bytes ...op_300wp_227kbps-cenc-audio-key2-2.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-audio-key2-3.m4s | Bin 0 -> 977 bytes ...op_300wp_227kbps-cenc-audio-key2-3.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-audio-key2-4.m4s | Bin 0 -> 389 bytes ...op_300wp_227kbps-cenc-audio-key2-4.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-audio-key2-init.mp4 | Bin 0 -> 1020 bytes ...300wp_227kbps-cenc-audio-key2-init.mp4^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-video-key1-1.m4s | Bin 0 -> 25211 bytes ...op_300wp_227kbps-cenc-video-key1-1.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-video-key1-2.m4s | Bin 0 -> 22938 bytes ...op_300wp_227kbps-cenc-video-key1-2.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-video-key1-init.mp4 | Bin 0 -> 1094 bytes ...300wp_227kbps-cenc-video-key1-init.mp4^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-video-key2-1.m4s | Bin 0 -> 25211 bytes ...op_300wp_227kbps-cenc-video-key2-1.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-video-key2-2.m4s | Bin 0 -> 22938 bytes ...op_300wp_227kbps-cenc-video-key2-2.m4s^headers^ | 1 + .../bipbop_300wp_227kbps-cenc-video-key2-init.mp4 | Bin 0 -> 1094 bytes ...300wp_227kbps-cenc-video-key2-init.mp4^headers^ | 1 + dom/media/test/bipbop_300wp_227kbps.mp4 | Bin 0 -> 48355 bytes .../test/bipbop_360w_253kbps-cenc-audio-key1-1.m4s | Bin 0 -> 921 bytes ...bop_360w_253kbps-cenc-audio-key1-1.m4s^headers^ | 1 + .../test/bipbop_360w_253kbps-cenc-audio-key1-2.m4s | Bin 0 -> 565 bytes ...bop_360w_253kbps-cenc-audio-key1-2.m4s^headers^ | 1 + .../test/bipbop_360w_253kbps-cenc-audio-key1-3.m4s | Bin 0 -> 977 bytes ...bop_360w_253kbps-cenc-audio-key1-3.m4s^headers^ | 1 + .../test/bipbop_360w_253kbps-cenc-audio-key1-4.m4s | Bin 0 -> 389 bytes ...bop_360w_253kbps-cenc-audio-key1-4.m4s^headers^ | 1 + .../bipbop_360w_253kbps-cenc-audio-key1-init.mp4 | Bin 0 -> 1020 bytes ..._360w_253kbps-cenc-audio-key1-init.mp4^headers^ | 1 + .../test/bipbop_360w_253kbps-cenc-audio-key2-1.m4s | Bin 0 -> 921 bytes ...bop_360w_253kbps-cenc-audio-key2-1.m4s^headers^ | 1 + .../test/bipbop_360w_253kbps-cenc-audio-key2-2.m4s | Bin 0 -> 565 bytes ...bop_360w_253kbps-cenc-audio-key2-2.m4s^headers^ | 1 + .../test/bipbop_360w_253kbps-cenc-audio-key2-3.m4s | Bin 0 -> 977 bytes ...bop_360w_253kbps-cenc-audio-key2-3.m4s^headers^ | 1 + .../test/bipbop_360w_253kbps-cenc-audio-key2-4.m4s | Bin 0 -> 389 bytes ...bop_360w_253kbps-cenc-audio-key2-4.m4s^headers^ | 1 + .../bipbop_360w_253kbps-cenc-audio-key2-init.mp4 | Bin 0 -> 1020 bytes ..._360w_253kbps-cenc-audio-key2-init.mp4^headers^ | 1 + .../test/bipbop_360w_253kbps-cenc-video-key1-1.m4s | Bin 0 -> 53149 bytes ...bop_360w_253kbps-cenc-video-key1-1.m4s^headers^ | 1 + .../bipbop_360w_253kbps-cenc-video-key1-init.mp4 | Bin 0 -> 1088 bytes ..._360w_253kbps-cenc-video-key1-init.mp4^headers^ | 1 + 
.../test/bipbop_360w_253kbps-cenc-video-key2-1.m4s | Bin 0 -> 53149 bytes ...bop_360w_253kbps-cenc-video-key2-1.m4s^headers^ | 1 + .../bipbop_360w_253kbps-cenc-video-key2-init.mp4 | Bin 0 -> 1088 bytes ..._360w_253kbps-cenc-video-key2-init.mp4^headers^ | 1 + .../test/bipbop_360w_253kbps-clearkey-audio.webm | Bin 0 -> 7553 bytes ...ipbop_360w_253kbps-clearkey-audio.webm^headers^ | 1 + .../bipbop_360w_253kbps-clearkey-video-vp8.webm | Bin 0 -> 44671 bytes ...p_360w_253kbps-clearkey-video-vp8.webm^headers^ | 1 + .../bipbop_360w_253kbps-clearkey-video-vp9.webm | Bin 0 -> 46030 bytes ...p_360w_253kbps-clearkey-video-vp9.webm^headers^ | 1 + dom/media/test/bipbop_360w_253kbps.mp4 | Bin 0 -> 54218 bytes .../test/bipbop_480_624kbps-cenc-audio-key1-1.m4s | Bin 0 -> 921 bytes ...pbop_480_624kbps-cenc-audio-key1-1.m4s^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-audio-key1-2.m4s | Bin 0 -> 565 bytes ...pbop_480_624kbps-cenc-audio-key1-2.m4s^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-audio-key1-3.m4s | Bin 0 -> 977 bytes ...pbop_480_624kbps-cenc-audio-key1-3.m4s^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-audio-key1-4.m4s | Bin 0 -> 389 bytes ...pbop_480_624kbps-cenc-audio-key1-4.m4s^headers^ | 1 + .../bipbop_480_624kbps-cenc-audio-key1-init.mp4 | Bin 0 -> 874 bytes ...p_480_624kbps-cenc-audio-key1-init.mp4^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-audio-key2-1.m4s | Bin 0 -> 921 bytes ...pbop_480_624kbps-cenc-audio-key2-1.m4s^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-audio-key2-2.m4s | Bin 0 -> 565 bytes ...pbop_480_624kbps-cenc-audio-key2-2.m4s^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-audio-key2-3.m4s | Bin 0 -> 977 bytes ...pbop_480_624kbps-cenc-audio-key2-3.m4s^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-audio-key2-4.m4s | Bin 0 -> 389 bytes ...pbop_480_624kbps-cenc-audio-key2-4.m4s^headers^ | 1 + .../bipbop_480_624kbps-cenc-audio-key2-init.mp4 | Bin 0 -> 874 bytes ...p_480_624kbps-cenc-audio-key2-init.mp4^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-video-key1-1.m4s | Bin 0 -> 68025 bytes ...pbop_480_624kbps-cenc-video-key1-1.m4s^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-video-key1-2.m4s | Bin 0 -> 66457 bytes ...pbop_480_624kbps-cenc-video-key1-2.m4s^headers^ | 1 + .../bipbop_480_624kbps-cenc-video-key1-init.mp4 | Bin 0 -> 932 bytes ...p_480_624kbps-cenc-video-key1-init.mp4^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-video-key2-1.m4s | Bin 0 -> 68025 bytes ...pbop_480_624kbps-cenc-video-key2-1.m4s^headers^ | 1 + .../test/bipbop_480_624kbps-cenc-video-key2-2.m4s | Bin 0 -> 66457 bytes ...pbop_480_624kbps-cenc-video-key2-2.m4s^headers^ | 1 + .../bipbop_480_624kbps-cenc-video-key2-init.mp4 | Bin 0 -> 932 bytes ...p_480_624kbps-cenc-video-key2-init.mp4^headers^ | 1 + dom/media/test/bipbop_480_624kbps.mp4 | Bin 0 -> 133264 bytes .../test/bipbop_480_959kbps-cenc-audio-key1-1.m4s | Bin 0 -> 921 bytes ...pbop_480_959kbps-cenc-audio-key1-1.m4s^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-audio-key1-2.m4s | Bin 0 -> 565 bytes ...pbop_480_959kbps-cenc-audio-key1-2.m4s^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-audio-key1-3.m4s | Bin 0 -> 977 bytes ...pbop_480_959kbps-cenc-audio-key1-3.m4s^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-audio-key1-4.m4s | Bin 0 -> 389 bytes ...pbop_480_959kbps-cenc-audio-key1-4.m4s^headers^ | 1 + .../bipbop_480_959kbps-cenc-audio-key1-init.mp4 | Bin 0 -> 874 bytes ...p_480_959kbps-cenc-audio-key1-init.mp4^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-audio-key2-1.m4s | Bin 0 -> 921 bytes 
...pbop_480_959kbps-cenc-audio-key2-1.m4s^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-audio-key2-2.m4s | Bin 0 -> 565 bytes ...pbop_480_959kbps-cenc-audio-key2-2.m4s^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-audio-key2-3.m4s | Bin 0 -> 977 bytes ...pbop_480_959kbps-cenc-audio-key2-3.m4s^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-audio-key2-4.m4s | Bin 0 -> 389 bytes ...pbop_480_959kbps-cenc-audio-key2-4.m4s^headers^ | 1 + .../bipbop_480_959kbps-cenc-audio-key2-init.mp4 | Bin 0 -> 874 bytes ...p_480_959kbps-cenc-audio-key2-init.mp4^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-video-key1-1.m4s | Bin 0 -> 101203 bytes ...pbop_480_959kbps-cenc-video-key1-1.m4s^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-video-key1-2.m4s | Bin 0 -> 99366 bytes ...pbop_480_959kbps-cenc-video-key1-2.m4s^headers^ | 1 + .../bipbop_480_959kbps-cenc-video-key1-init.mp4 | Bin 0 -> 932 bytes ...p_480_959kbps-cenc-video-key1-init.mp4^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-video-key2-1.m4s | Bin 0 -> 101203 bytes ...pbop_480_959kbps-cenc-video-key2-1.m4s^headers^ | 1 + .../test/bipbop_480_959kbps-cenc-video-key2-2.m4s | Bin 0 -> 99366 bytes ...pbop_480_959kbps-cenc-video-key2-2.m4s^headers^ | 1 + .../bipbop_480_959kbps-cenc-video-key2-init.mp4 | Bin 0 -> 932 bytes ...p_480_959kbps-cenc-video-key2-init.mp4^headers^ | 1 + dom/media/test/bipbop_480_959kbps.mp4 | Bin 0 -> 199351 bytes .../bipbop_480wp_1001kbps-cenc-audio-key1-1.m4s | Bin 0 -> 921 bytes ...p_480wp_1001kbps-cenc-audio-key1-1.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-audio-key1-2.m4s | Bin 0 -> 565 bytes ...p_480wp_1001kbps-cenc-audio-key1-2.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-audio-key1-3.m4s | Bin 0 -> 977 bytes ...p_480wp_1001kbps-cenc-audio-key1-3.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-audio-key1-4.m4s | Bin 0 -> 389 bytes ...p_480wp_1001kbps-cenc-audio-key1-4.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-audio-key1-init.mp4 | Bin 0 -> 1020 bytes ...80wp_1001kbps-cenc-audio-key1-init.mp4^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-audio-key2-1.m4s | Bin 0 -> 921 bytes ...p_480wp_1001kbps-cenc-audio-key2-1.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-audio-key2-2.m4s | Bin 0 -> 565 bytes ...p_480wp_1001kbps-cenc-audio-key2-2.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-audio-key2-3.m4s | Bin 0 -> 977 bytes ...p_480wp_1001kbps-cenc-audio-key2-3.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-audio-key2-4.m4s | Bin 0 -> 389 bytes ...p_480wp_1001kbps-cenc-audio-key2-4.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-audio-key2-init.mp4 | Bin 0 -> 1020 bytes ...80wp_1001kbps-cenc-audio-key2-init.mp4^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-video-key1-1.m4s | Bin 0 -> 101203 bytes ...p_480wp_1001kbps-cenc-video-key1-1.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-video-key1-2.m4s | Bin 0 -> 99366 bytes ...p_480wp_1001kbps-cenc-video-key1-2.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-video-key1-init.mp4 | Bin 0 -> 1094 bytes ...80wp_1001kbps-cenc-video-key1-init.mp4^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-video-key2-1.m4s | Bin 0 -> 101203 bytes ...p_480wp_1001kbps-cenc-video-key2-1.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-video-key2-2.m4s | Bin 0 -> 99366 bytes ...p_480wp_1001kbps-cenc-video-key2-2.m4s^headers^ | 1 + .../bipbop_480wp_1001kbps-cenc-video-key2-init.mp4 | Bin 0 -> 1094 bytes ...80wp_1001kbps-cenc-video-key2-init.mp4^headers^ | 1 + dom/media/test/bipbop_480wp_1001kbps.mp4 | Bin 0 -> 199911 bytes 
.../bipbop_480wp_663kbps-cenc-audio-key1-1.m4s | Bin 0 -> 921 bytes ...op_480wp_663kbps-cenc-audio-key1-1.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-audio-key1-2.m4s | Bin 0 -> 565 bytes ...op_480wp_663kbps-cenc-audio-key1-2.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-audio-key1-3.m4s | Bin 0 -> 977 bytes ...op_480wp_663kbps-cenc-audio-key1-3.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-audio-key1-4.m4s | Bin 0 -> 389 bytes ...op_480wp_663kbps-cenc-audio-key1-4.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-audio-key1-init.mp4 | Bin 0 -> 1020 bytes ...480wp_663kbps-cenc-audio-key1-init.mp4^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-audio-key2-1.m4s | Bin 0 -> 921 bytes ...op_480wp_663kbps-cenc-audio-key2-1.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-audio-key2-2.m4s | Bin 0 -> 565 bytes ...op_480wp_663kbps-cenc-audio-key2-2.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-audio-key2-3.m4s | Bin 0 -> 977 bytes ...op_480wp_663kbps-cenc-audio-key2-3.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-audio-key2-4.m4s | Bin 0 -> 389 bytes ...op_480wp_663kbps-cenc-audio-key2-4.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-audio-key2-init.mp4 | Bin 0 -> 1020 bytes ...480wp_663kbps-cenc-audio-key2-init.mp4^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-video-key1-1.m4s | Bin 0 -> 68025 bytes ...op_480wp_663kbps-cenc-video-key1-1.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-video-key1-2.m4s | Bin 0 -> 66457 bytes ...op_480wp_663kbps-cenc-video-key1-2.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-video-key1-init.mp4 | Bin 0 -> 1094 bytes ...480wp_663kbps-cenc-video-key1-init.mp4^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-video-key2-1.m4s | Bin 0 -> 68025 bytes ...op_480wp_663kbps-cenc-video-key2-1.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-video-key2-2.m4s | Bin 0 -> 66457 bytes ...op_480wp_663kbps-cenc-video-key2-2.m4s^headers^ | 1 + .../bipbop_480wp_663kbps-cenc-video-key2-init.mp4 | Bin 0 -> 1094 bytes ...480wp_663kbps-cenc-video-key2-init.mp4^headers^ | 1 + dom/media/test/bipbop_480wp_663kbps.mp4 | Bin 0 -> 133824 bytes dom/media/test/bipbop_audio_aac_22.05k.mp4 | Bin 0 -> 2424 bytes .../test/bipbop_audio_aac_22.05k.mp4^headers^ | 1 + dom/media/test/bipbop_audio_aac_44.1k.mp4 | Bin 0 -> 3239 bytes dom/media/test/bipbop_audio_aac_44.1k.mp4^headers^ | 1 + dom/media/test/bipbop_audio_aac_48k.mp4 | Bin 0 -> 3286 bytes dom/media/test/bipbop_audio_aac_48k.mp4^headers^ | 1 + dom/media/test/bipbop_audio_aac_88.2k.mp4 | Bin 0 -> 3769 bytes dom/media/test/bipbop_audio_aac_88.2k.mp4^headers^ | 1 + dom/media/test/bipbop_audio_aac_8k.mp4 | Bin 0 -> 1707 bytes dom/media/test/bipbop_audio_aac_8k.mp4^headers^ | 1 + dom/media/test/bipbop_audio_aac_96k.mp4 | Bin 0 -> 4010 bytes dom/media/test/bipbop_audio_aac_96k.mp4^headers^ | 1 + dom/media/test/bipbop_cbcs_10_0_audio_1.m4s | Bin 0 -> 1364 bytes .../test/bipbop_cbcs_10_0_audio_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_10_0_audio_init.mp4 | Bin 0 -> 936 bytes .../test/bipbop_cbcs_10_0_audio_init.mp4^headers^ | 1 + dom/media/test/bipbop_cbcs_10_0_video_1.m4s | Bin 0 -> 57044 bytes .../test/bipbop_cbcs_10_0_video_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_10_0_video_init.mp4 | Bin 0 -> 972 bytes .../test/bipbop_cbcs_10_0_video_init.mp4^headers^ | 1 + dom/media/test/bipbop_cbcs_1_9_audio_1.m4s | Bin 0 -> 1364 bytes .../test/bipbop_cbcs_1_9_audio_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_1_9_audio_init.mp4 | Bin 0 -> 936 bytes .../test/bipbop_cbcs_1_9_audio_init.mp4^headers^ | 1 + 
dom/media/test/bipbop_cbcs_1_9_video_1.m4s | Bin 0 -> 57044 bytes .../test/bipbop_cbcs_1_9_video_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_1_9_video_init.mp4 | Bin 0 -> 972 bytes .../test/bipbop_cbcs_1_9_video_init.mp4^headers^ | 1 + dom/media/test/bipbop_cbcs_5_5_audio_1.m4s | Bin 0 -> 1364 bytes .../test/bipbop_cbcs_5_5_audio_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_5_5_audio_init.mp4 | Bin 0 -> 936 bytes .../test/bipbop_cbcs_5_5_audio_init.mp4^headers^ | 1 + dom/media/test/bipbop_cbcs_5_5_video_1.m4s | Bin 0 -> 57044 bytes .../test/bipbop_cbcs_5_5_video_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_5_5_video_init.mp4 | Bin 0 -> 972 bytes .../test/bipbop_cbcs_5_5_video_init.mp4^headers^ | 1 + dom/media/test/bipbop_cbcs_7_7_audio_1.m4s | Bin 0 -> 1364 bytes .../test/bipbop_cbcs_7_7_audio_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_7_7_audio_init.mp4 | Bin 0 -> 936 bytes .../test/bipbop_cbcs_7_7_audio_init.mp4^headers^ | 1 + dom/media/test/bipbop_cbcs_7_7_video_1.m4s | Bin 0 -> 57044 bytes .../test/bipbop_cbcs_7_7_video_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_7_7_video_init.mp4 | Bin 0 -> 972 bytes .../test/bipbop_cbcs_7_7_video_init.mp4^headers^ | 1 + dom/media/test/bipbop_cbcs_9_8_audio_1.m4s | Bin 0 -> 1364 bytes .../test/bipbop_cbcs_9_8_audio_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_9_8_audio_init.mp4 | Bin 0 -> 936 bytes .../test/bipbop_cbcs_9_8_audio_init.mp4^headers^ | 1 + dom/media/test/bipbop_cbcs_9_8_video_1.m4s | Bin 0 -> 57044 bytes .../test/bipbop_cbcs_9_8_video_1.m4s^headers^ | 1 + dom/media/test/bipbop_cbcs_9_8_video_init.mp4 | Bin 0 -> 972 bytes .../test/bipbop_cbcs_9_8_video_init.mp4^headers^ | 1 + ...t_pixel_metadata_bigger_than_in_stream_vp8.webm | Bin 0 -> 48942 bytes ...etadata_bigger_than_in_stream_vp8.webm^headers^ | 1 + ...pixel_metadata_narrower_than_in_stream_vp8.webm | Bin 0 -> 48942 bytes ...adata_narrower_than_in_stream_vp8.webm^headers^ | 1 + ..._pixel_metadata_smaller_than_in_stream_vp8.webm | Bin 0 -> 48942 bytes ...tadata_smaller_than_in_stream_vp8.webm^headers^ | 1 + dom/media/test/bipbop_short_vp8.webm | Bin 0 -> 48942 bytes dom/media/test/bipbop_short_vp8.webm^headers^ | 1 + dom/media/test/black100x100-aspect3to2.ogv | Bin 0 -> 3428 bytes .../test/black100x100-aspect3to2.ogv^headers^ | 1 + dom/media/test/bogus.duh | 45 + dom/media/test/bogus.ogv | 45 + dom/media/test/bogus.ogv^headers^ | 1 + dom/media/test/bogus.wav | 45 + dom/media/test/bogus.wav^headers^ | 1 + dom/media/test/browser/browser.toml | 26 + .../browser_encrypted_play_time_telemetry.js | 269 + dom/media/test/browser/browser_partial.js | 56 + .../browser_tab_visibility_and_play_time.js | 218 + ...er_telemetry_video_hardware_decoding_support.js | 106 + dom/media/test/browser/file_empty_page.html | 8 + dom/media/test/browser/file_media.html | 10 + dom/media/test/browser/wmfme/browser.toml | 13 + .../test/browser/wmfme/browser_wmfme_crash.js | 52 + .../browser/wmfme/browser_wmfme_max_crashes.js | 69 + dom/media/test/browser/wmfme/file_video.html | 9 + dom/media/test/browser/wmfme/head.js | 201 + dom/media/test/bug1066943.webm | Bin 0 -> 18442 bytes dom/media/test/bug1066943.webm^headers^ | 1 + dom/media/test/bug1301226-odd.wav | Bin 0 -> 244 bytes dom/media/test/bug1301226-odd.wav^headers^ | 1 + dom/media/test/bug1301226.wav | Bin 0 -> 240 bytes dom/media/test/bug1301226.wav^headers^ | 1 + dom/media/test/bug1377278.webm | Bin 0 -> 215594 bytes dom/media/test/bug1377278.webm^headers^ | 1 + dom/media/test/bug1535980.webm | Bin 0 -> 81467 bytes 
dom/media/test/bug1535980.webm^headers^ | 1 + dom/media/test/bug1799787.webm | Bin 0 -> 1053 bytes dom/media/test/bug1799787.webm^headers^ | 1 + dom/media/test/bug461281.ogg | Bin 0 -> 16521 bytes dom/media/test/bug461281.ogg^headers^ | 1 + dom/media/test/bug482461-theora.ogv | Bin 0 -> 280904 bytes dom/media/test/bug482461-theora.ogv^headers^ | 1 + dom/media/test/bug482461.ogv | Bin 0 -> 305785 bytes dom/media/test/bug482461.ogv^headers^ | 1 + dom/media/test/bug495129.ogv | Bin 0 -> 122207 bytes dom/media/test/bug495129.ogv^headers^ | 1 + dom/media/test/bug495794.ogg | Bin 0 -> 4837 bytes dom/media/test/bug495794.ogg^headers^ | 1 + dom/media/test/bug498380.ogv | Bin 0 -> 65535 bytes dom/media/test/bug498380.ogv^headers^ | 1 + dom/media/test/bug498855-1.ogv | Bin 0 -> 20480 bytes dom/media/test/bug498855-1.ogv^headers^ | 1 + dom/media/test/bug498855-2.ogv | Bin 0 -> 20480 bytes dom/media/test/bug498855-2.ogv^headers^ | 1 + dom/media/test/bug498855-3.ogv | Bin 0 -> 20480 bytes dom/media/test/bug498855-3.ogv^headers^ | 1 + dom/media/test/bug499519.ogv | Bin 0 -> 20480 bytes dom/media/test/bug499519.ogv^headers^ | 1 + dom/media/test/bug500311.ogv | Bin 0 -> 55834 bytes dom/media/test/bug500311.ogv^headers^ | 1 + dom/media/test/bug501279.ogg | Bin 0 -> 2361 bytes dom/media/test/bug501279.ogg^headers^ | 1 + dom/media/test/bug504613.ogv | Bin 0 -> 35000 bytes dom/media/test/bug504613.ogv^headers^ | 1 + dom/media/test/bug504644.ogv | Bin 0 -> 131114 bytes dom/media/test/bug504644.ogv^headers^ | 1 + dom/media/test/bug504843.ogv | Bin 0 -> 65536 bytes dom/media/test/bug504843.ogv^headers^ | 1 + dom/media/test/bug506094.ogv | Bin 0 -> 8195 bytes dom/media/test/bug506094.ogv^headers^ | 1 + dom/media/test/bug516323.indexed.ogv | Bin 0 -> 162193 bytes dom/media/test/bug516323.indexed.ogv^headers^ | 1 + dom/media/test/bug516323.ogv | Bin 0 -> 161789 bytes dom/media/test/bug516323.ogv^headers^ | 1 + dom/media/test/bug520493.ogg | Bin 0 -> 3901 bytes dom/media/test/bug520493.ogg^headers^ | 1 + dom/media/test/bug520500.ogg | Bin 0 -> 21978 bytes dom/media/test/bug520500.ogg^headers^ | 1 + dom/media/test/bug520908.ogv | Bin 0 -> 28942 bytes dom/media/test/bug520908.ogv^headers^ | 1 + dom/media/test/bug523816.ogv | Bin 0 -> 40585 bytes dom/media/test/bug523816.ogv^headers^ | 1 + dom/media/test/bug533822.ogg | Bin 0 -> 35010 bytes dom/media/test/bug533822.ogg^headers^ | 1 + dom/media/test/bug556821.ogv | Bin 0 -> 196608 bytes dom/media/test/bug556821.ogv^headers^ | 1 + dom/media/test/bug557094.ogv | Bin 0 -> 76966 bytes dom/media/test/bug557094.ogv^headers^ | 1 + dom/media/test/bug604067.webm | Bin 0 -> 103227 bytes dom/media/test/bug604067.webm^headers^ | 1 + dom/media/test/bunny.webm | Bin 0 -> 195455 bytes dom/media/test/bunny_hd_5s.mp4 | Bin 0 -> 845651 bytes dom/media/test/can_play_type_dash.js | 27 + dom/media/test/can_play_type_ogg.js | 72 + dom/media/test/can_play_type_wave.js | 30 + dom/media/test/can_play_type_webm.js | 39 + dom/media/test/cancellable_request.sjs | 162 + dom/media/test/chain.ogg | Bin 0 -> 63610 bytes dom/media/test/chain.ogg^headers^ | 1 + dom/media/test/chain.ogv | Bin 0 -> 45463 bytes dom/media/test/chain.ogv^headers^ | 1 + dom/media/test/chain.opus | Bin 0 -> 50101 bytes dom/media/test/chain.opus^headers^ | 1 + dom/media/test/chained-audio-video.ogg | Bin 0 -> 92552 bytes dom/media/test/chained-audio-video.ogg^headers^ | 1 + dom/media/test/chained-video.ogv | Bin 0 -> 57906 bytes dom/media/test/chained-video.ogv^headers^ | 1 + dom/media/test/chrome/chrome.toml | 12 + 
.../test/chrome/test_accumulated_play_time.html | 694 + .../chrome/test_telemetry_source_buffer_type.html | 105 + dom/media/test/chromeHelper.js | 23 + dom/media/test/cloneElementVisually_helpers.js | 232 + dom/media/test/contentType.sjs | 77 + dom/media/test/crashtests/0-timescale.html | 14 + dom/media/test/crashtests/0-timescale.mp4 | Bin 0 -> 14718 bytes dom/media/test/crashtests/1012609.html | 9 + dom/media/test/crashtests/1015662.html | 4 + dom/media/test/crashtests/1028458.html | 23 + dom/media/test/crashtests/1041466.html | 21 + dom/media/test/crashtests/1045650.html | 18 + dom/media/test/crashtests/1080986.html | 3 + dom/media/test/crashtests/1080986.wav | Bin 0 -> 592 bytes dom/media/test/crashtests/1122218.html | 24 + dom/media/test/crashtests/1127188.html | 3 + dom/media/test/crashtests/1157994.html | 21 + dom/media/test/crashtests/1158427.html | 21 + dom/media/test/crashtests/1180881.html | 8 + dom/media/test/crashtests/1180881.webm | Bin 0 -> 524 bytes dom/media/test/crashtests/1185176.html | 24 + dom/media/test/crashtests/1185191.html | 21 + dom/media/test/crashtests/1185192.html | 18 + dom/media/test/crashtests/1197935.html | 8 + dom/media/test/crashtests/1197935.mp4 | Bin 0 -> 1806042 bytes dom/media/test/crashtests/1223670.html | 23 + dom/media/test/crashtests/1236639.html | 9 + dom/media/test/crashtests/1236639.mp3 | Bin 0 -> 1080 bytes dom/media/test/crashtests/1257700.html | 8 + dom/media/test/crashtests/1257700.webm | Bin 0 -> 59264 bytes dom/media/test/crashtests/1267263.html | 19 + dom/media/test/crashtests/1270303.html | 8 + dom/media/test/crashtests/1270303.webm | Bin 0 -> 5822 bytes dom/media/test/crashtests/1291702.html | 72 + dom/media/test/crashtests/1368490.html | 30 + dom/media/test/crashtests/1378826.html | 46 + dom/media/test/crashtests/1384248.html | 10 + dom/media/test/crashtests/1388372.html | 13 + dom/media/test/crashtests/1389304.html | 32 + dom/media/test/crashtests/1389304.mp4 | Bin 0 -> 198320 bytes dom/media/test/crashtests/1393272.webm | Bin 0 -> 6781 bytes dom/media/test/crashtests/1411322.html | 18 + dom/media/test/crashtests/1414444.mp4 | Bin 0 -> 34009 bytes dom/media/test/crashtests/1450845.html | 34 + dom/media/test/crashtests/1489160.html | 10 + dom/media/test/crashtests/1494073.html | 19 + dom/media/test/crashtests/1517199.html | 17 + dom/media/test/crashtests/1526044.html | 19 + dom/media/test/crashtests/1530897.webm | Bin 0 -> 509 bytes dom/media/test/crashtests/1538727.html | 14 + dom/media/test/crashtests/1545133.html | 34 + dom/media/test/crashtests/1547784.html | 33 + dom/media/test/crashtests/1547899.html | 20 + dom/media/test/crashtests/1560215.html | 20 + dom/media/test/crashtests/1569645.html | 23 + dom/media/test/crashtests/1575271.html | 25 + dom/media/test/crashtests/1577184.html | 15 + dom/media/test/crashtests/1587248.html | 23 + dom/media/test/crashtests/1594466.html | 22 + dom/media/test/crashtests/1601385.html | 12 + dom/media/test/crashtests/1601422.html | 20 + dom/media/test/crashtests/1604941.html | 22 + dom/media/test/crashtests/1608286.html | 50 + dom/media/test/crashtests/1673525.html | 15 + dom/media/test/crashtests/1673526-1.html | 20 + dom/media/test/crashtests/1673526-2.html | 20 + dom/media/test/crashtests/1693043.html | 21 + dom/media/test/crashtests/1696511.html | 22 + dom/media/test/crashtests/1697521.html | 19 + dom/media/test/crashtests/1708790.html | 22 + dom/media/test/crashtests/1709130.html | 19 + dom/media/test/crashtests/1734008.html | 22 + dom/media/test/crashtests/1734008.webm | Bin 0 -> 8253 bytes 
dom/media/test/crashtests/1741677.html | 15 + dom/media/test/crashtests/1748272.html | 12 + dom/media/test/crashtests/1752917.html | 18 + dom/media/test/crashtests/1762620.html | 8 + dom/media/test/crashtests/1765842.html | 8 + dom/media/test/crashtests/1765842.webm | Bin 0 -> 17210 bytes dom/media/test/crashtests/1787281.html | 13 + dom/media/test/crashtests/1787281.mp4 | Bin 0 -> 2736 bytes dom/media/test/crashtests/1798778.html | 11 + dom/media/test/crashtests/1830206.html | 12 + dom/media/test/crashtests/1830206.mp4 | Bin 0 -> 41224 bytes dom/media/test/crashtests/1833894.mp4 | Bin 0 -> 1000465 bytes dom/media/test/crashtests/1833896.mp4 | Bin 0 -> 38215 bytes dom/media/test/crashtests/1835118.adts | Bin 0 -> 132 bytes dom/media/test/crashtests/1835164.html | 13 + dom/media/test/crashtests/1835164.opus | Bin 0 -> 2250 bytes dom/media/test/crashtests/1839193.adts | Bin 0 -> 484 bytes dom/media/test/crashtests/1839193.html | 9 + dom/media/test/crashtests/1840002.webm | Bin 0 -> 512 bytes dom/media/test/crashtests/1845350.mp4 | Bin 0 -> 1045 bytes dom/media/test/crashtests/1848660.html | 10 + dom/media/test/crashtests/1848660.wav | 0 dom/media/test/crashtests/1850453.flac | Bin 0 -> 104472 bytes dom/media/test/crashtests/1850453.html | 12 + dom/media/test/crashtests/1859384.mp4 | Bin 0 -> 4128 bytes dom/media/test/crashtests/1859600.mp4 | Bin 0 -> 5172 bytes dom/media/test/crashtests/1860840.mp4 | Bin 0 -> 5172 bytes dom/media/test/crashtests/1864450.html | 12 + dom/media/test/crashtests/1872787.html | 14 + dom/media/test/crashtests/255ch.wav | Bin 0 -> 68318 bytes dom/media/test/crashtests/459439-1.html | 36 + dom/media/test/crashtests/466607-1.html | 14 + dom/media/test/crashtests/466945-1.html | 25 + dom/media/test/crashtests/468763-1.html | 1 + dom/media/test/crashtests/474744-1.html | 15 + dom/media/test/crashtests/481136-1.html | 3 + dom/media/test/crashtests/492286-1.xhtml | 1 + dom/media/test/crashtests/493915-1.html | 18 + dom/media/test/crashtests/495794-1.html | 8 + dom/media/test/crashtests/495794-1.ogg | Bin 0 -> 4837 bytes dom/media/test/crashtests/497734-1.xhtml | 21 + dom/media/test/crashtests/497734-2.html | 17 + dom/media/test/crashtests/576612-1.html | 15 + dom/media/test/crashtests/691096-1.html | 31 + dom/media/test/crashtests/752784-1.html | 15 + dom/media/test/crashtests/789075-1.html | 20 + dom/media/test/crashtests/789075.webm | Bin 0 -> 12294 bytes dom/media/test/crashtests/795892-1.html | 23 + dom/media/test/crashtests/844563.html | 5 + dom/media/test/crashtests/846612.html | 8 + dom/media/test/crashtests/852838.html | 11 + dom/media/test/crashtests/865004.html | 19 + dom/media/test/crashtests/865537-1.html | 13 + dom/media/test/crashtests/865550.html | 22 + dom/media/test/crashtests/868504.html | 14 + dom/media/test/crashtests/874869.html | 15 + dom/media/test/crashtests/874915.html | 24 + dom/media/test/crashtests/874934.html | 23 + dom/media/test/crashtests/874952.html | 11 + dom/media/test/crashtests/875144.html | 81 + dom/media/test/crashtests/875596.html | 12 + dom/media/test/crashtests/875911.html | 3 + dom/media/test/crashtests/876024-1.html | 5 + dom/media/test/crashtests/876024-2.html | 17 + dom/media/test/crashtests/876118.html | 16 + dom/media/test/crashtests/876207.html | 30 + dom/media/test/crashtests/876215.html | 14 + dom/media/test/crashtests/876249.html | 27 + dom/media/test/crashtests/876252.html | 23 + dom/media/test/crashtests/876834.html | 4 + dom/media/test/crashtests/877527.html | 37 + dom/media/test/crashtests/877820.html | 4 + 
 dom/media/test/crashtests/878014.html | 31 +
 dom/media/test/crashtests/878328.html | 5 +
 dom/media/test/crashtests/878407.html | 11 +
 dom/media/test/crashtests/878478.html | 30 +
 dom/media/test/crashtests/880129.html | 9 +
 dom/media/test/crashtests/880202.html | 33 +
 dom/media/test/crashtests/880342-1.html | 208 +
 dom/media/test/crashtests/880342-2.html | 8 +
 dom/media/test/crashtests/880384.html | 8 +
 dom/media/test/crashtests/880404.html | 6 +
 dom/media/test/crashtests/880724.html | 13 +
 dom/media/test/crashtests/881775.html | 25 +
 dom/media/test/crashtests/882956.html | 15 +
 dom/media/test/crashtests/884459.html | 12 +
 dom/media/test/crashtests/889042.html | 4 +
 dom/media/test/crashtests/907986-1.html | 17 +
 dom/media/test/crashtests/907986-2.html | 17 +
 dom/media/test/crashtests/907986-3.html | 17 +
 dom/media/test/crashtests/907986-4.html | 15 +
 dom/media/test/crashtests/910171-1.html | 17 +
 dom/media/test/crashtests/920987.html | 6 +
 dom/media/test/crashtests/925619-1.html | 14 +
 dom/media/test/crashtests/925619-2.html | 15 +
 dom/media/test/crashtests/926619.html | 24 +
 dom/media/test/crashtests/933151.html | 16 +
 dom/media/test/crashtests/933156.html | 23 +
 dom/media/test/crashtests/944851.html | 17 +
 dom/media/test/crashtests/952756.html | 19 +
 dom/media/test/crashtests/986901.html | 16 +
 dom/media/test/crashtests/990794.html | 22 +
 dom/media/test/crashtests/995289.html | 9 +
 dom/media/test/crashtests/adts-truncated.aac | Bin 0 -> 512 bytes
 dom/media/test/crashtests/adts.aac | Bin 0 -> 8537 bytes
 dom/media/test/crashtests/analyser-channels-1.html | 16 +
 .../crashtests/audiocontext-after-unload-1.html | 27 +
 .../test/crashtests/audiocontext-after-xhr.html | 13 +
 .../crashtests/audiocontext-double-suspend.html | 5 +
 .../audioworkletnode-after-unload-1.html | 27 +
 .../test/crashtests/buffer-source-duration-1.html | 14 +
 .../test/crashtests/buffer-source-ended-1.html | 16 +
 .../buffer-source-resampling-start-1.html | 16 +
 .../buffer-source-slow-resampling-1.html | 34 +
 ...count-in-metadata-different-than-in-content.mp4 | Bin 0 -> 13651 bytes
 .../test/crashtests/convolver-memory-report-1.html | 25 +
 dom/media/test/crashtests/copyFromChannel-2.html | 16 +
 dom/media/test/crashtests/cors.webm | Bin 0 -> 215529 bytes
 dom/media/test/crashtests/cors.webm^headers^ | 1 +
 dom/media/test/crashtests/crashtests.list | 182 +
 .../crashtests/disconnect-wrong-destination.html | 13 +
 dom/media/test/crashtests/doppler-1.html | 23 +
 dom/media/test/crashtests/empty-buffer-source.html | 14 +
 dom/media/test/crashtests/empty-samples.webm | 0
 ...ted-track-with-bad-sample-description-index.mp4 | Bin 0 -> 198320 bytes
 ...ncrypted-track-with-sample-missing-cenc-aux.mp4 | Bin 0 -> 152132 bytes
 .../crashtests/encrypted-track-without-tenc.mp4 | Bin 0 -> 152132 bytes
 dom/media/test/crashtests/invalidfmt.html | 8 +
 dom/media/test/crashtests/invalidfmt.wav | Bin 0 -> 115 bytes
 .../crashtests/media-element-source-seek-1.html | 27 +
 dom/media/test/crashtests/mp4_box_emptyrange.mp4 | Bin 0 -> 918 bytes
 dom/media/test/crashtests/noextradata-8ch.wav | Bin 0 -> 684 bytes
 .../crashtests/offline-buffer-source-ended-1.html | 15 +
 dom/media/test/crashtests/oscillator-ended-1.html | 15 +
 dom/media/test/crashtests/oscillator-ended-2.html | 15 +
 dom/media/test/crashtests/small-timebase.html | 10 +
 dom/media/test/crashtests/small-timebase.mp4 | Bin 0 -> 1355 bytes
 dom/media/test/crashtests/sound.ogg | Bin 0 -> 2603 bytes
 dom/media/test/crashtests/test.mp4 | Bin 0 -> 11817 bytes
 .../test/crashtests/track-with-zero-dimensions.mp4 | Bin 0 -> 11817 bytes
 ...ing_needed_and_last_sample_invalid_duration.ogg | Bin 0 -> 18307 bytes
 dom/media/test/crashtests/video-crash.webm | Bin 0 -> 58482 bytes
 .../crashtests/video-replay-after-audio-end.html | 43 +
 dom/media/test/dash/dash-manifest-garbled-webm.mpd | 35 +
 dom/media/test/dash/dash-manifest-garbled.mpd | 1 +
 dom/media/test/dash/dash-manifest-sjs.mpd | 35 +
 dom/media/test/dash/dash-manifest.mpd | 35 +
 dom/media/test/dash/dash-webm-audio-128k.webm | Bin 0 -> 41946 bytes
 dom/media/test/dash/dash-webm-video-320x180.webm | Bin 0 -> 35123 bytes
 dom/media/test/dash/dash-webm-video-428x240.webm | Bin 0 -> 50206 bytes
 dom/media/test/dash/garbled.webm | 1 +
 dom/media/test/dash_detect_stream_switch.sjs | 143 +
 dom/media/test/detodos-recorder-test.opus | Bin 0 -> 1507 bytes
 dom/media/test/detodos-recorder-test.opus^headers^ | 1 +
 dom/media/test/detodos-short.opus | Bin 0 -> 648 bytes
 dom/media/test/detodos-short.opus^headers^ | 1 +
 dom/media/test/detodos-short.webm | Bin 0 -> 1085 bytes
 dom/media/test/detodos-short.webm^headers^ | 1 +
 dom/media/test/detodos.opus | Bin 0 -> 6270 bytes
 dom/media/test/detodos.opus^headers^ | 1 +
 dom/media/test/detodos.webm | Bin 0 -> 11701 bytes
 dom/media/test/detodos.webm^headers^ | 1 +
 dom/media/test/dirac.ogg | Bin 0 -> 106338 bytes
 dom/media/test/dirac.ogg^headers^ | 1 +
 dom/media/test/dynamic_resource.sjs | 53 +
 dom/media/test/eme.js | 479 +
 dom/media/test/eme_standalone.js | 286 +
 dom/media/test/empty_size.mp3 | Bin 0 -> 90368 bytes
 dom/media/test/file_access_controls.html | 160 +
 dom/media/test/file_eme_createMediaKeys.html | 47 +
 dom/media/test/file_playback_and_bfcache.html | 57 +
 dom/media/test/flac-noheader-s16.flac | Bin 0 -> 242826 bytes
 dom/media/test/flac-noheader-s16.flac^headers^ | 1 +
 dom/media/test/flac-s24.flac | Bin 0 -> 980951 bytes
 dom/media/test/flac-s24.flac^headers^ | 1 +
 dom/media/test/flac-sample-cenc.mp4 | Bin 0 -> 336823 bytes
 dom/media/test/flac-sample-cenc.mp4^headers^ | 1 +
 dom/media/test/flac-sample.mp4 | Bin 0 -> 876556 bytes
 dom/media/test/flac-sample.mp4^headers^ | 1 +
 dom/media/test/force_octet_stream.mp4 | Bin 0 -> 13708 bytes
 dom/media/test/force_octet_stream.mp4^headers^ | 2 +
 dom/media/test/fragment_noplay.js | 19 +
 dom/media/test/fragment_play.js | 92 +
 dom/media/test/gUM_support.js | 103 +
 dom/media/test/gizmo-frag.mp4 | Bin 0 -> 152132 bytes
 dom/media/test/gizmo-noaudio.mp4 | Bin 0 -> 342980 bytes
 dom/media/test/gizmo-noaudio.mp4^headers^ | 1 +
 dom/media/test/gizmo-noaudio.webm | Bin 0 -> 112663 bytes
 dom/media/test/gizmo-noaudio.webm^headers^ | 1 +
 dom/media/test/gizmo-short.mp4 | Bin 0 -> 29905 bytes
 dom/media/test/gizmo-short.mp4^headers^ | 1 +
 dom/media/test/gizmo.mp4 | Bin 0 -> 455255 bytes
 dom/media/test/gizmo.mp4^headers^ | 1 +
 dom/media/test/gizmo.webm | Bin 0 -> 159035 bytes
 dom/media/test/gizmo.webm^headers^ | 1 +
 dom/media/test/gzipped_mp4.sjs | 25 +
 dom/media/test/hevc_white_frame.mp4 | Bin 0 -> 3358 bytes
 dom/media/test/hevc_white_frame.mp4^headers^ | 1 +
 dom/media/test/hevc_white_red_frames.mp4 | Bin 0 -> 29235 bytes
 dom/media/test/hevc_white_red_frames.mp4^headers^ | 1 +
 dom/media/test/hls/400x300_prog_index.m3u8 | 10 +
 dom/media/test/hls/400x300_prog_index_5s.m3u8 | 8 +
 dom/media/test/hls/400x300_seg0.ts | Bin 0 -> 291588 bytes
 dom/media/test/hls/400x300_seg0_5s.ts | Bin 0 -> 168636 bytes
 dom/media/test/hls/400x300_seg1.ts | Bin 0 -> 288204 bytes
 dom/media/test/hls/416x243_prog_index_5s.m3u8 | 8 +
 dom/media/test/hls/416x243_seg0_5s.ts | Bin 0 -> 197400 bytes
dom/media/test/hls/640x480_prog_index.m3u8 | 10 + dom/media/test/hls/640x480_seg0.ts | Bin 0 -> 814228 bytes dom/media/test/hls/640x480_seg1.ts | Bin 0 -> 796368 bytes dom/media/test/hls/960x720_prog_index.m3u8 | 10 + dom/media/test/hls/960x720_seg0.ts | Bin 0 -> 1878120 bytes dom/media/test/hls/960x720_seg1.ts | Bin 0 -> 1839392 bytes dom/media/test/hls/bipbop_16x9_single.m3u8 | 5 + dom/media/test/hls/bipbop_4x3_single.m3u8 | 4 + dom/media/test/hls/bipbop_4x3_variant.m3u8 | 10 + dom/media/test/huge-id3.mp3 | Bin 0 -> 141774 bytes dom/media/test/huge-id3.mp3^headers^ | 1 + dom/media/test/id3tags.mp3 | Bin 0 -> 3530 bytes dom/media/test/id3tags.mp3^headers^ | 1 + dom/media/test/id3v1afterlongid3v2.mp3 | Bin 0 -> 10229 bytes dom/media/test/invalid-cmap-s0c0.opus | Bin 0 -> 6835 bytes dom/media/test/invalid-cmap-s0c0.opus^headers^ | 1 + dom/media/test/invalid-cmap-s0c2.opus | Bin 0 -> 6834 bytes dom/media/test/invalid-cmap-s0c2.opus^headers^ | 1 + dom/media/test/invalid-cmap-s1c2.opus | Bin 0 -> 6848 bytes dom/media/test/invalid-cmap-s1c2.opus^headers^ | 1 + dom/media/test/invalid-cmap-short.opus | Bin 0 -> 6854 bytes dom/media/test/invalid-cmap-short.opus^headers^ | 1 + .../test/invalid-discard_on_multi_blocks.webm | Bin 0 -> 19636 bytes .../invalid-discard_on_multi_blocks.webm^headers^ | 1 + dom/media/test/invalid-excess_discard.webm | Bin 0 -> 18442 bytes .../test/invalid-excess_discard.webm^headers^ | 1 + dom/media/test/invalid-excess_neg_discard.webm | Bin 0 -> 18442 bytes .../test/invalid-excess_neg_discard.webm^headers^ | 1 + dom/media/test/invalid-m0c0.opus | Bin 0 -> 2471 bytes dom/media/test/invalid-m0c0.opus^headers^ | 1 + dom/media/test/invalid-m0c3.opus | Bin 0 -> 2471 bytes dom/media/test/invalid-m0c3.opus^headers^ | 1 + dom/media/test/invalid-m1c0.opus | Bin 0 -> 6836 bytes dom/media/test/invalid-m1c0.opus^headers^ | 1 + dom/media/test/invalid-m1c9.opus | Bin 0 -> 6836 bytes dom/media/test/invalid-m1c9.opus^headers^ | 1 + dom/media/test/invalid-m2c0.opus | Bin 0 -> 2471 bytes dom/media/test/invalid-m2c0.opus^headers^ | 1 + dom/media/test/invalid-m2c1.opus | Bin 0 -> 2455 bytes dom/media/test/invalid-m2c1.opus^headers^ | 1 + dom/media/test/invalid-neg_discard.webm | Bin 0 -> 18442 bytes dom/media/test/invalid-neg_discard.webm^headers^ | 1 + dom/media/test/invalid-preskip.webm | Bin 0 -> 7251 bytes dom/media/test/invalid-preskip.webm^headers^ | 1 + dom/media/test/make-headers.sh | 18 + dom/media/test/manifest.js | 2556 +++ dom/media/test/midflight-redirect.sjs | 87 + dom/media/test/mochitest.toml | 951 + dom/media/test/mochitest_background_video.toml | 800 + dom/media/test/mochitest_bugs.toml | 812 + dom/media/test/mochitest_compat.toml | 1033 + dom/media/test/mochitest_eme.toml | 850 + dom/media/test/mochitest_eme_compat.toml | 793 + dom/media/test/mochitest_media_engine.toml | 10 + dom/media/test/mochitest_media_recorder.toml | 856 + dom/media/test/mochitest_seek.toml | 824 + dom/media/test/mochitest_stream.toml | 792 + dom/media/test/multi_id3v2.mp3 | Bin 0 -> 5039737 bytes dom/media/test/multiple-bos-more-header-fileds.ogg | Bin 0 -> 27527 bytes .../multiple-bos-more-header-fileds.ogg^headers^ | 1 + dom/media/test/multiple-bos.ogg | Bin 0 -> 33045 bytes dom/media/test/multiple-bos.ogg^headers^ | 1 + dom/media/test/no-container-codec-delay.webm | Bin 0 -> 66250 bytes dom/media/test/no-cues.webm | Bin 0 -> 220609 bytes dom/media/test/no-cues.webm^headers^ | 1 + dom/media/test/notags.mp3 | Bin 0 -> 2506 bytes dom/media/test/notags.mp3^headers^ | 1 + 
dom/media/test/opus-mapping2.mp4 | Bin 0 -> 308048 bytes dom/media/test/opus-mapping2.mp4^headers^ | 1 + dom/media/test/opus-mapping2.webm | Bin 0 -> 309387 bytes dom/media/test/opus-mapping2.webm^headers^ | 1 + dom/media/test/opus-sample-cenc.mp4 | Bin 0 -> 21958 bytes dom/media/test/opus-sample-cenc.mp4^headers^ | 1 + dom/media/test/opus-sample.mp4 | Bin 0 -> 105690 bytes dom/media/test/opus-sample.mp4^headers^ | 1 + dom/media/test/owl-funnier-id3.mp3 | Bin 0 -> 69603 bytes dom/media/test/owl-funnier-id3.mp3^headers^ | 1 + dom/media/test/owl-funny-id3.mp3 | Bin 0 -> 71696 bytes dom/media/test/owl-funny-id3.mp3^headers^ | 1 + dom/media/test/owl-short.mp3 | Bin 0 -> 11016 bytes dom/media/test/owl-short.mp3^headers^ | 1 + dom/media/test/owl.mp3 | Bin 0 -> 67430 bytes dom/media/test/owl.mp3^headers^ | 1 + .../test/padding-spanning-multiple-packets.mp3 | Bin 0 -> 117600 bytes dom/media/test/pixel_aspect_ratio.mp4 | Bin 0 -> 1806042 bytes dom/media/test/play_promise.js | 3 + dom/media/test/poster-test.jpg | Bin 0 -> 58493 bytes dom/media/test/r11025_msadpcm_c1.wav | Bin 0 -> 5978 bytes dom/media/test/r11025_msadpcm_c1.wav^headers^ | 1 + dom/media/test/r11025_s16_c1-short.wav | Bin 0 -> 8270 bytes dom/media/test/r11025_s16_c1-short.wav^headers^ | 1 + dom/media/test/r11025_s16_c1.wav | Bin 0 -> 22094 bytes dom/media/test/r11025_s16_c1.wav^headers^ | 1 + dom/media/test/r11025_s16_c1_trailing.wav | Bin 0 -> 22095 bytes dom/media/test/r11025_s16_c1_trailing.wav^headers^ | 1 + dom/media/test/r11025_u8_c1.wav | Bin 0 -> 11069 bytes dom/media/test/r11025_u8_c1.wav^headers^ | 1 + dom/media/test/r11025_u8_c1_trunc.wav | Bin 0 -> 20000 bytes dom/media/test/r11025_u8_c1_trunc.wav^headers^ | 1 + dom/media/test/r16000_u8_c1_list.wav | Bin 0 -> 68318 bytes dom/media/test/r16000_u8_c1_list.wav^headers^ | 1 + .../test/rdd_process_xpcom/RddProcessTest.cpp | 69 + dom/media/test/rdd_process_xpcom/RddProcessTest.h | 28 + dom/media/test/rdd_process_xpcom/components.conf | 15 + dom/media/test/rdd_process_xpcom/moz.build | 21 + .../test/rdd_process_xpcom/nsIRddProcessTest.idl | 25 + dom/media/test/reactivate_helper.html | 57 + dom/media/test/red-46x48.mp4 | Bin 0 -> 1548 bytes dom/media/test/red-46x48.mp4^headers^ | 1 + dom/media/test/red-48x46.mp4 | Bin 0 -> 1548 bytes dom/media/test/red-48x46.mp4^headers^ | 1 + dom/media/test/redirect.sjs | 35 + dom/media/test/referer.sjs | 49 + dom/media/test/reftest/av1hdr2020.mp4 | Bin 0 -> 109327 bytes dom/media/test/reftest/av1hdr2020.png | Bin 0 -> 4162799 bytes .../bipbop_300_215kbps.mp4.lastframe-ref.html | 4 + .../reftest/bipbop_300_215kbps.mp4.lastframe.html | 19 + dom/media/test/reftest/color_quads/720p.png | Bin 0 -> 8722 bytes .../720p.png.bt709.bt709.pc.gbrp.av1.mp4 | Bin 0 -> 968 bytes .../720p.png.bt709.bt709.pc.gbrp.av1.webm | Bin 0 -> 669 bytes .../720p.png.bt709.bt709.pc.gbrp.h264.mp4 | Bin 0 -> 1874 bytes .../720p.png.bt709.bt709.pc.gbrp.vp9.mp4 | Bin 0 -> 1102 bytes .../720p.png.bt709.bt709.pc.gbrp.vp9.webm | Bin 0 -> 808 bytes .../720p.png.bt709.bt709.pc.yuv420p.av1.mp4 | Bin 0 -> 1016 bytes .../720p.png.bt709.bt709.pc.yuv420p.av1.webm | Bin 0 -> 717 bytes .../720p.png.bt709.bt709.pc.yuv420p.h264.mp4 | Bin 0 -> 1951 bytes .../720p.png.bt709.bt709.pc.yuv420p.vp9.mp4 | Bin 0 -> 1116 bytes .../720p.png.bt709.bt709.pc.yuv420p.vp9.webm | Bin 0 -> 822 bytes .../720p.png.bt709.bt709.pc.yuv420p10.av1.mp4 | Bin 0 -> 1031 bytes .../720p.png.bt709.bt709.pc.yuv420p10.av1.webm | Bin 0 -> 732 bytes .../720p.png.bt709.bt709.pc.yuv420p10.h264.mp4 | Bin 0 -> 1990 bytes 
 .../720p.png.bt709.bt709.pc.yuv420p10.vp9.mp4 | Bin 0 -> 1153 bytes
 .../720p.png.bt709.bt709.pc.yuv420p10.vp9.webm | Bin 0 -> 859 bytes
 .../720p.png.bt709.bt709.tv.gbrp.av1.mp4 | Bin 0 -> 968 bytes
 .../720p.png.bt709.bt709.tv.gbrp.av1.webm | Bin 0 -> 669 bytes
 .../720p.png.bt709.bt709.tv.gbrp.h264.mp4 | Bin 0 -> 1873 bytes
 .../720p.png.bt709.bt709.tv.gbrp.vp9.mp4 | Bin 0 -> 1102 bytes
 .../720p.png.bt709.bt709.tv.gbrp.vp9.webm | Bin 0 -> 808 bytes
 .../720p.png.bt709.bt709.tv.yuv420p.av1.mp4 | Bin 0 -> 1012 bytes
 .../720p.png.bt709.bt709.tv.yuv420p.av1.webm | Bin 0 -> 713 bytes
 .../720p.png.bt709.bt709.tv.yuv420p.h264.mp4 | Bin 0 -> 1946 bytes
 .../720p.png.bt709.bt709.tv.yuv420p.vp9.mp4 | Bin 0 -> 1111 bytes
 .../720p.png.bt709.bt709.tv.yuv420p.vp9.webm | Bin 0 -> 817 bytes
 .../720p.png.bt709.bt709.tv.yuv420p10.av1.mp4 | Bin 0 -> 1036 bytes
 .../720p.png.bt709.bt709.tv.yuv420p10.av1.webm | Bin 0 -> 737 bytes
 .../720p.png.bt709.bt709.tv.yuv420p10.h264.mp4 | Bin 0 -> 1989 bytes
 .../720p.png.bt709.bt709.tv.yuv420p10.vp9.mp4 | Bin 0 -> 1148 bytes
 .../720p.png.bt709.bt709.tv.yuv420p10.vp9.webm | Bin 0 -> 854 bytes
 dom/media/test/reftest/color_quads/reftest.list | 69 +
 dom/media/test/reftest/frame_order.mp4 | Bin 0 -> 7971 bytes
 dom/media/test/reftest/frame_order_mp4-ref.html | 13 +
 dom/media/test/reftest/frame_order_mp4.html | 37 +
 dom/media/test/reftest/gen_combos.py | 257 +
 dom/media/test/reftest/generateREF.html | 104 +
 .../test/reftest/gizmo.mp4.55thframe-ref.html | 7 +
 dom/media/test/reftest/gizmo.mp4.seek.html | 36 +
 .../reftest/image-10bits-rendering-720-90-ref.html | 4 +
 .../image-10bits-rendering-720-90-video.html | 22 +
 .../reftest/image-10bits-rendering-720-ref.html | 4 +
 .../reftest/image-10bits-rendering-720-video.html | 19 +
 .../reftest/image-10bits-rendering-720.video.html | 19 +
 .../reftest/image-10bits-rendering-90-ref.html | 4 +
 .../reftest/image-10bits-rendering-90-video.html | 22 +
 .../test/reftest/image-10bits-rendering-ref.html | 4 +
 .../test/reftest/image-10bits-rendering-video.html | 22 +
 .../incorrect_display_in_bytestream_vp8-ref.html | 13 +
 .../incorrect_display_in_bytestream_vp8.html | 33 +
 .../incorrect_display_in_bytestream_vp8.webm | Bin 0 -> 84160 bytes
 .../incorrect_display_in_bytestream_vp9-ref.html | 12 +
 .../incorrect_display_in_bytestream_vp9.html | 33 +
 .../incorrect_display_in_bytestream_vp9.webm | Bin 0 -> 740554 bytes
 dom/media/test/reftest/reftest.list | 15 +
 dom/media/test/reftest/reftest_img.html | 20 +
 dom/media/test/reftest/reftest_video.html | 64 +
 .../test/reftest/short.mp4.firstframe-ref.html | 4 +
 dom/media/test/reftest/short.mp4.firstframe.html | 19 +
 .../test/reftest/short.mp4.lastframe-ref.html | 4 +
 dom/media/test/reftest/short.mp4.lastframe.html | 42 +
 .../reftest/uneven_frame_duration_video-ref.html | 7 +
 .../test/reftest/uneven_frame_duration_video.html | 39 +
 dom/media/test/reftest/uneven_frame_durations.mp4 | Bin 0 -> 2424023 bytes
 .../reftest/uneven_frame_durations_3.8s_frame.png | Bin 0 -> 224136 bytes
 dom/media/test/reftest/vp9hdr2020.png | Bin 0 -> 5083456 bytes
 dom/media/test/reftest/vp9hdr2020.webm | Bin 0 -> 108855 bytes
 dom/media/test/resolution-change.webm | Bin 0 -> 7166 bytes
 dom/media/test/resolution-change.webm^headers^ | 1 +
 .../test/sample-encrypted-sgpdstbl-sbgptraf.mp4 | Bin 0 -> 122703 bytes
 ...sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^ | 1 +
 dom/media/test/sample-fisbone-skeleton4.ogv | Bin 0 -> 8747 bytes
 .../test/sample-fisbone-skeleton4.ogv^headers^ | 1 +
 dom/media/test/sample-fisbone-wrong-header.ogv | Bin 0 -> 8703 bytes
 .../test/sample-fisbone-wrong-header.ogv^headers^ | 1 +
 dom/media/test/sample.3g2 | Bin 0 -> 28561 bytes
 dom/media/test/sample.3gp | Bin 0 -> 28561 bytes
 dom/media/test/seek-short.ogv | Bin 0 -> 79921 bytes
 dom/media/test/seek-short.ogv^headers^ | 1 +
 dom/media/test/seek-short.webm | Bin 0 -> 19267 bytes
 dom/media/test/seek-short.webm^headers^ | 1 +
 dom/media/test/seek.ogv | Bin 0 -> 285310 bytes
 dom/media/test/seek.ogv^headers^ | 1 +
 dom/media/test/seek.webm | Bin 0 -> 215529 bytes
 dom/media/test/seek.webm^headers^ | 1 +
 dom/media/test/seekLies.sjs | 22 +
 dom/media/test/seek_support.js | 61 +
 dom/media/test/seek_with_sound.ogg | Bin 0 -> 299507 bytes
 dom/media/test/seek_with_sound.ogg^headers^ | 1 +
 dom/media/test/short-aac-encrypted-audio.mp4 | Bin 0 -> 5267 bytes
 .../test/short-aac-encrypted-audio.mp4^headers^ | 1 +
 .../short-audio-fragmented-cenc-without-pssh.mp4 | Bin 0 -> 9261 bytes
 ...audio-fragmented-cenc-without-pssh.mp4^headers^ | 1 +
 dom/media/test/short-cenc-pssh-in-moof.mp4 | Bin 0 -> 14860 bytes
 dom/media/test/short-cenc.mp4 | Bin 0 -> 14860 bytes
 dom/media/test/short-cenc.xml | 37 +
 dom/media/test/short-video.ogv | Bin 0 -> 16049 bytes
 dom/media/test/short-video.ogv^headers^ | 1 +
 dom/media/test/short-vp9-encrypted-video.mp4 | Bin 0 -> 6727 bytes
 .../test/short-vp9-encrypted-video.mp4^headers^ | 1 +
 dom/media/test/short.mp4 | Bin 0 -> 13708 bytes
 dom/media/test/short.mp4.gz | Bin 0 -> 6708 bytes
 dom/media/test/short.mp4^headers^ | 1 +
 dom/media/test/shorter_audio_than_video_3s.webm | Bin 0 -> 31229 bytes
 .../test/shorter_audio_than_video_3s.webm^headers^ | 1 +
 dom/media/test/sin-441-1s-44100-afconvert.mp4 | Bin 0 -> 9738 bytes
 dom/media/test/sin-441-1s-44100-fdk_aac.mp4 | Bin 0 -> 9986 bytes
 dom/media/test/sin-441-1s-44100-lame.mp3 | Bin 0 -> 8586 bytes
 dom/media/test/sin-441-1s-44100.flac | Bin 0 -> 24203 bytes
 dom/media/test/sin-441-1s-44100.ogg | Bin 0 -> 5180 bytes
 dom/media/test/sin-441-1s-44100.opus | Bin 0 -> 10634 bytes
 dom/media/test/sine.webm | Bin 0 -> 17510 bytes
 dom/media/test/sine.webm^headers^ | 1 +
 .../test/single-xing-header-no-content-length.mp3 | Bin 0 -> 88834 bytes
 ...ngle-xing-header-no-content-length.mp3^headers^ | 3 +
 ...l-short-clearkey-subsample-encrypted-audio.webm | Bin 0 -> 30362 bytes
 ...learkey-subsample-encrypted-audio.webm^headers^ | 1 +
 ...l-short-clearkey-subsample-encrypted-video.webm | Bin 0 -> 46703 bytes
 ...learkey-subsample-encrypted-video.webm^headers^ | 1 +
 dom/media/test/small-shot-mp3.mp4 | Bin 0 -> 7491 bytes
 dom/media/test/small-shot-mp3.mp4^headers^ | 1 +
 dom/media/test/small-shot.flac | Bin 0 -> 16430 bytes
 dom/media/test/small-shot.m4a | Bin 0 -> 2710 bytes
 dom/media/test/small-shot.mp3 | Bin 0 -> 6825 bytes
 dom/media/test/small-shot.mp3^headers^ | 1 +
 dom/media/test/small-shot.ogg | Bin 0 -> 6416 bytes
 dom/media/test/small-shot.ogg^headers^ | 1 +
 dom/media/test/sound.ogg | Bin 0 -> 2603 bytes
 dom/media/test/sound.ogg^headers^ | 1 +
 dom/media/test/spacestorm-1000Hz-100ms.ogg | Bin 0 -> 3270 bytes
 .../test/spacestorm-1000Hz-100ms.ogg^headers^ | 1 +
 dom/media/test/split.webm | Bin 0 -> 105755 bytes
 dom/media/test/split.webm^headers^ | 1 +
 dom/media/test/street.mp4 | Bin 0 -> 1505751 bytes
 dom/media/test/street.mp4^headers^ | 1 +
 dom/media/test/sync.webm | Bin 0 -> 397383 bytes
 dom/media/test/test-1-mono.opus | Bin 0 -> 4086 bytes
 dom/media/test/test-1-mono.opus^headers^ | 1 +
 dom/media/test/test-2-stereo.opus | Bin 0 -> 24973 bytes
 dom/media/test/test-2-stereo.opus^headers^ | 1 +
 dom/media/test/test-3-LCR.opus | Bin 0 -> 39471 bytes
 dom/media/test/test-3-LCR.opus^headers^ | 1 +
 dom/media/test/test-4-quad.opus | Bin 0 -> 129906 bytes
 dom/media/test/test-4-quad.opus^headers^ | 1 +
 dom/media/test/test-5-5.0.opus | Bin 0 -> 164935 bytes
 dom/media/test/test-5-5.0.opus^headers^ | 1 +
 dom/media/test/test-6-5.1.opus | Bin 0 -> 288195 bytes
 dom/media/test/test-6-5.1.opus^headers^ | 1 +
 dom/media/test/test-7-6.1.opus | Bin 0 -> 401668 bytes
 dom/media/test/test-7-6.1.opus^headers^ | 1 +
 dom/media/test/test-8-7.1.opus | Bin 0 -> 543119 bytes
 dom/media/test/test-8-7.1.opus^headers^ | 1 +
 .../test/test-stereo-phase-inversion-180.opus | Bin 0 -> 14011 bytes
 .../test-stereo-phase-inversion-180.opus^headers^ | 1 +
 dom/media/test/test_VideoPlaybackQuality.html | 61 +
 .../test/test_VideoPlaybackQuality_disabled.html | 37 +
 dom/media/test/test_access_control.html | 62 +
 dom/media/test/test_arraybuffer.html | 83 +
 dom/media/test/test_aspectratio_mp4.html | 46 +
 dom/media/test/test_audio1.html | 33 +
 dom/media/test/test_audio2.html | 33 +
 dom/media/test/test_audioDocumentTitle.html | 56 +
 ...test_background_video_cancel_suspend_taint.html | 70 +
 ...st_background_video_cancel_suspend_visible.html | 69 +
 ...round_video_drawimage_with_suspended_video.html | 76 +
 .../test/test_background_video_ended_event.html | 48 +
 .../test_background_video_no_suspend_disabled.html | 36 +
 ...st_background_video_no_suspend_not_in_tree.html | 56 +
 ...test_background_video_no_suspend_short_vid.html | 38 +
 ...und_video_resume_after_end_show_last_frame.html | 141 +
 ...d_video_resume_looping_video_without_audio.html | 81 +
 dom/media/test/test_background_video_suspend.html | 81 +
 .../test/test_background_video_suspend_ends.html | 55 +
 .../test_background_video_suspend_ready_state.html | 74 +
 ..._background_video_tainted_by_capturestream.html | 46 +
 ...kground_video_tainted_by_createimagebitmap.html | 42 +
 ...test_background_video_tainted_by_drawimage.html | 58 +
 dom/media/test/test_buffered.html | 117 +
 dom/media/test/test_bug1113600.html | 50 +
 dom/media/test/test_bug1120222.html | 42 +
 dom/media/test/test_bug1242338.html | 66 +
 dom/media/test/test_bug1248229.html | 35 +
 .../test/test_bug1431810_opus_downmix_to_mono.html | 139 +
 dom/media/test/test_bug1512958.html | 74 +
 dom/media/test/test_bug1553262.html | 31 +
 dom/media/test/test_bug448534.html | 71 +
 dom/media/test/test_bug463162.xhtml | 78 +
 dom/media/test/test_bug465498.html | 83 +
 dom/media/test/test_bug495145.html | 95 +
 dom/media/test/test_bug495300.html | 63 +
 dom/media/test/test_bug654550.html | 84 +
 dom/media/test/test_bug686942.html | 68 +
 dom/media/test/test_bug726904.html | 56 +
 dom/media/test/test_bug874897.html | 68 +
 dom/media/test/test_bug879717.html | 130 +
 dom/media/test/test_bug895305.html | 42 +
 dom/media/test/test_bug919265.html | 30 +
 dom/media/test/test_can_play_type.html | 40 +
 dom/media/test/test_can_play_type_mpeg.html | 166 +
 dom/media/test/test_can_play_type_no_ogg.html | 42 +
 dom/media/test/test_can_play_type_ogg.html | 37 +
 dom/media/test/test_can_play_type_wave.html | 30 +
 dom/media/test/test_can_play_type_webm.html | 39 +
 dom/media/test/test_capture_stream_av_sync.html | 276 +
 dom/media/test/test_chaining.html | 92 +
 .../test_cloneElementVisually_ended_video.html | 48 +
 .../test_cloneElementVisually_mediastream.html | 70 +
 ...loneElementVisually_mediastream_multitrack.html | 88 +
 .../test/test_cloneElementVisually_no_suspend.html | 90 +
 .../test/test_cloneElementVisually_paused.html | 45 +
 .../test/test_cloneElementVisually_poster.html | 53 +
.../test_cloneElementVisually_resource_change.html | 67 + dom/media/test/test_clone_media_element.html | 54 + dom/media/test/test_closing_connections.html | 58 + dom/media/test/test_constants.html | 228 + dom/media/test/test_controls.html | 33 + dom/media/test/test_cueless_webm_seek-1.html | 136 + dom/media/test/test_cueless_webm_seek-2.html | 126 + dom/media/test/test_cueless_webm_seek-3.html | 120 + dom/media/test/test_currentTime.html | 19 + dom/media/test/test_debug_data_helpers.html | 74 + dom/media/test/test_decode_error.html | 66 + dom/media/test/test_decode_error_crossorigin.html | 54 + dom/media/test/test_decoder_disable.html | 78 + dom/media/test/test_defaultMuted.html | 54 + dom/media/test/test_delay_load.html | 108 + dom/media/test/test_duration_after_error.html | 54 + dom/media/test/test_eme_autoplay.html | 115 + dom/media/test/test_eme_canvas_blocked.html | 58 + .../test/test_eme_createMediaKeys_iframes.html | 192 + dom/media/test/test_eme_detach_media_keys.html | 63 + ...ch_reattach_same_mediakeys_during_playback.html | 141 + dom/media/test/test_eme_getstatusforpolicy.html | 93 + dom/media/test/test_eme_initDataTypes.html | 130 + .../test/test_eme_mfcdm_generate_request.html | 93 + .../test/test_eme_mfcdm_getstatusforpolicy.html | 115 + dom/media/test/test_eme_missing_pssh.html | 92 + dom/media/test/test_eme_non_mse_fails.html | 95 + dom/media/test/test_eme_playback.html | 188 + dom/media/test/test_eme_protection_query.html | 250 + dom/media/test/test_eme_pssh_in_moof.html | 141 + .../test/test_eme_requestKeySystemAccess.html | 477 + ...uestMediaKeySystemAccess_with_app_approval.html | 202 + dom/media/test/test_eme_request_notifications.html | 82 + .../test/test_eme_sample_groups_playback.html | 130 + .../test/test_eme_session_callable_value.html | 34 + ...eme_setMediaKeys_before_attach_MediaSource.html | 37 + dom/media/test/test_eme_special_key_system.html | 63 + .../test_eme_stream_capture_blocked_case1.html | 59 + .../test_eme_stream_capture_blocked_case2.html | 52 + .../test_eme_stream_capture_blocked_case3.html | 50 + .../test/test_eme_unsetMediaKeys_then_capture.html | 108 + dom/media/test/test_eme_waitingforkey.html | 78 + .../test/test_eme_wideinve_l1_installation.html | 86 + dom/media/test/test_eme_wv_privacy.html | 53 + dom/media/test/test_empty_resource.html | 58 + dom/media/test/test_error_in_video_document.html | 59 + dom/media/test/test_error_on_404.html | 84 + dom/media/test/test_fastSeek-forwards.html | 77 + dom/media/test/test_fastSeek.html | 88 + dom/media/test/test_fragment_noplay.html | 127 + dom/media/test/test_fragment_play.html | 91 + dom/media/test/test_hevc_playback.html | 45 + dom/media/test/test_hevc_support.html | 47 + dom/media/test/test_hls_player_independency.html | 53 + dom/media/test/test_hw_video_decoding.html | 119 + dom/media/test/test_imagecapture.html | 128 + dom/media/test/test_info_leak.html | 174 + dom/media/test/test_invalid_reject.html | 58 + dom/media/test/test_invalid_reject_play.html | 44 + dom/media/test/test_invalid_seek.html | 32 + dom/media/test/test_load.html | 217 + dom/media/test/test_load_candidates.html | 84 + dom/media/test/test_load_same_resource.html | 106 + dom/media/test/test_load_source.html | 76 + dom/media/test/test_load_source_empty_type.html | 36 + dom/media/test/test_loop.html | 57 + dom/media/test/test_looping_eventsOrder.html | 52 + dom/media/test/test_media_selection.html | 142 + dom/media/test/test_media_sniffer.html | 67 + ...est_mediacapabilities_resistfingerprinting.html | 69 + 
.../test/test_mediarecorder_avoid_recursion.html | 61 + dom/media/test/test_mediarecorder_bitrate.html | 127 + dom/media/test/test_mediarecorder_creation.html | 45 + .../test/test_mediarecorder_creation_fail.html | 61 + ...order_fires_start_event_once_when_erroring.html | 45 + .../test/test_mediarecorder_multipletracks.html | 68 + .../test/test_mediarecorder_onerror_pause.html | 107 + .../test_mediarecorder_pause_resume_video.html | 130 + .../test_mediarecorder_playback_can_repeat.html | 87 + dom/media/test/test_mediarecorder_principals.html | 132 + ...test_mediarecorder_record_4ch_audiocontext.html | 76 + ...est_mediarecorder_record_addtracked_stream.html | 182 + .../test_mediarecorder_record_audiocontext.html | 65 + ...test_mediarecorder_record_audiocontext_mlk.html | 24 + .../test/test_mediarecorder_record_audionode.html | 135 + ..._mediarecorder_record_canvas_captureStream.html | 75 + ...arecorder_record_changing_video_resolution.html | 174 + ...t_mediarecorder_record_downsize_resolution.html | 148 + ...st_mediarecorder_record_getdata_afterstart.html | 81 + ...t_mediarecorder_record_gum_video_timeslice.html | 94 + ...arecorder_record_gum_video_timeslice_mixed.html | 100 + .../test_mediarecorder_record_immediate_stop.html | 115 + .../test_mediarecorder_record_no_timeslice.html | 106 + .../test/test_mediarecorder_record_session.html | 75 + .../test_mediarecorder_record_startstopstart.html | 75 + .../test/test_mediarecorder_record_timeslice.html | 105 + ...est_mediarecorder_record_upsize_resolution.html | 148 + .../test/test_mediarecorder_reload_crash.html | 29 + .../test/test_mediarecorder_state_event_order.html | 83 + .../test/test_mediarecorder_state_transition.html | 280 + .../test/test_mediarecorder_webm_support.html | 56 + dom/media/test/test_mediastream_as_eventarget.html | 33 + .../test_mediatrack_consuming_mediaresource.html | 198 + .../test_mediatrack_consuming_mediastream.html | 146 + dom/media/test/test_mediatrack_events.html | 135 + dom/media/test/test_mediatrack_parsing_ogg.html | 72 + .../test/test_mediatrack_replay_from_end.html | 160 + dom/media/test/test_metadata.html | 81 + .../test/test_midflight_redirect_blocked.html | 87 + dom/media/test/test_mixed_principals.html | 94 + dom/media/test/test_mozHasAudio.html | 42 + dom/media/test/test_mp3_broadcast.html | 52 + dom/media/test/test_mp3_with_multiple_ID3v2.html | 30 + .../test/test_multiple_mediastreamtracks.html | 47 + dom/media/test/test_networkState.html | 47 + dom/media/test/test_new_audio.html | 48 + dom/media/test/test_no_load_event.html | 53 + ...hen_removing_nonloaded_media_from_document.html | 46 + dom/media/test/test_paused.html | 21 + dom/media/test/test_paused_after_ended.html | 53 + dom/media/test/test_periodic_timeupdate.html | 100 + dom/media/test/test_play_events.html | 61 + dom/media/test/test_play_events_2.html | 60 + dom/media/test/test_play_promise_1.html | 42 + dom/media/test/test_play_promise_10.html | 40 + dom/media/test/test_play_promise_11.html | 40 + dom/media/test/test_play_promise_12.html | 45 + dom/media/test/test_play_promise_13.html | 49 + dom/media/test/test_play_promise_14.html | 56 + dom/media/test/test_play_promise_15.html | 51 + dom/media/test/test_play_promise_16.html | 47 + dom/media/test/test_play_promise_17.html | 43 + dom/media/test/test_play_promise_18.html | 46 + dom/media/test/test_play_promise_2.html | 43 + dom/media/test/test_play_promise_3.html | 47 + dom/media/test/test_play_promise_4.html | 41 + dom/media/test/test_play_promise_5.html | 44 + 
dom/media/test/test_play_promise_6.html | 45 + dom/media/test/test_play_promise_7.html | 47 + dom/media/test/test_play_promise_8.html | 47 + dom/media/test/test_play_promise_9.html | 44 + dom/media/test/test_play_twice.html | 95 + dom/media/test/test_playback.html | 108 + dom/media/test/test_playback_and_bfcache.html | 72 + dom/media/test/test_playback_errors.html | 48 + dom/media/test/test_playback_hls.html | 91 + dom/media/test/test_playback_rate.html | 175 + dom/media/test/test_playback_rate_playpause.html | 66 + dom/media/test/test_playback_reactivate.html | 77 + dom/media/test/test_played.html | 288 + dom/media/test/test_preload_actions.html | 581 + dom/media/test/test_preload_attribute.html | 44 + dom/media/test/test_preload_suspend.html | 112 + .../test_preserve_playbackrate_after_ui_play.html | 60 + dom/media/test/test_progress.html | 52 + dom/media/test/test_reactivate.html | 64 + dom/media/test/test_readyState.html | 51 + dom/media/test/test_referer.html | 88 + dom/media/test/test_replay_metadata.html | 120 + dom/media/test/test_reset_events_async.html | 58 + dom/media/test/test_reset_src.html | 98 + dom/media/test/test_resolution_change.html | 52 + dom/media/test/test_resume.html | 47 + dom/media/test/test_seamless_looping.html | 199 + ...mless_looping_cancel_looping_future_frames.html | 61 + dom/media/test/test_seamless_looping_duration.html | 63 + .../test_seamless_looping_media_element_state.html | 51 + ...looping_not_keep_painting_old_video_frames.html | 57 + ...est_seamless_looping_resume_video_decoding.html | 64 + .../test_seamless_looping_seek_current_time.html | 62 + ..._seamless_looping_shorter_audio_than_video.html | 37 + dom/media/test/test_seamless_looping_video.html | 73 + dom/media/test/test_seek-1.html | 84 + dom/media/test/test_seek-10.html | 56 + dom/media/test/test_seek-11.html | 76 + dom/media/test/test_seek-12.html | 59 + dom/media/test/test_seek-13.html | 72 + dom/media/test/test_seek-14.html | 43 + dom/media/test/test_seek-2.html | 76 + dom/media/test/test_seek-3.html | 68 + dom/media/test/test_seek-4.html | 70 + dom/media/test/test_seek-5.html | 69 + dom/media/test/test_seek-6.html | 64 + dom/media/test/test_seek-7.html | 59 + dom/media/test/test_seek-8.html | 42 + dom/media/test/test_seek-9.html | 41 + dom/media/test/test_seekLies.html | 28 + dom/media/test/test_seekToNextFrame.html | 95 + dom/media/test/test_seek_duration.html | 62 + dom/media/test/test_seek_negative.html | 77 + dom/media/test/test_seek_nosrc.html | 58 + dom/media/test/test_seek_out_of_range.html | 48 + dom/media/test/test_seek_promise_bug1344357.html | 35 + dom/media/test/test_seekable1.html | 66 + dom/media/test/test_setSinkId_after_loop.html | 126 + dom/media/test/test_source.html | 91 + dom/media/test/test_source_null.html | 33 + dom/media/test/test_source_write.html | 40 + dom/media/test/test_standalone.html | 61 + dom/media/test/test_streams_capture_origin.html | 90 + dom/media/test/test_streams_element_capture.html | 125 + .../test_streams_element_capture_mediatrack.html | 100 + .../test_streams_element_capture_playback.html | 47 + .../test/test_streams_element_capture_reset.html | 174 + .../test/test_streams_element_capture_twice.html | 79 + dom/media/test/test_streams_firstframe.html | 67 + dom/media/test/test_streams_gc.html | 44 + dom/media/test/test_streams_individual_pause.html | 77 + dom/media/test/test_streams_srcObject.html | 60 + dom/media/test/test_streams_tracks.html | 66 + .../test_suspend_media_by_inactive_docshell.html | 72 + 
.../test/test_temporary_file_blob_video_plays.html | 75 + dom/media/test/test_timeupdate_small_files.html | 88 + dom/media/test/test_unseekable.html | 101 + dom/media/test/test_videoDocumentTitle.html | 57 + .../test_videoPlaybackQuality_totalFrames.html | 46 + dom/media/test/test_video_dimensions.html | 96 + dom/media/test/test_video_gzip_encoding.html | 25 + dom/media/test/test_video_in_audio_element.html | 68 + dom/media/test/test_video_low_power_telemetry.html | 205 + .../test_video_stats_resistfingerprinting.html | 90 + dom/media/test/test_video_to_canvas.html | 68 + dom/media/test/test_volume.html | 41 + dom/media/test/test_vp9_superframes.html | 31 + dom/media/test/test_wav_ended1.html | 43 + dom/media/test/test_wav_ended2.html | 62 + dom/media/test/tone2s-silence4s-tone2s.opus | Bin 0 -> 67207 bytes .../test/two-xing-header-no-content-length.mp3 | Bin 0 -> 45594 bytes .../two-xing-header-no-content-length.mp3^headers^ | 3 + dom/media/test/variable-channel.ogg | Bin 0 -> 27749 bytes dom/media/test/variable-channel.ogg^headers^ | 1 + dom/media/test/variable-channel.opus | Bin 0 -> 46597 bytes dom/media/test/variable-channel.opus^headers^ | 1 + dom/media/test/variable-preskip.opus | Bin 0 -> 17660 bytes dom/media/test/variable-preskip.opus^headers^ | 1 + dom/media/test/variable-samplerate.ogg | Bin 0 -> 22325 bytes dom/media/test/variable-samplerate.ogg^headers^ | 1 + dom/media/test/variable-samplerate.opus | Bin 0 -> 28111 bytes dom/media/test/variable-samplerate.opus^headers^ | 1 + dom/media/test/vbr-head.mp3 | Bin 0 -> 4474 bytes dom/media/test/vbr-head.mp3^headers^ | 1 + dom/media/test/vbr.mp3 | Bin 0 -> 300553 bytes dom/media/test/vbr.mp3^headers^ | 1 + dom/media/test/very-short.mp3 | Bin 0 -> 1612 bytes dom/media/test/video-overhang.ogg | Bin 0 -> 301831 bytes dom/media/test/video-overhang.ogg^headers^ | 1 + dom/media/test/vp9-short.webm | Bin 0 -> 3107 bytes dom/media/test/vp9-short.webm^headers^ | 1 + dom/media/test/vp9-superframes.webm | Bin 0 -> 173187 bytes dom/media/test/vp9-superframes.webm^headers^ | 1 + dom/media/test/vp9.webm | Bin 0 -> 97465 bytes dom/media/test/vp9.webm^headers^ | 1 + dom/media/test/vp9cake-short.webm | Bin 0 -> 25155 bytes dom/media/test/vp9cake-short.webm^headers^ | 1 + dom/media/test/vp9cake.webm | Bin 0 -> 141743 bytes dom/media/test/vp9cake.webm^headers^ | 1 + dom/media/test/wave_metadata.wav | Bin 0 -> 42706 bytes dom/media/test/wave_metadata.wav^headers^ | 1 + dom/media/test/wave_metadata_bad_len.wav | Bin 0 -> 42706 bytes dom/media/test/wave_metadata_bad_len.wav^headers^ | 1 + dom/media/test/wave_metadata_bad_no_null.wav | Bin 0 -> 42706 bytes .../test/wave_metadata_bad_no_null.wav^headers^ | 1 + dom/media/test/wave_metadata_bad_utf8.wav | Bin 0 -> 42704 bytes dom/media/test/wave_metadata_bad_utf8.wav^headers^ | 1 + dom/media/test/wave_metadata_unknown_tag.wav | Bin 0 -> 42706 bytes .../test/wave_metadata_unknown_tag.wav^headers^ | 1 + dom/media/test/wave_metadata_utf8.wav | Bin 0 -> 42704 bytes dom/media/test/wave_metadata_utf8.wav^headers^ | 1 + dom/media/test/wavedata_alaw.wav | Bin 0 -> 11067 bytes dom/media/test/wavedata_alaw.wav^headers^ | 1 + dom/media/test/wavedata_float.wav | Bin 0 -> 176458 bytes dom/media/test/wavedata_float.wav^headers^ | 1 + dom/media/test/wavedata_s16.wav | Bin 0 -> 22062 bytes dom/media/test/wavedata_s16.wav^headers^ | 1 + dom/media/test/wavedata_s24.wav | Bin 0 -> 33071 bytes dom/media/test/wavedata_s24.wav^headers^ | 1 + dom/media/test/wavedata_u8.wav | Bin 0 -> 11037 bytes 
dom/media/test/wavedata_u8.wav^headers^ | 1 + dom/media/test/wavedata_ulaw.wav | Bin 0 -> 11067 bytes dom/media/test/wavedata_ulaw.wav^headers^ | 1 + dom/media/test/white-3s-black-1s.webm | Bin 0 -> 4662 bytes dom/media/test/white-3s-black-1s.webm^headers^ | 1 + dom/media/test/white-short.webm | Bin 0 -> 1573 bytes dom/media/tests/crashtests/1281695.html | 24 + dom/media/tests/crashtests/1306476.html | 47 + dom/media/tests/crashtests/1348381.html | 22 + dom/media/tests/crashtests/1367930_1.html | 39 + dom/media/tests/crashtests/1367930_2.html | 25 + dom/media/tests/crashtests/1429507_1.html | 29 + dom/media/tests/crashtests/1429507_2.html | 31 + dom/media/tests/crashtests/1443212.html | 13 + dom/media/tests/crashtests/1453030.html | 39 + dom/media/tests/crashtests/1468451.html | 10 + dom/media/tests/crashtests/1490700.html | 28 + dom/media/tests/crashtests/1505957.html | 31 + dom/media/tests/crashtests/1509442-1.html | 14 + dom/media/tests/crashtests/1509442.html | 7 + dom/media/tests/crashtests/1510848.html | 4 + dom/media/tests/crashtests/1511130.html | 12 + dom/media/tests/crashtests/1516292.html | 16 + dom/media/tests/crashtests/1573536.html | 18 + dom/media/tests/crashtests/1576938.html | 16 + dom/media/tests/crashtests/1594136.html | 15 + dom/media/tests/crashtests/1749308.html | 18 + dom/media/tests/crashtests/1764915.html | 14 + dom/media/tests/crashtests/1764933.html | 15 + dom/media/tests/crashtests/1764940.html | 14 + dom/media/tests/crashtests/1766668.html | 11 + dom/media/tests/crashtests/1783765.html | 22 + dom/media/tests/crashtests/780790.html | 16 + dom/media/tests/crashtests/791270.html | 17 + dom/media/tests/crashtests/791278.html | 20 + dom/media/tests/crashtests/791330.html | 35 + dom/media/tests/crashtests/799419.html | 32 + dom/media/tests/crashtests/801227.html | 35 + dom/media/tests/crashtests/802982.html | 27 + dom/media/tests/crashtests/812785.html | 70 + dom/media/tests/crashtests/822197.html | 28 + dom/media/tests/crashtests/834100.html | 25 + dom/media/tests/crashtests/836349.html | 20 + dom/media/tests/crashtests/837324.html | 25 + dom/media/tests/crashtests/855796.html | 66 + dom/media/tests/crashtests/860143.html | 25 + dom/media/tests/crashtests/861958.html | 24 + dom/media/tests/crashtests/863929.html | 66 + dom/media/tests/crashtests/crashtests.list | 42 + dom/media/tests/crashtests/datachannel-oom.html | 22 + dom/media/tools/generateGmpJson.py | 170 + dom/media/utils/MediaElementEventRunners.cpp | 140 + dom/media/utils/MediaElementEventRunners.h | 190 + dom/media/utils/PerformanceRecorder.cpp | 308 + dom/media/utils/PerformanceRecorder.h | 408 + dom/media/utils/TelemetryProbesReporter.cpp | 708 + dom/media/utils/TelemetryProbesReporter.h | 179 + dom/media/utils/gtest/TestPerformanceRecorder.cpp | 110 + dom/media/utils/gtest/moz.build | 15 + dom/media/utils/moz.build | 26 + dom/media/wave/WaveDecoder.cpp | 56 + dom/media/wave/WaveDecoder.h | 28 + dom/media/wave/WaveDemuxer.cpp | 760 + dom/media/wave/WaveDemuxer.h | 248 + dom/media/wave/moz.build | 20 + dom/media/webaudio/AlignedTArray.h | 115 + dom/media/webaudio/AlignmentUtils.h | 29 + dom/media/webaudio/AnalyserNode.cpp | 388 + dom/media/webaudio/AnalyserNode.h | 82 + dom/media/webaudio/AudioBlock.cpp | 166 + dom/media/webaudio/AudioBlock.h | 134 + dom/media/webaudio/AudioBuffer.cpp | 498 + dom/media/webaudio/AudioBuffer.h | 138 + dom/media/webaudio/AudioBufferSourceNode.cpp | 853 + dom/media/webaudio/AudioBufferSourceNode.h | 129 + dom/media/webaudio/AudioContext.cpp | 1405 ++ 
dom/media/webaudio/AudioContext.h | 479 + dom/media/webaudio/AudioDestinationNode.cpp | 675 + dom/media/webaudio/AudioDestinationNode.h | 133 + dom/media/webaudio/AudioEventTimeline.cpp | 513 + dom/media/webaudio/AudioEventTimeline.h | 418 + dom/media/webaudio/AudioListener.cpp | 121 + dom/media/webaudio/AudioListener.h | 85 + dom/media/webaudio/AudioNode.cpp | 608 + dom/media/webaudio/AudioNode.h | 290 + dom/media/webaudio/AudioNodeEngine.cpp | 437 + dom/media/webaudio/AudioNodeEngine.h | 389 + dom/media/webaudio/AudioNodeEngineGeneric.h | 58 + dom/media/webaudio/AudioNodeEngineGenericImpl.h | 341 + dom/media/webaudio/AudioNodeEngineNEON.cpp | 9 + dom/media/webaudio/AudioNodeEngineSSE2.cpp | 10 + dom/media/webaudio/AudioNodeEngineSSE4_2_FMA3.cpp | 10 + dom/media/webaudio/AudioNodeExternalInputTrack.cpp | 225 + dom/media/webaudio/AudioNodeExternalInputTrack.h | 46 + dom/media/webaudio/AudioNodeTrack.cpp | 594 + dom/media/webaudio/AudioNodeTrack.h | 233 + dom/media/webaudio/AudioParam.cpp | 170 + dom/media/webaudio/AudioParam.h | 239 + dom/media/webaudio/AudioParamDescriptorMap.h | 22 + dom/media/webaudio/AudioParamMap.cpp | 21 + dom/media/webaudio/AudioParamMap.h | 34 + dom/media/webaudio/AudioParamTimeline.h | 144 + dom/media/webaudio/AudioProcessingEvent.cpp | 44 + dom/media/webaudio/AudioProcessingEvent.h | 71 + dom/media/webaudio/AudioScheduledSourceNode.cpp | 17 + dom/media/webaudio/AudioScheduledSourceNode.h | 33 + dom/media/webaudio/AudioWorkletGlobalScope.cpp | 358 + dom/media/webaudio/AudioWorkletGlobalScope.h | 84 + dom/media/webaudio/AudioWorkletImpl.cpp | 88 + dom/media/webaudio/AudioWorkletImpl.h | 63 + dom/media/webaudio/AudioWorkletNode.cpp | 899 + dom/media/webaudio/AudioWorkletNode.h | 68 + dom/media/webaudio/AudioWorkletProcessor.cpp | 49 + dom/media/webaudio/AudioWorkletProcessor.h | 50 + dom/media/webaudio/BiquadFilterNode.cpp | 358 + dom/media/webaudio/BiquadFilterNode.h | 71 + dom/media/webaudio/ChannelMergerNode.cpp | 100 + dom/media/webaudio/ChannelMergerNode.h | 67 + dom/media/webaudio/ChannelSplitterNode.cpp | 105 + dom/media/webaudio/ChannelSplitterNode.h | 72 + dom/media/webaudio/ConstantSourceNode.cpp | 279 + dom/media/webaudio/ConstantSourceNode.h | 60 + dom/media/webaudio/ConvolverNode.cpp | 479 + dom/media/webaudio/ConvolverNode.h | 77 + dom/media/webaudio/DelayBuffer.cpp | 236 + dom/media/webaudio/DelayBuffer.h | 105 + dom/media/webaudio/DelayNode.cpp | 223 + dom/media/webaudio/DelayNode.h | 55 + dom/media/webaudio/DynamicsCompressorNode.cpp | 226 + dom/media/webaudio/DynamicsCompressorNode.h | 91 + dom/media/webaudio/FFTBlock.cpp | 231 + dom/media/webaudio/FFTBlock.h | 348 + dom/media/webaudio/GainNode.cpp | 149 + dom/media/webaudio/GainNode.h | 53 + dom/media/webaudio/IIRFilterNode.cpp | 266 + dom/media/webaudio/IIRFilterNode.h | 56 + dom/media/webaudio/MediaBufferDecoder.cpp | 764 + dom/media/webaudio/MediaBufferDecoder.h | 70 + dom/media/webaudio/MediaElementAudioSourceNode.cpp | 100 + dom/media/webaudio/MediaElementAudioSourceNode.h | 67 + .../webaudio/MediaStreamAudioDestinationNode.cpp | 150 + .../webaudio/MediaStreamAudioDestinationNode.h | 58 + dom/media/webaudio/MediaStreamAudioSourceNode.cpp | 278 + dom/media/webaudio/MediaStreamAudioSourceNode.h | 127 + .../webaudio/MediaStreamTrackAudioSourceNode.cpp | 200 + .../webaudio/MediaStreamTrackAudioSourceNode.h | 113 + dom/media/webaudio/OscillatorNode.cpp | 560 + dom/media/webaudio/OscillatorNode.h | 92 + dom/media/webaudio/PannerNode.cpp | 730 + dom/media/webaudio/PannerNode.h | 221 + 
dom/media/webaudio/PanningUtils.h | 62 + dom/media/webaudio/PeriodicWave.cpp | 146 + dom/media/webaudio/PeriodicWave.h | 59 + dom/media/webaudio/PlayingRefChangeHandler.h | 42 + dom/media/webaudio/ReportDecodeResultTask.h | 37 + dom/media/webaudio/ScriptProcessorNode.cpp | 549 + dom/media/webaudio/ScriptProcessorNode.h | 131 + dom/media/webaudio/StereoPannerNode.cpp | 197 + dom/media/webaudio/StereoPannerNode.h | 74 + dom/media/webaudio/ThreeDPoint.cpp | 38 + dom/media/webaudio/ThreeDPoint.h | 68 + dom/media/webaudio/WaveShaperNode.cpp | 389 + dom/media/webaudio/WaveShaperNode.h | 72 + dom/media/webaudio/WebAudioUtils.cpp | 114 + dom/media/webaudio/WebAudioUtils.h | 184 + dom/media/webaudio/blink/Biquad.cpp | 442 + dom/media/webaudio/blink/Biquad.h | 108 + dom/media/webaudio/blink/DenormalDisabler.h | 170 + dom/media/webaudio/blink/DynamicsCompressor.cpp | 315 + dom/media/webaudio/blink/DynamicsCompressor.h | 135 + .../webaudio/blink/DynamicsCompressorKernel.cpp | 496 + .../webaudio/blink/DynamicsCompressorKernel.h | 125 + dom/media/webaudio/blink/FFTConvolver.cpp | 118 + dom/media/webaudio/blink/FFTConvolver.h | 87 + dom/media/webaudio/blink/HRTFDatabase.cpp | 135 + dom/media/webaudio/blink/HRTFDatabase.h | 104 + dom/media/webaudio/blink/HRTFDatabaseLoader.cpp | 215 + dom/media/webaudio/blink/HRTFDatabaseLoader.h | 155 + dom/media/webaudio/blink/HRTFElevation.cpp | 317 + dom/media/webaudio/blink/HRTFElevation.h | 114 + dom/media/webaudio/blink/HRTFKernel.cpp | 109 + dom/media/webaudio/blink/HRTFKernel.h | 129 + dom/media/webaudio/blink/HRTFPanner.cpp | 328 + dom/media/webaudio/blink/HRTFPanner.h | 120 + dom/media/webaudio/blink/IIRFilter.cpp | 181 + dom/media/webaudio/blink/IIRFilter.h | 62 + .../webaudio/blink/IRC_Composite_C_R0195-incl.cpp | 4571 ++++ dom/media/webaudio/blink/PeriodicWave.cpp | 355 + dom/media/webaudio/blink/PeriodicWave.h | 123 + dom/media/webaudio/blink/README | 24 + dom/media/webaudio/blink/Reverb.cpp | 277 + dom/media/webaudio/blink/Reverb.h | 80 + .../webaudio/blink/ReverbAccumulationBuffer.cpp | 114 + .../webaudio/blink/ReverbAccumulationBuffer.h | 76 + dom/media/webaudio/blink/ReverbConvolver.cpp | 272 + dom/media/webaudio/blink/ReverbConvolver.h | 94 + dom/media/webaudio/blink/ReverbConvolverStage.cpp | 101 + dom/media/webaudio/blink/ReverbConvolverStage.h | 84 + dom/media/webaudio/blink/ReverbInputBuffer.cpp | 85 + dom/media/webaudio/blink/ReverbInputBuffer.h | 73 + dom/media/webaudio/blink/ZeroPole.cpp | 83 + dom/media/webaudio/blink/ZeroPole.h | 63 + dom/media/webaudio/blink/moz.build | 36 + dom/media/webaudio/moz.build | 153 + dom/media/webaudio/test/1856145.ogg | Bin 0 -> 5818 bytes dom/media/webaudio/test/8kHz-320kbps-6ch.aac | Bin 0 -> 22657 bytes dom/media/webaudio/test/audio-expected.wav | Bin 0 -> 190764 bytes dom/media/webaudio/test/audio-mono-expected-2.wav | Bin 0 -> 103788 bytes dom/media/webaudio/test/audio-mono-expected.wav | Bin 0 -> 103788 bytes dom/media/webaudio/test/audio-quad.wav | Bin 0 -> 5128 bytes dom/media/webaudio/test/audio.ogv | Bin 0 -> 16049 bytes .../test/audioBufferSourceNodeDetached_worker.js | 3 + dom/media/webaudio/test/audiovideo.mp4 | Bin 0 -> 139713 bytes dom/media/webaudio/test/blink/README | 9 + dom/media/webaudio/test/blink/audio-testing.js | 192 + dom/media/webaudio/test/blink/biquad-filters.js | 368 + dom/media/webaudio/test/blink/biquad-testing.js | 153 + .../webaudio/test/blink/convolution-testing.js | 182 + dom/media/webaudio/test/blink/mochitest.toml | 36 + .../webaudio/test/blink/panner-model-testing.js | 210 + 
.../test/blink/test_biquadFilterNodeAllPass.html | 32 + .../blink/test_biquadFilterNodeAutomation.html | 351 + .../test/blink/test_biquadFilterNodeBandPass.html | 34 + .../test_biquadFilterNodeGetFrequencyResponse.html | 261 + .../test/blink/test_biquadFilterNodeHighPass.html | 33 + .../test/blink/test_biquadFilterNodeHighShelf.html | 33 + .../test/blink/test_biquadFilterNodeLowPass.html | 34 + .../test/blink/test_biquadFilterNodeLowShelf.html | 34 + .../test/blink/test_biquadFilterNodeNotch.html | 33 + .../test/blink/test_biquadFilterNodePeaking.html | 34 + .../test/blink/test_biquadFilterNodeTail.html | 76 + .../webaudio/test/blink/test_iirFilterNode.html | 467 + .../test_iirFilterNodeGetFrequencyResponse.html | 97 + dom/media/webaudio/test/corsServer.sjs | 26 + .../test/file_nodeCreationDocumentGone.html | 4 + dom/media/webaudio/test/generate-test-files.py | 52 + .../test/half-a-second-1ch-44100-aac-afconvert.mp4 | Bin 0 -> 6560 bytes .../webaudio/test/half-a-second-1ch-44100-aac.aac | Bin 0 -> 4826 bytes .../webaudio/test/half-a-second-1ch-44100-aac.mp4 | Bin 0 -> 5584 bytes .../webaudio/test/half-a-second-1ch-44100-alaw.wav | Bin 0 -> 22142 bytes .../test/half-a-second-1ch-44100-flac.flac | Bin 0 -> 17320 bytes .../test/half-a-second-1ch-44100-libmp3lame.mp3 | Bin 0 -> 4615 bytes .../test/half-a-second-1ch-44100-libopus.mp4 | Bin 0 -> 7171 bytes .../test/half-a-second-1ch-44100-libopus.opus | Bin 0 -> 6469 bytes .../test/half-a-second-1ch-44100-libopus.webm | Bin 0 -> 6991 bytes .../test/half-a-second-1ch-44100-libvorbis.ogg | Bin 0 -> 4320 bytes .../test/half-a-second-1ch-44100-libvorbis.webm | Bin 0 -> 4878 bytes .../test/half-a-second-1ch-44100-mulaw.wav | Bin 0 -> 22142 bytes .../webaudio/test/half-a-second-1ch-44100.wav | Bin 0 -> 44144 bytes .../webaudio/test/half-a-second-1ch-48000-aac.aac | Bin 0 -> 4840 bytes .../webaudio/test/half-a-second-1ch-48000-aac.mp4 | Bin 0 -> 5592 bytes .../test/half-a-second-1ch-48000-flac.flac | Bin 0 -> 18577 bytes .../test/half-a-second-1ch-48000-libmp3lame.mp3 | Bin 0 -> 4461 bytes .../test/half-a-second-1ch-48000-libopus.mp4 | Bin 0 -> 6738 bytes .../test/half-a-second-1ch-48000-libopus.opus | Bin 0 -> 6031 bytes .../test/half-a-second-1ch-48000-libopus.webm | Bin 0 -> 6558 bytes .../test/half-a-second-1ch-48000-libvorbis.ogg | Bin 0 -> 4559 bytes .../test/half-a-second-1ch-48000-libvorbis.webm | Bin 0 -> 5142 bytes .../webaudio/test/half-a-second-1ch-48000.wav | Bin 0 -> 48044 bytes .../webaudio/test/half-a-second-2ch-44100-aac.aac | Bin 0 -> 8755 bytes .../webaudio/test/half-a-second-2ch-44100-aac.mp4 | Bin 0 -> 9513 bytes .../test/half-a-second-2ch-44100-flac.flac | Bin 0 -> 23279 bytes .../test/half-a-second-2ch-44100-libmp3lame.mp3 | Bin 0 -> 9030 bytes .../test/half-a-second-2ch-44100-libopus.mp4 | Bin 0 -> 11593 bytes .../test/half-a-second-2ch-44100-libopus.opus | Bin 0 -> 10905 bytes .../test/half-a-second-2ch-44100-libopus.webm | Bin 0 -> 11413 bytes .../test/half-a-second-2ch-44100-libvorbis.ogg | Bin 0 -> 5478 bytes .../test/half-a-second-2ch-44100-libvorbis.webm | Bin 0 -> 6033 bytes .../webaudio/test/half-a-second-2ch-44100.wav | Bin 0 -> 88244 bytes .../webaudio/test/half-a-second-2ch-48000-aac.aac | Bin 0 -> 8727 bytes .../webaudio/test/half-a-second-2ch-48000-aac.mp4 | Bin 0 -> 9479 bytes .../test/half-a-second-2ch-48000-flac.flac | Bin 0 -> 24984 bytes .../test/half-a-second-2ch-48000-libmp3lame.mp3 | Bin 0 -> 8685 bytes .../test/half-a-second-2ch-48000-libopus.mp4 | Bin 0 -> 12247 bytes 
.../test/half-a-second-2ch-48000-libopus.opus | Bin 0 -> 11559 bytes .../test/half-a-second-2ch-48000-libopus.webm | Bin 0 -> 12067 bytes .../test/half-a-second-2ch-48000-libvorbis.ogg | Bin 0 -> 5784 bytes .../test/half-a-second-2ch-48000-libvorbis.webm | Bin 0 -> 6364 bytes .../webaudio/test/half-a-second-2ch-48000.wav | Bin 0 -> 96044 bytes dom/media/webaudio/test/invalid.txt | 1 + dom/media/webaudio/test/invalidContent.flac | 1 + dom/media/webaudio/test/layouttest-glue.js | 18 + dom/media/webaudio/test/mochitest.toml | 332 + dom/media/webaudio/test/mochitest_audio.toml | 100 + dom/media/webaudio/test/mochitest_bugs.toml | 89 + dom/media/webaudio/test/mochitest_media.toml | 76 + dom/media/webaudio/test/nil-packet.ogg | Bin 0 -> 9760 bytes dom/media/webaudio/test/noaudio.webm | Bin 0 -> 105755 bytes dom/media/webaudio/test/sine-440-10s.opus | Bin 0 -> 94428 bytes dom/media/webaudio/test/sixteen-frames.mp3 | Bin 0 -> 625 bytes dom/media/webaudio/test/small-shot-expected.wav | Bin 0 -> 53036 bytes .../webaudio/test/small-shot-mono-expected.wav | Bin 0 -> 26540 bytes dom/media/webaudio/test/small-shot.mp3 | Bin 0 -> 6825 bytes dom/media/webaudio/test/small-shot.ogg | Bin 0 -> 6416 bytes dom/media/webaudio/test/sweep-300-330-1sec.opus | Bin 0 -> 8889 bytes dom/media/webaudio/test/test_AudioBuffer.html | 104 + dom/media/webaudio/test/test_AudioContext.html | 23 + .../webaudio/test/test_AudioContext_disabled.html | 56 + dom/media/webaudio/test/test_AudioListener.html | 26 + .../webaudio/test/test_AudioNodeDevtoolsAPI.html | 59 + .../webaudio/test/test_AudioParamDevtoolsAPI.html | 49 + .../webaudio/test/test_OfflineAudioContext.html | 118 + .../test/test_ScriptProcessorCollected1.html | 77 + .../test/test_WebAudioMemoryReporting.html | 54 + dom/media/webaudio/test/test_analyserNode.html | 178 + .../webaudio/test/test_analyserNodeMinimum.html | 51 + .../webaudio/test/test_analyserNodeOutput.html | 43 + .../test/test_analyserNodePassThrough.html | 47 + .../webaudio/test/test_analyserNodeWithGain.html | 47 + dom/media/webaudio/test/test_analyserScale.html | 58 + .../webaudio/test/test_audioBufferSourceNode.html | 44 + .../test/test_audioBufferSourceNodeDetached.html | 58 + .../test/test_audioBufferSourceNodeEnded.html | 36 + .../test_audioBufferSourceNodeLazyLoopParam.html | 47 + .../test/test_audioBufferSourceNodeLoop.html | 45 + .../test_audioBufferSourceNodeLoopStartEnd.html | 48 + ...test_audioBufferSourceNodeLoopStartEndSame.html | 44 + .../test/test_audioBufferSourceNodeNoStart.html | 33 + .../test/test_audioBufferSourceNodeNullBuffer.html | 31 + .../test/test_audioBufferSourceNodeOffset.html | 55 + .../test_audioBufferSourceNodePassThrough.html | 45 + .../test/test_audioBufferSourceNodeRate.html | 58 + dom/media/webaudio/test/test_audioContextGC.html | 162 + ...ioContextParams_recordNonDefaultSampleRate.html | 48 + .../test/test_audioContextParams_sampleRate.html | 82 + .../test/test_audioContextSuspendResumeClose.html | 419 + .../webaudio/test/test_audioDestinationNode.html | 26 + .../webaudio/test/test_audioParamChaining.html | 77 + .../test/test_audioParamExponentialRamp.html | 58 + dom/media/webaudio/test/test_audioParamGain.html | 61 + .../webaudio/test/test_audioParamLinearRamp.html | 54 + .../test/test_audioParamSetCurveAtTime.html | 54 + .../test/test_audioParamSetTargetAtTime.html | 54 + ..._audioParamSetTargetAtTimeZeroTimeConstant.html | 57 + .../test/test_audioParamSetValueAtTime.html | 52 + .../test_audioParamTimelineDestinationOffset.html | 45 + 
dom/media/webaudio/test/test_badConnect.html | 52 + dom/media/webaudio/test/test_biquadFilterNode.html | 86 + .../test/test_biquadFilterNodePassThrough.html | 47 + .../test/test_biquadFilterNodeWithGain.html | 61 + dom/media/webaudio/test/test_bug1027864.html | 74 + dom/media/webaudio/test/test_bug1056032.html | 35 + dom/media/webaudio/test/test_bug1113634.html | 58 + dom/media/webaudio/test/test_bug1118372.html | 46 + dom/media/webaudio/test/test_bug1255618.html | 41 + dom/media/webaudio/test/test_bug1267579.html | 46 + dom/media/webaudio/test/test_bug1355798.html | 30 + dom/media/webaudio/test/test_bug1447273.html | 175 + dom/media/webaudio/test/test_bug808374.html | 22 + dom/media/webaudio/test/test_bug827541.html | 24 + dom/media/webaudio/test/test_bug839753.html | 18 + dom/media/webaudio/test/test_bug845960.html | 18 + dom/media/webaudio/test/test_bug856771.html | 26 + dom/media/webaudio/test/test_bug866570.html | 18 + dom/media/webaudio/test/test_bug866737.html | 36 + dom/media/webaudio/test/test_bug867089.html | 43 + dom/media/webaudio/test/test_bug867174.html | 38 + dom/media/webaudio/test/test_bug873335.html | 22 + dom/media/webaudio/test/test_bug875221.html | 243 + dom/media/webaudio/test/test_bug875402.html | 50 + dom/media/webaudio/test/test_bug894150.html | 21 + dom/media/webaudio/test/test_bug956489.html | 56 + dom/media/webaudio/test/test_bug964376.html | 64 + dom/media/webaudio/test/test_bug966247.html | 46 + dom/media/webaudio/test/test_bug972678.html | 62 + .../webaudio/test/test_channelMergerNode.html | 57 + .../test/test_channelMergerNodeWithVolume.html | 60 + .../webaudio/test/test_channelSplitterNode.html | 71 + .../test/test_channelSplitterNodeWithVolume.html | 76 + ...test_convolver-upmixing-1-channel-response.html | 143 + dom/media/webaudio/test/test_convolverNode.html | 31 + .../test/test_convolverNodeChannelCount.html | 61 + ..._convolverNodeChannelInterpretationChanges.html | 169 + .../webaudio/test/test_convolverNodeDelay.html | 72 + .../test/test_convolverNodeFiniteInfluence.html | 44 + .../test/test_convolverNodeNormalization.html | 83 + dom/media/webaudio/test/test_convolverNodeOOM.html | 46 + .../test/test_convolverNodePassThrough.html | 48 + .../webaudio/test/test_convolverNodeWithGain.html | 62 + .../test/test_convolverNode_mono_mono.html | 72 + dom/media/webaudio/test/test_currentTime.html | 27 + .../test/test_decodeAudioDataOnDetachedBuffer.html | 50 + .../webaudio/test/test_decodeAudioDataPromise.html | 62 + dom/media/webaudio/test/test_decodeAudioError.html | 74 + .../webaudio/test/test_decodeMultichannel.html | 75 + dom/media/webaudio/test/test_decodeOpusTail.html | 28 + dom/media/webaudio/test/test_decoderDelay.html | 154 + dom/media/webaudio/test/test_delayNode.html | 101 + dom/media/webaudio/test/test_delayNodeAtMax.html | 53 + .../test/test_delayNodeChannelChanges.html | 98 + dom/media/webaudio/test/test_delayNodeCycles.html | 156 + .../webaudio/test/test_delayNodePassThrough.html | 53 + .../webaudio/test/test_delayNodeSmallMaxDelay.html | 43 + .../webaudio/test/test_delayNodeTailIncrease.html | 71 + .../test/test_delayNodeTailWithDisconnect.html | 95 + .../webaudio/test/test_delayNodeTailWithGain.html | 72 + .../test/test_delayNodeTailWithReconnect.html | 136 + .../webaudio/test/test_delayNodeWithGain.html | 54 + .../test/test_delaynode-channel-count-1.html | 104 + dom/media/webaudio/test/test_disconnectAll.html | 51 + .../webaudio/test/test_disconnectAudioParam.html | 58 + .../test/test_disconnectAudioParamFromOutput.html | 67 + 
.../webaudio/test/test_disconnectExceptions.html | 75 + .../test/test_disconnectFromAudioNode.html | 55 + .../test_disconnectFromAudioNodeAndOutput.html | 59 + ...t_disconnectFromAudioNodeAndOutputAndInput.html | 57 + ..._disconnectFromAudioNodeMultipleConnection.html | 56 + .../webaudio/test/test_disconnectFromOutput.html | 54 + .../webaudio/test/test_dynamicsCompressorNode.html | 68 + .../test_dynamicsCompressorNodePassThrough.html | 47 + .../test/test_dynamicsCompressorNodeWithGain.html | 51 + .../webaudio/test/test_event_listener_leaks.html | 47 + dom/media/webaudio/test/test_gainNode.html | 72 + dom/media/webaudio/test/test_gainNodeInLoop.html | 48 + .../webaudio/test/test_gainNodePassThrough.html | 49 + .../test/test_iirFilterNodePassThrough.html | 47 + dom/media/webaudio/test/test_maxChannelCount.html | 38 + dom/media/webaudio/test/test_mediaDecoding.html | 436 + .../test/test_mediaElementAudioSourceNode.html | 74 + ...est_mediaElementAudioSourceNodeCrossOrigin.html | 94 + .../test_mediaElementAudioSourceNodeFidelity.html | 137 + ...est_mediaElementAudioSourceNodePassThrough.html | 66 + .../test_mediaElementAudioSourceNodeVideo.html | 70 + .../test/test_mediaStreamAudioDestinationNode.html | 50 + .../test/test_mediaStreamAudioSourceNode.html | 50 + ...test_mediaStreamAudioSourceNodeCrossOrigin.html | 59 + .../test/test_mediaStreamAudioSourceNodeNoGC.html | 116 + ...test_mediaStreamAudioSourceNodePassThrough.html | 55 + .../test_mediaStreamAudioSourceNodeResampling.html | 74 + .../test/test_mediaStreamTrackAudioSourceNode.html | 54 + ...mediaStreamTrackAudioSourceNodeCrossOrigin.html | 54 + .../test_mediaStreamTrackAudioSourceNodeVideo.html | 27 + dom/media/webaudio/test/test_mixingRules.html | 402 + .../test/test_nodeCreationDocumentGone.html | 34 + .../webaudio/test/test_nodeToParamConnection.html | 58 + .../test/test_notAllowedToStartAudioContextGC.html | 57 + .../test_offlineDestinationChannelCountLess.html | 42 + .../test_offlineDestinationChannelCountMore.html | 46 + dom/media/webaudio/test/test_oscillatorNode.html | 60 + dom/media/webaudio/test/test_oscillatorNode2.html | 53 + .../test/test_oscillatorNodeNegativeFrequency.html | 50 + .../test/test_oscillatorNodePassThrough.html | 43 + .../webaudio/test/test_oscillatorNodeStart.html | 38 + .../webaudio/test/test_oscillatorTypeChange.html | 58 + dom/media/webaudio/test/test_pannerNode.html | 71 + dom/media/webaudio/test/test_pannerNodeAbove.html | 50 + .../test/test_pannerNodeAtZeroDistance.html | 140 + .../webaudio/test/test_pannerNodeChannelCount.html | 52 + .../webaudio/test/test_pannerNodeHRTFSymmetry.html | 76 + .../webaudio/test/test_pannerNodePassThrough.html | 53 + dom/media/webaudio/test/test_pannerNodeTail.html | 200 + .../test/test_pannerNode_audioparam_distance.html | 43 + .../webaudio/test/test_pannerNode_equalPower.html | 26 + .../webaudio/test/test_pannerNode_maxDistance.html | 61 + dom/media/webaudio/test/test_periodicWave.html | 130 + .../test/test_periodicWaveBandLimiting.html | 86 + .../test_periodicWaveDisableNormalization.html | 98 + ...retrospective-exponentialRampToValueAtTime.html | 51 + ...test_retrospective-linearRampToValueAtTime.html | 51 + .../test/test_retrospective-setTargetAtTime.html | 51 + .../test/test_retrospective-setValueAtTime.html | 54 + .../test_retrospective-setValueCurveAtTime.html | 49 + .../webaudio/test/test_scriptProcessorNode.html | 132 + .../test/test_scriptProcessorNodeChannelCount.html | 80 + .../test/test_scriptProcessorNodeNotConnected.html | 34 + 
.../test/test_scriptProcessorNodePassThrough.html | 103 + .../test_scriptProcessorNodeZeroInputOutput.html | 39 + .../test_scriptProcessorNode_playbackTime1.html | 52 + .../test_sequentialBufferSourceWithResampling.html | 72 + .../test_setValueCurveWithNonFiniteElements.html | 60 + dom/media/webaudio/test/test_singleSourceDest.html | 70 + dom/media/webaudio/test/test_slowStart.html | 48 + dom/media/webaudio/test/test_stereoPannerNode.html | 295 + .../test/test_stereoPannerNodePassThrough.html | 47 + .../webaudio/test/test_stereoPanningWithGain.html | 49 + dom/media/webaudio/test/test_waveDecoder.html | 69 + dom/media/webaudio/test/test_waveShaper.html | 60 + dom/media/webaudio/test/test_waveShaperGain.html | 73 + .../test/test_waveShaperInvalidLengthCurve.html | 66 + .../webaudio/test/test_waveShaperNoCurve.html | 43 + .../webaudio/test/test_waveShaperPassThrough.html | 55 + dom/media/webaudio/test/test_webAudio_muteTab.html | 93 + dom/media/webaudio/test/ting-44.1k-1ch.ogg | Bin 0 -> 8566 bytes dom/media/webaudio/test/ting-44.1k-1ch.wav | Bin 0 -> 61228 bytes dom/media/webaudio/test/ting-44.1k-2ch.ogg | Bin 0 -> 10422 bytes dom/media/webaudio/test/ting-44.1k-2ch.wav | Bin 0 -> 122412 bytes dom/media/webaudio/test/ting-48k-1ch.ogg | Bin 0 -> 8680 bytes dom/media/webaudio/test/ting-48k-1ch.wav | Bin 0 -> 66638 bytes dom/media/webaudio/test/ting-48k-2ch.ogg | Bin 0 -> 10701 bytes dom/media/webaudio/test/ting-48k-2ch.wav | Bin 0 -> 133232 bytes dom/media/webaudio/test/ting-dualchannel44.1.wav | Bin 0 -> 122412 bytes dom/media/webaudio/test/ting-dualchannel48.wav | Bin 0 -> 122412 bytes dom/media/webaudio/test/waveformatextensible.wav | Bin 0 -> 1024 bytes .../webaudio/test/waveformatextensiblebadmask.wav | Bin 0 -> 7136 bytes dom/media/webaudio/test/webaudio.js | 367 + dom/media/webcodecs/DecoderAgent.cpp | 491 + dom/media/webcodecs/DecoderAgent.h | 117 + dom/media/webcodecs/DecoderTemplate.cpp | 891 + dom/media/webcodecs/DecoderTemplate.h | 260 + dom/media/webcodecs/DecoderTypes.h | 117 + dom/media/webcodecs/EncodedVideoChunk.cpp | 261 + dom/media/webcodecs/EncodedVideoChunk.h | 119 + dom/media/webcodecs/EncoderAgent.cpp | 441 + dom/media/webcodecs/EncoderAgent.h | 116 + dom/media/webcodecs/EncoderTemplate.cpp | 1228 + dom/media/webcodecs/EncoderTemplate.h | 290 + dom/media/webcodecs/EncoderTypes.h | 103 + dom/media/webcodecs/VideoColorSpace.cpp | 48 + dom/media/webcodecs/VideoColorSpace.h | 64 + dom/media/webcodecs/VideoDecoder.cpp | 977 + dom/media/webcodecs/VideoDecoder.h | 79 + dom/media/webcodecs/VideoEncoder.cpp | 624 + dom/media/webcodecs/VideoEncoder.h | 78 + dom/media/webcodecs/VideoFrame.cpp | 2417 ++ dom/media/webcodecs/VideoFrame.h | 266 + dom/media/webcodecs/WebCodecsUtils.cpp | 578 + dom/media/webcodecs/WebCodecsUtils.h | 239 + dom/media/webcodecs/crashtests/1839270.html | 13 + dom/media/webcodecs/crashtests/1848460.html | 17 + dom/media/webcodecs/crashtests/1849271.html | 27 + dom/media/webcodecs/crashtests/1864475.html | 14 + dom/media/webcodecs/crashtests/crashtests.list | 4 + dom/media/webcodecs/moz.build | 54 + dom/media/webcodecs/test/mochitest.toml | 6 + .../test/test_videoFrame_mismatched_codedSize.html | 30 + dom/media/webm/EbmlComposer.cpp | 185 + dom/media/webm/EbmlComposer.h | 81 + dom/media/webm/NesteggPacketHolder.h | 135 + dom/media/webm/WebMBufferedParser.cpp | 676 + dom/media/webm/WebMBufferedParser.h | 309 + dom/media/webm/WebMDecoder.cpp | 125 + dom/media/webm/WebMDecoder.h | 35 + dom/media/webm/WebMDemuxer.cpp | 1361 ++ dom/media/webm/WebMDemuxer.h | 293 + 
dom/media/webm/WebMWriter.cpp | 111 + dom/media/webm/WebMWriter.h | 69 + dom/media/webm/moz.build | 28 + dom/media/webrtc/CubebDeviceEnumerator.cpp | 338 + dom/media/webrtc/CubebDeviceEnumerator.h | 87 + dom/media/webrtc/MediaEngine.h | 66 + dom/media/webrtc/MediaEngineFake.cpp | 653 + dom/media/webrtc/MediaEngineFake.h | 40 + dom/media/webrtc/MediaEnginePrefs.h | 99 + dom/media/webrtc/MediaEngineRemoteVideoSource.cpp | 907 + dom/media/webrtc/MediaEngineRemoteVideoSource.h | 241 + dom/media/webrtc/MediaEngineSource.cpp | 69 + dom/media/webrtc/MediaEngineSource.h | 255 + dom/media/webrtc/MediaEngineWebRTC.cpp | 303 + dom/media/webrtc/MediaEngineWebRTC.h | 53 + dom/media/webrtc/MediaEngineWebRTCAudio.cpp | 1219 + dom/media/webrtc/MediaEngineWebRTCAudio.h | 301 + dom/media/webrtc/MediaTrackConstraints.cpp | 560 + dom/media/webrtc/MediaTrackConstraints.h | 371 + dom/media/webrtc/MediaTransportChild.h | 46 + dom/media/webrtc/MediaTransportParent.h | 70 + dom/media/webrtc/PMediaTransport.ipdl | 101 + dom/media/webrtc/PWebrtcGlobal.ipdl | 40 + dom/media/webrtc/PeerIdentity.cpp | 80 + dom/media/webrtc/PeerIdentity.h | 70 + dom/media/webrtc/RTCCertificate.cpp | 438 + dom/media/webrtc/RTCCertificate.h | 98 + dom/media/webrtc/RTCIdentityProviderRegistrar.cpp | 70 + dom/media/webrtc/RTCIdentityProviderRegistrar.h | 59 + dom/media/webrtc/SineWaveGenerator.h | 58 + dom/media/webrtc/WebrtcGlobal.h | 238 + dom/media/webrtc/WebrtcIPCTraits.h | 89 + dom/media/webrtc/common/CandidateInfo.h | 27 + dom/media/webrtc/common/CommonTypes.h | 52 + dom/media/webrtc/common/EncodingConstraints.h | 57 + dom/media/webrtc/common/NullDeleter.h | 14 + dom/media/webrtc/common/NullTransport.h | 52 + dom/media/webrtc/common/Wrapper.h | 157 + dom/media/webrtc/common/YuvStamper.cpp | 393 + dom/media/webrtc/common/YuvStamper.h | 77 + dom/media/webrtc/common/browser_logging/CSFLog.cpp | 83 + dom/media/webrtc/common/browser_logging/CSFLog.h | 58 + .../webrtc/common/browser_logging/WebRtcLog.cpp | 190 + .../webrtc/common/browser_logging/WebRtcLog.h | 23 + dom/media/webrtc/common/csf_common.h | 85 + dom/media/webrtc/common/moz.build | 23 + dom/media/webrtc/common/time_profiling/timecard.c | 112 + dom/media/webrtc/common/time_profiling/timecard.h | 73 + dom/media/webrtc/jsapi/MediaTransportHandler.cpp | 1727 ++ dom/media/webrtc/jsapi/MediaTransportHandler.h | 167 + .../webrtc/jsapi/MediaTransportHandlerIPC.cpp | 471 + dom/media/webrtc/jsapi/MediaTransportHandlerIPC.h | 96 + dom/media/webrtc/jsapi/MediaTransportParent.cpp | 238 + dom/media/webrtc/jsapi/PacketDumper.cpp | 160 + dom/media/webrtc/jsapi/PacketDumper.h | 55 + dom/media/webrtc/jsapi/PeerConnectionCtx.cpp | 650 + dom/media/webrtc/jsapi/PeerConnectionCtx.h | 192 + dom/media/webrtc/jsapi/PeerConnectionImpl.cpp | 4653 ++++ dom/media/webrtc/jsapi/PeerConnectionImpl.h | 980 + dom/media/webrtc/jsapi/RTCDTMFSender.cpp | 159 + dom/media/webrtc/jsapi/RTCDTMFSender.h | 78 + dom/media/webrtc/jsapi/RTCDtlsTransport.cpp | 70 + dom/media/webrtc/jsapi/RTCDtlsTransport.h | 44 + dom/media/webrtc/jsapi/RTCEncodedAudioFrame.cpp | 96 + dom/media/webrtc/jsapi/RTCEncodedAudioFrame.h | 52 + dom/media/webrtc/jsapi/RTCEncodedFrameBase.cpp | 73 + dom/media/webrtc/jsapi/RTCEncodedFrameBase.h | 58 + dom/media/webrtc/jsapi/RTCEncodedVideoFrame.cpp | 116 + dom/media/webrtc/jsapi/RTCEncodedVideoFrame.h | 61 + dom/media/webrtc/jsapi/RTCRtpReceiver.cpp | 1046 + dom/media/webrtc/jsapi/RTCRtpReceiver.h | 215 + dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp | 84 + 
dom/media/webrtc/jsapi/RTCRtpScriptTransform.h | 63 + dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp | 449 + dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h | 197 + dom/media/webrtc/jsapi/RTCRtpSender.cpp | 1811 ++ dom/media/webrtc/jsapi/RTCRtpSender.h | 276 + dom/media/webrtc/jsapi/RTCRtpTransceiver.cpp | 1167 + dom/media/webrtc/jsapi/RTCRtpTransceiver.h | 255 + dom/media/webrtc/jsapi/RTCSctpTransport.cpp | 53 + dom/media/webrtc/jsapi/RTCSctpTransport.h | 65 + dom/media/webrtc/jsapi/RTCStatsIdGenerator.cpp | 88 + dom/media/webrtc/jsapi/RTCStatsIdGenerator.h | 42 + dom/media/webrtc/jsapi/RTCStatsReport.cpp | 213 + dom/media/webrtc/jsapi/RTCStatsReport.h | 205 + .../webrtc/jsapi/RTCTransformEventRunnable.cpp | 80 + dom/media/webrtc/jsapi/RTCTransformEventRunnable.h | 38 + dom/media/webrtc/jsapi/RemoteTrackSource.cpp | 73 + dom/media/webrtc/jsapi/RemoteTrackSource.h | 64 + dom/media/webrtc/jsapi/WebrtcGlobalChild.h | 41 + dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp | 803 + dom/media/webrtc/jsapi/WebrtcGlobalInformation.h | 101 + dom/media/webrtc/jsapi/WebrtcGlobalParent.h | 51 + .../webrtc/jsapi/WebrtcGlobalStatsHistory.cpp | 280 + dom/media/webrtc/jsapi/WebrtcGlobalStatsHistory.h | 85 + dom/media/webrtc/jsapi/moz.build | 62 + dom/media/webrtc/jsep/JsepCodecDescription.h | 1179 + dom/media/webrtc/jsep/JsepSession.h | 287 + dom/media/webrtc/jsep/JsepSessionImpl.cpp | 2473 ++ dom/media/webrtc/jsep/JsepSessionImpl.h | 284 + dom/media/webrtc/jsep/JsepTrack.cpp | 706 + dom/media/webrtc/jsep/JsepTrack.h | 323 + dom/media/webrtc/jsep/JsepTrackEncoding.h | 62 + dom/media/webrtc/jsep/JsepTransceiver.h | 220 + dom/media/webrtc/jsep/JsepTransport.h | 102 + dom/media/webrtc/jsep/SsrcGenerator.cpp | 22 + dom/media/webrtc/jsep/SsrcGenerator.h | 20 + dom/media/webrtc/jsep/moz.build | 18 + dom/media/webrtc/libwebrtcglue/AudioConduit.cpp | 1050 + dom/media/webrtc/libwebrtcglue/AudioConduit.h | 299 + dom/media/webrtc/libwebrtcglue/CallWorkerThread.h | 116 + dom/media/webrtc/libwebrtcglue/CodecConfig.h | 237 + .../webrtc/libwebrtcglue/FrameTransformer.cpp | 87 + dom/media/webrtc/libwebrtcglue/FrameTransformer.h | 79 + .../webrtc/libwebrtcglue/FrameTransformerProxy.cpp | 258 + .../webrtc/libwebrtcglue/FrameTransformerProxy.h | 124 + dom/media/webrtc/libwebrtcglue/GmpVideoCodec.cpp | 22 + dom/media/webrtc/libwebrtcglue/GmpVideoCodec.h | 27 + .../webrtc/libwebrtcglue/MediaConduitControl.h | 79 + .../webrtc/libwebrtcglue/MediaConduitErrors.h | 46 + .../webrtc/libwebrtcglue/MediaConduitInterface.cpp | 152 + .../webrtc/libwebrtcglue/MediaConduitInterface.h | 499 + dom/media/webrtc/libwebrtcglue/MediaDataCodec.cpp | 70 + dom/media/webrtc/libwebrtcglue/MediaDataCodec.h | 32 + dom/media/webrtc/libwebrtcglue/RtpRtcpConfig.h | 24 + dom/media/webrtc/libwebrtcglue/RunningStat.h | 48 + dom/media/webrtc/libwebrtcglue/SystemTime.cpp | 60 + dom/media/webrtc/libwebrtcglue/SystemTime.h | 44 + dom/media/webrtc/libwebrtcglue/TaskQueueWrapper.h | 181 + dom/media/webrtc/libwebrtcglue/VideoConduit.cpp | 2083 ++ dom/media/webrtc/libwebrtcglue/VideoConduit.h | 496 + .../webrtc/libwebrtcglue/VideoStreamFactory.cpp | 399 + .../webrtc/libwebrtcglue/VideoStreamFactory.h | 132 + .../webrtc/libwebrtcglue/WebrtcCallWrapper.cpp | 105 + dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.h | 114 + .../webrtc/libwebrtcglue/WebrtcGmpVideoCodec.cpp | 1043 + .../webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h | 507 + dom/media/webrtc/libwebrtcglue/WebrtcImageBuffer.h | 53 + .../libwebrtcglue/WebrtcMediaDataDecoderCodec.cpp | 209 + 
.../libwebrtcglue/WebrtcMediaDataDecoderCodec.h | 70 + .../libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp | 535 + .../libwebrtcglue/WebrtcMediaDataEncoderCodec.h | 78 + .../libwebrtcglue/WebrtcVideoCodecFactory.cpp | 139 + .../webrtc/libwebrtcglue/WebrtcVideoCodecFactory.h | 124 + dom/media/webrtc/libwebrtcglue/moz.build | 37 + dom/media/webrtc/metrics.yaml | 366 + dom/media/webrtc/moz.build | 132 + dom/media/webrtc/sdp/HybridSdpParser.cpp | 88 + dom/media/webrtc/sdp/HybridSdpParser.h | 38 + dom/media/webrtc/sdp/ParsingResultComparer.cpp | 331 + dom/media/webrtc/sdp/ParsingResultComparer.h | 57 + dom/media/webrtc/sdp/RsdparsaSdp.cpp | 125 + dom/media/webrtc/sdp/RsdparsaSdp.h | 72 + dom/media/webrtc/sdp/RsdparsaSdpAttributeList.cpp | 1301 ++ dom/media/webrtc/sdp/RsdparsaSdpAttributeList.h | 157 + dom/media/webrtc/sdp/RsdparsaSdpGlue.cpp | 106 + dom/media/webrtc/sdp/RsdparsaSdpGlue.h | 36 + dom/media/webrtc/sdp/RsdparsaSdpInc.h | 510 + dom/media/webrtc/sdp/RsdparsaSdpMediaSection.cpp | 253 + dom/media/webrtc/sdp/RsdparsaSdpMediaSection.h | 71 + dom/media/webrtc/sdp/RsdparsaSdpParser.cpp | 73 + dom/media/webrtc/sdp/RsdparsaSdpParser.h | 34 + dom/media/webrtc/sdp/Sdp.h | 166 + dom/media/webrtc/sdp/SdpAttribute.cpp | 1562 ++ dom/media/webrtc/sdp/SdpAttribute.h | 1892 ++ dom/media/webrtc/sdp/SdpAttributeList.h | 90 + dom/media/webrtc/sdp/SdpEnum.h | 64 + dom/media/webrtc/sdp/SdpHelper.cpp | 800 + dom/media/webrtc/sdp/SdpHelper.h | 109 + dom/media/webrtc/sdp/SdpLog.cpp | 68 + dom/media/webrtc/sdp/SdpLog.h | 17 + dom/media/webrtc/sdp/SdpMediaSection.cpp | 197 + dom/media/webrtc/sdp/SdpMediaSection.h | 317 + dom/media/webrtc/sdp/SdpParser.h | 81 + dom/media/webrtc/sdp/SdpPref.cpp | 107 + dom/media/webrtc/sdp/SdpPref.h | 82 + dom/media/webrtc/sdp/SdpTelemetry.cpp | 63 + dom/media/webrtc/sdp/SdpTelemetry.h | 43 + dom/media/webrtc/sdp/SipccSdp.cpp | 173 + dom/media/webrtc/sdp/SipccSdp.h | 82 + dom/media/webrtc/sdp/SipccSdpAttributeList.cpp | 1386 ++ dom/media/webrtc/sdp/SipccSdpAttributeList.h | 145 + dom/media/webrtc/sdp/SipccSdpMediaSection.cpp | 401 + dom/media/webrtc/sdp/SipccSdpMediaSection.h | 101 + dom/media/webrtc/sdp/SipccSdpParser.cpp | 88 + dom/media/webrtc/sdp/SipccSdpParser.h | 35 + dom/media/webrtc/sdp/moz.build | 48 + dom/media/webrtc/sdp/rsdparsa_capi/Cargo.toml | 12 + .../webrtc/sdp/rsdparsa_capi/src/attribute.rs | 1472 ++ dom/media/webrtc/sdp/rsdparsa_capi/src/lib.rs | 298 + .../webrtc/sdp/rsdparsa_capi/src/media_section.rs | 233 + dom/media/webrtc/sdp/rsdparsa_capi/src/network.rs | 266 + dom/media/webrtc/sdp/rsdparsa_capi/src/types.rs | 199 + dom/media/webrtc/tests/crashtests/1770075.html | 8 + dom/media/webrtc/tests/crashtests/1789908.html | 25 + dom/media/webrtc/tests/crashtests/1799168.html | 16 + dom/media/webrtc/tests/crashtests/1816708.html | 21 + dom/media/webrtc/tests/crashtests/1821477.html | 16 + dom/media/webrtc/tests/crashtests/crashtests.list | 8 + .../tests/crashtests/getUserMedia-audio.html | 7 + dom/media/webrtc/tests/fuzztests/moz.build | 22 + .../webrtc/tests/fuzztests/sdp_parser_libfuzz.cpp | 30 + .../mochitests/NetworkPreparationChromeScript.js | 43 + .../tests/mochitests/addTurnsSelfsignedCert.js | 32 + dom/media/webrtc/tests/mochitests/blacksilence.js | 134 + dom/media/webrtc/tests/mochitests/dataChannel.js | 352 + dom/media/webrtc/tests/mochitests/head.js | 1435 ++ .../tests/mochitests/helpers_from_wpt/sdp.js | 889 + dom/media/webrtc/tests/mochitests/iceTestUtils.js | 302 + .../tests/mochitests/identity/identityPcTest.js | 86 + 
.../webrtc/tests/mochitests/identity/idp-bad.js | 1 + .../webrtc/tests/mochitests/identity/idp-min.js | 24 + .../mochitests/identity/idp-redirect-http-trick.js | 3 + .../identity/idp-redirect-http-trick.js^headers^ | 2 + .../tests/mochitests/identity/idp-redirect-http.js | 3 + .../identity/idp-redirect-http.js^headers^ | 2 + .../identity/idp-redirect-https-double.js | 3 + .../identity/idp-redirect-https-double.js^headers^ | 2 + .../identity/idp-redirect-https-odd-path.js | 3 + .../idp-redirect-https-odd-path.js^headers^ | 2 + .../mochitests/identity/idp-redirect-https.js | 3 + .../identity/idp-redirect-https.js^headers^ | 2 + dom/media/webrtc/tests/mochitests/identity/idp.js | 119 + dom/media/webrtc/tests/mochitests/identity/idp.sjs | 18 + .../webrtc/tests/mochitests/identity/login.html | 31 + .../tests/mochitests/identity/mochitest.toml | 57 + .../mochitests/identity/test_fingerprints.html | 91 + .../identity/test_getIdentityAssertion.html | 101 + .../tests/mochitests/identity/test_idpproxy.html | 178 + .../mochitests/identity/test_loginNeeded.html | 72 + .../test_peerConnection_asymmetricIsolation.html | 31 + .../identity/test_peerConnection_peerIdentity.html | 21 + .../identity/test_setIdentityProvider.html | 67 + .../test_setIdentityProviderWithErrors.html | 57 + .../webrtc/tests/mochitests/mediaStreamPlayback.js | 242 + dom/media/webrtc/tests/mochitests/mochitest.toml | 85 + .../tests/mochitests/mochitest_datachannel.toml | 65 + .../tests/mochitests/mochitest_getusermedia.toml | 156 + .../tests/mochitests/mochitest_peerconnection.toml | 489 + dom/media/webrtc/tests/mochitests/network.js | 16 + dom/media/webrtc/tests/mochitests/nonTrickleIce.js | 97 + dom/media/webrtc/tests/mochitests/parser_rtp.js | 131 + dom/media/webrtc/tests/mochitests/pc.js | 2496 ++ .../peerconnection_audio_forced_sample_rate.js | 32 + dom/media/webrtc/tests/mochitests/sdpUtils.js | 398 + dom/media/webrtc/tests/mochitests/simulcast.js | 233 + dom/media/webrtc/tests/mochitests/stats.js | 1647 ++ dom/media/webrtc/tests/mochitests/templates.js | 615 + .../webrtc/tests/mochitests/test_1488832.html | 37 + .../webrtc/tests/mochitests/test_1717318.html | 26 + dom/media/webrtc/tests/mochitests/test_a_noOp.html | 32 + .../mochitests/test_dataChannel_basicAudio.html | 25 + .../test_dataChannel_basicAudioVideo.html | 26 + .../test_dataChannel_basicAudioVideoCombined.html | 26 + .../test_dataChannel_basicAudioVideoNoBundle.html | 27 + .../mochitests/test_dataChannel_basicDataOnly.html | 24 + .../mochitests/test_dataChannel_basicVideo.html | 25 + .../mochitests/test_dataChannel_bug1013809.html | 27 + ...test_dataChannel_dataOnlyBufferedAmountLow.html | 25 + .../mochitests/test_dataChannel_dtlsVersions.html | 38 + .../test_dataChannel_hostnameObfuscation.html | 59 + .../tests/mochitests/test_dataChannel_noOffer.html | 33 + .../tests/mochitests/test_dataChannel_stats.html | 50 + .../mochitests/test_defaultAudioConstraints.html | 80 + .../tests/mochitests/test_enumerateDevices.html | 141 + .../test_enumerateDevices_getUserMediaFake.html | 63 + .../mochitests/test_enumerateDevices_iframe.html | 28 + .../test_enumerateDevices_iframe_pre_gum.html | 22 + .../mochitests/test_enumerateDevices_legacy.html | 147 + .../test_enumerateDevices_navigation.html | 54 + .../mochitests/test_fingerprinting_resistance.html | 112 + .../tests/mochitests/test_forceSampleRate.html | 23 + .../test_getUserMedia_GC_MediaStream.html | 55 + .../test_getUserMedia_active_autoplay.html | 61 + .../test_getUserMedia_addTrackRemoveTrack.html | 169 + 
...t_getUserMedia_addtrack_removetrack_events.html | 113 + .../mochitests/test_getUserMedia_audioCapture.html | 131 + .../test_getUserMedia_audioConstraints.html | 93 + ...erMedia_audioConstraints_concurrentIframes.html | 150 + ...erMedia_audioConstraints_concurrentStreams.html | 123 + .../mochitests/test_getUserMedia_basicAudio.html | 27 + .../test_getUserMedia_basicAudio_loopback.html | 102 + .../test_getUserMedia_basicScreenshare.html | 261 + .../test_getUserMedia_basicTabshare.html | 67 + .../mochitests/test_getUserMedia_basicVideo.html | 30 + .../test_getUserMedia_basicVideoAudio.html | 30 + ...erMedia_basicVideo_playAfterLoadedmetadata.html | 42 + .../test_getUserMedia_basicWindowshare.html | 39 + .../mochitests/test_getUserMedia_bug1223696.html | 54 + .../mochitests/test_getUserMedia_callbacks.html | 35 + .../mochitests/test_getUserMedia_constraints.html | 166 + .../test_getUserMedia_cubebDisabled.html | 42 + ...test_getUserMedia_cubebDisabledFakeStreams.html | 43 + .../mochitests/test_getUserMedia_getTrackById.html | 50 + .../mochitests/test_getUserMedia_gumWithinGum.html | 38 + .../test_getUserMedia_loadedmetadata.html | 39 + ...est_getUserMedia_mediaElementCapture_audio.html | 123 + ...st_getUserMedia_mediaElementCapture_tracks.html | 179 + ...est_getUserMedia_mediaElementCapture_video.html | 91 + .../test_getUserMedia_mediaStreamClone.html | 260 + .../test_getUserMedia_mediaStreamConstructors.html | 171 + .../test_getUserMedia_mediaStreamTrackClone.html | 170 + .../test_getUserMedia_nonDefaultRate.html | 35 + .../mochitests/test_getUserMedia_peerIdentity.html | 61 + .../mochitests/test_getUserMedia_permission.html | 104 + .../test_getUserMedia_permission_iframe.html | 30 + .../test_getUserMedia_playAudioTwice.html | 25 + .../test_getUserMedia_playVideoAudioTwice.html | 26 + .../test_getUserMedia_playVideoTwice.html | 26 + .../mochitests/test_getUserMedia_scarySources.html | 51 + .../test_getUserMedia_spinEventLoop.html | 28 + .../test_getUserMedia_trackCloneCleanup.html | 32 + .../mochitests/test_getUserMedia_trackEnded.html | 68 + .../webrtc/tests/mochitests/test_groupId.html | 53 + .../webrtc/tests/mochitests/test_multi_mics.html | 61 + .../tests/mochitests/test_ondevicechange.html | 180 + ...nection_addAudioTrackToExistingVideoStream.html | 64 + .../test_peerConnection_addDataChannel.html | 33 + ...test_peerConnection_addDataChannelNoBundle.html | 44 + .../test_peerConnection_addSecondAudioStream.html | 51 + ...eerConnection_addSecondAudioStreamNoBundle.html | 59 + .../test_peerConnection_addSecondVideoStream.html | 53 + ...eerConnection_addSecondVideoStreamNoBundle.html | 60 + ...peerConnection_addtrack_removetrack_events.html | 75 + ...eerConnection_answererAddSecondAudioStream.html | 32 + .../test_peerConnection_audioChannels.html | 102 + .../test_peerConnection_audioCodecs.html | 90 + ...st_peerConnection_audioContributingSources.html | 144 + ...onnection_audioRenegotiationInactiveAnswer.html | 69 + ...peerConnection_audioSynchronizationSources.html | 95 + ..._audioSynchronizationSourcesUnidirectional.html | 54 + .../mochitests/test_peerConnection_basicAudio.html | 25 + ...onnection_basicAudioDynamicPtMissingRtpmap.html | 36 + .../test_peerConnection_basicAudioNATRelay.html | 47 + .../test_peerConnection_basicAudioNATRelayTCP.html | 42 + ...onnection_basicAudioNATRelayTCPWithStun300.html | 54 + .../test_peerConnection_basicAudioNATRelayTLS.html | 41 + ...erConnection_basicAudioNATRelayWithStun300.html | 53 + .../test_peerConnection_basicAudioNATSrflx.html | 44 + 
...est_peerConnection_basicAudioNoisyUDPBlock.html | 41 + ...test_peerConnection_basicAudioPcmaPcmuOnly.html | 39 + .../test_peerConnection_basicAudioRelayPolicy.html | 83 + .../test_peerConnection_basicAudioRequireEOC.html | 35 + ...ection_basicAudioVerifyRtpHeaderExtensions.html | 63 + .../test_peerConnection_basicAudioVideo.html | 24 + ...est_peerConnection_basicAudioVideoCombined.html | 24 + ...est_peerConnection_basicAudioVideoNoBundle.html | 25 + ...onnection_basicAudioVideoNoBundleNoRtcpMux.html | 39 + ...st_peerConnection_basicAudioVideoNoRtcpMux.html | 38 + ...peerConnection_basicAudioVideoTransceivers.html | 31 + ...peerConnection_basicAudioVideoVerifyExtmap.html | 97 + ...ection_basicAudioVideoVerifyExtmapSendonly.html | 97 + ...ction_basicAudioVideoVerifyTooLongMidFails.html | 47 + ...erConnection_basicAudio_forced_higher_rate.html | 19 + ...eerConnection_basicAudio_forced_lower_rate.html | 19 + .../test_peerConnection_basicH264Video.html | 26 + .../test_peerConnection_basicScreenshare.html | 64 + .../mochitests/test_peerConnection_basicVideo.html | 23 + ...ection_basicVideoVerifyRtpHeaderExtensions.html | 82 + .../test_peerConnection_basicWindowshare.html | 25 + .../mochitests/test_peerConnection_bug1013809.html | 25 + .../mochitests/test_peerConnection_bug1042791.html | 36 + .../mochitests/test_peerConnection_bug1227781.html | 37 + .../mochitests/test_peerConnection_bug1512281.html | 47 + .../mochitests/test_peerConnection_bug1773067.html | 32 + .../mochitests/test_peerConnection_bug822674.html | 26 + .../mochitests/test_peerConnection_bug825703.html | 140 + .../mochitests/test_peerConnection_bug827843.html | 50 + .../mochitests/test_peerConnection_bug834153.html | 36 + .../mochitests/test_peerConnection_callbacks.html | 86 + ...est_peerConnection_captureStream_canvas_2d.html | 81 + ...rConnection_captureStream_canvas_2d_noSSRC.html | 83 + ..._peerConnection_captureStream_canvas_webgl.html | 130 + .../test_peerConnection_capturedVideo.html | 81 + .../test_peerConnection_certificates.html | 186 + .../test_peerConnection_checkPacketDumpHook.html | 107 + .../mochitests/test_peerConnection_close.html | 134 + .../test_peerConnection_closeDuringIce.html | 79 + ...est_peerConnection_codecNegotiationFailure.html | 111 + .../test_peerConnection_constructedStream.html | 67 + ...peerConnection_disabledVideoPreNegotiation.html | 45 + .../test_peerConnection_encodingsNegotiation.html | 85 + .../test_peerConnection_errorCallbacks.html | 55 + .../test_peerConnection_extmapRenegotiation.html | 325 + ...nection_forwarding_basicAudioVideoCombined.html | 41 + ..._peerConnection_gatherWithSetConfiguration.html | 450 + .../test_peerConnection_gatherWithStun300.html | 269 + .../test_peerConnection_gatherWithStun300IPv6.html | 283 + .../mochitests/test_peerConnection_glean.html | 596 + .../mochitests/test_peerConnection_iceFailure.html | 84 + .../mochitests/test_peerConnection_insertDTMF.html | 75 + .../test_peerConnection_localReofferRollback.html | 44 + .../test_peerConnection_localRollback.html | 47 + .../test_peerConnection_maxFsConstraint.html | 114 + ...onnection_multiple_captureStream_canvas_2d.html | 115 + .../test_peerConnection_noTrickleAnswer.html | 25 + .../test_peerConnection_noTrickleOffer.html | 25 + .../test_peerConnection_noTrickleOfferAnswer.html | 26 + .../test_peerConnection_nonDefaultRate.html | 200 + ...t_peerConnection_offerRequiresReceiveAudio.html | 23 + ...t_peerConnection_offerRequiresReceiveVideo.html | 23 + ...rConnection_offerRequiresReceiveVideoAudio.html | 23 + 
.../test_peerConnection_portRestrictions.html | 63 + .../test_peerConnection_promiseSendOnly.html | 61 + .../test_peerConnection_recordReceiveTrack.html | 101 + .../mochitests/test_peerConnection_relayOnly.html | 60 + .../test_peerConnection_remoteReofferRollback.html | 50 + .../test_peerConnection_remoteRollback.html | 51 + .../test_peerConnection_removeAudioTrack.html | 63 + ...est_peerConnection_removeThenAddAudioTrack.html | 93 + ...Connection_removeThenAddAudioTrackNoBundle.html | 82 + ...est_peerConnection_removeThenAddVideoTrack.html | 98 + ...Connection_removeThenAddVideoTrackNoBundle.html | 89 + .../test_peerConnection_removeVideoTrack.html | 64 + ...st_peerConnection_renderAfterRenegotiation.html | 89 + ...ction_replaceNullTrackThenRenegotiateAudio.html | 53 + ...ction_replaceNullTrackThenRenegotiateVideo.html | 63 + .../test_peerConnection_replaceTrack.html | 186 + .../test_peerConnection_replaceTrack_camera.html | 48 + .../test_peerConnection_replaceTrack_disabled.html | 60 + ...est_peerConnection_replaceTrack_microphone.html | 46 + ...peerConnection_replaceVideoThenRenegotiate.html | 74 + .../mochitests/test_peerConnection_restartIce.html | 41 + .../test_peerConnection_restartIceBadAnswer.html | 58 + ...onnection_restartIceLocalAndRemoteRollback.html | 82 + ...eLocalAndRemoteRollbackNoSubsequentRestart.html | 77 + ...est_peerConnection_restartIceLocalRollback.html | 76 + ...restartIceLocalRollbackNoSubsequentRestart.html | 60 + .../test_peerConnection_restartIceNoBundle.html | 43 + ...peerConnection_restartIceNoBundleNoRtcpMux.html | 44 + .../test_peerConnection_restartIceNoRtcpMux.html | 43 + ...rConnection_restrictBandwidthTargetBitrate.html | 29 + ...t_peerConnection_restrictBandwidthWithTias.html | 30 + .../mochitests/test_peerConnection_rtcp_rsize.html | 81 + .../test_peerConnection_scaleResolution.html | 119 + ...onnection_scaleResolution_oldSetParameters.html | 122 + ...t_peerConnection_sender_and_receiver_stats.html | 73 + ...rConnection_setLocalAnswerInHaveLocalOffer.html | 34 + ...test_peerConnection_setLocalAnswerInStable.html | 34 + ...rConnection_setLocalOfferInHaveRemoteOffer.html | 31 + .../test_peerConnection_setParameters.html | 470 + ..._peerConnection_setParameters_maxFramerate.html | 63 + ...etParameters_maxFramerate_oldSetParameters.html | 60 + ...rConnection_setParameters_oldSetParameters.html | 86 + ...ection_setParameters_scaleResolutionDownBy.html | 98 + ...ers_scaleResolutionDownBy_oldSetParameters.html | 96 + ...onnection_setRemoteAnswerInHaveRemoteOffer.html | 34 + ...est_peerConnection_setRemoteAnswerInStable.html | 34 + ...rConnection_setRemoteOfferInHaveLocalOffer.html | 37 + .../test_peerConnection_sillyCodecPriorities.html | 99 + .../test_peerConnection_simulcastAnswer.html | 121 + ...peerConnection_simulcastAnswer_lowResFirst.html | 113 + ...mulcastAnswer_lowResFirst_oldSetParameters.html | 115 + ...onnection_simulcastAnswer_oldSetParameters.html | 115 + ...test_peerConnection_simulcastOddResolution.html | 183 + ...on_simulcastOddResolution_oldSetParameters.html | 172 + .../test_peerConnection_simulcastOffer.html | 109 + ..._peerConnection_simulcastOffer_lowResFirst.html | 109 + ...imulcastOffer_lowResFirst_oldSetParameters.html | 112 + ...Connection_simulcastOffer_oldSetParameters.html | 112 + .../mochitests/test_peerConnection_stats.html | 42 + .../test_peerConnection_stats_jitter.html | 58 + .../test_peerConnection_stats_oneway.html | 65 + .../test_peerConnection_stats_relayProtocol.html | 58 + .../test_peerConnection_stereoFmtpPref.html | 
61 + .../test_peerConnection_syncSetDescription.html | 53 + .../test_peerConnection_telephoneEventFirst.html | 56 + ...t_peerConnection_threeUnbundledConnections.html | 134 + .../test_peerConnection_throwInCallbacks.html | 82 + .../mochitests/test_peerConnection_toJSON.html | 39 + .../test_peerConnection_trackDisabling.html | 114 + .../test_peerConnection_trackDisabling_clones.html | 168 + ...test_peerConnection_trackless_sender_stats.html | 56 + .../test_peerConnection_twoAudioStreams.html | 23 + ...t_peerConnection_twoAudioTracksInOneStream.html | 37 + .../test_peerConnection_twoAudioVideoStreams.html | 26 + ...eerConnection_twoAudioVideoStreamsCombined.html | 70 + ...ction_twoAudioVideoStreamsCombinedNoBundle.html | 107 + .../test_peerConnection_twoVideoStreams.html | 23 + ...t_peerConnection_twoVideoTracksInOneStream.html | 37 + ...erConnection_verifyAudioAfterRenegotiation.html | 99 + .../test_peerConnection_verifyDescriptions.html | 58 + ...erConnection_verifyVideoAfterRenegotiation.html | 123 + .../test_peerConnection_videoCodecs.html | 142 + ...onnection_videoRenegotiationInactiveAnswer.html | 95 + .../mochitests/test_peerConnection_webAudio.html | 43 + .../webrtc/tests/mochitests/test_selftest.html | 37 + .../test_setSinkId-echoCancellation.html | 110 + .../mochitests/test_setSinkId-stream-source.html | 138 + .../webrtc/tests/mochitests/test_setSinkId.html | 83 + .../test_setSinkId_default_addTrack.html | 52 + .../mochitests/test_setSinkId_preMutedElement.html | 98 + .../tests/mochitests/test_unfocused_pref.html | 49 + dom/media/webrtc/tests/mochitests/turnConfig.js | 16 + dom/media/webrtc/third_party_build/README.md | 17 + .../third_party_build/build_no_op_commits.sh | 126 + .../webrtc/third_party_build/cherry_pick_commit.py | 420 + .../third_party_build/commit-build-file-changes.sh | 47 + .../webrtc/third_party_build/default_config_env | 45 + .../webrtc/third_party_build/default_mozconfig | 7 + .../third_party_build/detect_upstream_revert.sh | 113 + .../webrtc/third_party_build/elm_arcconfig.patch | 10 + dom/media/webrtc/third_party_build/elm_rebase.sh | 271 + .../webrtc/third_party_build/extract-for-git.py | 146 + .../third_party_build/fast-forward-libwebrtc.sh | 159 + .../webrtc/third_party_build/fetch_github_repo.py | 146 + .../webrtc/third_party_build/filter_git_changes.py | 87 + .../webrtc/third_party_build/gn-configs/README.md | 16 + .../third_party_build/gn-configs/webrtc.json | 91 + .../webrtc/third_party_build/lookup_branch_head.py | 98 + dom/media/webrtc/third_party_build/loop-ff.sh | 284 + .../third_party_build/make_upstream_revert_noop.sh | 115 + .../third_party_build/pre-warmed-milestone.cache | 1 + dom/media/webrtc/third_party_build/prep_repo.sh | 114 + .../third_party_build/push_official_branch.sh | 50 + .../third_party_build/restore_elm_arcconfig.py | 28 + .../third_party_build/restore_patch_stack.py | 131 + .../webrtc/third_party_build/run_operations.py | 97 + .../webrtc/third_party_build/save_patch_stack.py | 192 + .../third_party_build/update_default_config.sh | 67 + .../webrtc/third_party_build/use_config_env.sh | 89 + .../webrtc/third_party_build/vendor-libwebrtc.py | 432 + .../webrtc/third_party_build/vendor_and_commit.py | 308 + .../webrtc/third_party_build/verify_vendoring.sh | 91 + dom/media/webrtc/third_party_build/webrtc.mozbuild | 48 + .../third_party_build/write_default_config.py | 124 + dom/media/webrtc/transport/README | 45 + dom/media/webrtc/transport/SrtpFlow.cpp | 259 + dom/media/webrtc/transport/SrtpFlow.h | 69 + 
.../webrtc/transport/WebrtcTCPSocketWrapper.cpp | 123 + .../webrtc/transport/WebrtcTCPSocketWrapper.h | 69 + dom/media/webrtc/transport/build/moz.build | 44 + dom/media/webrtc/transport/common.build | 94 + dom/media/webrtc/transport/dtlsidentity.cpp | 331 + dom/media/webrtc/transport/dtlsidentity.h | 102 + dom/media/webrtc/transport/fuzztest/moz.build | 31 + .../transport/fuzztest/stun_parser_libfuzz.cpp | 35 + .../transport/ipc/NrIceStunAddrMessageUtils.h | 54 + dom/media/webrtc/transport/ipc/PStunAddrsParams.h | 33 + .../webrtc/transport/ipc/PStunAddrsRequest.ipdl | 35 + .../webrtc/transport/ipc/PWebrtcTCPSocket.ipdl | 42 + .../webrtc/transport/ipc/StunAddrsRequestChild.cpp | 45 + .../webrtc/transport/ipc/StunAddrsRequestChild.h | 64 + .../transport/ipc/StunAddrsRequestParent.cpp | 262 + .../webrtc/transport/ipc/StunAddrsRequestParent.h | 82 + .../webrtc/transport/ipc/WebrtcProxyConfig.ipdlh | 23 + dom/media/webrtc/transport/ipc/WebrtcTCPSocket.cpp | 784 + dom/media/webrtc/transport/ipc/WebrtcTCPSocket.h | 104 + .../webrtc/transport/ipc/WebrtcTCPSocketCallback.h | 28 + .../webrtc/transport/ipc/WebrtcTCPSocketChild.cpp | 96 + .../webrtc/transport/ipc/WebrtcTCPSocketChild.h | 47 + .../webrtc/transport/ipc/WebrtcTCPSocketLog.cpp | 11 + .../webrtc/transport/ipc/WebrtcTCPSocketLog.h | 20 + .../webrtc/transport/ipc/WebrtcTCPSocketParent.cpp | 122 + .../webrtc/transport/ipc/WebrtcTCPSocketParent.h | 59 + dom/media/webrtc/transport/ipc/moz.build | 54 + dom/media/webrtc/transport/logging.h | 65 + dom/media/webrtc/transport/m_cpp_utils.h | 25 + dom/media/webrtc/transport/mdns_service/Cargo.toml | 14 + .../webrtc/transport/mdns_service/mdns_service.h | 28 + dom/media/webrtc/transport/mdns_service/src/lib.rs | 843 + dom/media/webrtc/transport/mediapacket.cpp | 145 + dom/media/webrtc/transport/mediapacket.h | 117 + dom/media/webrtc/transport/moz.build | 23 + .../webrtc/transport/nr_socket_proxy_config.cpp | 34 + .../webrtc/transport/nr_socket_proxy_config.h | 41 + dom/media/webrtc/transport/nr_socket_prsock.cpp | 1727 ++ dom/media/webrtc/transport/nr_socket_prsock.h | 321 + dom/media/webrtc/transport/nr_socket_tcp.cpp | 310 + dom/media/webrtc/transport/nr_socket_tcp.h | 117 + dom/media/webrtc/transport/nr_timer.cpp | 256 + dom/media/webrtc/transport/nricectx.cpp | 1105 + dom/media/webrtc/transport/nricectx.h | 421 + dom/media/webrtc/transport/nricemediastream.cpp | 712 + dom/media/webrtc/transport/nricemediastream.h | 225 + dom/media/webrtc/transport/nriceresolver.cpp | 234 + dom/media/webrtc/transport/nriceresolver.h | 119 + dom/media/webrtc/transport/nriceresolverfake.cpp | 171 + dom/media/webrtc/transport/nriceresolverfake.h | 137 + dom/media/webrtc/transport/nricestunaddr.cpp | 93 + dom/media/webrtc/transport/nricestunaddr.h | 36 + .../webrtc/transport/nrinterfaceprioritizer.cpp | 255 + .../webrtc/transport/nrinterfaceprioritizer.h | 17 + dom/media/webrtc/transport/rlogconnector.cpp | 186 + dom/media/webrtc/transport/rlogconnector.h | 127 + dom/media/webrtc/transport/runnable_utils.h | 222 + dom/media/webrtc/transport/sigslot.h | 619 + dom/media/webrtc/transport/simpletokenbucket.cpp | 60 + dom/media/webrtc/transport/simpletokenbucket.h | 54 + dom/media/webrtc/transport/srtp/README_MOZILLA | 7 + dom/media/webrtc/transport/srtp/moz.build | 8 + dom/media/webrtc/transport/stun_socket_filter.cpp | 431 + dom/media/webrtc/transport/stun_socket_filter.h | 41 + .../webrtc/transport/test/TestSyncRunnable.cpp | 56 + .../test/buffered_stun_socket_unittest.cpp | 244 + 
dom/media/webrtc/transport/test/dummysocket.h | 217 + .../transport/test/gtest_ringbuffer_dumper.h | 78 + dom/media/webrtc/transport/test/gtest_utils.h | 201 + dom/media/webrtc/transport/test/ice_unittest.cpp | 4398 ++++ dom/media/webrtc/transport/test/moz.build | 103 + .../webrtc/transport/test/mtransport_test_utils.h | 57 + .../transport/test/multi_tcp_socket_unittest.cpp | 500 + .../webrtc/transport/test/nrappkit_unittest.cpp | 123 + .../test/proxy_tunnel_socket_unittest.cpp | 277 + .../transport/test/rlogconnector_unittest.cpp | 255 + .../transport/test/runnable_utils_unittest.cpp | 353 + dom/media/webrtc/transport/test/sctp_unittest.cpp | 381 + .../transport/test/simpletokenbucket_unittest.cpp | 114 + .../test/sockettransportservice_unittest.cpp | 180 + dom/media/webrtc/transport/test/stunserver.cpp | 652 + dom/media/webrtc/transport/test/stunserver.h | 123 + .../transport/test/test_nr_socket_ice_unittest.cpp | 409 + .../transport/test/test_nr_socket_unittest.cpp | 799 + .../webrtc/transport/test/transport_unittests.cpp | 1396 ++ dom/media/webrtc/transport/test/turn_unittest.cpp | 431 + .../transport/test/webrtcproxychannel_unittest.cpp | 753 + dom/media/webrtc/transport/test_nr_socket.cpp | 1137 + dom/media/webrtc/transport/test_nr_socket.h | 366 + dom/media/webrtc/transport/third_party/moz.build | 39 + .../webrtc/transport/third_party/nICEr/COPYRIGHT | 36 + .../webrtc/transport/third_party/nICEr/README | 74 + .../webrtc/transport/third_party/nICEr/moz.yaml | 117 + .../webrtc/transport/third_party/nICEr/nicer.gyp | 276 + .../third_party/nICEr/non-unified-build.patch | 40 + .../third_party/nICEr/src/crypto/nr_crypto.c | 67 + .../third_party/nICEr/src/crypto/nr_crypto.h | 52 + .../third_party/nICEr/src/ice/ice_candidate.c | 1052 + .../third_party/nICEr/src/ice/ice_candidate.h | 124 + .../third_party/nICEr/src/ice/ice_candidate_pair.c | 689 + .../third_party/nICEr/src/ice/ice_candidate_pair.h | 101 + .../third_party/nICEr/src/ice/ice_codeword.h | 41 + .../third_party/nICEr/src/ice/ice_component.c | 1786 ++ .../third_party/nICEr/src/ice/ice_component.h | 111 + .../transport/third_party/nICEr/src/ice/ice_ctx.c | 1125 + .../transport/third_party/nICEr/src/ice/ice_ctx.h | 188 + .../third_party/nICEr/src/ice/ice_handler.h | 84 + .../third_party/nICEr/src/ice/ice_media_stream.c | 1087 + .../third_party/nICEr/src/ice/ice_media_stream.h | 146 + .../third_party/nICEr/src/ice/ice_parser.c | 564 + .../third_party/nICEr/src/ice/ice_peer_ctx.c | 875 + .../third_party/nICEr/src/ice/ice_peer_ctx.h | 101 + .../transport/third_party/nICEr/src/ice/ice_reg.h | 81 + .../third_party/nICEr/src/ice/ice_socket.c | 404 + .../third_party/nICEr/src/ice/ice_socket.h | 98 + .../third_party/nICEr/src/net/local_addr.c | 70 + .../third_party/nICEr/src/net/local_addr.h | 62 + .../nICEr/src/net/nr_interface_prioritizer.c | 88 + .../nICEr/src/net/nr_interface_prioritizer.h | 66 + .../third_party/nICEr/src/net/nr_resolver.c | 85 + .../third_party/nICEr/src/net/nr_resolver.h | 96 + .../third_party/nICEr/src/net/nr_socket.c | 187 + .../third_party/nICEr/src/net/nr_socket.h | 123 + .../third_party/nICEr/src/net/nr_socket_local.h | 41 + .../nICEr/src/net/nr_socket_multi_tcp.c | 642 + .../nICEr/src/net/nr_socket_multi_tcp.h | 53 + .../third_party/nICEr/src/net/nr_socket_wrapper.c | 84 + .../third_party/nICEr/src/net/nr_socket_wrapper.h | 63 + .../third_party/nICEr/src/net/transport_addr.c | 559 + .../third_party/nICEr/src/net/transport_addr.h | 128 + .../third_party/nICEr/src/net/transport_addr_reg.c | 230 + 
.../third_party/nICEr/src/net/transport_addr_reg.h | 46 + .../third_party/nICEr/src/stun/addrs-bsd.c | 110 + .../third_party/nICEr/src/stun/addrs-bsd.h | 13 + .../third_party/nICEr/src/stun/addrs-netlink.c | 285 + .../third_party/nICEr/src/stun/addrs-netlink.h | 45 + .../third_party/nICEr/src/stun/addrs-win32.c | 205 + .../third_party/nICEr/src/stun/addrs-win32.h | 13 + .../transport/third_party/nICEr/src/stun/addrs.c | 176 + .../transport/third_party/nICEr/src/stun/addrs.h | 43 + .../nICEr/src/stun/nr_socket_buffered_stun.c | 656 + .../nICEr/src/stun/nr_socket_buffered_stun.h | 66 + .../third_party/nICEr/src/stun/nr_socket_turn.c | 195 + .../third_party/nICEr/src/stun/nr_socket_turn.h | 48 + .../transport/third_party/nICEr/src/stun/stun.h | 218 + .../third_party/nICEr/src/stun/stun_build.c | 611 + .../third_party/nICEr/src/stun/stun_build.h | 147 + .../third_party/nICEr/src/stun/stun_client_ctx.c | 888 + .../third_party/nICEr/src/stun/stun_client_ctx.h | 200 + .../third_party/nICEr/src/stun/stun_codec.c | 1550 ++ .../third_party/nICEr/src/stun/stun_codec.h | 78 + .../third_party/nICEr/src/stun/stun_hint.c | 245 + .../third_party/nICEr/src/stun/stun_hint.h | 44 + .../third_party/nICEr/src/stun/stun_msg.c | 364 + .../third_party/nICEr/src/stun/stun_msg.h | 208 + .../third_party/nICEr/src/stun/stun_proc.c | 554 + .../third_party/nICEr/src/stun/stun_proc.h | 53 + .../third_party/nICEr/src/stun/stun_reg.h | 58 + .../third_party/nICEr/src/stun/stun_server_ctx.c | 468 + .../third_party/nICEr/src/stun/stun_server_ctx.h | 80 + .../third_party/nICEr/src/stun/stun_util.c | 352 + .../third_party/nICEr/src/stun/stun_util.h | 62 + .../third_party/nICEr/src/stun/turn_client_ctx.c | 1277 ++ .../third_party/nICEr/src/stun/turn_client_ctx.h | 161 + .../transport/third_party/nICEr/src/util/cb_args.c | 57 + .../transport/third_party/nICEr/src/util/cb_args.h | 41 + .../third_party/nICEr/src/util/ice_util.c | 71 + .../third_party/nICEr/src/util/ice_util.h | 41 + .../transport/third_party/nrappkit/COPYRIGHT | 159 + .../webrtc/transport/third_party/nrappkit/README | 133 + .../transport/third_party/nrappkit/README_MOZILLA | 21 + .../transport/third_party/nrappkit/nrappkit.gyp | 251 + .../third_party/nrappkit/src/event/async_timer.h | 54 + .../third_party/nrappkit/src/event/async_wait.h | 83 + .../nrappkit/src/event/async_wait_int.h | 62 + .../transport/third_party/nrappkit/src/log/r_log.c | 696 + .../transport/third_party/nrappkit/src/log/r_log.h | 85 + .../third_party/nrappkit/src/plugin/nr_plugin.h | 57 + .../src/port/android/include/android_funcs.h | 62 + .../src/port/android/include/csi_platform.h | 55 + .../nrappkit/src/port/android/include/sys/ttycom.h | 38 + .../nrappkit/src/port/android/port-impl.mk | 31 + .../src/port/darwin/include/csi_platform.h | 57 + .../nrappkit/src/port/generic/include/sys/queue.h | 562 + .../nrappkit/src/port/linux/include/csi_platform.h | 55 + .../nrappkit/src/port/linux/include/linux_funcs.h | 62 + .../nrappkit/src/port/linux/include/sys/ttycom.h | 38 + .../nrappkit/src/port/linux/port-impl.mk | 31 + .../nrappkit/src/port/win32/include/csi_platform.h | 107 + .../third_party/nrappkit/src/registry/c2ru.c | 320 + .../third_party/nrappkit/src/registry/c2ru.h | 96 + .../third_party/nrappkit/src/registry/registry.c | 604 + .../third_party/nrappkit/src/registry/registry.h | 154 + .../nrappkit/src/registry/registry_int.h | 97 + .../nrappkit/src/registry/registry_local.c | 1168 + .../nrappkit/src/registry/registry_vtbl.h | 96 + .../third_party/nrappkit/src/registry/registrycb.c | 440 + 
.../third_party/nrappkit/src/share/nr_api.h | 51 + .../third_party/nrappkit/src/share/nr_common.h | 108 + .../third_party/nrappkit/src/share/nr_reg_keys.h | 167 + .../third_party/nrappkit/src/stats/nrstats.h | 118 + .../third_party/nrappkit/src/util/byteorder.c | 73 + .../third_party/nrappkit/src/util/byteorder.h | 47 + .../transport/third_party/nrappkit/src/util/hex.c | 109 + .../transport/third_party/nrappkit/src/util/hex.h | 47 + .../third_party/nrappkit/src/util/libekr/assoc.h | 90 + .../third_party/nrappkit/src/util/libekr/debug.c | 127 + .../third_party/nrappkit/src/util/libekr/debug.h | 94 + .../third_party/nrappkit/src/util/libekr/r_assoc.c | 539 + .../third_party/nrappkit/src/util/libekr/r_assoc.h | 126 + .../nrappkit/src/util/libekr/r_common.h | 100 + .../third_party/nrappkit/src/util/libekr/r_crc32.c | 175 + .../third_party/nrappkit/src/util/libekr/r_crc32.h | 14 + .../third_party/nrappkit/src/util/libekr/r_data.c | 248 + .../third_party/nrappkit/src/util/libekr/r_data.h | 108 + .../nrappkit/src/util/libekr/r_defaults.h | 91 + .../nrappkit/src/util/libekr/r_errors.c | 136 + .../nrappkit/src/util/libekr/r_errors.h | 127 + .../nrappkit/src/util/libekr/r_includes.h | 98 + .../third_party/nrappkit/src/util/libekr/r_list.c | 273 + .../third_party/nrappkit/src/util/libekr/r_list.h | 106 + .../nrappkit/src/util/libekr/r_macros.h | 137 + .../nrappkit/src/util/libekr/r_memory.c | 198 + .../nrappkit/src/util/libekr/r_memory.h | 101 + .../nrappkit/src/util/libekr/r_replace.c | 107 + .../nrappkit/src/util/libekr/r_thread.h | 68 + .../third_party/nrappkit/src/util/libekr/r_time.c | 235 + .../third_party/nrappkit/src/util/libekr/r_time.h | 109 + .../third_party/nrappkit/src/util/libekr/r_types.h | 213 + .../third_party/nrappkit/src/util/p_buf.c | 215 + .../third_party/nrappkit/src/util/p_buf.h | 72 + .../transport/third_party/nrappkit/src/util/util.c | 775 + .../transport/third_party/nrappkit/src/util/util.h | 68 + dom/media/webrtc/transport/transportflow.cpp | 74 + dom/media/webrtc/transport/transportflow.h | 105 + dom/media/webrtc/transport/transportlayer.cpp | 49 + dom/media/webrtc/transport/transportlayer.h | 108 + dom/media/webrtc/transport/transportlayerdtls.cpp | 1558 ++ dom/media/webrtc/transport/transportlayerdtls.h | 187 + dom/media/webrtc/transport/transportlayerice.cpp | 168 + dom/media/webrtc/transport/transportlayerice.h | 60 + dom/media/webrtc/transport/transportlayerlog.cpp | 48 + dom/media/webrtc/transport/transportlayerlog.h | 38 + .../webrtc/transport/transportlayerloopback.cpp | 119 + .../webrtc/transport/transportlayerloopback.h | 108 + dom/media/webrtc/transport/transportlayersrtp.cpp | 222 + dom/media/webrtc/transport/transportlayersrtp.h | 43 + dom/media/webrtc/transportbridge/MediaPipeline.cpp | 1596 ++ dom/media/webrtc/transportbridge/MediaPipeline.h | 455 + .../webrtc/transportbridge/MediaPipelineFilter.cpp | 152 + .../webrtc/transportbridge/MediaPipelineFilter.h | 89 + dom/media/webrtc/transportbridge/RtpLogger.cpp | 67 + dom/media/webrtc/transportbridge/RtpLogger.h | 28 + dom/media/webrtc/transportbridge/moz.build | 27 + dom/media/webspeech/moz.build | 12 + .../recognition/OnlineSpeechRecognitionService.cpp | 462 + .../recognition/OnlineSpeechRecognitionService.h | 132 + dom/media/webspeech/recognition/SpeechGrammar.cpp | 57 + dom/media/webspeech/recognition/SpeechGrammar.h | 64 + .../webspeech/recognition/SpeechGrammarList.cpp | 76 + .../webspeech/recognition/SpeechGrammarList.h | 73 + .../webspeech/recognition/SpeechRecognition.cpp | 1170 + 
.../webspeech/recognition/SpeechRecognition.h | 314 + .../recognition/SpeechRecognitionAlternative.cpp | 44 + .../recognition/SpeechRecognitionAlternative.h | 49 + .../recognition/SpeechRecognitionResult.cpp | 59 + .../recognition/SpeechRecognitionResult.h | 54 + .../recognition/SpeechRecognitionResultList.cpp | 58 + .../recognition/SpeechRecognitionResultList.h | 53 + .../webspeech/recognition/SpeechTrackListener.cpp | 100 + .../webspeech/recognition/SpeechTrackListener.h | 55 + dom/media/webspeech/recognition/endpointer.cc | 193 + dom/media/webspeech/recognition/endpointer.h | 180 + .../webspeech/recognition/energy_endpointer.cc | 393 + .../webspeech/recognition/energy_endpointer.h | 180 + .../recognition/energy_endpointer_params.cc | 77 + .../recognition/energy_endpointer_params.h | 159 + dom/media/webspeech/recognition/moz.build | 64 + .../recognition/nsISpeechRecognitionService.idl | 43 + .../test/FakeSpeechRecognitionService.cpp | 118 + .../test/FakeSpeechRecognitionService.h | 40 + dom/media/webspeech/recognition/test/head.js | 200 + dom/media/webspeech/recognition/test/hello.ogg | Bin 0 -> 11328 bytes .../webspeech/recognition/test/hello.ogg^headers^ | 1 + .../recognition/test/http_requesthandler.sjs | 87 + .../webspeech/recognition/test/mochitest.toml | 44 + dom/media/webspeech/recognition/test/silence.ogg | Bin 0 -> 106941 bytes .../recognition/test/silence.ogg^headers^ | 1 + .../webspeech/recognition/test/sinoid+hello.ogg | Bin 0 -> 29514 bytes .../recognition/test/sinoid+hello.ogg^headers^ | 1 + .../webspeech/recognition/test/test_abort.html | 73 + .../recognition/test/test_audio_capture_error.html | 42 + .../test/test_call_start_from_end_handler.html | 102 + .../recognition/test/test_nested_eventloop.html | 82 + .../recognition/test/test_online_400_response.html | 47 + .../test/test_online_empty_result_handling.html | 48 + .../recognition/test/test_online_hangup.html | 47 + .../recognition/test/test_online_http.html | 87 + .../recognition/test/test_online_http_webkit.html | 88 + .../test_online_malformed_result_handling.html | 48 + .../recognition/test/test_preference_enable.html | 43 + .../test/test_recognition_service_error.html | 45 + .../test_success_without_recognition_service.html | 45 + .../webspeech/recognition/test/test_timeout.html | 42 + dom/media/webspeech/synth/SpeechSynthesis.cpp | 333 + dom/media/webspeech/synth/SpeechSynthesis.h | 88 + .../webspeech/synth/SpeechSynthesisUtterance.cpp | 137 + .../webspeech/synth/SpeechSynthesisUtterance.h | 115 + dom/media/webspeech/synth/SpeechSynthesisVoice.cpp | 72 + dom/media/webspeech/synth/SpeechSynthesisVoice.h | 55 + .../synth/android/SpeechSynthesisService.cpp | 215 + .../synth/android/SpeechSynthesisService.h | 68 + dom/media/webspeech/synth/android/components.conf | 17 + dom/media/webspeech/synth/android/moz.build | 19 + .../synth/cocoa/OSXSpeechSynthesizerService.h | 42 + .../synth/cocoa/OSXSpeechSynthesizerService.mm | 461 + dom/media/webspeech/synth/cocoa/components.conf | 17 + dom/media/webspeech/synth/cocoa/moz.build | 15 + dom/media/webspeech/synth/crashtests/1230428.html | 32 + .../webspeech/synth/crashtests/crashtests.list | 1 + .../webspeech/synth/ipc/PSpeechSynthesis.ipdl | 51 + .../synth/ipc/PSpeechSynthesisRequest.ipdl | 48 + .../webspeech/synth/ipc/SpeechSynthesisChild.cpp | 175 + .../webspeech/synth/ipc/SpeechSynthesisChild.h | 111 + .../webspeech/synth/ipc/SpeechSynthesisParent.cpp | 221 + .../webspeech/synth/ipc/SpeechSynthesisParent.h | 104 + dom/media/webspeech/synth/moz.build | 65 + 
dom/media/webspeech/synth/nsISpeechService.idl | 143 + .../webspeech/synth/nsISynthVoiceRegistry.idl | 82 + dom/media/webspeech/synth/nsSpeechTask.cpp | 389 + dom/media/webspeech/synth/nsSpeechTask.h | 128 + dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp | 790 + dom/media/webspeech/synth/nsSynthVoiceRegistry.h | 101 + .../synth/speechd/SpeechDispatcherService.cpp | 558 + .../synth/speechd/SpeechDispatcherService.h | 67 + dom/media/webspeech/synth/speechd/components.conf | 17 + dom/media/webspeech/synth/speechd/moz.build | 15 + dom/media/webspeech/synth/test/common.js | 104 + dom/media/webspeech/synth/test/components.conf | 17 + .../webspeech/synth/test/file_bfcache_page1.html | 18 + .../webspeech/synth/test/file_bfcache_page2.html | 14 + .../webspeech/synth/test/file_global_queue.html | 69 + .../synth/test/file_global_queue_cancel.html | 88 + .../synth/test/file_global_queue_pause.html | 130 + .../synth/test/file_indirect_service_events.html | 102 + dom/media/webspeech/synth/test/file_setup.html | 96 + .../webspeech/synth/test/file_speech_cancel.html | 100 + .../webspeech/synth/test/file_speech_error.html | 46 + .../webspeech/synth/test/file_speech_queue.html | 86 + .../test/file_speech_repeating_utterance.html | 26 + .../webspeech/synth/test/file_speech_simple.html | 53 + dom/media/webspeech/synth/test/mochitest.toml | 40 + .../webspeech/synth/test/nsFakeSynthServices.cpp | 288 + .../webspeech/synth/test/nsFakeSynthServices.h | 42 + .../synth/test/startup/file_voiceschanged.html | 32 + .../webspeech/synth/test/startup/mochitest.toml | 8 + .../synth/test/startup/test_voiceschanged.html | 32 + dom/media/webspeech/synth/test/test_bfcache.html | 46 + .../webspeech/synth/test/test_global_queue.html | 35 + .../synth/test/test_global_queue_cancel.html | 35 + .../synth/test/test_global_queue_pause.html | 35 + .../synth/test/test_indirect_service_events.html | 36 + dom/media/webspeech/synth/test/test_setup.html | 32 + .../webspeech/synth/test/test_speech_cancel.html | 35 + .../webspeech/synth/test/test_speech_error.html | 35 + .../webspeech/synth/test/test_speech_queue.html | 37 + .../test/test_speech_repeating_utterance.html | 18 + .../webspeech/synth/test/test_speech_simple.html | 34 + dom/media/webspeech/synth/windows/SapiService.cpp | 445 + dom/media/webspeech/synth/windows/SapiService.h | 57 + dom/media/webspeech/synth/windows/components.conf | 17 + dom/media/webspeech/synth/windows/moz.build | 17 + dom/media/webvtt/TextTrack.cpp | 383 + dom/media/webvtt/TextTrack.h | 148 + dom/media/webvtt/TextTrackCue.cpp | 258 + dom/media/webvtt/TextTrackCue.h | 342 + dom/media/webvtt/TextTrackCueList.cpp | 125 + dom/media/webvtt/TextTrackCueList.h | 73 + dom/media/webvtt/TextTrackList.cpp | 190 + dom/media/webvtt/TextTrackList.h | 79 + dom/media/webvtt/TextTrackRegion.cpp | 58 + dom/media/webvtt/TextTrackRegion.h | 138 + dom/media/webvtt/WebVTTListener.cpp | 212 + dom/media/webvtt/WebVTTListener.h | 69 + dom/media/webvtt/WebVTTParserWrapper.sys.mjs | 56 + dom/media/webvtt/components.conf | 14 + dom/media/webvtt/moz.build | 52 + dom/media/webvtt/nsIWebVTTListener.idl | 37 + dom/media/webvtt/nsIWebVTTParserWrapper.idl | 94 + dom/media/webvtt/package.json | 6 + dom/media/webvtt/test/crashtests/1304948.html | 33 + dom/media/webvtt/test/crashtests/1319486.html | 27 + dom/media/webvtt/test/crashtests/1533909.html | 17 + dom/media/webvtt/test/crashtests/882549.html | 13 + dom/media/webvtt/test/crashtests/894104.html | 20 + dom/media/webvtt/test/crashtests/crashtests.list | 5 + 
dom/media/webvtt/test/mochitest/bad-signature.vtt | 1 + dom/media/webvtt/test/mochitest/basic.vtt | 29 + dom/media/webvtt/test/mochitest/bug883173.vtt | 16 + dom/media/webvtt/test/mochitest/long.vtt | 8001 +++++++ dom/media/webvtt/test/mochitest/manifest.js | 27 + dom/media/webvtt/test/mochitest/mochitest.toml | 81 + dom/media/webvtt/test/mochitest/parser.vtt | 6 + dom/media/webvtt/test/mochitest/region.vtt | 6 + dom/media/webvtt/test/mochitest/sequential.vtt | 10 + .../webvtt/test/mochitest/test_bug1018933.html | 50 + .../webvtt/test/mochitest/test_bug1242594.html | 46 + .../webvtt/test/mochitest/test_bug883173.html | 39 + .../webvtt/test/mochitest/test_bug895091.html | 60 + .../webvtt/test/mochitest/test_bug957847.html | 30 + .../mochitest/test_testtrack_cors_no_response.html | 41 + .../webvtt/test/mochitest/test_texttrack.html | 158 + .../test_texttrack_cors_preload_none.html | 40 + .../test_texttrack_mode_change_during_loading.html | 75 + .../webvtt/test/mochitest/test_texttrack_moz.html | 60 + .../webvtt/test/mochitest/test_texttrackcue.html | 299 + .../test/mochitest/test_texttrackcue_moz.html | 34 + .../test/mochitest/test_texttrackevents_video.html | 91 + .../webvtt/test/mochitest/test_texttracklist.html | 51 + .../test/mochitest/test_texttracklist_moz.html | 34 + .../test/mochitest/test_texttrackregion.html | 58 + .../test/mochitest/test_trackelementevent.html | 77 + .../test/mochitest/test_trackelementsrc.html | 50 + .../webvtt/test/mochitest/test_trackevent.html | 69 + .../webvtt/test/mochitest/test_vttparser.html | 44 + .../mochitest/test_webvtt_empty_displaystate.html | 98 + .../mochitest/test_webvtt_event_same_time.html | 63 + .../test_webvtt_infinite_processing_loop.html | 49 + .../mochitest/test_webvtt_overlapping_time.html | 100 + .../test/mochitest/test_webvtt_positionalign.html | 113 + .../webvtt/test/mochitest/test_webvtt_seeking.html | 110 + ...pdate_display_after_adding_or_removing_cue.html | 93 + .../webvtt/test/mochitest/vttPositionAlign.vtt | 86 + dom/media/webvtt/test/reftest/black.mp4 | Bin 0 -> 15036 bytes .../test/reftest/cues_time_overlapping.webvtt | 7 + dom/media/webvtt/test/reftest/reftest.list | 3 + .../test/reftest/vtt_overlapping_time-ref.html | 29 + .../webvtt/test/reftest/vtt_overlapping_time.html | 30 + .../test/reftest/vtt_reflow_display-ref.html | 28 + .../webvtt/test/reftest/vtt_reflow_display.css | 33 + .../webvtt/test/reftest/vtt_reflow_display.html | 37 + .../vtt_update_display_after_removed_cue.html | 36 + .../vtt_update_display_after_removed_cue_ref.html | 6 + dom/media/webvtt/test/reftest/white.webm | Bin 0 -> 10880 bytes dom/media/webvtt/test/xpcshell/test_parser.js | 158 + dom/media/webvtt/test/xpcshell/xpcshell.toml | 3 + dom/media/webvtt/update-webvtt.js | 61 + dom/media/webvtt/vtt.sys.mjs | 1644 ++ 4647 files changed, 674223 insertions(+) create mode 100644 dom/media/ADTSDecoder.cpp create mode 100644 dom/media/ADTSDecoder.h create mode 100644 dom/media/ADTSDemuxer.cpp create mode 100644 dom/media/ADTSDemuxer.h create mode 100644 dom/media/AsyncLogger.h create mode 100644 dom/media/AudibilityMonitor.h create mode 100644 dom/media/AudioBufferUtils.h create mode 100644 dom/media/AudioCaptureTrack.cpp create mode 100644 dom/media/AudioCaptureTrack.h create mode 100644 dom/media/AudioChannelFormat.cpp create mode 100644 dom/media/AudioChannelFormat.h create mode 100644 dom/media/AudioCompactor.cpp create mode 100644 dom/media/AudioCompactor.h create mode 100644 dom/media/AudioConfig.cpp create mode 100644 dom/media/AudioConfig.h create mode 
100644 dom/media/AudioConverter.cpp create mode 100644 dom/media/AudioConverter.h create mode 100644 dom/media/AudioDeviceInfo.cpp create mode 100644 dom/media/AudioDeviceInfo.h create mode 100644 dom/media/AudioInputSource.cpp create mode 100644 dom/media/AudioInputSource.h create mode 100644 dom/media/AudioMixer.h create mode 100644 dom/media/AudioPacketizer.h create mode 100644 dom/media/AudioRingBuffer.cpp create mode 100644 dom/media/AudioRingBuffer.h create mode 100644 dom/media/AudioSampleFormat.h create mode 100644 dom/media/AudioSegment.cpp create mode 100644 dom/media/AudioSegment.h create mode 100644 dom/media/AudioStream.cpp create mode 100644 dom/media/AudioStream.h create mode 100644 dom/media/AudioStreamTrack.cpp create mode 100644 dom/media/AudioStreamTrack.h create mode 100644 dom/media/AudioTrack.cpp create mode 100644 dom/media/AudioTrack.h create mode 100644 dom/media/AudioTrackList.cpp create mode 100644 dom/media/AudioTrackList.h create mode 100644 dom/media/BackgroundVideoDecodingPermissionObserver.cpp create mode 100644 dom/media/BackgroundVideoDecodingPermissionObserver.h create mode 100644 dom/media/BaseMediaResource.cpp create mode 100644 dom/media/BaseMediaResource.h create mode 100644 dom/media/Benchmark.cpp create mode 100644 dom/media/Benchmark.h create mode 100644 dom/media/BitReader.cpp create mode 100644 dom/media/BitReader.h create mode 100644 dom/media/BitWriter.cpp create mode 100644 dom/media/BitWriter.h create mode 100644 dom/media/BufferMediaResource.h create mode 100644 dom/media/BufferReader.h create mode 100644 dom/media/ByteWriter.h create mode 100644 dom/media/CallbackThreadRegistry.cpp create mode 100644 dom/media/CallbackThreadRegistry.h create mode 100644 dom/media/CanvasCaptureMediaStream.cpp create mode 100644 dom/media/CanvasCaptureMediaStream.h create mode 100644 dom/media/ChannelMediaDecoder.cpp create mode 100644 dom/media/ChannelMediaDecoder.h create mode 100644 dom/media/ChannelMediaResource.cpp create mode 100644 dom/media/ChannelMediaResource.h create mode 100644 dom/media/CloneableWithRangeMediaResource.cpp create mode 100644 dom/media/CloneableWithRangeMediaResource.h create mode 100644 dom/media/CrossGraphPort.cpp create mode 100644 dom/media/CrossGraphPort.h create mode 100644 dom/media/CubebInputStream.cpp create mode 100644 dom/media/CubebInputStream.h create mode 100644 dom/media/CubebUtils.cpp create mode 100644 dom/media/CubebUtils.h create mode 100644 dom/media/DOMMediaStream.cpp create mode 100644 dom/media/DOMMediaStream.h create mode 100644 dom/media/DecoderTraits.cpp create mode 100644 dom/media/DecoderTraits.h create mode 100644 dom/media/DeviceInputTrack.cpp create mode 100644 dom/media/DeviceInputTrack.h create mode 100644 dom/media/DriftCompensation.h create mode 100644 dom/media/EncoderTraits.cpp create mode 100644 dom/media/EncoderTraits.h create mode 100644 dom/media/ExternalEngineStateMachine.cpp create mode 100644 dom/media/ExternalEngineStateMachine.h create mode 100644 dom/media/FileBlockCache.cpp create mode 100644 dom/media/FileBlockCache.h create mode 100644 dom/media/FileMediaResource.cpp create mode 100644 dom/media/FileMediaResource.h create mode 100644 dom/media/ForwardedInputTrack.cpp create mode 100644 dom/media/ForwardedInputTrack.h create mode 100644 dom/media/FrameStatistics.h create mode 100644 dom/media/GetUserMediaRequest.cpp create mode 100644 dom/media/GetUserMediaRequest.h create mode 100644 dom/media/GraphDriver.cpp create mode 100644 dom/media/GraphDriver.h create mode 100644 
dom/media/GraphRunner.cpp create mode 100644 dom/media/GraphRunner.h create mode 100644 dom/media/IdpSandbox.sys.mjs create mode 100644 dom/media/ImageToI420.cpp create mode 100644 dom/media/ImageToI420.h create mode 100644 dom/media/Intervals.h create mode 100644 dom/media/MPSCQueue.h create mode 100644 dom/media/MediaBlockCacheBase.h create mode 100644 dom/media/MediaCache.cpp create mode 100644 dom/media/MediaCache.h create mode 100644 dom/media/MediaChannelStatistics.h create mode 100644 dom/media/MediaContainerType.cpp create mode 100644 dom/media/MediaContainerType.h create mode 100644 dom/media/MediaData.cpp create mode 100644 dom/media/MediaData.h create mode 100644 dom/media/MediaDataDemuxer.h create mode 100644 dom/media/MediaDecoder.cpp create mode 100644 dom/media/MediaDecoder.h create mode 100644 dom/media/MediaDecoderOwner.h create mode 100644 dom/media/MediaDecoderStateMachine.cpp create mode 100644 dom/media/MediaDecoderStateMachine.h create mode 100644 dom/media/MediaDecoderStateMachineBase.cpp create mode 100644 dom/media/MediaDecoderStateMachineBase.h create mode 100644 dom/media/MediaDeviceInfo.cpp create mode 100644 dom/media/MediaDeviceInfo.h create mode 100644 dom/media/MediaDevices.cpp create mode 100644 dom/media/MediaDevices.h create mode 100644 dom/media/MediaEventSource.h create mode 100644 dom/media/MediaFormatReader.cpp create mode 100644 dom/media/MediaFormatReader.h create mode 100644 dom/media/MediaInfo.cpp create mode 100644 dom/media/MediaInfo.h create mode 100644 dom/media/MediaMIMETypes.cpp create mode 100644 dom/media/MediaMIMETypes.h create mode 100644 dom/media/MediaManager.cpp create mode 100644 dom/media/MediaManager.h create mode 100644 dom/media/MediaMetadataManager.h create mode 100644 dom/media/MediaPlaybackDelayPolicy.cpp create mode 100644 dom/media/MediaPlaybackDelayPolicy.h create mode 100644 dom/media/MediaPromiseDefs.h create mode 100644 dom/media/MediaQueue.h create mode 100644 dom/media/MediaRecorder.cpp create mode 100644 dom/media/MediaRecorder.h create mode 100644 dom/media/MediaResource.cpp create mode 100644 dom/media/MediaResource.h create mode 100644 dom/media/MediaResourceCallback.h create mode 100644 dom/media/MediaResult.h create mode 100644 dom/media/MediaSegment.h create mode 100644 dom/media/MediaShutdownManager.cpp create mode 100644 dom/media/MediaShutdownManager.h create mode 100644 dom/media/MediaSpan.h create mode 100644 dom/media/MediaStatistics.h create mode 100644 dom/media/MediaStreamError.cpp create mode 100644 dom/media/MediaStreamError.h create mode 100644 dom/media/MediaStreamTrack.cpp create mode 100644 dom/media/MediaStreamTrack.h create mode 100644 dom/media/MediaStreamWindowCapturer.cpp create mode 100644 dom/media/MediaStreamWindowCapturer.h create mode 100644 dom/media/MediaTimer.cpp create mode 100644 dom/media/MediaTimer.h create mode 100644 dom/media/MediaTrack.cpp create mode 100644 dom/media/MediaTrack.h create mode 100644 dom/media/MediaTrackGraph.cpp create mode 100644 dom/media/MediaTrackGraph.h create mode 100644 dom/media/MediaTrackGraphImpl.h create mode 100644 dom/media/MediaTrackList.cpp create mode 100644 dom/media/MediaTrackList.h create mode 100644 dom/media/MediaTrackListener.cpp create mode 100644 dom/media/MediaTrackListener.h create mode 100644 dom/media/MemoryBlockCache.cpp create mode 100644 dom/media/MemoryBlockCache.h create mode 100644 dom/media/Pacer.h create mode 100644 dom/media/PeerConnection.sys.mjs create mode 100644 dom/media/PeerConnectionIdp.sys.mjs create mode 100644 
dom/media/PrincipalChangeObserver.h create mode 100644 dom/media/PrincipalHandle.h create mode 100644 dom/media/QueueObject.cpp create mode 100644 dom/media/QueueObject.h create mode 100644 dom/media/ReaderProxy.cpp create mode 100644 dom/media/ReaderProxy.h create mode 100644 dom/media/SeekJob.cpp create mode 100644 dom/media/SeekJob.h create mode 100644 dom/media/SeekTarget.h create mode 100644 dom/media/SelfRef.h create mode 100644 dom/media/SharedBuffer.h create mode 100644 dom/media/TimeUnits.cpp create mode 100644 dom/media/TimeUnits.h create mode 100644 dom/media/Tracing.cpp create mode 100644 dom/media/Tracing.h create mode 100644 dom/media/UnderrunHandler.h create mode 100644 dom/media/UnderrunHandlerLinux.cpp create mode 100644 dom/media/UnderrunHandlerNoop.cpp create mode 100644 dom/media/VideoFrameContainer.cpp create mode 100644 dom/media/VideoFrameContainer.h create mode 100644 dom/media/VideoFrameConverter.h create mode 100644 dom/media/VideoLimits.h create mode 100644 dom/media/VideoOutput.h create mode 100644 dom/media/VideoPlaybackQuality.cpp create mode 100644 dom/media/VideoPlaybackQuality.h create mode 100644 dom/media/VideoSegment.cpp create mode 100644 dom/media/VideoSegment.h create mode 100644 dom/media/VideoStreamTrack.cpp create mode 100644 dom/media/VideoStreamTrack.h create mode 100644 dom/media/VideoTrack.cpp create mode 100644 dom/media/VideoTrack.h create mode 100644 dom/media/VideoTrackList.cpp create mode 100644 dom/media/VideoTrackList.h create mode 100644 dom/media/VideoUtils.cpp create mode 100644 dom/media/VideoUtils.h create mode 100644 dom/media/WavDumper.h create mode 100644 dom/media/WebMSample.h create mode 100644 dom/media/XiphExtradata.cpp create mode 100644 dom/media/XiphExtradata.h create mode 100644 dom/media/autoplay/AutoplayPolicy.cpp create mode 100644 dom/media/autoplay/AutoplayPolicy.h create mode 100644 dom/media/autoplay/GVAutoplayPermissionRequest.cpp create mode 100644 dom/media/autoplay/GVAutoplayPermissionRequest.h create mode 100644 dom/media/autoplay/GVAutoplayRequestStatusIPC.h create mode 100644 dom/media/autoplay/GVAutoplayRequestUtils.h create mode 100644 dom/media/autoplay/moz.build create mode 100644 dom/media/autoplay/nsIAutoplay.idl create mode 100644 dom/media/autoplay/test/browser/audio.ogg create mode 100644 dom/media/autoplay/test/browser/browser.toml create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_detection_click_to_play.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_detection_global_and_site_sticky.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_detection_global_sticky.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_play_twice.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_request_permission.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_touchScroll.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_user_gestures.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_webRTC_permission.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_web_audio.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_policy_web_audio_with_gum.js create mode 100644 dom/media/autoplay/test/browser/browser_autoplay_videoDocument.js create mode 100644 dom/media/autoplay/test/browser/file_empty.html create mode 100644 
dom/media/autoplay/test/browser/file_mediaplayback_frame.html create mode 100644 dom/media/autoplay/test/browser/file_nonAutoplayAudio.html create mode 100644 dom/media/autoplay/test/browser/file_video.html create mode 100644 dom/media/autoplay/test/browser/head.js create mode 100644 dom/media/autoplay/test/mochitest/AutoplayTestUtils.js create mode 100644 dom/media/autoplay/test/mochitest/file_autoplay_gv_play_request_frame.html create mode 100644 dom/media/autoplay/test/mochitest/file_autoplay_gv_play_request_window.html create mode 100644 dom/media/autoplay/test/mochitest/file_autoplay_policy_activation_frame.html create mode 100644 dom/media/autoplay/test/mochitest/file_autoplay_policy_activation_window.html create mode 100644 dom/media/autoplay/test/mochitest/file_autoplay_policy_eventdown_activation.html create mode 100644 dom/media/autoplay/test/mochitest/file_autoplay_policy_key_blacklist.html create mode 100644 dom/media/autoplay/test/mochitest/file_autoplay_policy_play_before_loadedmetadata.html create mode 100644 dom/media/autoplay/test/mochitest/file_autoplay_policy_unmute_pauses.html create mode 100644 dom/media/autoplay/test/mochitest/mochitest.toml create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_contentEditable.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_gv_play_request.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_activation.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_eventdown_activation.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_key_blacklist.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_permission.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_play_before_loadedmetadata.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_unmute_pauses.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_web_audio_AudioParamStream.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_web_audio_createMediaStreamSource.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_web_audio_mediaElementAudioSourceNode.html create mode 100644 dom/media/autoplay/test/mochitest/test_autoplay_policy_web_audio_notResumePageInvokedSuspendedAudioContext.html create mode 100644 dom/media/autoplay/test/mochitest/test_streams_autoplay.html create mode 100755 dom/media/benchmark/sample create mode 100644 dom/media/bridge/IPeerConnection.idl create mode 100644 dom/media/bridge/MediaModule.cpp create mode 100644 dom/media/bridge/components.conf create mode 100644 dom/media/bridge/moz.build create mode 100644 dom/media/components.conf create mode 100644 dom/media/doctor/DDLifetime.cpp create mode 100644 dom/media/doctor/DDLifetime.h create mode 100644 dom/media/doctor/DDLifetimes.cpp create mode 100644 dom/media/doctor/DDLifetimes.h create mode 100644 dom/media/doctor/DDLogCategory.cpp create mode 100644 dom/media/doctor/DDLogCategory.h create mode 100644 dom/media/doctor/DDLogMessage.cpp create mode 100644 dom/media/doctor/DDLogMessage.h create mode 100644 dom/media/doctor/DDLogObject.cpp create mode 100644 dom/media/doctor/DDLogObject.h create mode 100644 dom/media/doctor/DDLogUtils.cpp create mode 100644 dom/media/doctor/DDLogUtils.h create mode 
100644 dom/media/doctor/DDLogValue.cpp create mode 100644 dom/media/doctor/DDLogValue.h create mode 100644 dom/media/doctor/DDLoggedTypeTraits.h create mode 100644 dom/media/doctor/DDMediaLog.cpp create mode 100644 dom/media/doctor/DDMediaLog.h create mode 100644 dom/media/doctor/DDMediaLogs.cpp create mode 100644 dom/media/doctor/DDMediaLogs.h create mode 100644 dom/media/doctor/DDMessageIndex.h create mode 100644 dom/media/doctor/DDTimeStamp.cpp create mode 100644 dom/media/doctor/DDTimeStamp.h create mode 100644 dom/media/doctor/DecoderDoctorDiagnostics.cpp create mode 100644 dom/media/doctor/DecoderDoctorDiagnostics.h create mode 100644 dom/media/doctor/DecoderDoctorLogger.cpp create mode 100644 dom/media/doctor/DecoderDoctorLogger.h create mode 100644 dom/media/doctor/MultiWriterQueue.h create mode 100644 dom/media/doctor/RollingNumber.h create mode 100644 dom/media/doctor/moz.build create mode 100644 dom/media/doctor/test/browser/browser.toml create mode 100644 dom/media/doctor/test/browser/browser_decoderDoctor.js create mode 100644 dom/media/doctor/test/browser/browser_doctor_notification.js create mode 100644 dom/media/doctor/test/gtest/TestMultiWriterQueue.cpp create mode 100644 dom/media/doctor/test/gtest/TestRollingNumber.cpp create mode 100644 dom/media/doctor/test/gtest/moz.build create mode 100644 dom/media/driftcontrol/AudioChunkList.cpp create mode 100644 dom/media/driftcontrol/AudioChunkList.h create mode 100644 dom/media/driftcontrol/AudioDriftCorrection.cpp create mode 100644 dom/media/driftcontrol/AudioDriftCorrection.h create mode 100644 dom/media/driftcontrol/AudioResampler.cpp create mode 100644 dom/media/driftcontrol/AudioResampler.h create mode 100644 dom/media/driftcontrol/DriftController.cpp create mode 100644 dom/media/driftcontrol/DriftController.h create mode 100644 dom/media/driftcontrol/DynamicResampler.cpp create mode 100644 dom/media/driftcontrol/DynamicResampler.h create mode 100644 dom/media/driftcontrol/gtest/TestAudioChunkList.cpp create mode 100644 dom/media/driftcontrol/gtest/TestAudioDriftCorrection.cpp create mode 100644 dom/media/driftcontrol/gtest/TestAudioResampler.cpp create mode 100644 dom/media/driftcontrol/gtest/TestDriftController.cpp create mode 100644 dom/media/driftcontrol/gtest/TestDynamicResampler.cpp create mode 100644 dom/media/driftcontrol/gtest/moz.build create mode 100644 dom/media/driftcontrol/moz.build create mode 100755 dom/media/driftcontrol/plot.py create mode 100644 dom/media/eme/CDMCaps.cpp create mode 100644 dom/media/eme/CDMCaps.h create mode 100644 dom/media/eme/CDMProxy.h create mode 100644 dom/media/eme/DecryptorProxyCallback.h create mode 100644 dom/media/eme/DetailedPromise.cpp create mode 100644 dom/media/eme/DetailedPromise.h create mode 100644 dom/media/eme/EMEUtils.cpp create mode 100644 dom/media/eme/EMEUtils.h create mode 100644 dom/media/eme/KeySystemConfig.cpp create mode 100644 dom/media/eme/KeySystemConfig.h create mode 100644 dom/media/eme/KeySystemNames.h create mode 100644 dom/media/eme/MediaEncryptedEvent.cpp create mode 100644 dom/media/eme/MediaEncryptedEvent.h create mode 100644 dom/media/eme/MediaKeyError.cpp create mode 100644 dom/media/eme/MediaKeyError.h create mode 100644 dom/media/eme/MediaKeyMessageEvent.cpp create mode 100644 dom/media/eme/MediaKeyMessageEvent.h create mode 100644 dom/media/eme/MediaKeySession.cpp create mode 100644 dom/media/eme/MediaKeySession.h create mode 100644 dom/media/eme/MediaKeyStatusMap.cpp create mode 100644 dom/media/eme/MediaKeyStatusMap.h create mode 100644 
dom/media/eme/MediaKeySystemAccess.cpp create mode 100644 dom/media/eme/MediaKeySystemAccess.h create mode 100644 dom/media/eme/MediaKeySystemAccessManager.cpp create mode 100644 dom/media/eme/MediaKeySystemAccessManager.h create mode 100644 dom/media/eme/MediaKeySystemAccessPermissionRequest.cpp create mode 100644 dom/media/eme/MediaKeySystemAccessPermissionRequest.h create mode 100644 dom/media/eme/MediaKeys.cpp create mode 100644 dom/media/eme/MediaKeys.h create mode 100644 dom/media/eme/clearkey/ArrayUtils.h create mode 100644 dom/media/eme/clearkey/BigEndian.h create mode 100644 dom/media/eme/clearkey/ClearKeyBase64.cpp create mode 100644 dom/media/eme/clearkey/ClearKeyBase64.h create mode 100644 dom/media/eme/clearkey/ClearKeyDecryptionManager.cpp create mode 100644 dom/media/eme/clearkey/ClearKeyDecryptionManager.h create mode 100644 dom/media/eme/clearkey/ClearKeyPersistence.cpp create mode 100644 dom/media/eme/clearkey/ClearKeyPersistence.h create mode 100644 dom/media/eme/clearkey/ClearKeySession.cpp create mode 100644 dom/media/eme/clearkey/ClearKeySession.h create mode 100644 dom/media/eme/clearkey/ClearKeySessionManager.cpp create mode 100644 dom/media/eme/clearkey/ClearKeySessionManager.h create mode 100644 dom/media/eme/clearkey/ClearKeyStorage.cpp create mode 100644 dom/media/eme/clearkey/ClearKeyStorage.h create mode 100644 dom/media/eme/clearkey/ClearKeyUtils.cpp create mode 100644 dom/media/eme/clearkey/ClearKeyUtils.h create mode 100644 dom/media/eme/clearkey/RefCounted.h create mode 100644 dom/media/eme/clearkey/gtest/TestClearKeyUtils.cpp create mode 100644 dom/media/eme/clearkey/gtest/moz.build create mode 100644 dom/media/eme/clearkey/moz.build create mode 100644 dom/media/eme/mediadrm/MediaDrmCDMCallbackProxy.cpp create mode 100644 dom/media/eme/mediadrm/MediaDrmCDMCallbackProxy.h create mode 100644 dom/media/eme/mediadrm/MediaDrmCDMProxy.cpp create mode 100644 dom/media/eme/mediadrm/MediaDrmCDMProxy.h create mode 100644 dom/media/eme/mediadrm/MediaDrmProxySupport.cpp create mode 100644 dom/media/eme/mediadrm/MediaDrmProxySupport.h create mode 100644 dom/media/eme/mediadrm/moz.build create mode 100644 dom/media/eme/mediafoundation/WMFCDMImpl.cpp create mode 100644 dom/media/eme/mediafoundation/WMFCDMImpl.h create mode 100644 dom/media/eme/mediafoundation/WMFCDMProxy.cpp create mode 100644 dom/media/eme/mediafoundation/WMFCDMProxy.h create mode 100644 dom/media/eme/mediafoundation/WMFCDMProxyCallback.cpp create mode 100644 dom/media/eme/mediafoundation/WMFCDMProxyCallback.h create mode 100644 dom/media/eme/mediafoundation/moz.build create mode 100644 dom/media/eme/moz.build create mode 100644 dom/media/encoder/ContainerWriter.h create mode 100644 dom/media/encoder/EncodedFrame.h create mode 100644 dom/media/encoder/MediaEncoder.cpp create mode 100644 dom/media/encoder/MediaEncoder.h create mode 100644 dom/media/encoder/Muxer.cpp create mode 100644 dom/media/encoder/Muxer.h create mode 100644 dom/media/encoder/OpusTrackEncoder.cpp create mode 100644 dom/media/encoder/OpusTrackEncoder.h create mode 100644 dom/media/encoder/TrackEncoder.cpp create mode 100644 dom/media/encoder/TrackEncoder.h create mode 100644 dom/media/encoder/TrackMetadataBase.h create mode 100644 dom/media/encoder/VP8TrackEncoder.cpp create mode 100644 dom/media/encoder/VP8TrackEncoder.h create mode 100644 dom/media/encoder/moz.build create mode 100644 dom/media/fake-cdm/cdm-fake.cpp create mode 100644 dom/media/fake-cdm/cdm-test-decryptor.cpp create mode 100644 
dom/media/fake-cdm/cdm-test-decryptor.h create mode 100644 dom/media/fake-cdm/cdm-test-output-protection.h create mode 100644 dom/media/fake-cdm/cdm-test-storage.cpp create mode 100644 dom/media/fake-cdm/cdm-test-storage.h create mode 100644 dom/media/fake-cdm/manifest.json create mode 100644 dom/media/fake-cdm/moz.build create mode 100644 dom/media/flac/FlacDecoder.cpp create mode 100644 dom/media/flac/FlacDecoder.h create mode 100644 dom/media/flac/FlacDemuxer.cpp create mode 100644 dom/media/flac/FlacDemuxer.h create mode 100644 dom/media/flac/FlacFrameParser.cpp create mode 100644 dom/media/flac/FlacFrameParser.h create mode 100644 dom/media/flac/moz.build create mode 100644 dom/media/fuzz/FuzzMedia.cpp create mode 100644 dom/media/fuzz/moz.build create mode 100644 dom/media/gmp-plugin-openh264/fakeopenh264.info create mode 100644 dom/media/gmp-plugin-openh264/gmp-fake-openh264.cpp create mode 100644 dom/media/gmp-plugin-openh264/moz.build create mode 100644 dom/media/gmp/CDMStorageIdProvider.cpp create mode 100644 dom/media/gmp/CDMStorageIdProvider.h create mode 100644 dom/media/gmp/ChromiumCDMAdapter.cpp create mode 100644 dom/media/gmp/ChromiumCDMAdapter.h create mode 100644 dom/media/gmp/ChromiumCDMCallback.h create mode 100644 dom/media/gmp/ChromiumCDMCallbackProxy.cpp create mode 100644 dom/media/gmp/ChromiumCDMCallbackProxy.h create mode 100644 dom/media/gmp/ChromiumCDMChild.cpp create mode 100644 dom/media/gmp/ChromiumCDMChild.h create mode 100644 dom/media/gmp/ChromiumCDMParent.cpp create mode 100644 dom/media/gmp/ChromiumCDMParent.h create mode 100644 dom/media/gmp/ChromiumCDMProxy.cpp create mode 100644 dom/media/gmp/ChromiumCDMProxy.h create mode 100644 dom/media/gmp/DecryptJob.cpp create mode 100644 dom/media/gmp/DecryptJob.h create mode 100644 dom/media/gmp/GMPCallbackBase.h create mode 100644 dom/media/gmp/GMPChild.cpp create mode 100644 dom/media/gmp/GMPChild.h create mode 100644 dom/media/gmp/GMPContentChild.cpp create mode 100644 dom/media/gmp/GMPContentChild.h create mode 100644 dom/media/gmp/GMPContentParent.cpp create mode 100644 dom/media/gmp/GMPContentParent.h create mode 100644 dom/media/gmp/GMPCrashHelper.h create mode 100644 dom/media/gmp/GMPCrashHelperHolder.cpp create mode 100644 dom/media/gmp/GMPCrashHelperHolder.h create mode 100644 dom/media/gmp/GMPDiskStorage.cpp create mode 100644 dom/media/gmp/GMPLoader.cpp create mode 100644 dom/media/gmp/GMPLoader.h create mode 100644 dom/media/gmp/GMPLog.h create mode 100644 dom/media/gmp/GMPMemoryStorage.cpp create mode 100644 dom/media/gmp/GMPMessageUtils.h create mode 100644 dom/media/gmp/GMPNativeTypes.h create mode 100644 dom/media/gmp/GMPParent.cpp create mode 100644 dom/media/gmp/GMPParent.h create mode 100644 dom/media/gmp/GMPPlatform.cpp create mode 100644 dom/media/gmp/GMPPlatform.h create mode 100644 dom/media/gmp/GMPProcessChild.cpp create mode 100644 dom/media/gmp/GMPProcessChild.h create mode 100644 dom/media/gmp/GMPProcessParent.cpp create mode 100644 dom/media/gmp/GMPProcessParent.h create mode 100644 dom/media/gmp/GMPSanitizedExports.h create mode 100644 dom/media/gmp/GMPService.cpp create mode 100644 dom/media/gmp/GMPService.h create mode 100644 dom/media/gmp/GMPServiceChild.cpp create mode 100644 dom/media/gmp/GMPServiceChild.h create mode 100644 dom/media/gmp/GMPServiceParent.cpp create mode 100644 dom/media/gmp/GMPServiceParent.h create mode 100644 dom/media/gmp/GMPSharedMemManager.cpp create mode 100644 dom/media/gmp/GMPSharedMemManager.h create mode 100644 dom/media/gmp/GMPStorage.h create 
mode 100644 dom/media/gmp/GMPStorageChild.cpp create mode 100644 dom/media/gmp/GMPStorageChild.h create mode 100644 dom/media/gmp/GMPStorageParent.cpp create mode 100644 dom/media/gmp/GMPStorageParent.h create mode 100644 dom/media/gmp/GMPTimerChild.cpp create mode 100644 dom/media/gmp/GMPTimerChild.h create mode 100644 dom/media/gmp/GMPTimerParent.cpp create mode 100644 dom/media/gmp/GMPTimerParent.h create mode 100644 dom/media/gmp/GMPTypes.ipdlh create mode 100644 dom/media/gmp/GMPUtils.cpp create mode 100644 dom/media/gmp/GMPUtils.h create mode 100644 dom/media/gmp/GMPVideoDecoderChild.cpp create mode 100644 dom/media/gmp/GMPVideoDecoderChild.h create mode 100644 dom/media/gmp/GMPVideoDecoderParent.cpp create mode 100644 dom/media/gmp/GMPVideoDecoderParent.h create mode 100644 dom/media/gmp/GMPVideoDecoderProxy.h create mode 100644 dom/media/gmp/GMPVideoEncodedFrameImpl.cpp create mode 100644 dom/media/gmp/GMPVideoEncodedFrameImpl.h create mode 100644 dom/media/gmp/GMPVideoEncoderChild.cpp create mode 100644 dom/media/gmp/GMPVideoEncoderChild.h create mode 100644 dom/media/gmp/GMPVideoEncoderParent.cpp create mode 100644 dom/media/gmp/GMPVideoEncoderParent.h create mode 100644 dom/media/gmp/GMPVideoEncoderProxy.h create mode 100644 dom/media/gmp/GMPVideoHost.cpp create mode 100644 dom/media/gmp/GMPVideoHost.h create mode 100644 dom/media/gmp/GMPVideoPlaneImpl.cpp create mode 100644 dom/media/gmp/GMPVideoPlaneImpl.h create mode 100644 dom/media/gmp/GMPVideoi420FrameImpl.cpp create mode 100644 dom/media/gmp/GMPVideoi420FrameImpl.h create mode 100644 dom/media/gmp/PChromiumCDM.ipdl create mode 100644 dom/media/gmp/PGMP.ipdl create mode 100644 dom/media/gmp/PGMPContent.ipdl create mode 100644 dom/media/gmp/PGMPService.ipdl create mode 100644 dom/media/gmp/PGMPStorage.ipdl create mode 100644 dom/media/gmp/PGMPTimer.ipdl create mode 100644 dom/media/gmp/PGMPVideoDecoder.ipdl create mode 100644 dom/media/gmp/PGMPVideoEncoder.ipdl create mode 100644 dom/media/gmp/README.txt create mode 100644 dom/media/gmp/gmp-api/gmp-entrypoints.h create mode 100644 dom/media/gmp/gmp-api/gmp-errors.h create mode 100644 dom/media/gmp/gmp-api/gmp-platform.h create mode 100644 dom/media/gmp/gmp-api/gmp-storage.h create mode 100644 dom/media/gmp/gmp-api/gmp-video-codec.h create mode 100644 dom/media/gmp/gmp-api/gmp-video-decode.h create mode 100644 dom/media/gmp/gmp-api/gmp-video-encode.h create mode 100644 dom/media/gmp/gmp-api/gmp-video-frame-encoded.h create mode 100644 dom/media/gmp/gmp-api/gmp-video-frame-i420.h create mode 100644 dom/media/gmp/gmp-api/gmp-video-frame.h create mode 100644 dom/media/gmp/gmp-api/gmp-video-host.h create mode 100644 dom/media/gmp/gmp-api/gmp-video-plane.h create mode 100644 dom/media/gmp/moz.build create mode 100644 dom/media/gmp/mozIGeckoMediaPluginChromeService.idl create mode 100644 dom/media/gmp/mozIGeckoMediaPluginService.idl create mode 100644 dom/media/gmp/rlz/OWNERS create mode 100644 dom/media/gmp/rlz/README.mozilla create mode 100644 dom/media/gmp/rlz/lib/assert.h create mode 100644 dom/media/gmp/rlz/lib/crc8.cc create mode 100644 dom/media/gmp/rlz/lib/crc8.h create mode 100644 dom/media/gmp/rlz/lib/machine_id.cc create mode 100644 dom/media/gmp/rlz/lib/machine_id.h create mode 100644 dom/media/gmp/rlz/lib/string_utils.cc create mode 100644 dom/media/gmp/rlz/lib/string_utils.h create mode 100644 dom/media/gmp/rlz/mac/lib/machine_id_mac.cc create mode 100644 dom/media/gmp/rlz/moz.build create mode 100644 dom/media/gmp/rlz/win/lib/machine_id_win.cc create mode 100644 
dom/media/gmp/widevine-adapter/WidevineFileIO.cpp create mode 100644 dom/media/gmp/widevine-adapter/WidevineFileIO.h create mode 100644 dom/media/gmp/widevine-adapter/WidevineUtils.cpp create mode 100644 dom/media/gmp/widevine-adapter/WidevineUtils.h create mode 100644 dom/media/gmp/widevine-adapter/WidevineVideoFrame.cpp create mode 100644 dom/media/gmp/widevine-adapter/WidevineVideoFrame.h create mode 100644 dom/media/gmp/widevine-adapter/content_decryption_module.h create mode 100644 dom/media/gmp/widevine-adapter/content_decryption_module_export.h create mode 100644 dom/media/gmp/widevine-adapter/content_decryption_module_ext.h create mode 100644 dom/media/gmp/widevine-adapter/content_decryption_module_proxy.h create mode 100644 dom/media/gmp/widevine-adapter/moz.build create mode 100644 dom/media/gtest/AudioGenerator.h create mode 100644 dom/media/gtest/AudioVerifier.h create mode 100644 dom/media/gtest/Cargo.toml create mode 100644 dom/media/gtest/GMPTestMonitor.h create mode 100644 dom/media/gtest/MockCubeb.cpp create mode 100644 dom/media/gtest/MockCubeb.h create mode 100644 dom/media/gtest/MockMediaResource.cpp create mode 100644 dom/media/gtest/MockMediaResource.h create mode 100644 dom/media/gtest/TestAudioBuffer.cpp create mode 100644 dom/media/gtest/TestAudioBuffers.cpp create mode 100644 dom/media/gtest/TestAudioCallbackDriver.cpp create mode 100644 dom/media/gtest/TestAudioCompactor.cpp create mode 100644 dom/media/gtest/TestAudioDecoderInputTrack.cpp create mode 100644 dom/media/gtest/TestAudioDeviceEnumerator.cpp create mode 100644 dom/media/gtest/TestAudioInputProcessing.cpp create mode 100644 dom/media/gtest/TestAudioInputSource.cpp create mode 100644 dom/media/gtest/TestAudioMixer.cpp create mode 100644 dom/media/gtest/TestAudioPacketizer.cpp create mode 100644 dom/media/gtest/TestAudioRingBuffer.cpp create mode 100644 dom/media/gtest/TestAudioSegment.cpp create mode 100644 dom/media/gtest/TestAudioSinkWrapper.cpp create mode 100644 dom/media/gtest/TestAudioTrackEncoder.cpp create mode 100644 dom/media/gtest/TestAudioTrackGraph.cpp create mode 100644 dom/media/gtest/TestBenchmarkStorage.cpp create mode 100644 dom/media/gtest/TestBitWriter.cpp create mode 100644 dom/media/gtest/TestBlankVideoDataCreator.cpp create mode 100644 dom/media/gtest/TestBufferReader.cpp create mode 100644 dom/media/gtest/TestCDMStorage.cpp create mode 100644 dom/media/gtest/TestCubebInputStream.cpp create mode 100644 dom/media/gtest/TestDataMutex.cpp create mode 100644 dom/media/gtest/TestDecoderBenchmark.cpp create mode 100644 dom/media/gtest/TestDeviceInputTrack.cpp create mode 100644 dom/media/gtest/TestDriftCompensation.cpp create mode 100644 dom/media/gtest/TestGMPCrossOrigin.cpp create mode 100644 dom/media/gtest/TestGMPRemoveAndDelete.cpp create mode 100644 dom/media/gtest/TestGMPUtils.cpp create mode 100644 dom/media/gtest/TestGroupId.cpp create mode 100644 dom/media/gtest/TestIntervalSet.cpp create mode 100644 dom/media/gtest/TestKeyValueStorage.cpp create mode 100644 dom/media/gtest/TestMP3Demuxer.cpp create mode 100644 dom/media/gtest/TestMP4Demuxer.cpp create mode 100644 dom/media/gtest/TestMediaCodecsSupport.cpp create mode 100644 dom/media/gtest/TestMediaDataDecoder.cpp create mode 100644 dom/media/gtest/TestMediaDataEncoder.cpp create mode 100644 dom/media/gtest/TestMediaEventSource.cpp create mode 100644 dom/media/gtest/TestMediaMIMETypes.cpp create mode 100644 dom/media/gtest/TestMediaQueue.cpp create mode 100644 dom/media/gtest/TestMediaSpan.cpp create mode 100644 
dom/media/gtest/TestMediaUtils.cpp create mode 100644 dom/media/gtest/TestMuxer.cpp create mode 100644 dom/media/gtest/TestOggWriter.cpp create mode 100644 dom/media/gtest/TestOpusParser.cpp create mode 100644 dom/media/gtest/TestPacer.cpp create mode 100644 dom/media/gtest/TestRTCStatsTimestampMaker.cpp create mode 100644 dom/media/gtest/TestRust.cpp create mode 100644 dom/media/gtest/TestTimeUnit.cpp create mode 100644 dom/media/gtest/TestVPXDecoding.cpp create mode 100644 dom/media/gtest/TestVideoFrameConverter.cpp create mode 100644 dom/media/gtest/TestVideoSegment.cpp create mode 100644 dom/media/gtest/TestVideoTrackEncoder.cpp create mode 100644 dom/media/gtest/TestVideoUtils.cpp create mode 100644 dom/media/gtest/TestWebMBuffered.cpp create mode 100644 dom/media/gtest/TestWebMWriter.cpp create mode 100644 dom/media/gtest/YUVBufferGenerator.cpp create mode 100644 dom/media/gtest/YUVBufferGenerator.h create mode 100644 dom/media/gtest/dash_dashinit.mp4 create mode 100644 dom/media/gtest/hello.rs create mode 100644 dom/media/gtest/id3v2header.mp3 create mode 100644 dom/media/gtest/moz.build create mode 100644 dom/media/gtest/mp4_demuxer/TestInterval.cpp create mode 100644 dom/media/gtest/mp4_demuxer/TestMP4.cpp create mode 100644 dom/media/gtest/mp4_demuxer/TestParser.cpp create mode 100644 dom/media/gtest/mp4_demuxer/moz.build create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1156505.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1181213.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1181215.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1181223.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1181719.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1185230.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1187067.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1200326.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1204580.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1216748.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1296473.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1296532.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1301065-harder.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1301065-i64max.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1301065-i64min.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1301065-max-ez.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1301065-max-ok.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1301065-overfl.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1301065-u32max.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1301065-u64max.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1301065.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1329061.mov create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1351094.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1388991.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1389299.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1389527.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1395244.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1410565.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1513651-2-sample-description-entries.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1519617-cenc-init-with-track_id-0.mp4 create mode 100644 
dom/media/gtest/mp4_demuxer/test_case_1519617-track2-trafs-removed.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1519617-video-has-track_id-0.mp4 create mode 100644 dom/media/gtest/mp4_demuxer/test_case_1714125-2-sample-description-entires-with-identical-crypto.mp4 create mode 100644 dom/media/gtest/negative_duration.mp4 create mode 100644 dom/media/gtest/noise.mp3 create mode 100644 dom/media/gtest/noise_vbr.mp3 create mode 100644 dom/media/gtest/short-zero-in-moov.mp4 create mode 100644 dom/media/gtest/short-zero-inband.mov create mode 100644 dom/media/gtest/small-shot-false-positive.mp3 create mode 100644 dom/media/gtest/small-shot-partial-xing.mp3 create mode 100644 dom/media/gtest/small-shot.mp3 create mode 100644 dom/media/gtest/test.webm create mode 100644 dom/media/gtest/test_InvalidElementId.webm create mode 100644 dom/media/gtest/test_InvalidElementSize.webm create mode 100644 dom/media/gtest/test_InvalidLargeEBMLMaxIdLength.webm create mode 100644 dom/media/gtest/test_InvalidLargeElementId.webm create mode 100644 dom/media/gtest/test_InvalidSmallEBMLMaxIdLength.webm create mode 100644 dom/media/gtest/test_ValidLargeEBMLMaxIdLength.webm create mode 100644 dom/media/gtest/test_ValidSmallEBMLMaxSizeLength.webm create mode 100644 dom/media/gtest/test_case_1224361.vp8.ivf create mode 100644 dom/media/gtest/test_case_1224363.vp8.ivf create mode 100644 dom/media/gtest/test_case_1224369.vp8.ivf create mode 100644 dom/media/gtest/test_vbri.mp3 create mode 100644 dom/media/hls/HLSDecoder.cpp create mode 100644 dom/media/hls/HLSDecoder.h create mode 100644 dom/media/hls/HLSDemuxer.cpp create mode 100644 dom/media/hls/HLSDemuxer.h create mode 100644 dom/media/hls/HLSUtils.cpp create mode 100644 dom/media/hls/HLSUtils.h create mode 100644 dom/media/hls/moz.build create mode 100644 dom/media/imagecapture/CaptureTask.cpp create mode 100644 dom/media/imagecapture/CaptureTask.h create mode 100644 dom/media/imagecapture/ImageCapture.cpp create mode 100644 dom/media/imagecapture/ImageCapture.h create mode 100644 dom/media/imagecapture/moz.build create mode 100644 dom/media/ipc/MFCDMChild.cpp create mode 100644 dom/media/ipc/MFCDMChild.h create mode 100644 dom/media/ipc/MFCDMParent.cpp create mode 100644 dom/media/ipc/MFCDMParent.h create mode 100644 dom/media/ipc/MFCDMSerializers.h create mode 100644 dom/media/ipc/MFMediaEngineChild.cpp create mode 100644 dom/media/ipc/MFMediaEngineChild.h create mode 100644 dom/media/ipc/MFMediaEngineParent.cpp create mode 100644 dom/media/ipc/MFMediaEngineParent.h create mode 100644 dom/media/ipc/MFMediaEngineUtils.cpp create mode 100644 dom/media/ipc/MFMediaEngineUtils.h create mode 100644 dom/media/ipc/MediaIPCUtils.h create mode 100644 dom/media/ipc/PMFCDM.ipdl create mode 100644 dom/media/ipc/PMFMediaEngine.ipdl create mode 100644 dom/media/ipc/PMediaDecoderParams.ipdlh create mode 100644 dom/media/ipc/PRDD.ipdl create mode 100644 dom/media/ipc/PRemoteDecoder.ipdl create mode 100644 dom/media/ipc/PRemoteDecoderManager.ipdl create mode 100644 dom/media/ipc/RDDChild.cpp create mode 100644 dom/media/ipc/RDDChild.h create mode 100644 dom/media/ipc/RDDParent.cpp create mode 100644 dom/media/ipc/RDDParent.h create mode 100644 dom/media/ipc/RDDProcessHost.cpp create mode 100644 dom/media/ipc/RDDProcessHost.h create mode 100644 dom/media/ipc/RDDProcessImpl.cpp create mode 100644 dom/media/ipc/RDDProcessImpl.h create mode 100644 dom/media/ipc/RDDProcessManager.cpp create mode 100644 dom/media/ipc/RDDProcessManager.h create mode 100644 
dom/media/ipc/RemoteAudioDecoder.cpp create mode 100644 dom/media/ipc/RemoteAudioDecoder.h create mode 100644 dom/media/ipc/RemoteDecodeUtils.cpp create mode 100644 dom/media/ipc/RemoteDecodeUtils.h create mode 100644 dom/media/ipc/RemoteDecoderChild.cpp create mode 100644 dom/media/ipc/RemoteDecoderChild.h create mode 100644 dom/media/ipc/RemoteDecoderManagerChild.cpp create mode 100644 dom/media/ipc/RemoteDecoderManagerChild.h create mode 100644 dom/media/ipc/RemoteDecoderManagerParent.cpp create mode 100644 dom/media/ipc/RemoteDecoderManagerParent.h create mode 100644 dom/media/ipc/RemoteDecoderModule.cpp create mode 100644 dom/media/ipc/RemoteDecoderModule.h create mode 100644 dom/media/ipc/RemoteDecoderParent.cpp create mode 100644 dom/media/ipc/RemoteDecoderParent.h create mode 100644 dom/media/ipc/RemoteImageHolder.cpp create mode 100644 dom/media/ipc/RemoteImageHolder.h create mode 100644 dom/media/ipc/RemoteMediaData.cpp create mode 100644 dom/media/ipc/RemoteMediaData.h create mode 100644 dom/media/ipc/RemoteMediaDataDecoder.cpp create mode 100644 dom/media/ipc/RemoteMediaDataDecoder.h create mode 100644 dom/media/ipc/RemoteVideoDecoder.cpp create mode 100644 dom/media/ipc/RemoteVideoDecoder.h create mode 100644 dom/media/ipc/ShmemRecycleAllocator.h create mode 100644 dom/media/ipc/moz.build create mode 100644 dom/media/mediacapabilities/BenchmarkStorageChild.cpp create mode 100644 dom/media/mediacapabilities/BenchmarkStorageChild.h create mode 100644 dom/media/mediacapabilities/BenchmarkStorageParent.cpp create mode 100644 dom/media/mediacapabilities/BenchmarkStorageParent.h create mode 100644 dom/media/mediacapabilities/DecoderBenchmark.cpp create mode 100644 dom/media/mediacapabilities/DecoderBenchmark.h create mode 100644 dom/media/mediacapabilities/KeyValueStorage.cpp create mode 100644 dom/media/mediacapabilities/KeyValueStorage.h create mode 100644 dom/media/mediacapabilities/MediaCapabilities.cpp create mode 100644 dom/media/mediacapabilities/MediaCapabilities.h create mode 100644 dom/media/mediacapabilities/PBenchmarkStorage.ipdl create mode 100644 dom/media/mediacapabilities/moz.build create mode 100644 dom/media/mediacontrol/AudioFocusManager.cpp create mode 100644 dom/media/mediacontrol/AudioFocusManager.h create mode 100644 dom/media/mediacontrol/ContentMediaController.cpp create mode 100644 dom/media/mediacontrol/ContentMediaController.h create mode 100644 dom/media/mediacontrol/ContentPlaybackController.cpp create mode 100644 dom/media/mediacontrol/ContentPlaybackController.h create mode 100644 dom/media/mediacontrol/FetchImageHelper.cpp create mode 100644 dom/media/mediacontrol/FetchImageHelper.h create mode 100644 dom/media/mediacontrol/MediaControlIPC.h create mode 100644 dom/media/mediacontrol/MediaControlKeyManager.cpp create mode 100644 dom/media/mediacontrol/MediaControlKeyManager.h create mode 100644 dom/media/mediacontrol/MediaControlKeySource.cpp create mode 100644 dom/media/mediacontrol/MediaControlKeySource.h create mode 100644 dom/media/mediacontrol/MediaControlService.cpp create mode 100644 dom/media/mediacontrol/MediaControlService.h create mode 100644 dom/media/mediacontrol/MediaControlUtils.cpp create mode 100644 dom/media/mediacontrol/MediaControlUtils.h create mode 100644 dom/media/mediacontrol/MediaController.cpp create mode 100644 dom/media/mediacontrol/MediaController.h create mode 100644 dom/media/mediacontrol/MediaPlaybackStatus.cpp create mode 100644 dom/media/mediacontrol/MediaPlaybackStatus.h create mode 100644 
dom/media/mediacontrol/MediaStatusManager.cpp create mode 100644 dom/media/mediacontrol/MediaStatusManager.h create mode 100644 dom/media/mediacontrol/PositionStateEvent.h create mode 100644 dom/media/mediacontrol/moz.build create mode 100644 dom/media/mediacontrol/tests/browser/browser.toml create mode 100644 dom/media/mediacontrol/tests/browser/browser_audio_focus_management.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_control_page_with_audible_and_inaudible_media.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_default_action_handler.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_audio_focus_within_a_page.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_before_media_starts.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_captured_audio.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_keys_event.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_main_controller.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_metadata.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_non_eligible_media.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_playback_state.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_position_state.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_seekto.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_stop_timer.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_media_control_supported_keys.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_nosrc_and_error_media.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_only_control_non_real_time_media.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_remove_controllable_media_for_active_controller.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_resume_latest_paused_media.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_seek_captured_audio.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_stop_control_after_media_reaches_to_end.js create mode 100644 dom/media/mediacontrol/tests/browser/browser_suspend_inactive_tab.js create mode 100644 dom/media/mediacontrol/tests/browser/file_audio_and_inaudible_media.html create mode 100644 dom/media/mediacontrol/tests/browser/file_autoplay.html create mode 100644 dom/media/mediacontrol/tests/browser/file_empty_title.html create mode 100644 dom/media/mediacontrol/tests/browser/file_error_media.html create mode 100644 dom/media/mediacontrol/tests/browser/file_iframe_media.html create mode 100644 dom/media/mediacontrol/tests/browser/file_main_frame_with_multiple_child_session_frames.html create mode 100644 dom/media/mediacontrol/tests/browser/file_multiple_audible_media.html create mode 100644 dom/media/mediacontrol/tests/browser/file_muted_autoplay.html create mode 100644 dom/media/mediacontrol/tests/browser/file_no_src_media.html create mode 100644 dom/media/mediacontrol/tests/browser/file_non_autoplay.html create mode 100644 dom/media/mediacontrol/tests/browser/file_non_eligible_media.html create mode 100644 dom/media/mediacontrol/tests/browser/file_non_looping_media.html create mode 100644 dom/media/mediacontrol/tests/browser/head.js create mode 100644 
dom/media/mediacontrol/tests/gtest/MediaKeyListenerTest.h create mode 100644 dom/media/mediacontrol/tests/gtest/TestAudioFocusManager.cpp create mode 100644 dom/media/mediacontrol/tests/gtest/TestMediaControlService.cpp create mode 100644 dom/media/mediacontrol/tests/gtest/TestMediaController.cpp create mode 100644 dom/media/mediacontrol/tests/gtest/TestMediaKeysEvent.cpp create mode 100644 dom/media/mediacontrol/tests/gtest/TestMediaKeysEventMac.mm create mode 100644 dom/media/mediacontrol/tests/gtest/TestMediaKeysEventMediaCenter.mm create mode 100644 dom/media/mediacontrol/tests/gtest/moz.build create mode 100644 dom/media/mediasession/MediaMetadata.cpp create mode 100644 dom/media/mediasession/MediaMetadata.h create mode 100644 dom/media/mediasession/MediaSession.cpp create mode 100644 dom/media/mediasession/MediaSession.h create mode 100644 dom/media/mediasession/MediaSessionIPCUtils.h create mode 100644 dom/media/mediasession/moz.build create mode 100644 dom/media/mediasession/test/MediaSessionTestUtils.js create mode 100644 dom/media/mediasession/test/browser.toml create mode 100644 dom/media/mediasession/test/browser_active_mediasession_among_tabs.js create mode 100644 dom/media/mediasession/test/crashtests/crashtests.list create mode 100644 dom/media/mediasession/test/crashtests/inactive-mediasession.html create mode 100644 dom/media/mediasession/test/file_media_session.html create mode 100644 dom/media/mediasession/test/file_trigger_actionhanlder_frame.html create mode 100644 dom/media/mediasession/test/file_trigger_actionhanlder_window.html create mode 100644 dom/media/mediasession/test/mochitest.toml create mode 100644 dom/media/mediasession/test/test_setactionhandler.html create mode 100644 dom/media/mediasession/test/test_trigger_actionhanlder.html create mode 100644 dom/media/mediasink/AudioDecoderInputTrack.cpp create mode 100644 dom/media/mediasink/AudioDecoderInputTrack.h create mode 100644 dom/media/mediasink/AudioSink.cpp create mode 100644 dom/media/mediasink/AudioSink.h create mode 100644 dom/media/mediasink/AudioSinkWrapper.cpp create mode 100644 dom/media/mediasink/AudioSinkWrapper.h create mode 100644 dom/media/mediasink/DecodedStream.cpp create mode 100644 dom/media/mediasink/DecodedStream.h create mode 100644 dom/media/mediasink/MediaSink.h create mode 100644 dom/media/mediasink/VideoSink.cpp create mode 100644 dom/media/mediasink/VideoSink.h create mode 100644 dom/media/mediasink/moz.build create mode 100644 dom/media/mediasource/AsyncEventRunner.h create mode 100644 dom/media/mediasource/ContainerParser.cpp create mode 100644 dom/media/mediasource/ContainerParser.h create mode 100644 dom/media/mediasource/MediaSource.cpp create mode 100644 dom/media/mediasource/MediaSource.h create mode 100644 dom/media/mediasource/MediaSourceDecoder.cpp create mode 100644 dom/media/mediasource/MediaSourceDecoder.h create mode 100644 dom/media/mediasource/MediaSourceDemuxer.cpp create mode 100644 dom/media/mediasource/MediaSourceDemuxer.h create mode 100644 dom/media/mediasource/MediaSourceUtils.cpp create mode 100644 dom/media/mediasource/MediaSourceUtils.h create mode 100644 dom/media/mediasource/ResourceQueue.cpp create mode 100644 dom/media/mediasource/ResourceQueue.h create mode 100644 dom/media/mediasource/SourceBuffer.cpp create mode 100644 dom/media/mediasource/SourceBuffer.h create mode 100644 dom/media/mediasource/SourceBufferAttributes.h create mode 100644 dom/media/mediasource/SourceBufferList.cpp create mode 100644 dom/media/mediasource/SourceBufferList.h create 
mode 100644 dom/media/mediasource/SourceBufferResource.cpp create mode 100644 dom/media/mediasource/SourceBufferResource.h create mode 100644 dom/media/mediasource/SourceBufferTask.h create mode 100644 dom/media/mediasource/TrackBuffersManager.cpp create mode 100644 dom/media/mediasource/TrackBuffersManager.h create mode 100644 dom/media/mediasource/gtest/TestContainerParser.cpp create mode 100644 dom/media/mediasource/gtest/TestExtractAV1CodecDetails.cpp create mode 100644 dom/media/mediasource/gtest/TestExtractVPXCodecDetails.cpp create mode 100644 dom/media/mediasource/gtest/moz.build create mode 100644 dom/media/mediasource/moz.build create mode 100644 dom/media/mediasource/test/.eslintrc.js create mode 100644 dom/media/mediasource/test/1516754.webm create mode 100644 dom/media/mediasource/test/1516754.webm^headers^ create mode 100644 dom/media/mediasource/test/aac20-48000-64000-1.m4s create mode 100644 dom/media/mediasource/test/aac20-48000-64000-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/aac20-48000-64000-2.m4s create mode 100644 dom/media/mediasource/test/aac20-48000-64000-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/aac20-48000-64000-init.mp4 create mode 100644 dom/media/mediasource/test/aac20-48000-64000-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/aac51-48000-128000-1.m4s create mode 100644 dom/media/mediasource/test/aac51-48000-128000-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/aac51-48000-128000-2.m4s create mode 100644 dom/media/mediasource/test/aac51-48000-128000-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/aac51-48000-128000-init.mp4 create mode 100644 dom/media/mediasource/test/aac51-48000-128000-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/avc3/init.mp4 create mode 100644 dom/media/mediasource/test/avc3/init.mp4^headers^ create mode 100644 dom/media/mediasource/test/avc3/segment1.m4s create mode 100644 dom/media/mediasource/test/avc3/segment1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop10.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop10.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop11.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop11.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop12.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop12.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop13.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop13.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop2s.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop2s.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop3.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop3.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop4.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop4.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop5.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop5.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop6.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop6.m4s^headers^ 
create mode 100644 dom/media/mediasource/test/bipbop/bipbop7.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop7.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop8.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop8.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop9.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop9.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_300-3s.webm create mode 100644 dom/media/mediasource/test/bipbop/bipbop_300-3s.webm^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_480_624kbps-video1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_480_624kbps-video1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_480_624kbps-video2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_480_624kbps-video2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_480_624kbps-videoinit.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_480_624kbps-videoinit.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio10.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio10.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio11.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio11.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio3.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio3.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio4.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio4.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio5.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio5.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio6.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio6.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio7.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio7.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio8.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio8.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio9.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audio9.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audioinit.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_audioinit.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_dash.mpd create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.0-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.0-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.0-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.0-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.0-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.0-init.mp4^headers^ create mode 100644 
dom/media/mediasource/test/bipbop/bipbop_offset_0.1-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.1-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.1-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.1-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.1-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.1-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.2-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.2-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.2-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.2-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.2-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.2-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.3-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.3-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.3-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.3-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.3-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.3-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.4-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.4-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.4-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.4-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.4-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.4-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.5-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.5-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.5-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.5-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.5-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.5-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.6-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.6-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.6-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.6-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.6-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.6-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.7-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.7-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.7-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.7-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.7-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.7-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.8-1.m4s create mode 100644 
dom/media/mediasource/test/bipbop/bipbop_offset_0.8-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.8-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.8-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.8-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.8-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.9-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.9-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.9-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.9-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.9-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_0.9-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.0-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.0-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.0-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.0-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.0-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.0-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.1-1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.1-1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.1-2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.1-2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.1-init.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_offset_1.1-init.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_trailing_skip_box_video1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_trailing_skip_box_video1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video1.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video1.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video10.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video10.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video11.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video11.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video12.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video12.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video13.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video13.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video2.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video2.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video3.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video3.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video4.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video4.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video5.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video5.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video6.m4s create mode 100644 
dom/media/mediasource/test/bipbop/bipbop_video6.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video7.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video7.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video8.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video8.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video9.m4s create mode 100644 dom/media/mediasource/test/bipbop/bipbop_video9.m4s^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbop_videoinit.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbop_videoinit.mp4^headers^ create mode 100644 dom/media/mediasource/test/bipbop/bipbopinit.mp4 create mode 100644 dom/media/mediasource/test/bipbop/bipbopinit.mp4^headers^ create mode 100644 dom/media/mediasource/test/bug1718709_high_res.mp4 create mode 100644 dom/media/mediasource/test/bug1718709_low_res.mp4 create mode 100644 dom/media/mediasource/test/crashtests/1005366.html create mode 100644 dom/media/mediasource/test/crashtests/1059035.html create mode 100644 dom/media/mediasource/test/crashtests/926665.html create mode 100644 dom/media/mediasource/test/crashtests/931388.html create mode 100644 dom/media/mediasource/test/crashtests/crashtests.list create mode 100644 dom/media/mediasource/test/flac/00001.m4s create mode 100644 dom/media/mediasource/test/flac/00001.m4s^headers^ create mode 100644 dom/media/mediasource/test/flac/00002.m4s create mode 100644 dom/media/mediasource/test/flac/00002.m4s^headers^ create mode 100644 dom/media/mediasource/test/flac/00003.m4s create mode 100644 dom/media/mediasource/test/flac/00003.m4s^headers^ create mode 100644 dom/media/mediasource/test/flac/IS.mp4 create mode 100644 dom/media/mediasource/test/flac/IS.mp4^headers^ create mode 100644 dom/media/mediasource/test/init-trackid2.mp4 create mode 100644 dom/media/mediasource/test/init-trackid2.mp4^headers^ create mode 100644 dom/media/mediasource/test/init-trackid3.mp4 create mode 100644 dom/media/mediasource/test/init-trackid3.mp4^headers^ create mode 100644 dom/media/mediasource/test/mediasource.js create mode 100644 dom/media/mediasource/test/mochitest.toml create mode 100644 dom/media/mediasource/test/mochitest_compat.toml create mode 100644 dom/media/mediasource/test/seek.webm create mode 100644 dom/media/mediasource/test/seek.webm^headers^ create mode 100644 dom/media/mediasource/test/seek_lowres.webm create mode 100644 dom/media/mediasource/test/seek_lowres.webm^headers^ create mode 100644 dom/media/mediasource/test/segment-2.0001.m4s create mode 100644 dom/media/mediasource/test/segment-2.0001.m4s^headers^ create mode 100644 dom/media/mediasource/test/segment-2.0002.m4s create mode 100644 dom/media/mediasource/test/segment-2.0002.m4s^headers^ create mode 100644 dom/media/mediasource/test/segment-3.0001.m4s create mode 100644 dom/media/mediasource/test/segment-3.0001.m4s^headers^ create mode 100644 dom/media/mediasource/test/segment-3.0002.m4s create mode 100644 dom/media/mediasource/test/segment-3.0002.m4s^headers^ create mode 100644 dom/media/mediasource/test/tags_before_cluster.webm create mode 100644 dom/media/mediasource/test/tags_before_cluster.webm^header^ create mode 100644 dom/media/mediasource/test/test_AVC3_mp4.html create mode 100644 dom/media/mediasource/test/test_AbortAfterPartialMediaSegment.html create mode 100644 dom/media/mediasource/test/test_AppendPartialInitSegment.html create mode 100644 dom/media/mediasource/test/test_AudioChange_mp4.html 
create mode 100644 dom/media/mediasource/test/test_AudioChange_mp4_WebAudio.html create mode 100644 dom/media/mediasource/test/test_AutoRevocation.html create mode 100644 dom/media/mediasource/test/test_BufferedSeek.html create mode 100644 dom/media/mediasource/test/test_BufferedSeek_mp4.html create mode 100644 dom/media/mediasource/test/test_BufferingWait.html create mode 100644 dom/media/mediasource/test/test_BufferingWait_mp4.html create mode 100644 dom/media/mediasource/test/test_ChangeType.html create mode 100644 dom/media/mediasource/test/test_ChangeWhileWaitingOnMissingData_mp4.html create mode 100644 dom/media/mediasource/test/test_DifferentStreamStartTimes.html create mode 100644 dom/media/mediasource/test/test_DrainOnMissingData_mp4.html create mode 100644 dom/media/mediasource/test/test_DurationChange.html create mode 100644 dom/media/mediasource/test/test_DurationUpdated.html create mode 100644 dom/media/mediasource/test/test_DurationUpdated_mp4.html create mode 100644 dom/media/mediasource/test/test_EndOfStream.html create mode 100644 dom/media/mediasource/test/test_EndOfStream_mp4.html create mode 100644 dom/media/mediasource/test/test_EndedEvent.html create mode 100644 dom/media/mediasource/test/test_Eviction_mp4.html create mode 100644 dom/media/mediasource/test/test_ExperimentalAsync.html create mode 100644 dom/media/mediasource/test/test_FrameSelection.html create mode 100644 dom/media/mediasource/test/test_FrameSelection_mp4.html create mode 100644 dom/media/mediasource/test/test_HEAAC_extradata.html create mode 100644 dom/media/mediasource/test/test_HaveMetadataUnbufferedSeek.html create mode 100644 dom/media/mediasource/test/test_HaveMetadataUnbufferedSeek_mp4.html create mode 100644 dom/media/mediasource/test/test_InputBufferIsCleared.html create mode 100644 dom/media/mediasource/test/test_LiveSeekable.html create mode 100644 dom/media/mediasource/test/test_LoadedDataFired_mp4.html create mode 100644 dom/media/mediasource/test/test_LoadedMetadataFired.html create mode 100644 dom/media/mediasource/test/test_LoadedMetadataFired_mp4.html create mode 100644 dom/media/mediasource/test/test_MediaSource.html create mode 100644 dom/media/mediasource/test/test_MediaSource_capture_gc.html create mode 100644 dom/media/mediasource/test/test_MediaSource_disabled.html create mode 100644 dom/media/mediasource/test/test_MediaSource_flac_mp4.html create mode 100644 dom/media/mediasource/test/test_MediaSource_hevc_mp4.html create mode 100644 dom/media/mediasource/test/test_MediaSource_memory_reporting.html create mode 100644 dom/media/mediasource/test/test_MediaSource_mp4.html create mode 100644 dom/media/mediasource/test/test_MultipleInitSegments.html create mode 100644 dom/media/mediasource/test/test_MultipleInitSegments_mp4.html create mode 100644 dom/media/mediasource/test/test_NoAudioLoopBackData.html create mode 100644 dom/media/mediasource/test/test_NoAudioLoopBackData_Muted.html create mode 100644 dom/media/mediasource/test/test_NoVideoLoopBackData.html create mode 100644 dom/media/mediasource/test/test_OnEvents.html create mode 100644 dom/media/mediasource/test/test_PlayEvents.html create mode 100644 dom/media/mediasource/test/test_PlayEventsAutoPlaying.html create mode 100644 dom/media/mediasource/test/test_PlayEventsAutoPlaying2.html create mode 100644 dom/media/mediasource/test/test_RemoveSourceBuffer.html create mode 100644 dom/media/mediasource/test/test_Resolution_change_should_not_cause_video_freeze.html create mode 100644 
dom/media/mediasource/test/test_ResumeAfterClearing_mp4.html create mode 100644 dom/media/mediasource/test/test_Seamless_looping_shorter_audio_than_video_MSE.html create mode 100644 dom/media/mediasource/test/test_SeekNoData_mp4.html create mode 100644 dom/media/mediasource/test/test_SeekToEnd_mp4.html create mode 100644 dom/media/mediasource/test/test_SeekToLastFrame_mp4.html create mode 100644 dom/media/mediasource/test/test_SeekTwice_mp4.html create mode 100644 dom/media/mediasource/test/test_SeekableBeforeAndAfterEndOfStream.html create mode 100644 dom/media/mediasource/test/test_SeekableBeforeAndAfterEndOfStreamSplit.html create mode 100644 dom/media/mediasource/test/test_SeekableBeforeAndAfterEndOfStreamSplit_mp4.html create mode 100644 dom/media/mediasource/test/test_SeekableBeforeAndAfterEndOfStream_mp4.html create mode 100644 dom/media/mediasource/test/test_SeekedEvent_mp4.html create mode 100644 dom/media/mediasource/test/test_Sequence_mp4.html create mode 100644 dom/media/mediasource/test/test_SetModeThrows.html create mode 100644 dom/media/mediasource/test/test_SplitAppend.html create mode 100644 dom/media/mediasource/test/test_SplitAppendDelay.html create mode 100644 dom/media/mediasource/test/test_SplitAppendDelay_mp4.html create mode 100644 dom/media/mediasource/test/test_SplitAppend_mp4.html create mode 100644 dom/media/mediasource/test/test_Threshold_mp4.html create mode 100644 dom/media/mediasource/test/test_TimestampOffset_mp4.html create mode 100644 dom/media/mediasource/test/test_TruncatedDuration.html create mode 100644 dom/media/mediasource/test/test_TruncatedDuration_mp4.html create mode 100644 dom/media/mediasource/test/test_WMFUnmatchedAudioDataTime.html create mode 100644 dom/media/mediasource/test/test_WaitingOnMissingData.html create mode 100644 dom/media/mediasource/test/test_WaitingOnMissingDataEnded_mp4.html create mode 100644 dom/media/mediasource/test/test_WaitingOnMissingData_mp4.html create mode 100644 dom/media/mediasource/test/test_WaitingToEndedTransition_mp4.html create mode 100644 dom/media/mediasource/test/test_WebMTagsBeforeCluster.html create mode 100644 dom/media/mediasource/test/test_trackidchange_mp4.html create mode 100644 dom/media/mediasource/test/whitenoise-he-aac-5s.mp4 create mode 100644 dom/media/mediasource/test/wmf_mismatchedaudiotime.mp4 create mode 100644 dom/media/metrics.yaml create mode 100644 dom/media/moz.build create mode 100644 dom/media/mp3/MP3Decoder.cpp create mode 100644 dom/media/mp3/MP3Decoder.h create mode 100644 dom/media/mp3/MP3Demuxer.cpp create mode 100644 dom/media/mp3/MP3Demuxer.h create mode 100644 dom/media/mp3/MP3FrameParser.cpp create mode 100644 dom/media/mp3/MP3FrameParser.h create mode 100644 dom/media/mp3/moz.build create mode 100644 dom/media/mp4/Atom.h create mode 100644 dom/media/mp4/AtomType.h create mode 100644 dom/media/mp4/Box.cpp create mode 100644 dom/media/mp4/Box.h create mode 100644 dom/media/mp4/BufferStream.cpp create mode 100644 dom/media/mp4/BufferStream.h create mode 100644 dom/media/mp4/ByteStream.h create mode 100644 dom/media/mp4/DecoderData.cpp create mode 100644 dom/media/mp4/DecoderData.h create mode 100644 dom/media/mp4/MP4Decoder.cpp create mode 100644 dom/media/mp4/MP4Decoder.h create mode 100644 dom/media/mp4/MP4Demuxer.cpp create mode 100644 dom/media/mp4/MP4Demuxer.h create mode 100644 dom/media/mp4/MP4Interval.h create mode 100644 dom/media/mp4/MP4Metadata.cpp create mode 100644 dom/media/mp4/MP4Metadata.h create mode 100644 dom/media/mp4/MoofParser.cpp create mode 100644 
dom/media/mp4/MoofParser.h create mode 100644 dom/media/mp4/ResourceStream.cpp create mode 100644 dom/media/mp4/ResourceStream.h create mode 100644 dom/media/mp4/SampleIterator.cpp create mode 100644 dom/media/mp4/SampleIterator.h create mode 100644 dom/media/mp4/SinfParser.cpp create mode 100644 dom/media/mp4/SinfParser.h create mode 100644 dom/media/mp4/moz.build create mode 100644 dom/media/nsIAudioDeviceInfo.idl create mode 100644 dom/media/nsIDocumentActivity.h create mode 100644 dom/media/nsIMediaDevice.idl create mode 100644 dom/media/nsIMediaManager.idl create mode 100644 dom/media/ogg/OggCodecState.cpp create mode 100644 dom/media/ogg/OggCodecState.h create mode 100644 dom/media/ogg/OggCodecStore.cpp create mode 100644 dom/media/ogg/OggCodecStore.h create mode 100644 dom/media/ogg/OggDecoder.cpp create mode 100644 dom/media/ogg/OggDecoder.h create mode 100644 dom/media/ogg/OggDemuxer.cpp create mode 100644 dom/media/ogg/OggDemuxer.h create mode 100644 dom/media/ogg/OggRLBox.h create mode 100644 dom/media/ogg/OggRLBoxTypes.h create mode 100644 dom/media/ogg/OggWriter.cpp create mode 100644 dom/media/ogg/OggWriter.h create mode 100644 dom/media/ogg/OpusParser.cpp create mode 100644 dom/media/ogg/OpusParser.h create mode 100644 dom/media/ogg/moz.build create mode 100644 dom/media/platforms/AllocationPolicy.cpp create mode 100644 dom/media/platforms/AllocationPolicy.h create mode 100644 dom/media/platforms/MediaCodecsSupport.cpp create mode 100644 dom/media/platforms/MediaCodecsSupport.h create mode 100644 dom/media/platforms/MediaTelemetryConstants.h create mode 100644 dom/media/platforms/PDMFactory.cpp create mode 100644 dom/media/platforms/PDMFactory.h create mode 100644 dom/media/platforms/PEMFactory.cpp create mode 100644 dom/media/platforms/PEMFactory.h create mode 100644 dom/media/platforms/PlatformDecoderModule.cpp create mode 100644 dom/media/platforms/PlatformDecoderModule.h create mode 100644 dom/media/platforms/PlatformEncoderModule.cpp create mode 100644 dom/media/platforms/PlatformEncoderModule.h create mode 100644 dom/media/platforms/ReorderQueue.h create mode 100644 dom/media/platforms/SimpleMap.h create mode 100644 dom/media/platforms/agnostic/AOMDecoder.cpp create mode 100644 dom/media/platforms/agnostic/AOMDecoder.h create mode 100644 dom/media/platforms/agnostic/AgnosticDecoderModule.cpp create mode 100644 dom/media/platforms/agnostic/AgnosticDecoderModule.h create mode 100644 dom/media/platforms/agnostic/BlankDecoderModule.cpp create mode 100644 dom/media/platforms/agnostic/BlankDecoderModule.h create mode 100644 dom/media/platforms/agnostic/DAV1DDecoder.cpp create mode 100644 dom/media/platforms/agnostic/DAV1DDecoder.h create mode 100644 dom/media/platforms/agnostic/DummyMediaDataDecoder.cpp create mode 100644 dom/media/platforms/agnostic/DummyMediaDataDecoder.h create mode 100644 dom/media/platforms/agnostic/NullDecoderModule.cpp create mode 100644 dom/media/platforms/agnostic/TheoraDecoder.cpp create mode 100644 dom/media/platforms/agnostic/TheoraDecoder.h create mode 100644 dom/media/platforms/agnostic/VPXDecoder.cpp create mode 100644 dom/media/platforms/agnostic/VPXDecoder.h create mode 100644 dom/media/platforms/agnostic/bytestreams/Adts.cpp create mode 100644 dom/media/platforms/agnostic/bytestreams/Adts.h create mode 100644 dom/media/platforms/agnostic/bytestreams/AnnexB.cpp create mode 100644 dom/media/platforms/agnostic/bytestreams/AnnexB.h create mode 100644 dom/media/platforms/agnostic/bytestreams/ByteStreamsUtils.h create mode 100644 
dom/media/platforms/agnostic/bytestreams/H264.cpp create mode 100644 dom/media/platforms/agnostic/bytestreams/H264.h create mode 100644 dom/media/platforms/agnostic/bytestreams/H265.cpp create mode 100644 dom/media/platforms/agnostic/bytestreams/H265.h create mode 100644 dom/media/platforms/agnostic/bytestreams/gtest/TestByteStreams.cpp create mode 100644 dom/media/platforms/agnostic/bytestreams/gtest/moz.build create mode 100644 dom/media/platforms/agnostic/bytestreams/moz.build create mode 100644 dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.cpp create mode 100644 dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.h create mode 100644 dom/media/platforms/agnostic/eme/DecryptThroughputLimit.h create mode 100644 dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp create mode 100644 dom/media/platforms/agnostic/eme/EMEDecoderModule.h create mode 100644 dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp create mode 100644 dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h create mode 100644 dom/media/platforms/agnostic/eme/moz.build create mode 100644 dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp create mode 100644 dom/media/platforms/agnostic/gmp/GMPDecoderModule.h create mode 100644 dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp create mode 100644 dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h create mode 100644 dom/media/platforms/agnostic/gmp/moz.build create mode 100644 dom/media/platforms/android/AndroidDataEncoder.cpp create mode 100644 dom/media/platforms/android/AndroidDataEncoder.h create mode 100644 dom/media/platforms/android/AndroidDecoderModule.cpp create mode 100644 dom/media/platforms/android/AndroidDecoderModule.h create mode 100644 dom/media/platforms/android/AndroidEncoderModule.cpp create mode 100644 dom/media/platforms/android/AndroidEncoderModule.h create mode 100644 dom/media/platforms/android/JavaCallbacksSupport.h create mode 100644 dom/media/platforms/android/RemoteDataDecoder.cpp create mode 100644 dom/media/platforms/android/RemoteDataDecoder.h create mode 100644 dom/media/platforms/apple/AppleATDecoder.cpp create mode 100644 dom/media/platforms/apple/AppleATDecoder.h create mode 100644 dom/media/platforms/apple/AppleDecoderModule.cpp create mode 100644 dom/media/platforms/apple/AppleDecoderModule.h create mode 100644 dom/media/platforms/apple/AppleEncoderModule.cpp create mode 100644 dom/media/platforms/apple/AppleEncoderModule.h create mode 100644 dom/media/platforms/apple/AppleUtils.h create mode 100644 dom/media/platforms/apple/AppleVTDecoder.cpp create mode 100644 dom/media/platforms/apple/AppleVTDecoder.h create mode 100644 dom/media/platforms/apple/AppleVTEncoder.cpp create mode 100644 dom/media/platforms/apple/AppleVTEncoder.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp create mode 100644 dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp create mode 100644 dom/media/platforms/ffmpeg/FFmpegDataDecoder.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegDecoderModule.cpp create mode 100644 dom/media/platforms/ffmpeg/FFmpegDecoderModule.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp create mode 100644 dom/media/platforms/ffmpeg/FFmpegEncoderModule.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp create mode 100644 dom/media/platforms/ffmpeg/FFmpegLibWrapper.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegLibs.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegLog.h 
create mode 100644 dom/media/platforms/ffmpeg/FFmpegRDFTTypes.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp create mode 100644 dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp create mode 100644 dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp create mode 100644 dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h create mode 100644 dom/media/platforms/ffmpeg/FFmpegVideoFramePool.cpp create mode 100644 dom/media/platforms/ffmpeg/FFmpegVideoFramePool.h create mode 100644 dom/media/platforms/ffmpeg/README_mozilla create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1 create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avcodec.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avfft.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vaapi.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vdpau.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/version.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/attributes.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avconfig.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avutil.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/buffer.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/channel_layout.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/common.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/cpu.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/dict.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/error.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/frame.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/intfloat.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/log.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/macros.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mathematics.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mem.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/pixfmt.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/rational.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/samplefmt.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/version.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg57/moz.build create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/COPYING.LGPLv2.1 create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/avcodec.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/avfft.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/bsf.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/codec.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/codec_desc.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/codec_id.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/codec_par.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/packet.h 
create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/vaapi.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/vdpau.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/version.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/attributes.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/avconfig.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/avutil.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/buffer.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/channel_layout.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/common.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/cpu.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/dict.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/error.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/frame.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/hwcontext.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/hwcontext_drm.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/hwcontext_vaapi.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/intfloat.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/log.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/macros.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/mathematics.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/mem.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/pixfmt.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/rational.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/samplefmt.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/version.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg58/moz.build create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/COPYING.LGPLv2.1 create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/avcodec.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/avfft.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/bsf.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/codec.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/codec_desc.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/codec_id.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/codec_par.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/defs.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/packet.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/vdpau.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavcodec/version.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/attributes.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/avconfig.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/avutil.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/buffer.h create mode 100644 
dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/channel_layout.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/common.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/cpu.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/dict.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/error.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/frame.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/hwcontext.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/hwcontext_drm.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/hwcontext_vaapi.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/intfloat.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/log.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/macros.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/mathematics.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/mem.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/pixfmt.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/rational.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/samplefmt.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/include/libavutil/version.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg59/moz.build create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/COPYING.LGPLv2.1 create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avcodec.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avdct.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/avfft.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/bsf.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_desc.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_id.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/codec_par.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/defs.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/packet.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/vdpau.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/version.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavcodec/version_major.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/attributes.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/avconfig.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/avutil.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/buffer.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/channel_layout.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/common.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/cpu.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/dict.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/error.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/frame.h create 
mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/hwcontext.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/hwcontext_drm.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/hwcontext_vaapi.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/intfloat.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/log.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/macros.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/mathematics.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/mem.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/pixfmt.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/rational.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/samplefmt.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/include/libavutil/version.h create mode 100644 dom/media/platforms/ffmpeg/ffmpeg60/moz.build create mode 100644 dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp create mode 100644 dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h create mode 100644 dom/media/platforms/ffmpeg/ffvpx/moz.build create mode 100644 dom/media/platforms/ffmpeg/libav53/include/COPYING.LGPLv2.1 create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/avcodec.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/avfft.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/dxva2.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/old_codec_ids.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/opt.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/vaapi.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/vda.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/vdpau.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/version.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavcodec/xvmc.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/adler32.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/aes.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/attributes.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/audio_fifo.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/audioconvert.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/avassert.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/avconfig.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/avstring.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/avutil.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/base64.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/blowfish.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/bprint.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/bswap.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/common.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/cpu.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/crc.h create mode 100644 
dom/media/platforms/ffmpeg/libav53/include/libavutil/dict.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/error.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/eval.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/fifo.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/file.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/imgutils.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat_readwrite.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/intreadwrite.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/lfg.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/log.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/lzo.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/mathematics.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/md5.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/mem.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/old_pix_fmts.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/opt.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/parseutils.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/pixdesc.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/pixfmt.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/random_seed.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/rational.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/samplefmt.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/sha.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/time.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/timecode.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/timestamp.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/version.h create mode 100644 dom/media/platforms/ffmpeg/libav53/include/libavutil/xtea.h create mode 100644 dom/media/platforms/ffmpeg/libav53/moz.build create mode 100644 dom/media/platforms/ffmpeg/libav54/include/COPYING.LGPLv2.1 create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavcodec/avcodec.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavcodec/avfft.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavcodec/dxva2.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavcodec/old_codec_ids.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavcodec/vaapi.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavcodec/vda.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavcodec/vdpau.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavcodec/version.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavcodec/xvmc.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/adler32.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/aes.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/attributes.h create mode 100644 
dom/media/platforms/ffmpeg/libav54/include/libavutil/audio_fifo.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/audioconvert.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/avassert.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/avconfig.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/avstring.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/avutil.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/base64.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/blowfish.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/bswap.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/channel_layout.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/common.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/cpu.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/crc.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/dict.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/error.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/eval.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/fifo.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/file.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/imgutils.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat_readwrite.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/intreadwrite.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/lfg.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/log.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/lzo.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/mathematics.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/md5.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/mem.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/old_pix_fmts.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/opt.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/parseutils.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/pixdesc.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/pixfmt.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/random_seed.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/rational.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/samplefmt.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/sha.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/time.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/version.h create mode 100644 dom/media/platforms/ffmpeg/libav54/include/libavutil/xtea.h create mode 100644 dom/media/platforms/ffmpeg/libav54/moz.build create mode 100644 dom/media/platforms/ffmpeg/libav55/include/COPYING.LGPLv2.1 create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavcodec/avcodec.h create mode 100644 
dom/media/platforms/ffmpeg/libav55/include/libavcodec/avfft.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavcodec/dxva2.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavcodec/vaapi.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavcodec/vda.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavcodec/vdpau.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavcodec/version.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavcodec/xvmc.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/adler32.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/aes.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/attributes.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/audio_fifo.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/audioconvert.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/avassert.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/avconfig.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/avstring.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/avutil.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/base64.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/blowfish.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/bswap.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/buffer.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/channel_layout.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/common.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/cpu.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/crc.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/dict.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/downmix_info.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/error.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/eval.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/fifo.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/file.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/frame.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/hmac.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/imgutils.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/intfloat.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/intreadwrite.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/lfg.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/log.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/lzo.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/macros.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/mathematics.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/md5.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/mem.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/old_pix_fmts.h create mode 100644 
dom/media/platforms/ffmpeg/libav55/include/libavutil/opt.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/parseutils.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/pixdesc.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/pixfmt.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/random_seed.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/rational.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/samplefmt.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/sha.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/stereo3d.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/time.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/version.h create mode 100644 dom/media/platforms/ffmpeg/libav55/include/libavutil/xtea.h create mode 100644 dom/media/platforms/ffmpeg/libav55/moz.build create mode 100644 dom/media/platforms/ffmpeg/moz.build create mode 100644 dom/media/platforms/moz.build create mode 100644 dom/media/platforms/omx/OmxCoreLibLinker.cpp create mode 100644 dom/media/platforms/omx/OmxCoreLibLinker.h create mode 100644 dom/media/platforms/omx/OmxDataDecoder.cpp create mode 100644 dom/media/platforms/omx/OmxDataDecoder.h create mode 100644 dom/media/platforms/omx/OmxDecoderModule.cpp create mode 100644 dom/media/platforms/omx/OmxDecoderModule.h create mode 100644 dom/media/platforms/omx/OmxFunctionList.h create mode 100644 dom/media/platforms/omx/OmxPlatformLayer.cpp create mode 100644 dom/media/platforms/omx/OmxPlatformLayer.h create mode 100644 dom/media/platforms/omx/OmxPromiseLayer.cpp create mode 100644 dom/media/platforms/omx/OmxPromiseLayer.h create mode 100644 dom/media/platforms/omx/PureOmxPlatformLayer.cpp create mode 100644 dom/media/platforms/omx/PureOmxPlatformLayer.h create mode 100644 dom/media/platforms/omx/moz.build create mode 100644 dom/media/platforms/wmf/DXVA2Manager.cpp create mode 100644 dom/media/platforms/wmf/DXVA2Manager.h create mode 100644 dom/media/platforms/wmf/MFCDMExtra.h create mode 100644 dom/media/platforms/wmf/MFCDMProxy.cpp create mode 100644 dom/media/platforms/wmf/MFCDMProxy.h create mode 100644 dom/media/platforms/wmf/MFCDMSession.cpp create mode 100644 dom/media/platforms/wmf/MFCDMSession.h create mode 100644 dom/media/platforms/wmf/MFContentProtectionManager.cpp create mode 100644 dom/media/platforms/wmf/MFContentProtectionManager.h create mode 100644 dom/media/platforms/wmf/MFMediaEngineAudioStream.cpp create mode 100644 dom/media/platforms/wmf/MFMediaEngineAudioStream.h create mode 100644 dom/media/platforms/wmf/MFMediaEngineDecoderModule.cpp create mode 100644 dom/media/platforms/wmf/MFMediaEngineDecoderModule.h create mode 100644 dom/media/platforms/wmf/MFMediaEngineExtension.cpp create mode 100644 dom/media/platforms/wmf/MFMediaEngineExtension.h create mode 100644 dom/media/platforms/wmf/MFMediaEngineExtra.h create mode 100644 dom/media/platforms/wmf/MFMediaEngineNotify.cpp create mode 100644 dom/media/platforms/wmf/MFMediaEngineNotify.h create mode 100644 dom/media/platforms/wmf/MFMediaEngineStream.cpp create mode 100644 dom/media/platforms/wmf/MFMediaEngineStream.h create mode 100644 dom/media/platforms/wmf/MFMediaEngineVideoStream.cpp create mode 100644 dom/media/platforms/wmf/MFMediaEngineVideoStream.h create mode 100644 dom/media/platforms/wmf/MFMediaSource.cpp create mode 100644 
dom/media/platforms/wmf/MFMediaSource.h create mode 100644 dom/media/platforms/wmf/MFPMPHostWrapper.cpp create mode 100644 dom/media/platforms/wmf/MFPMPHostWrapper.h create mode 100644 dom/media/platforms/wmf/MFTDecoder.cpp create mode 100644 dom/media/platforms/wmf/MFTDecoder.h create mode 100644 dom/media/platforms/wmf/MFTEncoder.cpp create mode 100644 dom/media/platforms/wmf/MFTEncoder.h create mode 100644 dom/media/platforms/wmf/WMF.h create mode 100644 dom/media/platforms/wmf/WMFAudioMFTManager.cpp create mode 100644 dom/media/platforms/wmf/WMFAudioMFTManager.h create mode 100644 dom/media/platforms/wmf/WMFDataEncoderUtils.h create mode 100644 dom/media/platforms/wmf/WMFDecoderModule.cpp create mode 100644 dom/media/platforms/wmf/WMFDecoderModule.h create mode 100644 dom/media/platforms/wmf/WMFEncoderModule.cpp create mode 100644 dom/media/platforms/wmf/WMFEncoderModule.h create mode 100644 dom/media/platforms/wmf/WMFMediaDataDecoder.cpp create mode 100644 dom/media/platforms/wmf/WMFMediaDataDecoder.h create mode 100644 dom/media/platforms/wmf/WMFMediaDataEncoder.h create mode 100644 dom/media/platforms/wmf/WMFUtils.cpp create mode 100644 dom/media/platforms/wmf/WMFUtils.h create mode 100644 dom/media/platforms/wmf/WMFVideoMFTManager.cpp create mode 100644 dom/media/platforms/wmf/WMFVideoMFTManager.h create mode 100644 dom/media/platforms/wmf/gtest/TestCanCreateMFTDecoder.cpp create mode 100644 dom/media/platforms/wmf/gtest/moz.build create mode 100644 dom/media/platforms/wmf/metrics.yaml create mode 100644 dom/media/platforms/wmf/moz.build create mode 100644 dom/media/platforms/wrappers/AudioTrimmer.cpp create mode 100644 dom/media/platforms/wrappers/AudioTrimmer.h create mode 100644 dom/media/platforms/wrappers/MediaChangeMonitor.cpp create mode 100644 dom/media/platforms/wrappers/MediaChangeMonitor.h create mode 100644 dom/media/platforms/wrappers/MediaDataDecoderProxy.cpp create mode 100644 dom/media/platforms/wrappers/MediaDataDecoderProxy.h create mode 100644 dom/media/systemservices/CamerasChild.cpp create mode 100644 dom/media/systemservices/CamerasChild.h create mode 100644 dom/media/systemservices/CamerasParent.cpp create mode 100644 dom/media/systemservices/CamerasParent.h create mode 100644 dom/media/systemservices/CamerasTypes.cpp create mode 100644 dom/media/systemservices/CamerasTypes.h create mode 100644 dom/media/systemservices/MediaChild.cpp create mode 100644 dom/media/systemservices/MediaChild.h create mode 100644 dom/media/systemservices/MediaParent.cpp create mode 100644 dom/media/systemservices/MediaParent.h create mode 100644 dom/media/systemservices/MediaSystemResourceClient.cpp create mode 100644 dom/media/systemservices/MediaSystemResourceClient.h create mode 100644 dom/media/systemservices/MediaSystemResourceManager.cpp create mode 100644 dom/media/systemservices/MediaSystemResourceManager.h create mode 100644 dom/media/systemservices/MediaSystemResourceManagerChild.cpp create mode 100644 dom/media/systemservices/MediaSystemResourceManagerChild.h create mode 100644 dom/media/systemservices/MediaSystemResourceManagerParent.cpp create mode 100644 dom/media/systemservices/MediaSystemResourceManagerParent.h create mode 100644 dom/media/systemservices/MediaSystemResourceMessageUtils.h create mode 100644 dom/media/systemservices/MediaSystemResourceService.cpp create mode 100644 dom/media/systemservices/MediaSystemResourceService.h create mode 100644 dom/media/systemservices/MediaSystemResourceTypes.h create mode 100644 dom/media/systemservices/MediaTaskUtils.h 
create mode 100644 dom/media/systemservices/MediaUtils.cpp create mode 100644 dom/media/systemservices/MediaUtils.h create mode 100644 dom/media/systemservices/OSXRunLoopSingleton.cpp create mode 100644 dom/media/systemservices/OSXRunLoopSingleton.h create mode 100644 dom/media/systemservices/PCameras.ipdl create mode 100644 dom/media/systemservices/PMedia.ipdl create mode 100644 dom/media/systemservices/PMediaSystemResourceManager.ipdl create mode 100644 dom/media/systemservices/ShmemPool.cpp create mode 100644 dom/media/systemservices/ShmemPool.h create mode 100644 dom/media/systemservices/VideoEngine.cpp create mode 100644 dom/media/systemservices/VideoEngine.h create mode 100644 dom/media/systemservices/VideoFrameUtils.cpp create mode 100644 dom/media/systemservices/VideoFrameUtils.h create mode 100644 dom/media/systemservices/android_video_capture/device_info_android.cc create mode 100644 dom/media/systemservices/android_video_capture/device_info_android.h create mode 100644 dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/CaptureCapabilityAndroid.java create mode 100644 dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java create mode 100644 dom/media/systemservices/android_video_capture/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java create mode 100644 dom/media/systemservices/android_video_capture/video_capture_android.cc create mode 100644 dom/media/systemservices/android_video_capture/video_capture_android.h create mode 100644 dom/media/systemservices/moz.build create mode 100644 dom/media/systemservices/objc_video_capture/device_info.h create mode 100644 dom/media/systemservices/objc_video_capture/device_info.mm create mode 100644 dom/media/systemservices/objc_video_capture/device_info_avfoundation.h create mode 100644 dom/media/systemservices/objc_video_capture/device_info_avfoundation.mm create mode 100644 dom/media/systemservices/objc_video_capture/device_info_objc.h create mode 100644 dom/media/systemservices/objc_video_capture/device_info_objc.mm create mode 100644 dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.h create mode 100644 dom/media/systemservices/objc_video_capture/rtc_video_capture_objc.mm create mode 100644 dom/media/systemservices/objc_video_capture/video_capture.h create mode 100644 dom/media/systemservices/objc_video_capture/video_capture.mm create mode 100644 dom/media/systemservices/objc_video_capture/video_capture_avfoundation.h create mode 100644 dom/media/systemservices/objc_video_capture/video_capture_avfoundation.mm create mode 100644 dom/media/systemservices/video_engine/desktop_capture_impl.cc create mode 100644 dom/media/systemservices/video_engine/desktop_capture_impl.h create mode 100644 dom/media/systemservices/video_engine/desktop_device_info.cc create mode 100644 dom/media/systemservices/video_engine/desktop_device_info.h create mode 100644 dom/media/systemservices/video_engine/placeholder_device_info.cc create mode 100644 dom/media/systemservices/video_engine/placeholder_device_info.h create mode 100644 dom/media/systemservices/video_engine/platform_uithread.cc create mode 100644 dom/media/systemservices/video_engine/platform_uithread.h create mode 100644 dom/media/systemservices/video_engine/tab_capturer.cc create mode 100644 dom/media/systemservices/video_engine/tab_capturer.h create mode 100644 dom/media/systemservices/video_engine/video_capture_factory.cc create mode 100644 
dom/media/systemservices/video_engine/video_capture_factory.h create mode 100644 dom/media/test/16bit_wave_extrametadata.wav create mode 100644 dom/media/test/16bit_wave_extrametadata.wav^headers^ create mode 100644 dom/media/test/320x240.ogv create mode 100644 dom/media/test/320x240.ogv^headers^ create mode 100644 dom/media/test/448636.ogv create mode 100644 dom/media/test/448636.ogv^headers^ create mode 100644 dom/media/test/A4.ogv create mode 100644 dom/media/test/A4.ogv^headers^ create mode 100644 dom/media/test/TestPatternHDR.mp4 create mode 100644 dom/media/test/VID_0001.ogg create mode 100644 dom/media/test/VID_0001.ogg^headers^ create mode 100644 dom/media/test/adts.aac create mode 100644 dom/media/test/adts.aac^headers^ create mode 100644 dom/media/test/allowed.sjs create mode 100644 dom/media/test/ambisonics.mp4 create mode 100644 dom/media/test/ambisonics.mp4^headers^ create mode 100644 dom/media/test/audio-gaps-short.ogg create mode 100644 dom/media/test/audio-gaps-short.ogg^headers^ create mode 100644 dom/media/test/audio-gaps.ogg create mode 100644 dom/media/test/audio-gaps.ogg^headers^ create mode 100644 dom/media/test/audio-overhang.ogg create mode 100644 dom/media/test/audio-overhang.ogg^headers^ create mode 100644 dom/media/test/audio.wav create mode 100644 dom/media/test/audio.wav^headers^ create mode 100644 dom/media/test/av1.mp4 create mode 100644 dom/media/test/av1.mp4^headers^ create mode 100644 dom/media/test/background_video.js create mode 100644 dom/media/test/badtags.ogg create mode 100644 dom/media/test/badtags.ogg^headers^ create mode 100644 dom/media/test/bear-640x360-a_frag-cenc-key_rotation.mp4 create mode 100644 dom/media/test/bear-640x360-v_frag-cenc-key_rotation.mp4 create mode 100644 dom/media/test/beta-phrasebook.ogg create mode 100644 dom/media/test/beta-phrasebook.ogg^headers^ create mode 100644 dom/media/test/big-buck-bunny-cenc-avc3-1.m4s create mode 100644 dom/media/test/big-buck-bunny-cenc-avc3-1.m4s^headers^ create mode 100644 dom/media/test/big-buck-bunny-cenc-avc3-init.mp4 create mode 100644 dom/media/test/big-buck-bunny-cenc-avc3-init.mp4^headers^ create mode 100644 dom/media/test/big-short.wav create mode 100644 dom/media/test/big-short.wav^headers^ create mode 100644 dom/media/test/big.wav create mode 100644 dom/media/test/big.wav^headers^ create mode 100644 dom/media/test/bipbop-cenc-audio-key1.xml create mode 100644 dom/media/test/bipbop-cenc-audio-key2.xml create mode 100644 dom/media/test/bipbop-cenc-audio1.m4s create mode 100644 dom/media/test/bipbop-cenc-audio1.m4s^headers^ create mode 100644 dom/media/test/bipbop-cenc-audio2.m4s create mode 100644 dom/media/test/bipbop-cenc-audio2.m4s^headers^ create mode 100644 dom/media/test/bipbop-cenc-audio3.m4s create mode 100644 dom/media/test/bipbop-cenc-audio3.m4s^headers^ create mode 100644 dom/media/test/bipbop-cenc-audioinit.mp4 create mode 100644 dom/media/test/bipbop-cenc-audioinit.mp4^headers^ create mode 100644 dom/media/test/bipbop-cenc-video-10s.mp4 create mode 100644 dom/media/test/bipbop-cenc-video-10s.mp4^headers^ create mode 100644 dom/media/test/bipbop-cenc-video-key1.xml create mode 100644 dom/media/test/bipbop-cenc-video-key2.xml create mode 100644 dom/media/test/bipbop-cenc-video1.m4s create mode 100644 dom/media/test/bipbop-cenc-video1.m4s^headers^ create mode 100644 dom/media/test/bipbop-cenc-video2.m4s create mode 100644 dom/media/test/bipbop-cenc-video2.m4s^headers^ create mode 100644 dom/media/test/bipbop-cenc-videoinit.mp4 create mode 100644 
dom/media/test/bipbop-cenc-videoinit.mp4^headers^ create mode 100644 dom/media/test/bipbop-cenc.sh create mode 100644 dom/media/test/bipbop-clearkey-keyrotation-clear-lead-audio.mp4 create mode 100644 dom/media/test/bipbop-clearkey-keyrotation-clear-lead-audio.mp4^headers^ create mode 100644 dom/media/test/bipbop-clearkey-keyrotation-clear-lead-video.mp4 create mode 100644 dom/media/test/bipbop-clearkey-keyrotation-clear-lead-video.mp4^headers^ create mode 100644 dom/media/test/bipbop-frag-cenc.xml create mode 100644 dom/media/test/bipbop-lateaudio.mp4 create mode 100644 dom/media/test/bipbop-lateaudio.mp4^headers^ create mode 100644 dom/media/test/bipbop-no-edts.mp4 create mode 100644 dom/media/test/bipbop.mp4 create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-1.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-2.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-3.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-4.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-init.mp4 create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-1.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-2.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-3.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-4.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-init.mp4 create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-audio-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-video-key1-1.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-video-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-video-key1-init.mp4 create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-video-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-video-key2-1.m4s create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-video-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-video-key2-init.mp4 create mode 100644 dom/media/test/bipbop_225w_175kbps-cenc-video-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_225w_175kbps.mp4 create mode 100644 dom/media/test/bipbop_225w_175kbps.mp4^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key1-1.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key1-2.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key1-3.m4s create mode 100644 
dom/media/test/bipbop_300_215kbps-cenc-audio-key1-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key1-4.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key1-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key1-init.mp4 create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-1.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-2.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-3.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-4.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-init.mp4 create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-audio-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key1-1.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key1-2.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key1-init.mp4 create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key2-1.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key2-2.m4s create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key2-init.mp4 create mode 100644 dom/media/test/bipbop_300_215kbps-cenc-video-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_300_215kbps.mp4 create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-1.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-2.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-3.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-4.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-init.mp4 create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-1.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-2.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-3.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-3.m4s^headers^ create mode 100644 
dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-4.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-init.mp4 create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-audio-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key1-1.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key1-2.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key1-init.mp4 create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key2-1.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key2-2.m4s create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key2-init.mp4 create mode 100644 dom/media/test/bipbop_300wp_227kbps-cenc-video-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_300wp_227kbps.mp4 create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-1.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-2.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-3.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-4.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-init.mp4 create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-1.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-2.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-3.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-4.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-init.mp4 create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-audio-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-video-key1-1.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-video-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-video-key1-init.mp4 create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-video-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-video-key2-1.m4s create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-video-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-video-key2-init.mp4 
create mode 100644 dom/media/test/bipbop_360w_253kbps-cenc-video-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-clearkey-audio.webm create mode 100644 dom/media/test/bipbop_360w_253kbps-clearkey-audio.webm^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-clearkey-video-vp8.webm create mode 100644 dom/media/test/bipbop_360w_253kbps-clearkey-video-vp8.webm^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps-clearkey-video-vp9.webm create mode 100644 dom/media/test/bipbop_360w_253kbps-clearkey-video-vp9.webm^headers^ create mode 100644 dom/media/test/bipbop_360w_253kbps.mp4 create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-1.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-2.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-3.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-4.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-init.mp4 create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-1.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-2.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-3.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-4.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-init.mp4 create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-audio-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key1-1.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key1-2.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key1-init.mp4 create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key2-1.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key2-2.m4s create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key2-init.mp4 create mode 100644 dom/media/test/bipbop_480_624kbps-cenc-video-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480_624kbps.mp4 create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key1-1.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key1-2.m4s create mode 100644 
dom/media/test/bipbop_480_959kbps-cenc-audio-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key1-3.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key1-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key1-4.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key1-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key1-init.mp4 create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-1.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-2.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-3.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-4.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-init.mp4 create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-audio-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key1-1.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key1-2.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key1-init.mp4 create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key2-1.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key2-2.m4s create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key2-init.mp4 create mode 100644 dom/media/test/bipbop_480_959kbps-cenc-video-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480_959kbps.mp4 create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-1.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-2.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-3.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-4.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-init.mp4 create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-1.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-2.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-2.m4s^headers^ create mode 100644 
dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-3.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-4.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-init.mp4 create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-audio-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key1-1.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key1-2.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key1-init.mp4 create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key2-1.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key2-2.m4s create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key2-init.mp4 create mode 100644 dom/media/test/bipbop_480wp_1001kbps-cenc-video-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480wp_1001kbps.mp4 create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-1.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-2.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-3.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-4.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-init.mp4 create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-1.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-2.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-3.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-3.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-4.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-4.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-init.mp4 create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-audio-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key1-1.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key1-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key1-2.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key1-2.m4s^headers^ create mode 100644 
dom/media/test/bipbop_480wp_663kbps-cenc-video-key1-init.mp4 create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key1-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key2-1.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key2-1.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key2-2.m4s create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key2-2.m4s^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key2-init.mp4 create mode 100644 dom/media/test/bipbop_480wp_663kbps-cenc-video-key2-init.mp4^headers^ create mode 100644 dom/media/test/bipbop_480wp_663kbps.mp4 create mode 100644 dom/media/test/bipbop_audio_aac_22.05k.mp4 create mode 100644 dom/media/test/bipbop_audio_aac_22.05k.mp4^headers^ create mode 100644 dom/media/test/bipbop_audio_aac_44.1k.mp4 create mode 100644 dom/media/test/bipbop_audio_aac_44.1k.mp4^headers^ create mode 100644 dom/media/test/bipbop_audio_aac_48k.mp4 create mode 100644 dom/media/test/bipbop_audio_aac_48k.mp4^headers^ create mode 100644 dom/media/test/bipbop_audio_aac_88.2k.mp4 create mode 100644 dom/media/test/bipbop_audio_aac_88.2k.mp4^headers^ create mode 100644 dom/media/test/bipbop_audio_aac_8k.mp4 create mode 100644 dom/media/test/bipbop_audio_aac_8k.mp4^headers^ create mode 100644 dom/media/test/bipbop_audio_aac_96k.mp4 create mode 100644 dom/media/test/bipbop_audio_aac_96k.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_10_0_audio_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_10_0_audio_1.m4s^headers^ create mode 100644 dom/media/test/bipbop_cbcs_10_0_audio_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_10_0_audio_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_10_0_video_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_10_0_video_1.m4s^headers^ create mode 100644 dom/media/test/bipbop_cbcs_10_0_video_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_10_0_video_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_1_9_audio_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_1_9_audio_1.m4s^headers^ create mode 100644 dom/media/test/bipbop_cbcs_1_9_audio_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_1_9_audio_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_1_9_video_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_1_9_video_1.m4s^headers^ create mode 100644 dom/media/test/bipbop_cbcs_1_9_video_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_1_9_video_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_5_5_audio_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_5_5_audio_1.m4s^headers^ create mode 100644 dom/media/test/bipbop_cbcs_5_5_audio_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_5_5_audio_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_5_5_video_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_5_5_video_1.m4s^headers^ create mode 100644 dom/media/test/bipbop_cbcs_5_5_video_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_5_5_video_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_7_7_audio_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_7_7_audio_1.m4s^headers^ create mode 100644 dom/media/test/bipbop_cbcs_7_7_audio_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_7_7_audio_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_7_7_video_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_7_7_video_1.m4s^headers^ create mode 100644 
dom/media/test/bipbop_cbcs_7_7_video_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_7_7_video_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_9_8_audio_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_9_8_audio_1.m4s^headers^ create mode 100644 dom/media/test/bipbop_cbcs_9_8_audio_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_9_8_audio_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_cbcs_9_8_video_1.m4s create mode 100644 dom/media/test/bipbop_cbcs_9_8_video_1.m4s^headers^ create mode 100644 dom/media/test/bipbop_cbcs_9_8_video_init.mp4 create mode 100644 dom/media/test/bipbop_cbcs_9_8_video_init.mp4^headers^ create mode 100644 dom/media/test/bipbop_short_pixel_metadata_bigger_than_in_stream_vp8.webm create mode 100644 dom/media/test/bipbop_short_pixel_metadata_bigger_than_in_stream_vp8.webm^headers^ create mode 100644 dom/media/test/bipbop_short_pixel_metadata_narrower_than_in_stream_vp8.webm create mode 100644 dom/media/test/bipbop_short_pixel_metadata_narrower_than_in_stream_vp8.webm^headers^ create mode 100644 dom/media/test/bipbop_short_pixel_metadata_smaller_than_in_stream_vp8.webm create mode 100644 dom/media/test/bipbop_short_pixel_metadata_smaller_than_in_stream_vp8.webm^headers^ create mode 100644 dom/media/test/bipbop_short_vp8.webm create mode 100644 dom/media/test/bipbop_short_vp8.webm^headers^ create mode 100644 dom/media/test/black100x100-aspect3to2.ogv create mode 100644 dom/media/test/black100x100-aspect3to2.ogv^headers^ create mode 100644 dom/media/test/bogus.duh create mode 100644 dom/media/test/bogus.ogv create mode 100644 dom/media/test/bogus.ogv^headers^ create mode 100644 dom/media/test/bogus.wav create mode 100644 dom/media/test/bogus.wav^headers^ create mode 100644 dom/media/test/browser/browser.toml create mode 100644 dom/media/test/browser/browser_encrypted_play_time_telemetry.js create mode 100644 dom/media/test/browser/browser_partial.js create mode 100644 dom/media/test/browser/browser_tab_visibility_and_play_time.js create mode 100644 dom/media/test/browser/browser_telemetry_video_hardware_decoding_support.js create mode 100644 dom/media/test/browser/file_empty_page.html create mode 100644 dom/media/test/browser/file_media.html create mode 100644 dom/media/test/browser/wmfme/browser.toml create mode 100644 dom/media/test/browser/wmfme/browser_wmfme_crash.js create mode 100644 dom/media/test/browser/wmfme/browser_wmfme_max_crashes.js create mode 100644 dom/media/test/browser/wmfme/file_video.html create mode 100644 dom/media/test/browser/wmfme/head.js create mode 100644 dom/media/test/bug1066943.webm create mode 100644 dom/media/test/bug1066943.webm^headers^ create mode 100644 dom/media/test/bug1301226-odd.wav create mode 100644 dom/media/test/bug1301226-odd.wav^headers^ create mode 100644 dom/media/test/bug1301226.wav create mode 100644 dom/media/test/bug1301226.wav^headers^ create mode 100644 dom/media/test/bug1377278.webm create mode 100644 dom/media/test/bug1377278.webm^headers^ create mode 100644 dom/media/test/bug1535980.webm create mode 100644 dom/media/test/bug1535980.webm^headers^ create mode 100644 dom/media/test/bug1799787.webm create mode 100644 dom/media/test/bug1799787.webm^headers^ create mode 100644 dom/media/test/bug461281.ogg create mode 100644 dom/media/test/bug461281.ogg^headers^ create mode 100644 dom/media/test/bug482461-theora.ogv create mode 100644 dom/media/test/bug482461-theora.ogv^headers^ create mode 100644 dom/media/test/bug482461.ogv create mode 100644 
dom/media/test/bug482461.ogv^headers^ create mode 100644 dom/media/test/bug495129.ogv create mode 100644 dom/media/test/bug495129.ogv^headers^ create mode 100644 dom/media/test/bug495794.ogg create mode 100644 dom/media/test/bug495794.ogg^headers^ create mode 100644 dom/media/test/bug498380.ogv create mode 100644 dom/media/test/bug498380.ogv^headers^ create mode 100644 dom/media/test/bug498855-1.ogv create mode 100644 dom/media/test/bug498855-1.ogv^headers^ create mode 100644 dom/media/test/bug498855-2.ogv create mode 100644 dom/media/test/bug498855-2.ogv^headers^ create mode 100644 dom/media/test/bug498855-3.ogv create mode 100644 dom/media/test/bug498855-3.ogv^headers^ create mode 100644 dom/media/test/bug499519.ogv create mode 100644 dom/media/test/bug499519.ogv^headers^ create mode 100644 dom/media/test/bug500311.ogv create mode 100644 dom/media/test/bug500311.ogv^headers^ create mode 100644 dom/media/test/bug501279.ogg create mode 100644 dom/media/test/bug501279.ogg^headers^ create mode 100644 dom/media/test/bug504613.ogv create mode 100644 dom/media/test/bug504613.ogv^headers^ create mode 100644 dom/media/test/bug504644.ogv create mode 100644 dom/media/test/bug504644.ogv^headers^ create mode 100644 dom/media/test/bug504843.ogv create mode 100644 dom/media/test/bug504843.ogv^headers^ create mode 100644 dom/media/test/bug506094.ogv create mode 100644 dom/media/test/bug506094.ogv^headers^ create mode 100644 dom/media/test/bug516323.indexed.ogv create mode 100644 dom/media/test/bug516323.indexed.ogv^headers^ create mode 100644 dom/media/test/bug516323.ogv create mode 100644 dom/media/test/bug516323.ogv^headers^ create mode 100644 dom/media/test/bug520493.ogg create mode 100644 dom/media/test/bug520493.ogg^headers^ create mode 100644 dom/media/test/bug520500.ogg create mode 100644 dom/media/test/bug520500.ogg^headers^ create mode 100644 dom/media/test/bug520908.ogv create mode 100644 dom/media/test/bug520908.ogv^headers^ create mode 100644 dom/media/test/bug523816.ogv create mode 100644 dom/media/test/bug523816.ogv^headers^ create mode 100644 dom/media/test/bug533822.ogg create mode 100644 dom/media/test/bug533822.ogg^headers^ create mode 100644 dom/media/test/bug556821.ogv create mode 100644 dom/media/test/bug556821.ogv^headers^ create mode 100644 dom/media/test/bug557094.ogv create mode 100644 dom/media/test/bug557094.ogv^headers^ create mode 100644 dom/media/test/bug604067.webm create mode 100644 dom/media/test/bug604067.webm^headers^ create mode 100644 dom/media/test/bunny.webm create mode 100644 dom/media/test/bunny_hd_5s.mp4 create mode 100644 dom/media/test/can_play_type_dash.js create mode 100644 dom/media/test/can_play_type_ogg.js create mode 100644 dom/media/test/can_play_type_wave.js create mode 100644 dom/media/test/can_play_type_webm.js create mode 100644 dom/media/test/cancellable_request.sjs create mode 100644 dom/media/test/chain.ogg create mode 100644 dom/media/test/chain.ogg^headers^ create mode 100644 dom/media/test/chain.ogv create mode 100644 dom/media/test/chain.ogv^headers^ create mode 100644 dom/media/test/chain.opus create mode 100644 dom/media/test/chain.opus^headers^ create mode 100644 dom/media/test/chained-audio-video.ogg create mode 100644 dom/media/test/chained-audio-video.ogg^headers^ create mode 100644 dom/media/test/chained-video.ogv create mode 100644 dom/media/test/chained-video.ogv^headers^ create mode 100644 dom/media/test/chrome/chrome.toml create mode 100644 dom/media/test/chrome/test_accumulated_play_time.html create mode 100644 
dom/media/test/chrome/test_telemetry_source_buffer_type.html create mode 100644 dom/media/test/chromeHelper.js create mode 100644 dom/media/test/cloneElementVisually_helpers.js create mode 100644 dom/media/test/contentType.sjs create mode 100644 dom/media/test/crashtests/0-timescale.html create mode 100644 dom/media/test/crashtests/0-timescale.mp4 create mode 100644 dom/media/test/crashtests/1012609.html create mode 100644 dom/media/test/crashtests/1015662.html create mode 100644 dom/media/test/crashtests/1028458.html create mode 100644 dom/media/test/crashtests/1041466.html create mode 100644 dom/media/test/crashtests/1045650.html create mode 100644 dom/media/test/crashtests/1080986.html create mode 100644 dom/media/test/crashtests/1080986.wav create mode 100644 dom/media/test/crashtests/1122218.html create mode 100644 dom/media/test/crashtests/1127188.html create mode 100644 dom/media/test/crashtests/1157994.html create mode 100644 dom/media/test/crashtests/1158427.html create mode 100644 dom/media/test/crashtests/1180881.html create mode 100644 dom/media/test/crashtests/1180881.webm create mode 100644 dom/media/test/crashtests/1185176.html create mode 100644 dom/media/test/crashtests/1185191.html create mode 100644 dom/media/test/crashtests/1185192.html create mode 100644 dom/media/test/crashtests/1197935.html create mode 100644 dom/media/test/crashtests/1197935.mp4 create mode 100644 dom/media/test/crashtests/1223670.html create mode 100644 dom/media/test/crashtests/1236639.html create mode 100644 dom/media/test/crashtests/1236639.mp3 create mode 100644 dom/media/test/crashtests/1257700.html create mode 100644 dom/media/test/crashtests/1257700.webm create mode 100644 dom/media/test/crashtests/1267263.html create mode 100644 dom/media/test/crashtests/1270303.html create mode 100644 dom/media/test/crashtests/1270303.webm create mode 100644 dom/media/test/crashtests/1291702.html create mode 100644 dom/media/test/crashtests/1368490.html create mode 100644 dom/media/test/crashtests/1378826.html create mode 100644 dom/media/test/crashtests/1384248.html create mode 100644 dom/media/test/crashtests/1388372.html create mode 100644 dom/media/test/crashtests/1389304.html create mode 100644 dom/media/test/crashtests/1389304.mp4 create mode 100644 dom/media/test/crashtests/1393272.webm create mode 100644 dom/media/test/crashtests/1411322.html create mode 100644 dom/media/test/crashtests/1414444.mp4 create mode 100644 dom/media/test/crashtests/1450845.html create mode 100644 dom/media/test/crashtests/1489160.html create mode 100644 dom/media/test/crashtests/1494073.html create mode 100644 dom/media/test/crashtests/1517199.html create mode 100644 dom/media/test/crashtests/1526044.html create mode 100644 dom/media/test/crashtests/1530897.webm create mode 100644 dom/media/test/crashtests/1538727.html create mode 100644 dom/media/test/crashtests/1545133.html create mode 100644 dom/media/test/crashtests/1547784.html create mode 100644 dom/media/test/crashtests/1547899.html create mode 100644 dom/media/test/crashtests/1560215.html create mode 100644 dom/media/test/crashtests/1569645.html create mode 100644 dom/media/test/crashtests/1575271.html create mode 100644 dom/media/test/crashtests/1577184.html create mode 100644 dom/media/test/crashtests/1587248.html create mode 100644 dom/media/test/crashtests/1594466.html create mode 100644 dom/media/test/crashtests/1601385.html create mode 100644 dom/media/test/crashtests/1601422.html create mode 100644 dom/media/test/crashtests/1604941.html create mode 100644 
dom/media/test/crashtests/1608286.html create mode 100644 dom/media/test/crashtests/1673525.html create mode 100644 dom/media/test/crashtests/1673526-1.html create mode 100644 dom/media/test/crashtests/1673526-2.html create mode 100644 dom/media/test/crashtests/1693043.html create mode 100644 dom/media/test/crashtests/1696511.html create mode 100644 dom/media/test/crashtests/1697521.html create mode 100644 dom/media/test/crashtests/1708790.html create mode 100644 dom/media/test/crashtests/1709130.html create mode 100644 dom/media/test/crashtests/1734008.html create mode 100644 dom/media/test/crashtests/1734008.webm create mode 100644 dom/media/test/crashtests/1741677.html create mode 100644 dom/media/test/crashtests/1748272.html create mode 100644 dom/media/test/crashtests/1752917.html create mode 100644 dom/media/test/crashtests/1762620.html create mode 100644 dom/media/test/crashtests/1765842.html create mode 100644 dom/media/test/crashtests/1765842.webm create mode 100644 dom/media/test/crashtests/1787281.html create mode 100644 dom/media/test/crashtests/1787281.mp4 create mode 100644 dom/media/test/crashtests/1798778.html create mode 100644 dom/media/test/crashtests/1830206.html create mode 100644 dom/media/test/crashtests/1830206.mp4 create mode 100644 dom/media/test/crashtests/1833894.mp4 create mode 100644 dom/media/test/crashtests/1833896.mp4 create mode 100644 dom/media/test/crashtests/1835118.adts create mode 100644 dom/media/test/crashtests/1835164.html create mode 100644 dom/media/test/crashtests/1835164.opus create mode 100644 dom/media/test/crashtests/1839193.adts create mode 100644 dom/media/test/crashtests/1839193.html create mode 100644 dom/media/test/crashtests/1840002.webm create mode 100644 dom/media/test/crashtests/1845350.mp4 create mode 100644 dom/media/test/crashtests/1848660.html create mode 100644 dom/media/test/crashtests/1848660.wav create mode 100644 dom/media/test/crashtests/1850453.flac create mode 100644 dom/media/test/crashtests/1850453.html create mode 100644 dom/media/test/crashtests/1859384.mp4 create mode 100644 dom/media/test/crashtests/1859600.mp4 create mode 100644 dom/media/test/crashtests/1860840.mp4 create mode 100644 dom/media/test/crashtests/1864450.html create mode 100644 dom/media/test/crashtests/1872787.html create mode 100644 dom/media/test/crashtests/255ch.wav create mode 100644 dom/media/test/crashtests/459439-1.html create mode 100644 dom/media/test/crashtests/466607-1.html create mode 100644 dom/media/test/crashtests/466945-1.html create mode 100644 dom/media/test/crashtests/468763-1.html create mode 100644 dom/media/test/crashtests/474744-1.html create mode 100644 dom/media/test/crashtests/481136-1.html create mode 100644 dom/media/test/crashtests/492286-1.xhtml create mode 100644 dom/media/test/crashtests/493915-1.html create mode 100644 dom/media/test/crashtests/495794-1.html create mode 100644 dom/media/test/crashtests/495794-1.ogg create mode 100644 dom/media/test/crashtests/497734-1.xhtml create mode 100644 dom/media/test/crashtests/497734-2.html create mode 100644 dom/media/test/crashtests/576612-1.html create mode 100644 dom/media/test/crashtests/691096-1.html create mode 100644 dom/media/test/crashtests/752784-1.html create mode 100644 dom/media/test/crashtests/789075-1.html create mode 100644 dom/media/test/crashtests/789075.webm create mode 100644 dom/media/test/crashtests/795892-1.html create mode 100644 dom/media/test/crashtests/844563.html create mode 100644 dom/media/test/crashtests/846612.html create mode 100644 
dom/media/test/crashtests/852838.html create mode 100644 dom/media/test/crashtests/865004.html create mode 100644 dom/media/test/crashtests/865537-1.html create mode 100644 dom/media/test/crashtests/865550.html create mode 100644 dom/media/test/crashtests/868504.html create mode 100644 dom/media/test/crashtests/874869.html create mode 100644 dom/media/test/crashtests/874915.html create mode 100644 dom/media/test/crashtests/874934.html create mode 100644 dom/media/test/crashtests/874952.html create mode 100644 dom/media/test/crashtests/875144.html create mode 100644 dom/media/test/crashtests/875596.html create mode 100644 dom/media/test/crashtests/875911.html create mode 100644 dom/media/test/crashtests/876024-1.html create mode 100644 dom/media/test/crashtests/876024-2.html create mode 100644 dom/media/test/crashtests/876118.html create mode 100644 dom/media/test/crashtests/876207.html create mode 100644 dom/media/test/crashtests/876215.html create mode 100644 dom/media/test/crashtests/876249.html create mode 100644 dom/media/test/crashtests/876252.html create mode 100644 dom/media/test/crashtests/876834.html create mode 100644 dom/media/test/crashtests/877527.html create mode 100644 dom/media/test/crashtests/877820.html create mode 100644 dom/media/test/crashtests/878014.html create mode 100644 dom/media/test/crashtests/878328.html create mode 100644 dom/media/test/crashtests/878407.html create mode 100644 dom/media/test/crashtests/878478.html create mode 100644 dom/media/test/crashtests/880129.html create mode 100644 dom/media/test/crashtests/880202.html create mode 100644 dom/media/test/crashtests/880342-1.html create mode 100644 dom/media/test/crashtests/880342-2.html create mode 100644 dom/media/test/crashtests/880384.html create mode 100644 dom/media/test/crashtests/880404.html create mode 100644 dom/media/test/crashtests/880724.html create mode 100644 dom/media/test/crashtests/881775.html create mode 100644 dom/media/test/crashtests/882956.html create mode 100644 dom/media/test/crashtests/884459.html create mode 100644 dom/media/test/crashtests/889042.html create mode 100644 dom/media/test/crashtests/907986-1.html create mode 100644 dom/media/test/crashtests/907986-2.html create mode 100644 dom/media/test/crashtests/907986-3.html create mode 100644 dom/media/test/crashtests/907986-4.html create mode 100644 dom/media/test/crashtests/910171-1.html create mode 100644 dom/media/test/crashtests/920987.html create mode 100644 dom/media/test/crashtests/925619-1.html create mode 100644 dom/media/test/crashtests/925619-2.html create mode 100644 dom/media/test/crashtests/926619.html create mode 100644 dom/media/test/crashtests/933151.html create mode 100644 dom/media/test/crashtests/933156.html create mode 100644 dom/media/test/crashtests/944851.html create mode 100644 dom/media/test/crashtests/952756.html create mode 100644 dom/media/test/crashtests/986901.html create mode 100644 dom/media/test/crashtests/990794.html create mode 100644 dom/media/test/crashtests/995289.html create mode 100644 dom/media/test/crashtests/adts-truncated.aac create mode 100644 dom/media/test/crashtests/adts.aac create mode 100644 dom/media/test/crashtests/analyser-channels-1.html create mode 100644 dom/media/test/crashtests/audiocontext-after-unload-1.html create mode 100644 dom/media/test/crashtests/audiocontext-after-xhr.html create mode 100644 dom/media/test/crashtests/audiocontext-double-suspend.html create mode 100644 dom/media/test/crashtests/audioworkletnode-after-unload-1.html create mode 100644 
dom/media/test/crashtests/buffer-source-duration-1.html create mode 100644 dom/media/test/crashtests/buffer-source-ended-1.html create mode 100644 dom/media/test/crashtests/buffer-source-resampling-start-1.html create mode 100644 dom/media/test/crashtests/buffer-source-slow-resampling-1.html create mode 100644 dom/media/test/crashtests/channel-count-in-metadata-different-than-in-content.mp4 create mode 100644 dom/media/test/crashtests/convolver-memory-report-1.html create mode 100644 dom/media/test/crashtests/copyFromChannel-2.html create mode 100644 dom/media/test/crashtests/cors.webm create mode 100644 dom/media/test/crashtests/cors.webm^headers^ create mode 100644 dom/media/test/crashtests/crashtests.list create mode 100644 dom/media/test/crashtests/disconnect-wrong-destination.html create mode 100644 dom/media/test/crashtests/doppler-1.html create mode 100644 dom/media/test/crashtests/empty-buffer-source.html create mode 100644 dom/media/test/crashtests/empty-samples.webm create mode 100644 dom/media/test/crashtests/encrypted-track-with-bad-sample-description-index.mp4 create mode 100644 dom/media/test/crashtests/encrypted-track-with-sample-missing-cenc-aux.mp4 create mode 100644 dom/media/test/crashtests/encrypted-track-without-tenc.mp4 create mode 100644 dom/media/test/crashtests/invalidfmt.html create mode 100644 dom/media/test/crashtests/invalidfmt.wav create mode 100644 dom/media/test/crashtests/media-element-source-seek-1.html create mode 100644 dom/media/test/crashtests/mp4_box_emptyrange.mp4 create mode 100644 dom/media/test/crashtests/noextradata-8ch.wav create mode 100644 dom/media/test/crashtests/offline-buffer-source-ended-1.html create mode 100644 dom/media/test/crashtests/oscillator-ended-1.html create mode 100644 dom/media/test/crashtests/oscillator-ended-2.html create mode 100644 dom/media/test/crashtests/small-timebase.html create mode 100644 dom/media/test/crashtests/small-timebase.mp4 create mode 100644 dom/media/test/crashtests/sound.ogg create mode 100644 dom/media/test/crashtests/test.mp4 create mode 100644 dom/media/test/crashtests/track-with-zero-dimensions.mp4 create mode 100644 dom/media/test/crashtests/trimming_needed_and_last_sample_invalid_duration.ogg create mode 100644 dom/media/test/crashtests/video-crash.webm create mode 100644 dom/media/test/crashtests/video-replay-after-audio-end.html create mode 100644 dom/media/test/dash/dash-manifest-garbled-webm.mpd create mode 100644 dom/media/test/dash/dash-manifest-garbled.mpd create mode 100644 dom/media/test/dash/dash-manifest-sjs.mpd create mode 100644 dom/media/test/dash/dash-manifest.mpd create mode 100644 dom/media/test/dash/dash-webm-audio-128k.webm create mode 100644 dom/media/test/dash/dash-webm-video-320x180.webm create mode 100644 dom/media/test/dash/dash-webm-video-428x240.webm create mode 100644 dom/media/test/dash/garbled.webm create mode 100644 dom/media/test/dash_detect_stream_switch.sjs create mode 100644 dom/media/test/detodos-recorder-test.opus create mode 100644 dom/media/test/detodos-recorder-test.opus^headers^ create mode 100644 dom/media/test/detodos-short.opus create mode 100644 dom/media/test/detodos-short.opus^headers^ create mode 100644 dom/media/test/detodos-short.webm create mode 100644 dom/media/test/detodos-short.webm^headers^ create mode 100644 dom/media/test/detodos.opus create mode 100644 dom/media/test/detodos.opus^headers^ create mode 100644 dom/media/test/detodos.webm create mode 100644 dom/media/test/detodos.webm^headers^ create mode 100644 dom/media/test/dirac.ogg create 
mode 100644 dom/media/test/dirac.ogg^headers^ create mode 100644 dom/media/test/dynamic_resource.sjs create mode 100644 dom/media/test/eme.js create mode 100644 dom/media/test/eme_standalone.js create mode 100644 dom/media/test/empty_size.mp3 create mode 100644 dom/media/test/file_access_controls.html create mode 100644 dom/media/test/file_eme_createMediaKeys.html create mode 100644 dom/media/test/file_playback_and_bfcache.html create mode 100644 dom/media/test/flac-noheader-s16.flac create mode 100644 dom/media/test/flac-noheader-s16.flac^headers^ create mode 100644 dom/media/test/flac-s24.flac create mode 100644 dom/media/test/flac-s24.flac^headers^ create mode 100644 dom/media/test/flac-sample-cenc.mp4 create mode 100644 dom/media/test/flac-sample-cenc.mp4^headers^ create mode 100644 dom/media/test/flac-sample.mp4 create mode 100644 dom/media/test/flac-sample.mp4^headers^ create mode 100644 dom/media/test/force_octet_stream.mp4 create mode 100644 dom/media/test/force_octet_stream.mp4^headers^ create mode 100644 dom/media/test/fragment_noplay.js create mode 100644 dom/media/test/fragment_play.js create mode 100644 dom/media/test/gUM_support.js create mode 100644 dom/media/test/gizmo-frag.mp4 create mode 100644 dom/media/test/gizmo-noaudio.mp4 create mode 100644 dom/media/test/gizmo-noaudio.mp4^headers^ create mode 100644 dom/media/test/gizmo-noaudio.webm create mode 100644 dom/media/test/gizmo-noaudio.webm^headers^ create mode 100644 dom/media/test/gizmo-short.mp4 create mode 100644 dom/media/test/gizmo-short.mp4^headers^ create mode 100644 dom/media/test/gizmo.mp4 create mode 100644 dom/media/test/gizmo.mp4^headers^ create mode 100644 dom/media/test/gizmo.webm create mode 100644 dom/media/test/gizmo.webm^headers^ create mode 100644 dom/media/test/gzipped_mp4.sjs create mode 100644 dom/media/test/hevc_white_frame.mp4 create mode 100644 dom/media/test/hevc_white_frame.mp4^headers^ create mode 100644 dom/media/test/hevc_white_red_frames.mp4 create mode 100644 dom/media/test/hevc_white_red_frames.mp4^headers^ create mode 100644 dom/media/test/hls/400x300_prog_index.m3u8 create mode 100644 dom/media/test/hls/400x300_prog_index_5s.m3u8 create mode 100644 dom/media/test/hls/400x300_seg0.ts create mode 100644 dom/media/test/hls/400x300_seg0_5s.ts create mode 100644 dom/media/test/hls/400x300_seg1.ts create mode 100644 dom/media/test/hls/416x243_prog_index_5s.m3u8 create mode 100644 dom/media/test/hls/416x243_seg0_5s.ts create mode 100644 dom/media/test/hls/640x480_prog_index.m3u8 create mode 100644 dom/media/test/hls/640x480_seg0.ts create mode 100644 dom/media/test/hls/640x480_seg1.ts create mode 100644 dom/media/test/hls/960x720_prog_index.m3u8 create mode 100644 dom/media/test/hls/960x720_seg0.ts create mode 100644 dom/media/test/hls/960x720_seg1.ts create mode 100644 dom/media/test/hls/bipbop_16x9_single.m3u8 create mode 100644 dom/media/test/hls/bipbop_4x3_single.m3u8 create mode 100644 dom/media/test/hls/bipbop_4x3_variant.m3u8 create mode 100644 dom/media/test/huge-id3.mp3 create mode 100644 dom/media/test/huge-id3.mp3^headers^ create mode 100644 dom/media/test/id3tags.mp3 create mode 100644 dom/media/test/id3tags.mp3^headers^ create mode 100644 dom/media/test/id3v1afterlongid3v2.mp3 create mode 100644 dom/media/test/invalid-cmap-s0c0.opus create mode 100644 dom/media/test/invalid-cmap-s0c0.opus^headers^ create mode 100644 dom/media/test/invalid-cmap-s0c2.opus create mode 100644 dom/media/test/invalid-cmap-s0c2.opus^headers^ create mode 100644 dom/media/test/invalid-cmap-s1c2.opus create 
mode 100644 dom/media/test/invalid-cmap-s1c2.opus^headers^ create mode 100644 dom/media/test/invalid-cmap-short.opus create mode 100644 dom/media/test/invalid-cmap-short.opus^headers^ create mode 100644 dom/media/test/invalid-discard_on_multi_blocks.webm create mode 100644 dom/media/test/invalid-discard_on_multi_blocks.webm^headers^ create mode 100644 dom/media/test/invalid-excess_discard.webm create mode 100644 dom/media/test/invalid-excess_discard.webm^headers^ create mode 100644 dom/media/test/invalid-excess_neg_discard.webm create mode 100644 dom/media/test/invalid-excess_neg_discard.webm^headers^ create mode 100644 dom/media/test/invalid-m0c0.opus create mode 100644 dom/media/test/invalid-m0c0.opus^headers^ create mode 100644 dom/media/test/invalid-m0c3.opus create mode 100644 dom/media/test/invalid-m0c3.opus^headers^ create mode 100644 dom/media/test/invalid-m1c0.opus create mode 100644 dom/media/test/invalid-m1c0.opus^headers^ create mode 100644 dom/media/test/invalid-m1c9.opus create mode 100644 dom/media/test/invalid-m1c9.opus^headers^ create mode 100644 dom/media/test/invalid-m2c0.opus create mode 100644 dom/media/test/invalid-m2c0.opus^headers^ create mode 100644 dom/media/test/invalid-m2c1.opus create mode 100644 dom/media/test/invalid-m2c1.opus^headers^ create mode 100644 dom/media/test/invalid-neg_discard.webm create mode 100644 dom/media/test/invalid-neg_discard.webm^headers^ create mode 100644 dom/media/test/invalid-preskip.webm create mode 100644 dom/media/test/invalid-preskip.webm^headers^ create mode 100644 dom/media/test/make-headers.sh create mode 100644 dom/media/test/manifest.js create mode 100644 dom/media/test/midflight-redirect.sjs create mode 100644 dom/media/test/mochitest.toml create mode 100644 dom/media/test/mochitest_background_video.toml create mode 100644 dom/media/test/mochitest_bugs.toml create mode 100644 dom/media/test/mochitest_compat.toml create mode 100644 dom/media/test/mochitest_eme.toml create mode 100644 dom/media/test/mochitest_eme_compat.toml create mode 100644 dom/media/test/mochitest_media_engine.toml create mode 100644 dom/media/test/mochitest_media_recorder.toml create mode 100644 dom/media/test/mochitest_seek.toml create mode 100644 dom/media/test/mochitest_stream.toml create mode 100644 dom/media/test/multi_id3v2.mp3 create mode 100644 dom/media/test/multiple-bos-more-header-fileds.ogg create mode 100644 dom/media/test/multiple-bos-more-header-fileds.ogg^headers^ create mode 100644 dom/media/test/multiple-bos.ogg create mode 100644 dom/media/test/multiple-bos.ogg^headers^ create mode 100644 dom/media/test/no-container-codec-delay.webm create mode 100644 dom/media/test/no-cues.webm create mode 100644 dom/media/test/no-cues.webm^headers^ create mode 100644 dom/media/test/notags.mp3 create mode 100644 dom/media/test/notags.mp3^headers^ create mode 100644 dom/media/test/opus-mapping2.mp4 create mode 100644 dom/media/test/opus-mapping2.mp4^headers^ create mode 100644 dom/media/test/opus-mapping2.webm create mode 100644 dom/media/test/opus-mapping2.webm^headers^ create mode 100644 dom/media/test/opus-sample-cenc.mp4 create mode 100644 dom/media/test/opus-sample-cenc.mp4^headers^ create mode 100644 dom/media/test/opus-sample.mp4 create mode 100644 dom/media/test/opus-sample.mp4^headers^ create mode 100644 dom/media/test/owl-funnier-id3.mp3 create mode 100644 dom/media/test/owl-funnier-id3.mp3^headers^ create mode 100644 dom/media/test/owl-funny-id3.mp3 create mode 100644 dom/media/test/owl-funny-id3.mp3^headers^ create mode 100644 
dom/media/test/owl-short.mp3 create mode 100644 dom/media/test/owl-short.mp3^headers^ create mode 100644 dom/media/test/owl.mp3 create mode 100644 dom/media/test/owl.mp3^headers^ create mode 100644 dom/media/test/padding-spanning-multiple-packets.mp3 create mode 100644 dom/media/test/pixel_aspect_ratio.mp4 create mode 100644 dom/media/test/play_promise.js create mode 100644 dom/media/test/poster-test.jpg create mode 100644 dom/media/test/r11025_msadpcm_c1.wav create mode 100644 dom/media/test/r11025_msadpcm_c1.wav^headers^ create mode 100644 dom/media/test/r11025_s16_c1-short.wav create mode 100644 dom/media/test/r11025_s16_c1-short.wav^headers^ create mode 100644 dom/media/test/r11025_s16_c1.wav create mode 100644 dom/media/test/r11025_s16_c1.wav^headers^ create mode 100644 dom/media/test/r11025_s16_c1_trailing.wav create mode 100644 dom/media/test/r11025_s16_c1_trailing.wav^headers^ create mode 100644 dom/media/test/r11025_u8_c1.wav create mode 100644 dom/media/test/r11025_u8_c1.wav^headers^ create mode 100644 dom/media/test/r11025_u8_c1_trunc.wav create mode 100644 dom/media/test/r11025_u8_c1_trunc.wav^headers^ create mode 100644 dom/media/test/r16000_u8_c1_list.wav create mode 100644 dom/media/test/r16000_u8_c1_list.wav^headers^ create mode 100644 dom/media/test/rdd_process_xpcom/RddProcessTest.cpp create mode 100644 dom/media/test/rdd_process_xpcom/RddProcessTest.h create mode 100644 dom/media/test/rdd_process_xpcom/components.conf create mode 100644 dom/media/test/rdd_process_xpcom/moz.build create mode 100644 dom/media/test/rdd_process_xpcom/nsIRddProcessTest.idl create mode 100644 dom/media/test/reactivate_helper.html create mode 100644 dom/media/test/red-46x48.mp4 create mode 100644 dom/media/test/red-46x48.mp4^headers^ create mode 100644 dom/media/test/red-48x46.mp4 create mode 100644 dom/media/test/red-48x46.mp4^headers^ create mode 100644 dom/media/test/redirect.sjs create mode 100644 dom/media/test/referer.sjs create mode 100644 dom/media/test/reftest/av1hdr2020.mp4 create mode 100644 dom/media/test/reftest/av1hdr2020.png create mode 100644 dom/media/test/reftest/bipbop_300_215kbps.mp4.lastframe-ref.html create mode 100644 dom/media/test/reftest/bipbop_300_215kbps.mp4.lastframe.html create mode 100644 dom/media/test/reftest/color_quads/720p.png create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.gbrp.av1.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.gbrp.av1.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.gbrp.h264.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.gbrp.vp9.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.gbrp.vp9.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p.h264.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p10.h264.mp4 create mode 100644 
dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.gbrp.av1.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.gbrp.av1.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.gbrp.h264.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.gbrp.vp9.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.gbrp.vp9.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p.h264.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p10.h264.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p10.vp9.mp4 create mode 100644 dom/media/test/reftest/color_quads/720p.png.bt709.bt709.tv.yuv420p10.vp9.webm create mode 100644 dom/media/test/reftest/color_quads/reftest.list create mode 100644 dom/media/test/reftest/frame_order.mp4 create mode 100644 dom/media/test/reftest/frame_order_mp4-ref.html create mode 100644 dom/media/test/reftest/frame_order_mp4.html create mode 100644 dom/media/test/reftest/gen_combos.py create mode 100644 dom/media/test/reftest/generateREF.html create mode 100644 dom/media/test/reftest/gizmo.mp4.55thframe-ref.html create mode 100644 dom/media/test/reftest/gizmo.mp4.seek.html create mode 100644 dom/media/test/reftest/image-10bits-rendering-720-90-ref.html create mode 100644 dom/media/test/reftest/image-10bits-rendering-720-90-video.html create mode 100644 dom/media/test/reftest/image-10bits-rendering-720-ref.html create mode 100644 dom/media/test/reftest/image-10bits-rendering-720-video.html create mode 100644 dom/media/test/reftest/image-10bits-rendering-720.video.html create mode 100644 dom/media/test/reftest/image-10bits-rendering-90-ref.html create mode 100644 dom/media/test/reftest/image-10bits-rendering-90-video.html create mode 100644 dom/media/test/reftest/image-10bits-rendering-ref.html create mode 100644 dom/media/test/reftest/image-10bits-rendering-video.html create mode 100644 dom/media/test/reftest/incorrect_display_in_bytestream_vp8-ref.html create mode 100644 dom/media/test/reftest/incorrect_display_in_bytestream_vp8.html create mode 100644 dom/media/test/reftest/incorrect_display_in_bytestream_vp8.webm create mode 100644 dom/media/test/reftest/incorrect_display_in_bytestream_vp9-ref.html create mode 100644 dom/media/test/reftest/incorrect_display_in_bytestream_vp9.html create mode 100644 dom/media/test/reftest/incorrect_display_in_bytestream_vp9.webm create mode 100644 dom/media/test/reftest/reftest.list create mode 100644 dom/media/test/reftest/reftest_img.html create mode 100644 dom/media/test/reftest/reftest_video.html create mode 100644 dom/media/test/reftest/short.mp4.firstframe-ref.html 
create mode 100644 dom/media/test/reftest/short.mp4.firstframe.html create mode 100644 dom/media/test/reftest/short.mp4.lastframe-ref.html create mode 100644 dom/media/test/reftest/short.mp4.lastframe.html create mode 100644 dom/media/test/reftest/uneven_frame_duration_video-ref.html create mode 100644 dom/media/test/reftest/uneven_frame_duration_video.html create mode 100644 dom/media/test/reftest/uneven_frame_durations.mp4 create mode 100644 dom/media/test/reftest/uneven_frame_durations_3.8s_frame.png create mode 100644 dom/media/test/reftest/vp9hdr2020.png create mode 100644 dom/media/test/reftest/vp9hdr2020.webm create mode 100644 dom/media/test/resolution-change.webm create mode 100644 dom/media/test/resolution-change.webm^headers^ create mode 100644 dom/media/test/sample-encrypted-sgpdstbl-sbgptraf.mp4 create mode 100644 dom/media/test/sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^ create mode 100644 dom/media/test/sample-fisbone-skeleton4.ogv create mode 100644 dom/media/test/sample-fisbone-skeleton4.ogv^headers^ create mode 100644 dom/media/test/sample-fisbone-wrong-header.ogv create mode 100644 dom/media/test/sample-fisbone-wrong-header.ogv^headers^ create mode 100644 dom/media/test/sample.3g2 create mode 100644 dom/media/test/sample.3gp create mode 100644 dom/media/test/seek-short.ogv create mode 100644 dom/media/test/seek-short.ogv^headers^ create mode 100644 dom/media/test/seek-short.webm create mode 100644 dom/media/test/seek-short.webm^headers^ create mode 100644 dom/media/test/seek.ogv create mode 100644 dom/media/test/seek.ogv^headers^ create mode 100644 dom/media/test/seek.webm create mode 100644 dom/media/test/seek.webm^headers^ create mode 100644 dom/media/test/seekLies.sjs create mode 100644 dom/media/test/seek_support.js create mode 100644 dom/media/test/seek_with_sound.ogg create mode 100644 dom/media/test/seek_with_sound.ogg^headers^ create mode 100644 dom/media/test/short-aac-encrypted-audio.mp4 create mode 100644 dom/media/test/short-aac-encrypted-audio.mp4^headers^ create mode 100644 dom/media/test/short-audio-fragmented-cenc-without-pssh.mp4 create mode 100644 dom/media/test/short-audio-fragmented-cenc-without-pssh.mp4^headers^ create mode 100644 dom/media/test/short-cenc-pssh-in-moof.mp4 create mode 100644 dom/media/test/short-cenc.mp4 create mode 100644 dom/media/test/short-cenc.xml create mode 100644 dom/media/test/short-video.ogv create mode 100644 dom/media/test/short-video.ogv^headers^ create mode 100644 dom/media/test/short-vp9-encrypted-video.mp4 create mode 100644 dom/media/test/short-vp9-encrypted-video.mp4^headers^ create mode 100644 dom/media/test/short.mp4 create mode 100644 dom/media/test/short.mp4.gz create mode 100644 dom/media/test/short.mp4^headers^ create mode 100644 dom/media/test/shorter_audio_than_video_3s.webm create mode 100644 dom/media/test/shorter_audio_than_video_3s.webm^headers^ create mode 100644 dom/media/test/sin-441-1s-44100-afconvert.mp4 create mode 100644 dom/media/test/sin-441-1s-44100-fdk_aac.mp4 create mode 100644 dom/media/test/sin-441-1s-44100-lame.mp3 create mode 100644 dom/media/test/sin-441-1s-44100.flac create mode 100644 dom/media/test/sin-441-1s-44100.ogg create mode 100644 dom/media/test/sin-441-1s-44100.opus create mode 100644 dom/media/test/sine.webm create mode 100644 dom/media/test/sine.webm^headers^ create mode 100644 dom/media/test/single-xing-header-no-content-length.mp3 create mode 100644 dom/media/test/single-xing-header-no-content-length.mp3^headers^ create mode 100644 
dom/media/test/sintel-short-clearkey-subsample-encrypted-audio.webm create mode 100644 dom/media/test/sintel-short-clearkey-subsample-encrypted-audio.webm^headers^ create mode 100644 dom/media/test/sintel-short-clearkey-subsample-encrypted-video.webm create mode 100644 dom/media/test/sintel-short-clearkey-subsample-encrypted-video.webm^headers^ create mode 100644 dom/media/test/small-shot-mp3.mp4 create mode 100644 dom/media/test/small-shot-mp3.mp4^headers^ create mode 100644 dom/media/test/small-shot.flac create mode 100644 dom/media/test/small-shot.m4a create mode 100644 dom/media/test/small-shot.mp3 create mode 100644 dom/media/test/small-shot.mp3^headers^ create mode 100644 dom/media/test/small-shot.ogg create mode 100644 dom/media/test/small-shot.ogg^headers^ create mode 100644 dom/media/test/sound.ogg create mode 100644 dom/media/test/sound.ogg^headers^ create mode 100644 dom/media/test/spacestorm-1000Hz-100ms.ogg create mode 100644 dom/media/test/spacestorm-1000Hz-100ms.ogg^headers^ create mode 100644 dom/media/test/split.webm create mode 100644 dom/media/test/split.webm^headers^ create mode 100644 dom/media/test/street.mp4 create mode 100644 dom/media/test/street.mp4^headers^ create mode 100644 dom/media/test/sync.webm create mode 100644 dom/media/test/test-1-mono.opus create mode 100644 dom/media/test/test-1-mono.opus^headers^ create mode 100644 dom/media/test/test-2-stereo.opus create mode 100644 dom/media/test/test-2-stereo.opus^headers^ create mode 100644 dom/media/test/test-3-LCR.opus create mode 100644 dom/media/test/test-3-LCR.opus^headers^ create mode 100644 dom/media/test/test-4-quad.opus create mode 100644 dom/media/test/test-4-quad.opus^headers^ create mode 100644 dom/media/test/test-5-5.0.opus create mode 100644 dom/media/test/test-5-5.0.opus^headers^ create mode 100644 dom/media/test/test-6-5.1.opus create mode 100644 dom/media/test/test-6-5.1.opus^headers^ create mode 100644 dom/media/test/test-7-6.1.opus create mode 100644 dom/media/test/test-7-6.1.opus^headers^ create mode 100644 dom/media/test/test-8-7.1.opus create mode 100644 dom/media/test/test-8-7.1.opus^headers^ create mode 100644 dom/media/test/test-stereo-phase-inversion-180.opus create mode 100644 dom/media/test/test-stereo-phase-inversion-180.opus^headers^ create mode 100644 dom/media/test/test_VideoPlaybackQuality.html create mode 100644 dom/media/test/test_VideoPlaybackQuality_disabled.html create mode 100644 dom/media/test/test_access_control.html create mode 100644 dom/media/test/test_arraybuffer.html create mode 100644 dom/media/test/test_aspectratio_mp4.html create mode 100644 dom/media/test/test_audio1.html create mode 100644 dom/media/test/test_audio2.html create mode 100644 dom/media/test/test_audioDocumentTitle.html create mode 100644 dom/media/test/test_background_video_cancel_suspend_taint.html create mode 100644 dom/media/test/test_background_video_cancel_suspend_visible.html create mode 100644 dom/media/test/test_background_video_drawimage_with_suspended_video.html create mode 100644 dom/media/test/test_background_video_ended_event.html create mode 100644 dom/media/test/test_background_video_no_suspend_disabled.html create mode 100644 dom/media/test/test_background_video_no_suspend_not_in_tree.html create mode 100644 dom/media/test/test_background_video_no_suspend_short_vid.html create mode 100644 dom/media/test/test_background_video_resume_after_end_show_last_frame.html create mode 100644 dom/media/test/test_background_video_resume_looping_video_without_audio.html create mode 100644 
dom/media/test/test_background_video_suspend.html create mode 100644 dom/media/test/test_background_video_suspend_ends.html create mode 100644 dom/media/test/test_background_video_suspend_ready_state.html create mode 100644 dom/media/test/test_background_video_tainted_by_capturestream.html create mode 100644 dom/media/test/test_background_video_tainted_by_createimagebitmap.html create mode 100644 dom/media/test/test_background_video_tainted_by_drawimage.html create mode 100644 dom/media/test/test_buffered.html create mode 100644 dom/media/test/test_bug1113600.html create mode 100644 dom/media/test/test_bug1120222.html create mode 100644 dom/media/test/test_bug1242338.html create mode 100644 dom/media/test/test_bug1248229.html create mode 100644 dom/media/test/test_bug1431810_opus_downmix_to_mono.html create mode 100644 dom/media/test/test_bug1512958.html create mode 100644 dom/media/test/test_bug1553262.html create mode 100644 dom/media/test/test_bug448534.html create mode 100644 dom/media/test/test_bug463162.xhtml create mode 100644 dom/media/test/test_bug465498.html create mode 100644 dom/media/test/test_bug495145.html create mode 100644 dom/media/test/test_bug495300.html create mode 100644 dom/media/test/test_bug654550.html create mode 100644 dom/media/test/test_bug686942.html create mode 100644 dom/media/test/test_bug726904.html create mode 100644 dom/media/test/test_bug874897.html create mode 100644 dom/media/test/test_bug879717.html create mode 100644 dom/media/test/test_bug895305.html create mode 100644 dom/media/test/test_bug919265.html create mode 100644 dom/media/test/test_can_play_type.html create mode 100644 dom/media/test/test_can_play_type_mpeg.html create mode 100644 dom/media/test/test_can_play_type_no_ogg.html create mode 100644 dom/media/test/test_can_play_type_ogg.html create mode 100644 dom/media/test/test_can_play_type_wave.html create mode 100644 dom/media/test/test_can_play_type_webm.html create mode 100644 dom/media/test/test_capture_stream_av_sync.html create mode 100644 dom/media/test/test_chaining.html create mode 100644 dom/media/test/test_cloneElementVisually_ended_video.html create mode 100644 dom/media/test/test_cloneElementVisually_mediastream.html create mode 100644 dom/media/test/test_cloneElementVisually_mediastream_multitrack.html create mode 100644 dom/media/test/test_cloneElementVisually_no_suspend.html create mode 100644 dom/media/test/test_cloneElementVisually_paused.html create mode 100644 dom/media/test/test_cloneElementVisually_poster.html create mode 100644 dom/media/test/test_cloneElementVisually_resource_change.html create mode 100644 dom/media/test/test_clone_media_element.html create mode 100644 dom/media/test/test_closing_connections.html create mode 100644 dom/media/test/test_constants.html create mode 100644 dom/media/test/test_controls.html create mode 100644 dom/media/test/test_cueless_webm_seek-1.html create mode 100644 dom/media/test/test_cueless_webm_seek-2.html create mode 100644 dom/media/test/test_cueless_webm_seek-3.html create mode 100644 dom/media/test/test_currentTime.html create mode 100644 dom/media/test/test_debug_data_helpers.html create mode 100644 dom/media/test/test_decode_error.html create mode 100644 dom/media/test/test_decode_error_crossorigin.html create mode 100644 dom/media/test/test_decoder_disable.html create mode 100644 dom/media/test/test_defaultMuted.html create mode 100644 dom/media/test/test_delay_load.html create mode 100644 dom/media/test/test_duration_after_error.html create mode 100644 
dom/media/test/test_eme_autoplay.html create mode 100644 dom/media/test/test_eme_canvas_blocked.html create mode 100644 dom/media/test/test_eme_createMediaKeys_iframes.html create mode 100644 dom/media/test/test_eme_detach_media_keys.html create mode 100644 dom/media/test/test_eme_detach_reattach_same_mediakeys_during_playback.html create mode 100644 dom/media/test/test_eme_getstatusforpolicy.html create mode 100644 dom/media/test/test_eme_initDataTypes.html create mode 100644 dom/media/test/test_eme_mfcdm_generate_request.html create mode 100644 dom/media/test/test_eme_mfcdm_getstatusforpolicy.html create mode 100644 dom/media/test/test_eme_missing_pssh.html create mode 100644 dom/media/test/test_eme_non_mse_fails.html create mode 100644 dom/media/test/test_eme_playback.html create mode 100644 dom/media/test/test_eme_protection_query.html create mode 100644 dom/media/test/test_eme_pssh_in_moof.html create mode 100644 dom/media/test/test_eme_requestKeySystemAccess.html create mode 100644 dom/media/test/test_eme_requestMediaKeySystemAccess_with_app_approval.html create mode 100644 dom/media/test/test_eme_request_notifications.html create mode 100644 dom/media/test/test_eme_sample_groups_playback.html create mode 100644 dom/media/test/test_eme_session_callable_value.html create mode 100644 dom/media/test/test_eme_setMediaKeys_before_attach_MediaSource.html create mode 100644 dom/media/test/test_eme_special_key_system.html create mode 100644 dom/media/test/test_eme_stream_capture_blocked_case1.html create mode 100644 dom/media/test/test_eme_stream_capture_blocked_case2.html create mode 100644 dom/media/test/test_eme_stream_capture_blocked_case3.html create mode 100644 dom/media/test/test_eme_unsetMediaKeys_then_capture.html create mode 100644 dom/media/test/test_eme_waitingforkey.html create mode 100644 dom/media/test/test_eme_wideinve_l1_installation.html create mode 100644 dom/media/test/test_eme_wv_privacy.html create mode 100644 dom/media/test/test_empty_resource.html create mode 100644 dom/media/test/test_error_in_video_document.html create mode 100644 dom/media/test/test_error_on_404.html create mode 100644 dom/media/test/test_fastSeek-forwards.html create mode 100644 dom/media/test/test_fastSeek.html create mode 100644 dom/media/test/test_fragment_noplay.html create mode 100644 dom/media/test/test_fragment_play.html create mode 100644 dom/media/test/test_hevc_playback.html create mode 100644 dom/media/test/test_hevc_support.html create mode 100644 dom/media/test/test_hls_player_independency.html create mode 100644 dom/media/test/test_hw_video_decoding.html create mode 100644 dom/media/test/test_imagecapture.html create mode 100644 dom/media/test/test_info_leak.html create mode 100644 dom/media/test/test_invalid_reject.html create mode 100644 dom/media/test/test_invalid_reject_play.html create mode 100644 dom/media/test/test_invalid_seek.html create mode 100644 dom/media/test/test_load.html create mode 100644 dom/media/test/test_load_candidates.html create mode 100644 dom/media/test/test_load_same_resource.html create mode 100644 dom/media/test/test_load_source.html create mode 100644 dom/media/test/test_load_source_empty_type.html create mode 100644 dom/media/test/test_loop.html create mode 100644 dom/media/test/test_looping_eventsOrder.html create mode 100644 dom/media/test/test_media_selection.html create mode 100644 dom/media/test/test_media_sniffer.html create mode 100644 dom/media/test/test_mediacapabilities_resistfingerprinting.html create mode 100644 
dom/media/test/test_mediarecorder_avoid_recursion.html create mode 100644 dom/media/test/test_mediarecorder_bitrate.html create mode 100644 dom/media/test/test_mediarecorder_creation.html create mode 100644 dom/media/test/test_mediarecorder_creation_fail.html create mode 100644 dom/media/test/test_mediarecorder_fires_start_event_once_when_erroring.html create mode 100644 dom/media/test/test_mediarecorder_multipletracks.html create mode 100644 dom/media/test/test_mediarecorder_onerror_pause.html create mode 100644 dom/media/test/test_mediarecorder_pause_resume_video.html create mode 100644 dom/media/test/test_mediarecorder_playback_can_repeat.html create mode 100644 dom/media/test/test_mediarecorder_principals.html create mode 100644 dom/media/test/test_mediarecorder_record_4ch_audiocontext.html create mode 100644 dom/media/test/test_mediarecorder_record_addtracked_stream.html create mode 100644 dom/media/test/test_mediarecorder_record_audiocontext.html create mode 100644 dom/media/test/test_mediarecorder_record_audiocontext_mlk.html create mode 100644 dom/media/test/test_mediarecorder_record_audionode.html create mode 100644 dom/media/test/test_mediarecorder_record_canvas_captureStream.html create mode 100644 dom/media/test/test_mediarecorder_record_changing_video_resolution.html create mode 100644 dom/media/test/test_mediarecorder_record_downsize_resolution.html create mode 100644 dom/media/test/test_mediarecorder_record_getdata_afterstart.html create mode 100644 dom/media/test/test_mediarecorder_record_gum_video_timeslice.html create mode 100644 dom/media/test/test_mediarecorder_record_gum_video_timeslice_mixed.html create mode 100644 dom/media/test/test_mediarecorder_record_immediate_stop.html create mode 100644 dom/media/test/test_mediarecorder_record_no_timeslice.html create mode 100644 dom/media/test/test_mediarecorder_record_session.html create mode 100644 dom/media/test/test_mediarecorder_record_startstopstart.html create mode 100644 dom/media/test/test_mediarecorder_record_timeslice.html create mode 100644 dom/media/test/test_mediarecorder_record_upsize_resolution.html create mode 100644 dom/media/test/test_mediarecorder_reload_crash.html create mode 100644 dom/media/test/test_mediarecorder_state_event_order.html create mode 100644 dom/media/test/test_mediarecorder_state_transition.html create mode 100644 dom/media/test/test_mediarecorder_webm_support.html create mode 100644 dom/media/test/test_mediastream_as_eventarget.html create mode 100644 dom/media/test/test_mediatrack_consuming_mediaresource.html create mode 100644 dom/media/test/test_mediatrack_consuming_mediastream.html create mode 100644 dom/media/test/test_mediatrack_events.html create mode 100644 dom/media/test/test_mediatrack_parsing_ogg.html create mode 100644 dom/media/test/test_mediatrack_replay_from_end.html create mode 100644 dom/media/test/test_metadata.html create mode 100644 dom/media/test/test_midflight_redirect_blocked.html create mode 100644 dom/media/test/test_mixed_principals.html create mode 100644 dom/media/test/test_mozHasAudio.html create mode 100644 dom/media/test/test_mp3_broadcast.html create mode 100644 dom/media/test/test_mp3_with_multiple_ID3v2.html create mode 100644 dom/media/test/test_multiple_mediastreamtracks.html create mode 100644 dom/media/test/test_networkState.html create mode 100644 dom/media/test/test_new_audio.html create mode 100644 dom/media/test/test_no_load_event.html create mode 100644 dom/media/test/test_not_reset_playbackRate_when_removing_nonloaded_media_from_document.html 
create mode 100644 dom/media/test/test_paused.html create mode 100644 dom/media/test/test_paused_after_ended.html create mode 100644 dom/media/test/test_periodic_timeupdate.html create mode 100644 dom/media/test/test_play_events.html create mode 100644 dom/media/test/test_play_events_2.html create mode 100644 dom/media/test/test_play_promise_1.html create mode 100644 dom/media/test/test_play_promise_10.html create mode 100644 dom/media/test/test_play_promise_11.html create mode 100644 dom/media/test/test_play_promise_12.html create mode 100644 dom/media/test/test_play_promise_13.html create mode 100644 dom/media/test/test_play_promise_14.html create mode 100644 dom/media/test/test_play_promise_15.html create mode 100644 dom/media/test/test_play_promise_16.html create mode 100644 dom/media/test/test_play_promise_17.html create mode 100644 dom/media/test/test_play_promise_18.html create mode 100644 dom/media/test/test_play_promise_2.html create mode 100644 dom/media/test/test_play_promise_3.html create mode 100644 dom/media/test/test_play_promise_4.html create mode 100644 dom/media/test/test_play_promise_5.html create mode 100644 dom/media/test/test_play_promise_6.html create mode 100644 dom/media/test/test_play_promise_7.html create mode 100644 dom/media/test/test_play_promise_8.html create mode 100644 dom/media/test/test_play_promise_9.html create mode 100644 dom/media/test/test_play_twice.html create mode 100644 dom/media/test/test_playback.html create mode 100644 dom/media/test/test_playback_and_bfcache.html create mode 100644 dom/media/test/test_playback_errors.html create mode 100644 dom/media/test/test_playback_hls.html create mode 100644 dom/media/test/test_playback_rate.html create mode 100644 dom/media/test/test_playback_rate_playpause.html create mode 100644 dom/media/test/test_playback_reactivate.html create mode 100644 dom/media/test/test_played.html create mode 100644 dom/media/test/test_preload_actions.html create mode 100644 dom/media/test/test_preload_attribute.html create mode 100644 dom/media/test/test_preload_suspend.html create mode 100644 dom/media/test/test_preserve_playbackrate_after_ui_play.html create mode 100644 dom/media/test/test_progress.html create mode 100644 dom/media/test/test_reactivate.html create mode 100644 dom/media/test/test_readyState.html create mode 100644 dom/media/test/test_referer.html create mode 100644 dom/media/test/test_replay_metadata.html create mode 100644 dom/media/test/test_reset_events_async.html create mode 100644 dom/media/test/test_reset_src.html create mode 100644 dom/media/test/test_resolution_change.html create mode 100644 dom/media/test/test_resume.html create mode 100644 dom/media/test/test_seamless_looping.html create mode 100644 dom/media/test/test_seamless_looping_cancel_looping_future_frames.html create mode 100644 dom/media/test/test_seamless_looping_duration.html create mode 100644 dom/media/test/test_seamless_looping_media_element_state.html create mode 100644 dom/media/test/test_seamless_looping_not_keep_painting_old_video_frames.html create mode 100644 dom/media/test/test_seamless_looping_resume_video_decoding.html create mode 100644 dom/media/test/test_seamless_looping_seek_current_time.html create mode 100644 dom/media/test/test_seamless_looping_shorter_audio_than_video.html create mode 100644 dom/media/test/test_seamless_looping_video.html create mode 100644 dom/media/test/test_seek-1.html create mode 100644 dom/media/test/test_seek-10.html create mode 100644 dom/media/test/test_seek-11.html create mode 100644 
dom/media/test/test_seek-12.html create mode 100644 dom/media/test/test_seek-13.html create mode 100644 dom/media/test/test_seek-14.html create mode 100644 dom/media/test/test_seek-2.html create mode 100644 dom/media/test/test_seek-3.html create mode 100644 dom/media/test/test_seek-4.html create mode 100644 dom/media/test/test_seek-5.html create mode 100644 dom/media/test/test_seek-6.html create mode 100644 dom/media/test/test_seek-7.html create mode 100644 dom/media/test/test_seek-8.html create mode 100644 dom/media/test/test_seek-9.html create mode 100644 dom/media/test/test_seekLies.html create mode 100644 dom/media/test/test_seekToNextFrame.html create mode 100644 dom/media/test/test_seek_duration.html create mode 100644 dom/media/test/test_seek_negative.html create mode 100644 dom/media/test/test_seek_nosrc.html create mode 100644 dom/media/test/test_seek_out_of_range.html create mode 100644 dom/media/test/test_seek_promise_bug1344357.html create mode 100644 dom/media/test/test_seekable1.html create mode 100644 dom/media/test/test_setSinkId_after_loop.html create mode 100644 dom/media/test/test_source.html create mode 100644 dom/media/test/test_source_null.html create mode 100644 dom/media/test/test_source_write.html create mode 100644 dom/media/test/test_standalone.html create mode 100644 dom/media/test/test_streams_capture_origin.html create mode 100644 dom/media/test/test_streams_element_capture.html create mode 100644 dom/media/test/test_streams_element_capture_mediatrack.html create mode 100644 dom/media/test/test_streams_element_capture_playback.html create mode 100644 dom/media/test/test_streams_element_capture_reset.html create mode 100644 dom/media/test/test_streams_element_capture_twice.html create mode 100644 dom/media/test/test_streams_firstframe.html create mode 100644 dom/media/test/test_streams_gc.html create mode 100644 dom/media/test/test_streams_individual_pause.html create mode 100644 dom/media/test/test_streams_srcObject.html create mode 100644 dom/media/test/test_streams_tracks.html create mode 100644 dom/media/test/test_suspend_media_by_inactive_docshell.html create mode 100644 dom/media/test/test_temporary_file_blob_video_plays.html create mode 100644 dom/media/test/test_timeupdate_small_files.html create mode 100644 dom/media/test/test_unseekable.html create mode 100644 dom/media/test/test_videoDocumentTitle.html create mode 100644 dom/media/test/test_videoPlaybackQuality_totalFrames.html create mode 100644 dom/media/test/test_video_dimensions.html create mode 100644 dom/media/test/test_video_gzip_encoding.html create mode 100644 dom/media/test/test_video_in_audio_element.html create mode 100644 dom/media/test/test_video_low_power_telemetry.html create mode 100644 dom/media/test/test_video_stats_resistfingerprinting.html create mode 100644 dom/media/test/test_video_to_canvas.html create mode 100644 dom/media/test/test_volume.html create mode 100644 dom/media/test/test_vp9_superframes.html create mode 100644 dom/media/test/test_wav_ended1.html create mode 100644 dom/media/test/test_wav_ended2.html create mode 100644 dom/media/test/tone2s-silence4s-tone2s.opus create mode 100644 dom/media/test/two-xing-header-no-content-length.mp3 create mode 100644 dom/media/test/two-xing-header-no-content-length.mp3^headers^ create mode 100644 dom/media/test/variable-channel.ogg create mode 100644 dom/media/test/variable-channel.ogg^headers^ create mode 100644 dom/media/test/variable-channel.opus create mode 100644 dom/media/test/variable-channel.opus^headers^ create mode 
100644 dom/media/test/variable-preskip.opus create mode 100644 dom/media/test/variable-preskip.opus^headers^ create mode 100644 dom/media/test/variable-samplerate.ogg create mode 100644 dom/media/test/variable-samplerate.ogg^headers^ create mode 100644 dom/media/test/variable-samplerate.opus create mode 100644 dom/media/test/variable-samplerate.opus^headers^ create mode 100644 dom/media/test/vbr-head.mp3 create mode 100644 dom/media/test/vbr-head.mp3^headers^ create mode 100644 dom/media/test/vbr.mp3 create mode 100644 dom/media/test/vbr.mp3^headers^ create mode 100644 dom/media/test/very-short.mp3 create mode 100644 dom/media/test/video-overhang.ogg create mode 100644 dom/media/test/video-overhang.ogg^headers^ create mode 100644 dom/media/test/vp9-short.webm create mode 100644 dom/media/test/vp9-short.webm^headers^ create mode 100644 dom/media/test/vp9-superframes.webm create mode 100644 dom/media/test/vp9-superframes.webm^headers^ create mode 100644 dom/media/test/vp9.webm create mode 100644 dom/media/test/vp9.webm^headers^ create mode 100644 dom/media/test/vp9cake-short.webm create mode 100644 dom/media/test/vp9cake-short.webm^headers^ create mode 100644 dom/media/test/vp9cake.webm create mode 100644 dom/media/test/vp9cake.webm^headers^ create mode 100644 dom/media/test/wave_metadata.wav create mode 100644 dom/media/test/wave_metadata.wav^headers^ create mode 100644 dom/media/test/wave_metadata_bad_len.wav create mode 100644 dom/media/test/wave_metadata_bad_len.wav^headers^ create mode 100644 dom/media/test/wave_metadata_bad_no_null.wav create mode 100644 dom/media/test/wave_metadata_bad_no_null.wav^headers^ create mode 100644 dom/media/test/wave_metadata_bad_utf8.wav create mode 100644 dom/media/test/wave_metadata_bad_utf8.wav^headers^ create mode 100644 dom/media/test/wave_metadata_unknown_tag.wav create mode 100644 dom/media/test/wave_metadata_unknown_tag.wav^headers^ create mode 100644 dom/media/test/wave_metadata_utf8.wav create mode 100644 dom/media/test/wave_metadata_utf8.wav^headers^ create mode 100644 dom/media/test/wavedata_alaw.wav create mode 100644 dom/media/test/wavedata_alaw.wav^headers^ create mode 100644 dom/media/test/wavedata_float.wav create mode 100644 dom/media/test/wavedata_float.wav^headers^ create mode 100644 dom/media/test/wavedata_s16.wav create mode 100644 dom/media/test/wavedata_s16.wav^headers^ create mode 100644 dom/media/test/wavedata_s24.wav create mode 100644 dom/media/test/wavedata_s24.wav^headers^ create mode 100644 dom/media/test/wavedata_u8.wav create mode 100644 dom/media/test/wavedata_u8.wav^headers^ create mode 100644 dom/media/test/wavedata_ulaw.wav create mode 100644 dom/media/test/wavedata_ulaw.wav^headers^ create mode 100644 dom/media/test/white-3s-black-1s.webm create mode 100644 dom/media/test/white-3s-black-1s.webm^headers^ create mode 100644 dom/media/test/white-short.webm create mode 100644 dom/media/tests/crashtests/1281695.html create mode 100644 dom/media/tests/crashtests/1306476.html create mode 100644 dom/media/tests/crashtests/1348381.html create mode 100644 dom/media/tests/crashtests/1367930_1.html create mode 100644 dom/media/tests/crashtests/1367930_2.html create mode 100644 dom/media/tests/crashtests/1429507_1.html create mode 100644 dom/media/tests/crashtests/1429507_2.html create mode 100644 dom/media/tests/crashtests/1443212.html create mode 100644 dom/media/tests/crashtests/1453030.html create mode 100644 dom/media/tests/crashtests/1468451.html create mode 100644 dom/media/tests/crashtests/1490700.html create mode 100644 
dom/media/tests/crashtests/1505957.html create mode 100644 dom/media/tests/crashtests/1509442-1.html create mode 100644 dom/media/tests/crashtests/1509442.html create mode 100644 dom/media/tests/crashtests/1510848.html create mode 100644 dom/media/tests/crashtests/1511130.html create mode 100644 dom/media/tests/crashtests/1516292.html create mode 100644 dom/media/tests/crashtests/1573536.html create mode 100644 dom/media/tests/crashtests/1576938.html create mode 100644 dom/media/tests/crashtests/1594136.html create mode 100644 dom/media/tests/crashtests/1749308.html create mode 100644 dom/media/tests/crashtests/1764915.html create mode 100644 dom/media/tests/crashtests/1764933.html create mode 100644 dom/media/tests/crashtests/1764940.html create mode 100644 dom/media/tests/crashtests/1766668.html create mode 100644 dom/media/tests/crashtests/1783765.html create mode 100644 dom/media/tests/crashtests/780790.html create mode 100644 dom/media/tests/crashtests/791270.html create mode 100644 dom/media/tests/crashtests/791278.html create mode 100644 dom/media/tests/crashtests/791330.html create mode 100644 dom/media/tests/crashtests/799419.html create mode 100644 dom/media/tests/crashtests/801227.html create mode 100644 dom/media/tests/crashtests/802982.html create mode 100644 dom/media/tests/crashtests/812785.html create mode 100644 dom/media/tests/crashtests/822197.html create mode 100644 dom/media/tests/crashtests/834100.html create mode 100644 dom/media/tests/crashtests/836349.html create mode 100644 dom/media/tests/crashtests/837324.html create mode 100644 dom/media/tests/crashtests/855796.html create mode 100644 dom/media/tests/crashtests/860143.html create mode 100644 dom/media/tests/crashtests/861958.html create mode 100644 dom/media/tests/crashtests/863929.html create mode 100644 dom/media/tests/crashtests/crashtests.list create mode 100644 dom/media/tests/crashtests/datachannel-oom.html create mode 100644 dom/media/tools/generateGmpJson.py create mode 100644 dom/media/utils/MediaElementEventRunners.cpp create mode 100644 dom/media/utils/MediaElementEventRunners.h create mode 100644 dom/media/utils/PerformanceRecorder.cpp create mode 100644 dom/media/utils/PerformanceRecorder.h create mode 100644 dom/media/utils/TelemetryProbesReporter.cpp create mode 100644 dom/media/utils/TelemetryProbesReporter.h create mode 100644 dom/media/utils/gtest/TestPerformanceRecorder.cpp create mode 100644 dom/media/utils/gtest/moz.build create mode 100644 dom/media/utils/moz.build create mode 100644 dom/media/wave/WaveDecoder.cpp create mode 100644 dom/media/wave/WaveDecoder.h create mode 100644 dom/media/wave/WaveDemuxer.cpp create mode 100644 dom/media/wave/WaveDemuxer.h create mode 100644 dom/media/wave/moz.build create mode 100644 dom/media/webaudio/AlignedTArray.h create mode 100644 dom/media/webaudio/AlignmentUtils.h create mode 100644 dom/media/webaudio/AnalyserNode.cpp create mode 100644 dom/media/webaudio/AnalyserNode.h create mode 100644 dom/media/webaudio/AudioBlock.cpp create mode 100644 dom/media/webaudio/AudioBlock.h create mode 100644 dom/media/webaudio/AudioBuffer.cpp create mode 100644 dom/media/webaudio/AudioBuffer.h create mode 100644 dom/media/webaudio/AudioBufferSourceNode.cpp create mode 100644 dom/media/webaudio/AudioBufferSourceNode.h create mode 100644 dom/media/webaudio/AudioContext.cpp create mode 100644 dom/media/webaudio/AudioContext.h create mode 100644 dom/media/webaudio/AudioDestinationNode.cpp create mode 100644 dom/media/webaudio/AudioDestinationNode.h create mode 100644 
dom/media/webaudio/AudioEventTimeline.cpp create mode 100644 dom/media/webaudio/AudioEventTimeline.h create mode 100644 dom/media/webaudio/AudioListener.cpp create mode 100644 dom/media/webaudio/AudioListener.h create mode 100644 dom/media/webaudio/AudioNode.cpp create mode 100644 dom/media/webaudio/AudioNode.h create mode 100644 dom/media/webaudio/AudioNodeEngine.cpp create mode 100644 dom/media/webaudio/AudioNodeEngine.h create mode 100644 dom/media/webaudio/AudioNodeEngineGeneric.h create mode 100644 dom/media/webaudio/AudioNodeEngineGenericImpl.h create mode 100644 dom/media/webaudio/AudioNodeEngineNEON.cpp create mode 100644 dom/media/webaudio/AudioNodeEngineSSE2.cpp create mode 100644 dom/media/webaudio/AudioNodeEngineSSE4_2_FMA3.cpp create mode 100644 dom/media/webaudio/AudioNodeExternalInputTrack.cpp create mode 100644 dom/media/webaudio/AudioNodeExternalInputTrack.h create mode 100644 dom/media/webaudio/AudioNodeTrack.cpp create mode 100644 dom/media/webaudio/AudioNodeTrack.h create mode 100644 dom/media/webaudio/AudioParam.cpp create mode 100644 dom/media/webaudio/AudioParam.h create mode 100644 dom/media/webaudio/AudioParamDescriptorMap.h create mode 100644 dom/media/webaudio/AudioParamMap.cpp create mode 100644 dom/media/webaudio/AudioParamMap.h create mode 100644 dom/media/webaudio/AudioParamTimeline.h create mode 100644 dom/media/webaudio/AudioProcessingEvent.cpp create mode 100644 dom/media/webaudio/AudioProcessingEvent.h create mode 100644 dom/media/webaudio/AudioScheduledSourceNode.cpp create mode 100644 dom/media/webaudio/AudioScheduledSourceNode.h create mode 100644 dom/media/webaudio/AudioWorkletGlobalScope.cpp create mode 100644 dom/media/webaudio/AudioWorkletGlobalScope.h create mode 100644 dom/media/webaudio/AudioWorkletImpl.cpp create mode 100644 dom/media/webaudio/AudioWorkletImpl.h create mode 100644 dom/media/webaudio/AudioWorkletNode.cpp create mode 100644 dom/media/webaudio/AudioWorkletNode.h create mode 100644 dom/media/webaudio/AudioWorkletProcessor.cpp create mode 100644 dom/media/webaudio/AudioWorkletProcessor.h create mode 100644 dom/media/webaudio/BiquadFilterNode.cpp create mode 100644 dom/media/webaudio/BiquadFilterNode.h create mode 100644 dom/media/webaudio/ChannelMergerNode.cpp create mode 100644 dom/media/webaudio/ChannelMergerNode.h create mode 100644 dom/media/webaudio/ChannelSplitterNode.cpp create mode 100644 dom/media/webaudio/ChannelSplitterNode.h create mode 100644 dom/media/webaudio/ConstantSourceNode.cpp create mode 100644 dom/media/webaudio/ConstantSourceNode.h create mode 100644 dom/media/webaudio/ConvolverNode.cpp create mode 100644 dom/media/webaudio/ConvolverNode.h create mode 100644 dom/media/webaudio/DelayBuffer.cpp create mode 100644 dom/media/webaudio/DelayBuffer.h create mode 100644 dom/media/webaudio/DelayNode.cpp create mode 100644 dom/media/webaudio/DelayNode.h create mode 100644 dom/media/webaudio/DynamicsCompressorNode.cpp create mode 100644 dom/media/webaudio/DynamicsCompressorNode.h create mode 100644 dom/media/webaudio/FFTBlock.cpp create mode 100644 dom/media/webaudio/FFTBlock.h create mode 100644 dom/media/webaudio/GainNode.cpp create mode 100644 dom/media/webaudio/GainNode.h create mode 100644 dom/media/webaudio/IIRFilterNode.cpp create mode 100644 dom/media/webaudio/IIRFilterNode.h create mode 100644 dom/media/webaudio/MediaBufferDecoder.cpp create mode 100644 dom/media/webaudio/MediaBufferDecoder.h create mode 100644 dom/media/webaudio/MediaElementAudioSourceNode.cpp create mode 100644 
dom/media/webaudio/MediaElementAudioSourceNode.h create mode 100644 dom/media/webaudio/MediaStreamAudioDestinationNode.cpp create mode 100644 dom/media/webaudio/MediaStreamAudioDestinationNode.h create mode 100644 dom/media/webaudio/MediaStreamAudioSourceNode.cpp create mode 100644 dom/media/webaudio/MediaStreamAudioSourceNode.h create mode 100644 dom/media/webaudio/MediaStreamTrackAudioSourceNode.cpp create mode 100644 dom/media/webaudio/MediaStreamTrackAudioSourceNode.h create mode 100644 dom/media/webaudio/OscillatorNode.cpp create mode 100644 dom/media/webaudio/OscillatorNode.h create mode 100644 dom/media/webaudio/PannerNode.cpp create mode 100644 dom/media/webaudio/PannerNode.h create mode 100644 dom/media/webaudio/PanningUtils.h create mode 100644 dom/media/webaudio/PeriodicWave.cpp create mode 100644 dom/media/webaudio/PeriodicWave.h create mode 100644 dom/media/webaudio/PlayingRefChangeHandler.h create mode 100644 dom/media/webaudio/ReportDecodeResultTask.h create mode 100644 dom/media/webaudio/ScriptProcessorNode.cpp create mode 100644 dom/media/webaudio/ScriptProcessorNode.h create mode 100644 dom/media/webaudio/StereoPannerNode.cpp create mode 100644 dom/media/webaudio/StereoPannerNode.h create mode 100644 dom/media/webaudio/ThreeDPoint.cpp create mode 100644 dom/media/webaudio/ThreeDPoint.h create mode 100644 dom/media/webaudio/WaveShaperNode.cpp create mode 100644 dom/media/webaudio/WaveShaperNode.h create mode 100644 dom/media/webaudio/WebAudioUtils.cpp create mode 100644 dom/media/webaudio/WebAudioUtils.h create mode 100644 dom/media/webaudio/blink/Biquad.cpp create mode 100644 dom/media/webaudio/blink/Biquad.h create mode 100644 dom/media/webaudio/blink/DenormalDisabler.h create mode 100644 dom/media/webaudio/blink/DynamicsCompressor.cpp create mode 100644 dom/media/webaudio/blink/DynamicsCompressor.h create mode 100644 dom/media/webaudio/blink/DynamicsCompressorKernel.cpp create mode 100644 dom/media/webaudio/blink/DynamicsCompressorKernel.h create mode 100644 dom/media/webaudio/blink/FFTConvolver.cpp create mode 100644 dom/media/webaudio/blink/FFTConvolver.h create mode 100644 dom/media/webaudio/blink/HRTFDatabase.cpp create mode 100644 dom/media/webaudio/blink/HRTFDatabase.h create mode 100644 dom/media/webaudio/blink/HRTFDatabaseLoader.cpp create mode 100644 dom/media/webaudio/blink/HRTFDatabaseLoader.h create mode 100644 dom/media/webaudio/blink/HRTFElevation.cpp create mode 100644 dom/media/webaudio/blink/HRTFElevation.h create mode 100644 dom/media/webaudio/blink/HRTFKernel.cpp create mode 100644 dom/media/webaudio/blink/HRTFKernel.h create mode 100644 dom/media/webaudio/blink/HRTFPanner.cpp create mode 100644 dom/media/webaudio/blink/HRTFPanner.h create mode 100644 dom/media/webaudio/blink/IIRFilter.cpp create mode 100644 dom/media/webaudio/blink/IIRFilter.h create mode 100644 dom/media/webaudio/blink/IRC_Composite_C_R0195-incl.cpp create mode 100644 dom/media/webaudio/blink/PeriodicWave.cpp create mode 100644 dom/media/webaudio/blink/PeriodicWave.h create mode 100644 dom/media/webaudio/blink/README create mode 100644 dom/media/webaudio/blink/Reverb.cpp create mode 100644 dom/media/webaudio/blink/Reverb.h create mode 100644 dom/media/webaudio/blink/ReverbAccumulationBuffer.cpp create mode 100644 dom/media/webaudio/blink/ReverbAccumulationBuffer.h create mode 100644 dom/media/webaudio/blink/ReverbConvolver.cpp create mode 100644 dom/media/webaudio/blink/ReverbConvolver.h create mode 100644 dom/media/webaudio/blink/ReverbConvolverStage.cpp create mode 100644 
dom/media/webaudio/blink/ReverbConvolverStage.h create mode 100644 dom/media/webaudio/blink/ReverbInputBuffer.cpp create mode 100644 dom/media/webaudio/blink/ReverbInputBuffer.h create mode 100644 dom/media/webaudio/blink/ZeroPole.cpp create mode 100644 dom/media/webaudio/blink/ZeroPole.h create mode 100644 dom/media/webaudio/blink/moz.build create mode 100644 dom/media/webaudio/moz.build create mode 100644 dom/media/webaudio/test/1856145.ogg create mode 100644 dom/media/webaudio/test/8kHz-320kbps-6ch.aac create mode 100644 dom/media/webaudio/test/audio-expected.wav create mode 100644 dom/media/webaudio/test/audio-mono-expected-2.wav create mode 100644 dom/media/webaudio/test/audio-mono-expected.wav create mode 100644 dom/media/webaudio/test/audio-quad.wav create mode 100644 dom/media/webaudio/test/audio.ogv create mode 100644 dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js create mode 100644 dom/media/webaudio/test/audiovideo.mp4 create mode 100644 dom/media/webaudio/test/blink/README create mode 100644 dom/media/webaudio/test/blink/audio-testing.js create mode 100644 dom/media/webaudio/test/blink/biquad-filters.js create mode 100644 dom/media/webaudio/test/blink/biquad-testing.js create mode 100644 dom/media/webaudio/test/blink/convolution-testing.js create mode 100644 dom/media/webaudio/test/blink/mochitest.toml create mode 100644 dom/media/webaudio/test/blink/panner-model-testing.js create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeAllPass.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeAutomation.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeBandPass.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeGetFrequencyResponse.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeHighPass.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeHighShelf.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeLowPass.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeLowShelf.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeNotch.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodePeaking.html create mode 100644 dom/media/webaudio/test/blink/test_biquadFilterNodeTail.html create mode 100644 dom/media/webaudio/test/blink/test_iirFilterNode.html create mode 100644 dom/media/webaudio/test/blink/test_iirFilterNodeGetFrequencyResponse.html create mode 100644 dom/media/webaudio/test/corsServer.sjs create mode 100644 dom/media/webaudio/test/file_nodeCreationDocumentGone.html create mode 100755 dom/media/webaudio/test/generate-test-files.py create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-aac-afconvert.mp4 create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-aac.aac create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-aac.mp4 create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-alaw.wav create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-flac.flac create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-libmp3lame.mp3 create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-libopus.mp4 create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-libopus.opus create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-libopus.webm create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.ogg create mode 100644 
dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.webm create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100-mulaw.wav create mode 100644 dom/media/webaudio/test/half-a-second-1ch-44100.wav create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000-aac.aac create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000-aac.mp4 create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000-flac.flac create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000-libmp3lame.mp3 create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000-libopus.mp4 create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000-libopus.opus create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000-libopus.webm create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.ogg create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.webm create mode 100644 dom/media/webaudio/test/half-a-second-1ch-48000.wav create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100-aac.aac create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100-aac.mp4 create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100-flac.flac create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100-libmp3lame.mp3 create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100-libopus.mp4 create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100-libopus.opus create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100-libopus.webm create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.ogg create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.webm create mode 100644 dom/media/webaudio/test/half-a-second-2ch-44100.wav create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000-aac.aac create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000-aac.mp4 create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000-flac.flac create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000-libmp3lame.mp3 create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000-libopus.mp4 create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000-libopus.opus create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000-libopus.webm create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.ogg create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.webm create mode 100644 dom/media/webaudio/test/half-a-second-2ch-48000.wav create mode 100644 dom/media/webaudio/test/invalid.txt create mode 100644 dom/media/webaudio/test/invalidContent.flac create mode 100644 dom/media/webaudio/test/layouttest-glue.js create mode 100644 dom/media/webaudio/test/mochitest.toml create mode 100644 dom/media/webaudio/test/mochitest_audio.toml create mode 100644 dom/media/webaudio/test/mochitest_bugs.toml create mode 100644 dom/media/webaudio/test/mochitest_media.toml create mode 100644 dom/media/webaudio/test/nil-packet.ogg create mode 100644 dom/media/webaudio/test/noaudio.webm create mode 100644 dom/media/webaudio/test/sine-440-10s.opus create mode 100644 dom/media/webaudio/test/sixteen-frames.mp3 create mode 100644 dom/media/webaudio/test/small-shot-expected.wav create mode 100644 dom/media/webaudio/test/small-shot-mono-expected.wav create mode 100644 dom/media/webaudio/test/small-shot.mp3 create mode 100644 dom/media/webaudio/test/small-shot.ogg create mode 100644 dom/media/webaudio/test/sweep-300-330-1sec.opus create 
mode 100644 dom/media/webaudio/test/test_AudioBuffer.html create mode 100644 dom/media/webaudio/test/test_AudioContext.html create mode 100644 dom/media/webaudio/test/test_AudioContext_disabled.html create mode 100644 dom/media/webaudio/test/test_AudioListener.html create mode 100644 dom/media/webaudio/test/test_AudioNodeDevtoolsAPI.html create mode 100644 dom/media/webaudio/test/test_AudioParamDevtoolsAPI.html create mode 100644 dom/media/webaudio/test/test_OfflineAudioContext.html create mode 100644 dom/media/webaudio/test/test_ScriptProcessorCollected1.html create mode 100644 dom/media/webaudio/test/test_WebAudioMemoryReporting.html create mode 100644 dom/media/webaudio/test/test_analyserNode.html create mode 100644 dom/media/webaudio/test/test_analyserNodeMinimum.html create mode 100644 dom/media/webaudio/test/test_analyserNodeOutput.html create mode 100644 dom/media/webaudio/test/test_analyserNodePassThrough.html create mode 100644 dom/media/webaudio/test/test_analyserNodeWithGain.html create mode 100644 dom/media/webaudio/test/test_analyserScale.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNode.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeDetached.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeEnded.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeLazyLoopParam.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeLoop.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEnd.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEndSame.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeNoStart.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodePassThrough.html create mode 100644 dom/media/webaudio/test/test_audioBufferSourceNodeRate.html create mode 100644 dom/media/webaudio/test/test_audioContextGC.html create mode 100644 dom/media/webaudio/test/test_audioContextParams_recordNonDefaultSampleRate.html create mode 100644 dom/media/webaudio/test/test_audioContextParams_sampleRate.html create mode 100644 dom/media/webaudio/test/test_audioContextSuspendResumeClose.html create mode 100644 dom/media/webaudio/test/test_audioDestinationNode.html create mode 100644 dom/media/webaudio/test/test_audioParamChaining.html create mode 100644 dom/media/webaudio/test/test_audioParamExponentialRamp.html create mode 100644 dom/media/webaudio/test/test_audioParamGain.html create mode 100644 dom/media/webaudio/test/test_audioParamLinearRamp.html create mode 100644 dom/media/webaudio/test/test_audioParamSetCurveAtTime.html create mode 100644 dom/media/webaudio/test/test_audioParamSetTargetAtTime.html create mode 100644 dom/media/webaudio/test/test_audioParamSetTargetAtTimeZeroTimeConstant.html create mode 100644 dom/media/webaudio/test/test_audioParamSetValueAtTime.html create mode 100644 dom/media/webaudio/test/test_audioParamTimelineDestinationOffset.html create mode 100644 dom/media/webaudio/test/test_badConnect.html create mode 100644 dom/media/webaudio/test/test_biquadFilterNode.html create mode 100644 dom/media/webaudio/test/test_biquadFilterNodePassThrough.html create mode 100644 dom/media/webaudio/test/test_biquadFilterNodeWithGain.html create mode 100644 dom/media/webaudio/test/test_bug1027864.html create 
mode 100644 dom/media/webaudio/test/test_bug1056032.html create mode 100644 dom/media/webaudio/test/test_bug1113634.html create mode 100644 dom/media/webaudio/test/test_bug1118372.html create mode 100644 dom/media/webaudio/test/test_bug1255618.html create mode 100644 dom/media/webaudio/test/test_bug1267579.html create mode 100644 dom/media/webaudio/test/test_bug1355798.html create mode 100644 dom/media/webaudio/test/test_bug1447273.html create mode 100644 dom/media/webaudio/test/test_bug808374.html create mode 100644 dom/media/webaudio/test/test_bug827541.html create mode 100644 dom/media/webaudio/test/test_bug839753.html create mode 100644 dom/media/webaudio/test/test_bug845960.html create mode 100644 dom/media/webaudio/test/test_bug856771.html create mode 100644 dom/media/webaudio/test/test_bug866570.html create mode 100644 dom/media/webaudio/test/test_bug866737.html create mode 100644 dom/media/webaudio/test/test_bug867089.html create mode 100644 dom/media/webaudio/test/test_bug867174.html create mode 100644 dom/media/webaudio/test/test_bug873335.html create mode 100644 dom/media/webaudio/test/test_bug875221.html create mode 100644 dom/media/webaudio/test/test_bug875402.html create mode 100644 dom/media/webaudio/test/test_bug894150.html create mode 100644 dom/media/webaudio/test/test_bug956489.html create mode 100644 dom/media/webaudio/test/test_bug964376.html create mode 100644 dom/media/webaudio/test/test_bug966247.html create mode 100644 dom/media/webaudio/test/test_bug972678.html create mode 100644 dom/media/webaudio/test/test_channelMergerNode.html create mode 100644 dom/media/webaudio/test/test_channelMergerNodeWithVolume.html create mode 100644 dom/media/webaudio/test/test_channelSplitterNode.html create mode 100644 dom/media/webaudio/test/test_channelSplitterNodeWithVolume.html create mode 100644 dom/media/webaudio/test/test_convolver-upmixing-1-channel-response.html create mode 100644 dom/media/webaudio/test/test_convolverNode.html create mode 100644 dom/media/webaudio/test/test_convolverNodeChannelCount.html create mode 100644 dom/media/webaudio/test/test_convolverNodeChannelInterpretationChanges.html create mode 100644 dom/media/webaudio/test/test_convolverNodeDelay.html create mode 100644 dom/media/webaudio/test/test_convolverNodeFiniteInfluence.html create mode 100644 dom/media/webaudio/test/test_convolverNodeNormalization.html create mode 100644 dom/media/webaudio/test/test_convolverNodeOOM.html create mode 100644 dom/media/webaudio/test/test_convolverNodePassThrough.html create mode 100644 dom/media/webaudio/test/test_convolverNodeWithGain.html create mode 100644 dom/media/webaudio/test/test_convolverNode_mono_mono.html create mode 100644 dom/media/webaudio/test/test_currentTime.html create mode 100644 dom/media/webaudio/test/test_decodeAudioDataOnDetachedBuffer.html create mode 100644 dom/media/webaudio/test/test_decodeAudioDataPromise.html create mode 100644 dom/media/webaudio/test/test_decodeAudioError.html create mode 100644 dom/media/webaudio/test/test_decodeMultichannel.html create mode 100644 dom/media/webaudio/test/test_decodeOpusTail.html create mode 100644 dom/media/webaudio/test/test_decoderDelay.html create mode 100644 dom/media/webaudio/test/test_delayNode.html create mode 100644 dom/media/webaudio/test/test_delayNodeAtMax.html create mode 100644 dom/media/webaudio/test/test_delayNodeChannelChanges.html create mode 100644 dom/media/webaudio/test/test_delayNodeCycles.html create mode 100644 dom/media/webaudio/test/test_delayNodePassThrough.html create mode 
100644 dom/media/webaudio/test/test_delayNodeSmallMaxDelay.html create mode 100644 dom/media/webaudio/test/test_delayNodeTailIncrease.html create mode 100644 dom/media/webaudio/test/test_delayNodeTailWithDisconnect.html create mode 100644 dom/media/webaudio/test/test_delayNodeTailWithGain.html create mode 100644 dom/media/webaudio/test/test_delayNodeTailWithReconnect.html create mode 100644 dom/media/webaudio/test/test_delayNodeWithGain.html create mode 100644 dom/media/webaudio/test/test_delaynode-channel-count-1.html create mode 100644 dom/media/webaudio/test/test_disconnectAll.html create mode 100644 dom/media/webaudio/test/test_disconnectAudioParam.html create mode 100644 dom/media/webaudio/test/test_disconnectAudioParamFromOutput.html create mode 100644 dom/media/webaudio/test/test_disconnectExceptions.html create mode 100644 dom/media/webaudio/test/test_disconnectFromAudioNode.html create mode 100644 dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutput.html create mode 100644 dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutputAndInput.html create mode 100644 dom/media/webaudio/test/test_disconnectFromAudioNodeMultipleConnection.html create mode 100644 dom/media/webaudio/test/test_disconnectFromOutput.html create mode 100644 dom/media/webaudio/test/test_dynamicsCompressorNode.html create mode 100644 dom/media/webaudio/test/test_dynamicsCompressorNodePassThrough.html create mode 100644 dom/media/webaudio/test/test_dynamicsCompressorNodeWithGain.html create mode 100644 dom/media/webaudio/test/test_event_listener_leaks.html create mode 100644 dom/media/webaudio/test/test_gainNode.html create mode 100644 dom/media/webaudio/test/test_gainNodeInLoop.html create mode 100644 dom/media/webaudio/test/test_gainNodePassThrough.html create mode 100644 dom/media/webaudio/test/test_iirFilterNodePassThrough.html create mode 100644 dom/media/webaudio/test/test_maxChannelCount.html create mode 100644 dom/media/webaudio/test/test_mediaDecoding.html create mode 100644 dom/media/webaudio/test/test_mediaElementAudioSourceNode.html create mode 100644 dom/media/webaudio/test/test_mediaElementAudioSourceNodeCrossOrigin.html create mode 100644 dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html create mode 100644 dom/media/webaudio/test/test_mediaElementAudioSourceNodePassThrough.html create mode 100644 dom/media/webaudio/test/test_mediaElementAudioSourceNodeVideo.html create mode 100644 dom/media/webaudio/test/test_mediaStreamAudioDestinationNode.html create mode 100644 dom/media/webaudio/test/test_mediaStreamAudioSourceNode.html create mode 100644 dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html create mode 100644 dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html create mode 100644 dom/media/webaudio/test/test_mediaStreamAudioSourceNodePassThrough.html create mode 100644 dom/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html create mode 100644 dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNode.html create mode 100644 dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeCrossOrigin.html create mode 100644 dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeVideo.html create mode 100644 dom/media/webaudio/test/test_mixingRules.html create mode 100644 dom/media/webaudio/test/test_nodeCreationDocumentGone.html create mode 100644 dom/media/webaudio/test/test_nodeToParamConnection.html create mode 100644 dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html create mode 100644 
create mode 100644 dom/media/webaudio/test/test_offlineDestinationChannelCountLess.html
create mode 100644 dom/media/webaudio/test/test_offlineDestinationChannelCountMore.html
create mode 100644 dom/media/webaudio/test/test_oscillatorNode.html
create mode 100644 dom/media/webaudio/test/test_oscillatorNode2.html
create mode 100644 dom/media/webaudio/test/test_oscillatorNodeNegativeFrequency.html
create mode 100644 dom/media/webaudio/test/test_oscillatorNodePassThrough.html
create mode 100644 dom/media/webaudio/test/test_oscillatorNodeStart.html
create mode 100644 dom/media/webaudio/test/test_oscillatorTypeChange.html
create mode 100644 dom/media/webaudio/test/test_pannerNode.html
create mode 100644 dom/media/webaudio/test/test_pannerNodeAbove.html
create mode 100644 dom/media/webaudio/test/test_pannerNodeAtZeroDistance.html
create mode 100644 dom/media/webaudio/test/test_pannerNodeChannelCount.html
create mode 100644 dom/media/webaudio/test/test_pannerNodeHRTFSymmetry.html
create mode 100644 dom/media/webaudio/test/test_pannerNodePassThrough.html
create mode 100644 dom/media/webaudio/test/test_pannerNodeTail.html
create mode 100644 dom/media/webaudio/test/test_pannerNode_audioparam_distance.html
create mode 100644 dom/media/webaudio/test/test_pannerNode_equalPower.html
create mode 100644 dom/media/webaudio/test/test_pannerNode_maxDistance.html
create mode 100644 dom/media/webaudio/test/test_periodicWave.html
create mode 100644 dom/media/webaudio/test/test_periodicWaveBandLimiting.html
create mode 100644 dom/media/webaudio/test/test_periodicWaveDisableNormalization.html
create mode 100644 dom/media/webaudio/test/test_retrospective-exponentialRampToValueAtTime.html
create mode 100644 dom/media/webaudio/test/test_retrospective-linearRampToValueAtTime.html
create mode 100644 dom/media/webaudio/test/test_retrospective-setTargetAtTime.html
create mode 100644 dom/media/webaudio/test/test_retrospective-setValueAtTime.html
create mode 100644 dom/media/webaudio/test/test_retrospective-setValueCurveAtTime.html
create mode 100644 dom/media/webaudio/test/test_scriptProcessorNode.html
create mode 100644 dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html
create mode 100644 dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html
create mode 100644 dom/media/webaudio/test/test_scriptProcessorNodePassThrough.html
create mode 100644 dom/media/webaudio/test/test_scriptProcessorNodeZeroInputOutput.html
create mode 100644 dom/media/webaudio/test/test_scriptProcessorNode_playbackTime1.html
create mode 100644 dom/media/webaudio/test/test_sequentialBufferSourceWithResampling.html
create mode 100644 dom/media/webaudio/test/test_setValueCurveWithNonFiniteElements.html
create mode 100644 dom/media/webaudio/test/test_singleSourceDest.html
create mode 100644 dom/media/webaudio/test/test_slowStart.html
create mode 100644 dom/media/webaudio/test/test_stereoPannerNode.html
create mode 100644 dom/media/webaudio/test/test_stereoPannerNodePassThrough.html
create mode 100644 dom/media/webaudio/test/test_stereoPanningWithGain.html
create mode 100644 dom/media/webaudio/test/test_waveDecoder.html
create mode 100644 dom/media/webaudio/test/test_waveShaper.html
create mode 100644 dom/media/webaudio/test/test_waveShaperGain.html
create mode 100644 dom/media/webaudio/test/test_waveShaperInvalidLengthCurve.html
create mode 100644 dom/media/webaudio/test/test_waveShaperNoCurve.html
create mode 100644 dom/media/webaudio/test/test_waveShaperPassThrough.html
create mode 100644 dom/media/webaudio/test/test_webAudio_muteTab.html
create mode 100644 dom/media/webaudio/test/ting-44.1k-1ch.ogg
create mode 100644 dom/media/webaudio/test/ting-44.1k-1ch.wav
create mode 100644 dom/media/webaudio/test/ting-44.1k-2ch.ogg
create mode 100644 dom/media/webaudio/test/ting-44.1k-2ch.wav
create mode 100644 dom/media/webaudio/test/ting-48k-1ch.ogg
create mode 100644 dom/media/webaudio/test/ting-48k-1ch.wav
create mode 100644 dom/media/webaudio/test/ting-48k-2ch.ogg
create mode 100644 dom/media/webaudio/test/ting-48k-2ch.wav
create mode 100644 dom/media/webaudio/test/ting-dualchannel44.1.wav
create mode 100644 dom/media/webaudio/test/ting-dualchannel48.wav
create mode 100644 dom/media/webaudio/test/waveformatextensible.wav
create mode 100644 dom/media/webaudio/test/waveformatextensiblebadmask.wav
create mode 100644 dom/media/webaudio/test/webaudio.js
create mode 100644 dom/media/webcodecs/DecoderAgent.cpp
create mode 100644 dom/media/webcodecs/DecoderAgent.h
create mode 100644 dom/media/webcodecs/DecoderTemplate.cpp
create mode 100644 dom/media/webcodecs/DecoderTemplate.h
create mode 100644 dom/media/webcodecs/DecoderTypes.h
create mode 100644 dom/media/webcodecs/EncodedVideoChunk.cpp
create mode 100644 dom/media/webcodecs/EncodedVideoChunk.h
create mode 100644 dom/media/webcodecs/EncoderAgent.cpp
create mode 100644 dom/media/webcodecs/EncoderAgent.h
create mode 100644 dom/media/webcodecs/EncoderTemplate.cpp
create mode 100644 dom/media/webcodecs/EncoderTemplate.h
create mode 100644 dom/media/webcodecs/EncoderTypes.h
create mode 100644 dom/media/webcodecs/VideoColorSpace.cpp
create mode 100644 dom/media/webcodecs/VideoColorSpace.h
create mode 100644 dom/media/webcodecs/VideoDecoder.cpp
create mode 100644 dom/media/webcodecs/VideoDecoder.h
create mode 100644 dom/media/webcodecs/VideoEncoder.cpp
create mode 100644 dom/media/webcodecs/VideoEncoder.h
create mode 100644 dom/media/webcodecs/VideoFrame.cpp
create mode 100644 dom/media/webcodecs/VideoFrame.h
create mode 100644 dom/media/webcodecs/WebCodecsUtils.cpp
create mode 100644 dom/media/webcodecs/WebCodecsUtils.h
create mode 100644 dom/media/webcodecs/crashtests/1839270.html
create mode 100644 dom/media/webcodecs/crashtests/1848460.html
create mode 100644 dom/media/webcodecs/crashtests/1849271.html
create mode 100644 dom/media/webcodecs/crashtests/1864475.html
create mode 100644 dom/media/webcodecs/crashtests/crashtests.list
create mode 100644 dom/media/webcodecs/moz.build
create mode 100644 dom/media/webcodecs/test/mochitest.toml
create mode 100644 dom/media/webcodecs/test/test_videoFrame_mismatched_codedSize.html
create mode 100644 dom/media/webm/EbmlComposer.cpp
create mode 100644 dom/media/webm/EbmlComposer.h
create mode 100644 dom/media/webm/NesteggPacketHolder.h
create mode 100644 dom/media/webm/WebMBufferedParser.cpp
create mode 100644 dom/media/webm/WebMBufferedParser.h
create mode 100644 dom/media/webm/WebMDecoder.cpp
create mode 100644 dom/media/webm/WebMDecoder.h
create mode 100644 dom/media/webm/WebMDemuxer.cpp
create mode 100644 dom/media/webm/WebMDemuxer.h
create mode 100644 dom/media/webm/WebMWriter.cpp
create mode 100644 dom/media/webm/WebMWriter.h
create mode 100644 dom/media/webm/moz.build
create mode 100644 dom/media/webrtc/CubebDeviceEnumerator.cpp
create mode 100644 dom/media/webrtc/CubebDeviceEnumerator.h
create mode 100644 dom/media/webrtc/MediaEngine.h
create mode 100644 dom/media/webrtc/MediaEngineFake.cpp
create mode 100644 dom/media/webrtc/MediaEngineFake.h
create mode 100644 dom/media/webrtc/MediaEnginePrefs.h
create mode 100644 dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
create mode 100644 dom/media/webrtc/MediaEngineRemoteVideoSource.h
create mode 100644 dom/media/webrtc/MediaEngineSource.cpp
create mode 100644 dom/media/webrtc/MediaEngineSource.h
create mode 100644 dom/media/webrtc/MediaEngineWebRTC.cpp
create mode 100644 dom/media/webrtc/MediaEngineWebRTC.h
create mode 100644 dom/media/webrtc/MediaEngineWebRTCAudio.cpp
create mode 100644 dom/media/webrtc/MediaEngineWebRTCAudio.h
create mode 100644 dom/media/webrtc/MediaTrackConstraints.cpp
create mode 100644 dom/media/webrtc/MediaTrackConstraints.h
create mode 100644 dom/media/webrtc/MediaTransportChild.h
create mode 100644 dom/media/webrtc/MediaTransportParent.h
create mode 100644 dom/media/webrtc/PMediaTransport.ipdl
create mode 100644 dom/media/webrtc/PWebrtcGlobal.ipdl
create mode 100644 dom/media/webrtc/PeerIdentity.cpp
create mode 100644 dom/media/webrtc/PeerIdentity.h
create mode 100644 dom/media/webrtc/RTCCertificate.cpp
create mode 100644 dom/media/webrtc/RTCCertificate.h
create mode 100644 dom/media/webrtc/RTCIdentityProviderRegistrar.cpp
create mode 100644 dom/media/webrtc/RTCIdentityProviderRegistrar.h
create mode 100644 dom/media/webrtc/SineWaveGenerator.h
create mode 100644 dom/media/webrtc/WebrtcGlobal.h
create mode 100644 dom/media/webrtc/WebrtcIPCTraits.h
create mode 100644 dom/media/webrtc/common/CandidateInfo.h
create mode 100644 dom/media/webrtc/common/CommonTypes.h
create mode 100644 dom/media/webrtc/common/EncodingConstraints.h
create mode 100644 dom/media/webrtc/common/NullDeleter.h
create mode 100644 dom/media/webrtc/common/NullTransport.h
create mode 100644 dom/media/webrtc/common/Wrapper.h
create mode 100644 dom/media/webrtc/common/YuvStamper.cpp
create mode 100644 dom/media/webrtc/common/YuvStamper.h
create mode 100644 dom/media/webrtc/common/browser_logging/CSFLog.cpp
create mode 100644 dom/media/webrtc/common/browser_logging/CSFLog.h
create mode 100644 dom/media/webrtc/common/browser_logging/WebRtcLog.cpp
create mode 100644 dom/media/webrtc/common/browser_logging/WebRtcLog.h
create mode 100644 dom/media/webrtc/common/csf_common.h
create mode 100644 dom/media/webrtc/common/moz.build
create mode 100644 dom/media/webrtc/common/time_profiling/timecard.c
create mode 100644 dom/media/webrtc/common/time_profiling/timecard.h
create mode 100644 dom/media/webrtc/jsapi/MediaTransportHandler.cpp
create mode 100644 dom/media/webrtc/jsapi/MediaTransportHandler.h
create mode 100644 dom/media/webrtc/jsapi/MediaTransportHandlerIPC.cpp
create mode 100644 dom/media/webrtc/jsapi/MediaTransportHandlerIPC.h
create mode 100644 dom/media/webrtc/jsapi/MediaTransportParent.cpp
create mode 100644 dom/media/webrtc/jsapi/PacketDumper.cpp
create mode 100644 dom/media/webrtc/jsapi/PacketDumper.h
create mode 100644 dom/media/webrtc/jsapi/PeerConnectionCtx.cpp
create mode 100644 dom/media/webrtc/jsapi/PeerConnectionCtx.h
create mode 100644 dom/media/webrtc/jsapi/PeerConnectionImpl.cpp
create mode 100644 dom/media/webrtc/jsapi/PeerConnectionImpl.h
create mode 100644 dom/media/webrtc/jsapi/RTCDTMFSender.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCDTMFSender.h
create mode 100644 dom/media/webrtc/jsapi/RTCDtlsTransport.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCDtlsTransport.h
create mode 100644 dom/media/webrtc/jsapi/RTCEncodedAudioFrame.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCEncodedAudioFrame.h
create mode 100644 dom/media/webrtc/jsapi/RTCEncodedFrameBase.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCEncodedFrameBase.h
create mode 100644 dom/media/webrtc/jsapi/RTCEncodedVideoFrame.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCEncodedVideoFrame.h
create mode 100644 dom/media/webrtc/jsapi/RTCRtpReceiver.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCRtpReceiver.h
create mode 100644 dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCRtpScriptTransform.h
create mode 100644 dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h
create mode 100644 dom/media/webrtc/jsapi/RTCRtpSender.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCRtpSender.h
create mode 100644 dom/media/webrtc/jsapi/RTCRtpTransceiver.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCRtpTransceiver.h
create mode 100644 dom/media/webrtc/jsapi/RTCSctpTransport.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCSctpTransport.h
create mode 100644 dom/media/webrtc/jsapi/RTCStatsIdGenerator.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCStatsIdGenerator.h
create mode 100644 dom/media/webrtc/jsapi/RTCStatsReport.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCStatsReport.h
create mode 100644 dom/media/webrtc/jsapi/RTCTransformEventRunnable.cpp
create mode 100644 dom/media/webrtc/jsapi/RTCTransformEventRunnable.h
create mode 100644 dom/media/webrtc/jsapi/RemoteTrackSource.cpp
create mode 100644 dom/media/webrtc/jsapi/RemoteTrackSource.h
create mode 100644 dom/media/webrtc/jsapi/WebrtcGlobalChild.h
create mode 100644 dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp
create mode 100644 dom/media/webrtc/jsapi/WebrtcGlobalInformation.h
create mode 100644 dom/media/webrtc/jsapi/WebrtcGlobalParent.h
create mode 100644 dom/media/webrtc/jsapi/WebrtcGlobalStatsHistory.cpp
create mode 100644 dom/media/webrtc/jsapi/WebrtcGlobalStatsHistory.h
create mode 100644 dom/media/webrtc/jsapi/moz.build
create mode 100644 dom/media/webrtc/jsep/JsepCodecDescription.h
create mode 100644 dom/media/webrtc/jsep/JsepSession.h
create mode 100644 dom/media/webrtc/jsep/JsepSessionImpl.cpp
create mode 100644 dom/media/webrtc/jsep/JsepSessionImpl.h
create mode 100644 dom/media/webrtc/jsep/JsepTrack.cpp
create mode 100644 dom/media/webrtc/jsep/JsepTrack.h
create mode 100644 dom/media/webrtc/jsep/JsepTrackEncoding.h
create mode 100644 dom/media/webrtc/jsep/JsepTransceiver.h
create mode 100644 dom/media/webrtc/jsep/JsepTransport.h
create mode 100644 dom/media/webrtc/jsep/SsrcGenerator.cpp
create mode 100644 dom/media/webrtc/jsep/SsrcGenerator.h
create mode 100644 dom/media/webrtc/jsep/moz.build
create mode 100644 dom/media/webrtc/libwebrtcglue/AudioConduit.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/AudioConduit.h
create mode 100644 dom/media/webrtc/libwebrtcglue/CallWorkerThread.h
create mode 100644 dom/media/webrtc/libwebrtcglue/CodecConfig.h
create mode 100644 dom/media/webrtc/libwebrtcglue/FrameTransformer.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/FrameTransformer.h
create mode 100644 dom/media/webrtc/libwebrtcglue/FrameTransformerProxy.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/FrameTransformerProxy.h
create mode 100644 dom/media/webrtc/libwebrtcglue/GmpVideoCodec.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/GmpVideoCodec.h
create mode 100644 dom/media/webrtc/libwebrtcglue/MediaConduitControl.h
create mode 100644 dom/media/webrtc/libwebrtcglue/MediaConduitErrors.h
create mode 100644 dom/media/webrtc/libwebrtcglue/MediaConduitInterface.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/MediaConduitInterface.h
create mode 100644 dom/media/webrtc/libwebrtcglue/MediaDataCodec.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/MediaDataCodec.h
create mode 100644 dom/media/webrtc/libwebrtcglue/RtpRtcpConfig.h
create mode 100644 dom/media/webrtc/libwebrtcglue/RunningStat.h
create mode 100644 dom/media/webrtc/libwebrtcglue/SystemTime.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/SystemTime.h
create mode 100644 dom/media/webrtc/libwebrtcglue/TaskQueueWrapper.h
create mode 100644 dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/VideoConduit.h
create mode 100644 dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/VideoStreamFactory.h
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.h
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcImageBuffer.h
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcMediaDataDecoderCodec.h
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcVideoCodecFactory.cpp
create mode 100644 dom/media/webrtc/libwebrtcglue/WebrtcVideoCodecFactory.h
create mode 100644 dom/media/webrtc/libwebrtcglue/moz.build
create mode 100644 dom/media/webrtc/metrics.yaml
create mode 100644 dom/media/webrtc/moz.build
create mode 100644 dom/media/webrtc/sdp/HybridSdpParser.cpp
create mode 100644 dom/media/webrtc/sdp/HybridSdpParser.h
create mode 100644 dom/media/webrtc/sdp/ParsingResultComparer.cpp
create mode 100644 dom/media/webrtc/sdp/ParsingResultComparer.h
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdp.cpp
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdp.h
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdpAttributeList.cpp
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdpAttributeList.h
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdpGlue.cpp
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdpGlue.h
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdpInc.h
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdpMediaSection.cpp
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdpMediaSection.h
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdpParser.cpp
create mode 100644 dom/media/webrtc/sdp/RsdparsaSdpParser.h
create mode 100644 dom/media/webrtc/sdp/Sdp.h
create mode 100644 dom/media/webrtc/sdp/SdpAttribute.cpp
create mode 100644 dom/media/webrtc/sdp/SdpAttribute.h
create mode 100644 dom/media/webrtc/sdp/SdpAttributeList.h
create mode 100644 dom/media/webrtc/sdp/SdpEnum.h
create mode 100644 dom/media/webrtc/sdp/SdpHelper.cpp
create mode 100644 dom/media/webrtc/sdp/SdpHelper.h
create mode 100644 dom/media/webrtc/sdp/SdpLog.cpp
create mode 100644 dom/media/webrtc/sdp/SdpLog.h
create mode 100644 dom/media/webrtc/sdp/SdpMediaSection.cpp
create mode 100644 dom/media/webrtc/sdp/SdpMediaSection.h
create mode 100644 dom/media/webrtc/sdp/SdpParser.h
create mode 100644 dom/media/webrtc/sdp/SdpPref.cpp
create mode 100644 dom/media/webrtc/sdp/SdpPref.h
create mode 100644 dom/media/webrtc/sdp/SdpTelemetry.cpp
create mode 100644 dom/media/webrtc/sdp/SdpTelemetry.h
create mode 100644 dom/media/webrtc/sdp/SipccSdp.cpp
create mode 100644 dom/media/webrtc/sdp/SipccSdp.h
create mode 100644 dom/media/webrtc/sdp/SipccSdpAttributeList.cpp
create mode 100644 dom/media/webrtc/sdp/SipccSdpAttributeList.h
create mode 100644 dom/media/webrtc/sdp/SipccSdpMediaSection.cpp
create mode 100644 dom/media/webrtc/sdp/SipccSdpMediaSection.h
create mode 100644 dom/media/webrtc/sdp/SipccSdpParser.cpp
create mode 100644 dom/media/webrtc/sdp/SipccSdpParser.h
create mode 100644 dom/media/webrtc/sdp/moz.build
create mode 100644 dom/media/webrtc/sdp/rsdparsa_capi/Cargo.toml
create mode 100644 dom/media/webrtc/sdp/rsdparsa_capi/src/attribute.rs
create mode 100644 dom/media/webrtc/sdp/rsdparsa_capi/src/lib.rs
create mode 100644 dom/media/webrtc/sdp/rsdparsa_capi/src/media_section.rs
create mode 100644 dom/media/webrtc/sdp/rsdparsa_capi/src/network.rs
create mode 100644 dom/media/webrtc/sdp/rsdparsa_capi/src/types.rs
create mode 100644 dom/media/webrtc/tests/crashtests/1770075.html
create mode 100644 dom/media/webrtc/tests/crashtests/1789908.html
create mode 100644 dom/media/webrtc/tests/crashtests/1799168.html
create mode 100644 dom/media/webrtc/tests/crashtests/1816708.html
create mode 100644 dom/media/webrtc/tests/crashtests/1821477.html
create mode 100644 dom/media/webrtc/tests/crashtests/crashtests.list
create mode 100644 dom/media/webrtc/tests/crashtests/getUserMedia-audio.html
create mode 100644 dom/media/webrtc/tests/fuzztests/moz.build
create mode 100644 dom/media/webrtc/tests/fuzztests/sdp_parser_libfuzz.cpp
create mode 100644 dom/media/webrtc/tests/mochitests/NetworkPreparationChromeScript.js
create mode 100644 dom/media/webrtc/tests/mochitests/addTurnsSelfsignedCert.js
create mode 100644 dom/media/webrtc/tests/mochitests/blacksilence.js
create mode 100644 dom/media/webrtc/tests/mochitests/dataChannel.js
create mode 100644 dom/media/webrtc/tests/mochitests/head.js
create mode 100644 dom/media/webrtc/tests/mochitests/helpers_from_wpt/sdp.js
create mode 100644 dom/media/webrtc/tests/mochitests/iceTestUtils.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/identityPcTest.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-bad.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-min.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-http-trick.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-http-trick.js^headers^
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-http.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-http.js^headers^
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-https-double.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-https-double.js^headers^
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-https-odd-path.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-https-odd-path.js^headers^
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-https.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp-redirect-https.js^headers^
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp.js
create mode 100644 dom/media/webrtc/tests/mochitests/identity/idp.sjs
create mode 100644 dom/media/webrtc/tests/mochitests/identity/login.html
create mode 100644 dom/media/webrtc/tests/mochitests/identity/mochitest.toml
create mode 100644 dom/media/webrtc/tests/mochitests/identity/test_fingerprints.html
create mode 100644 dom/media/webrtc/tests/mochitests/identity/test_getIdentityAssertion.html
create mode 100644 dom/media/webrtc/tests/mochitests/identity/test_idpproxy.html
create mode 100644 dom/media/webrtc/tests/mochitests/identity/test_loginNeeded.html
create mode 100644 dom/media/webrtc/tests/mochitests/identity/test_peerConnection_asymmetricIsolation.html
create mode 100644 dom/media/webrtc/tests/mochitests/identity/test_peerConnection_peerIdentity.html
create mode 100644 dom/media/webrtc/tests/mochitests/identity/test_setIdentityProvider.html
create mode 100644 dom/media/webrtc/tests/mochitests/identity/test_setIdentityProviderWithErrors.html
create mode 100644 dom/media/webrtc/tests/mochitests/mediaStreamPlayback.js
create mode 100644 dom/media/webrtc/tests/mochitests/mochitest.toml
create mode 100644 dom/media/webrtc/tests/mochitests/mochitest_datachannel.toml
create mode 100644 dom/media/webrtc/tests/mochitests/mochitest_getusermedia.toml
create mode 100644 dom/media/webrtc/tests/mochitests/mochitest_peerconnection.toml
create mode 100644 dom/media/webrtc/tests/mochitests/network.js
create mode 100644 dom/media/webrtc/tests/mochitests/nonTrickleIce.js
create mode 100644 dom/media/webrtc/tests/mochitests/parser_rtp.js
create mode 100644 dom/media/webrtc/tests/mochitests/pc.js
create mode 100644 dom/media/webrtc/tests/mochitests/peerconnection_audio_forced_sample_rate.js
create mode 100644 dom/media/webrtc/tests/mochitests/sdpUtils.js
create mode 100644 dom/media/webrtc/tests/mochitests/simulcast.js
create mode 100644 dom/media/webrtc/tests/mochitests/stats.js
create mode 100644 dom/media/webrtc/tests/mochitests/templates.js
create mode 100644 dom/media/webrtc/tests/mochitests/test_1488832.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_1717318.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_a_noOp.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_basicAudio.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_basicAudioVideo.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_basicAudioVideoCombined.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_basicAudioVideoNoBundle.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_basicDataOnly.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_basicVideo.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_bug1013809.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_dataOnlyBufferedAmountLow.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_dtlsVersions.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_hostnameObfuscation.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_noOffer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_dataChannel_stats.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_defaultAudioConstraints.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_enumerateDevices.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_enumerateDevices_getUserMediaFake.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_enumerateDevices_iframe.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_enumerateDevices_iframe_pre_gum.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_enumerateDevices_legacy.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_enumerateDevices_navigation.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_fingerprinting_resistance.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_forceSampleRate.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_GC_MediaStream.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_active_autoplay.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_addTrackRemoveTrack.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_addtrack_removetrack_events.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_audioCapture.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_audioConstraints.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_audioConstraints_concurrentIframes.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_audioConstraints_concurrentStreams.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_basicAudio.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_basicAudio_loopback.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_basicScreenshare.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_basicTabshare.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_basicVideo.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_basicVideoAudio.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_basicVideo_playAfterLoadedmetadata.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_basicWindowshare.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_bug1223696.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_callbacks.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_constraints.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_cubebDisabled.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_cubebDisabledFakeStreams.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_getTrackById.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_gumWithinGum.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_loadedmetadata.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_mediaElementCapture_audio.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_mediaElementCapture_tracks.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_mediaElementCapture_video.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_mediaStreamClone.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_mediaStreamConstructors.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_mediaStreamTrackClone.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_nonDefaultRate.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_peerIdentity.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_permission.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_permission_iframe.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_playAudioTwice.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_playVideoAudioTwice.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_playVideoTwice.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_scarySources.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_spinEventLoop.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_trackCloneCleanup.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_getUserMedia_trackEnded.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_groupId.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_multi_mics.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_ondevicechange.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_addAudioTrackToExistingVideoStream.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_addDataChannel.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_addDataChannelNoBundle.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_addSecondAudioStream.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_addSecondAudioStreamNoBundle.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_addSecondVideoStream.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_addSecondVideoStreamNoBundle.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_addtrack_removetrack_events.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_answererAddSecondAudioStream.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_audioChannels.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_audioCodecs.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_audioContributingSources.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_audioRenegotiationInactiveAnswer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_audioSynchronizationSources.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_audioSynchronizationSourcesUnidirectional.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudio.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioDynamicPtMissingRtpmap.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioNATRelay.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioNATRelayTCP.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioNATRelayTCPWithStun300.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioNATRelayTLS.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioNATRelayWithStun300.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioNATSrflx.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioNoisyUDPBlock.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioPcmaPcmuOnly.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioRelayPolicy.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioRequireEOC.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVerifyRtpHeaderExtensions.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVideo.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVideoCombined.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVideoNoBundle.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVideoNoBundleNoRtcpMux.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVideoNoRtcpMux.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVideoTransceivers.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVideoVerifyExtmap.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVideoVerifyExtmapSendonly.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudioVideoVerifyTooLongMidFails.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudio_forced_higher_rate.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicAudio_forced_lower_rate.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicH264Video.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicScreenshare.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicVideo.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicVideoVerifyRtpHeaderExtensions.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_basicWindowshare.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_bug1013809.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_bug1042791.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_bug1227781.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_bug1512281.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_bug1773067.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_bug822674.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_bug825703.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_bug827843.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_bug834153.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_callbacks.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_captureStream_canvas_2d.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_captureStream_canvas_2d_noSSRC.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_captureStream_canvas_webgl.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_capturedVideo.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_certificates.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_checkPacketDumpHook.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_close.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_closeDuringIce.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_codecNegotiationFailure.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_constructedStream.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_disabledVideoPreNegotiation.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_encodingsNegotiation.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_errorCallbacks.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_extmapRenegotiation.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_forwarding_basicAudioVideoCombined.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_gatherWithSetConfiguration.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_gatherWithStun300.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_gatherWithStun300IPv6.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_glean.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_iceFailure.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_insertDTMF.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_localReofferRollback.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_localRollback.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_maxFsConstraint.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_multiple_captureStream_canvas_2d.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_noTrickleAnswer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_noTrickleOffer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_noTrickleOfferAnswer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_nonDefaultRate.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_offerRequiresReceiveAudio.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_offerRequiresReceiveVideo.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_offerRequiresReceiveVideoAudio.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_portRestrictions.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_promiseSendOnly.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_recordReceiveTrack.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_relayOnly.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_remoteReofferRollback.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_remoteRollback.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_removeAudioTrack.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_removeThenAddAudioTrack.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_removeThenAddAudioTrackNoBundle.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_removeThenAddVideoTrack.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_removeThenAddVideoTrackNoBundle.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_removeVideoTrack.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_renderAfterRenegotiation.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_replaceNullTrackThenRenegotiateAudio.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_replaceNullTrackThenRenegotiateVideo.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_replaceTrack.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_replaceTrack_camera.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_replaceTrack_disabled.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_replaceTrack_microphone.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_replaceVideoThenRenegotiate.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restartIce.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restartIceBadAnswer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restartIceLocalAndRemoteRollback.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restartIceLocalAndRemoteRollbackNoSubsequentRestart.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restartIceLocalRollback.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restartIceLocalRollbackNoSubsequentRestart.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restartIceNoBundle.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restartIceNoBundleNoRtcpMux.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restartIceNoRtcpMux.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restrictBandwidthTargetBitrate.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_restrictBandwidthWithTias.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_rtcp_rsize.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_scaleResolution.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_scaleResolution_oldSetParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_sender_and_receiver_stats.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setLocalAnswerInHaveLocalOffer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setLocalAnswerInStable.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setLocalOfferInHaveRemoteOffer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setParameters_maxFramerate.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setParameters_maxFramerate_oldSetParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setParameters_oldSetParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setParameters_scaleResolutionDownBy.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setParameters_scaleResolutionDownBy_oldSetParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setRemoteAnswerInHaveRemoteOffer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setRemoteAnswerInStable.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_setRemoteOfferInHaveLocalOffer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_sillyCodecPriorities.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastAnswer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastAnswer_lowResFirst.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastAnswer_lowResFirst_oldSetParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastAnswer_oldSetParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastOddResolution.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastOddResolution_oldSetParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastOffer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastOffer_lowResFirst.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastOffer_lowResFirst_oldSetParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_simulcastOffer_oldSetParameters.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_stats.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_stats_jitter.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_stats_oneway.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_stats_relayProtocol.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_stereoFmtpPref.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_syncSetDescription.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_telephoneEventFirst.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_threeUnbundledConnections.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_throwInCallbacks.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_trackDisabling.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_trackDisabling_clones.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_trackless_sender_stats.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_twoAudioStreams.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_twoAudioTracksInOneStream.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_twoAudioVideoStreams.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_twoAudioVideoStreamsCombined.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_twoAudioVideoStreamsCombinedNoBundle.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_twoVideoStreams.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_twoVideoTracksInOneStream.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_verifyAudioAfterRenegotiation.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_verifyDescriptions.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_verifyVideoAfterRenegotiation.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_videoCodecs.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_videoRenegotiationInactiveAnswer.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_peerConnection_webAudio.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_selftest.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_setSinkId-echoCancellation.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_setSinkId-stream-source.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_setSinkId.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_setSinkId_default_addTrack.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_setSinkId_preMutedElement.html
create mode 100644 dom/media/webrtc/tests/mochitests/test_unfocused_pref.html
create mode 100644 dom/media/webrtc/tests/mochitests/turnConfig.js
create mode 100644 dom/media/webrtc/third_party_build/README.md
create mode 100644 dom/media/webrtc/third_party_build/build_no_op_commits.sh
create mode 100644 dom/media/webrtc/third_party_build/cherry_pick_commit.py
create mode 100644 dom/media/webrtc/third_party_build/commit-build-file-changes.sh
create mode 100644 dom/media/webrtc/third_party_build/default_config_env
create mode 100644 dom/media/webrtc/third_party_build/default_mozconfig
create mode 100644 dom/media/webrtc/third_party_build/detect_upstream_revert.sh
create mode 100644 dom/media/webrtc/third_party_build/elm_arcconfig.patch
create mode 100644 dom/media/webrtc/third_party_build/elm_rebase.sh
create mode 100644 dom/media/webrtc/third_party_build/extract-for-git.py
create mode 100644 dom/media/webrtc/third_party_build/fast-forward-libwebrtc.sh
create mode 100644 dom/media/webrtc/third_party_build/fetch_github_repo.py
create mode 100644 dom/media/webrtc/third_party_build/filter_git_changes.py
create mode 100644 dom/media/webrtc/third_party_build/gn-configs/README.md
create mode 100644 dom/media/webrtc/third_party_build/gn-configs/webrtc.json
create mode 100644 dom/media/webrtc/third_party_build/lookup_branch_head.py
create mode 100644 dom/media/webrtc/third_party_build/loop-ff.sh
create mode 100755 dom/media/webrtc/third_party_build/make_upstream_revert_noop.sh
create mode 100644 dom/media/webrtc/third_party_build/pre-warmed-milestone.cache
create mode 100644 dom/media/webrtc/third_party_build/prep_repo.sh
create mode 100644 dom/media/webrtc/third_party_build/push_official_branch.sh
create mode 100644 dom/media/webrtc/third_party_build/restore_elm_arcconfig.py
create mode 100644 dom/media/webrtc/third_party_build/restore_patch_stack.py
create mode 100644 dom/media/webrtc/third_party_build/run_operations.py
create mode 100644 dom/media/webrtc/third_party_build/save_patch_stack.py
create mode 100644 dom/media/webrtc/third_party_build/update_default_config.sh
create mode 100644 dom/media/webrtc/third_party_build/use_config_env.sh
create mode 100644 dom/media/webrtc/third_party_build/vendor-libwebrtc.py
create mode 100644 dom/media/webrtc/third_party_build/vendor_and_commit.py
create mode 100644 dom/media/webrtc/third_party_build/verify_vendoring.sh
create mode 100644 dom/media/webrtc/third_party_build/webrtc.mozbuild
create mode 100644 dom/media/webrtc/third_party_build/write_default_config.py
create mode 100644 dom/media/webrtc/transport/README
create mode 100644 dom/media/webrtc/transport/SrtpFlow.cpp
create mode 100644 dom/media/webrtc/transport/SrtpFlow.h
create mode 100644 dom/media/webrtc/transport/WebrtcTCPSocketWrapper.cpp
create mode 100644 dom/media/webrtc/transport/WebrtcTCPSocketWrapper.h
create mode 100644 dom/media/webrtc/transport/build/moz.build
create mode 100644 dom/media/webrtc/transport/common.build
create mode 100644 dom/media/webrtc/transport/dtlsidentity.cpp
create mode 100644 dom/media/webrtc/transport/dtlsidentity.h
create mode 100644 dom/media/webrtc/transport/fuzztest/moz.build
create mode 100644 dom/media/webrtc/transport/fuzztest/stun_parser_libfuzz.cpp
create mode 100644 dom/media/webrtc/transport/ipc/NrIceStunAddrMessageUtils.h
create mode 100644 dom/media/webrtc/transport/ipc/PStunAddrsParams.h
create mode 100644 dom/media/webrtc/transport/ipc/PStunAddrsRequest.ipdl
create mode 100644 dom/media/webrtc/transport/ipc/PWebrtcTCPSocket.ipdl
create mode 100644 dom/media/webrtc/transport/ipc/StunAddrsRequestChild.cpp
create mode 100644 dom/media/webrtc/transport/ipc/StunAddrsRequestChild.h
create mode 100644 dom/media/webrtc/transport/ipc/StunAddrsRequestParent.cpp
create mode 100644 dom/media/webrtc/transport/ipc/StunAddrsRequestParent.h
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcProxyConfig.ipdlh
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcTCPSocket.cpp
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcTCPSocket.h
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcTCPSocketCallback.h
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcTCPSocketChild.cpp
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcTCPSocketChild.h
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcTCPSocketLog.cpp
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcTCPSocketLog.h
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcTCPSocketParent.cpp
create mode 100644 dom/media/webrtc/transport/ipc/WebrtcTCPSocketParent.h
create mode 100644 dom/media/webrtc/transport/ipc/moz.build
create mode 100644 dom/media/webrtc/transport/logging.h
create mode 100644 dom/media/webrtc/transport/m_cpp_utils.h
create mode 100644 dom/media/webrtc/transport/mdns_service/Cargo.toml
create mode 100644 dom/media/webrtc/transport/mdns_service/mdns_service.h
create mode 100644 dom/media/webrtc/transport/mdns_service/src/lib.rs
create mode 100644 dom/media/webrtc/transport/mediapacket.cpp
create mode 100644 dom/media/webrtc/transport/mediapacket.h
create mode 100644 dom/media/webrtc/transport/moz.build
create mode 100644 dom/media/webrtc/transport/nr_socket_proxy_config.cpp
create mode 100644 dom/media/webrtc/transport/nr_socket_proxy_config.h
create mode 100644 dom/media/webrtc/transport/nr_socket_prsock.cpp
create mode 100644 dom/media/webrtc/transport/nr_socket_prsock.h
create mode 100644 dom/media/webrtc/transport/nr_socket_tcp.cpp
create mode 100644 dom/media/webrtc/transport/nr_socket_tcp.h
create mode 100644 dom/media/webrtc/transport/nr_timer.cpp
create mode 100644 dom/media/webrtc/transport/nricectx.cpp
create mode 100644 dom/media/webrtc/transport/nricectx.h
create mode 100644 dom/media/webrtc/transport/nricemediastream.cpp
create mode 100644 dom/media/webrtc/transport/nricemediastream.h
create mode 100644 dom/media/webrtc/transport/nriceresolver.cpp
create mode 100644 dom/media/webrtc/transport/nriceresolver.h
create mode 100644 dom/media/webrtc/transport/nriceresolverfake.cpp
create mode 100644 dom/media/webrtc/transport/nriceresolverfake.h
create mode 100644 dom/media/webrtc/transport/nricestunaddr.cpp
create mode 100644 dom/media/webrtc/transport/nricestunaddr.h
create mode 100644 dom/media/webrtc/transport/nrinterfaceprioritizer.cpp
create mode 100644 dom/media/webrtc/transport/nrinterfaceprioritizer.h
create mode 100644 dom/media/webrtc/transport/rlogconnector.cpp
create mode 100644 dom/media/webrtc/transport/rlogconnector.h
create mode 100644 dom/media/webrtc/transport/runnable_utils.h
create mode 100644 dom/media/webrtc/transport/sigslot.h
create mode 100644 dom/media/webrtc/transport/simpletokenbucket.cpp
create mode 100644 dom/media/webrtc/transport/simpletokenbucket.h
create mode 100644 dom/media/webrtc/transport/srtp/README_MOZILLA
create mode 100644 dom/media/webrtc/transport/srtp/moz.build
create mode 100644 dom/media/webrtc/transport/stun_socket_filter.cpp
create mode 100644 dom/media/webrtc/transport/stun_socket_filter.h
create mode 100644 dom/media/webrtc/transport/test/TestSyncRunnable.cpp
create mode 100644 dom/media/webrtc/transport/test/buffered_stun_socket_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/dummysocket.h
create mode 100644 dom/media/webrtc/transport/test/gtest_ringbuffer_dumper.h
create mode 100644 dom/media/webrtc/transport/test/gtest_utils.h
create mode 100644 dom/media/webrtc/transport/test/ice_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/moz.build
create mode 100644 dom/media/webrtc/transport/test/mtransport_test_utils.h
create mode 100644 dom/media/webrtc/transport/test/multi_tcp_socket_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/nrappkit_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/proxy_tunnel_socket_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/rlogconnector_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/runnable_utils_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/sctp_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/simpletokenbucket_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/sockettransportservice_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/stunserver.cpp
create mode 100644 dom/media/webrtc/transport/test/stunserver.h
create mode 100644 dom/media/webrtc/transport/test/test_nr_socket_ice_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/test_nr_socket_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/transport_unittests.cpp
create mode 100644 dom/media/webrtc/transport/test/turn_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test/webrtcproxychannel_unittest.cpp
create mode 100644 dom/media/webrtc/transport/test_nr_socket.cpp
create mode 100644 dom/media/webrtc/transport/test_nr_socket.h
create mode 100644 dom/media/webrtc/transport/third_party/moz.build
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/COPYRIGHT
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/README
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/moz.yaml
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/nicer.gyp
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/non-unified-build.patch
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/crypto/nr_crypto.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/crypto/nr_crypto.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_candidate.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_candidate.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_candidate_pair.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_candidate_pair.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_codeword.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_component.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_component.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_ctx.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_ctx.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_handler.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_media_stream.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_media_stream.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_parser.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_peer_ctx.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_peer_ctx.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_reg.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_socket.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/ice/ice_socket.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/local_addr.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/local_addr.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_interface_prioritizer.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_interface_prioritizer.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_resolver.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_resolver.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_socket.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_socket.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_socket_local.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_socket_multi_tcp.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_socket_multi_tcp.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_socket_wrapper.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/nr_socket_wrapper.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/transport_addr.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/transport_addr.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/transport_addr_reg.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/net/transport_addr_reg.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs-bsd.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs-bsd.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs-netlink.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs-netlink.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs-win32.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs-win32.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs.c
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs.h
create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/nr_socket_buffered_stun.c
dom/media/webrtc/transport/third_party/nICEr/src/stun/nr_socket_buffered_stun.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/nr_socket_turn.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/nr_socket_turn.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_build.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_build.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_client_ctx.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_client_ctx.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_codec.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_codec.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_hint.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_hint.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_msg.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_msg.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_proc.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_proc.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_reg.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_server_ctx.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_server_ctx.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_util.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/stun_util.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/turn_client_ctx.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/stun/turn_client_ctx.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/util/cb_args.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/util/cb_args.h create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/util/ice_util.c create mode 100644 dom/media/webrtc/transport/third_party/nICEr/src/util/ice_util.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/COPYRIGHT create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/README create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/README_MOZILLA create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/nrappkit.gyp create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/event/async_timer.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/event/async_wait.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/event/async_wait_int.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/log/r_log.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/log/r_log.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/plugin/nr_plugin.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/android/include/android_funcs.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/android/include/csi_platform.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/android/include/sys/ttycom.h create mode 100644 
dom/media/webrtc/transport/third_party/nrappkit/src/port/android/port-impl.mk create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/darwin/include/csi_platform.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/generic/include/sys/queue.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/linux/include/csi_platform.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/linux/include/linux_funcs.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/linux/include/sys/ttycom.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/linux/port-impl.mk create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/port/win32/include/csi_platform.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/registry/c2ru.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/registry/c2ru.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry_int.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry_local.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry_vtbl.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/registry/registrycb.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/share/nr_api.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/share/nr_common.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/share/nr_reg_keys.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/stats/nrstats.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/byteorder.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/byteorder.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/hex.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/hex.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/assoc.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/debug.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/debug.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_assoc.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_assoc.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_common.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_crc32.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_crc32.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_data.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_data.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_defaults.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_errors.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_errors.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_includes.h create mode 100644 
dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_list.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_list.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_macros.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_memory.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_memory.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_replace.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_thread.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_time.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_time.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_types.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/p_buf.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/p_buf.h create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/util.c create mode 100644 dom/media/webrtc/transport/third_party/nrappkit/src/util/util.h create mode 100644 dom/media/webrtc/transport/transportflow.cpp create mode 100644 dom/media/webrtc/transport/transportflow.h create mode 100644 dom/media/webrtc/transport/transportlayer.cpp create mode 100644 dom/media/webrtc/transport/transportlayer.h create mode 100644 dom/media/webrtc/transport/transportlayerdtls.cpp create mode 100644 dom/media/webrtc/transport/transportlayerdtls.h create mode 100644 dom/media/webrtc/transport/transportlayerice.cpp create mode 100644 dom/media/webrtc/transport/transportlayerice.h create mode 100644 dom/media/webrtc/transport/transportlayerlog.cpp create mode 100644 dom/media/webrtc/transport/transportlayerlog.h create mode 100644 dom/media/webrtc/transport/transportlayerloopback.cpp create mode 100644 dom/media/webrtc/transport/transportlayerloopback.h create mode 100644 dom/media/webrtc/transport/transportlayersrtp.cpp create mode 100644 dom/media/webrtc/transport/transportlayersrtp.h create mode 100644 dom/media/webrtc/transportbridge/MediaPipeline.cpp create mode 100644 dom/media/webrtc/transportbridge/MediaPipeline.h create mode 100644 dom/media/webrtc/transportbridge/MediaPipelineFilter.cpp create mode 100644 dom/media/webrtc/transportbridge/MediaPipelineFilter.h create mode 100644 dom/media/webrtc/transportbridge/RtpLogger.cpp create mode 100644 dom/media/webrtc/transportbridge/RtpLogger.h create mode 100644 dom/media/webrtc/transportbridge/moz.build create mode 100644 dom/media/webspeech/moz.build create mode 100644 dom/media/webspeech/recognition/OnlineSpeechRecognitionService.cpp create mode 100644 dom/media/webspeech/recognition/OnlineSpeechRecognitionService.h create mode 100644 dom/media/webspeech/recognition/SpeechGrammar.cpp create mode 100644 dom/media/webspeech/recognition/SpeechGrammar.h create mode 100644 dom/media/webspeech/recognition/SpeechGrammarList.cpp create mode 100644 dom/media/webspeech/recognition/SpeechGrammarList.h create mode 100644 dom/media/webspeech/recognition/SpeechRecognition.cpp create mode 100644 dom/media/webspeech/recognition/SpeechRecognition.h create mode 100644 dom/media/webspeech/recognition/SpeechRecognitionAlternative.cpp create mode 100644 dom/media/webspeech/recognition/SpeechRecognitionAlternative.h create mode 100644 dom/media/webspeech/recognition/SpeechRecognitionResult.cpp 
create mode 100644 dom/media/webspeech/recognition/SpeechRecognitionResult.h create mode 100644 dom/media/webspeech/recognition/SpeechRecognitionResultList.cpp create mode 100644 dom/media/webspeech/recognition/SpeechRecognitionResultList.h create mode 100644 dom/media/webspeech/recognition/SpeechTrackListener.cpp create mode 100644 dom/media/webspeech/recognition/SpeechTrackListener.h create mode 100644 dom/media/webspeech/recognition/endpointer.cc create mode 100644 dom/media/webspeech/recognition/endpointer.h create mode 100644 dom/media/webspeech/recognition/energy_endpointer.cc create mode 100644 dom/media/webspeech/recognition/energy_endpointer.h create mode 100644 dom/media/webspeech/recognition/energy_endpointer_params.cc create mode 100644 dom/media/webspeech/recognition/energy_endpointer_params.h create mode 100644 dom/media/webspeech/recognition/moz.build create mode 100644 dom/media/webspeech/recognition/nsISpeechRecognitionService.idl create mode 100644 dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.cpp create mode 100644 dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.h create mode 100644 dom/media/webspeech/recognition/test/head.js create mode 100644 dom/media/webspeech/recognition/test/hello.ogg create mode 100644 dom/media/webspeech/recognition/test/hello.ogg^headers^ create mode 100644 dom/media/webspeech/recognition/test/http_requesthandler.sjs create mode 100644 dom/media/webspeech/recognition/test/mochitest.toml create mode 100644 dom/media/webspeech/recognition/test/silence.ogg create mode 100644 dom/media/webspeech/recognition/test/silence.ogg^headers^ create mode 100644 dom/media/webspeech/recognition/test/sinoid+hello.ogg create mode 100644 dom/media/webspeech/recognition/test/sinoid+hello.ogg^headers^ create mode 100644 dom/media/webspeech/recognition/test/test_abort.html create mode 100644 dom/media/webspeech/recognition/test/test_audio_capture_error.html create mode 100644 dom/media/webspeech/recognition/test/test_call_start_from_end_handler.html create mode 100644 dom/media/webspeech/recognition/test/test_nested_eventloop.html create mode 100644 dom/media/webspeech/recognition/test/test_online_400_response.html create mode 100644 dom/media/webspeech/recognition/test/test_online_empty_result_handling.html create mode 100644 dom/media/webspeech/recognition/test/test_online_hangup.html create mode 100644 dom/media/webspeech/recognition/test/test_online_http.html create mode 100644 dom/media/webspeech/recognition/test/test_online_http_webkit.html create mode 100644 dom/media/webspeech/recognition/test/test_online_malformed_result_handling.html create mode 100644 dom/media/webspeech/recognition/test/test_preference_enable.html create mode 100644 dom/media/webspeech/recognition/test/test_recognition_service_error.html create mode 100644 dom/media/webspeech/recognition/test/test_success_without_recognition_service.html create mode 100644 dom/media/webspeech/recognition/test/test_timeout.html create mode 100644 dom/media/webspeech/synth/SpeechSynthesis.cpp create mode 100644 dom/media/webspeech/synth/SpeechSynthesis.h create mode 100644 dom/media/webspeech/synth/SpeechSynthesisUtterance.cpp create mode 100644 dom/media/webspeech/synth/SpeechSynthesisUtterance.h create mode 100644 dom/media/webspeech/synth/SpeechSynthesisVoice.cpp create mode 100644 dom/media/webspeech/synth/SpeechSynthesisVoice.h create mode 100644 dom/media/webspeech/synth/android/SpeechSynthesisService.cpp create mode 100644 
dom/media/webspeech/synth/android/SpeechSynthesisService.h create mode 100644 dom/media/webspeech/synth/android/components.conf create mode 100644 dom/media/webspeech/synth/android/moz.build create mode 100644 dom/media/webspeech/synth/cocoa/OSXSpeechSynthesizerService.h create mode 100644 dom/media/webspeech/synth/cocoa/OSXSpeechSynthesizerService.mm create mode 100644 dom/media/webspeech/synth/cocoa/components.conf create mode 100644 dom/media/webspeech/synth/cocoa/moz.build create mode 100644 dom/media/webspeech/synth/crashtests/1230428.html create mode 100644 dom/media/webspeech/synth/crashtests/crashtests.list create mode 100644 dom/media/webspeech/synth/ipc/PSpeechSynthesis.ipdl create mode 100644 dom/media/webspeech/synth/ipc/PSpeechSynthesisRequest.ipdl create mode 100644 dom/media/webspeech/synth/ipc/SpeechSynthesisChild.cpp create mode 100644 dom/media/webspeech/synth/ipc/SpeechSynthesisChild.h create mode 100644 dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp create mode 100644 dom/media/webspeech/synth/ipc/SpeechSynthesisParent.h create mode 100644 dom/media/webspeech/synth/moz.build create mode 100644 dom/media/webspeech/synth/nsISpeechService.idl create mode 100644 dom/media/webspeech/synth/nsISynthVoiceRegistry.idl create mode 100644 dom/media/webspeech/synth/nsSpeechTask.cpp create mode 100644 dom/media/webspeech/synth/nsSpeechTask.h create mode 100644 dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp create mode 100644 dom/media/webspeech/synth/nsSynthVoiceRegistry.h create mode 100644 dom/media/webspeech/synth/speechd/SpeechDispatcherService.cpp create mode 100644 dom/media/webspeech/synth/speechd/SpeechDispatcherService.h create mode 100644 dom/media/webspeech/synth/speechd/components.conf create mode 100644 dom/media/webspeech/synth/speechd/moz.build create mode 100644 dom/media/webspeech/synth/test/common.js create mode 100644 dom/media/webspeech/synth/test/components.conf create mode 100644 dom/media/webspeech/synth/test/file_bfcache_page1.html create mode 100644 dom/media/webspeech/synth/test/file_bfcache_page2.html create mode 100644 dom/media/webspeech/synth/test/file_global_queue.html create mode 100644 dom/media/webspeech/synth/test/file_global_queue_cancel.html create mode 100644 dom/media/webspeech/synth/test/file_global_queue_pause.html create mode 100644 dom/media/webspeech/synth/test/file_indirect_service_events.html create mode 100644 dom/media/webspeech/synth/test/file_setup.html create mode 100644 dom/media/webspeech/synth/test/file_speech_cancel.html create mode 100644 dom/media/webspeech/synth/test/file_speech_error.html create mode 100644 dom/media/webspeech/synth/test/file_speech_queue.html create mode 100644 dom/media/webspeech/synth/test/file_speech_repeating_utterance.html create mode 100644 dom/media/webspeech/synth/test/file_speech_simple.html create mode 100644 dom/media/webspeech/synth/test/mochitest.toml create mode 100644 dom/media/webspeech/synth/test/nsFakeSynthServices.cpp create mode 100644 dom/media/webspeech/synth/test/nsFakeSynthServices.h create mode 100644 dom/media/webspeech/synth/test/startup/file_voiceschanged.html create mode 100644 dom/media/webspeech/synth/test/startup/mochitest.toml create mode 100644 dom/media/webspeech/synth/test/startup/test_voiceschanged.html create mode 100644 dom/media/webspeech/synth/test/test_bfcache.html create mode 100644 dom/media/webspeech/synth/test/test_global_queue.html create mode 100644 dom/media/webspeech/synth/test/test_global_queue_cancel.html create mode 100644 
dom/media/webspeech/synth/test/test_global_queue_pause.html create mode 100644 dom/media/webspeech/synth/test/test_indirect_service_events.html create mode 100644 dom/media/webspeech/synth/test/test_setup.html create mode 100644 dom/media/webspeech/synth/test/test_speech_cancel.html create mode 100644 dom/media/webspeech/synth/test/test_speech_error.html create mode 100644 dom/media/webspeech/synth/test/test_speech_queue.html create mode 100644 dom/media/webspeech/synth/test/test_speech_repeating_utterance.html create mode 100644 dom/media/webspeech/synth/test/test_speech_simple.html create mode 100644 dom/media/webspeech/synth/windows/SapiService.cpp create mode 100644 dom/media/webspeech/synth/windows/SapiService.h create mode 100644 dom/media/webspeech/synth/windows/components.conf create mode 100644 dom/media/webspeech/synth/windows/moz.build create mode 100644 dom/media/webvtt/TextTrack.cpp create mode 100644 dom/media/webvtt/TextTrack.h create mode 100644 dom/media/webvtt/TextTrackCue.cpp create mode 100644 dom/media/webvtt/TextTrackCue.h create mode 100644 dom/media/webvtt/TextTrackCueList.cpp create mode 100644 dom/media/webvtt/TextTrackCueList.h create mode 100644 dom/media/webvtt/TextTrackList.cpp create mode 100644 dom/media/webvtt/TextTrackList.h create mode 100644 dom/media/webvtt/TextTrackRegion.cpp create mode 100644 dom/media/webvtt/TextTrackRegion.h create mode 100644 dom/media/webvtt/WebVTTListener.cpp create mode 100644 dom/media/webvtt/WebVTTListener.h create mode 100644 dom/media/webvtt/WebVTTParserWrapper.sys.mjs create mode 100644 dom/media/webvtt/components.conf create mode 100644 dom/media/webvtt/moz.build create mode 100644 dom/media/webvtt/nsIWebVTTListener.idl create mode 100644 dom/media/webvtt/nsIWebVTTParserWrapper.idl create mode 100644 dom/media/webvtt/package.json create mode 100644 dom/media/webvtt/test/crashtests/1304948.html create mode 100644 dom/media/webvtt/test/crashtests/1319486.html create mode 100644 dom/media/webvtt/test/crashtests/1533909.html create mode 100644 dom/media/webvtt/test/crashtests/882549.html create mode 100644 dom/media/webvtt/test/crashtests/894104.html create mode 100644 dom/media/webvtt/test/crashtests/crashtests.list create mode 100644 dom/media/webvtt/test/mochitest/bad-signature.vtt create mode 100644 dom/media/webvtt/test/mochitest/basic.vtt create mode 100644 dom/media/webvtt/test/mochitest/bug883173.vtt create mode 100644 dom/media/webvtt/test/mochitest/long.vtt create mode 100644 dom/media/webvtt/test/mochitest/manifest.js create mode 100644 dom/media/webvtt/test/mochitest/mochitest.toml create mode 100644 dom/media/webvtt/test/mochitest/parser.vtt create mode 100644 dom/media/webvtt/test/mochitest/region.vtt create mode 100644 dom/media/webvtt/test/mochitest/sequential.vtt create mode 100644 dom/media/webvtt/test/mochitest/test_bug1018933.html create mode 100644 dom/media/webvtt/test/mochitest/test_bug1242594.html create mode 100644 dom/media/webvtt/test/mochitest/test_bug883173.html create mode 100644 dom/media/webvtt/test/mochitest/test_bug895091.html create mode 100644 dom/media/webvtt/test/mochitest/test_bug957847.html create mode 100644 dom/media/webvtt/test/mochitest/test_testtrack_cors_no_response.html create mode 100644 dom/media/webvtt/test/mochitest/test_texttrack.html create mode 100644 dom/media/webvtt/test/mochitest/test_texttrack_cors_preload_none.html create mode 100644 dom/media/webvtt/test/mochitest/test_texttrack_mode_change_during_loading.html create mode 100644 
dom/media/webvtt/test/mochitest/test_texttrack_moz.html create mode 100644 dom/media/webvtt/test/mochitest/test_texttrackcue.html create mode 100644 dom/media/webvtt/test/mochitest/test_texttrackcue_moz.html create mode 100644 dom/media/webvtt/test/mochitest/test_texttrackevents_video.html create mode 100644 dom/media/webvtt/test/mochitest/test_texttracklist.html create mode 100644 dom/media/webvtt/test/mochitest/test_texttracklist_moz.html create mode 100644 dom/media/webvtt/test/mochitest/test_texttrackregion.html create mode 100644 dom/media/webvtt/test/mochitest/test_trackelementevent.html create mode 100644 dom/media/webvtt/test/mochitest/test_trackelementsrc.html create mode 100644 dom/media/webvtt/test/mochitest/test_trackevent.html create mode 100644 dom/media/webvtt/test/mochitest/test_vttparser.html create mode 100644 dom/media/webvtt/test/mochitest/test_webvtt_empty_displaystate.html create mode 100644 dom/media/webvtt/test/mochitest/test_webvtt_event_same_time.html create mode 100644 dom/media/webvtt/test/mochitest/test_webvtt_infinite_processing_loop.html create mode 100644 dom/media/webvtt/test/mochitest/test_webvtt_overlapping_time.html create mode 100644 dom/media/webvtt/test/mochitest/test_webvtt_positionalign.html create mode 100644 dom/media/webvtt/test/mochitest/test_webvtt_seeking.html create mode 100644 dom/media/webvtt/test/mochitest/test_webvtt_update_display_after_adding_or_removing_cue.html create mode 100644 dom/media/webvtt/test/mochitest/vttPositionAlign.vtt create mode 100644 dom/media/webvtt/test/reftest/black.mp4 create mode 100644 dom/media/webvtt/test/reftest/cues_time_overlapping.webvtt create mode 100644 dom/media/webvtt/test/reftest/reftest.list create mode 100644 dom/media/webvtt/test/reftest/vtt_overlapping_time-ref.html create mode 100644 dom/media/webvtt/test/reftest/vtt_overlapping_time.html create mode 100644 dom/media/webvtt/test/reftest/vtt_reflow_display-ref.html create mode 100644 dom/media/webvtt/test/reftest/vtt_reflow_display.css create mode 100644 dom/media/webvtt/test/reftest/vtt_reflow_display.html create mode 100644 dom/media/webvtt/test/reftest/vtt_update_display_after_removed_cue.html create mode 100644 dom/media/webvtt/test/reftest/vtt_update_display_after_removed_cue_ref.html create mode 100644 dom/media/webvtt/test/reftest/white.webm create mode 100644 dom/media/webvtt/test/xpcshell/test_parser.js create mode 100644 dom/media/webvtt/test/xpcshell/xpcshell.toml create mode 100644 dom/media/webvtt/update-webvtt.js create mode 100644 dom/media/webvtt/vtt.sys.mjs (limited to 'dom/media') diff --git a/dom/media/ADTSDecoder.cpp b/dom/media/ADTSDecoder.cpp new file mode 100644 index 0000000000..4df1cb1885 --- /dev/null +++ b/dom/media/ADTSDecoder.cpp @@ -0,0 +1,46 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
 */ + +#include "ADTSDecoder.h" +#include "MediaContainerType.h" +#include "PDMFactory.h" + +namespace mozilla { + +/* static */ +bool ADTSDecoder::IsEnabled() { + RefPtr<PDMFactory> platform = new PDMFactory(); + return !platform->SupportsMimeType("audio/mp4a-latm"_ns).isEmpty(); +} + +/* static */ +bool ADTSDecoder::IsSupportedType(const MediaContainerType& aContainerType) { + if (aContainerType.Type() == MEDIAMIMETYPE("audio/aac") || + aContainerType.Type() == MEDIAMIMETYPE("audio/aacp") || + aContainerType.Type() == MEDIAMIMETYPE("audio/x-aac")) { + return IsEnabled() && (aContainerType.ExtendedType().Codecs().IsEmpty() || + aContainerType.ExtendedType().Codecs() == "aac"); + } + + return false; +} + +/* static */ +nsTArray<UniquePtr<TrackInfo>> ADTSDecoder::GetTracksInfo( + const MediaContainerType& aType) { + nsTArray<UniquePtr<TrackInfo>> tracks; + if (!IsSupportedType(aType)) { + return tracks; + } + + tracks.AppendElement( + CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters( + "audio/mp4a-latm"_ns, aType)); + + return tracks; +} + +} // namespace mozilla diff --git a/dom/media/ADTSDecoder.h b/dom/media/ADTSDecoder.h new file mode 100644 index 0000000000..0260c52de1 --- /dev/null +++ b/dom/media/ADTSDecoder.h @@ -0,0 +1,30 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ADTS_DECODER_H_ +#define ADTS_DECODER_H_ + +#include "mozilla/UniquePtr.h" +#include "nsTArray.h" + +namespace mozilla { + +class MediaContainerType; +class TrackInfo; + +class ADTSDecoder { + public: + // Returns true if the ADTS backend is pref'ed on, and we're running on a + // platform that is likely to have decoders for the format. + static bool IsEnabled(); + static bool IsSupportedType(const MediaContainerType& aContainerType); + static nsTArray<UniquePtr<TrackInfo>> GetTracksInfo( + const MediaContainerType& aType); +}; + +} // namespace mozilla + +#endif // !ADTS_DECODER_H_ diff --git a/dom/media/ADTSDemuxer.cpp b/dom/media/ADTSDemuxer.cpp new file mode 100644 index 0000000000..29ea270461 --- /dev/null +++ b/dom/media/ADTSDemuxer.cpp @@ -0,0 +1,818 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ADTSDemuxer.h" + +#include "TimeUnits.h" +#include "VideoUtils.h" +#include "mozilla/Logging.h" +#include "mozilla/UniquePtr.h" +#include <inttypes.h> + +extern mozilla::LazyLogModule gMediaDemuxerLog; +#define LOG(msg, ...) \ + MOZ_LOG(gMediaDemuxerLog, LogLevel::Debug, msg, ##__VA_ARGS__) +#define ADTSLOG(msg, ...) \ + DDMOZ_LOG(gMediaDemuxerLog, LogLevel::Debug, msg, ##__VA_ARGS__) +#define ADTSLOGV(msg, ...) \ + DDMOZ_LOG(gMediaDemuxerLog, LogLevel::Verbose, msg, ##__VA_ARGS__) + +namespace mozilla { +namespace adts { + +// adts::FrameHeader - Holds the ADTS frame header and its parsing +// state. +// +// ADTS Frame Structure +// +// 11111111 1111BCCD EEFFFFGH HHIJKLMM MMMMMMMM MMMOOOOO OOOOOOPP(QQQQQQQQ +// QQQQQQQQ) +// +// Header consists of 7 or 9 bytes (without or with CRC).
+// Letter Length(bits) Description +// { sync } 12 syncword 0xFFF, all bits must be 1 +// B 1 MPEG Version: 0 for MPEG-4, 1 for MPEG-2 +// C 2 Layer: always 0 +// D 1 protection absent, Warning, set to 1 if there is no +// CRC and 0 if there is CRC +// E 2 profile, the MPEG-4 Audio Object Type minus 1 +// F 4 MPEG-4 Sampling Frequency Index (15 is forbidden) +// H 3 MPEG-4 Channel Configuration (in the case of 0, the +// channel configuration is sent via an in-band PCE) +// M 13 frame length, this value must include 7 or 9 bytes of +// header length: FrameLength = +// (ProtectionAbsent == 1 ? 7 : 9) + size(AACFrame) +// O 11 Buffer fullness +// P 2 Number of AAC frames(RDBs) in ADTS frame minus 1, for +// maximum compatibility always use 1 AAC frame per ADTS +// frame +// Q 16 CRC if protection absent is 0 +class FrameHeader { + public: + uint32_t mFrameLength{}; + uint32_t mSampleRate{}; + uint32_t mSamples{}; + uint32_t mChannels{}; + uint8_t mObjectType{}; + uint8_t mSamplingIndex{}; + uint8_t mChannelConfig{}; + uint8_t mNumAACFrames{}; + bool mHaveCrc{}; + + // Returns whether aPtr matches a valid ADTS header sync marker + static bool MatchesSync(const uint8_t* aPtr) { + return aPtr[0] == 0xFF && (aPtr[1] & 0xF6) == 0xF0; + } + + FrameHeader() { Reset(); } + + // Header size + uint64_t HeaderSize() const { return (mHaveCrc) ? 9 : 7; } + + bool IsValid() const { return mFrameLength > 0; } + + // Resets the state to allow for a new parsing session. + void Reset() { PodZero(this); } + + // Returns whether the byte creates a valid sequence up to this point. + bool Parse(const uint8_t* aPtr) { + const uint8_t* p = aPtr; + + if (!MatchesSync(p)) { + return false; + } + + // AAC has 1024 samples per frame per channel. + mSamples = 1024; + + mHaveCrc = !(p[1] & 0x01); + mObjectType = ((p[2] & 0xC0) >> 6) + 1; + mSamplingIndex = (p[2] & 0x3C) >> 2; + mChannelConfig = (p[2] & 0x01) << 2 | (p[3] & 0xC0) >> 6; + mFrameLength = static_cast( + (p[3] & 0x03) << 11 | (p[4] & 0xFF) << 3 | (p[5] & 0xE0) >> 5); + mNumAACFrames = (p[6] & 0x03) + 1; + + static const uint32_t SAMPLE_RATES[] = {96000, 88200, 64000, 48000, 44100, + 32000, 24000, 22050, 16000, 12000, + 11025, 8000, 7350}; + if (mSamplingIndex >= ArrayLength(SAMPLE_RATES)) { + LOG(("ADTS: Init() failure: invalid sample-rate index value: %" PRIu32 + ".", + mSamplingIndex)); + return false; + } + mSampleRate = SAMPLE_RATES[mSamplingIndex]; + + MOZ_ASSERT(mChannelConfig < 8); + mChannels = (mChannelConfig == 7) ? 8 : mChannelConfig; + + return true; + } +}; + +// adts::Frame - Frame meta container used to parse and hold a frame +// header and side info. +class Frame { + public: + Frame() : mOffset(0) {} + + uint64_t Offset() const { return mOffset; } + size_t Length() const { + // TODO: If fields are zero'd when invalid, this check wouldn't be + // necessary. + if (!mHeader.IsValid()) { + return 0; + } + + return mHeader.mFrameLength; + } + + // Returns the offset to the start of frame's raw data. + uint64_t PayloadOffset() const { return mOffset + mHeader.HeaderSize(); } + + // Returns the length of the frame's raw data (excluding the header) in bytes. + size_t PayloadLength() const { + // TODO: If fields are zero'd when invalid, this check wouldn't be + // necessary. + if (!mHeader.IsValid()) { + return 0; + } + + return mHeader.mFrameLength - mHeader.HeaderSize(); + } + + // Returns the parsed frame header. 
+ const FrameHeader& Header() const { return mHeader; } + + bool IsValid() const { return mHeader.IsValid(); } + + // Resets the frame header and data. + void Reset() { + mHeader.Reset(); + mOffset = 0; + } + + // Scans the buffer for a frame header and returns whether a valid one was found. + bool Parse(uint64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd) { + MOZ_ASSERT(aStart && aEnd); + + bool found = false; + const uint8_t* ptr = aStart; + // Require at least 7 bytes of data at the end of the buffer for the minimum + // ADTS frame header. + while (ptr < aEnd - 7 && !found) { + found = mHeader.Parse(ptr); + ptr++; + } + + mOffset = aOffset + (static_cast<size_t>(ptr - aStart)) - 1u; + + return found; + } + + private: + // The offset to the start of the header. + uint64_t mOffset; + + // The currently parsed frame header. + FrameHeader mHeader; +}; + +class FrameParser { + public: + // Returns the currently parsed frame. Reset via Reset or EndFrameSession. + const Frame& CurrentFrame() const { return mFrame; } + + // Returns the first parsed frame. Reset via Reset. + const Frame& FirstFrame() const { return mFirstFrame; } + + // Resets the parser. Don't use between frames as first frame data is reset. + void Reset() { + EndFrameSession(); + mFirstFrame.Reset(); + } + + // Clears the currently parsed frame to allow parsing the next one; the first + // parsed frame is kept. + void EndFrameSession() { mFrame.Reset(); } + + // Parses the contents of the given buffer for a valid frame header and + // returns true if one was found. The first frame ever parsed is additionally + // remembered as FirstFrame() for static info access. + bool Parse(uint64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd) { + const bool found = mFrame.Parse(aOffset, aStart, aEnd); + + if (mFrame.Length() && !mFirstFrame.Length()) { + mFirstFrame = mFrame; + } + + return found; + } + + private: + // We keep the first parsed frame around for static info access and the + // currently parsed frame. + Frame mFirstFrame; + Frame mFrame; +}; + +// Initialize the AAC AudioSpecificConfig. +// Only handles two-byte version for AAC-LC.
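// Illustrative sketch of the two-byte AudioSpecificConfig built below, using
// assumed example values for an AAC-LC, 44100 Hz, stereo stream
// (audioObjectType 2, samplingFrequencyIndex 4, channelConfig 2):
//
//   asc[0] = (2 & 0x1F) << 3 | (4 & 0x0E) >> 1  ->  0x10 | 0x02  ->  0x12
//   asc[1] = (4 & 0x01) << 7 | (2 & 0x0F) << 3  ->  0x00 | 0x10  ->  0x10
//
// A caller holding a parsed frame could obtain the blob roughly like this
// (names here are illustrative only):
//
//   RefPtr<MediaByteBuffer> asc = new MediaByteBuffer();
//   InitAudioSpecificConfig(parser.FirstFrame(), asc);
//   // asc now holds { 0x12, 0x10 }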
+static void InitAudioSpecificConfig(const Frame& frame, + MediaByteBuffer* aBuffer) { + const FrameHeader& header = frame.Header(); + MOZ_ASSERT(header.IsValid()); + + int audioObjectType = header.mObjectType; + int samplingFrequencyIndex = header.mSamplingIndex; + int channelConfig = header.mChannelConfig; + + uint8_t asc[2]; + asc[0] = (audioObjectType & 0x1F) << 3 | (samplingFrequencyIndex & 0x0E) >> 1; + asc[1] = (samplingFrequencyIndex & 0x01) << 7 | (channelConfig & 0x0F) << 3; + + aBuffer->AppendElements(asc, 2); +} + +} // namespace adts + +using media::TimeUnit; + +// ADTSDemuxer + +ADTSDemuxer::ADTSDemuxer(MediaResource* aSource) : mSource(aSource) { + DDLINKCHILD("source", aSource); +} + +bool ADTSDemuxer::InitInternal() { + if (!mTrackDemuxer) { + mTrackDemuxer = new ADTSTrackDemuxer(mSource); + DDLINKCHILD("track demuxer", mTrackDemuxer.get()); + } + return mTrackDemuxer->Init(); +} + +RefPtr ADTSDemuxer::Init() { + if (!InitInternal()) { + ADTSLOG("Init() failure: waiting for data"); + + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR, + __func__); + } + + ADTSLOG("Init() successful"); + return InitPromise::CreateAndResolve(NS_OK, __func__); +} + +uint32_t ADTSDemuxer::GetNumberTracks(TrackInfo::TrackType aType) const { + return (aType == TrackInfo::kAudioTrack) ? 1 : 0; +} + +already_AddRefed ADTSDemuxer::GetTrackDemuxer( + TrackInfo::TrackType aType, uint32_t aTrackNumber) { + if (!mTrackDemuxer) { + return nullptr; + } + + return RefPtr(mTrackDemuxer).forget(); +} + +bool ADTSDemuxer::IsSeekable() const { + int64_t length = mSource->GetLength(); + return length > -1; +} + +// ADTSTrackDemuxer +ADTSTrackDemuxer::ADTSTrackDemuxer(MediaResource* aSource) + : mSource(aSource), + mParser(new adts::FrameParser()), + mOffset(0), + mNumParsedFrames(0), + mFrameIndex(0), + mTotalFrameLen(0), + mSamplesPerFrame(0), + mSamplesPerSecond(0), + mChannels(0) { + DDLINKCHILD("source", aSource); + Reset(); +} + +ADTSTrackDemuxer::~ADTSTrackDemuxer() { delete mParser; } + +bool ADTSTrackDemuxer::Init() { + FastSeek(TimeUnit::Zero()); + // Read the first frame to fetch sample rate and other meta data. + RefPtr frame(GetNextFrame(FindNextFrame(true))); + + ADTSLOG("Init StreamLength()=%" PRId64 " first-frame-found=%d", + StreamLength(), !!frame); + + if (!frame) { + return false; + } + + // Rewind back to the stream begin to avoid dropping the first frame. + FastSeek(TimeUnit::Zero()); + + if (!mSamplesPerSecond) { + return false; + } + + if (!mInfo) { + mInfo = MakeUnique(); + } + + mInfo->mRate = mSamplesPerSecond; + mInfo->mChannels = mChannels; + mInfo->mBitDepth = 16; + mInfo->mDuration = Duration(); + + // AAC Specific information + mInfo->mMimeType = "audio/mp4a-latm"; + + // Configure AAC codec-specific values. + // For AAC, mProfile and mExtendedProfile contain the audioObjectType from + // Table 1.3 -- Audio Profile definition, ISO/IEC 14496-3. Eg. 2 == AAC LC + mInfo->mProfile = mInfo->mExtendedProfile = + mParser->FirstFrame().Header().mObjectType; + AudioCodecSpecificBinaryBlob blob; + InitAudioSpecificConfig(mParser->FirstFrame(), blob.mBinaryBlob); + mInfo->mCodecSpecificConfig = AudioCodecSpecificVariant{std::move(blob)}; + + ADTSLOG("Init mInfo={mRate=%u mChannels=%u mBitDepth=%u mDuration=%" PRId64 + "}", + mInfo->mRate, mInfo->mChannels, mInfo->mBitDepth, + mInfo->mDuration.ToMicroseconds()); + + // AAC encoder delay can be 2112 (typical value when using Apple AAC encoder), + // or 1024 (typical value when encoding using fdk_aac, often via ffmpeg). 
+ // See + // https://developer.apple.com/library/content/documentation/QuickTime/QTFF/QTFFAppenG/QTFFAppenG.html + // In an attempt to not trim valid audio data, and because ADTS doesn't + // provide a way to know this pre-roll value, this offets by 1024 frames. + mPreRoll = TimeUnit(1024, mSamplesPerSecond); + return mChannels; +} + +UniquePtr ADTSTrackDemuxer::GetInfo() const { + return mInfo->Clone(); +} + +RefPtr ADTSTrackDemuxer::Seek( + const TimeUnit& aTime) { + // Efficiently seek to the position. + const TimeUnit time = aTime > mPreRoll ? aTime - mPreRoll : TimeUnit::Zero(); + FastSeek(time); + // Correct seek position by scanning the next frames. + const TimeUnit seekTime = ScanUntil(time); + + return SeekPromise::CreateAndResolve(seekTime, __func__); +} + +TimeUnit ADTSTrackDemuxer::FastSeek(const TimeUnit& aTime) { + ADTSLOG("FastSeek(%" PRId64 ") avgFrameLen=%f mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " mOffset=%" PRIu64, + aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames, + mFrameIndex, mOffset); + + const uint64_t firstFrameOffset = mParser->FirstFrame().Offset(); + if (!aTime.ToMicroseconds()) { + // Quick seek to the beginning of the stream. + mOffset = firstFrameOffset; + } else if (AverageFrameLength() > 0) { + mOffset = + firstFrameOffset + + AssertedCast(AssertedCast(FrameIndexFromTime(aTime)) * + AverageFrameLength()); + } + + const int64_t streamLength = StreamLength(); + if (mOffset > firstFrameOffset && streamLength > 0) { + mOffset = std::min(static_cast(streamLength - 1), mOffset); + } + + mFrameIndex = FrameIndexFromOffset(mOffset); + mParser->EndFrameSession(); + + ADTSLOG("FastSeek End avgFrameLen=%f mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " mFirstFrameOffset=%" PRIu64 + " mOffset=%" PRIu64 " SL=%" PRIu64 "", + AverageFrameLength(), mNumParsedFrames, mFrameIndex, firstFrameOffset, + mOffset, streamLength); + + return Duration(mFrameIndex); +} + +TimeUnit ADTSTrackDemuxer::ScanUntil(const TimeUnit& aTime) { + ADTSLOG("ScanUntil(%" PRId64 ") avgFrameLen=%f mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " mOffset=%" PRIu64, + aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames, + mFrameIndex, mOffset); + + if (!aTime.ToMicroseconds()) { + return FastSeek(aTime); + } + + if (Duration(mFrameIndex) > aTime) { + FastSeek(aTime); + } + + while (SkipNextFrame(FindNextFrame()) && Duration(mFrameIndex + 1) < aTime) { + ADTSLOGV("ScanUntil* avgFrameLen=%f mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " mOffset=%" PRIu64 " Duration=%" PRId64, + AverageFrameLength(), mNumParsedFrames, mFrameIndex, mOffset, + Duration(mFrameIndex + 1).ToMicroseconds()); + } + + ADTSLOG("ScanUntil End avgFrameLen=%f mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " mOffset=%" PRIu64, + AverageFrameLength(), mNumParsedFrames, mFrameIndex, mOffset); + + return Duration(mFrameIndex); +} + +RefPtr ADTSTrackDemuxer::GetSamples( + int32_t aNumSamples) { + ADTSLOGV("GetSamples(%d) Begin mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64 + " mSamplesPerFrame=%d " + "mSamplesPerSecond=%d mChannels=%d", + aNumSamples, mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen, + mSamplesPerFrame, mSamplesPerSecond, mChannels); + + MOZ_ASSERT(aNumSamples); + + RefPtr frames = new SamplesHolder(); + + while (aNumSamples--) { + RefPtr frame(GetNextFrame(FindNextFrame())); + if (!frame) break; + frames->AppendSample(frame); + } + + ADTSLOGV( + "GetSamples() End mSamples.Size()=%zu 
aNumSamples=%d mOffset=%" PRIu64 + " mNumParsedFrames=%" PRIu64 " mFrameIndex=%" PRId64 + " mTotalFrameLen=%" PRIu64 + " mSamplesPerFrame=%d mSamplesPerSecond=%d " + "mChannels=%d", + frames->GetSamples().Length(), aNumSamples, mOffset, mNumParsedFrames, + mFrameIndex, mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond, + mChannels); + + if (frames->GetSamples().IsEmpty()) { + return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, + __func__); + } + + return SamplesPromise::CreateAndResolve(frames, __func__); +} + +void ADTSTrackDemuxer::Reset() { + ADTSLOG("Reset()"); + MOZ_ASSERT(mParser); + if (mParser) { + mParser->Reset(); + } + FastSeek(TimeUnit::Zero()); +} + +RefPtr +ADTSTrackDemuxer::SkipToNextRandomAccessPoint(const TimeUnit& aTimeThreshold) { + // Will not be called for audio-only resources. + return SkipAccessPointPromise::CreateAndReject( + SkipFailureHolder(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, 0), __func__); +} + +int64_t ADTSTrackDemuxer::GetResourceOffset() const { + return AssertedCast(mOffset); +} + +media::TimeIntervals ADTSTrackDemuxer::GetBuffered() { + auto duration = Duration(); + + if (duration.IsInfinite()) { + return media::TimeIntervals(); + } + + AutoPinned stream(mSource.GetResource()); + return GetEstimatedBufferedTimeRanges(stream, duration.ToMicroseconds()); +} + +int64_t ADTSTrackDemuxer::StreamLength() const { return mSource.GetLength(); } + +TimeUnit ADTSTrackDemuxer::Duration() const { + if (!mNumParsedFrames) { + return TimeUnit::Invalid(); + } + + const int64_t streamLen = StreamLength(); + if (streamLen < 0) { + // Unknown length, we can't estimate duration, this is probably a live + // stream. + return TimeUnit::FromInfinity(); + } + const int64_t firstFrameOffset = + AssertedCast(mParser->FirstFrame().Offset()); + int64_t numFrames = + AssertedCast(AssertedCast(streamLen - firstFrameOffset) / + AverageFrameLength()); + return Duration(numFrames); +} + +TimeUnit ADTSTrackDemuxer::Duration(int64_t aNumFrames) const { + if (!mSamplesPerSecond) { + return TimeUnit::Invalid(); + } + + return TimeUnit(aNumFrames * mSamplesPerFrame, mSamplesPerSecond); +} + +const adts::Frame& ADTSTrackDemuxer::FindNextFrame( + bool findFirstFrame /*= false*/) { + static const int BUFFER_SIZE = 4096; + static const int MAX_SKIPPED_BYTES = 10 * BUFFER_SIZE; + + ADTSLOGV("FindNext() Begin mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64 + " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d", + mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen, + mSamplesPerFrame, mSamplesPerSecond, mChannels); + + uint8_t buffer[BUFFER_SIZE]; + uint32_t read = 0; + + bool foundFrame = false; + uint64_t frameHeaderOffset = mOffset; + + // Prepare the parser for the next frame parsing session. + mParser->EndFrameSession(); + + // Check whether we've found a valid ADTS frame. + while (!foundFrame) { + if ((read = Read(buffer, AssertedCast(frameHeaderOffset), + BUFFER_SIZE)) == 0) { + ADTSLOG("FindNext() EOS without a frame"); + break; + } + + if (frameHeaderOffset - mOffset > MAX_SKIPPED_BYTES) { + ADTSLOG("FindNext() exceeded MAX_SKIPPED_BYTES without a frame"); + break; + } + + const adts::Frame& currentFrame = mParser->CurrentFrame(); + foundFrame = mParser->Parse(frameHeaderOffset, buffer, buffer + read); + if (findFirstFrame && foundFrame) { + // Check for sync marker after the found frame, since it's + // possible to find sync marker in AAC data. 
If sync marker + // exists after the current frame then we've found a frame + // header. + uint64_t nextFrameHeaderOffset = + currentFrame.Offset() + currentFrame.Length(); + uint32_t read = + Read(buffer, AssertedCast<int64_t>(nextFrameHeaderOffset), 2); + if (read != 2 || !adts::FrameHeader::MatchesSync(buffer)) { + frameHeaderOffset = currentFrame.Offset() + 1; + mParser->Reset(); + foundFrame = false; + continue; + } + } + + if (foundFrame) { + break; + } + + // Minimum header size is 7 bytes. + uint64_t advance = read - 7; + + // Check for offset overflow. + if (frameHeaderOffset + advance <= frameHeaderOffset) { + break; + } + + frameHeaderOffset += advance; + } + + if (!foundFrame || !mParser->CurrentFrame().Length()) { + ADTSLOG( + "FindNext() Exit foundFrame=%d mParser->CurrentFrame().Length()=%zu ", + foundFrame, mParser->CurrentFrame().Length()); + mParser->Reset(); + return mParser->CurrentFrame(); + } + + ADTSLOGV("FindNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " frameHeaderOffset=%" PRId64 + " mTotalFrameLen=%" PRIu64 + " mSamplesPerFrame=%d mSamplesPerSecond=%d" + " mChannels=%d", + mOffset, mNumParsedFrames, mFrameIndex, frameHeaderOffset, + mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond, mChannels); + + return mParser->CurrentFrame(); +} + +bool ADTSTrackDemuxer::SkipNextFrame(const adts::Frame& aFrame) { + if (!mNumParsedFrames || !aFrame.Length()) { + RefPtr<MediaRawData> frame(GetNextFrame(aFrame)); + return frame; + } + + UpdateState(aFrame); + + ADTSLOGV("SkipNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64 + " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d", + mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen, + mSamplesPerFrame, mSamplesPerSecond, mChannels); + + return true; +} + +already_AddRefed<MediaRawData> ADTSTrackDemuxer::GetNextFrame( + const adts::Frame& aFrame) { + ADTSLOG("GetNext() Begin({mOffset=%" PRIu64 " HeaderSize()=%" PRIu64 + " Length()=%zu})", + aFrame.Offset(), aFrame.Header().HeaderSize(), + aFrame.PayloadLength()); + if (!aFrame.IsValid()) return nullptr; + + const int64_t offset = AssertedCast<int64_t>(aFrame.PayloadOffset()); + const uint32_t length = aFrame.PayloadLength(); + + RefPtr<MediaRawData> frame = new MediaRawData(); + frame->mOffset = offset; + + UniquePtr<MediaRawDataWriter> frameWriter(frame->CreateWriter()); + if (!frameWriter->SetSize(length)) { + ADTSLOG("GetNext() Exit failed to allocate media buffer"); + return nullptr; + } + + const uint32_t read = + Read(frameWriter->Data(), offset, AssertedCast<int32_t>(length)); + if (read != length) { + ADTSLOG("GetNext() Exit read=%u frame->Size()=%zu", read, frame->Size()); + return nullptr; + } + + UpdateState(aFrame); + + TimeUnit rawpts = Duration(mFrameIndex - 1) - mPreRoll; + TimeUnit rawDuration = Duration(1); + TimeUnit rawend = rawpts + rawDuration; + + frame->mTime = std::max(TimeUnit::Zero(), rawpts); + frame->mDuration = Duration(1); + frame->mTimecode = frame->mTime; + frame->mKeyframe = true; + + // Handle decoder delay. A packet must be trimmed if its pts, adjusted for + // decoder delay, is negative. A packet can be trimmed entirely. + if (rawpts.IsNegative()) { + frame->mDuration = std::max(TimeUnit::Zero(), rawend - frame->mTime); + } + + // ADTS frames can have a presentation duration of zero, e.g. when a frame is + // part of preroll.
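// Worked example of the trimming above, assuming a 44100 Hz stream: mPreRoll
// is 1024 samples (~23.2 ms), so for the very first packet
//   rawpts           = Duration(0) - mPreRoll          = -1024 samples
//   rawend           = rawpts + Duration(1)            = 0
//   frame->mTime     = max(0, rawpts)                  = 0
//   frame->mDuration = max(0, rawend - frame->mTime)   = 0
// i.e. that packet is trimmed away entirely, which is why the assertion below
// only requires a non-negative duration.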
+ MOZ_ASSERT(frame->mDuration.IsPositiveOrZero()); + + ADTSLOG("ADTS packet demuxed: pts [%lf, %lf] (duration: %lf)", + frame->mTime.ToSeconds(), frame->GetEndTime().ToSeconds(), + frame->mDuration.ToSeconds()); + + // Indicate original packet information to trim after decoding. + if (frame->mDuration != rawDuration) { + frame->mOriginalPresentationWindow = + Some(media::TimeInterval{rawpts, rawend}); + ADTSLOG("Total packet time excluding trimming: [%lf, %lf]", + rawpts.ToSeconds(), rawend.ToSeconds()); + } + + ADTSLOGV("GetNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64 + " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64 + " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d", + mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen, + mSamplesPerFrame, mSamplesPerSecond, mChannels); + + return frame.forget(); +} + +int64_t ADTSTrackDemuxer::FrameIndexFromOffset(uint64_t aOffset) const { + int64_t frameIndex = 0; + + if (AverageFrameLength() > 0) { + frameIndex = AssertedCast( + AssertedCast(aOffset - mParser->FirstFrame().Offset()) / + AverageFrameLength()); + MOZ_ASSERT(frameIndex >= 0); + } + + ADTSLOGV("FrameIndexFromOffset(%" PRId64 ") -> %" PRId64, aOffset, + frameIndex); + return frameIndex; +} + +int64_t ADTSTrackDemuxer::FrameIndexFromTime(const TimeUnit& aTime) const { + int64_t frameIndex = 0; + if (mSamplesPerSecond > 0 && mSamplesPerFrame > 0) { + frameIndex = AssertedCast(aTime.ToSeconds() * mSamplesPerSecond / + mSamplesPerFrame) - + 1; + } + + ADTSLOGV("FrameIndexFromOffset(%fs) -> %" PRId64, aTime.ToSeconds(), + frameIndex); + return std::max(0, frameIndex); +} + +void ADTSTrackDemuxer::UpdateState(const adts::Frame& aFrame) { + uint32_t frameLength = aFrame.Length(); + // Prevent overflow. + if (mTotalFrameLen + frameLength < mTotalFrameLen) { + // These variables have a linear dependency and are only used to derive the + // average frame length. + mTotalFrameLen /= 2; + mNumParsedFrames /= 2; + } + + // Full frame parsed, move offset to its end. + mOffset = aFrame.Offset() + frameLength; + mTotalFrameLen += frameLength; + + if (!mSamplesPerFrame) { + const adts::FrameHeader& header = aFrame.Header(); + mSamplesPerFrame = header.mSamples; + mSamplesPerSecond = header.mSampleRate; + mChannels = header.mChannels; + } + + ++mNumParsedFrames; + ++mFrameIndex; + MOZ_ASSERT(mFrameIndex > 0); +} + +uint32_t ADTSTrackDemuxer::Read(uint8_t* aBuffer, int64_t aOffset, + int32_t aSize) { + ADTSLOGV("ADTSTrackDemuxer::Read(%p %" PRId64 " %d)", aBuffer, aOffset, + aSize); + + const int64_t streamLen = StreamLength(); + if (mInfo && streamLen > 0) { + int64_t max = streamLen > aOffset ? streamLen - aOffset : 0; + // Prevent blocking reads after successful initialization. 
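// (Once mInfo is set the total stream length is known, so the request size is
// clamped to the bytes that can still exist; ReadAt() is then never asked to
// wait for data past the known end of the resource.)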
+ aSize = std::min(aSize, AssertedCast(max)); + } + + uint32_t read = 0; + ADTSLOGV("ADTSTrackDemuxer::Read -> ReadAt(%d)", aSize); + const nsresult rv = mSource.ReadAt(aOffset, reinterpret_cast(aBuffer), + static_cast(aSize), &read); + NS_ENSURE_SUCCESS(rv, 0); + return read; +} + +double ADTSTrackDemuxer::AverageFrameLength() const { + if (mNumParsedFrames) { + return AssertedCast(mTotalFrameLen) / + AssertedCast(mNumParsedFrames); + } + + return 0.0; +} + +/* static */ +bool ADTSDemuxer::ADTSSniffer(const uint8_t* aData, const uint32_t aLength) { + if (aLength < 7) { + return false; + } + if (!adts::FrameHeader::MatchesSync(aData)) { + return false; + } + auto parser = MakeUnique(); + + if (!parser->Parse(0, aData, aData + aLength)) { + return false; + } + const adts::Frame& currentFrame = parser->CurrentFrame(); + // Check for sync marker after the found frame, since it's + // possible to find sync marker in AAC data. If sync marker + // exists after the current frame then we've found a frame + // header. + uint64_t nextFrameHeaderOffset = + currentFrame.Offset() + currentFrame.Length(); + return aLength > nextFrameHeaderOffset && + aLength - nextFrameHeaderOffset >= 2 && + adts::FrameHeader::MatchesSync(aData + nextFrameHeaderOffset); +} + +} // namespace mozilla diff --git a/dom/media/ADTSDemuxer.h b/dom/media/ADTSDemuxer.h new file mode 100644 index 0000000000..40ff44898e --- /dev/null +++ b/dom/media/ADTSDemuxer.h @@ -0,0 +1,152 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ADTS_DEMUXER_H_ +#define ADTS_DEMUXER_H_ + +#include "mozilla/Attributes.h" +#include "mozilla/Maybe.h" +#include "MediaDataDemuxer.h" +#include "MediaResource.h" + +namespace mozilla { + +namespace adts { +class Frame; +class FrameParser; +} // namespace adts + +class ADTSTrackDemuxer; + +DDLoggedTypeDeclNameAndBase(ADTSDemuxer, MediaDataDemuxer); + +class ADTSDemuxer : public MediaDataDemuxer, + public DecoderDoctorLifeLogger { + public: + // MediaDataDemuxer interface. + explicit ADTSDemuxer(MediaResource* aSource); + RefPtr Init() override; + uint32_t GetNumberTracks(TrackInfo::TrackType aType) const override; + already_AddRefed GetTrackDemuxer( + TrackInfo::TrackType aType, uint32_t aTrackNumber) override; + bool IsSeekable() const override; + + // Return true if a valid ADTS frame header could be found. + static bool ADTSSniffer(const uint8_t* aData, const uint32_t aLength); + + private: + bool InitInternal(); + + RefPtr mSource; + RefPtr mTrackDemuxer; +}; + +DDLoggedTypeNameAndBase(ADTSTrackDemuxer, MediaTrackDemuxer); + +class ADTSTrackDemuxer : public MediaTrackDemuxer, + public DecoderDoctorLifeLogger { + public: + explicit ADTSTrackDemuxer(MediaResource* aSource); + + // Initializes the track demuxer by reading the first frame for meta data. + // Returns initialization success state. + bool Init(); + + // Returns the total stream length if known, -1 otherwise. + int64_t StreamLength() const; + + // Returns the estimated stream duration, or a 0-duration if unknown. + media::TimeUnit Duration() const; + + // Returns the estimated duration up to the given frame number, + // or a 0-duration if unknown. + media::TimeUnit Duration(int64_t aNumFrames) const; + + // MediaTrackDemuxer interface. 
+ UniquePtr GetInfo() const override; + RefPtr Seek(const media::TimeUnit& aTime) override; + RefPtr GetSamples(int32_t aNumSamples = 1) override; + void Reset() override; + RefPtr SkipToNextRandomAccessPoint( + const media::TimeUnit& aTimeThreshold) override; + int64_t GetResourceOffset() const override; + media::TimeIntervals GetBuffered() override; + + private: + // Destructor. + ~ADTSTrackDemuxer(); + + // Fast approximate seeking to given time. + media::TimeUnit FastSeek(const media::TimeUnit& aTime); + + // Seeks by scanning the stream up to the given time for more accurate + // results. + media::TimeUnit ScanUntil(const media::TimeUnit& aTime); + + // Finds the next valid frame and returns its byte range. + const adts::Frame& FindNextFrame(bool findFirstFrame = false); + + // Skips the next frame given the provided byte range. + bool SkipNextFrame(const adts::Frame& aFrame); + + // Returns the next ADTS frame, if available. + already_AddRefed GetNextFrame(const adts::Frame& aFrame); + + // Updates post-read meta data. + void UpdateState(const adts::Frame& aFrame); + + // Returns the frame index for the given offset. + int64_t FrameIndexFromOffset(uint64_t aOffset) const; + + // Returns the frame index for the given time. + int64_t FrameIndexFromTime(const media::TimeUnit& aTime) const; + + // Reads aSize bytes into aBuffer from the source starting at aOffset. + // Returns the actual size read. + uint32_t Read(uint8_t* aBuffer, int64_t aOffset, int32_t aSize); + + // Returns the average frame length derived from the previously parsed frames. + double AverageFrameLength() const; + + // The (hopefully) ADTS resource. + MediaResourceIndex mSource; + + // ADTS frame parser used to detect frames and extract side info. + adts::FrameParser* mParser; + + // Current byte offset in the source stream. + uint64_t mOffset; + + // Total parsed frames. + uint64_t mNumParsedFrames; + + // Current frame index. + int64_t mFrameIndex; + + // Sum of parsed frames' lengths in bytes. + uint64_t mTotalFrameLen; + + // Samples per frame metric derived from frame headers or 0 if none available. + uint32_t mSamplesPerFrame; + + // Samples per second metric derived from frame headers or 0 if none + // available. + uint32_t mSamplesPerSecond; + + // Channel count derived from frame headers or 0 if none available. + uint32_t mChannels; + + // Audio track config info. + UniquePtr mInfo; + + // Amount of pre-roll time when seeking. + // AAC encoder delay is by default 2112 audio frames. + media::TimeUnit mPreRoll; +}; + +} // namespace mozilla + +#endif // !ADTS_DEMUXER_H_ diff --git a/dom/media/AsyncLogger.h b/dom/media/AsyncLogger.h new file mode 100644 index 0000000000..adc4101382 --- /dev/null +++ b/dom/media/AsyncLogger.h @@ -0,0 +1,305 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* Implementation of an asynchronous lock-free logging system. 
*/ + +#ifndef mozilla_dom_AsyncLogger_h +#define mozilla_dom_AsyncLogger_h + +#include +#include +#include +#include "mozilla/ArrayUtils.h" +#include "mozilla/Attributes.h" +#include "mozilla/BaseProfilerMarkerTypes.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Sprintf.h" +#include "mozilla/TimeStamp.h" +#include "GeckoProfiler.h" +#include "MPSCQueue.h" + +#if defined(_WIN32) +# include +# define getpid() _getpid() +#else +# include +#endif + +namespace mozilla { + +// Allows writing 0-terminated C-strings in a buffer, and returns the start +// index of the string that's been appended. Automatically truncates the strings +// as needed if the storage is too small, returning true when that's the case. +class MOZ_STACK_CLASS StringWriter { + public: + StringWriter(char* aMemory, size_t aLength) + : mMemory(aMemory), mWriteIndex(0), mLength(aLength) {} + + bool AppendCString(const char* aString, size_t* aIndexStart) { + *aIndexStart = mWriteIndex; + if (!aString) { + return false; + } + size_t toCopy = strlen(aString); + bool truncated = false; + + if (toCopy > Available()) { + truncated = true; + toCopy = Available() - 1; + } + + memcpy(&(mMemory[mWriteIndex]), aString, toCopy); + mWriteIndex += toCopy; + mMemory[mWriteIndex] = 0; + mWriteIndex++; + + return truncated; + } + + private: + size_t Available() { + MOZ_ASSERT(mLength > mWriteIndex); + return mLength - mWriteIndex; + } + + char* mMemory; + size_t mWriteIndex; + size_t mLength; +}; + +const size_t PAYLOAD_TOTAL_SIZE = 2 << 9; + +// This class implements a lock-free asynchronous logger, that +// adds profiler markers. +// Any thread can use this logger without external synchronization and without +// being blocked. This log is suitable for use in real-time audio threads. +// This class uses a thread internally, and must be started and stopped +// manually. +// If profiling is disabled, all the calls are no-op and cheap. +class AsyncLogger { + public: + enum class TracingPhase : uint8_t { BEGIN, END, COMPLETE }; + + const char TRACING_PHASE_STRINGS[3] = {'B', 'E', 'X'}; + + struct TextPayload { + char mPayload[PAYLOAD_TOTAL_SIZE - MPSC_MSG_RESERVED]; + }; + + // The order of the fields is important here to minimize padding. + struct TracePayload { +#define MEMBERS_EXCEPT_NAME \ + /* If this marker is of phase B or E (begin or end), this is the time at \ + * which it was captured. */ \ + TimeStamp mTimestamp; \ + /* The thread on which this tracepoint was gathered. */ \ + ProfilerThreadId mTID; \ + /* If this marker is of phase X (COMPLETE), this holds the duration of the \ + * event in microseconds. Else, the value is not used. */ \ + uint32_t mDurationUs; \ + /* A trace payload can be either: \ + * - Begin - this marks the beginning of a temporal region \ + * - End - this marks the end of a temporal region \ + * - Complete - this is a timestamp and a length, forming complete a \ + * temporal region */ \ + TracingPhase mPhase; \ + /* Offset at which the comment part of the string starts, in mName */ \ + uint8_t mCommentStart; + + MEMBERS_EXCEPT_NAME; + + private: + // Mock structure, to know where the first character of the name will be. 
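// The computation below then gives the space left for the name: the total
// payload size, minus the offset of that first character, minus the queue's
// per-message overhead (MPSC_MSG_RESERVED) rounded up to this struct's
// alignment.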
+ struct MembersWithChar { + MEMBERS_EXCEPT_NAME; + char c; + }; + static constexpr size_t scRemainingSpaceForName = + PAYLOAD_TOTAL_SIZE - offsetof(MembersWithChar, c) - + ((MPSC_MSG_RESERVED + alignof(MembersWithChar) - 1) & + ~(alignof(MembersWithChar) - 1)); +#undef MEMBERS_EXCEPT_NAME + + public: + // An arbitrary string, usually containing a function signature or a + // recognizable tag of some sort, to be displayed when analyzing the + // profile. + char mName[scRemainingSpaceForName]; + }; + + // The goal here is to make it easy on the allocator. We pack a pointer in the + // message struct, and we still want to do power of two allocations to + // minimize allocator slop. + static_assert(sizeof(MPSCQueue::Message) == PAYLOAD_TOTAL_SIZE, + "MPSCQueue internal allocations has an unexpected size."); + + explicit AsyncLogger() : mThread(nullptr), mRunning(false) {} + + void Start() { + MOZ_ASSERT(!mRunning, "Double calls to AsyncLogger::Start"); + mRunning = true; + Run(); + } + + void Stop() { + if (mRunning) { + mRunning = false; + } + } + + // Log something that has a beginning and an end + void Log(const char* aName, const char* aCategory, const char* aComment, + TracingPhase aPhase) { + if (!Enabled()) { + return; + } + + auto* msg = new MPSCQueue::Message(); + + msg->data.mTID = profiler_current_thread_id(); + msg->data.mPhase = aPhase; + msg->data.mTimestamp = TimeStamp::Now(); + msg->data.mDurationUs = 0; // unused, duration is end - begin + + StringWriter writer(msg->data.mName, ArrayLength(msg->data.mName)); + + size_t commentIndex; + DebugOnly truncated = writer.AppendCString(aName, &commentIndex); + MOZ_ASSERT(!truncated, "Tracing payload truncated: name"); + + if (aComment) { + truncated = writer.AppendCString(aComment, &commentIndex); + MOZ_ASSERT(!truncated, "Tracing payload truncated: comment"); + msg->data.mCommentStart = commentIndex; + } else { + msg->data.mCommentStart = 0; + } + mMessageQueueProfiler.Push(msg); + } + + // Log something that has a beginning and a duration + void LogDuration(const char* aName, const char* aCategory, uint64_t aDuration, + uint64_t aFrames, uint64_t aSampleRate) { + if (Enabled()) { + auto* msg = new MPSCQueue::Message(); + msg->data.mTID = profiler_current_thread_id(); + msg->data.mPhase = TracingPhase::COMPLETE; + msg->data.mTimestamp = TimeStamp::Now(); + msg->data.mDurationUs = + (static_cast(aFrames) / aSampleRate) * 1e6; + size_t len = std::min(strlen(aName), ArrayLength(msg->data.mName)); + memcpy(msg->data.mName, aName, len); + msg->data.mName[len] = 0; + mMessageQueueProfiler.Push(msg); + } + } + + bool Enabled() { return mRunning; } + + private: + void Run() { + mThread.reset(new std::thread([this]() { + AUTO_PROFILER_REGISTER_THREAD("AsyncLogger"); + while (mRunning) { + { + struct TracingMarkerWithComment { + static constexpr Span MarkerTypeName() { + return MakeStringSpan("Real-Time"); + } + static void StreamJSONMarkerData( + baseprofiler::SpliceableJSONWriter& aWriter, + const ProfilerString8View& aText) { + aWriter.StringProperty("name", aText); + } + static MarkerSchema MarkerTypeDisplay() { + using MS = MarkerSchema; + MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable}; + schema.SetChartLabel("{marker.data.name}"); + schema.SetTableLabel("{marker.name} - {marker.data.name}"); + schema.AddKeyLabelFormatSearchable("name", "Comment", + MS::Format::String, + MS::Searchable::Searchable); + return schema; + } + }; + + struct TracingMarker { + static constexpr Span MarkerTypeName() { + return 
MakeStringSpan("Real-time"); + } + static void StreamJSONMarkerData( + baseprofiler::SpliceableJSONWriter& aWriter) {} + static MarkerSchema MarkerTypeDisplay() { + using MS = MarkerSchema; + MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable}; + // Nothing outside the defaults. + return schema; + } + }; + + TracePayload message; + while (mMessageQueueProfiler.Pop(&message) && mRunning) { + if (message.mPhase != TracingPhase::COMPLETE) { + if (!message.mCommentStart) { + profiler_add_marker( + ProfilerString8View::WrapNullTerminatedString( + message.mName), + geckoprofiler::category::MEDIA_RT, + {MarkerThreadId(message.mTID), + (message.mPhase == TracingPhase::BEGIN) + ? MarkerTiming::IntervalStart(message.mTimestamp) + : MarkerTiming::IntervalEnd(message.mTimestamp)}, + TracingMarker{}); + } else { + profiler_add_marker( + ProfilerString8View::WrapNullTerminatedString( + message.mName), + geckoprofiler::category::MEDIA_RT, + {MarkerThreadId(message.mTID), + (message.mPhase == TracingPhase::BEGIN) + ? MarkerTiming::IntervalStart(message.mTimestamp) + : MarkerTiming::IntervalEnd(message.mTimestamp)}, + TracingMarkerWithComment{}, + ProfilerString8View::WrapNullTerminatedString( + &(message.mName[message.mCommentStart]))); + } + } else { + profiler_add_marker( + ProfilerString8View::WrapNullTerminatedString(message.mName), + geckoprofiler::category::MEDIA_RT, + {MarkerThreadId(message.mTID), + MarkerTiming::Interval( + message.mTimestamp, + message.mTimestamp + TimeDuration::FromMicroseconds( + message.mDurationUs))}, + TracingMarker{}); + } + } + } + Sleep(); + } + })); + // cleanup is done via mRunning + mThread->detach(); + } + + uint64_t NowInUs() { + static TimeStamp base = TimeStamp::Now(); + return (TimeStamp::Now() - base).ToMicroseconds(); + } + + void Sleep() { std::this_thread::sleep_for(std::chrono::milliseconds(10)); } + + std::unique_ptr mThread; + MPSCQueue mMessageQueueProfiler; + std::atomic mRunning; +}; + +} // end namespace mozilla + +#if defined(_WIN32) +# undef getpid +#endif + +#endif // mozilla_dom_AsyncLogger_h diff --git a/dom/media/AudibilityMonitor.h b/dom/media/AudibilityMonitor.h new file mode 100644 index 0000000000..fdcf474403 --- /dev/null +++ b/dom/media/AudibilityMonitor.h @@ -0,0 +1,100 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef DOM_MEDIA_AUDIBILITYMONITOR_H_ +#define DOM_MEDIA_AUDIBILITYMONITOR_H_ + +#include + +#include "AudioSampleFormat.h" +#include "WebAudioUtils.h" +#include "AudioBlock.h" +#include "MediaData.h" + +namespace mozilla { + +class AudibilityMonitor { + public: + // ≈ 20 * log10(pow(2, 12)), noise floor of 12bit audio + const float AUDIBILITY_THRESHOLD = + dom::WebAudioUtils::ConvertDecibelsToLinear(-72.); + + AudibilityMonitor(uint32_t aSamplerate, float aSilenceDurationSeconds) + : mSamplerate(aSamplerate), + mSilenceDurationSeconds(aSilenceDurationSeconds), + mSilentFramesInARow(0), + mEverAudible(false) {} + + void Process(const AudioData* aData) { + ProcessInterleaved(aData->Data(), aData->mChannels); + } + + void Process(const AudioBlock& aData) { + if (aData.IsNull() || aData.IsMuted()) { + mSilentFramesInARow += aData.GetDuration(); + return; + } + ProcessPlanar(aData.ChannelData(), aData.GetDuration()); + } + + void ProcessPlanar(Span aPlanar, TrackTime aFrames) { + uint32_t lastFrameAudibleAcrossChannels = 0; + for (uint32_t channel = 0; channel < aPlanar.Length(); channel++) { + uint32_t lastSampleAudible = 0; + for (uint32_t frame = 0; frame < aFrames; frame++) { + if (std::fabs(aPlanar[channel][frame]) > AUDIBILITY_THRESHOLD) { + mEverAudible = true; + mSilentFramesInARow = 0; + lastSampleAudible = frame; + } + } + lastFrameAudibleAcrossChannels = + std::max(lastFrameAudibleAcrossChannels, lastSampleAudible); + } + mSilentFramesInARow += aFrames - lastFrameAudibleAcrossChannels - 1; + } + + void ProcessInterleaved(const Span& aInterleaved, + size_t aChannels) { + MOZ_ASSERT(aInterleaved.Length() % aChannels == 0); + uint32_t frameCount = aInterleaved.Length() / aChannels; + AudioDataValue* samples = aInterleaved.Elements(); + + uint32_t readIndex = 0; + for (uint32_t i = 0; i < frameCount; i++) { + bool atLeastOneAudible = false; + for (uint32_t j = 0; j < aChannels; j++) { + if (std::fabs(AudioSampleToFloat(samples[readIndex++])) > + AUDIBILITY_THRESHOLD) { + atLeastOneAudible = true; + } + } + if (atLeastOneAudible) { + mSilentFramesInARow = 0; + mEverAudible = true; + } else { + mSilentFramesInARow++; + } + } + } + + // A stream is considered audible if there was audible content in the last + // `mSilenceDurationSeconds` seconds, or it has never been audible for now. + bool RecentlyAudible() { + return mEverAudible && (static_cast(mSilentFramesInARow) / + mSamplerate) < mSilenceDurationSeconds; + } + + private: + const uint32_t mSamplerate; + const float mSilenceDurationSeconds; + uint64_t mSilentFramesInARow; + bool mEverAudible; +}; + +}; // namespace mozilla + +#endif // DOM_MEDIA_AUDIBILITYMONITOR_H_ diff --git a/dom/media/AudioBufferUtils.h b/dom/media/AudioBufferUtils.h new file mode 100644 index 0000000000..3d0d1e9b6b --- /dev/null +++ b/dom/media/AudioBufferUtils.h @@ -0,0 +1,220 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZILLA_SCRATCHBUFFER_H_ +#define MOZILLA_SCRATCHBUFFER_H_ + +#include "AudioSegment.h" +#include "mozilla/PodOperations.h" +#include "mozilla/UniquePtr.h" +#include "nsDebug.h" + +#include + +namespace mozilla { + +/** + * The classes in this file provide a interface that uses frames as a unit. 
+ * However, they store their offsets in samples (because it's handy for pointer + * operations). Those functions can convert between the two units. + */ +static inline uint32_t FramesToSamples(uint32_t aChannels, uint32_t aFrames) { + return aFrames * aChannels; +} + +static inline uint32_t SamplesToFrames(uint32_t aChannels, uint32_t aSamples) { + MOZ_ASSERT(!(aSamples % aChannels), "Frame alignment is wrong."); + return aSamples / aChannels; +} + +/** + * Class that gets a buffer pointer from an audio callback and provides a safe + * interface to manipulate this buffer, and to ensure we are not missing frames + * by the end of the callback. + */ +template +class AudioCallbackBufferWrapper { + public: + AudioCallbackBufferWrapper() + : mBuffer(nullptr), mSamples(0), mSampleWriteOffset(1), mChannels(0) {} + + explicit AudioCallbackBufferWrapper(uint32_t aChannels) + : mBuffer(nullptr), + mSamples(0), + mSampleWriteOffset(1), + mChannels(aChannels) + + { + MOZ_ASSERT(aChannels); + } + + AudioCallbackBufferWrapper& operator=( + const AudioCallbackBufferWrapper& aOther) { + MOZ_ASSERT(!aOther.mBuffer, + "Don't use this ctor after AudioCallbackDriver::Init"); + MOZ_ASSERT(aOther.mSamples == 0, + "Don't use this ctor after AudioCallbackDriver::Init"); + MOZ_ASSERT(aOther.mSampleWriteOffset == 1, + "Don't use this ctor after AudioCallbackDriver::Init"); + MOZ_ASSERT(aOther.mChannels != 0); + + mBuffer = nullptr; + mSamples = 0; + mSampleWriteOffset = 1; + mChannels = aOther.mChannels; + + return *this; + } + + /** + * Set the buffer in this wrapper. This is to be called at the beginning of + * the callback. + */ + void SetBuffer(T* aBuffer, uint32_t aFrames) { + MOZ_ASSERT(!mBuffer && !mSamples, "SetBuffer called twice."); + mBuffer = aBuffer; + mSamples = FramesToSamples(mChannels, aFrames); + mSampleWriteOffset = 0; + } + + /** + * Write some frames to the internal buffer. Free space in the buffer should + * be checked prior to calling these. + */ + void WriteFrames(T* aBuffer, uint32_t aFrames) { + MOZ_ASSERT(aFrames <= Available(), + "Writing more that we can in the audio buffer."); + + PodCopy(mBuffer + mSampleWriteOffset, aBuffer, + FramesToSamples(mChannels, aFrames)); + mSampleWriteOffset += FramesToSamples(mChannels, aFrames); + } + void WriteFrames(const AudioChunk& aChunk, uint32_t aFrames) { + MOZ_ASSERT(aFrames <= Available(), + "Writing more that we can in the audio buffer."); + + InterleaveAndConvertBuffer(aChunk.ChannelData().Elements(), aFrames, + aChunk.mVolume, aChunk.ChannelCount(), + mBuffer + mSampleWriteOffset); + mSampleWriteOffset += FramesToSamples(mChannels, aFrames); + } + + /** + * Number of frames that can be written to the buffer. + */ + uint32_t Available() { + return SamplesToFrames(mChannels, mSamples - mSampleWriteOffset); + } + + /** + * Check that the buffer is completly filled, and reset internal state so this + * instance can be reused. + */ + void BufferFilled() { + MOZ_ASSERT(Available() == 0, "Frames should have been written"); + MOZ_ASSERT(mBuffer, "Buffer not set."); + mSamples = 0; + mSampleWriteOffset = 0; + mBuffer = nullptr; + } + + private: + /* This is not an owned pointer, but the pointer passed to use via the audio + * callback. */ + T* mBuffer; + /* The number of samples of this audio buffer. */ + uint32_t mSamples; + /* The position at which new samples should be written. We want to return to + * the audio callback iff this is equal to mSamples. 
*/ + uint32_t mSampleWriteOffset; + uint32_t mChannels; +}; + +/** + * This is a class that interfaces with the AudioCallbackBufferWrapper, and is + * responsible for storing the excess of data produced by the MediaTrackGraph + * because of different rounding constraints, to be used the next time the audio + * backend calls back. + */ +template +class SpillBuffer { + public: + SpillBuffer() : mBuffer(nullptr), mPosition(0), mChannels(0) {} + + explicit SpillBuffer(uint32_t aChannels) + : mPosition(0), mChannels(aChannels) { + MOZ_ASSERT(aChannels); + mBuffer = MakeUnique(BLOCK_SIZE * mChannels); + PodZero(mBuffer.get(), BLOCK_SIZE * mChannels); + } + + SpillBuffer& operator=(SpillBuffer& aOther) { + MOZ_ASSERT(aOther.mPosition == 0, + "Don't use this ctor after AudioCallbackDriver::Init"); + MOZ_ASSERT(aOther.mChannels != 0); + MOZ_ASSERT(aOther.mBuffer); + + mPosition = aOther.mPosition; + mChannels = aOther.mChannels; + mBuffer = std::move(aOther.mBuffer); + + return *this; + } + + SpillBuffer& operator=(SpillBuffer&& aOther) { + return this->operator=(aOther); + } + + /* Empty the spill buffer into the buffer of the audio callback. This returns + * the number of frames written. */ + uint32_t Empty(AudioCallbackBufferWrapper& aBuffer) { + uint32_t framesToWrite = + std::min(aBuffer.Available(), SamplesToFrames(mChannels, mPosition)); + + aBuffer.WriteFrames(mBuffer.get(), framesToWrite); + + mPosition -= FramesToSamples(mChannels, framesToWrite); + // If we didn't empty the spill buffer for some reason, shift the remaining + // data down + if (mPosition > 0) { + MOZ_ASSERT(FramesToSamples(mChannels, framesToWrite) + mPosition <= + BLOCK_SIZE * mChannels); + PodMove(mBuffer.get(), + mBuffer.get() + FramesToSamples(mChannels, framesToWrite), + mPosition); + } + + return framesToWrite; + } + /* Fill the spill buffer from aInput. + * Return the number of frames written to the spill buffer */ + uint32_t Fill(const AudioChunk& aInput) { + uint32_t framesToWrite = + std::min(static_cast(aInput.mDuration), + BLOCK_SIZE - SamplesToFrames(mChannels, mPosition)); + + MOZ_ASSERT(FramesToSamples(mChannels, framesToWrite) + mPosition <= + BLOCK_SIZE * mChannels); + InterleaveAndConvertBuffer( + aInput.ChannelData().Elements(), framesToWrite, aInput.mVolume, + aInput.ChannelCount(), mBuffer.get() + mPosition); + + mPosition += FramesToSamples(mChannels, framesToWrite); + + return framesToWrite; + } + + private: + /* The spilled data. */ + UniquePtr mBuffer; + /* The current write position, in samples, in the buffer when filling, or the + * amount of buffer filled when emptying. */ + uint32_t mPosition; + uint32_t mChannels; +}; + +} // namespace mozilla + +#endif // MOZILLA_SCRATCHBUFFER_H_ diff --git a/dom/media/AudioCaptureTrack.cpp b/dom/media/AudioCaptureTrack.cpp new file mode 100644 index 0000000000..0a7aaccb5c --- /dev/null +++ b/dom/media/AudioCaptureTrack.cpp @@ -0,0 +1,97 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "MediaTrackGraph.h" +#include "MediaTrackListener.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Unused.h" + +#include "AudioSegment.h" +#include "mozilla/Logging.h" +#include "mozilla/Attributes.h" +#include "AudioCaptureTrack.h" +#include "ImageContainer.h" +#include "AudioNodeEngine.h" +#include "AudioNodeTrack.h" +#include "AudioNodeExternalInputTrack.h" +#include "webaudio/MediaStreamAudioDestinationNode.h" +#include +#include "DOMMediaStream.h" + +using namespace mozilla::layers; +using namespace mozilla::dom; +using namespace mozilla::gfx; + +namespace mozilla { + +// We are mixing to mono until PeerConnection can accept stereo +static const uint32_t MONO = 1; + +AudioCaptureTrack::AudioCaptureTrack(TrackRate aRate) + : ProcessedMediaTrack(aRate, MediaSegment::AUDIO, new AudioSegment()), + mStarted(false) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_COUNT_CTOR(AudioCaptureTrack); +} + +AudioCaptureTrack::~AudioCaptureTrack() { MOZ_COUNT_DTOR(AudioCaptureTrack); } + +void AudioCaptureTrack::Start() { + QueueControlMessageWithNoShutdown( + [self = RefPtr{this}, this] { mStarted = true; }); +} + +void AudioCaptureTrack::ProcessInput(GraphTime aFrom, GraphTime aTo, + uint32_t aFlags) { + if (!mStarted) { + return; + } + + uint32_t inputCount = mInputs.Length(); + + if (mEnded) { + return; + } + + // If the captured track is connected back to a object on the page (be it an + // HTMLMediaElement with a track as source, or an AudioContext), a cycle + // situation occur. This can work if it's an AudioContext with at least one + // DelayNode, but the MTG will mute the whole cycle otherwise. + if (InMutedCycle() || inputCount == 0) { + GetData()->AppendNullData(aTo - aFrom); + } else { + // We mix down all the tracks of all inputs, to a stereo track. Everything + // is {up,down}-mixed to stereo. + mMixer.StartMixing(); + AudioSegment output; + for (uint32_t i = 0; i < inputCount; i++) { + MediaTrack* s = mInputs[i]->GetSource(); + AudioSegment* inputSegment = s->GetData(); + TrackTime inputStart = s->GraphTimeToTrackTimeWithBlocking(aFrom); + TrackTime inputEnd = s->GraphTimeToTrackTimeWithBlocking(aTo); + AudioSegment toMix; + if (s->Ended() && inputSegment->GetDuration() <= inputStart) { + toMix.AppendNullData(aTo - aFrom); + } else { + toMix.AppendSlice(*inputSegment, inputStart, inputEnd); + // Care for tracks blocked in the [aTo, aFrom] range. + if (inputEnd - inputStart < aTo - aFrom) { + toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart)); + } + } + toMix.Mix(mMixer, MONO, Graph()->GraphRate()); + } + AudioChunk* mixed = mMixer.MixedChunk(); + MOZ_ASSERT(mixed->ChannelCount() == MONO); + // Now we have mixed data, simply append it. + GetData()->AppendAndConsumeChunk(std::move(*mixed)); + } +} + +uint32_t AudioCaptureTrack::NumberOfChannels() const { + return GetData()->MaxChannelCount(); +} + +} // namespace mozilla diff --git a/dom/media/AudioCaptureTrack.h b/dom/media/AudioCaptureTrack.h new file mode 100644 index 0000000000..985d5640e2 --- /dev/null +++ b/dom/media/AudioCaptureTrack.h @@ -0,0 +1,39 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef MOZILLA_AUDIOCAPTURETRACK_H_ +#define MOZILLA_AUDIOCAPTURETRACK_H_ + +#include "MediaTrackGraph.h" +#include "AudioMixer.h" +#include + +namespace mozilla { + +class AbstractThread; +class DOMMediaStream; + +/** + * See MediaTrackGraph::CreateAudioCaptureTrack. + */ +class AudioCaptureTrack : public ProcessedMediaTrack { + public: + explicit AudioCaptureTrack(TrackRate aRate); + virtual ~AudioCaptureTrack(); + + void Start(); + + void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override; + + uint32_t NumberOfChannels() const override; + + protected: + AudioMixer mMixer; + bool mStarted; + bool mTrackCreated; +}; +} // namespace mozilla + +#endif /* MOZILLA_AUDIOCAPTURETRACK_H_ */ diff --git a/dom/media/AudioChannelFormat.cpp b/dom/media/AudioChannelFormat.cpp new file mode 100644 index 0000000000..6caf72c3ba --- /dev/null +++ b/dom/media/AudioChannelFormat.cpp @@ -0,0 +1,16 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioChannelFormat.h" + +#include + +namespace mozilla { + +uint32_t GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2) { + return std::max(aChannels1, aChannels2); +} + +} // namespace mozilla diff --git a/dom/media/AudioChannelFormat.h b/dom/media/AudioChannelFormat.h new file mode 100644 index 0000000000..1b913f5535 --- /dev/null +++ b/dom/media/AudioChannelFormat.h @@ -0,0 +1,253 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#ifndef MOZILLA_AUDIOCHANNELFORMAT_H_ +#define MOZILLA_AUDIOCHANNELFORMAT_H_ + +#include + +#include "mozilla/PodOperations.h" +#include "nsTArrayForwardDeclare.h" +#include "AudioSampleFormat.h" +#include "nsTArray.h" + +namespace mozilla { + +/* + * This file provides utilities for upmixing and downmixing channels. + * + * The channel layouts, upmixing and downmixing are consistent with the + * Web Audio spec. + * + * Channel layouts for up to 6 channels: + * mono { M } + * stereo { L, R } + * { L, R, C } + * quad { L, R, SL, SR } + * { L, R, C, SL, SR } + * 5.1 { L, R, C, LFE, SL, SR } + * + * Only 1, 2, 4 and 6 are currently defined in Web Audio. + */ + +enum { + SURROUND_L, + SURROUND_R, + SURROUND_C, + SURROUND_LFE, + SURROUND_SL, + SURROUND_SR +}; + +const uint32_t CUSTOM_CHANNEL_LAYOUTS = 6; + +// This is defined by some Windows SDK header. +#undef IGNORE + +const int IGNORE = CUSTOM_CHANNEL_LAYOUTS; +const float IGNORE_F = 0.0f; + +const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] = {0, 5, 9, + 12, 14}; + +/** + * Return a channel count whose channel layout includes all the channels from + * aChannels1 and aChannels2. + */ +uint32_t GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2); + +/** + * DownMixMatrix represents a conversion matrix efficiently by exploiting the + * fact that each input channel contributes to at most one output channel, + * except possibly for the C input channel in layouts that have one. 
Also, + * every input channel is multiplied by the same coefficient for every output + * channel it contributes to. + */ +const float SQRT_ONE_HALF = 0.7071067811865476f; + +struct DownMixMatrix { + // Every input channel c is copied to output channel mInputDestination[c] + // after multiplying by mInputCoefficient[c]. + uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS]; + // If not IGNORE, then the C channel is copied to this output channel after + // multiplying by its coefficient. + uint8_t mCExtraDestination; + float mInputCoefficient[CUSTOM_CHANNEL_LAYOUTS]; +}; + +static const DownMixMatrix gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS * + (CUSTOM_CHANNEL_LAYOUTS - 1) / + 2] = { + // Downmixes to mono + {{0, 0}, IGNORE, {0.5f, 0.5f}}, + {{0, IGNORE, IGNORE}, IGNORE, {1.0f, IGNORE_F, IGNORE_F}}, + {{0, 0, 0, 0}, IGNORE, {0.25f, 0.25f, 0.25f, 0.25f}}, + {{0, IGNORE, IGNORE, IGNORE, IGNORE}, + IGNORE, + {1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F}}, + {{0, 0, 0, IGNORE, 0, 0}, + IGNORE, + {SQRT_ONE_HALF, SQRT_ONE_HALF, 1.0f, IGNORE_F, 0.5f, 0.5f}}, + // Downmixes to stereo + {{0, 1, IGNORE}, IGNORE, {1.0f, 1.0f, IGNORE_F}}, + {{0, 1, 0, 1}, IGNORE, {0.5f, 0.5f, 0.5f, 0.5f}}, + {{0, 1, IGNORE, IGNORE, IGNORE}, + IGNORE, + {1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F}}, + {{0, 1, 0, IGNORE, 0, 1}, + 1, + {1.0f, 1.0f, SQRT_ONE_HALF, IGNORE_F, SQRT_ONE_HALF, SQRT_ONE_HALF}}, + // Downmixes to 3-channel + {{0, 1, 2, IGNORE}, IGNORE, {1.0f, 1.0f, 1.0f, IGNORE_F}}, + {{0, 1, 2, IGNORE, IGNORE}, IGNORE, {1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F}}, + {{0, 1, 2, IGNORE, IGNORE, IGNORE}, + IGNORE, + {1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F}}, + // Downmixes to quad + {{0, 1, 2, 3, IGNORE}, IGNORE, {1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F}}, + {{0, 1, 0, IGNORE, 2, 3}, + 1, + {1.0f, 1.0f, SQRT_ONE_HALF, IGNORE_F, 1.0f, 1.0f}}, + // Downmixes to 5-channel + {{0, 1, 2, 3, 4, IGNORE}, + IGNORE, + {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F}}}; + +/** + * Given an array of input channels, downmix to aOutputChannelCount, and copy + * the results to the channel buffers in aOutputChannels. Don't call this with + * input count <= output count. + */ +template +void AudioChannelsDownMix(Span aInputChannels, + Span aOutputChannels, + uint32_t aDuration) { + uint32_t inputChannelCount = aInputChannels.Length(); + uint32_t outputChannelCount = aOutputChannels.Length(); + NS_ASSERTION(inputChannelCount > outputChannelCount, "Nothing to do"); + + if (inputChannelCount > 6) { + // Just drop the unknown channels. + for (uint32_t o = 0; o < outputChannelCount; ++o) { + ConvertAudioSamples(aInputChannels[o], aOutputChannels[o], aDuration); + } + return; + } + + // Ignore unknown channels, they're just dropped. + inputChannelCount = std::min(6, inputChannelCount); + + const DownMixMatrix& m = + gDownMixMatrices[gMixingMatrixIndexByChannels[outputChannelCount - 1] + + inputChannelCount - outputChannelCount - 1]; + + // This is slow, but general. We can define custom code for special + // cases later. + for (DstT* outChannel : aOutputChannels) { + std::fill_n(outChannel, aDuration, static_cast(0)); + } + for (uint32_t c = 0; c < inputChannelCount; ++c) { + uint32_t dstIndex = m.mInputDestination[c]; + if (dstIndex == IGNORE) { + continue; + } + AddAudioSamplesWithScale(aInputChannels[c], aOutputChannels[dstIndex], + aDuration, m.mInputCoefficient[c]); + } + // Utilize the fact that in every layout, C is the only channel that may + // contribute to more than one output channel. 
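  // Compile-time spot check of the 5.1 -> stereo entry in gDownMixMatrices
  // above (made-up sample values): front left/right pass straight through,
  // the surrounds fold in at -3 dB, and the centre feeds the left output via
  // mInputDestination and, just below, the right output via
  // mCExtraDestination, i.e.
  //   L = FL + 0.7071*C + 0.7071*SL,  R = FR + 0.7071*C + 0.7071*SR.
  {
    constexpr float kSqrtHalf = 0.7071067811865476f;  // same value as SQRT_ONE_HALF
    constexpr float fl = 0.1f, fr = 0.2f, c = 0.4f, sl = 0.3f, sr = 0.5f;
    constexpr float left = fl + kSqrtHalf * c + kSqrtHalf * sl;
    constexpr float right = fr + kSqrtHalf * c + kSqrtHalf * sr;
    static_assert(left > 0.59f && right > 0.83f,
                  "numbers plugged into the downmix formula above");
  }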
+ uint32_t dstIndex = m.mCExtraDestination; + if (dstIndex != IGNORE) { + AddAudioSamplesWithScale(aInputChannels[SURROUND_C], + aOutputChannels[dstIndex], aDuration, + m.mInputCoefficient[SURROUND_C]); + } +} + +/** + * UpMixMatrix represents a conversion matrix by exploiting the fact that + * each output channel comes from at most one input channel. + */ +struct UpMixMatrix { + uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS]; +}; + +static const UpMixMatrix gUpMixMatrices[CUSTOM_CHANNEL_LAYOUTS * + (CUSTOM_CHANNEL_LAYOUTS - 1) / 2] = { + // Upmixes from mono + {{0, 0}}, + {{0, IGNORE, IGNORE}}, + {{0, 0, IGNORE, IGNORE}}, + {{0, IGNORE, IGNORE, IGNORE, IGNORE}}, + {{IGNORE, IGNORE, 0, IGNORE, IGNORE, IGNORE}}, + // Upmixes from stereo + {{0, 1, IGNORE}}, + {{0, 1, IGNORE, IGNORE}}, + {{0, 1, IGNORE, IGNORE, IGNORE}}, + {{0, 1, IGNORE, IGNORE, IGNORE, IGNORE}}, + // Upmixes from 3-channel + {{0, 1, 2, IGNORE}}, + {{0, 1, 2, IGNORE, IGNORE}}, + {{0, 1, 2, IGNORE, IGNORE, IGNORE}}, + // Upmixes from quad + {{0, 1, 2, 3, IGNORE}}, + {{0, 1, IGNORE, IGNORE, 2, 3}}, + // Upmixes from 5-channel + {{0, 1, 2, 3, 4, IGNORE}}}; + +/** + * Given an array of input channel data, and an output channel count, + * replaces the array with an array of upmixed channels. + * This shuffles the array and may set some channel buffers to aZeroChannel. + * Don't call this with input count >= output count. + * This may return *more* channels than requested. In that case, downmixing + * is required to to get to aOutputChannelCount. (This is how we handle + * odd cases like 3 -> 4 upmixing.) + * If aChannelArray.Length() was the input to one of a series of + * GetAudioChannelsSuperset calls resulting in aOutputChannelCount, + * no downmixing will be required. + */ +template +void AudioChannelsUpMix(nsTArray* aChannelArray, + uint32_t aOutputChannelCount, const T* aZeroChannel) { + uint32_t inputChannelCount = aChannelArray->Length(); + uint32_t outputChannelCount = + GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount); + NS_ASSERTION(outputChannelCount > inputChannelCount, "No up-mix needed"); + MOZ_ASSERT(inputChannelCount > 0, "Bad number of channels"); + MOZ_ASSERT(outputChannelCount > 0, "Bad number of channels"); + + aChannelArray->SetLength(outputChannelCount); + + if (inputChannelCount < CUSTOM_CHANNEL_LAYOUTS && + outputChannelCount <= CUSTOM_CHANNEL_LAYOUTS) { + const UpMixMatrix& m = + gUpMixMatrices[gMixingMatrixIndexByChannels[inputChannelCount - 1] + + outputChannelCount - inputChannelCount - 1]; + + const T* outputChannels[CUSTOM_CHANNEL_LAYOUTS]; + + for (uint32_t i = 0; i < outputChannelCount; ++i) { + uint8_t channelIndex = m.mInputDestination[i]; + if (channelIndex == IGNORE) { + outputChannels[i] = aZeroChannel; + } else { + outputChannels[i] = aChannelArray->ElementAt(channelIndex); + } + } + for (uint32_t i = 0; i < outputChannelCount; ++i) { + aChannelArray->ElementAt(i) = outputChannels[i]; + } + return; + } + + for (uint32_t i = inputChannelCount; i < outputChannelCount; ++i) { + aChannelArray->ElementAt(i) = aZeroChannel; + } +} + +} // namespace mozilla + +#endif /* MOZILLA_AUDIOCHANNELFORMAT_H_ */ diff --git a/dom/media/AudioCompactor.cpp b/dom/media/AudioCompactor.cpp new file mode 100644 index 0000000000..54e723d55e --- /dev/null +++ b/dom/media/AudioCompactor.cpp @@ -0,0 +1,65 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public 
+ * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#include "AudioCompactor.h" +#if defined(MOZ_MEMORY) +# include "mozmemory.h" +#endif + +namespace mozilla { + +static size_t MallocGoodSize(size_t aSize) { +#if defined(MOZ_MEMORY) + return malloc_good_size(aSize); +#else + return aSize; +#endif +} + +static size_t TooMuchSlop(size_t aSize, size_t aAllocSize, size_t aMaxSlop) { + // If the allocated size is less then our target size, then we + // are chunking. This means it will be completely filled with + // zero slop. + size_t slop = (aAllocSize > aSize) ? (aAllocSize - aSize) : 0; + return slop > aMaxSlop; +} + +uint32_t AudioCompactor::GetChunkSamples(uint32_t aFrames, uint32_t aChannels, + size_t aMaxSlop) { + size_t size = AudioDataSize(aFrames, aChannels); + size_t chunkSize = MallocGoodSize(size); + + // Reduce the chunk size until we meet our slop goal or the chunk + // approaches an unreasonably small size. + while (chunkSize > 64 && TooMuchSlop(size, chunkSize, aMaxSlop)) { + chunkSize = MallocGoodSize(chunkSize / 2); + } + + // Calculate the number of samples based on expected malloc size + // in order to allow as many frames as possible to be packed. + return chunkSize / sizeof(AudioDataValue); +} + +uint32_t AudioCompactor::NativeCopy::operator()(AudioDataValue* aBuffer, + uint32_t aSamples) { + NS_ASSERTION(aBuffer, "cannot copy to null buffer pointer"); + NS_ASSERTION(aSamples, "cannot copy zero values"); + + size_t bufferBytes = aSamples * sizeof(AudioDataValue); + size_t maxBytes = std::min(bufferBytes, mSourceBytes - mNextByte); + uint32_t frames = maxBytes / BytesPerFrame(mChannels); + size_t bytes = frames * BytesPerFrame(mChannels); + + NS_ASSERTION((mNextByte + bytes) <= mSourceBytes, + "tried to copy beyond source buffer"); + NS_ASSERTION(bytes <= bufferBytes, "tried to copy beyond destination buffer"); + + memcpy(aBuffer, mSource + mNextByte, bytes); + + mNextByte += bytes; + return frames; +} + +} // namespace mozilla diff --git a/dom/media/AudioCompactor.h b/dom/media/AudioCompactor.h new file mode 100644 index 0000000000..8281686977 --- /dev/null +++ b/dom/media/AudioCompactor.h @@ -0,0 +1,128 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#if !defined(AudioCompactor_h) +# define AudioCompactor_h + +# include "MediaQueue.h" +# include "MediaData.h" +# include "VideoUtils.h" + +namespace mozilla { + +class AudioCompactor { + public: + explicit AudioCompactor(MediaQueue& aQueue) : mQueue(aQueue) { + // Determine padding size used by AlignedBuffer. + size_t paddedSize = AlignedAudioBuffer::AlignmentPaddingSize(); + mSamplesPadding = paddedSize / sizeof(AudioDataValue); + if (mSamplesPadding * sizeof(AudioDataValue) < paddedSize) { + // Round up. + mSamplesPadding++; + } + } + + // Push audio data into the underlying queue with minimal heap allocation + // slop. This method is responsible for allocating AudioDataValue[] buffers. + // The caller must provide a functor to copy the data into the buffers. 
The + // functor must provide the following signature: + // + // uint32_t operator()(AudioDataValue *aBuffer, uint32_t aSamples); + // + // The functor must copy as many complete frames as possible to the provided + // buffer given its length (in AudioDataValue elements). The number of frames + // copied must be returned. This copy functor must support being called + // multiple times in order to copy the audio data fully. The copy functor + // must copy full frames as partial frames will be ignored. + template + bool Push(int64_t aOffset, int64_t aTime, int32_t aSampleRate, + uint32_t aFrames, uint32_t aChannels, CopyFunc aCopyFunc) { + auto time = media::TimeUnit::FromMicroseconds(aTime); + + // If we are losing more than a reasonable amount to padding, try to chunk + // the data. + size_t maxSlop = AudioDataSize(aFrames, aChannels) / MAX_SLOP_DIVISOR; + + while (aFrames > 0) { + uint32_t samples = GetChunkSamples(aFrames, aChannels, maxSlop); + if (samples / aChannels > mSamplesPadding / aChannels + 1) { + samples -= mSamplesPadding; + } + AlignedAudioBuffer buffer(samples); + if (!buffer) { + return false; + } + + // Copy audio data to buffer using caller-provided functor. + uint32_t framesCopied = aCopyFunc(buffer.get(), samples); + + NS_ASSERTION(framesCopied <= aFrames, "functor copied too many frames"); + buffer.SetLength(size_t(framesCopied) * aChannels); + + auto duration = media::TimeUnit(framesCopied, aSampleRate); + if (!duration.IsValid()) { + return false; + } + + RefPtr data = new AudioData(aOffset, time, std::move(buffer), + aChannels, aSampleRate); + MOZ_DIAGNOSTIC_ASSERT(duration == data->mDuration, "must be equal"); + mQueue.Push(data); + + // Remove the frames we just pushed into the queue and loop if there is + // more to be done. + time += duration; + aFrames -= framesCopied; + + // NOTE: No need to update aOffset as its only an approximation anyway. + } + + return true; + } + + // Copy functor suitable for copying audio samples already in the + // AudioDataValue format/layout expected by AudioStream on this platform. + class NativeCopy { + public: + NativeCopy(const uint8_t* aSource, size_t aSourceBytes, uint32_t aChannels) + : mSource(aSource), + mSourceBytes(aSourceBytes), + mChannels(aChannels), + mNextByte(0) {} + + uint32_t operator()(AudioDataValue* aBuffer, uint32_t aSamples); + + private: + const uint8_t* const mSource; + const size_t mSourceBytes; + const uint32_t mChannels; + size_t mNextByte; + }; + + // Allow 12.5% slop before chunking kicks in. Public so that the gtest can + // access it. + static const size_t MAX_SLOP_DIVISOR = 8; + + private: + // Compute the number of AudioDataValue samples that will be fit the most + // frames while keeping heap allocation slop less than the given threshold. 
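  // Worked numbers for the sizing question GetChunkSamples() answers
  // (hypothetical request, assuming 32-bit float AudioDataValue): 1023 stereo
  // frames need 8184 bytes, so with MAX_SLOP_DIVISOR == 8 up to 1023 bytes of
  // allocator padding are tolerated before the halving loop in
  // AudioCompactor.cpp starts splitting the request into smaller chunks.
  static_assert(1023 * 2 * sizeof(float) / MAX_SLOP_DIVISOR == 1023,
                "12.5% of an 8184-byte request is 1023 bytes");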
+ static uint32_t GetChunkSamples(uint32_t aFrames, uint32_t aChannels, + size_t aMaxSlop); + + static size_t BytesPerFrame(uint32_t aChannels) { + return sizeof(AudioDataValue) * aChannels; + } + + static size_t AudioDataSize(uint32_t aFrames, uint32_t aChannels) { + return aFrames * BytesPerFrame(aChannels); + } + + MediaQueue& mQueue; + size_t mSamplesPadding; +}; + +} // namespace mozilla + +#endif // AudioCompactor_h diff --git a/dom/media/AudioConfig.cpp b/dom/media/AudioConfig.cpp new file mode 100644 index 0000000000..b23d96f8fa --- /dev/null +++ b/dom/media/AudioConfig.cpp @@ -0,0 +1,385 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioConfig.h" +#include "nsString.h" +#include + +namespace mozilla { + +using ChannelLayout = AudioConfig::ChannelLayout; + +/** + * AudioConfig::ChannelLayout + */ + +/* + SMPTE channel layout (also known as wave order) + DUAL-MONO L R + DUAL-MONO-LFE L R LFE + MONO M + MONO-LFE M LFE + STEREO L R + STEREO-LFE L R LFE + 3F L R C + 3F-LFE L R C LFE + 2F1 L R S + 2F1-LFE L R LFE S + 3F1 L R C S + 3F1-LFE L R C LFE S + 2F2 L R LS RS + 2F2-LFE L R LFE LS RS + 3F2 L R C LS RS + 3F2-LFE L R C LFE LS RS + 3F3R-LFE L R C LFE BC LS RS + 3F4-LFE L R C LFE Rls Rrs LS RS +*/ + +void AudioConfig::ChannelLayout::UpdateChannelMap() { + mValid = mChannels.Length() <= MAX_CHANNELS; + mChannelMap = UNKNOWN_MAP; + if (mValid) { + mChannelMap = Map(); + mValid = mChannelMap > 0; + } +} + +auto AudioConfig::ChannelLayout::Map() const -> ChannelMap { + if (mChannelMap != UNKNOWN_MAP) { + return mChannelMap; + } + if (mChannels.Length() > MAX_CHANNELS) { + return UNKNOWN_MAP; + } + ChannelMap map = UNKNOWN_MAP; + for (size_t i = 0; i < mChannels.Length(); i++) { + if (uint32_t(mChannels[i]) > sizeof(ChannelMap) * 8) { + return UNKNOWN_MAP; + } + ChannelMap mask = 1 << mChannels[i]; + if (mChannels[i] == CHANNEL_INVALID || (mChannelMap & mask)) { + // Invalid configuration. 
+ return UNKNOWN_MAP; + } + map |= mask; + } + return map; +} + +const AudioConfig::Channel* +AudioConfig::ChannelLayout::DefaultLayoutForChannels(uint32_t aChannels) const { + switch (aChannels) { + case 1: // MONO + { + static const Channel config[] = {CHANNEL_FRONT_CENTER}; + return config; + } + case 2: // STEREO + { + static const Channel config[] = {CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT}; + return config; + } + case 3: // 3F + { + static const Channel config[] = {CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER}; + return config; + } + case 4: // QUAD + { + static const Channel config[] = {CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT}; + return config; + } + case 5: // 3F2 + { + static const Channel config[] = {CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_SIDE_LEFT, + CHANNEL_SIDE_RIGHT}; + return config; + } + case 6: // 3F2-LFE + { + static const Channel config[] = { + CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER, + CHANNEL_LFE, CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT}; + return config; + } + case 7: // 3F3R-LFE + { + static const Channel config[] = { + CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER, + CHANNEL_LFE, CHANNEL_BACK_CENTER, CHANNEL_SIDE_LEFT, + CHANNEL_SIDE_RIGHT}; + return config; + } + case 8: // 3F4-LFE + { + static const Channel config[] = { + CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER, + CHANNEL_LFE, CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT, + CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT}; + return config; + } + default: + return nullptr; + } +} + +/* static */ AudioConfig::ChannelLayout +AudioConfig::ChannelLayout::SMPTEDefault(const ChannelLayout& aChannelLayout) { + if (!aChannelLayout.IsValid()) { + return aChannelLayout; + } + return SMPTEDefault(aChannelLayout.Map()); +} + +/* static */ +ChannelLayout AudioConfig::ChannelLayout::SMPTEDefault(ChannelMap aMap) { + // First handle the most common cases. 
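  // The generic fallback at the end of this function simply walks the set
  // bits of the map in SMPTE order; in isolation it amounts to this sketch
  // (local lambda with made-up names, not used by the real code path):
  auto sketchExpandMap = [](ChannelMap aBits,
                            AutoTArray<Channel, MAX_CHANNELS>& aOut) {
    for (uint32_t bit = 0; aBits; ++bit, aBits >>= 1) {
      if (aBits & 1) {
        aOut.AppendElement(static_cast<Channel>(bit));
      }
    }
  };
  (void)sketchExpandMap;
  // The switch below short-circuits the common SMPTE layouts instead.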
+ switch (aMap) { + case LMONO_MAP: + return ChannelLayout{CHANNEL_FRONT_CENTER}; + case LSTEREO_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT}; + case L3F_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER}; + case L3F_LFE_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_LFE}; + case L2F1_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_BACK_CENTER}; + case L2F1_LFE_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_LFE, + CHANNEL_BACK_CENTER}; + case L3F1_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_BACK_CENTER}; + case L3F1_LFE_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_LFE, + CHANNEL_BACK_CENTER}; + case L2F2_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT}; + case L2F2_LFE_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_LFE, + CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT}; + case LQUAD_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT}; + case LQUAD_LFE_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_LFE, + CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT}; + case L3F2_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_SIDE_LEFT, + CHANNEL_SIDE_RIGHT}; + case L3F2_LFE_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_LFE, + CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT}; + case L3F2_BACK_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_BACK_LEFT, + CHANNEL_BACK_RIGHT}; + case L3F2_BACK_LFE_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_LFE, + CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT}; + case L3F3R_LFE_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_LFE, + CHANNEL_BACK_CENTER, CHANNEL_SIDE_LEFT, + CHANNEL_SIDE_RIGHT}; + case L3F4_LFE_MAP: + return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, CHANNEL_LFE, + CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT, + CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT}; + default: + break; + } + + static_assert(MAX_CHANNELS <= sizeof(ChannelMap) * 8, + "Must be able to fit channels on bit mask"); + AutoTArray layout; + uint32_t channels = 0; + + uint32_t i = 0; + while (aMap) { + if (aMap & 1) { + channels++; + if (channels > MAX_CHANNELS) { + return ChannelLayout(); + } + layout.AppendElement(static_cast(i)); + } + aMap >>= 1; + i++; + } + return ChannelLayout(channels, layout.Elements()); +} + +nsCString AudioConfig::ChannelLayout::ChannelMapToString( + const ChannelMap aChannelMap) { + nsCString rv; + + constexpr const std::array CHANNEL_NAME = {"Front left", + "Front right", + "Front center", + "Low frequency", + "Back left", + "Back right", + "Front left of center", + "Front right of center", + "Back center", + "Side left", + "Side right", + "Top center", + "Top front left", + "Top front center", + "Top front right", + "Top back left", + "Top back center", + "Top back right"}; + + rv.AppendPrintf("0x%08x", aChannelMap); + rv.Append("["); + bool empty = true; + for (size_t i = 0; i < CHANNEL_NAME.size(); i++) { + if (aChannelMap & (1 << i)) { + if 
(!empty) { + rv.Append("|"); + } + empty = false; + rv.Append(CHANNEL_NAME[i]); + } + } + rv.Append("]"); + + return rv; +} + +bool AudioConfig::ChannelLayout::MappingTable(const ChannelLayout& aOther, + nsTArray* aMap) const { + if (!IsValid() || !aOther.IsValid() || Map() != aOther.Map()) { + if (aMap) { + aMap->SetLength(0); + } + return false; + } + if (!aMap) { + return true; + } + aMap->SetLength(Count()); + for (uint32_t i = 0; i < Count(); i++) { + for (uint32_t j = 0; j < Count(); j++) { + if (aOther[j] == mChannels[i]) { + (*aMap)[j] = i; + break; + } + } + } + return true; +} + +/** + * AudioConfig::ChannelConfig + */ + +/* static */ const char* AudioConfig::FormatToString( + AudioConfig::SampleFormat aFormat) { + switch (aFormat) { + case FORMAT_U8: + return "unsigned 8 bit"; + case FORMAT_S16: + return "signed 16 bit"; + case FORMAT_S24: + return "signed 24 bit MSB"; + case FORMAT_S24LSB: + return "signed 24 bit LSB"; + case FORMAT_S32: + return "signed 32 bit"; + case FORMAT_FLT: + return "32 bit floating point"; + case FORMAT_NONE: + return "none"; + default: + return "unknown"; + } +} +/* static */ +uint32_t AudioConfig::SampleSize(AudioConfig::SampleFormat aFormat) { + switch (aFormat) { + case FORMAT_U8: + return 1; + case FORMAT_S16: + return 2; + case FORMAT_S24: + [[fallthrough]]; + case FORMAT_S24LSB: + [[fallthrough]]; + case FORMAT_S32: + [[fallthrough]]; + case FORMAT_FLT: + return 4; + case FORMAT_NONE: + default: + return 0; + } +} + +/* static */ +uint32_t AudioConfig::FormatToBits(AudioConfig::SampleFormat aFormat) { + switch (aFormat) { + case FORMAT_U8: + return 8; + case FORMAT_S16: + return 16; + case FORMAT_S24LSB: + [[fallthrough]]; + case FORMAT_S24: + return 24; + case FORMAT_S32: + [[fallthrough]]; + case FORMAT_FLT: + return 32; + case FORMAT_NONE: + [[fallthrough]]; + default: + return 0; + } +} + +AudioConfig::AudioConfig(const ChannelLayout& aChannelLayout, uint32_t aRate, + AudioConfig::SampleFormat aFormat, bool aInterleaved) + : mChannelLayout(aChannelLayout), + mChannels(aChannelLayout.Count()), + mRate(aRate), + mFormat(aFormat), + mInterleaved(aInterleaved) {} + +AudioConfig::AudioConfig(const ChannelLayout& aChannelLayout, + uint32_t aChannels, uint32_t aRate, + AudioConfig::SampleFormat aFormat, bool aInterleaved) + : mChannelLayout(aChannelLayout), + mChannels(aChannels), + mRate(aRate), + mFormat(aFormat), + mInterleaved(aInterleaved) {} + +AudioConfig::AudioConfig(uint32_t aChannels, uint32_t aRate, + AudioConfig::SampleFormat aFormat, bool aInterleaved) + : mChannelLayout(aChannels), + mChannels(aChannels), + mRate(aRate), + mFormat(aFormat), + mInterleaved(aInterleaved) {} + +} // namespace mozilla diff --git a/dom/media/AudioConfig.h b/dom/media/AudioConfig.h new file mode 100644 index 0000000000..e31aa55d2c --- /dev/null +++ b/dom/media/AudioConfig.h @@ -0,0 +1,274 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ +#if !defined(AudioLayout_h) +# define AudioLayout_h + +# include +# include +# include "mozilla/MathAlgorithms.h" +# include "nsTArray.h" +# include "cubeb/cubeb.h" + +namespace mozilla { + +class AudioConfig { + public: + // Channel definition is conveniently defined to be in the same order as + // WAVEFORMAT && SMPTE, even though this is unused for now. + enum Channel { + CHANNEL_INVALID = -1, + CHANNEL_FRONT_LEFT = 0, + CHANNEL_FRONT_RIGHT, + CHANNEL_FRONT_CENTER, + CHANNEL_LFE, + CHANNEL_BACK_LEFT, + CHANNEL_BACK_RIGHT, + CHANNEL_FRONT_LEFT_OF_CENTER, + CHANNEL_FRONT_RIGHT_OF_CENTER, + CHANNEL_BACK_CENTER, + CHANNEL_SIDE_LEFT, + CHANNEL_SIDE_RIGHT, + // From WAVEFORMAT definition. + CHANNEL_TOP_CENTER, + CHANNEL_TOP_FRONT_LEFT, + CHANNEL_TOP_FRONT_CENTER, + CHANNEL_TOP_FRONT_RIGHT, + CHANNEL_TOP_BACK_LEFT, + CHANNEL_TOP_BACK_CENTER, + CHANNEL_TOP_BACK_RIGHT + }; + + class ChannelLayout { + public: + // The maximum number of channels a channel map can represent. + static constexpr uint32_t MAX_CHANNELS = 32; + + using ChannelMap = uint32_t; + + ChannelLayout() : mChannelMap(UNKNOWN_MAP), mValid(false) {} + explicit ChannelLayout(uint32_t aChannels) + : ChannelLayout(aChannels, DefaultLayoutForChannels(aChannels)) {} + ChannelLayout(uint32_t aChannels, const Channel* aConfig) + : ChannelLayout() { + if (aChannels == 0 || !aConfig) { + return; + } + mChannels.AppendElements(aConfig, aChannels); + UpdateChannelMap(); + } + explicit ChannelLayout(std::initializer_list aChannelList) + : ChannelLayout(aChannelList.size(), aChannelList.begin()) {} + bool operator==(const ChannelLayout& aOther) const { + return mChannels == aOther.mChannels; + } + bool operator!=(const ChannelLayout& aOther) const { + return mChannels != aOther.mChannels; + } + const Channel& operator[](uint32_t aIndex) const { + MOZ_ASSERT(mChannels.Length() > aIndex); + return mChannels[aIndex]; + } + uint32_t Count() const { return mChannels.Length(); } + ChannelMap Map() const; + + // Calculate the mapping table from the current layout to aOther such that + // one can easily go from one layout to the other by doing: + // out[channel] = in[map[channel]]. + // Returns true if the reordering is possible or false otherwise. + // If true, then aMap, if set, will be updated to contain the mapping table + // allowing conversion from the current layout to aOther. + // If aMap is empty, then MappingTable can be used to simply determine if + // the current layout can be easily reordered to aOther. + bool MappingTable(const ChannelLayout& aOther, + nsTArray* aMap = nullptr) const; + bool IsValid() const { return mValid; } + bool HasChannel(Channel aChannel) const { + return mChannelMap & (1 << aChannel); + } + // Return the number of channels found in this ChannelMap. + static uint32_t Channels(ChannelMap aMap) { + static_assert(sizeof(ChannelMap) == sizeof(uint32_t), + "Must adjust ChannelMap type"); + return CountPopulation32(aMap); + } + + static ChannelLayout SMPTEDefault(const ChannelLayout& aChannelLayout); + static ChannelLayout SMPTEDefault(ChannelMap aMap); + // Convert a channel map to a human readable string for debugging purposes. + static nsCString ChannelMapToString(const ChannelMap aChannelMap); + + static constexpr ChannelMap UNKNOWN_MAP = 0; + + // Common channel layout definitions. 
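    // Reading these maps: each constant is one bit per Channel enum value, so
    // e.g. the stereo map defined below is (1 << 0) | (1 << 1) == 0x3, and
    // Channels() recovers the channel count by counting set bits.
    static_assert((1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT) == 0x3,
                  "front left/right occupy the two lowest bits");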
+ static constexpr ChannelMap LMONO_MAP = 1 << CHANNEL_FRONT_CENTER; + static constexpr ChannelMap LMONO_LFE_MAP = + 1 << CHANNEL_FRONT_CENTER | 1 << CHANNEL_LFE; + static constexpr ChannelMap LSTEREO_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT; + static constexpr ChannelMap LSTEREO_LFE_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | 1 << CHANNEL_LFE; + static constexpr ChannelMap L3F_MAP = 1 << CHANNEL_FRONT_LEFT | + 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_FRONT_CENTER; + static constexpr ChannelMap L3F_LFE_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_FRONT_CENTER | 1 << CHANNEL_LFE; + static constexpr ChannelMap L2F1_MAP = 1 << CHANNEL_FRONT_LEFT | + 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_BACK_CENTER; + static constexpr ChannelMap L2F1_LFE_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | 1 << CHANNEL_LFE | + 1 << CHANNEL_BACK_CENTER; + static constexpr ChannelMap L3F1_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_FRONT_CENTER | 1 << CHANNEL_BACK_CENTER; + static constexpr ChannelMap LSURROUND_MAP = L3F1_MAP; + static constexpr ChannelMap L3F1_LFE_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_FRONT_CENTER | 1 << CHANNEL_LFE | 1 << CHANNEL_BACK_CENTER; + static constexpr ChannelMap L2F2_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_SIDE_LEFT | 1 << CHANNEL_SIDE_RIGHT; + static constexpr ChannelMap L2F2_LFE_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | 1 << CHANNEL_LFE | + 1 << CHANNEL_SIDE_LEFT | 1 << CHANNEL_SIDE_RIGHT; + static constexpr ChannelMap LQUAD_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_BACK_LEFT | 1 << CHANNEL_BACK_RIGHT; + static constexpr ChannelMap LQUAD_LFE_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | 1 << CHANNEL_LFE | + 1 << CHANNEL_BACK_LEFT | 1 << CHANNEL_BACK_RIGHT; + static constexpr ChannelMap L3F2_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_FRONT_CENTER | 1 << CHANNEL_SIDE_LEFT | + 1 << CHANNEL_SIDE_RIGHT; + static constexpr ChannelMap L3F2_LFE_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_FRONT_CENTER | 1 << CHANNEL_LFE | 1 << CHANNEL_SIDE_LEFT | + 1 << CHANNEL_SIDE_RIGHT; + // 3F2_LFE Alias + static constexpr ChannelMap L5POINT1_SURROUND_MAP = L3F2_LFE_MAP; + static constexpr ChannelMap L3F2_BACK_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_FRONT_CENTER | 1 << CHANNEL_BACK_LEFT | + 1 << CHANNEL_BACK_RIGHT; + static constexpr ChannelMap L3F2_BACK_LFE_MAP = + L3F2_BACK_MAP | 1 << CHANNEL_LFE; + static constexpr ChannelMap L3F3R_LFE_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_FRONT_CENTER | 1 << CHANNEL_LFE | + 1 << CHANNEL_BACK_CENTER | 1 << CHANNEL_SIDE_LEFT | + 1 << CHANNEL_SIDE_RIGHT; + static ChannelLayout L3F4_LFE; + static constexpr ChannelMap L3F4_LFE_MAP = + 1 << CHANNEL_FRONT_LEFT | 1 << CHANNEL_FRONT_RIGHT | + 1 << CHANNEL_FRONT_CENTER | 1 << CHANNEL_LFE | 1 << CHANNEL_BACK_LEFT | + 1 << CHANNEL_BACK_RIGHT | 1 << CHANNEL_SIDE_LEFT | + 1 << CHANNEL_SIDE_RIGHT; + // 3F4_LFE Alias + static ChannelLayout L7POINT1_SURROUND; + static constexpr ChannelMap L7POINT1_SURROUND_MAP = L3F4_LFE_MAP; + + // Statically check that we can static_cast a Gecko ChannelLayout to a + // cubeb_channel_layout. 
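    // Because the two bit layouts line up, a ChannelMap can be handed to cubeb
    // with a plain cast instead of a translation table. Hypothetical helper
    // (not part of this patch) showing what the asserts below guarantee:
    static cubeb_channel_layout SketchToCubebLayout(ChannelMap aMap) {
      return static_cast<cubeb_channel_layout>(aMap);
    }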
+ static_assert(CUBEB_LAYOUT_UNDEFINED == UNKNOWN_MAP); + static_assert(CUBEB_LAYOUT_MONO == LMONO_MAP); + static_assert(CUBEB_LAYOUT_MONO_LFE == LMONO_LFE_MAP); + static_assert(CUBEB_LAYOUT_STEREO == LSTEREO_MAP); + static_assert(CUBEB_LAYOUT_STEREO_LFE == LSTEREO_LFE_MAP); + static_assert(CUBEB_LAYOUT_3F == L3F_MAP); + static_assert(CUBEB_LAYOUT_3F_LFE == L3F_LFE_MAP); + static_assert(CUBEB_LAYOUT_2F1 == L2F1_MAP); + static_assert(CUBEB_LAYOUT_2F1_LFE == L2F1_LFE_MAP); + static_assert(CUBEB_LAYOUT_3F1 == L3F1_MAP); + static_assert(CUBEB_LAYOUT_3F1_LFE == L3F1_LFE_MAP); + static_assert(CUBEB_LAYOUT_2F2 == L2F2_MAP); + static_assert(CUBEB_LAYOUT_3F2_LFE == L3F2_LFE_MAP); + static_assert(CUBEB_LAYOUT_QUAD == LQUAD_MAP); + static_assert(CUBEB_LAYOUT_QUAD_LFE == LQUAD_LFE_MAP); + static_assert(CUBEB_LAYOUT_3F2 == L3F2_MAP); + static_assert(CUBEB_LAYOUT_3F2_LFE == L3F2_LFE_MAP); + static_assert(CUBEB_LAYOUT_3F2_BACK == L3F2_BACK_MAP); + static_assert(CUBEB_LAYOUT_3F2_LFE_BACK == L3F2_BACK_LFE_MAP); + static_assert(CUBEB_LAYOUT_3F3R_LFE == L3F3R_LFE_MAP); + static_assert(CUBEB_LAYOUT_3F4_LFE == L3F4_LFE_MAP); + + private: + void UpdateChannelMap(); + const Channel* DefaultLayoutForChannels(uint32_t aChannels) const; + CopyableAutoTArray mChannels; + ChannelMap mChannelMap; + bool mValid; + }; + + enum SampleFormat { + FORMAT_NONE = 0, + FORMAT_U8, + FORMAT_S16, + FORMAT_S24LSB, + FORMAT_S24, + FORMAT_S32, + FORMAT_FLT, + FORMAT_DEFAULT = FORMAT_FLT + }; + + AudioConfig(const ChannelLayout& aChannelLayout, uint32_t aRate, + AudioConfig::SampleFormat aFormat = FORMAT_DEFAULT, + bool aInterleaved = true); + AudioConfig(const ChannelLayout& aChannelLayout, uint32_t aChannels, + uint32_t aRate, + AudioConfig::SampleFormat aFormat = FORMAT_DEFAULT, + bool aInterleaved = true); + // Will create a channel configuration from default SMPTE ordering. + AudioConfig(uint32_t aChannels, uint32_t aRate, + AudioConfig::SampleFormat aFormat = FORMAT_DEFAULT, + bool aInterleaved = true); + + const ChannelLayout& Layout() const { return mChannelLayout; } + uint32_t Channels() const { + if (!mChannelLayout.IsValid()) { + return mChannels; + } + return mChannelLayout.Count(); + } + uint32_t Rate() const { return mRate; } + SampleFormat Format() const { return mFormat; } + bool Interleaved() const { return mInterleaved; } + bool operator==(const AudioConfig& aOther) const { + return mChannelLayout == aOther.mChannelLayout && mRate == aOther.mRate && + mFormat == aOther.mFormat && mInterleaved == aOther.mInterleaved; + } + bool operator!=(const AudioConfig& aOther) const { + return !(*this == aOther); + } + + bool IsValid() const { + return mChannelLayout.IsValid() && Format() != FORMAT_NONE && Rate() > 0; + } + + static const char* FormatToString(SampleFormat aFormat); + static uint32_t SampleSize(SampleFormat aFormat); + static uint32_t FormatToBits(SampleFormat aFormat); + + private: + // Channels configuration. + ChannelLayout mChannelLayout; + + // Channel count. + uint32_t mChannels; + + // Sample rate. + uint32_t mRate; + + // Sample format. 
+ SampleFormat mFormat; + + bool mInterleaved; +}; + +} // namespace mozilla + +#endif // AudioLayout_h diff --git a/dom/media/AudioConverter.cpp b/dom/media/AudioConverter.cpp new file mode 100644 index 0000000000..9f2c32ceed --- /dev/null +++ b/dom/media/AudioConverter.cpp @@ -0,0 +1,485 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioConverter.h" +#include +#include +#include + +/* + * Parts derived from MythTV AudioConvert Class + * Created by Jean-Yves Avenard. + * + * Copyright (C) Bubblestuff Pty Ltd 2013 + * Copyright (C) foobum@gmail.com 2010 + */ + +namespace mozilla { + +AudioConverter::AudioConverter(const AudioConfig& aIn, const AudioConfig& aOut) + : mIn(aIn), mOut(aOut), mResampler(nullptr) { + MOZ_DIAGNOSTIC_ASSERT(CanConvert(aIn, aOut), + "The conversion is not supported"); + mIn.Layout().MappingTable(mOut.Layout(), &mChannelOrderMap); + if (aIn.Rate() != aOut.Rate()) { + RecreateResampler(); + } +} + +AudioConverter::~AudioConverter() { + if (mResampler) { + speex_resampler_destroy(mResampler); + mResampler = nullptr; + } +} + +bool AudioConverter::CanConvert(const AudioConfig& aIn, + const AudioConfig& aOut) { + if (aIn.Format() != aOut.Format() || + aIn.Interleaved() != aOut.Interleaved()) { + NS_WARNING("No format conversion is supported at this stage"); + return false; + } + if (aIn.Channels() != aOut.Channels() && aOut.Channels() > 2) { + NS_WARNING( + "Only down/upmixing to mono or stereo is supported at this stage"); + return false; + } + if (!aOut.Interleaved()) { + NS_WARNING("planar audio format not supported"); + return false; + } + return true; +} + +bool AudioConverter::CanWorkInPlace() const { + bool needDownmix = mIn.Channels() > mOut.Channels(); + bool needUpmix = mIn.Channels() < mOut.Channels(); + bool canDownmixInPlace = + mIn.Channels() * AudioConfig::SampleSize(mIn.Format()) >= + mOut.Channels() * AudioConfig::SampleSize(mOut.Format()); + bool needResample = mIn.Rate() != mOut.Rate(); + bool canResampleInPlace = mIn.Rate() >= mOut.Rate(); + // We should be able to work in place if 1s of audio input takes less space + // than 1s of audio output. However, as we downmix before resampling we can't + // perform any upsampling in place (e.g. if incoming rate >= outgoing rate) + return !needUpmix && (!needDownmix || canDownmixInPlace) && + (!needResample || canResampleInPlace); +} + +size_t AudioConverter::ProcessInternal(void* aOut, const void* aIn, + size_t aFrames) { + if (!aFrames) { + return 0; + } + + if (mIn.Channels() > mOut.Channels()) { + return DownmixAudio(aOut, aIn, aFrames); + } + + if (mIn.Channels() < mOut.Channels()) { + return UpmixAudio(aOut, aIn, aFrames); + } + + if (mIn.Layout() != mOut.Layout() && CanReorderAudio()) { + ReOrderInterleavedChannels(aOut, aIn, aFrames); + } else if (aIn != aOut) { + memmove(aOut, aIn, FramesOutToBytes(aFrames)); + } + return aFrames; +} + +// Reorder interleaved channels. +// Can work in place (e.g aOut == aIn). 
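CanWorkInPlace() above boils down to a byte-budget comparison: reusing the input buffer is only safe when one second of output never needs more bytes than one second of input, which rules out upmixing and, because downmix happens before resampling, upsampling as well. A standalone sketch of that test, with illustrative channel counts, sample sizes and rates:

// In-place feasibility test, mirroring the logic of CanWorkInPlace().
#include <cassert>
#include <cstddef>

struct Cfg {
  size_t channels;
  size_t sampleSize;  // bytes per sample
  size_t rate;        // frames per second
};

bool CanWorkInPlace(const Cfg& in, const Cfg& out) {
  bool needUpmix = in.channels < out.channels;
  bool needDownmix = in.channels > out.channels;
  bool canDownmixInPlace =
      in.channels * in.sampleSize >= out.channels * out.sampleSize;
  bool needResample = in.rate != out.rate;
  bool canResampleInPlace = in.rate >= out.rate;  // downsampling shrinks data
  return !needUpmix && (!needDownmix || canDownmixInPlace) &&
         (!needResample || canResampleInPlace);
}

int main() {
  // 5.1 float 48kHz -> stereo float 48kHz: downmix shrinks the data, OK in place.
  assert(CanWorkInPlace({6, 4, 48000}, {2, 4, 48000}));
  // mono 44.1kHz -> stereo 44.1kHz: upmix grows the data, needs a new buffer.
  assert(!CanWorkInPlace({1, 2, 44100}, {2, 2, 44100}));
  // stereo 44.1kHz -> stereo 48kHz: upsampling grows the data, needs a new buffer.
  assert(!CanWorkInPlace({2, 4, 44100}, {2, 4, 48000}));
}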
+template +void _ReOrderInterleavedChannels(AudioDataType* aOut, const AudioDataType* aIn, + uint32_t aFrames, uint32_t aChannels, + const uint8_t* aChannelOrderMap) { + MOZ_DIAGNOSTIC_ASSERT(aChannels <= AudioConfig::ChannelLayout::MAX_CHANNELS); + AudioDataType val[AudioConfig::ChannelLayout::MAX_CHANNELS]; + for (uint32_t i = 0; i < aFrames; i++) { + for (uint32_t j = 0; j < aChannels; j++) { + val[j] = aIn[aChannelOrderMap[j]]; + } + for (uint32_t j = 0; j < aChannels; j++) { + aOut[j] = val[j]; + } + aOut += aChannels; + aIn += aChannels; + } +} + +void AudioConverter::ReOrderInterleavedChannels(void* aOut, const void* aIn, + size_t aFrames) const { + MOZ_DIAGNOSTIC_ASSERT(mIn.Channels() == mOut.Channels()); + MOZ_DIAGNOSTIC_ASSERT(CanReorderAudio()); + + if (mChannelOrderMap.IsEmpty() || mOut.Channels() == 1 || + mOut.Layout() == mIn.Layout()) { + // If channel count is 1, planar and non-planar formats are the same or + // there's nothing to reorder, or if we don't know how to re-order. + if (aOut != aIn) { + memmove(aOut, aIn, FramesOutToBytes(aFrames)); + } + return; + } + + uint32_t bits = AudioConfig::FormatToBits(mOut.Format()); + switch (bits) { + case 8: + _ReOrderInterleavedChannels((uint8_t*)aOut, (const uint8_t*)aIn, aFrames, + mIn.Channels(), mChannelOrderMap.Elements()); + break; + case 16: + _ReOrderInterleavedChannels((int16_t*)aOut, (const int16_t*)aIn, aFrames, + mIn.Channels(), mChannelOrderMap.Elements()); + break; + default: + MOZ_DIAGNOSTIC_ASSERT(AudioConfig::SampleSize(mOut.Format()) == 4); + _ReOrderInterleavedChannels((int32_t*)aOut, (const int32_t*)aIn, aFrames, + mIn.Channels(), mChannelOrderMap.Elements()); + break; + } +} + +static inline int16_t clipTo15(int32_t aX) { + return aX < -32768 ? -32768 : aX <= 32767 ? aX : 32767; +} + +template +static void dumbUpDownMix(TYPE* aOut, int32_t aOutChannels, const TYPE* aIn, + int32_t aInChannels, int32_t aFrames) { + if (aIn == aOut) { + return; + } + int32_t commonChannels = std::min(aInChannels, aOutChannels); + + for (int32_t i = 0; i < aFrames; i++) { + for (int32_t j = 0; j < commonChannels; j++) { + aOut[i * aOutChannels + j] = aIn[i * aInChannels + j]; + } + if (aOutChannels > aInChannels) { + for (int32_t j = 0; j < aInChannels - aOutChannels; j++) { + aOut[i * aOutChannels + j] = 0; + } + } + } +} + +size_t AudioConverter::DownmixAudio(void* aOut, const void* aIn, + size_t aFrames) const { + MOZ_DIAGNOSTIC_ASSERT(mIn.Format() == AudioConfig::FORMAT_S16 || + mIn.Format() == AudioConfig::FORMAT_FLT); + MOZ_DIAGNOSTIC_ASSERT(mIn.Channels() >= mOut.Channels()); + MOZ_DIAGNOSTIC_ASSERT(mOut.Layout() == AudioConfig::ChannelLayout(2) || + mOut.Layout() == AudioConfig::ChannelLayout(1)); + + uint32_t inChannels = mIn.Channels(); + uint32_t outChannels = mOut.Channels(); + + if (inChannels == outChannels) { + if (aOut != aIn) { + memmove(aOut, aIn, FramesOutToBytes(aFrames)); + } + return aFrames; + } + + if (!mIn.Layout().IsValid() || !mOut.Layout().IsValid()) { + // Dumb copy dropping extra channels. 
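_ReOrderInterleavedChannels() above shuffles each interleaved frame through a small per-frame temporary, which is what lets the remap run in place. A self-contained sketch of the same idea, where orderMap[j] names the input channel that feeds output slot j (buffer contents and order map invented for the example):

// Reorder interleaved channels in place using an order map.
#include <cassert>
#include <cstdint>
#include <vector>

void Reorder(int16_t* data, uint32_t frames, uint32_t channels,
             const uint8_t* orderMap) {
  std::vector<int16_t> tmp(channels);
  for (uint32_t i = 0; i < frames; i++) {
    for (uint32_t j = 0; j < channels; j++) {
      tmp[j] = data[orderMap[j]];  // gather from the mapped input slot
    }
    for (uint32_t j = 0; j < channels; j++) {
      data[j] = tmp[j];            // write back in output order
    }
    data += channels;
    }
}

int main() {
  // Two frames of L,R,C interleaved audio, remapped to C,L,R.
  int16_t buf[] = {10, 20, 30, 11, 21, 31};
  const uint8_t order[] = {2, 0, 1};
  Reorder(buf, 2, 3, order);
  assert(buf[0] == 30 && buf[1] == 10 && buf[2] == 20);
  assert(buf[3] == 31 && buf[4] == 11 && buf[5] == 21);
}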
+ if (mIn.Format() == AudioConfig::FORMAT_FLT) { + dumbUpDownMix(static_cast(aOut), outChannels, + static_cast(aIn), inChannels, aFrames); + } else if (mIn.Format() == AudioConfig::FORMAT_S16) { + dumbUpDownMix(static_cast(aOut), outChannels, + static_cast(aIn), inChannels, aFrames); + } else { + MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type"); + } + return aFrames; + } + + MOZ_ASSERT( + mIn.Layout() == AudioConfig::ChannelLayout::SMPTEDefault(mIn.Layout()), + "Can only downmix input data in SMPTE layout"); + if (inChannels > 2) { + if (mIn.Format() == AudioConfig::FORMAT_FLT) { + // Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows + // 5-8. + static const float dmatrix[6][8][2] = { + /*3*/ {{0.5858f, 0}, {0, 0.5858f}, {0.4142f, 0.4142f}}, + /*4*/ + {{0.4226f, 0}, {0, 0.4226f}, {0.366f, 0.2114f}, {0.2114f, 0.366f}}, + /*5*/ + {{0.6510f, 0}, + {0, 0.6510f}, + {0.4600f, 0.4600f}, + {0.5636f, 0.3254f}, + {0.3254f, 0.5636f}}, + /*6*/ + {{0.5290f, 0}, + {0, 0.5290f}, + {0.3741f, 0.3741f}, + {0.3741f, 0.3741f}, + {0.4582f, 0.2645f}, + {0.2645f, 0.4582f}}, + /*7*/ + {{0.4553f, 0}, + {0, 0.4553f}, + {0.3220f, 0.3220f}, + {0.3220f, 0.3220f}, + {0.2788f, 0.2788f}, + {0.3943f, 0.2277f}, + {0.2277f, 0.3943f}}, + /*8*/ + {{0.3886f, 0}, + {0, 0.3886f}, + {0.2748f, 0.2748f}, + {0.2748f, 0.2748f}, + {0.3366f, 0.1943f}, + {0.1943f, 0.3366f}, + {0.3366f, 0.1943f}, + {0.1943f, 0.3366f}}, + }; + // Re-write the buffer with downmixed data + const float* in = static_cast(aIn); + float* out = static_cast(aOut); + for (uint32_t i = 0; i < aFrames; i++) { + float sampL = 0.0; + float sampR = 0.0; + for (uint32_t j = 0; j < inChannels; j++) { + sampL += in[i * inChannels + j] * dmatrix[inChannels - 3][j][0]; + sampR += in[i * inChannels + j] * dmatrix[inChannels - 3][j][1]; + } + if (outChannels == 2) { + *out++ = sampL; + *out++ = sampR; + } else { + *out++ = (sampL + sampR) * 0.5; + } + } + } else if (mIn.Format() == AudioConfig::FORMAT_S16) { + // Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows + // 5-8. Coefficients in Q14. 
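Each row of the float matrix above gives one input channel a left/right gain pair; a frame is downmixed by accumulating channel value times gain into the two sums, and the mono fold is simply the average of those sums. A worked standalone example applying the 6-channel (5.1) row to a single frame in SMPTE order (front left, front right, center, LFE, surround left, surround right); the input frame values are invented:

// One 5.1 frame folded down to stereo using the 6-channel matrix row above.
#include <cstdio>

int main() {
  const float dmatrix6[6][2] = {{0.5290f, 0.f},     {0.f, 0.5290f},
                                {0.3741f, 0.3741f}, {0.3741f, 0.3741f},
                                {0.4582f, 0.2645f}, {0.2645f, 0.4582f}};
  const float frame[6] = {0.5f, -0.25f, 0.1f, 0.0f, 0.3f, -0.3f};
  float left = 0.f, right = 0.f;
  for (int j = 0; j < 6; j++) {
    left += frame[j] * dmatrix6[j][0];
    right += frame[j] * dmatrix6[j][1];
  }
  // Folding further down to mono would simply average the two sums.
  std::printf("L=%f R=%f mono=%f\n", left, right, 0.5f * (left + right));
}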
+ static const int16_t dmatrix[6][8][2] = { + /*3*/ {{9598, 0}, {0, 9598}, {6786, 6786}}, + /*4*/ {{6925, 0}, {0, 6925}, {5997, 3462}, {3462, 5997}}, + /*5*/ + {{10663, 0}, {0, 10663}, {7540, 7540}, {9234, 5331}, {5331, 9234}}, + /*6*/ + {{8668, 0}, + {0, 8668}, + {6129, 6129}, + {6129, 6129}, + {7507, 4335}, + {4335, 7507}}, + /*7*/ + {{7459, 0}, + {0, 7459}, + {5275, 5275}, + {5275, 5275}, + {4568, 4568}, + {6460, 3731}, + {3731, 6460}}, + /*8*/ + {{6368, 0}, + {0, 6368}, + {4502, 4502}, + {4502, 4502}, + {5514, 3184}, + {3184, 5514}, + {5514, 3184}, + {3184, 5514}}}; + // Re-write the buffer with downmixed data + const int16_t* in = static_cast(aIn); + int16_t* out = static_cast(aOut); + for (uint32_t i = 0; i < aFrames; i++) { + int32_t sampL = 0; + int32_t sampR = 0; + for (uint32_t j = 0; j < inChannels; j++) { + sampL += in[i * inChannels + j] * dmatrix[inChannels - 3][j][0]; + sampR += in[i * inChannels + j] * dmatrix[inChannels - 3][j][1]; + } + sampL = clipTo15((sampL + 8192) >> 14); + sampR = clipTo15((sampR + 8192) >> 14); + if (outChannels == 2) { + *out++ = sampL; + *out++ = sampR; + } else { + *out++ = (sampL + sampR) * 0.5; + } + } + } else { + MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type"); + } + return aFrames; + } + + MOZ_DIAGNOSTIC_ASSERT(inChannels == 2 && outChannels == 1); + if (mIn.Format() == AudioConfig::FORMAT_FLT) { + const float* in = static_cast(aIn); + float* out = static_cast(aOut); + for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) { + float sample = 0.0; + // The sample of the buffer would be interleaved. + sample = (in[fIdx * inChannels] + in[fIdx * inChannels + 1]) * 0.5; + *out++ = sample; + } + } else if (mIn.Format() == AudioConfig::FORMAT_S16) { + const int16_t* in = static_cast(aIn); + int16_t* out = static_cast(aOut); + for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) { + int32_t sample = 0.0; + // The sample of the buffer would be interleaved. 
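The integer path above performs the same arithmetic in Q14 fixed point: each coefficient is roughly the float gain scaled by 2^14 (for instance 6129 is about 0.3741 * 16384), products are accumulated in 32 bits, rounded back with (+8192) >> 14, and clipped to the int16_t range by clipTo15(). A small standalone check of that arithmetic, with an invented input value:

// Q14 multiply, round-to-nearest, and clipping, as used by the int16_t path.
#include <algorithm>
#include <cassert>
#include <cstdint>

static int16_t clipTo15(int32_t x) {
  return static_cast<int16_t>(std::clamp(x, -32768, 32767));
}

int main() {
  // Scale one sample by a Q14 coefficient with round-to-nearest.
  int16_t in = 20000;
  int32_t acc = in * 8668;                     // Q14 product, fits in 32 bits
  int16_t out = clipTo15((acc + 8192) >> 14);  // +8192 rounds, >>14 rescales
  assert(out == 10581);                        // ~= 20000 * 0.529

  // Without clipping, a sum of large products could overflow int16_t.
  assert(clipTo15(40000) == 32767);
}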
+ sample = (in[fIdx * inChannels] + in[fIdx * inChannels + 1]) * 0.5; + *out++ = sample; + } + } else { + MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type"); + } + return aFrames; +} + +size_t AudioConverter::ResampleAudio(void* aOut, const void* aIn, + size_t aFrames) { + if (!mResampler) { + return 0; + } + uint32_t outframes = ResampleRecipientFrames(aFrames); + uint32_t inframes = aFrames; + + int error; + if (mOut.Format() == AudioConfig::FORMAT_FLT) { + const float* in = reinterpret_cast(aIn); + float* out = reinterpret_cast(aOut); + error = speex_resampler_process_interleaved_float(mResampler, in, &inframes, + out, &outframes); + } else if (mOut.Format() == AudioConfig::FORMAT_S16) { + const int16_t* in = reinterpret_cast(aIn); + int16_t* out = reinterpret_cast(aOut); + error = speex_resampler_process_interleaved_int(mResampler, in, &inframes, + out, &outframes); + } else { + MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type"); + error = RESAMPLER_ERR_ALLOC_FAILED; + } + MOZ_ASSERT(error == RESAMPLER_ERR_SUCCESS); + if (error != RESAMPLER_ERR_SUCCESS) { + speex_resampler_destroy(mResampler); + mResampler = nullptr; + return 0; + } + MOZ_ASSERT(inframes == aFrames, "Some frames will be dropped"); + return outframes; +} + +void AudioConverter::RecreateResampler() { + if (mResampler) { + speex_resampler_destroy(mResampler); + } + int error; + mResampler = speex_resampler_init(mOut.Channels(), mIn.Rate(), mOut.Rate(), + SPEEX_RESAMPLER_QUALITY_DEFAULT, &error); + + if (error == RESAMPLER_ERR_SUCCESS) { + speex_resampler_skip_zeros(mResampler); + } else { + NS_WARNING("Failed to initialize resampler."); + mResampler = nullptr; + } +} + +size_t AudioConverter::DrainResampler(void* aOut) { + if (!mResampler) { + return 0; + } + int frames = speex_resampler_get_input_latency(mResampler); + AlignedByteBuffer buffer(FramesOutToBytes(frames)); + if (!buffer) { + // OOM + return 0; + } + frames = ResampleAudio(aOut, buffer.Data(), frames); + // Tore down the resampler as it's easier than handling follow-up. + RecreateResampler(); + return frames; +} + +size_t AudioConverter::UpmixAudio(void* aOut, const void* aIn, + size_t aFrames) const { + MOZ_ASSERT(mIn.Format() == AudioConfig::FORMAT_S16 || + mIn.Format() == AudioConfig::FORMAT_FLT); + MOZ_ASSERT(mIn.Channels() < mOut.Channels()); + MOZ_ASSERT(mIn.Channels() == 1, "Can only upmix mono for now"); + MOZ_ASSERT(mOut.Channels() == 2, "Can only upmix to stereo for now"); + + if (!mIn.Layout().IsValid() || !mOut.Layout().IsValid() || + mOut.Channels() != 2) { + // Dumb copy the channels and insert silence for the extra channels. + if (mIn.Format() == AudioConfig::FORMAT_FLT) { + dumbUpDownMix(static_cast(aOut), mOut.Channels(), + static_cast(aIn), mIn.Channels(), aFrames); + } else if (mIn.Format() == AudioConfig::FORMAT_S16) { + dumbUpDownMix(static_cast(aOut), mOut.Channels(), + static_cast(aIn), mIn.Channels(), aFrames); + } else { + MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type"); + } + return aFrames; + } + + // Upmix mono to stereo. + // This is a very dumb mono to stereo upmixing, power levels are preserved + // following the calculation: left = right = -3dB*mono. + if (mIn.Format() == AudioConfig::FORMAT_FLT) { + const float m3db = std::sqrt(0.5); // -3dB = sqrt(1/2) + const float* in = static_cast(aIn); + float* out = static_cast(aOut); + for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) { + float sample = in[fIdx] * m3db; + // The samples of the buffer would be interleaved. 
+ *out++ = sample; + *out++ = sample; + } + } else if (mIn.Format() == AudioConfig::FORMAT_S16) { + const int16_t* in = static_cast(aIn); + int16_t* out = static_cast(aOut); + for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) { + int16_t sample = + ((int32_t)in[fIdx] * 11585) >> 14; // close enough to i*sqrt(0.5) + // The samples of the buffer would be interleaved. + *out++ = sample; + *out++ = sample; + } + } else { + MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type"); + } + + return aFrames; +} + +size_t AudioConverter::ResampleRecipientFrames(size_t aFrames) const { + if (!aFrames && mIn.Rate() != mOut.Rate()) { + if (!mResampler) { + return 0; + } + // We drain by pushing in get_input_latency() samples of 0 + aFrames = speex_resampler_get_input_latency(mResampler); + } + return (uint64_t)aFrames * mOut.Rate() / mIn.Rate() + 1; +} + +size_t AudioConverter::FramesOutToSamples(size_t aFrames) const { + return aFrames * mOut.Channels(); +} + +size_t AudioConverter::SamplesInToFrames(size_t aSamples) const { + return aSamples / mIn.Channels(); +} + +size_t AudioConverter::FramesOutToBytes(size_t aFrames) const { + return FramesOutToSamples(aFrames) * AudioConfig::SampleSize(mOut.Format()); +} +} // namespace mozilla diff --git a/dom/media/AudioConverter.h b/dom/media/AudioConverter.h new file mode 100644 index 0000000000..0ace580b26 --- /dev/null +++ b/dom/media/AudioConverter.h @@ -0,0 +1,277 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#if !defined(AudioConverter_h) +# define AudioConverter_h + +# include "MediaInfo.h" + +// Forward declaration +typedef struct SpeexResamplerState_ SpeexResamplerState; + +namespace mozilla { + +template +struct AudioDataBufferTypeChooser; +template <> +struct AudioDataBufferTypeChooser { + typedef uint8_t Type; +}; +template <> +struct AudioDataBufferTypeChooser { + typedef int16_t Type; +}; +template <> +struct AudioDataBufferTypeChooser { + typedef int32_t Type; +}; +template <> +struct AudioDataBufferTypeChooser { + typedef int32_t Type; +}; +template <> +struct AudioDataBufferTypeChooser { + typedef int32_t Type; +}; +template <> +struct AudioDataBufferTypeChooser { + typedef float Type; +}; + +// 'Value' is the type used externally to deal with stored value. +// AudioDataBuffer can perform conversion between different SampleFormat +// content. +template ::Type> +class AudioDataBuffer { + public: + AudioDataBuffer() = default; + AudioDataBuffer(Value* aBuffer, size_t aLength) : mBuffer(aBuffer, aLength) {} + explicit AudioDataBuffer(const AudioDataBuffer& aOther) + : mBuffer(aOther.mBuffer) {} + AudioDataBuffer(AudioDataBuffer&& aOther) + : mBuffer(std::move(aOther.mBuffer)) {} + template + explicit AudioDataBuffer( + const AudioDataBuffer& other) { + // TODO: Convert from different type, may use asm routines. + MOZ_CRASH("Conversion not implemented yet"); + } + + // A u8, s16 and float aligned buffer can only be treated as + // FORMAT_U8, FORMAT_S16 and FORMAT_FLT respectively. + // So allow them as copy and move constructors. 
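The mono-to-stereo upmix above preserves power by scaling each output channel by sqrt(1/2), i.e. -3 dB, and 11585 is that same gain in Q14, since sqrt(0.5) * 16384 is roughly 11585.2. A standalone check of both forms, with an illustrative input sample:

// -3 dB mono-to-stereo gain, in float and in Q14 fixed point.
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  const float m3db = std::sqrt(0.5f);
  assert(static_cast<int>(std::lround(m3db * 16384)) == 11585);

  // Q14 version of left = right = mono * sqrt(0.5), as in the int16_t path.
  int16_t mono = 16000;
  int16_t side =
      static_cast<int16_t>((static_cast<int32_t>(mono) * 11585) >> 14);
  assert(side == 11313);  // ~= 16000 * 0.7071
}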
+ explicit AudioDataBuffer(const AlignedByteBuffer& aBuffer) + : mBuffer(aBuffer) { + static_assert(Format == AudioConfig::FORMAT_U8, + "Conversion not implemented yet"); + } + explicit AudioDataBuffer(const AlignedShortBuffer& aBuffer) + : mBuffer(aBuffer) { + static_assert(Format == AudioConfig::FORMAT_S16, + "Conversion not implemented yet"); + } + explicit AudioDataBuffer(const AlignedFloatBuffer& aBuffer) + : mBuffer(aBuffer) { + static_assert(Format == AudioConfig::FORMAT_FLT, + "Conversion not implemented yet"); + } + explicit AudioDataBuffer(AlignedByteBuffer&& aBuffer) + : mBuffer(std::move(aBuffer)) { + static_assert(Format == AudioConfig::FORMAT_U8, + "Conversion not implemented yet"); + } + explicit AudioDataBuffer(AlignedShortBuffer&& aBuffer) + : mBuffer(std::move(aBuffer)) { + static_assert(Format == AudioConfig::FORMAT_S16, + "Conversion not implemented yet"); + } + explicit AudioDataBuffer(AlignedFloatBuffer&& aBuffer) + : mBuffer(std::move(aBuffer)) { + static_assert(Format == AudioConfig::FORMAT_FLT, + "Conversion not implemented yet"); + } + AudioDataBuffer& operator=(AudioDataBuffer&& aOther) { + mBuffer = std::move(aOther.mBuffer); + return *this; + } + AudioDataBuffer& operator=(const AudioDataBuffer& aOther) { + mBuffer = aOther.mBuffer; + return *this; + } + + Value* Data() const { return mBuffer.Data(); } + size_t Length() const { return mBuffer.Length(); } + size_t Size() const { return mBuffer.Size(); } + AlignedBuffer Forget() { + // Correct type -> Just give values as-is. + return std::move(mBuffer); + } + + private: + AlignedBuffer mBuffer; +}; + +typedef AudioDataBuffer AudioSampleBuffer; + +class AudioConverter { + public: + AudioConverter(const AudioConfig& aIn, const AudioConfig& aOut); + ~AudioConverter(); + + // Convert the AudioDataBuffer. + // Conversion will be done in place if possible. Otherwise a new buffer will + // be returned. + // Providing an empty buffer and resampling is expected, the resampler + // will be drained. + template + AudioDataBuffer Process( + AudioDataBuffer&& aBuffer) { + MOZ_DIAGNOSTIC_ASSERT(mIn.Format() == mOut.Format() && + mIn.Format() == Format); + AudioDataBuffer buffer = std::move(aBuffer); + if (CanWorkInPlace()) { + AlignedBuffer temp = buffer.Forget(); + Process(temp, temp.Data(), SamplesInToFrames(temp.Length())); + return AudioDataBuffer(std::move(temp)); + ; + } + return Process(buffer); + } + + template + AudioDataBuffer Process( + const AudioDataBuffer& aBuffer) { + MOZ_DIAGNOSTIC_ASSERT(mIn.Format() == mOut.Format() && + mIn.Format() == Format); + // Perform the downmixing / reordering in temporary buffer. + size_t frames = SamplesInToFrames(aBuffer.Length()); + AlignedBuffer temp1; + if (!temp1.SetLength(FramesOutToSamples(frames))) { + return AudioDataBuffer(std::move(temp1)); + } + frames = ProcessInternal(temp1.Data(), aBuffer.Data(), frames); + if (mIn.Rate() == mOut.Rate()) { + MOZ_ALWAYS_TRUE(temp1.SetLength(FramesOutToSamples(frames))); + return AudioDataBuffer(std::move(temp1)); + } + + // At this point, temp1 contains the buffer reordered and downmixed. + // If we are downsampling we can re-use it. + AlignedBuffer* outputBuffer = &temp1; + AlignedBuffer temp2; + if (!frames || mOut.Rate() > mIn.Rate()) { + // We are upsampling or about to drain, we can't work in place. + // Allocate another temporary buffer where the upsampling will occur. 
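When resampling, the destination above is sized by ResampleRecipientFrames(), i.e. frames * outRate / inRate plus one frame of slack, so that upsampling can never overrun the output buffer. A standalone sketch of that sizing, with illustrative rates:

// Destination-size estimate for a rate conversion, mirroring the formula used
// by ResampleRecipientFrames().
#include <cassert>
#include <cstdint>

size_t ResampleRecipientFrames(size_t frames, uint32_t inRate, uint32_t outRate) {
  return static_cast<size_t>(uint64_t(frames) * outRate / inRate + 1);
}

int main() {
  // Upsampling 44.1 kHz -> 48 kHz grows a 441-frame block to at most 481 frames.
  assert(ResampleRecipientFrames(441, 44100, 48000) == 481);
  // Downsampling shrinks it, so in-place conversion stays safe.
  assert(ResampleRecipientFrames(480, 48000, 44100) == 442);
}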
+ if (!temp2.SetLength( + FramesOutToSamples(ResampleRecipientFrames(frames)))) { + return AudioDataBuffer(std::move(temp2)); + } + outputBuffer = &temp2; + } + if (!frames) { + frames = DrainResampler(outputBuffer->Data()); + } else { + frames = ResampleAudio(outputBuffer->Data(), temp1.Data(), frames); + } + MOZ_ALWAYS_TRUE(outputBuffer->SetLength(FramesOutToSamples(frames))); + return AudioDataBuffer(std::move(*outputBuffer)); + } + + // Attempt to convert the AudioDataBuffer in place. + // Will return 0 if the conversion wasn't possible. + template + size_t Process(Value* aBuffer, size_t aFrames) { + MOZ_DIAGNOSTIC_ASSERT(mIn.Format() == mOut.Format()); + if (!CanWorkInPlace()) { + return 0; + } + size_t frames = ProcessInternal(aBuffer, aBuffer, aFrames); + if (frames && mIn.Rate() != mOut.Rate()) { + frames = ResampleAudio(aBuffer, aBuffer, aFrames); + } + return frames; + } + + template + size_t Process(AlignedBuffer& aOutBuffer, const Value* aInBuffer, + size_t aFrames) { + MOZ_DIAGNOSTIC_ASSERT(mIn.Format() == mOut.Format()); + MOZ_ASSERT((aFrames && aInBuffer) || !aFrames); + // Up/down mixing first + if (!aOutBuffer.SetLength(FramesOutToSamples(aFrames))) { + MOZ_ALWAYS_TRUE(aOutBuffer.SetLength(0)); + return 0; + } + size_t frames = ProcessInternal(aOutBuffer.Data(), aInBuffer, aFrames); + MOZ_ASSERT(frames == aFrames); + // Check if resampling is needed + if (mIn.Rate() == mOut.Rate()) { + return frames; + } + // Prepare output in cases of drain or up-sampling + if ((!frames || mOut.Rate() > mIn.Rate()) && + !aOutBuffer.SetLength( + FramesOutToSamples(ResampleRecipientFrames(frames)))) { + MOZ_ALWAYS_TRUE(aOutBuffer.SetLength(0)); + return 0; + } + if (!frames) { + frames = DrainResampler(aOutBuffer.Data()); + } else { + frames = ResampleAudio(aOutBuffer.Data(), aInBuffer, frames); + } + // Update with the actual buffer length + MOZ_ALWAYS_TRUE(aOutBuffer.SetLength(FramesOutToSamples(frames))); + return frames; + } + + bool CanWorkInPlace() const; + bool CanReorderAudio() const { + return mIn.Layout().MappingTable(mOut.Layout()); + } + static bool CanConvert(const AudioConfig& aIn, const AudioConfig& aOut); + + const AudioConfig& InputConfig() const { return mIn; } + const AudioConfig& OutputConfig() const { return mOut; } + + private: + const AudioConfig mIn; + const AudioConfig mOut; + // mChannelOrderMap will be empty if we do not know how to proceed with this + // channel layout. + AutoTArray + mChannelOrderMap; + /** + * ProcessInternal + * Parameters: + * aOut : destination buffer where converted samples will be copied + * aIn : source buffer + * aSamples: number of frames in source buffer + * + * Return Value: number of frames converted or 0 if error + */ + size_t ProcessInternal(void* aOut, const void* aIn, size_t aFrames); + void ReOrderInterleavedChannels(void* aOut, const void* aIn, + size_t aFrames) const; + size_t DownmixAudio(void* aOut, const void* aIn, size_t aFrames) const; + size_t UpmixAudio(void* aOut, const void* aIn, size_t aFrames) const; + + size_t FramesOutToSamples(size_t aFrames) const; + size_t SamplesInToFrames(size_t aSamples) const; + size_t FramesOutToBytes(size_t aFrames) const; + + // Resampler context. 
+ SpeexResamplerState* mResampler; + size_t ResampleAudio(void* aOut, const void* aIn, size_t aFrames); + size_t ResampleRecipientFrames(size_t aFrames) const; + void RecreateResampler(); + size_t DrainResampler(void* aOut); +}; + +} // namespace mozilla + +#endif /* AudioConverter_h */ diff --git a/dom/media/AudioDeviceInfo.cpp b/dom/media/AudioDeviceInfo.cpp new file mode 100644 index 0000000000..b37efafa32 --- /dev/null +++ b/dom/media/AudioDeviceInfo.cpp @@ -0,0 +1,165 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioDeviceInfo.h" + +NS_IMPL_ISUPPORTS(AudioDeviceInfo, nsIAudioDeviceInfo) + +using namespace mozilla; +using namespace mozilla::CubebUtils; + +AudioDeviceInfo::AudioDeviceInfo(cubeb_device_info* aInfo) + : AudioDeviceInfo(aInfo->devid, NS_ConvertUTF8toUTF16(aInfo->friendly_name), + NS_ConvertUTF8toUTF16(aInfo->group_id), + NS_ConvertUTF8toUTF16(aInfo->vendor_name), aInfo->type, + aInfo->state, aInfo->preferred, aInfo->format, + aInfo->default_format, aInfo->max_channels, + aInfo->default_rate, aInfo->max_rate, aInfo->min_rate, + aInfo->latency_lo, aInfo->latency_hi) {} + +AudioDeviceInfo::AudioDeviceInfo( + AudioDeviceID aID, const nsAString& aName, const nsAString& aGroupId, + const nsAString& aVendor, uint16_t aType, uint16_t aState, + uint16_t aPreferred, uint16_t aSupportedFormat, uint16_t aDefaultFormat, + uint32_t aMaxChannels, uint32_t aDefaultRate, uint32_t aMaxRate, + uint32_t aMinRate, uint32_t aMaxLatency, uint32_t aMinLatency) + : mDeviceId(aID), + mName(aName), + mGroupId(aGroupId), + mVendor(aVendor), + mType(aType), + mState(aState), + mPreferred(aPreferred), + mSupportedFormat(aSupportedFormat), + mDefaultFormat(aDefaultFormat), + mMaxChannels(aMaxChannels), + mDefaultRate(aDefaultRate), + mMaxRate(aMaxRate), + mMinRate(aMinRate), + mMaxLatency(aMaxLatency), + mMinLatency(aMinLatency) { + MOZ_ASSERT( + mType == TYPE_UNKNOWN || mType == TYPE_INPUT || mType == TYPE_OUTPUT, + "Wrong type"); + MOZ_ASSERT(mState == STATE_DISABLED || mState == STATE_UNPLUGGED || + mState == STATE_ENABLED, + "Wrong state"); + MOZ_ASSERT( + mPreferred == PREF_NONE || mPreferred == PREF_ALL || + mPreferred & (PREF_MULTIMEDIA | PREF_VOICE | PREF_NOTIFICATION), + "Wrong preferred value"); + MOZ_ASSERT(mSupportedFormat & (FMT_S16LE | FMT_S16BE | FMT_F32LE | FMT_F32BE), + "Wrong supported format"); + MOZ_ASSERT(mDefaultFormat == FMT_S16LE || mDefaultFormat == FMT_S16BE || + mDefaultFormat == FMT_F32LE || mDefaultFormat == FMT_F32BE, + "Wrong default format"); +} + +AudioDeviceID AudioDeviceInfo::DeviceID() const { return mDeviceId; } +const nsString& AudioDeviceInfo::Name() const { return mName; } +uint32_t AudioDeviceInfo::MaxChannels() const { return mMaxChannels; } +uint32_t AudioDeviceInfo::Type() const { return mType; } +uint32_t AudioDeviceInfo::State() const { return mState; } +const nsString& AudioDeviceInfo::GroupID() const { return mGroupId; } + +bool AudioDeviceInfo::Preferred() const { return mPreferred; } + +/* readonly attribute DOMString name; */ +NS_IMETHODIMP +AudioDeviceInfo::GetName(nsAString& aName) { + aName = mName; + return NS_OK; +} + +/* readonly attribute DOMString groupId; */ +NS_IMETHODIMP +AudioDeviceInfo::GetGroupId(nsAString& aGroupId) { + aGroupId = mGroupId; + return NS_OK; +} + +/* readonly 
attribute DOMString vendor; */ +NS_IMETHODIMP +AudioDeviceInfo::GetVendor(nsAString& aVendor) { + aVendor = mVendor; + return NS_OK; +} + +/* readonly attribute unsigned short type; */ +NS_IMETHODIMP +AudioDeviceInfo::GetType(uint16_t* aType) { + *aType = mType; + return NS_OK; +} + +/* readonly attribute unsigned short state; */ +NS_IMETHODIMP +AudioDeviceInfo::GetState(uint16_t* aState) { + *aState = mState; + return NS_OK; +} + +/* readonly attribute unsigned short preferred; */ +NS_IMETHODIMP +AudioDeviceInfo::GetPreferred(uint16_t* aPreferred) { + *aPreferred = mPreferred; + return NS_OK; +} + +/* readonly attribute unsigned short supportedFormat; */ +NS_IMETHODIMP +AudioDeviceInfo::GetSupportedFormat(uint16_t* aSupportedFormat) { + *aSupportedFormat = mSupportedFormat; + return NS_OK; +} + +/* readonly attribute unsigned short defaultFormat; */ +NS_IMETHODIMP +AudioDeviceInfo::GetDefaultFormat(uint16_t* aDefaultFormat) { + *aDefaultFormat = mDefaultFormat; + return NS_OK; +} + +/* readonly attribute unsigned long maxChannels; */ +NS_IMETHODIMP +AudioDeviceInfo::GetMaxChannels(uint32_t* aMaxChannels) { + *aMaxChannels = mMaxChannels; + return NS_OK; +} + +/* readonly attribute unsigned long defaultRate; */ +NS_IMETHODIMP +AudioDeviceInfo::GetDefaultRate(uint32_t* aDefaultRate) { + *aDefaultRate = mDefaultRate; + return NS_OK; +} + +/* readonly attribute unsigned long maxRate; */ +NS_IMETHODIMP +AudioDeviceInfo::GetMaxRate(uint32_t* aMaxRate) { + *aMaxRate = mMaxRate; + return NS_OK; +} + +/* readonly attribute unsigned long minRate; */ +NS_IMETHODIMP +AudioDeviceInfo::GetMinRate(uint32_t* aMinRate) { + *aMinRate = mMinRate; + return NS_OK; +} + +/* readonly attribute unsigned long maxLatency; */ +NS_IMETHODIMP +AudioDeviceInfo::GetMaxLatency(uint32_t* aMaxLatency) { + *aMaxLatency = mMaxLatency; + return NS_OK; +} + +/* readonly attribute unsigned long minLatency; */ +NS_IMETHODIMP +AudioDeviceInfo::GetMinLatency(uint32_t* aMinLatency) { + *aMinLatency = mMinLatency; + return NS_OK; +} diff --git a/dom/media/AudioDeviceInfo.h b/dom/media/AudioDeviceInfo.h new file mode 100644 index 0000000000..f08f8681c5 --- /dev/null +++ b/dom/media/AudioDeviceInfo.h @@ -0,0 +1,59 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZILLA_AudioDeviceInfo_H +#define MOZILLA_AudioDeviceInfo_H + +#include "nsIAudioDeviceInfo.h" +#include "CubebUtils.h" +#include "mozilla/Maybe.h" + +// This is mapped to the cubeb_device_info. 
+class AudioDeviceInfo final : public nsIAudioDeviceInfo { + public: + NS_DECL_THREADSAFE_ISUPPORTS + NS_DECL_NSIAUDIODEVICEINFO + + using AudioDeviceID = mozilla::CubebUtils::AudioDeviceID; + + AudioDeviceInfo(const AudioDeviceID aID, const nsAString& aName, + const nsAString& aGroupId, const nsAString& aVendor, + uint16_t aType, uint16_t aState, uint16_t aPreferred, + uint16_t aSupportedFormat, uint16_t aDefaultFormat, + uint32_t aMaxChannels, uint32_t aDefaultRate, + uint32_t aMaxRate, uint32_t aMinRate, uint32_t aMaxLatency, + uint32_t aMinLatency); + explicit AudioDeviceInfo(cubeb_device_info* aInfo); + + AudioDeviceID DeviceID() const; + const nsString& Name() const; + uint32_t DefaultRate() const { return mDefaultRate; } + uint32_t MaxChannels() const; + uint32_t Type() const; + uint32_t State() const; + const nsString& GroupID() const; + bool Preferred() const; + + private: + virtual ~AudioDeviceInfo() = default; + + const AudioDeviceID mDeviceId; + const nsString mName; + const nsString mGroupId; + const nsString mVendor; + const uint16_t mType; + const uint16_t mState; + const uint16_t mPreferred; + const uint16_t mSupportedFormat; + const uint16_t mDefaultFormat; + const uint32_t mMaxChannels; + const uint32_t mDefaultRate; + const uint32_t mMaxRate; + const uint32_t mMinRate; + const uint32_t mMaxLatency; + const uint32_t mMinLatency; +}; + +#endif // MOZILLA_AudioDeviceInfo_H diff --git a/dom/media/AudioInputSource.cpp b/dom/media/AudioInputSource.cpp new file mode 100644 index 0000000000..1ba2d81938 --- /dev/null +++ b/dom/media/AudioInputSource.cpp @@ -0,0 +1,241 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +#include "AudioInputSource.h" + +#include "CallbackThreadRegistry.h" +#include "GraphDriver.h" +#include "Tracing.h" + +namespace mozilla { + +extern mozilla::LazyLogModule gMediaTrackGraphLog; + +#ifdef LOG_INTERNAL +# undef LOG_INTERNAL +#endif // LOG_INTERNAL +#define LOG_INTERNAL(level, msg, ...) \ + MOZ_LOG(gMediaTrackGraphLog, LogLevel::level, (msg, ##__VA_ARGS__)) + +#ifdef LOG +# undef LOG +#endif // LOG +#define LOG(msg, ...) LOG_INTERNAL(Debug, msg, ##__VA_ARGS__) + +#ifdef LOGW +# undef LOGW +#endif // LOGW +#define LOGW(msg, ...) LOG_INTERNAL(Warning, msg, ##__VA_ARGS__) + +#ifdef LOGE +# undef LOGE +#endif // LOGE +#define LOGE(msg, ...) LOG_INTERNAL(Error, msg, ##__VA_ARGS__) + +#ifdef LOGV +# undef LOGV +#endif // LOGV +#define LOGV(msg, ...) 
LOG_INTERNAL(Verbose, msg, ##__VA_ARGS__) + +AudioInputSource::AudioInputSource(RefPtr&& aListener, + Id aSourceId, + CubebUtils::AudioDeviceID aDeviceId, + uint32_t aChannelCount, bool aIsVoice, + const PrincipalHandle& aPrincipalHandle, + TrackRate aSourceRate, TrackRate aTargetRate) + : mId(aSourceId), + mDeviceId(aDeviceId), + mChannelCount(aChannelCount), + mRate(aSourceRate), + mIsVoice(aIsVoice), + mPrincipalHandle(aPrincipalHandle), + mSandboxed(CubebUtils::SandboxEnabled()), + mAudioThreadId(ProfilerThreadId{}), + mEventListener(std::move(aListener)), + mTaskThread(CUBEB_TASK_THREAD), + mDriftCorrector(static_cast(aSourceRate), + static_cast(aTargetRate), aPrincipalHandle) { + MOZ_ASSERT(mChannelCount > 0); + MOZ_ASSERT(mEventListener); +} + +void AudioInputSource::Start() { + // This is called on MediaTrackGraph's graph thread, which can be the cubeb + // stream's callback thread. Running cubeb operations within cubeb stream + // callback thread can cause the deadlock on Linux, so we dispatch those + // operations to the task thread. + MOZ_ASSERT(mTaskThread); + + LOG("AudioInputSource %p, start", this); + MOZ_ALWAYS_SUCCEEDS(mTaskThread->Dispatch( + NS_NewRunnableFunction(__func__, [self = RefPtr(this)]() mutable { + self->mStream = CubebInputStream::Create( + self->mDeviceId, self->mChannelCount, + static_cast(self->mRate), self->mIsVoice, self.get()); + if (!self->mStream) { + LOGE("AudioInputSource %p, cannot create an audio input stream!", + self.get()); + return; + } + + if (uint32_t latency = 0; + self->mStream->Latency(&latency) == CUBEB_OK) { + Data data(LatencyChangeData{media::TimeUnit(latency, self->mRate)}); + if (self->mSPSCQueue.Enqueue(data) == 0) { + LOGE("AudioInputSource %p, failed to enqueue latency change", + self.get()); + } + } + if (int r = self->mStream->Start(); r != CUBEB_OK) { + LOGE( + "AudioInputSource %p, cannot start its audio input stream! The " + "stream is destroyed directly!", + self.get()); + self->mStream = nullptr; + } + }))); +} + +void AudioInputSource::Stop() { + // This is called on MediaTrackGraph's graph thread, which can be the cubeb + // stream's callback thread. Running cubeb operations within cubeb stream + // callback thread can cause the deadlock on Linux, so we dispatch those + // operations to the task thread. + MOZ_ASSERT(mTaskThread); + + LOG("AudioInputSource %p, stop", this); + MOZ_ALWAYS_SUCCEEDS(mTaskThread->Dispatch( + NS_NewRunnableFunction(__func__, [self = RefPtr(this)]() mutable { + if (!self->mStream) { + LOGE("AudioInputSource %p, has no audio input stream to stop!", + self.get()); + return; + } + if (int r = self->mStream->Stop(); r != CUBEB_OK) { + LOGE( + "AudioInputSource %p, cannot stop its audio input stream! The " + "stream is going to be destroyed forcefully", + self.get()); + } + self->mStream = nullptr; + }))); +} + +AudioSegment AudioInputSource::GetAudioSegment(TrackTime aDuration, + Consumer aConsumer) { + if (aConsumer == Consumer::Changed) { + // Reset queue's consumer thread to acquire its mReadIndex on the new + // thread. 
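Start() and Stop() above never call into cubeb from the invoking thread; they post a task that holds a strong reference to the source, and a single task thread performs the create/start/stop calls. A rough standalone sketch of that dispatch pattern; Worker and Source below are invented stand-ins, not the Gecko or cubeb classes:

// Serial worker thread plus a task that keeps its target alive via a strong
// reference, so stream operations never run on the caller's thread.
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

class Worker {  // one serial task thread
 public:
  Worker() : mThread([this] { Run(); }) {}
  ~Worker() {
    Post({});   // empty task signals shutdown
    mThread.join();
  }
  void Post(std::function<void()> task) {
    { std::lock_guard<std::mutex> l(mMutex); mTasks.push(std::move(task)); }
    mCond.notify_one();
  }

 private:
  void Run() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> l(mMutex);
        mCond.wait(l, [this] { return !mTasks.empty(); });
        task = std::move(mTasks.front());
        mTasks.pop();
      }
      if (!task) return;
      task();
    }
  }
  std::mutex mMutex;
  std::condition_variable mCond;
  std::queue<std::function<void()>> mTasks;
  std::thread mThread;
};

struct Source : std::enable_shared_from_this<Source> {
  bool started = false;
  void Start(Worker& w) {
    // Capture a strong reference so the source outlives the posted task.
    w.Post([self = shared_from_this()] {
      self->started = true;  // create + start the stream here
    });
  }
};

int main() {
  Worker worker;
  auto source = std::make_shared<Source>();
  source->Start(worker);
}  // Worker's destructor drains the queue and joins before main returns.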
+ mSPSCQueue.ResetConsumerThreadId(); + } + + AudioSegment raw; + Maybe latency; + while (mSPSCQueue.AvailableRead()) { + Data data; + DebugOnly reads = mSPSCQueue.Dequeue(&data, 1); + MOZ_ASSERT(reads); + MOZ_ASSERT(!data.is()); + if (data.is()) { + raw.AppendAndConsumeChunk(std::move(data.as())); + } else if (data.is()) { + latency = Some(data.as().mLatency); + } + } + + if (latency) { + mDriftCorrector.SetSourceLatency(*latency); + } + return mDriftCorrector.RequestFrames(raw, static_cast(aDuration)); +} + +long AudioInputSource::DataCallback(const void* aBuffer, long aFrames) { + TRACE_AUDIO_CALLBACK_BUDGET("AudioInputSource real-time budget", aFrames, + mRate); + TRACE("AudioInputSource::DataCallback"); + + const AudioDataValue* source = + reinterpret_cast(aBuffer); + + AudioChunk c = AudioChunk::FromInterleavedBuffer( + source, static_cast(aFrames), mChannelCount, mPrincipalHandle); + + // Reset queue's producer to avoid hitting the assertion for checking the + // consistency of mSPSCQueue's mProducerId in Enqueue. This can happen when: + // 1) cubeb stream is reinitialized behind the scenes for the device changed + // events, e.g., users plug/unplug a TRRS mic into/from the built-in jack port + // of some old macbooks. + // 2) After Start() to Stop() cycle finishes, user call Start() again. + if (CheckThreadIdChanged()) { + mSPSCQueue.ResetProducerThreadId(); + if (!mSandboxed) { + CallbackThreadRegistry::Get()->Register(mAudioThreadId, + "NativeAudioCallback"); + } + } + + Data data(c); + int writes = mSPSCQueue.Enqueue(data); + if (writes == 0) { + LOGW("AudioInputSource %p, buffer is full. Dropping %ld frames", this, + aFrames); + } else { + LOGV("AudioInputSource %p, enqueue %ld frames (%d AudioChunks)", this, + aFrames, writes); + } + return aFrames; +} + +void AudioInputSource::StateCallback(cubeb_state aState) { + EventListener::State state; + if (aState == CUBEB_STATE_STARTED) { + LOG("AudioInputSource %p, stream started", this); + state = EventListener::State::Started; + } else if (aState == CUBEB_STATE_STOPPED) { + LOG("AudioInputSource %p, stream stopped", this); + state = EventListener::State::Stopped; + } else if (aState == CUBEB_STATE_DRAINED) { + LOG("AudioInputSource %p, stream is drained", this); + state = EventListener::State::Drained; + } else { + MOZ_ASSERT(aState == CUBEB_STATE_ERROR); + LOG("AudioInputSource %p, error happend", this); + state = EventListener::State::Error; + } + // This can be called on any thread, so we forward the event to main thread + // first. + NS_DispatchToMainThread( + NS_NewRunnableFunction(__func__, [self = RefPtr(this), s = state] { + self->mEventListener->AudioStateCallback(self->mId, s); + })); +} + +void AudioInputSource::DeviceChangedCallback() { + LOG("AudioInputSource %p, device changed", this); + // This can be called on any thread, so we forward the event to main thread + // first. 
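The audio callback above only ever enqueues into mSPSCQueue and the graph-side reader only dequeues, so the real-time thread never blocks and overflow is handled by dropping data. A toy single-producer/single-consumer queue in the same spirit; it is illustrative only, not the Gecko SPSCQueue:

// Lock-free SPSC ring with two monotonically increasing atomic counters.
#include <array>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <optional>

template <typename T, size_t N>  // N must be a power of two
class SpscQueue {
 public:
  bool Enqueue(const T& v) {      // producer thread only
    size_t w = mWrite.load(std::memory_order_relaxed);
    if (w - mRead.load(std::memory_order_acquire) == N) {
      return false;               // full: caller drops the data
    }
    mBuf[w & (N - 1)] = v;
    mWrite.store(w + 1, std::memory_order_release);
    return true;
  }
  std::optional<T> Dequeue() {    // consumer thread only
    size_t r = mRead.load(std::memory_order_relaxed);
    if (r == mWrite.load(std::memory_order_acquire)) {
      return std::nullopt;        // empty
    }
    T v = mBuf[r & (N - 1)];
    mRead.store(r + 1, std::memory_order_release);
    return v;
  }

 private:
  std::array<T, N> mBuf{};
  std::atomic<size_t> mWrite{0};
  std::atomic<size_t> mRead{0};
};

int main() {
  SpscQueue<int, 4> q;
  assert(q.Enqueue(1) && q.Enqueue(2));
  assert(*q.Dequeue() == 1 && *q.Dequeue() == 2);
  assert(!q.Dequeue());  // empty again
}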
+ NS_DispatchToMainThread( + NS_NewRunnableFunction(__func__, [self = RefPtr(this)] { + self->mEventListener->AudioDeviceChanged(self->mId); + })); +} + +bool AudioInputSource::CheckThreadIdChanged() { + ProfilerThreadId id = profiler_current_thread_id(); + if (id != mAudioThreadId) { + mAudioThreadId = id; + return true; + } + return false; +} + +#undef LOG_INTERNAL +#undef LOG +#undef LOGW +#undef LOGE +#undef LOGV + +} // namespace mozilla diff --git a/dom/media/AudioInputSource.h b/dom/media/AudioInputSource.h new file mode 100644 index 0000000000..b44a3ae43a --- /dev/null +++ b/dom/media/AudioInputSource.h @@ -0,0 +1,141 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +#ifndef DOM_MEDIA_AudioInputSource_H_ +#define DOM_MEDIA_AudioInputSource_H_ + +#include "AudioDriftCorrection.h" +#include "AudioSegment.h" +#include "CubebInputStream.h" +#include "CubebUtils.h" +#include "TimeUnits.h" +#include "mozilla/ProfilerUtils.h" +#include "mozilla/RefPtr.h" +#include "mozilla/SPSCQueue.h" +#include "mozilla/SharedThreadPool.h" +#include "mozilla/Variant.h" + +namespace mozilla { + +// This is an interface to operate an input-only audio stream within a +// cubeb-task thread on a specific thread. Once the class instance is created, +// all its operations must be called on the same thread. +// +// The audio data is periodically produced by the underlying audio stream on the +// stream's callback thread, and can be safely read by GetAudioSegment() on a +// specific thread. +class AudioInputSource : public CubebInputStream::Listener { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInputSource, override); + + using Id = uint32_t; + + class EventListener { + public: + NS_INLINE_DECL_PURE_VIRTUAL_REFCOUNTING; + + // This two events will be fired on main thread. + virtual void AudioDeviceChanged(Id aId) = 0; + enum class State { Started, Stopped, Drained, Error }; + virtual void AudioStateCallback(Id aId, State aState) = 0; + + protected: + EventListener() = default; + virtual ~EventListener() = default; + }; + + AudioInputSource(RefPtr&& aListener, Id aSourceId, + CubebUtils::AudioDeviceID aDeviceId, uint32_t aChannelCount, + bool aIsVoice, const PrincipalHandle& aPrincipalHandle, + TrackRate aSourceRate, TrackRate aTargetRate); + + // The following functions should always be called in the same thread: They + // are always run on MediaTrackGraph's graph thread. + // Starts producing audio data. + void Start(); + // Stops producing audio data. + void Stop(); + // Returns the AudioSegment with aDuration of data inside. + // The graph thread can change behind the scene, e.g., cubeb stream reinit due + // to default output device changed). When this happens, we need to notify + // mSPSCQueue to change its data consumer. + enum class Consumer { Same, Changed }; + AudioSegment GetAudioSegment(TrackTime aDuration, Consumer aConsumer); + + // CubebInputStream::Listener interface: These are used only for the + // underlying audio stream. No user should call these APIs. + // This will be fired on audio callback thread. + long DataCallback(const void* aBuffer, long aFrames) override; + // This can be fired on any thread. + void StateCallback(cubeb_state aState) override; + // This can be fired on any thread. 
+ void DeviceChangedCallback() override; + + // Any threads: + // The unique id of this source. + const Id mId; + // The id of this audio device producing the data. + const CubebUtils::AudioDeviceID mDeviceId; + // The channel count of audio data produced. + const uint32_t mChannelCount; + // The sample rate of the audio data produced. + const TrackRate mRate; + // Indicate whether the audio stream is for voice or not. + const bool mIsVoice; + // The principal of the audio data produced. + const PrincipalHandle mPrincipalHandle; + + protected: + ~AudioInputSource() = default; + + private: + // Underlying audio thread only. + bool CheckThreadIdChanged(); + + // Any thread. + const bool mSandboxed; + + // Thread id of the underlying audio thread. Underlying audio thread only. + std::atomic mAudioThreadId; + + // Forward the underlying event from main thread. + const RefPtr mEventListener; + + // Shared thread pool containing only one thread for cubeb operations. + // The cubeb operations: Start() and Stop() will be called on + // MediaTrackGraph's graph thread, which can be the cubeb stream's callback + // thread. Running cubeb operations within cubeb stream callback thread can + // cause the deadlock on Linux, so we dispatch those operations to the task + // thread. + const RefPtr mTaskThread; + + // Correct the drift between the underlying audio stream and its reader. + AudioDriftCorrection mDriftCorrector; + + // An input-only cubeb stream operated within mTaskThread. + UniquePtr mStream; + + struct Empty {}; + + struct LatencyChangeData { + media::TimeUnit mLatency; + }; + + struct Data : public Variant { + Data() : Variant(AsVariant(Empty())) {} + explicit Data(AudioChunk aChunk) : Variant(AsVariant(std::move(aChunk))) {} + explicit Data(LatencyChangeData aLatencyChangeData) + : Variant(AsVariant(std::move(aLatencyChangeData))) {} + }; + + // A single-producer-single-consumer lock-free queue whose data is produced by + // the audio callback thread and consumed by AudioInputSource's data reader. + SPSCQueue mSPSCQueue{30}; +}; + +} // namespace mozilla + +#endif // DOM_MEDIA_AudioInputSource_H_ diff --git a/dom/media/AudioMixer.h b/dom/media/AudioMixer.h new file mode 100644 index 0000000000..bd8c02a828 --- /dev/null +++ b/dom/media/AudioMixer.h @@ -0,0 +1,112 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZILLA_AUDIOMIXER_H_ +#define MOZILLA_AUDIOMIXER_H_ + +#include "AudioSampleFormat.h" +#include "AudioSegment.h" +#include "AudioStream.h" +#include "nsTArray.h" +#include "mozilla/NotNull.h" +#include "mozilla/PodOperations.h" + +namespace mozilla { + +struct MixerCallbackReceiver { + // MixerCallback MAY modify aMixedBuffer but MUST clear + // aMixedBuffer->mBuffer if its data is to live longer than the duration of + // the callback. + virtual void MixerCallback(AudioChunk* aMixedBuffer, + uint32_t aSampleRate) = 0; +}; +/** + * This class mixes multiple streams of audio together to output a single audio + * stream. + * + * AudioMixer::Mix is to be called repeatedly with buffers that have the same + * length, sample rate, sample format and channel count. This class works with + * planar buffers. + * + * When all the tracks have been mixed, calling MixedChunk() will provide + * a buffer containing the mixed audio data. 
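Mix() below accumulates every track of a cycle into one shared buffer: the first call fixes the frame/channel geometry and zeroes the accumulator, later calls add their samples in, and MixedChunk() hands the sum out. A toy model of that accumulation; the class and the exactly representable float values are invented so the asserts hold:

// Sum-into-accumulator mixing across several tracks of equal geometry.
#include <cassert>
#include <cstddef>
#include <vector>

class ToyMixer {
 public:
  void Mix(const float* samples, size_t channels, size_t frames) {
    if (mAcc.empty()) {
      mAcc.assign(channels * frames, 0.0f);  // first track of this cycle
    }
    assert(mAcc.size() == channels * frames);
    for (size_t i = 0; i < mAcc.size(); i++) {
      mAcc[i] += samples[i];
    }
  }
  const std::vector<float>& MixedBlock() const { return mAcc; }
  void StartMixing() { mAcc.clear(); }

 private:
  std::vector<float> mAcc;
};

int main() {
  ToyMixer mixer;
  const float trackA[4] = {0.25f, 0.5f, -0.25f, 1.0f};
  const float trackB[4] = {0.25f, 0.25f, 0.25f, 0.25f};
  mixer.Mix(trackA, 2, 2);
  mixer.Mix(trackB, 2, 2);
  assert(mixer.MixedBlock()[0] == 0.5f && mixer.MixedBlock()[3] == 1.25f);
}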
+ * + * This class is not thread safe. + */ +class AudioMixer { + public: + AudioMixer() { mChunk.mBufferFormat = AUDIO_OUTPUT_FORMAT; } + + ~AudioMixer() = default; + + void StartMixing() { + mChunk.mDuration = 0; + mSampleRate = 0; + } + + /* Get the data from the mixer. This is supposed to be called when all the + * tracks have been mixed in. The caller MAY modify the chunk but MUST clear + * mBuffer if its data needs to survive the next call to Mix(). */ + AudioChunk* MixedChunk() { + MOZ_ASSERT(mSampleRate, "Mix not called for this cycle?"); + mSampleRate = 0; + return &mChunk; + }; + + /* Add a buffer to the mix. The buffer can be null if there's nothing to mix + * but the callback is still needed. */ + void Mix(AudioDataValue* aSamples, uint32_t aChannels, uint32_t aFrames, + uint32_t aSampleRate) { + if (!mChunk.mDuration) { + mChunk.mDuration = aFrames; + MOZ_ASSERT(aChannels > 0); + mChunk.mChannelData.SetLength(aChannels); + mSampleRate = aSampleRate; + EnsureCapacityAndSilence(); + } + + MOZ_ASSERT(aFrames == mChunk.mDuration); + MOZ_ASSERT(aChannels == mChunk.ChannelCount()); + MOZ_ASSERT(aSampleRate == mSampleRate); + + if (!aSamples) { + return; + } + + for (uint32_t i = 0; i < aFrames * aChannels; i++) { + mChunk.ChannelDataForWrite(0)[i] += aSamples[i]; + } + } + + private: + void EnsureCapacityAndSilence() { + uint32_t sampleCount = mChunk.mDuration * mChunk.ChannelCount(); + if (!mChunk.mBuffer || sampleCount > mSampleCapacity) { + CheckedInt bufferSize(sizeof(AudioDataValue)); + bufferSize *= sampleCount; + mChunk.mBuffer = SharedBuffer::Create(bufferSize); + mSampleCapacity = sampleCount; + } + MOZ_ASSERT(!mChunk.mBuffer->IsShared()); + mChunk.mChannelData[0] = + static_cast(mChunk.mBuffer.get())->Data(); + for (size_t i = 1; i < mChunk.ChannelCount(); ++i) { + mChunk.mChannelData[i] = + mChunk.ChannelData()[0] + i * mChunk.mDuration; + } + PodZero(mChunk.ChannelDataForWrite(0), sampleCount); + } + + /* Buffer containing the mixed audio data. */ + AudioChunk mChunk; + /* Size allocated for mChunk.mBuffer. */ + uint32_t mSampleCapacity = 0; + /* Sample rate the of the mixed data. */ + uint32_t mSampleRate = 0; +}; + +} // namespace mozilla + +#endif // MOZILLA_AUDIOMIXER_H_ diff --git a/dom/media/AudioPacketizer.h b/dom/media/AudioPacketizer.h new file mode 100644 index 0000000000..8df04c0c5c --- /dev/null +++ b/dom/media/AudioPacketizer.h @@ -0,0 +1,174 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef AudioPacketizer_h_ +#define AudioPacketizer_h_ + +#include +#include +#include +#include + +// Enable this to warn when `Output` has been called but not enough data was +// buffered. +// #define LOG_PACKETIZER_UNDERRUN + +namespace mozilla { +/** + * This class takes arbitrary input data, and returns packets of a specific + * size. In the process, it can convert audio samples from 16bit integers to + * float (or vice-versa). + * + * Input and output, as well as length units in the public interface are + * interleaved frames. + * + * Allocations of output buffer can be performed by this class. Buffers can + * simply be delete-d. This is because packets are intended to be sent off to + * non-gecko code using normal pointers/length pairs + * + * Alternatively, consumers can pass in a buffer in which the output is copied. 
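The packetizer can convert between 16-bit integer and float samples while repacketizing. A minimal sketch of one common scaling convention for that conversion; the exact rounding and clamping used by Gecko's ConvertAudioSamples may differ:

// int16 <-> float sample conversion with symmetric 1/32768 scaling.
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>

float S16ToFloat(int16_t s) { return static_cast<float>(s) / 32768.0f; }

int16_t FloatToS16(float f) {
  long v = std::lround(f * 32768.0f);
  return static_cast<int16_t>(std::clamp(v, -32768L, 32767L));
}

int main() {
  assert(S16ToFloat(-32768) == -1.0f);
  assert(FloatToS16(1.0f) == 32767);             // clipped: 32768 is not representable
  assert(FloatToS16(S16ToFloat(1234)) == 1234);  // round-trips exactly
}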
+ * The buffer needs to be large enough to store a packet worth of audio. + * + * The implementation uses a circular buffer using absolute virtual indices. + */ +template +class AudioPacketizer { + public: + AudioPacketizer(uint32_t aPacketSize, uint32_t aChannels) + : mPacketSize(aPacketSize), + mChannels(aChannels), + mReadIndex(0), + mWriteIndex(0), + // Start off with a single packet + mStorage(new InputType[aPacketSize * aChannels]), + mLength(aPacketSize * aChannels) { + MOZ_ASSERT(aPacketSize > 0 && aChannels > 0, + "The packet size and the number of channel should be strictly " + "positive"); + } + + void Input(const InputType* aFrames, uint32_t aFrameCount) { + uint32_t inputSamples = aFrameCount * mChannels; + // Need to grow the storage. This should rarely happen, if at all, once the + // array has the right size. + if (inputSamples > EmptySlots()) { + // Calls to Input and Output are roughtly interleaved + // (Input,Output,Input,Output, etc.), or balanced + // (Input,Input,Input,Output,Output,Output), so we update the buffer to + // the exact right size in order to not waste space. + uint32_t newLength = AvailableSamples() + inputSamples; + uint32_t toCopy = AvailableSamples(); + UniquePtr oldStorage = std::move(mStorage); + mStorage = mozilla::MakeUnique(newLength); + // Copy the old data at the beginning of the new storage. + if (WriteIndex() >= ReadIndex()) { + PodCopy(mStorage.get(), oldStorage.get() + ReadIndex(), + AvailableSamples()); + } else { + uint32_t firstPartLength = mLength - ReadIndex(); + uint32_t secondPartLength = AvailableSamples() - firstPartLength; + PodCopy(mStorage.get(), oldStorage.get() + ReadIndex(), + firstPartLength); + PodCopy(mStorage.get() + firstPartLength, oldStorage.get(), + secondPartLength); + } + mWriteIndex = toCopy; + mReadIndex = 0; + mLength = newLength; + } + + if (WriteIndex() + inputSamples <= mLength) { + PodCopy(mStorage.get() + WriteIndex(), aFrames, aFrameCount * mChannels); + } else { + uint32_t firstPartLength = mLength - WriteIndex(); + uint32_t secondPartLength = inputSamples - firstPartLength; + PodCopy(mStorage.get() + WriteIndex(), aFrames, firstPartLength); + PodCopy(mStorage.get(), aFrames + firstPartLength, secondPartLength); + } + + mWriteIndex += inputSamples; + } + + OutputType* Output() { + uint32_t samplesNeeded = mPacketSize * mChannels; + OutputType* out = new OutputType[samplesNeeded]; + + Output(out); + + return out; + } + + void Output(OutputType* aOutputBuffer) { + uint32_t samplesNeeded = mPacketSize * mChannels; + + // Under-run. Pad the end of the buffer with silence. 
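The packetizer above keeps absolute, monotonically growing read/write indices and maps them to physical slots with a modulo, so the fill level is simply the difference of the two counters and stays correct across wrap-around. A standalone illustration of that bookkeeping, with invented sizes:

// Absolute virtual indices: physical slot = index % length,
// available samples = write - read.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t length = 8;  // physical capacity in samples
  uint64_t readIndex = 0, writeIndex = 0;

  writeIndex += 6;  // wrote 6 samples
  readIndex += 4;   // consumed 4
  writeIndex += 5;  // wrote 5 more; the physical writes wrapped around

  assert(writeIndex - readIndex == 7);             // available samples
  assert(writeIndex % length == 3);                // physical write slot
  assert(readIndex % length == 4);                 // physical read slot
  assert(length - (writeIndex - readIndex) == 1);  // empty slots left
}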
+ if (AvailableSamples() < samplesNeeded) { +#ifdef LOG_PACKETIZER_UNDERRUN + char buf[256]; + snprintf(buf, 256, + "AudioPacketizer %p underrun: available: %u, needed: %u\n", this, + AvailableSamples(), samplesNeeded); + NS_WARNING(buf); +#endif + uint32_t zeros = samplesNeeded - AvailableSamples(); + PodZero(aOutputBuffer + AvailableSamples(), zeros); + samplesNeeded -= zeros; + } + if (ReadIndex() + samplesNeeded <= mLength) { + ConvertAudioSamples(mStorage.get() + ReadIndex(), + aOutputBuffer, samplesNeeded); + } else { + uint32_t firstPartLength = mLength - ReadIndex(); + uint32_t secondPartLength = samplesNeeded - firstPartLength; + ConvertAudioSamples( + mStorage.get() + ReadIndex(), aOutputBuffer, firstPartLength); + ConvertAudioSamples( + mStorage.get(), aOutputBuffer + firstPartLength, secondPartLength); + } + mReadIndex += samplesNeeded; + } + + void Clear() { + mReadIndex = 0; + mWriteIndex = 0; + } + + uint32_t PacketsAvailable() const { + return AvailableSamples() / mChannels / mPacketSize; + } + + uint32_t FramesAvailable() const { return AvailableSamples() / mChannels; } + + bool Empty() const { return mWriteIndex == mReadIndex; } + + bool Full() const { return mWriteIndex - mReadIndex == mLength; } + + // Size of one packet of audio, in frames + const uint32_t mPacketSize; + // Number of channels of the stream flowing through this packetizer + const uint32_t mChannels; + + private: + uint32_t ReadIndex() const { return mReadIndex % mLength; } + + uint32_t WriteIndex() const { return mWriteIndex % mLength; } + + uint32_t AvailableSamples() const { return mWriteIndex - mReadIndex; } + + uint32_t EmptySlots() const { return mLength - AvailableSamples(); } + + // Two virtual index into the buffer: the read position and the write + // position. + uint64_t mReadIndex; + uint64_t mWriteIndex; + // Storage for the samples + mozilla::UniquePtr mStorage; + // Length of the buffer, in samples + uint32_t mLength; +}; + +} // namespace mozilla + +#endif // AudioPacketizer_h_ diff --git a/dom/media/AudioRingBuffer.cpp b/dom/media/AudioRingBuffer.cpp new file mode 100644 index 0000000000..475de653b8 --- /dev/null +++ b/dom/media/AudioRingBuffer.cpp @@ -0,0 +1,606 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioRingBuffer.h" + +#include "MediaData.h" +#include "mozilla/Assertions.h" +#include "mozilla/Maybe.h" +#include "mozilla/PodOperations.h" + +namespace mozilla { + +/** + * RingBuffer is used to preallocate a buffer of a specific size in bytes and + * then to use it for writing and reading values without requiring re-allocation + * or memory moving. Note that re-allocations can happen if the length of the + * buffer is explicitly set to something larger than is already allocated. + * Also note that the total byte size of the buffer modulo the size of the + * chosen type must be zero. The RingBuffer has been created with audio sample + * values types in mind which are integer or float. However, it can be used with + * any trivial type. It is _not_ thread-safe! The constructor can be called on + * any thread but the reads and write must happen on the same thread, which can + * be different than the construction thread. 
+ */ +template +class RingBuffer final { + public: + explicit RingBuffer(AlignedByteBuffer&& aMemoryBuffer) + : mStorage(ConvertToSpan(aMemoryBuffer)), + mMemoryBuffer(std::move(aMemoryBuffer)) { + MOZ_ASSERT(std::is_trivial::value); + } + + /** + * Write `aSamples` number of zeros in the buffer, before any existing data. + */ + uint32_t PrependSilence(uint32_t aSamples) { + MOZ_ASSERT(aSamples); + return Prepend(Span(), aSamples); + } + + /** + * Write `aSamples` number of zeros in the buffer. + */ + uint32_t WriteSilence(uint32_t aSamples) { + MOZ_ASSERT(aSamples); + return Write(Span(), aSamples); + } + + /** + * Copy `aBuffer` to the RingBuffer. + */ + uint32_t Write(const Span& aBuffer) { + MOZ_ASSERT(!aBuffer.IsEmpty()); + return Write(aBuffer, aBuffer.Length()); + } + + private: + /** + * Copy `aSamples` number of elements from `aBuffer` to the beginning of the + * RingBuffer. If `aBuffer` is empty prepend `aSamples` of zeros. + */ + uint32_t Prepend(const Span& aBuffer, uint32_t aSamples) { + MOZ_ASSERT(aSamples > 0); + MOZ_ASSERT(aBuffer.IsEmpty() || aBuffer.Length() == aSamples); + + if (IsFull()) { + return 0; + } + + uint32_t toWrite = std::min(AvailableWrite(), aSamples); + uint32_t part2 = std::min(mReadIndex, toWrite); + uint32_t part1 = toWrite - part2; + + Span part2Buffer = mStorage.Subspan(mReadIndex - part2, part2); + Span part1Buffer = mStorage.Subspan(Capacity() - part1, part1); + + if (!aBuffer.IsEmpty()) { + Span fromPart1 = aBuffer.To(part1); + Span fromPart2 = aBuffer.Subspan(part1, part2); + + CopySpan(part1Buffer, fromPart1); + CopySpan(part2Buffer, fromPart2); + } else { + // aBuffer is empty, prepend zeros. + PodZero(part1Buffer.Elements(), part1Buffer.Length()); + PodZero(part2Buffer.Elements(), part2Buffer.Length()); + } + + mReadIndex = NextIndex(mReadIndex, Capacity() - toWrite); + + return toWrite; + } + + /** + * Copy `aSamples` number of elements from `aBuffer` to the RingBuffer. If + * `aBuffer` is empty append `aSamples` of zeros. + */ + uint32_t Write(const Span& aBuffer, uint32_t aSamples) { + MOZ_ASSERT(aSamples > 0); + MOZ_ASSERT(aBuffer.IsEmpty() || aBuffer.Length() == aSamples); + + if (IsFull()) { + return 0; + } + + uint32_t toWrite = std::min(AvailableWrite(), aSamples); + uint32_t part1 = std::min(Capacity() - mWriteIndex, toWrite); + uint32_t part2 = toWrite - part1; + + Span part1Buffer = mStorage.Subspan(mWriteIndex, part1); + Span part2Buffer = mStorage.To(part2); + + if (!aBuffer.IsEmpty()) { + Span fromPart1 = aBuffer.To(part1); + Span fromPart2 = aBuffer.Subspan(part1, part2); + + CopySpan(part1Buffer, fromPart1); + CopySpan(part2Buffer, fromPart2); + } else { + // The aBuffer is empty, append zeros. + PodZero(part1Buffer.Elements(), part1Buffer.Length()); + PodZero(part2Buffer.Elements(), part2Buffer.Length()); + } + + mWriteIndex = NextIndex(mWriteIndex, toWrite); + + return toWrite; + } + + public: + /** + * Copy `aSamples` number of elements from `aBuffer` to the RingBuffer. The + * `aBuffer` does not change. 
+ */ + uint32_t Write(const RingBuffer& aBuffer, uint32_t aSamples) { + MOZ_ASSERT(aSamples); + + if (IsFull()) { + return 0; + } + + uint32_t toWriteThis = std::min(AvailableWrite(), aSamples); + uint32_t toReadThat = std::min(aBuffer.AvailableRead(), toWriteThis); + uint32_t part1 = + std::min(aBuffer.Capacity() - aBuffer.mReadIndex, toReadThat); + uint32_t part2 = toReadThat - part1; + + Span part1Buffer = aBuffer.mStorage.Subspan(aBuffer.mReadIndex, part1); + DebugOnly ret = Write(part1Buffer); + MOZ_ASSERT(ret == part1); + if (part2) { + Span part2Buffer = aBuffer.mStorage.To(part2); + ret = Write(part2Buffer); + MOZ_ASSERT(ret == part2); + } + + return toReadThat; + } + + /** + * Copy `aBuffer.Length()` number of elements from RingBuffer to `aBuffer`. + */ + uint32_t Read(const Span& aBuffer) { + MOZ_ASSERT(!aBuffer.IsEmpty()); + MOZ_ASSERT(aBuffer.size() <= std::numeric_limits::max()); + + if (IsEmpty()) { + return 0; + } + + uint32_t toRead = std::min(AvailableRead(), aBuffer.Length()); + uint32_t part1 = std::min(Capacity() - mReadIndex, toRead); + uint32_t part2 = toRead - part1; + + Span part1Buffer = mStorage.Subspan(mReadIndex, part1); + Span part2Buffer = mStorage.To(part2); + + Span toPart1 = aBuffer.To(part1); + Span toPart2 = aBuffer.Subspan(part1, part2); + + CopySpan(toPart1, part1Buffer); + CopySpan(toPart2, part2Buffer); + + mReadIndex = NextIndex(mReadIndex, toRead); + + return toRead; + } + + /** + * Provide `aCallable` that will be called with the internal linear read + * buffers and the number of samples available for reading. The `aCallable` + * will be called at most 2 times. The `aCallable` must return the number of + * samples that have been actually read. If that number is smaller than the + * available number of samples, provided in the argument, the `aCallable` will + * not be called again. The RingBuffer's available read samples will be + * decreased by the number returned from the `aCallable`. + * + * The important aspects of this method are that first, it makes it possible + * to avoid extra copies to an intermediates buffer, and second, each buffer + * provided to `aCallable is a linear piece of memory which can be used + * directly to a resampler for example. + * + * In general, the problem with ring buffers is that they cannot provide one + * linear chunk of memory so extra copies, to a linear buffer, are often + * needed. This method bridge that gap by breaking the ring buffer's + * internal read memory into linear pieces and making it available through + * the `aCallable`. In the body of the `aCallable` those buffers can be used + * directly without any copy or intermediate steps. + */ + uint32_t ReadNoCopy( + std::function&)>&& aCallable) { + if (IsEmpty()) { + return 0; + } + + uint32_t part1 = std::min(Capacity() - mReadIndex, AvailableRead()); + uint32_t part2 = AvailableRead() - part1; + + Span part1Buffer = mStorage.Subspan(mReadIndex, part1); + uint32_t toRead = aCallable(part1Buffer); + MOZ_ASSERT(toRead <= part1); + + if (toRead == part1 && part2) { + Span part2Buffer = mStorage.To(part2); + toRead += aCallable(part2Buffer); + MOZ_ASSERT(toRead <= part1 + part2); + } + + mReadIndex = NextIndex(mReadIndex, toRead); + + return toRead; + } + + /** + * Remove the next `aSamples` number of samples from the ring buffer. 
+ */ + uint32_t Discard(uint32_t aSamples) { + MOZ_ASSERT(aSamples); + + if (IsEmpty()) { + return 0; + } + + uint32_t toDiscard = std::min(AvailableRead(), aSamples); + mReadIndex = NextIndex(mReadIndex, toDiscard); + + return toDiscard; + } + + /** + * Empty the ring buffer. + */ + uint32_t Clear() { + if (IsEmpty()) { + return 0; + } + + uint32_t toDiscard = AvailableRead(); + mReadIndex = NextIndex(mReadIndex, toDiscard); + + return toDiscard; + } + + /** + * Set the ring buffer to the requested size. NB: In bytes. + * + * Re-allocates memory if a larger buffer is requested than what is already + * allocated. + */ + bool SetLengthBytes(uint32_t aLengthBytes) { + MOZ_ASSERT(aLengthBytes % sizeof(T) == 0, + "Length in bytes is not a whole number of samples"); + + uint32_t lengthSamples = aLengthBytes / sizeof(T); + uint32_t oldLengthSamples = Capacity(); + uint32_t availableRead = AvailableRead(); + if (!mMemoryBuffer.SetLength(aLengthBytes)) { + return false; + } + + // mStorage may now have been deallocated. + mStorage = ConvertToSpan(mMemoryBuffer); + if (mWriteIndex < mReadIndex) { + // The old data wrapped around the end of the (old) buffer. It needs to be + // moved so it is continuous. + const uint32_t toMove = mWriteIndex; + + // The bit that goes between the old and the new end of the buffer. + const uint32_t toMove1 = + std::min(lengthSamples - oldLengthSamples, toMove); + { + // [0, toMove1) -> [oldLength, oldLength + toMove1). + Span from1 = mStorage.Subspan(0, toMove1); + Span to1 = mStorage.Subspan(oldLengthSamples, toMove1); + PodMove(to1.Elements(), from1.Elements(), toMove1); + } + + // The last bit of data that starts at 0. Could be empty. + const uint32_t toMove2 = toMove - toMove1; + { + // [toMove1, toMove) -> [0, toMove2). + Span from2 = mStorage.Subspan(toMove1, toMove2); + Span to2 = mStorage.Subspan(0, toMove2); + PodMove(to2.Elements(), from2.Elements(), toMove2); + } + + mWriteIndex = NextIndex(mReadIndex, availableRead); + } + + return true; + } + + /** + * Returns true if the full capacity of the ring buffer is being used. When + * full any attempt to write more samples to the ring buffer will fail. + */ + bool IsFull() const { return (mWriteIndex + 1) % Capacity() == mReadIndex; } + + /** + * Returns true if the ring buffer is empty. When empty any attempt to read + * more samples from the ring buffer will fail. + */ + bool IsEmpty() const { return mWriteIndex == mReadIndex; } + + /** + * The number of samples available for writing. + */ + uint32_t AvailableWrite() const { + /* We subtract one element here to always keep at least one sample + * free in the buffer, to distinguish between full and empty array. */ + uint32_t rv = mReadIndex - mWriteIndex - 1; + if (mWriteIndex >= mReadIndex) { + rv += Capacity(); + } + return rv; + } + + /** + * The number of samples available for reading. + */ + uint32_t AvailableRead() const { + if (mWriteIndex >= mReadIndex) { + return mWriteIndex - mReadIndex; + } + return mWriteIndex + Capacity() - mReadIndex; + } + + /** + * The number of samples this ring buffer can hold. 
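+   * Note that one slot is always kept free to tell a full buffer apart from
+   * an empty one, so a buffer backed by N samples of memory reports
+   * Capacity() == N while AvailableWrite() + AvailableRead() == N - 1.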
+ */ + uint32_t Capacity() const { return mStorage.Length(); } + + private: + uint32_t NextIndex(uint32_t aIndex, uint32_t aStep) const { + MOZ_ASSERT(aStep < Capacity()); + MOZ_ASSERT(aIndex < Capacity()); + return (aIndex + aStep) % Capacity(); + } + + Span ConvertToSpan(const AlignedByteBuffer& aOther) const { + MOZ_ASSERT(aOther.Length() % sizeof(T) == 0); + return Span(reinterpret_cast(aOther.Data()), + aOther.Length() / sizeof(T)); + } + + void CopySpan(Span& aTo, const Span& aFrom) { + MOZ_ASSERT(aTo.Length() == aFrom.Length()); + std::copy(aFrom.cbegin(), aFrom.cend(), aTo.begin()); + } + + private: + uint32_t mReadIndex = 0; + uint32_t mWriteIndex = 0; + /* Points to the mMemoryBuffer. */ + Span mStorage; + /* The actual allocated memory set from outside. It is set in the ctor and it + * is not used again. It is here to control the lifetime of the memory. The + * memory is accessed through the mStorage. The idea is that the memory used + * from the RingBuffer can be pre-allocated. Note that a re-allocation will + * happen if the length in bytes is set to something larger than is already + * allocated. */ + AlignedByteBuffer mMemoryBuffer; +}; + +/** AudioRingBuffer **/ + +/* The private members of AudioRingBuffer. */ +class AudioRingBuffer::AudioRingBufferPrivate { + public: + AudioSampleFormat mSampleFormat = AUDIO_FORMAT_SILENCE; + Maybe> mFloatRingBuffer; + Maybe> mIntRingBuffer; + Maybe mBackingBuffer; +}; + +AudioRingBuffer::AudioRingBuffer(uint32_t aSizeInBytes) + : mPtr(MakeUnique()) { + mPtr->mBackingBuffer.emplace(aSizeInBytes); + MOZ_ASSERT(mPtr->mBackingBuffer); +} + +AudioRingBuffer::~AudioRingBuffer() = default; + +void AudioRingBuffer::SetSampleFormat(AudioSampleFormat aFormat) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_SILENCE); + MOZ_ASSERT(aFormat == AUDIO_FORMAT_S16 || aFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mIntRingBuffer); + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + MOZ_ASSERT(mPtr->mBackingBuffer); + + mPtr->mSampleFormat = aFormat; + if (mPtr->mSampleFormat == AUDIO_FORMAT_S16) { + mPtr->mIntRingBuffer.emplace(mPtr->mBackingBuffer.extract()); + MOZ_ASSERT(!mPtr->mBackingBuffer); + return; + } + mPtr->mFloatRingBuffer.emplace(mPtr->mBackingBuffer.extract()); + MOZ_ASSERT(!mPtr->mBackingBuffer); +} + +uint32_t AudioRingBuffer::Write(const Span& aBuffer) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mIntRingBuffer); + MOZ_ASSERT(!mPtr->mBackingBuffer); + return mPtr->mFloatRingBuffer->Write(aBuffer); +} + +uint32_t AudioRingBuffer::Write(const Span& aBuffer) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16); + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + MOZ_ASSERT(!mPtr->mBackingBuffer); + return mPtr->mIntRingBuffer->Write(aBuffer); +} + +uint32_t AudioRingBuffer::Write(const AudioRingBuffer& aBuffer, + uint32_t aSamples) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16 || + mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mBackingBuffer); + if (mPtr->mSampleFormat == AUDIO_FORMAT_S16) { + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + return mPtr->mIntRingBuffer->Write(aBuffer.mPtr->mIntRingBuffer.ref(), + aSamples); + } + MOZ_ASSERT(!mPtr->mIntRingBuffer); + return mPtr->mFloatRingBuffer->Write(aBuffer.mPtr->mFloatRingBuffer.ref(), + aSamples); +} + +uint32_t AudioRingBuffer::PrependSilence(uint32_t aSamples) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16 || + mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mBackingBuffer); + if (mPtr->mSampleFormat == 
AUDIO_FORMAT_S16) { + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + return mPtr->mIntRingBuffer->PrependSilence(aSamples); + } + MOZ_ASSERT(!mPtr->mIntRingBuffer); + return mPtr->mFloatRingBuffer->PrependSilence(aSamples); +} + +uint32_t AudioRingBuffer::WriteSilence(uint32_t aSamples) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16 || + mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mBackingBuffer); + if (mPtr->mSampleFormat == AUDIO_FORMAT_S16) { + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + return mPtr->mIntRingBuffer->WriteSilence(aSamples); + } + MOZ_ASSERT(!mPtr->mIntRingBuffer); + return mPtr->mFloatRingBuffer->WriteSilence(aSamples); +} + +uint32_t AudioRingBuffer::Read(const Span& aBuffer) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mIntRingBuffer); + MOZ_ASSERT(!mPtr->mBackingBuffer); + return mPtr->mFloatRingBuffer->Read(aBuffer); +} + +uint32_t AudioRingBuffer::Read(const Span& aBuffer) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16); + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + MOZ_ASSERT(!mPtr->mBackingBuffer); + return mPtr->mIntRingBuffer->Read(aBuffer); +} + +uint32_t AudioRingBuffer::ReadNoCopy( + std::function&)>&& aCallable) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mIntRingBuffer); + MOZ_ASSERT(!mPtr->mBackingBuffer); + return mPtr->mFloatRingBuffer->ReadNoCopy(std::move(aCallable)); +} + +uint32_t AudioRingBuffer::ReadNoCopy( + std::function&)>&& aCallable) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16); + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + MOZ_ASSERT(!mPtr->mBackingBuffer); + return mPtr->mIntRingBuffer->ReadNoCopy(std::move(aCallable)); +} + +uint32_t AudioRingBuffer::Discard(uint32_t aSamples) { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16 || + mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mBackingBuffer); + if (mPtr->mSampleFormat == AUDIO_FORMAT_S16) { + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + return mPtr->mIntRingBuffer->Discard(aSamples); + } + MOZ_ASSERT(!mPtr->mIntRingBuffer); + return mPtr->mFloatRingBuffer->Discard(aSamples); +} + +uint32_t AudioRingBuffer::Clear() { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16 || + mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mBackingBuffer); + if (mPtr->mSampleFormat == AUDIO_FORMAT_S16) { + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + MOZ_ASSERT(mPtr->mIntRingBuffer); + return mPtr->mIntRingBuffer->Clear(); + } + MOZ_ASSERT(!mPtr->mIntRingBuffer); + MOZ_ASSERT(mPtr->mFloatRingBuffer); + return mPtr->mFloatRingBuffer->Clear(); +} + +bool AudioRingBuffer::SetLengthBytes(uint32_t aLengthBytes) { + if (mPtr->mFloatRingBuffer) { + return mPtr->mFloatRingBuffer->SetLengthBytes(aLengthBytes); + } + if (mPtr->mIntRingBuffer) { + return mPtr->mIntRingBuffer->SetLengthBytes(aLengthBytes); + } + if (mPtr->mBackingBuffer) { + return mPtr->mBackingBuffer->SetLength(aLengthBytes); + } + MOZ_ASSERT_UNREACHABLE("Unexpected"); + return true; +} + +uint32_t AudioRingBuffer::Capacity() const { + if (mPtr->mFloatRingBuffer) { + return mPtr->mFloatRingBuffer->Capacity(); + } + if (mPtr->mIntRingBuffer) { + return mPtr->mIntRingBuffer->Capacity(); + } + MOZ_ASSERT_UNREACHABLE("Unexpected"); + return 0; +} + +bool AudioRingBuffer::IsFull() const { + MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16 || + mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(!mPtr->mBackingBuffer); + if (mPtr->mSampleFormat == AUDIO_FORMAT_S16) { + MOZ_ASSERT(!mPtr->mFloatRingBuffer); + return 
mPtr->mIntRingBuffer->IsFull();
+  }
+  MOZ_ASSERT(!mPtr->mIntRingBuffer);
+  return mPtr->mFloatRingBuffer->IsFull();
+}
+
+bool AudioRingBuffer::IsEmpty() const {
+  MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16 ||
+             mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32);
+  MOZ_ASSERT(!mPtr->mBackingBuffer);
+  if (mPtr->mSampleFormat == AUDIO_FORMAT_S16) {
+    MOZ_ASSERT(!mPtr->mFloatRingBuffer);
+    return mPtr->mIntRingBuffer->IsEmpty();
+  }
+  MOZ_ASSERT(!mPtr->mIntRingBuffer);
+  return mPtr->mFloatRingBuffer->IsEmpty();
+}
+
+uint32_t AudioRingBuffer::AvailableWrite() const {
+  MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16 ||
+             mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32);
+  MOZ_ASSERT(!mPtr->mBackingBuffer);
+  if (mPtr->mSampleFormat == AUDIO_FORMAT_S16) {
+    MOZ_ASSERT(!mPtr->mFloatRingBuffer);
+    return mPtr->mIntRingBuffer->AvailableWrite();
+  }
+  MOZ_ASSERT(!mPtr->mIntRingBuffer);
+  return mPtr->mFloatRingBuffer->AvailableWrite();
+}
+
+uint32_t AudioRingBuffer::AvailableRead() const {
+  MOZ_ASSERT(mPtr->mSampleFormat == AUDIO_FORMAT_S16 ||
+             mPtr->mSampleFormat == AUDIO_FORMAT_FLOAT32);
+  MOZ_ASSERT(!mPtr->mBackingBuffer);
+  if (mPtr->mSampleFormat == AUDIO_FORMAT_S16) {
+    MOZ_ASSERT(!mPtr->mFloatRingBuffer);
+    return mPtr->mIntRingBuffer->AvailableRead();
+  }
+  MOZ_ASSERT(!mPtr->mIntRingBuffer);
+  return mPtr->mFloatRingBuffer->AvailableRead();
+}
+
+}  // namespace mozilla
diff --git a/dom/media/AudioRingBuffer.h b/dom/media/AudioRingBuffer.h
new file mode 100644
index 0000000000..892a7cd408
--- /dev/null
+++ b/dom/media/AudioRingBuffer.h
@@ -0,0 +1,135 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MOZILLA_AUDIO_RING_BUFFER_H_
+#define MOZILLA_AUDIO_RING_BUFFER_H_
+
+#include "AudioSampleFormat.h"
+#include "mozilla/Span.h"
+
+#include <functional>
+
+namespace mozilla {
+
+/**
+ * AudioRingBuffer works with the float or short audio sample formats. The
+ * implementation wraps a RingBuffer and is therefore not thread-safe. Reads
+ * and writes must happen on the same thread, which may be different from the
+ * construction thread. The memory is pre-allocated in the constructor, but may
+ * also be re-allocated on the fly should a larger length be needed. The sample
+ * format has to be specified before the buffer can be used.
+ */
+class AudioRingBuffer final {
+ public:
+  explicit AudioRingBuffer(uint32_t aSizeInBytes);
+  ~AudioRingBuffer();
+
+  /**
+   * Set the sample format to either short or float. The sample format must be
+   * set before using any other method.
+   */
+  void SetSampleFormat(AudioSampleFormat aFormat);
+
+  /**
+   * Write `aBuffer.Length()` number of samples when the format is float.
+   */
+  uint32_t Write(const Span<const float>& aBuffer);
+
+  /**
+   * Write `aBuffer.Length()` number of samples when the format is short.
+   */
+  uint32_t Write(const Span<const int16_t>& aBuffer);
+
+  /**
+   * Write `aSamples` number of samples from `aBuffer`. Note the `aBuffer` does
+   * not change.
+   */
+  uint32_t Write(const AudioRingBuffer& aBuffer, uint32_t aSamples);
+
+  /**
+   * Write `aSamples` number of zeros before the beginning of the existing data.
+   */
+  uint32_t PrependSilence(uint32_t aSamples);
+
+  /**
+   * Write `aSamples` number of zeros.
+   */
+  uint32_t WriteSilence(uint32_t aSamples);
+
+  /**
+   * Read `aBuffer.Length()` number of samples when the format is float.
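+   *
+   * A minimal write/read round trip, as a sketch (the byte size and sample
+   * counts are arbitrary):
+   *
+   *   AudioRingBuffer rb(4096 * sizeof(float));
+   *   rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);  // required before any I/O
+   *   float in[256] = {};
+   *   rb.Write(Span<const float>(in, 256));
+   *   float out[256];
+   *   rb.Read(Span<float>(out, 256));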
+ */ + uint32_t Read(const Span& aBuffer); + + /** + * Read `aBuffer.Length()` number of samples when the format is short. + */ + uint32_t Read(const Span& aBuffer); + + /** + * Read the internal buffer without extra copies when sample format is float. + * Check also the RingBuffer::ReadNoCopy() for more details. + */ + uint32_t ReadNoCopy( + std::function&)>&& aCallable); + + /** + * Read the internal buffer without extra copies when sample format is short. + * Check also the RingBuffer::ReadNoCopy() for more details. + */ + uint32_t ReadNoCopy( + std::function&)>&& aCallable); + + /** + * Remove `aSamples` number of samples. + */ + uint32_t Discard(uint32_t aSamples); + + /** + * Remove all available samples. + */ + uint32_t Clear(); + + /** + * Set the length of the ring buffer in bytes. Must be divisible by the sample + * size. Will not deallocate memory if the underlying buffer is large enough. + * Returns false if setting the length requires allocating memory and the + * allocation fails. + */ + bool SetLengthBytes(uint32_t aLengthBytes); + + /** + * Return the number of samples this buffer can hold. + */ + uint32_t Capacity() const; + + /** + * Return true if the buffer is full. + */ + bool IsFull() const; + + /** + * Return true if the buffer is empty. + */ + bool IsEmpty() const; + + /** + * Return the number of samples available for writing. + */ + uint32_t AvailableWrite() const; + + /** + * Return the number of samples available for reading. + */ + uint32_t AvailableRead() const; + + private: + class AudioRingBufferPrivate; + UniquePtr mPtr; +}; + +} // namespace mozilla + +#endif // MOZILLA_AUDIO_RING_BUFFER_H_ diff --git a/dom/media/AudioSampleFormat.h b/dom/media/AudioSampleFormat.h new file mode 100644 index 0000000000..1cec31a385 --- /dev/null +++ b/dom/media/AudioSampleFormat.h @@ -0,0 +1,236 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#ifndef MOZILLA_AUDIOSAMPLEFORMAT_H_ +#define MOZILLA_AUDIOSAMPLEFORMAT_H_ + +#include "mozilla/Assertions.h" +#include + +namespace mozilla { + +/** + * Audio formats supported in MediaTracks and media elements. + * + * Only one of these is supported by AudioStream, and that is determined + * at compile time (roughly, FLOAT32 on desktops, S16 on mobile). Media decoders + * produce that format only; queued AudioData always uses that format. + */ +enum AudioSampleFormat { + // Silence: format will be chosen later + AUDIO_FORMAT_SILENCE, + // Native-endian signed 16-bit audio samples + AUDIO_FORMAT_S16, + // Signed 32-bit float samples + AUDIO_FORMAT_FLOAT32, + // The format used for output by AudioStream. 
+ AUDIO_OUTPUT_FORMAT = AUDIO_FORMAT_FLOAT32 +}; + +enum { MAX_AUDIO_SAMPLE_SIZE = sizeof(float) }; + +template +class AudioSampleTraits; + +template <> +class AudioSampleTraits { + public: + using Type = float; +}; +template <> +class AudioSampleTraits { + public: + using Type = int16_t; +}; + +using AudioDataValue = AudioSampleTraits::Type; + +template +class AudioSampleTypeToFormat; + +template <> +class AudioSampleTypeToFormat { + public: + static const AudioSampleFormat Format = AUDIO_FORMAT_FLOAT32; +}; + +template <> +class AudioSampleTypeToFormat { + public: + static const AudioSampleFormat Format = AUDIO_FORMAT_S16; +}; + +// Single-sample conversion +/* + * Use "2^N" conversion since it's simple, fast, "bit transparent", used by + * many other libraries and apparently behaves reasonably. + * http://blog.bjornroche.com/2009/12/int-float-int-its-jungle-out-there.html + * http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html + */ +inline float AudioSampleToFloat(float aValue) { return aValue; } +inline float AudioSampleToFloat(int16_t aValue) { + return static_cast(aValue) / 32768.0f; +} +inline float AudioSampleToFloat(int32_t aValue) { + return static_cast(aValue) / (float)(1U << 31); +} + +template +T FloatToAudioSample(float aValue); + +template <> +inline float FloatToAudioSample(float aValue) { + return aValue; +} +template <> +inline int16_t FloatToAudioSample(float aValue) { + float v = aValue * 32768.0f; + float clamped = std::max(-32768.0f, std::min(32767.0f, v)); + return int16_t(clamped); +} + +template +T UInt8bitToAudioSample(uint8_t aValue); + +template <> +inline float UInt8bitToAudioSample(uint8_t aValue) { + return static_cast(aValue) * (static_cast(2) / UINT8_MAX) - + static_cast(1); +} +template <> +inline int16_t UInt8bitToAudioSample(uint8_t aValue) { + return static_cast((aValue << 8) + aValue + INT16_MIN); +} + +template +T IntegerToAudioSample(int16_t aValue); + +template <> +inline float IntegerToAudioSample(int16_t aValue) { + return static_cast(aValue) / 32768.0f; +} +template <> +inline int16_t IntegerToAudioSample(int16_t aValue) { + return aValue; +} + +template +T Int24bitToAudioSample(int32_t aValue); + +template <> +inline float Int24bitToAudioSample(int32_t aValue) { + return static_cast(aValue) / static_cast(1 << 23); +} +template <> +inline int16_t Int24bitToAudioSample(int32_t aValue) { + return static_cast(aValue / 256); +} + +template +inline void ConvertAudioSample(SrcT aIn, DstT& aOut); + +template <> +inline void ConvertAudioSample(int16_t aIn, int16_t& aOut) { + aOut = aIn; +} + +template <> +inline void ConvertAudioSample(int16_t aIn, float& aOut) { + aOut = AudioSampleToFloat(aIn); +} + +template <> +inline void ConvertAudioSample(float aIn, float& aOut) { + aOut = aIn; +} + +template <> +inline void ConvertAudioSample(float aIn, int16_t& aOut) { + aOut = FloatToAudioSample(aIn); +} + +// Sample buffer conversion + +template +inline void ConvertAudioSamples(const From* aFrom, To* aTo, int aCount) { + for (int i = 0; i < aCount; ++i) { + aTo[i] = FloatToAudioSample(AudioSampleToFloat(aFrom[i])); + } +} +inline void ConvertAudioSamples(const int16_t* aFrom, int16_t* aTo, + int aCount) { + memcpy(aTo, aFrom, sizeof(*aTo) * aCount); +} +inline void ConvertAudioSamples(const float* aFrom, float* aTo, int aCount) { + memcpy(aTo, aFrom, sizeof(*aTo) * aCount); +} + +// Sample buffer conversion with scale + +template +inline void ConvertAudioSamplesWithScale(const From* aFrom, To* aTo, int aCount, + float aScale) { + if 
(aScale == 1.0f) { + ConvertAudioSamples(aFrom, aTo, aCount); + return; + } + for (int i = 0; i < aCount; ++i) { + aTo[i] = FloatToAudioSample(AudioSampleToFloat(aFrom[i]) * aScale); + } +} +inline void ConvertAudioSamplesWithScale(const int16_t* aFrom, int16_t* aTo, + int aCount, float aScale) { + if (aScale == 1.0f) { + ConvertAudioSamples(aFrom, aTo, aCount); + return; + } + if (0.0f <= aScale && aScale < 1.0f) { + int32_t scale = int32_t((1 << 16) * aScale); + for (int i = 0; i < aCount; ++i) { + aTo[i] = int16_t((int32_t(aFrom[i]) * scale) >> 16); + } + return; + } + for (int i = 0; i < aCount; ++i) { + aTo[i] = FloatToAudioSample(AudioSampleToFloat(aFrom[i]) * aScale); + } +} + +template +inline void AddAudioSamplesWithScale(const From* aFrom, To* aTo, int aCount, + float aScale) { + for (int i = 0; i < aCount; ++i) { + aTo[i] = FloatToAudioSample(AudioSampleToFloat(aTo[i]) + + AudioSampleToFloat(aFrom[i]) * aScale); + } +} + +// In place audio sample scaling. +inline void ScaleAudioSamples(float* aBuffer, int aCount, float aScale) { + for (int32_t i = 0; i < aCount; ++i) { + aBuffer[i] *= aScale; + } +} + +inline void ScaleAudioSamples(short* aBuffer, int aCount, float aScale) { + int32_t volume = int32_t((1 << 16) * aScale); + for (int32_t i = 0; i < aCount; ++i) { + aBuffer[i] = short((int32_t(aBuffer[i]) * volume) >> 16); + } +} + +inline const void* AddAudioSampleOffset(const void* aBase, + AudioSampleFormat aFormat, + int32_t aOffset) { + static_assert(AUDIO_FORMAT_S16 == 1, "Bad constant"); + static_assert(AUDIO_FORMAT_FLOAT32 == 2, "Bad constant"); + MOZ_ASSERT(aFormat == AUDIO_FORMAT_S16 || aFormat == AUDIO_FORMAT_FLOAT32); + + return static_cast(aBase) + aFormat * 2 * aOffset; +} + +} // namespace mozilla + +#endif /* MOZILLA_AUDIOSAMPLEFORMAT_H_ */ diff --git a/dom/media/AudioSegment.cpp b/dom/media/AudioSegment.cpp new file mode 100644 index 0000000000..243cdffd0e --- /dev/null +++ b/dom/media/AudioSegment.cpp @@ -0,0 +1,292 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AudioSegment.h" +#include "AudioMixer.h" +#include "AudioChannelFormat.h" +#include "MediaTrackGraph.h" // for nsAutoRefTraits +#include + +namespace mozilla { + +const uint8_t + SilentChannel::gZeroChannel[MAX_AUDIO_SAMPLE_SIZE * + SilentChannel::AUDIO_PROCESSING_FRAMES] = {0}; + +template <> +const float* SilentChannel::ZeroChannel() { + return reinterpret_cast(SilentChannel::gZeroChannel); +} + +template <> +const int16_t* SilentChannel::ZeroChannel() { + return reinterpret_cast(SilentChannel::gZeroChannel); +} + +void AudioSegment::ApplyVolume(float aVolume) { + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + ci->mVolume *= aVolume; + } +} + +template +void AudioSegment::Resample(nsAutoRef& aResampler, + uint32_t* aResamplerChannelCount, uint32_t aInRate, + uint32_t aOutRate) { + mDuration = 0; + + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + AutoTArray, GUESS_AUDIO_CHANNELS> output; + AutoTArray bufferPtrs; + AudioChunk& c = *ci; + // If this chunk is null, don't bother resampling, just alter its duration + if (c.IsNull()) { + c.mDuration = (c.mDuration * aOutRate) / aInRate; + mDuration += c.mDuration; + continue; + } + uint32_t channels = c.mChannelData.Length(); + // This might introduce a discontinuity, but a channel count change in the + // middle of a stream is not that common. This also initializes the + // resampler as late as possible. + if (channels != *aResamplerChannelCount) { + SpeexResamplerState* state = + speex_resampler_init(channels, aInRate, aOutRate, + SPEEX_RESAMPLER_QUALITY_DEFAULT, nullptr); + MOZ_ASSERT(state); + aResampler.own(state); + *aResamplerChannelCount = channels; + } + output.SetLength(channels); + bufferPtrs.SetLength(channels); + uint32_t inFrames = c.mDuration; + // Round up to allocate; the last frame may not be used. + NS_ASSERTION((UINT64_MAX - aInRate + 1) / c.mDuration >= aOutRate, + "Dropping samples"); + uint32_t outSize = + (static_cast(c.mDuration) * aOutRate + aInRate - 1) / aInRate; + for (uint32_t i = 0; i < channels; i++) { + T* out = output[i].AppendElements(outSize); + uint32_t outFrames = outSize; + + const T* in = static_cast(c.mChannelData[i]); + dom::WebAudioUtils::SpeexResamplerProcess(aResampler.get(), i, in, + &inFrames, out, &outFrames); + MOZ_ASSERT(inFrames == c.mDuration); + + bufferPtrs[i] = out; + output[i].SetLength(outFrames); + } + MOZ_ASSERT(channels > 0); + c.mDuration = output[0].Length(); + c.mBuffer = new mozilla::SharedChannelArrayBuffer(std::move(output)); + for (uint32_t i = 0; i < channels; i++) { + c.mChannelData[i] = bufferPtrs[i]; + } + mDuration += c.mDuration; + } +} + +void AudioSegment::ResampleChunks(nsAutoRef& aResampler, + uint32_t* aResamplerChannelCount, + uint32_t aInRate, uint32_t aOutRate) { + if (mChunks.IsEmpty()) { + return; + } + + AudioSampleFormat format = AUDIO_FORMAT_SILENCE; + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + if (ci->mBufferFormat != AUDIO_FORMAT_SILENCE) { + format = ci->mBufferFormat; + } + } + + switch (format) { + // If the format is silence at this point, all the chunks are silent. The + // actual function we use does not matter, it's just a matter of changing + // the chunks duration. 
+ case AUDIO_FORMAT_SILENCE: + case AUDIO_FORMAT_FLOAT32: + Resample(aResampler, aResamplerChannelCount, aInRate, aOutRate); + break; + case AUDIO_FORMAT_S16: + Resample(aResampler, aResamplerChannelCount, aInRate, aOutRate); + break; + default: + MOZ_ASSERT(false); + break; + } +} + +size_t AudioSegment::WriteToInterleavedBuffer(nsTArray& aBuffer, + uint32_t aChannels) const { + size_t offset = 0; + if (GetDuration() <= 0) { + MOZ_ASSERT(GetDuration() == 0); + return offset; + } + + // Calculate how many samples in this segment + size_t frames = static_cast(GetDuration()); + CheckedInt samples(frames); + samples *= static_cast(aChannels); + MOZ_ASSERT(samples.isValid()); + if (!samples.isValid()) { + return offset; + } + + // Enlarge buffer space if needed + if (samples.value() > aBuffer.Capacity()) { + aBuffer.SetCapacity(samples.value()); + } + aBuffer.SetLengthAndRetainStorage(samples.value()); + aBuffer.ClearAndRetainStorage(); + + // Convert the de-interleaved chunks into an interleaved buffer. Note that + // we may upmix or downmix the audio data if the channel in the chunks + // mismatch with aChannels + for (ConstChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + const AudioChunk& c = *ci; + size_t samplesInChunk = static_cast(c.mDuration) * aChannels; + switch (c.mBufferFormat) { + case AUDIO_FORMAT_S16: + WriteChunk(c, aChannels, c.mVolume, + aBuffer.Elements() + offset); + break; + case AUDIO_FORMAT_FLOAT32: + WriteChunk(c, aChannels, c.mVolume, aBuffer.Elements() + offset); + break; + case AUDIO_FORMAT_SILENCE: + PodZero(aBuffer.Elements() + offset, samplesInChunk); + break; + default: + MOZ_ASSERT_UNREACHABLE("Unknown format"); + PodZero(aBuffer.Elements() + offset, samplesInChunk); + break; + } + offset += samplesInChunk; + } + MOZ_DIAGNOSTIC_ASSERT(samples.value() == offset, + "Segment's duration is incorrect"); + aBuffer.SetLengthAndRetainStorage(offset); + return offset; +} + +// This helps to to safely get a pointer to the position we want to start +// writing a planar audio buffer, depending on the channel and the offset in the +// buffer. +static AudioDataValue* PointerForOffsetInChannel(AudioDataValue* aData, + size_t aLengthSamples, + uint32_t aChannelCount, + uint32_t aChannel, + uint32_t aOffsetSamples) { + size_t samplesPerChannel = aLengthSamples / aChannelCount; + size_t beginningOfChannel = samplesPerChannel * aChannel; + MOZ_ASSERT(aChannel * samplesPerChannel + aOffsetSamples < aLengthSamples, + "Offset request out of bounds."); + return aData + beginningOfChannel + aOffsetSamples; +} + +template +static void DownMixChunk(const AudioChunk& aChunk, + Span aOutputChannels) { + Span channelData = aChunk.ChannelData(); + uint32_t frameCount = aChunk.mDuration; + if (channelData.Length() > aOutputChannels.Length()) { + // Down mix. + AudioChannelsDownMix(channelData, aOutputChannels, frameCount); + for (AudioDataValue* outChannel : aOutputChannels) { + ScaleAudioSamples(outChannel, frameCount, aChunk.mVolume); + } + } else { + // The channel count is already what we want. 
+ for (uint32_t channel = 0; channel < aOutputChannels.Length(); channel++) { + ConvertAudioSamplesWithScale(channelData[channel], + aOutputChannels[channel], frameCount, + aChunk.mVolume); + } + } +} + +void AudioChunk::DownMixTo( + Span aOutputChannelPtrs) const { + switch (mBufferFormat) { + case AUDIO_FORMAT_FLOAT32: + DownMixChunk(*this, aOutputChannelPtrs); + return; + case AUDIO_FORMAT_S16: + DownMixChunk(*this, aOutputChannelPtrs); + return; + case AUDIO_FORMAT_SILENCE: + for (AudioDataValue* outChannel : aOutputChannelPtrs) { + std::fill_n(outChannel, mDuration, static_cast(0)); + } + return; + // Avoid `default:` so that `-Wswitch` catches missing enumerators at + // compile time. + } + MOZ_ASSERT_UNREACHABLE("buffer format"); +} + +void AudioSegment::Mix(AudioMixer& aMixer, uint32_t aOutputChannels, + uint32_t aSampleRate) { + AutoTArray + buf; + AudioChunk upMixChunk; + uint32_t offsetSamples = 0; + uint32_t duration = GetDuration(); + + if (duration <= 0) { + MOZ_ASSERT(duration == 0); + return; + } + + uint32_t outBufferLength = duration * aOutputChannels; + buf.SetLength(outBufferLength); + + AutoTArray outChannelPtrs; + outChannelPtrs.SetLength(aOutputChannels); + + uint32_t frames; + for (ChunkIterator ci(*this); !ci.IsEnded(); + ci.Next(), offsetSamples += frames) { + const AudioChunk& c = *ci; + frames = c.mDuration; + for (uint32_t channel = 0; channel < aOutputChannels; channel++) { + outChannelPtrs[channel] = + PointerForOffsetInChannel(buf.Elements(), outBufferLength, + aOutputChannels, channel, offsetSamples); + } + + // If the chunk is silent, simply write the right number of silence in the + // buffers. + if (c.mBufferFormat == AUDIO_FORMAT_SILENCE) { + for (AudioDataValue* outChannel : outChannelPtrs) { + PodZero(outChannel, frames); + } + continue; + } + // We need to upmix and downmix appropriately, depending on the + // desired input and output channels. + const AudioChunk* downMixInput = &c; + if (c.ChannelCount() < aOutputChannels) { + // Up-mix. + upMixChunk = c; + AudioChannelsUpMix(&upMixChunk.mChannelData, aOutputChannels, + SilentChannel::gZeroChannel); + downMixInput = &upMixChunk; + } + downMixInput->DownMixTo(outChannelPtrs); + } + + if (offsetSamples) { + MOZ_ASSERT(offsetSamples == outBufferLength / aOutputChannels, + "We forgot to write some samples?"); + aMixer.Mix(buf.Elements(), aOutputChannels, offsetSamples, aSampleRate); + } +} + +} // namespace mozilla diff --git a/dom/media/AudioSegment.h b/dom/media/AudioSegment.h new file mode 100644 index 0000000000..006f996c39 --- /dev/null +++ b/dom/media/AudioSegment.h @@ -0,0 +1,482 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef MOZILLA_AUDIOSEGMENT_H_ +#define MOZILLA_AUDIOSEGMENT_H_ + +#include +#include "MediaSegment.h" +#include "AudioSampleFormat.h" +#include "AudioChannelFormat.h" +#include "SharedBuffer.h" +#include "WebAudioUtils.h" +#include "mozilla/ScopeExit.h" +#include "nsAutoRef.h" +#ifdef MOZILLA_INTERNAL_API +# include "mozilla/TimeStamp.h" +#endif +#include + +namespace mozilla { +struct AudioChunk; +class AudioSegment; +} // namespace mozilla +MOZ_DECLARE_RELOCATE_USING_MOVE_CONSTRUCTOR(mozilla::AudioChunk) + +/** + * This allows compilation of nsTArray and + * AutoTArray since without it, static analysis fails on the + * mChunks member being a non-memmovable AutoTArray. + * + * Note that AudioSegment(const AudioSegment&) is deleted, so this should + * never come into effect. + */ +MOZ_DECLARE_RELOCATE_USING_MOVE_CONSTRUCTOR(mozilla::AudioSegment) + +namespace mozilla { + +template +class SharedChannelArrayBuffer : public ThreadSharedObject { + public: + explicit SharedChannelArrayBuffer(nsTArray >&& aBuffers) + : mBuffers(std::move(aBuffers)) {} + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override { + size_t amount = 0; + amount += mBuffers.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < mBuffers.Length(); i++) { + amount += mBuffers[i].ShallowSizeOfExcludingThis(aMallocSizeOf); + } + + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + nsTArray > mBuffers; +}; + +class AudioMixer; + +/** + * For auto-arrays etc, guess this as the common number of channels. + */ +const int GUESS_AUDIO_CHANNELS = 2; + +// We ensure that the graph advances in steps that are multiples of the Web +// Audio block size +const uint32_t WEBAUDIO_BLOCK_SIZE_BITS = 7; +const uint32_t WEBAUDIO_BLOCK_SIZE = 1 << WEBAUDIO_BLOCK_SIZE_BITS; + +template +static void InterleaveAndConvertBuffer(const SrcT* const* aSourceChannels, + uint32_t aLength, float aVolume, + uint32_t aChannels, DestT* aOutput) { + DestT* output = aOutput; + for (size_t i = 0; i < aLength; ++i) { + for (size_t channel = 0; channel < aChannels; ++channel) { + float v = AudioSampleToFloat(aSourceChannels[channel][i]) * aVolume; + *output = FloatToAudioSample(v); + ++output; + } + } +} + +template +static void DeinterleaveAndConvertBuffer(const SrcT* aSourceBuffer, + uint32_t aFrames, uint32_t aChannels, + DestT** aOutput) { + for (size_t i = 0; i < aChannels; i++) { + size_t interleavedIndex = i; + for (size_t j = 0; j < aFrames; j++) { + ConvertAudioSample(aSourceBuffer[interleavedIndex], aOutput[i][j]); + interleavedIndex += aChannels; + } + } +} + +class SilentChannel { + public: + static const int AUDIO_PROCESSING_FRAMES = 640; /* > 10ms of 48KHz audio */ + static const uint8_t + gZeroChannel[MAX_AUDIO_SAMPLE_SIZE * AUDIO_PROCESSING_FRAMES]; + // We take advantage of the fact that zero in float and zero in int have the + // same all-zeros bit layout. + template + static const T* ZeroChannel(); +}; + +/** + * Given an array of input channels (aChannelData), downmix to aOutputChannels, + * interleave the channel data. A total of aOutputChannels*aDuration + * interleaved samples will be copied to a channel buffer in aOutput. 
+ */ +template +void DownmixAndInterleave(Span aChannelData, + int32_t aDuration, float aVolume, + uint32_t aOutputChannels, DestT* aOutput) { + if (aChannelData.Length() == aOutputChannels) { + InterleaveAndConvertBuffer(aChannelData.Elements(), aDuration, aVolume, + aOutputChannels, aOutput); + } else { + AutoTArray outputChannelData; + AutoTArray + outputBuffers; + outputChannelData.SetLength(aOutputChannels); + outputBuffers.SetLength(aDuration * aOutputChannels); + for (uint32_t i = 0; i < aOutputChannels; i++) { + outputChannelData[i] = outputBuffers.Elements() + aDuration * i; + } + AudioChannelsDownMix(aChannelData, outputChannelData, + aDuration); + InterleaveAndConvertBuffer(outputChannelData.Elements(), aDuration, aVolume, + aOutputChannels, aOutput); + } +} + +/** + * An AudioChunk represents a multi-channel buffer of audio samples. + * It references an underlying ThreadSharedObject which manages the lifetime + * of the buffer. An AudioChunk maintains its own duration and channel data + * pointers so it can represent a subinterval of a buffer without copying. + * An AudioChunk can store its individual channels anywhere; it maintains + * separate pointers to each channel's buffer. + */ +struct AudioChunk { + typedef mozilla::AudioSampleFormat SampleFormat; + + AudioChunk() = default; + + template + AudioChunk(already_AddRefed aBuffer, + const nsTArray& aChannelData, TrackTime aDuration, + PrincipalHandle aPrincipalHandle) + : mDuration(aDuration), + mBuffer(aBuffer), + mBufferFormat(AudioSampleTypeToFormat::Format), + mPrincipalHandle(std::move(aPrincipalHandle)) { + MOZ_ASSERT(!mBuffer == aChannelData.IsEmpty(), "Appending invalid data ?"); + for (const T* data : aChannelData) { + mChannelData.AppendElement(data); + } + } + + // Generic methods + void SliceTo(TrackTime aStart, TrackTime aEnd) { + MOZ_ASSERT(aStart >= 0, "Slice out of bounds: invalid start"); + MOZ_ASSERT(aStart < aEnd, "Slice out of bounds: invalid range"); + MOZ_ASSERT(aEnd <= mDuration, "Slice out of bounds: invalid end"); + + if (mBuffer) { + MOZ_ASSERT(aStart < INT32_MAX, + "Can't slice beyond 32-bit sample lengths"); + for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) { + mChannelData[channel] = AddAudioSampleOffset( + mChannelData[channel], mBufferFormat, int32_t(aStart)); + } + } + mDuration = aEnd - aStart; + } + TrackTime GetDuration() const { return mDuration; } + bool CanCombineWithFollowing(const AudioChunk& aOther) const { + if (aOther.mBuffer != mBuffer) { + return false; + } + if (!mBuffer) { + return true; + } + if (aOther.mVolume != mVolume) { + return false; + } + if (aOther.mPrincipalHandle != mPrincipalHandle) { + return false; + } + NS_ASSERTION(aOther.mBufferFormat == mBufferFormat, + "Wrong metadata about buffer"); + NS_ASSERTION(aOther.mChannelData.Length() == mChannelData.Length(), + "Mismatched channel count"); + if (mDuration > INT32_MAX) { + return false; + } + for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) { + if (aOther.mChannelData[channel] != + AddAudioSampleOffset(mChannelData[channel], mBufferFormat, + int32_t(mDuration))) { + return false; + } + } + return true; + } + bool IsNull() const { return mBuffer == nullptr; } + void SetNull(TrackTime aDuration) { + mBuffer = nullptr; + mChannelData.Clear(); + mDuration = aDuration; + mVolume = 1.0f; + mBufferFormat = AUDIO_FORMAT_SILENCE; + mPrincipalHandle = PRINCIPAL_HANDLE_NONE; + } + + uint32_t ChannelCount() const { return mChannelData.Length(); } + + bool IsMuted() const { return 
mVolume == 0.0f; } + + size_t SizeOfExcludingThisIfUnshared(MallocSizeOf aMallocSizeOf) const { + return SizeOfExcludingThis(aMallocSizeOf, true); + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf, bool aUnshared) const { + size_t amount = 0; + + // Possibly owned: + // - mBuffer - Can hold data that is also in the decoded audio queue. If it + // is not shared, or unshared == false it gets counted. + if (mBuffer && (!aUnshared || !mBuffer->IsShared())) { + amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); + } + + // Memory in the array is owned by mBuffer. + amount += mChannelData.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; + } + + template + Span ChannelData() const { + MOZ_ASSERT(AudioSampleTypeToFormat::Format == mBufferFormat); + return Span(reinterpret_cast(mChannelData.Elements()), + mChannelData.Length()); + } + + /** + * ChannelFloatsForWrite() should be used only when mBuffer is owned solely + * by the calling thread. + */ + template + T* ChannelDataForWrite(size_t aChannel) { + MOZ_ASSERT(AudioSampleTypeToFormat::Format == mBufferFormat); + MOZ_ASSERT(!mBuffer->IsShared()); + return static_cast(const_cast(mChannelData[aChannel])); + } + + template + static AudioChunk FromInterleavedBuffer( + const T* aBuffer, size_t aFrames, uint32_t aChannels, + const PrincipalHandle& aPrincipalHandle) { + CheckedInt bufferSize(sizeof(T)); + bufferSize *= aFrames; + bufferSize *= aChannels; + RefPtr buffer = SharedBuffer::Create(bufferSize); + + AutoTArray deinterleaved; + if (aChannels == 1) { + PodCopy(static_cast(buffer->Data()), aBuffer, aFrames); + deinterleaved.AppendElement(static_cast(buffer->Data())); + } else { + deinterleaved.SetLength(aChannels); + T* samples = static_cast(buffer->Data()); + + size_t offset = 0; + for (uint32_t i = 0; i < aChannels; ++i) { + deinterleaved[i] = samples + offset; + offset += aFrames; + } + + DeinterleaveAndConvertBuffer(aBuffer, static_cast(aFrames), + aChannels, deinterleaved.Elements()); + } + + AutoTArray channelData; + channelData.AppendElements(deinterleaved); + return AudioChunk(buffer.forget(), channelData, + static_cast(aFrames), aPrincipalHandle); + } + + const PrincipalHandle& GetPrincipalHandle() const { return mPrincipalHandle; } + + // aOutputChannels must contain pointers to channel data of length mDuration. + void DownMixTo(Span aOutputChannels) const; + + TrackTime mDuration = 0; // in frames within the buffer + RefPtr mBuffer; // the buffer object whose lifetime is + // managed; null means data is all zeroes + // one pointer per channel; empty if and only if mBuffer is null + CopyableAutoTArray mChannelData; + float mVolume = 1.0f; // volume multiplier to apply + // format of frames in mBuffer (or silence if mBuffer is null) + SampleFormat mBufferFormat = AUDIO_FORMAT_SILENCE; + // principalHandle for the data in this chunk. + // This can be compared to an nsIPrincipal* when back on main thread. + PrincipalHandle mPrincipalHandle = PRINCIPAL_HANDLE_NONE; +}; + +/** + * A list of audio samples consisting of a sequence of slices of SharedBuffers. + * The audio rate is determined by the track, not stored in this class. + */ +class AudioSegment : public MediaSegmentBase { + // The channel count that MaxChannelCount() returned last time it was called. 
+ uint32_t mMemoizedMaxChannelCount = 0; + + public: + typedef mozilla::AudioSampleFormat SampleFormat; + + AudioSegment() : MediaSegmentBase(AUDIO) {} + + AudioSegment(AudioSegment&& aSegment) = default; + + AudioSegment(const AudioSegment&) = delete; + AudioSegment& operator=(const AudioSegment&) = delete; + + ~AudioSegment() = default; + + // Resample the whole segment in place. `aResampler` is an instance of a + // resampler, initialized with `aResamplerChannelCount` channels. If this + // function finds a chunk with more channels, `aResampler` is destroyed and a + // new resampler is created, and `aResamplerChannelCount` is updated with the + // new channel count value. + void ResampleChunks(nsAutoRef& aResampler, + uint32_t* aResamplerChannelCount, uint32_t aInRate, + uint32_t aOutRate); + + template + void AppendFrames(already_AddRefed aBuffer, + const nsTArray& aChannelData, TrackTime aDuration, + const PrincipalHandle& aPrincipalHandle) { + AppendAndConsumeChunk(AudioChunk(std::move(aBuffer), aChannelData, + aDuration, aPrincipalHandle)); + } + void AppendSegment(const AudioSegment* aSegment) { + MOZ_ASSERT(aSegment); + + for (const AudioChunk& c : aSegment->mChunks) { + AudioChunk* chunk = AppendChunk(c.GetDuration()); + chunk->mBuffer = c.mBuffer; + chunk->mChannelData = c.mChannelData; + chunk->mBufferFormat = c.mBufferFormat; + chunk->mPrincipalHandle = c.mPrincipalHandle; + } + } + template + void AppendFromInterleavedBuffer(const T* aBuffer, size_t aFrames, + uint32_t aChannels, + const PrincipalHandle& aPrincipalHandle) { + AppendAndConsumeChunk(AudioChunk::FromInterleavedBuffer( + aBuffer, aFrames, aChannels, aPrincipalHandle)); + } + // Write the segement data into an interleaved buffer. Do mixing if the + // AudioChunk's channel count in the segment is different from aChannels. + // Returns sample count of the converted audio data. The converted data will + // be stored into aBuffer. + size_t WriteToInterleavedBuffer(nsTArray& aBuffer, + uint32_t aChannels) const; + // Consumes aChunk, and append it to the segment if its duration is not zero. + void AppendAndConsumeChunk(AudioChunk&& aChunk) { + AudioChunk unused; + AudioChunk* chunk = &unused; + + // Always consume aChunk. The chunk's mBuffer can be non-null even if its + // duration is 0. + auto consume = MakeScopeExit([&] { + chunk->mBuffer = std::move(aChunk.mBuffer); + chunk->mChannelData = std::move(aChunk.mChannelData); + + MOZ_ASSERT(chunk->mBuffer || chunk->mChannelData.IsEmpty(), + "Appending invalid data ?"); + + chunk->mVolume = aChunk.mVolume; + chunk->mBufferFormat = aChunk.mBufferFormat; + chunk->mPrincipalHandle = std::move(aChunk.mPrincipalHandle); + }); + + if (aChunk.GetDuration() == 0) { + return; + } + + if (!mChunks.IsEmpty() && + mChunks.LastElement().CanCombineWithFollowing(aChunk)) { + mChunks.LastElement().mDuration += aChunk.GetDuration(); + mDuration += aChunk.GetDuration(); + return; + } + + chunk = AppendChunk(aChunk.mDuration); + } + void ApplyVolume(float aVolume); + // Mix the segment into a mixer, keeping it planar, up or down mixing to + // aChannelCount channels. + void Mix(AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate); + + // Returns the maximum channel count across all chunks in this segment. + // Should there be no chunk with a channel count we return the memoized return + // value from last time this method was called. 
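+  // For example, a segment holding a mono chunk followed by a stereo chunk
+  // reports 2, while a segment containing only null (silent) chunks reports
+  // the memoized value from an earlier call (or 0 if there has never been
+  // one).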
+ uint32_t MaxChannelCount() { + uint32_t channelCount = 0; + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + if (ci->ChannelCount()) { + channelCount = std::max(channelCount, ci->ChannelCount()); + } + } + if (channelCount == 0) { + return mMemoizedMaxChannelCount; + } + return mMemoizedMaxChannelCount = channelCount; + } + + static Type StaticType() { return AUDIO; } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + PrincipalHandle GetOldestPrinciple() const { + const AudioChunk* chunk = mChunks.IsEmpty() ? nullptr : &mChunks[0]; + return chunk ? chunk->GetPrincipalHandle() : PRINCIPAL_HANDLE_NONE; + } + + // Iterate on each chunks until the input function returns true. + template + void IterateOnChunks(const Function&& aFunction) { + for (uint32_t idx = 0; idx < mChunks.Length(); idx++) { + if (aFunction(&mChunks[idx])) { + return; + } + } + } + + private: + template + void Resample(nsAutoRef& aResampler, + uint32_t* aResamplerChannelCount, uint32_t aInRate, + uint32_t aOutRate); +}; + +template +void WriteChunk(const AudioChunk& aChunk, uint32_t aOutputChannels, + float aVolume, AudioDataValue* aOutputBuffer) { + CopyableAutoTArray channelData; + channelData.AppendElements(aChunk.ChannelData()); + + if (channelData.Length() < aOutputChannels) { + // Up-mix. Note that this might actually make channelData have more + // than aOutputChannels temporarily. + AudioChannelsUpMix(&channelData, aOutputChannels, + SilentChannel::ZeroChannel()); + } + if (channelData.Length() > aOutputChannels) { + // Down-mix. + DownmixAndInterleave(channelData, aChunk.mDuration, aVolume, + aOutputChannels, aOutputBuffer); + } else { + InterleaveAndConvertBuffer(channelData.Elements(), aChunk.mDuration, + aVolume, aOutputChannels, aOutputBuffer); + } +} + +} // namespace mozilla + +#endif /* MOZILLA_AUDIOSEGMENT_H_ */ diff --git a/dom/media/AudioStream.cpp b/dom/media/AudioStream.cpp new file mode 100644 index 0000000000..7d80a3738e --- /dev/null +++ b/dom/media/AudioStream.cpp @@ -0,0 +1,756 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#include +#include +#include +#include "mozilla/Logging.h" +#include "prdtoa.h" +#include "AudioStream.h" +#include "VideoUtils.h" +#include "mozilla/dom/AudioDeviceInfo.h" +#include "mozilla/Monitor.h" +#include "mozilla/Mutex.h" +#include "mozilla/Sprintf.h" +#include "mozilla/Unused.h" +#include +#include "mozilla/Telemetry.h" +#include "CubebUtils.h" +#include "nsNativeCharsetUtils.h" +#include "nsPrintfCString.h" +#include "AudioConverter.h" +#include "UnderrunHandler.h" +#if defined(XP_WIN) +# include "nsXULAppAPI.h" +#endif +#include "Tracing.h" +#include "webaudio/blink/DenormalDisabler.h" +#include "CallbackThreadRegistry.h" +#include "mozilla/StaticPrefs_media.h" + +#include "RLBoxSoundTouch.h" + +namespace mozilla { + +#undef LOG +#undef LOGW +#undef LOGE + +LazyLogModule gAudioStreamLog("AudioStream"); +// For simple logs +#define LOG(x, ...) \ + MOZ_LOG(gAudioStreamLog, mozilla::LogLevel::Debug, \ + ("%p " x, this, ##__VA_ARGS__)) +#define LOGW(x, ...) \ + MOZ_LOG(gAudioStreamLog, mozilla::LogLevel::Warning, \ + ("%p " x, this, ##__VA_ARGS__)) +#define LOGE(x, ...) 
\ + NS_DebugBreak(NS_DEBUG_WARNING, \ + nsPrintfCString("%p " x, this, ##__VA_ARGS__).get(), nullptr, \ + __FILE__, __LINE__) + +/** + * Keep a list of frames sent to the audio engine in each DataCallback along + * with the playback rate at the moment. Since the playback rate and number of + * underrun frames can vary in each callback. We need to keep the whole history + * in order to calculate the playback position of the audio engine correctly. + */ +class FrameHistory { + struct Chunk { + uint32_t servicedFrames; + uint32_t totalFrames; + uint32_t rate; + }; + + template + static T FramesToUs(uint32_t frames, uint32_t rate) { + return static_cast(frames) * USECS_PER_S / rate; + } + + public: + FrameHistory() : mBaseOffset(0), mBasePosition(0) {} + + void Append(uint32_t aServiced, uint32_t aUnderrun, uint32_t aRate) { + /* In most case where playback rate stays the same and we don't underrun + * frames, we are able to merge chunks to avoid lose of precision to add up + * in compressing chunks into |mBaseOffset| and |mBasePosition|. + */ + if (!mChunks.IsEmpty()) { + Chunk& c = mChunks.LastElement(); + // 2 chunks (c1 and c2) can be merged when rate is the same and + // adjacent frames are zero. That is, underrun frames in c1 are zero + // or serviced frames in c2 are zero. + if (c.rate == aRate && + (c.servicedFrames == c.totalFrames || aServiced == 0)) { + c.servicedFrames += aServiced; + c.totalFrames += aServiced + aUnderrun; + return; + } + } + Chunk* p = mChunks.AppendElement(); + p->servicedFrames = aServiced; + p->totalFrames = aServiced + aUnderrun; + p->rate = aRate; + } + + /** + * @param frames The playback position in frames of the audio engine. + * @return The playback position in microseconds of the audio engine, + * adjusted by playback rate changes and underrun frames. + */ + int64_t GetPosition(int64_t frames) { + // playback position should not go backward. + MOZ_ASSERT(frames >= mBaseOffset); + while (true) { + if (mChunks.IsEmpty()) { + return static_cast(mBasePosition); + } + const Chunk& c = mChunks[0]; + if (frames <= mBaseOffset + c.totalFrames) { + uint32_t delta = frames - mBaseOffset; + delta = std::min(delta, c.servicedFrames); + return static_cast(mBasePosition) + + FramesToUs(delta, c.rate); + } + // Since the playback position of the audio engine will not go backward, + // we are able to compress chunks so that |mChunks| won't grow + // unlimitedly. Note that we lose precision in converting integers into + // floats and inaccuracy will accumulate over time. However, for a 24hr + // long, sample rate = 44.1k file, the error will be less than 1 + // microsecond after playing 24 hours. So we are fine with that. 
+ mBaseOffset += c.totalFrames; + mBasePosition += FramesToUs(c.servicedFrames, c.rate); + mChunks.RemoveElementAt(0); + } + } + + private: + AutoTArray mChunks; + int64_t mBaseOffset; + double mBasePosition; +}; + +AudioStream::AudioStream(DataSource& aSource, uint32_t aInRate, + uint32_t aOutputChannels, + AudioConfig::ChannelLayout::ChannelMap aChannelMap) + : mTimeStretcher(nullptr), + mAudioClock(aInRate), + mChannelMap(aChannelMap), + mMonitor("AudioStream"), + mOutChannels(aOutputChannels), + mState(INITIALIZED), + mDataSource(aSource), + mAudioThreadId(ProfilerThreadId{}), + mSandboxed(CubebUtils::SandboxEnabled()), + mPlaybackComplete(false), + mPlaybackRate(1.0f), + mPreservesPitch(true), + mCallbacksStarted(false) {} + +AudioStream::~AudioStream() { + LOG("deleted, state %d", mState.load()); + MOZ_ASSERT(mState == SHUTDOWN && !mCubebStream, + "Should've called ShutDown() before deleting an AudioStream"); +} + +size_t AudioStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + size_t amount = aMallocSizeOf(this); + + // Possibly add in the future: + // - mTimeStretcher + // - mCubebStream + + return amount; +} + +nsresult AudioStream::EnsureTimeStretcherInitialized() { + AssertIsOnAudioThread(); + if (!mTimeStretcher) { + mTimeStretcher = new RLBoxSoundTouch(); + mTimeStretcher->setSampleRate(mAudioClock.GetInputRate()); + mTimeStretcher->setChannels(mOutChannels); + mTimeStretcher->setPitch(1.0); + + // SoundTouch v2.1.2 uses automatic time-stretch settings with the following + // values: + // Tempo 0.5: 90ms sequence, 20ms seekwindow, 8ms overlap + // Tempo 2.0: 40ms sequence, 15ms seekwindow, 8ms overlap + // We are going to use a smaller 10ms sequence size to improve speech + // clarity, giving more resolution at high tempo and less reverb at low + // tempo. Maintain 15ms seekwindow and 8ms overlap for smoothness. + mTimeStretcher->setSetting( + SETTING_SEQUENCE_MS, + StaticPrefs::media_audio_playbackrate_soundtouch_sequence_ms()); + mTimeStretcher->setSetting( + SETTING_SEEKWINDOW_MS, + StaticPrefs::media_audio_playbackrate_soundtouch_seekwindow_ms()); + mTimeStretcher->setSetting( + SETTING_OVERLAP_MS, + StaticPrefs::media_audio_playbackrate_soundtouch_overlap_ms()); + } + return NS_OK; +} + +nsresult AudioStream::SetPlaybackRate(double aPlaybackRate) { + TRACE_COMMENT("AudioStream::SetPlaybackRate", "%f", aPlaybackRate); + NS_ASSERTION( + aPlaybackRate > 0.0, + "Can't handle negative or null playbackrate in the AudioStream."); + if (aPlaybackRate == mPlaybackRate) { + return NS_OK; + } + + mPlaybackRate = static_cast(aPlaybackRate); + + return NS_OK; +} + +nsresult AudioStream::SetPreservesPitch(bool aPreservesPitch) { + TRACE_COMMENT("AudioStream::SetPreservesPitch", "%d", aPreservesPitch); + if (aPreservesPitch == mPreservesPitch) { + return NS_OK; + } + + mPreservesPitch = aPreservesPitch; + + return NS_OK; +} + +template +int AudioStream::InvokeCubeb(Function aFunction, Args&&... 
aArgs) { + mMonitor.AssertCurrentThreadOwns(); + MonitorAutoUnlock mon(mMonitor); + return aFunction(mCubebStream.get(), std::forward(aArgs)...); +} + +nsresult AudioStream::Init(AudioDeviceInfo* aSinkInfo) + MOZ_NO_THREAD_SAFETY_ANALYSIS { + auto startTime = TimeStamp::Now(); + TRACE("AudioStream::Init"); + + LOG("%s channels: %d, rate: %d", __FUNCTION__, mOutChannels, + mAudioClock.GetInputRate()); + + mSinkInfo = aSinkInfo; + + cubeb_stream_params params; + params.rate = mAudioClock.GetInputRate(); + params.channels = mOutChannels; + params.layout = static_cast(mChannelMap); + params.format = CubebUtils::ToCubebFormat::value; + params.prefs = CubebUtils::GetDefaultStreamPrefs(CUBEB_DEVICE_TYPE_OUTPUT); + + // This is noop if MOZ_DUMP_AUDIO is not set. + mDumpFile.Open("AudioStream", mOutChannels, mAudioClock.GetInputRate()); + + RefPtr handle = CubebUtils::GetCubeb(); + if (!handle) { + LOGE("Can't get cubeb context!"); + CubebUtils::ReportCubebStreamInitFailure(true); + return NS_ERROR_DOM_MEDIA_CUBEB_INITIALIZATION_ERR; + } + + mCubeb = handle; + return OpenCubeb(handle->Context(), params, startTime, + CubebUtils::GetFirstStream()); +} + +nsresult AudioStream::OpenCubeb(cubeb* aContext, cubeb_stream_params& aParams, + TimeStamp aStartTime, bool aIsFirst) { + TRACE("AudioStream::OpenCubeb"); + MOZ_ASSERT(aContext); + + cubeb_stream* stream = nullptr; + /* Convert from milliseconds to frames. */ + uint32_t latency_frames = + CubebUtils::GetCubebPlaybackLatencyInMilliseconds() * aParams.rate / 1000; + cubeb_devid deviceID = nullptr; + if (mSinkInfo && mSinkInfo->DeviceID()) { + deviceID = mSinkInfo->DeviceID(); + } + if (CubebUtils::CubebStreamInit(aContext, &stream, "AudioStream", nullptr, + nullptr, deviceID, &aParams, latency_frames, + DataCallback_S, StateCallback_S, + this) == CUBEB_OK) { + mCubebStream.reset(stream); + CubebUtils::ReportCubebBackendUsed(); + } else { + LOGE("OpenCubeb() failed to init cubeb"); + CubebUtils::ReportCubebStreamInitFailure(aIsFirst); + return NS_ERROR_FAILURE; + } + + TimeDuration timeDelta = TimeStamp::Now() - aStartTime; + LOG("creation time %sfirst: %u ms", aIsFirst ? "" : "not ", + (uint32_t)timeDelta.ToMilliseconds()); + + return NS_OK; +} + +void AudioStream::SetVolume(double aVolume) { + TRACE_COMMENT("AudioStream::SetVolume", "%f", aVolume); + MOZ_ASSERT(aVolume >= 0.0 && aVolume <= 1.0, "Invalid volume"); + + MOZ_ASSERT(mState != SHUTDOWN, "Don't set volume after shutdown."); + if (mState == ERRORED) { + return; + } + + MonitorAutoLock mon(mMonitor); + if (InvokeCubeb(cubeb_stream_set_volume, + aVolume * CubebUtils::GetVolumeScale()) != CUBEB_OK) { + LOGE("Could not change volume on cubeb stream."); + } +} + +void AudioStream::SetStreamName(const nsAString& aStreamName) { + TRACE("AudioStream::SetStreamName"); + + nsAutoCString aRawStreamName; + nsresult rv = NS_CopyUnicodeToNative(aStreamName, aRawStreamName); + + if (NS_FAILED(rv) || aStreamName.IsEmpty()) { + return; + } + + MonitorAutoLock mon(mMonitor); + if (InvokeCubeb(cubeb_stream_set_name, aRawStreamName.get()) != CUBEB_OK) { + LOGE("Could not set cubeb stream name."); + } +} + +RefPtr AudioStream::Start() { + TRACE("AudioStream::Start"); + MOZ_ASSERT(mState == INITIALIZED); + mState = STARTED; + RefPtr promise; + { + MonitorAutoLock mon(mMonitor); + // As cubeb might call audio stream's state callback very soon after we + // start cubeb, we have to create the promise beforehand in order to handle + // the case where we immediately get `drained`. 
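+    // (Otherwise a CUBEB_STATE_DRAINED delivered from the cubeb thread before
+    // the promise exists would hit ResolveIfExists() on an empty holder, and
+    // the promise returned from Start() would miss that resolution.)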
+ promise = mEndedPromise.Ensure(__func__); + mPlaybackComplete = false; + + if (InvokeCubeb(cubeb_stream_start) != CUBEB_OK) { + mState = ERRORED; + mEndedPromise.RejectIfExists(NS_ERROR_FAILURE, __func__); + } + + LOG("started, state %s", mState == STARTED ? "STARTED" + : mState == DRAINED ? "DRAINED" + : "ERRORED"); + } + return promise; +} + +void AudioStream::Pause() { + TRACE("AudioStream::Pause"); + MOZ_ASSERT(mState != INITIALIZED, "Must be Start()ed."); + MOZ_ASSERT(mState != STOPPED, "Already Pause()ed."); + MOZ_ASSERT(mState != SHUTDOWN, "Already ShutDown()ed."); + + // Do nothing if we are already drained or errored. + if (mState == DRAINED || mState == ERRORED) { + return; + } + + MonitorAutoLock mon(mMonitor); + if (InvokeCubeb(cubeb_stream_stop) != CUBEB_OK) { + mState = ERRORED; + } else if (mState != DRAINED && mState != ERRORED) { + // Don't transition to other states if we are already + // drained or errored. + mState = STOPPED; + } +} + +void AudioStream::Resume() { + TRACE("AudioStream::Resume"); + MOZ_ASSERT(mState != INITIALIZED, "Must be Start()ed."); + MOZ_ASSERT(mState != STARTED, "Already Start()ed."); + MOZ_ASSERT(mState != SHUTDOWN, "Already ShutDown()ed."); + + // Do nothing if we are already drained or errored. + if (mState == DRAINED || mState == ERRORED) { + return; + } + + MonitorAutoLock mon(mMonitor); + if (InvokeCubeb(cubeb_stream_start) != CUBEB_OK) { + mState = ERRORED; + } else if (mState != DRAINED && mState != ERRORED) { + // Don't transition to other states if we are already + // drained or errored. + mState = STARTED; + } +} + +void AudioStream::ShutDown() { + TRACE("AudioStream::ShutDown"); + LOG("ShutDown, state %d", mState.load()); + + MonitorAutoLock mon(mMonitor); + if (mCubebStream) { + // Force stop to put the cubeb stream in a stable state before deletion. + InvokeCubeb(cubeb_stream_stop); + // Must not try to shut down cubeb from within the lock! wasapi may still + // call our callback after Pause()/stop()!?! Bug 996162 + cubeb_stream* cubeb = mCubebStream.release(); + MonitorAutoUnlock unlock(mMonitor); + cubeb_stream_destroy(cubeb); + } + + // After `cubeb_stream_stop` has been called, there is no audio thread + // anymore. We can delete the time stretcher. + if (mTimeStretcher) { + delete mTimeStretcher; + mTimeStretcher = nullptr; + } + + mState = SHUTDOWN; + mEndedPromise.ResolveIfExists(true, __func__); +} + +int64_t AudioStream::GetPosition() { + TRACE("AudioStream::GetPosition"); +#ifndef XP_MACOSX + MonitorAutoLock mon(mMonitor); +#endif + int64_t frames = GetPositionInFramesUnlocked(); + return frames >= 0 ? mAudioClock.GetPosition(frames) : -1; +} + +int64_t AudioStream::GetPositionInFrames() { + TRACE("AudioStream::GetPositionInFrames"); +#ifndef XP_MACOSX + MonitorAutoLock mon(mMonitor); +#endif + int64_t frames = GetPositionInFramesUnlocked(); + + return frames >= 0 ? 
mAudioClock.GetPositionInFrames(frames) : -1; +} + +int64_t AudioStream::GetPositionInFramesUnlocked() { + TRACE("AudioStream::GetPositionInFramesUnlocked"); +#ifndef XP_MACOSX + mMonitor.AssertCurrentThreadOwns(); +#endif + + if (mState == ERRORED) { + return -1; + } + + uint64_t position = 0; + int rv; + +#ifndef XP_MACOSX + rv = InvokeCubeb(cubeb_stream_get_position, &position); +#else + rv = cubeb_stream_get_position(mCubebStream.get(), &position); +#endif + + if (rv != CUBEB_OK) { + return -1; + } + return static_cast(std::min(position, INT64_MAX)); +} + +bool AudioStream::IsValidAudioFormat(Chunk* aChunk) { + if (aChunk->Rate() != mAudioClock.GetInputRate()) { + LOGW("mismatched sample %u, mInRate=%u", aChunk->Rate(), + mAudioClock.GetInputRate()); + return false; + } + + return aChunk->Channels() <= 8; +} + +void AudioStream::GetUnprocessed(AudioBufferWriter& aWriter) { + TRACE("AudioStream::GetUnprocessed"); + AssertIsOnAudioThread(); + // Flush the timestretcher pipeline, if we were playing using a playback rate + // other than 1.0. + if (mTimeStretcher) { + // Get number of samples and based on this either receive samples or write + // silence. At worst, the attacker can supply weird sound samples or + // result in us writing silence. + auto numSamples = mTimeStretcher->numSamples().unverified_safe_because( + "We only use this to decide whether to receive samples or write " + "silence."); + if (numSamples) { + RLBoxSoundTouch* timeStretcher = mTimeStretcher; + aWriter.Write( + [timeStretcher](AudioDataValue* aPtr, uint32_t aFrames) { + return timeStretcher->receiveSamples(aPtr, aFrames); + }, + aWriter.Available()); + + // TODO: There might be still unprocessed samples in the stretcher. + // We should either remove or flush them so they won't be in the output + // next time we switch a playback rate other than 1.0. + mTimeStretcher->numUnprocessedSamples().copy_and_verify([](auto samples) { + NS_WARNING_ASSERTION(samples == 0, "no samples"); + }); + } else { + // Don't need it anymore: playbackRate is 1.0, and the time stretcher has + // been flushed. + delete mTimeStretcher; + mTimeStretcher = nullptr; + } + } + + while (aWriter.Available() > 0) { + uint32_t count = mDataSource.PopFrames(aWriter.Ptr(), aWriter.Available(), + mAudioThreadChanged); + if (count == 0) { + break; + } + aWriter.Advance(count); + } +} + +void AudioStream::GetTimeStretched(AudioBufferWriter& aWriter) { + TRACE("AudioStream::GetTimeStretched"); + AssertIsOnAudioThread(); + if (EnsureTimeStretcherInitialized() != NS_OK) { + return; + } + + uint32_t toPopFrames = + ceil(aWriter.Available() * mAudioClock.GetPlaybackRate()); + + // At each iteration, get number of samples and (based on this) write from + // the data source or silence. At worst, if the number of samples is a lie + // (i.e., under attacker control) we'll either not write anything or keep + // writing noise. This is safe because all the memory operations within the + // loop (and after) are checked. + while (mTimeStretcher->numSamples().unverified_safe_because( + "Only used to decide whether to put samples.") < + aWriter.Available()) { + // pop into a temp buffer, and put into the stretcher. + AutoTArray buf; + auto size = CheckedUint32(mOutChannels) * toPopFrames; + if (!size.isValid()) { + // The overflow should not happen in normal case. 
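+        // (For scale, purely as an illustration: even 8 output channels, a
+        // 4096-frame callback and a 16x playback rate give
+        // 8 * ceil(4096 * 16) = 524288, far below UINT32_MAX, so reaching
+        // this path implies corrupt member data rather than a plausible
+        // request.)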
+ LOGW("Invalid member data: %d channels, %d frames", mOutChannels, + toPopFrames); + return; + } + buf.SetLength(size.value()); + // ensure no variable channel count or something like that + uint32_t count = + mDataSource.PopFrames(buf.Elements(), toPopFrames, mAudioThreadChanged); + if (count == 0) { + break; + } + mTimeStretcher->putSamples(buf.Elements(), count); + } + + auto* timeStretcher = mTimeStretcher; + aWriter.Write( + [timeStretcher](AudioDataValue* aPtr, uint32_t aFrames) { + return timeStretcher->receiveSamples(aPtr, aFrames); + }, + aWriter.Available()); +} + +bool AudioStream::CheckThreadIdChanged() { + ProfilerThreadId id = profiler_current_thread_id(); + if (id != mAudioThreadId) { + mAudioThreadId = id; + mAudioThreadChanged = true; + return true; + } + mAudioThreadChanged = false; + return false; +} + +void AudioStream::AssertIsOnAudioThread() const { + // This can be called right after CheckThreadIdChanged, because the audio + // thread can change when not sandboxed. + MOZ_ASSERT(mAudioThreadId.load() == profiler_current_thread_id()); +} + +void AudioStream::UpdatePlaybackRateIfNeeded() { + AssertIsOnAudioThread(); + if (mAudioClock.GetPreservesPitch() == mPreservesPitch && + mAudioClock.GetPlaybackRate() == mPlaybackRate) { + return; + } + + EnsureTimeStretcherInitialized(); + + mAudioClock.SetPlaybackRate(mPlaybackRate); + mAudioClock.SetPreservesPitch(mPreservesPitch); + + if (mPreservesPitch) { + mTimeStretcher->setTempo(mPlaybackRate); + mTimeStretcher->setRate(1.0f); + } else { + mTimeStretcher->setTempo(1.0f); + mTimeStretcher->setRate(mPlaybackRate); + } +} + +long AudioStream::DataCallback(void* aBuffer, long aFrames) { + if (CheckThreadIdChanged() && !mSandboxed) { + CallbackThreadRegistry::Get()->Register(mAudioThreadId, + "NativeAudioCallback"); + } + WebCore::DenormalDisabler disabler; + if (!mCallbacksStarted) { + mCallbacksStarted = true; + } + + TRACE_AUDIO_CALLBACK_BUDGET("AudioStream real-time budget", aFrames, + mAudioClock.GetInputRate()); + TRACE("AudioStream::DataCallback"); + MOZ_ASSERT(mState != SHUTDOWN, "No data callback after shutdown"); + + if (SoftRealTimeLimitReached()) { + DemoteThreadFromRealTime(); + } + + UpdatePlaybackRateIfNeeded(); + + auto writer = AudioBufferWriter( + Span(reinterpret_cast(aBuffer), + mOutChannels * aFrames), + mOutChannels, aFrames); + + if (mAudioClock.GetInputRate() == mAudioClock.GetOutputRate()) { + GetUnprocessed(writer); + } else { + GetTimeStretched(writer); + } + + // Always send audible frames first, and silent frames later. + // Otherwise it will break the assumption of FrameHistory. + if (!mDataSource.Ended()) { +#ifndef XP_MACOSX + MonitorAutoLock mon(mMonitor); +#endif + mAudioClock.UpdateFrameHistory(aFrames - writer.Available(), + writer.Available(), mAudioThreadChanged); + if (writer.Available() > 0) { + TRACE_COMMENT("AudioStream::DataCallback", "Underrun: %d frames missing", + writer.Available()); + LOGW("lost %d frames", writer.Available()); + writer.WriteZeros(writer.Available()); + } + } else { + // No more new data in the data source, and the drain has completed. We + // don't need the time stretcher anymore at this point. 
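+    // (A non-zero writer.Available() here means even the time stretcher could
+    // not fill the rest of the buffer, i.e. its remaining output is exhausted
+    // as well.)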
+ if (mTimeStretcher && writer.Available()) { + delete mTimeStretcher; + mTimeStretcher = nullptr; + } +#ifndef XP_MACOSX + MonitorAutoLock mon(mMonitor); +#endif + mAudioClock.UpdateFrameHistory(aFrames - writer.Available(), 0, + mAudioThreadChanged); + } + + mDumpFile.Write(static_cast(aBuffer), + aFrames * mOutChannels); + + if (!mSandboxed && writer.Available() != 0) { + CallbackThreadRegistry::Get()->Unregister(mAudioThreadId); + } + return aFrames - writer.Available(); +} + +void AudioStream::StateCallback(cubeb_state aState) { + MOZ_ASSERT(mState != SHUTDOWN, "No state callback after shutdown"); + LOG("StateCallback, mState=%d cubeb_state=%d", mState.load(), aState); + + MonitorAutoLock mon(mMonitor); + if (aState == CUBEB_STATE_DRAINED) { + LOG("Drained"); + mState = DRAINED; + mPlaybackComplete = true; + mEndedPromise.ResolveIfExists(true, __func__); + } else if (aState == CUBEB_STATE_ERROR) { + LOGE("StateCallback() state %d cubeb error", mState.load()); + mState = ERRORED; + mPlaybackComplete = true; + mEndedPromise.RejectIfExists(NS_ERROR_FAILURE, __func__); + } +} + +bool AudioStream::IsPlaybackCompleted() const { return mPlaybackComplete; } + +AudioClock::AudioClock(uint32_t aInRate) + : mOutRate(aInRate), + mInRate(aInRate), + mPreservesPitch(true), + mFrameHistory(new FrameHistory()) {} + +// Audio thread only +void AudioClock::UpdateFrameHistory(uint32_t aServiced, uint32_t aUnderrun, + bool aAudioThreadChanged) { +#ifdef XP_MACOSX + if (aAudioThreadChanged) { + mCallbackInfoQueue.ResetProducerThreadId(); + } + // Flush the local items, if any, and then attempt to enqueue the current + // item. This is only a fallback mechanism, under non-critical load this is + // just going to enqueue an item in the queue. + while (!mAudioThreadCallbackInfo.IsEmpty()) { + CallbackInfo& info = mAudioThreadCallbackInfo[0]; + // If still full, keep it audio-thread side for now. + if (mCallbackInfoQueue.Enqueue(info) != 1) { + break; + } + mAudioThreadCallbackInfo.RemoveElementAt(0); + } + CallbackInfo info(aServiced, aUnderrun, mOutRate); + if (mCallbackInfoQueue.Enqueue(info) != 1) { + NS_WARNING( + "mCallbackInfoQueue full, storing the values in the audio thread."); + mAudioThreadCallbackInfo.AppendElement(info); + } +#else + MutexAutoLock lock(mMutex); + mFrameHistory->Append(aServiced, aUnderrun, mOutRate); +#endif +} + +int64_t AudioClock::GetPositionInFrames(int64_t aFrames) { + CheckedInt64 v = UsecsToFrames(GetPosition(aFrames), mInRate); + return v.isValid() ? v.value() : -1; +} + +int64_t AudioClock::GetPosition(int64_t frames) { +#ifdef XP_MACOSX + // Dequeue all history info, and apply them before returning the position + // based on frame history. 
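+  // (This is the consumer side of the single-producer single-consumer queue
+  // filled by UpdateFrameHistory(); on macOS the per-callback info is handed
+  // over this way, presumably so that the real-time audio callback never has
+  // to take a lock.)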
+ CallbackInfo info; + while (mCallbackInfoQueue.Dequeue(&info, 1)) { + mFrameHistory->Append(info.mServiced, info.mUnderrun, info.mOutputRate); + } +#else + MutexAutoLock lock(mMutex); +#endif + return mFrameHistory->GetPosition(frames); +} + +void AudioClock::SetPlaybackRate(double aPlaybackRate) { + mOutRate = static_cast(mInRate / aPlaybackRate); +} + +double AudioClock::GetPlaybackRate() const { + return static_cast(mInRate) / mOutRate; +} + +void AudioClock::SetPreservesPitch(bool aPreservesPitch) { + mPreservesPitch = aPreservesPitch; +} + +bool AudioClock::GetPreservesPitch() const { return mPreservesPitch; } + +} // namespace mozilla diff --git a/dom/media/AudioStream.h b/dom/media/AudioStream.h new file mode 100644 index 0000000000..11a61b9fe7 --- /dev/null +++ b/dom/media/AudioStream.h @@ -0,0 +1,382 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#if !defined(AudioStream_h_) +# define AudioStream_h_ + +# include "AudioSampleFormat.h" +# include "CubebUtils.h" +# include "MediaInfo.h" +# include "MediaSink.h" +# include "mozilla/Atomics.h" +# include "mozilla/Monitor.h" +# include "mozilla/MozPromise.h" +# include "mozilla/ProfilerUtils.h" +# include "mozilla/RefPtr.h" +# include "mozilla/Result.h" +# include "mozilla/TimeStamp.h" +# include "mozilla/UniquePtr.h" +# include "mozilla/SPSCQueue.h" +# include "nsCOMPtr.h" +# include "nsThreadUtils.h" +# include "WavDumper.h" + +namespace mozilla { + +struct CubebDestroyPolicy { + void operator()(cubeb_stream* aStream) const { + cubeb_stream_destroy(aStream); + } +}; + +class AudioStream; +class FrameHistory; +class AudioConfig; +class RLBoxSoundTouch; + +// A struct that contains the number of frames serviced or underrun by a +// callback, alongside the sample-rate for this callback (in case of playback +// rate change, it can be variable). +struct CallbackInfo { + CallbackInfo() = default; + CallbackInfo(uint32_t aServiced, uint32_t aUnderrun, uint32_t aOutputRate) + : mServiced(aServiced), mUnderrun(aUnderrun), mOutputRate(aOutputRate) {} + uint32_t mServiced = 0; + uint32_t mUnderrun = 0; + uint32_t mOutputRate = 0; +}; + +class AudioClock { + public: + explicit AudioClock(uint32_t aInRate); + + // Update the number of samples that has been written in the audio backend. + // Called on the audio thread only. + void UpdateFrameHistory(uint32_t aServiced, uint32_t aUnderrun, + bool aAudioThreadChanged); + + /** + * @param aFrames The playback position in frames of the audio engine. + * @return The playback position in frames of the stream, + * adjusted by playback rate changes and underrun frames. + */ + int64_t GetPositionInFrames(int64_t aFrames); + + /** + * @param frames The playback position in frames of the audio engine. + * @return The playback position in microseconds of the stream, + * adjusted by playback rate changes and underrun frames. + */ + int64_t GetPosition(int64_t frames); + + // Set the playback rate. + // Called on the audio thread only. + void SetPlaybackRate(double aPlaybackRate); + // Get the current playback rate. + // Called on the audio thread only. + double GetPlaybackRate() const; + // Set if we are preserving the pitch. + // Called on the audio thread only. 
+ void SetPreservesPitch(bool aPreservesPitch); + // Get the current pitch preservation state. + // Called on the audio thread only. + bool GetPreservesPitch() const; + + // Called on either thread. + uint32_t GetInputRate() const { return mInRate; } + uint32_t GetOutputRate() const { return mOutRate; } + + private: + // Output rate in Hz (characteristic of the playback rate). Written on the + // audio thread, read on either thread. + Atomic mOutRate; + // Input rate in Hz (characteristic of the media being played). + const uint32_t mInRate; + // True if the we are timestretching, false if we are resampling. Accessed on + // the audio thread only. + bool mPreservesPitch; + // The history of frames sent to the audio engine in each DataCallback. + // Only accessed from non-audio threads on macOS, accessed on both threads and + // protected by the AudioStream monitor on other platforms. + const UniquePtr mFrameHistory +# ifndef XP_MACOSX + MOZ_GUARDED_BY(mMutex) +# endif + ; +# ifdef XP_MACOSX + // Enqueued on the audio thread, dequeued from the other thread. The maximum + // size of this queue has been chosen empirically. + SPSCQueue mCallbackInfoQueue{100}; + // If it isn't possible to send the callback info to the non-audio thread, + // store them here until it's possible to send them. This is an unlikely + // fallback path. The size of this array has been chosen empirically. Only + // ever accessed on the audio thread. + AutoTArray mAudioThreadCallbackInfo; +# else + Mutex mMutex{"AudioClock"}; +# endif +}; + +/* + * A bookkeeping class to track the read/write position of an audio buffer. + */ +class AudioBufferCursor { + public: + AudioBufferCursor(Span aSpan, uint32_t aChannels, + uint32_t aFrames) + : mChannels(aChannels), mSpan(aSpan), mFrames(aFrames) {} + + // Advance the cursor to account for frames that are consumed. + uint32_t Advance(uint32_t aFrames) { + MOZ_DIAGNOSTIC_ASSERT(Contains(aFrames)); + MOZ_ASSERT(mFrames >= aFrames); + mFrames -= aFrames; + mOffset += mChannels * aFrames; + return aFrames; + } + + // The number of frames available for read/write in this buffer. + uint32_t Available() const { return mFrames; } + + // Return a pointer where read/write should begin. + AudioDataValue* Ptr() const { + MOZ_DIAGNOSTIC_ASSERT(mOffset <= mSpan.Length()); + return mSpan.Elements() + mOffset; + } + + protected: + bool Contains(uint32_t aFrames) const { + return mSpan.Length() >= mOffset + mChannels * aFrames; + } + const uint32_t mChannels; + + private: + const Span mSpan; + size_t mOffset = 0; + uint32_t mFrames; +}; + +/* + * A helper class to encapsulate pointer arithmetic and provide means to modify + * the underlying audio buffer. + */ +class AudioBufferWriter : public AudioBufferCursor { + public: + AudioBufferWriter(Span aSpan, uint32_t aChannels, + uint32_t aFrames) + : AudioBufferCursor(aSpan, aChannels, aFrames) {} + + uint32_t WriteZeros(uint32_t aFrames) { + MOZ_DIAGNOSTIC_ASSERT(Contains(aFrames)); + memset(Ptr(), 0, sizeof(AudioDataValue) * mChannels * aFrames); + return Advance(aFrames); + } + + uint32_t Write(const AudioDataValue* aPtr, uint32_t aFrames) { + MOZ_DIAGNOSTIC_ASSERT(Contains(aFrames)); + memcpy(Ptr(), aPtr, sizeof(AudioDataValue) * mChannels * aFrames); + return Advance(aFrames); + } + + // Provide a write fuction to update the audio buffer with the following + // signature: uint32_t(const AudioDataValue* aPtr, uint32_t aFrames) + // aPtr: Pointer to the audio buffer. + // aFrames: The number of frames available in the buffer. 
+ // return: The number of frames actually written by the function. + template + uint32_t Write(const Function& aFunction, uint32_t aFrames) { + MOZ_DIAGNOSTIC_ASSERT(Contains(aFrames)); + return Advance(aFunction(Ptr(), aFrames)); + } + + using AudioBufferCursor::Available; +}; + +// Access to a single instance of this class must be synchronized by +// callers, or made from a single thread. One exception is that access to +// GetPosition, GetPositionInFrames, SetVolume, and Get{Rate,Channels}, +// SetMicrophoneActive is thread-safe without external synchronization. +class AudioStream final { + virtual ~AudioStream(); + + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioStream) + + class Chunk { + public: + // Return a pointer to the audio data. + virtual const AudioDataValue* Data() const = 0; + // Return the number of frames in this chunk. + virtual uint32_t Frames() const = 0; + // Return the number of audio channels. + virtual uint32_t Channels() const = 0; + // Return the sample rate of this chunk. + virtual uint32_t Rate() const = 0; + // Return a writable pointer for downmixing. + virtual AudioDataValue* GetWritable() const = 0; + virtual ~Chunk() = default; + }; + + class DataSource { + public: + // Attempt to acquire aFrames frames of audio, and returns the number of + // frames successfuly acquired. + virtual uint32_t PopFrames(AudioDataValue* aAudio, uint32_t aFrames, + bool aAudioThreadChanged) = 0; + // Return true if no more data will be added to the source. + virtual bool Ended() const = 0; + + protected: + virtual ~DataSource() = default; + }; + + // aOutputChannels is the number of audio channels (1 for mono, 2 for stereo, + // etc), aChannelMap is the indicator for channel layout(mono, stereo, 5.1 or + // 7.1 ). Initialize the audio stream.and aRate is the sample rate + // (22050Hz, 44100Hz, etc). + AudioStream(DataSource& aSource, uint32_t aInRate, uint32_t aOutputChannels, + AudioConfig::ChannelLayout::ChannelMap aChannelMap); + + nsresult Init(AudioDeviceInfo* aSinkInfo); + + // Closes the stream. All future use of the stream is an error. + void ShutDown(); + + // Set the current volume of the audio playback. This is a value from + // 0 (meaning muted) to 1 (meaning full volume). Thread-safe. + void SetVolume(double aVolume); + + void SetStreamName(const nsAString& aStreamName); + + // Start the stream. + RefPtr Start(); + + // Pause audio playback. + void Pause(); + + // Resume audio playback. + void Resume(); + + // Return the position in microseconds of the audio frame being played by + // the audio hardware, compensated for playback rate change. Thread-safe. + int64_t GetPosition(); + + // Return the position, measured in audio frames played since the stream + // was opened, of the audio hardware. Thread-safe. + int64_t GetPositionInFrames(); + + uint32_t GetOutChannels() const { return mOutChannels; } + + // Set playback rate as a multiple of the intrinsic playback rate. This is + // to be called only with aPlaybackRate > 0.0. + nsresult SetPlaybackRate(double aPlaybackRate); + // Switch between resampling (if false) and time stretching (if true, + // default). + nsresult SetPreservesPitch(bool aPreservesPitch); + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const; + + bool IsPlaybackCompleted() const; + + // Returns true if at least one DataCallback has been called. 
+ bool CallbackStarted() const { return mCallbacksStarted; } + + protected: + friend class AudioClock; + + // Return the position, measured in audio frames played since the stream was + // opened, of the audio hardware, not adjusted for the changes of playback + // rate or underrun frames. + // Caller must own the monitor. + int64_t GetPositionInFramesUnlocked(); + + private: + nsresult OpenCubeb(cubeb* aContext, cubeb_stream_params& aParams, + TimeStamp aStartTime, bool aIsFirst); + + static long DataCallback_S(cubeb_stream*, void* aThis, + const void* /* aInputBuffer */, + void* aOutputBuffer, long aFrames) { + return static_cast(aThis)->DataCallback(aOutputBuffer, + aFrames); + } + + static void StateCallback_S(cubeb_stream*, void* aThis, cubeb_state aState) { + static_cast(aThis)->StateCallback(aState); + } + + long DataCallback(void* aBuffer, long aFrames); + void StateCallback(cubeb_state aState); + + // Audio thread only + nsresult EnsureTimeStretcherInitialized(); + void GetUnprocessed(AudioBufferWriter& aWriter); + void GetTimeStretched(AudioBufferWriter& aWriter); + void UpdatePlaybackRateIfNeeded(); + + // Return true if audio frames are valid (correct sampling rate and valid + // channel count) otherwise false. + bool IsValidAudioFormat(Chunk* aChunk) MOZ_REQUIRES(mMonitor); + + template + int InvokeCubeb(Function aFunction, Args&&... aArgs) MOZ_REQUIRES(mMonitor); + bool CheckThreadIdChanged(); + void AssertIsOnAudioThread() const; + + RLBoxSoundTouch* mTimeStretcher; + AudioClock mAudioClock; + + WavDumper mDumpFile; + + const AudioConfig::ChannelLayout::ChannelMap mChannelMap; + + // The monitor is held to protect all access to member variables below. + Monitor mMonitor MOZ_UNANNOTATED; + + const uint32_t mOutChannels; + + // mCubebStream holds a bare pointer to cubeb, so we hold a ref on its behalf + RefPtr mCubeb; + // Owning reference to a cubeb_stream. Set in Init(), cleared in ShutDown, so + // no lock is needed to access. + UniquePtr mCubebStream; + + enum StreamState { + INITIALIZED, // Initialized, playback has not begun. + STARTED, // cubeb started. + STOPPED, // Stopped by a call to Pause(). + DRAINED, // StateCallback has indicated that the drain is complete. + ERRORED, // Stream disabled due to an internal error. + SHUTDOWN // ShutDown has been called + }; + + std::atomic mState; + + // DataSource::PopFrames can never be called concurrently. + // DataSource::IsEnded uses only atomics. + DataSource& mDataSource; + + // The device info of the current sink. If null + // the default device is used. It is set + // during the Init() in decoder thread. + RefPtr mSinkInfo; + // Contains the id of the audio thread, from profiler_get_thread_id. + std::atomic mAudioThreadId; + const bool mSandboxed = false; + + MozPromiseHolder mEndedPromise + MOZ_GUARDED_BY(mMonitor); + std::atomic mPlaybackComplete; + // Both written on the MDSM thread, read on the audio thread. + std::atomic mPlaybackRate; + std::atomic mPreservesPitch; + // Audio thread only + bool mAudioThreadChanged = false; + Atomic mCallbacksStarted; +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/AudioStreamTrack.cpp b/dom/media/AudioStreamTrack.cpp new file mode 100644 index 0000000000..d8ca9827b8 --- /dev/null +++ b/dom/media/AudioStreamTrack.cpp @@ -0,0 +1,55 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioStreamTrack.h" + +#include "MediaTrackGraph.h" +#include "nsContentUtils.h" + +namespace mozilla::dom { + +RefPtr AudioStreamTrack::AddAudioOutput( + void* aKey, AudioDeviceInfo* aSink) { + if (Ended()) { + return GenericPromise::CreateAndResolve(true, __func__); + } + + mTrack->AddAudioOutput(aKey, aSink); + return mTrack->Graph()->NotifyWhenDeviceStarted(aSink); +} + +void AudioStreamTrack::RemoveAudioOutput(void* aKey) { + if (Ended()) { + return; + } + + mTrack->RemoveAudioOutput(aKey); +} + +void AudioStreamTrack::SetAudioOutputVolume(void* aKey, float aVolume) { + if (Ended()) { + return; + } + + mTrack->SetAudioOutputVolume(aKey, aVolume); +} + +void AudioStreamTrack::GetLabel(nsAString& aLabel, CallerType aCallerType) { + nsIGlobalObject* global = + GetParentObject() ? GetParentObject()->AsGlobal() : nullptr; + if (nsContentUtils::ShouldResistFingerprinting(aCallerType, global, + RFPTarget::StreamTrackLabel)) { + aLabel.AssignLiteral("Internal Microphone"); + return; + } + MediaStreamTrack::GetLabel(aLabel, aCallerType); +} + +already_AddRefed AudioStreamTrack::CloneInternal() { + return do_AddRef(new AudioStreamTrack(mWindow, mInputTrack, mSource, + ReadyState(), Muted(), mConstraints)); +} + +} // namespace mozilla::dom diff --git a/dom/media/AudioStreamTrack.h b/dom/media/AudioStreamTrack.h new file mode 100644 index 0000000000..f958cfd43f --- /dev/null +++ b/dom/media/AudioStreamTrack.h @@ -0,0 +1,48 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef AUDIOSTREAMTRACK_H_ +#define AUDIOSTREAMTRACK_H_ + +#include "MediaStreamTrack.h" +#include "DOMMediaStream.h" +#include "CrossGraphPort.h" +#include "nsClassHashtable.h" + +namespace mozilla::dom { + +class AudioStreamTrack : public MediaStreamTrack { + public: + AudioStreamTrack( + nsPIDOMWindowInner* aWindow, mozilla::MediaTrack* aInputTrack, + MediaStreamTrackSource* aSource, + MediaStreamTrackState aReadyState = MediaStreamTrackState::Live, + bool aMuted = false, + const MediaTrackConstraints& aConstraints = MediaTrackConstraints()) + : MediaStreamTrack(aWindow, aInputTrack, aSource, aReadyState, aMuted, + aConstraints) {} + + AudioStreamTrack* AsAudioStreamTrack() override { return this; } + const AudioStreamTrack* AsAudioStreamTrack() const override { return this; } + + // Direct output to aSink, or the default output device if aSink is null. + // No more than one output may exist for a single aKey at any one time. + // Returns a promise that resolves when the device is processing audio. 
+ RefPtr AddAudioOutput(void* aKey, AudioDeviceInfo* aSink); + void RemoveAudioOutput(void* aKey); + void SetAudioOutputVolume(void* aKey, float aVolume); + + // WebIDL + void GetKind(nsAString& aKind) override { aKind.AssignLiteral("audio"); } + + void GetLabel(nsAString& aLabel, CallerType aCallerType) override; + + protected: + already_AddRefed CloneInternal() override; +}; + +} // namespace mozilla::dom + +#endif /* AUDIOSTREAMTRACK_H_ */ diff --git a/dom/media/AudioTrack.cpp b/dom/media/AudioTrack.cpp new file mode 100644 index 0000000000..e6ee43a0f8 --- /dev/null +++ b/dom/media/AudioTrack.cpp @@ -0,0 +1,70 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 et tw=78: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/dom/AudioStreamTrack.h" +#include "mozilla/dom/AudioTrack.h" +#include "mozilla/dom/AudioTrackBinding.h" +#include "mozilla/dom/AudioTrackList.h" +#include "mozilla/dom/HTMLMediaElement.h" + +namespace mozilla::dom { + +AudioTrack::AudioTrack(nsIGlobalObject* aOwnerGlobal, const nsAString& aId, + const nsAString& aKind, const nsAString& aLabel, + const nsAString& aLanguage, bool aEnabled, + AudioStreamTrack* aStreamTrack) + : MediaTrack(aOwnerGlobal, aId, aKind, aLabel, aLanguage), + mEnabled(aEnabled), + mAudioStreamTrack(aStreamTrack) {} + +AudioTrack::~AudioTrack() = default; + +NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioTrack, MediaTrack, mAudioStreamTrack) + +NS_IMPL_ADDREF_INHERITED(AudioTrack, MediaTrack) +NS_IMPL_RELEASE_INHERITED(AudioTrack, MediaTrack) +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioTrack) +NS_INTERFACE_MAP_END_INHERITING(MediaTrack) + +JSObject* AudioTrack::WrapObject(JSContext* aCx, + JS::Handle aGivenProto) { + return AudioTrack_Binding::Wrap(aCx, this, aGivenProto); +} + +void AudioTrack::SetEnabled(bool aEnabled) { + SetEnabledInternal(aEnabled, MediaTrack::DEFAULT); +} + +void AudioTrack::SetEnabledInternal(bool aEnabled, int aFlags) { + if (aEnabled == mEnabled) { + return; + } + mEnabled = aEnabled; + + // If this AudioTrack is no longer in its original AudioTrackList, then + // whether it is enabled or not has no effect on its original list. + if (!mList) { + return; + } + + if (mEnabled) { + HTMLMediaElement* element = mList->GetMediaElement(); + if (element) { + element->NotifyMediaTrackEnabled(this); + } + } else { + HTMLMediaElement* element = mList->GetMediaElement(); + if (element) { + element->NotifyMediaTrackDisabled(this); + } + } + + if (!(aFlags & MediaTrack::FIRE_NO_EVENTS)) { + mList->CreateAndDispatchChangeEvent(); + } +} + +} // namespace mozilla::dom diff --git a/dom/media/AudioTrack.h b/dom/media/AudioTrack.h new file mode 100644 index 0000000000..29104801ca --- /dev/null +++ b/dom/media/AudioTrack.h @@ -0,0 +1,52 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 et tw=78: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef mozilla_dom_AudioTrack_h +#define mozilla_dom_AudioTrack_h + +#include "MediaTrack.h" + +namespace mozilla::dom { + +class AudioStreamTrack; + +class AudioTrack : public MediaTrack { + public: + AudioTrack(nsIGlobalObject* aOwnerGlobal, const nsAString& aId, + const nsAString& aKind, const nsAString& aLabel, + const nsAString& aLanguage, bool aEnabled, + AudioStreamTrack* aStreamTrack = nullptr); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioTrack, MediaTrack) + + JSObject* WrapObject(JSContext* aCx, + JS::Handle aGivenProto) override; + + AudioTrack* AsAudioTrack() override { return this; } + + void SetEnabledInternal(bool aEnabled, int aFlags) override; + + // Get associated audio stream track when the audio track comes from + // MediaStream. This might be nullptr when the src of owning HTMLMediaElement + // is not MediaStream. + AudioStreamTrack* GetAudioStreamTrack() { return mAudioStreamTrack; } + + // WebIDL + bool Enabled() const { return mEnabled; } + + void SetEnabled(bool aEnabled); + + private: + virtual ~AudioTrack(); + + bool mEnabled; + RefPtr mAudioStreamTrack; +}; + +} // namespace mozilla::dom + +#endif // mozilla_dom_AudioTrack_h diff --git a/dom/media/AudioTrackList.cpp b/dom/media/AudioTrackList.cpp new file mode 100644 index 0000000000..0ba1f880e9 --- /dev/null +++ b/dom/media/AudioTrackList.cpp @@ -0,0 +1,32 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/dom/AudioTrack.h" +#include "mozilla/dom/AudioTrackList.h" +#include "mozilla/dom/AudioTrackListBinding.h" + +namespace mozilla::dom { + +JSObject* AudioTrackList::WrapObject(JSContext* aCx, + JS::Handle aGivenProto) { + return AudioTrackList_Binding::Wrap(aCx, this, aGivenProto); +} + +AudioTrack* AudioTrackList::operator[](uint32_t aIndex) { + MediaTrack* track = MediaTrackList::operator[](aIndex); + return track->AsAudioTrack(); +} + +AudioTrack* AudioTrackList::IndexedGetter(uint32_t aIndex, bool& aFound) { + MediaTrack* track = MediaTrackList::IndexedGetter(aIndex, aFound); + return track ? track->AsAudioTrack() : nullptr; +} + +AudioTrack* AudioTrackList::GetTrackById(const nsAString& aId) { + MediaTrack* track = MediaTrackList::GetTrackById(aId); + return track ? track->AsAudioTrack() : nullptr; +} + +} // namespace mozilla::dom diff --git a/dom/media/AudioTrackList.h b/dom/media/AudioTrackList.h new file mode 100644 index 0000000000..3048eeea35 --- /dev/null +++ b/dom/media/AudioTrackList.h @@ -0,0 +1,38 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 et tw=78: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef mozilla_dom_AudioTrackList_h +#define mozilla_dom_AudioTrackList_h + +#include "MediaTrack.h" +#include "MediaTrackList.h" + +namespace mozilla::dom { + +class AudioTrack; + +class AudioTrackList : public MediaTrackList { + public: + AudioTrackList(nsIGlobalObject* aOwnerObject, HTMLMediaElement* aMediaElement) + : MediaTrackList(aOwnerObject, aMediaElement) {} + + JSObject* WrapObject(JSContext* aCx, + JS::Handle aGivenProto) override; + + AudioTrack* operator[](uint32_t aIndex); + + // WebIDL + AudioTrack* IndexedGetter(uint32_t aIndex, bool& aFound); + + AudioTrack* GetTrackById(const nsAString& aId); + + protected: + AudioTrackList* AsAudioTrackList() override { return this; } +}; + +} // namespace mozilla::dom + +#endif // mozilla_dom_AudioTrackList_h diff --git a/dom/media/BackgroundVideoDecodingPermissionObserver.cpp b/dom/media/BackgroundVideoDecodingPermissionObserver.cpp new file mode 100644 index 0000000000..f91629a090 --- /dev/null +++ b/dom/media/BackgroundVideoDecodingPermissionObserver.cpp @@ -0,0 +1,149 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "BackgroundVideoDecodingPermissionObserver.h" + +#include "mozilla/AsyncEventDispatcher.h" +#include "mozilla/dom/BrowsingContext.h" +#include "mozilla/StaticPrefs_media.h" +#include "MediaDecoder.h" +#include "nsContentUtils.h" +#include "mozilla/dom/Document.h" +#include "mozilla/Services.h" +#include "nsIObserverService.h" + +namespace mozilla { + +BackgroundVideoDecodingPermissionObserver:: + BackgroundVideoDecodingPermissionObserver(MediaDecoder* aDecoder) + : mDecoder(aDecoder), mIsRegisteredForEvent(false) { + MOZ_ASSERT(mDecoder); +} + +NS_IMETHODIMP +BackgroundVideoDecodingPermissionObserver::Observe(nsISupports* aSubject, + const char* aTopic, + const char16_t* aData) { + if (!StaticPrefs::media_resume_background_video_on_tabhover()) { + return NS_OK; + } + + if (!IsValidEventSender(aSubject)) { + return NS_OK; + } + + if (strcmp(aTopic, "unselected-tab-hover") == 0) { + bool allowed = !NS_strcmp(aData, u"true"); + mDecoder->SetIsBackgroundVideoDecodingAllowed(allowed); + } + return NS_OK; +} + +void BackgroundVideoDecodingPermissionObserver::RegisterEvent() { + MOZ_ASSERT(!mIsRegisteredForEvent); + nsCOMPtr observerService = services::GetObserverService(); + if (observerService) { + observerService->AddObserver(this, "unselected-tab-hover", false); + mIsRegisteredForEvent = true; + if (nsContentUtils::IsInStableOrMetaStableState()) { + // Events shall not be fired synchronously to prevent anything visible + // from the scripts while we are in stable state. 
+ if (nsCOMPtr doc = GetOwnerDoc()) { + doc->Dispatch(NewRunnableMethod( + "BackgroundVideoDecodingPermissionObserver::EnableEvent", this, + &BackgroundVideoDecodingPermissionObserver::EnableEvent)); + } + } else { + EnableEvent(); + } + } +} + +void BackgroundVideoDecodingPermissionObserver::UnregisterEvent() { + MOZ_ASSERT(mIsRegisteredForEvent); + nsCOMPtr observerService = services::GetObserverService(); + if (observerService) { + observerService->RemoveObserver(this, "unselected-tab-hover"); + mIsRegisteredForEvent = false; + mDecoder->SetIsBackgroundVideoDecodingAllowed(false); + if (nsContentUtils::IsInStableOrMetaStableState()) { + // Events shall not be fired synchronously to prevent anything visible + // from the scripts while we are in stable state. + if (nsCOMPtr doc = GetOwnerDoc()) { + doc->Dispatch(NewRunnableMethod( + "BackgroundVideoDecodingPermissionObserver::DisableEvent", this, + &BackgroundVideoDecodingPermissionObserver::DisableEvent)); + } + } else { + DisableEvent(); + } + } +} + +BackgroundVideoDecodingPermissionObserver:: + ~BackgroundVideoDecodingPermissionObserver() { + MOZ_ASSERT(!mIsRegisteredForEvent); +} + +void BackgroundVideoDecodingPermissionObserver::EnableEvent() const { + // If we can't get document or outer window, then you can't reach the chrome + // either, so we don't need want to dispatch the event. + dom::Document* doc = GetOwnerDoc(); + if (!doc || !doc->GetWindow()) { + return; + } + + RefPtr asyncDispatcher = + new AsyncEventDispatcher(doc, u"UnselectedTabHover:Enable"_ns, + CanBubble::eYes, ChromeOnlyDispatch::eYes); + asyncDispatcher->PostDOMEvent(); +} + +void BackgroundVideoDecodingPermissionObserver::DisableEvent() const { + // If we can't get document or outer window, then you can't reach the chrome + // either, so we don't need want to dispatch the event. + dom::Document* doc = GetOwnerDoc(); + if (!doc || !doc->GetWindow()) { + return; + } + + RefPtr asyncDispatcher = + new AsyncEventDispatcher(doc, u"UnselectedTabHover:Disable"_ns, + CanBubble::eYes, ChromeOnlyDispatch::eYes); + asyncDispatcher->PostDOMEvent(); +} + +dom::BrowsingContext* BackgroundVideoDecodingPermissionObserver::GetOwnerBC() + const { + dom::Document* doc = GetOwnerDoc(); + return doc ? doc->GetBrowsingContext() : nullptr; +} + +dom::Document* BackgroundVideoDecodingPermissionObserver::GetOwnerDoc() const { + if (!mDecoder->GetOwner()) { + return nullptr; + } + + return mDecoder->GetOwner()->GetDocument(); +} + +bool BackgroundVideoDecodingPermissionObserver::IsValidEventSender( + nsISupports* aSubject) const { + nsCOMPtr senderInner(do_QueryInterface(aSubject)); + if (!senderInner) { + return false; + } + + RefPtr senderBC = senderInner->GetBrowsingContext(); + if (!senderBC) { + return false; + } + // Valid sender should be in the same browsing context tree as where owner is. + return GetOwnerBC() ? GetOwnerBC()->Top() == senderBC->Top() : false; +} + +NS_IMPL_ISUPPORTS(BackgroundVideoDecodingPermissionObserver, nsIObserver) + +} // namespace mozilla diff --git a/dom/media/BackgroundVideoDecodingPermissionObserver.h b/dom/media/BackgroundVideoDecodingPermissionObserver.h new file mode 100644 index 0000000000..ee8e8164de --- /dev/null +++ b/dom/media/BackgroundVideoDecodingPermissionObserver.h @@ -0,0 +1,51 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#if !defined(BackgroundVideoDecodingPermissionObserver_h_) +# define BackgroundVideoDecodingPermissionObserver_h_ + +# include "nsIObserver.h" +# include "nsISupportsImpl.h" + +class nsISupports; +class nsPIDOMWindowOuter; + +namespace mozilla { + +namespace dom { +class Document; +class BrowsingContext; +} // namespace dom + +class MediaDecoder; + +class BackgroundVideoDecodingPermissionObserver final : public nsIObserver { + public: + NS_DECL_ISUPPORTS + + explicit BackgroundVideoDecodingPermissionObserver(MediaDecoder* aDecoder); + + NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic, + const char16_t* aData) override; + void RegisterEvent(); + void UnregisterEvent(); + + private: + ~BackgroundVideoDecodingPermissionObserver(); + void EnableEvent() const; + void DisableEvent() const; + dom::BrowsingContext* GetOwnerBC() const; + dom::Document* GetOwnerDoc() const; + bool IsValidEventSender(nsISupports* aSubject) const; + + // The life cycle of observer would always be shorter than decoder, so we + // use raw pointer here. + MediaDecoder* mDecoder; + bool mIsRegisteredForEvent; +}; + +} // namespace mozilla + +#endif // BackgroundVideoDecodingPermissionObserver_h_ diff --git a/dom/media/BaseMediaResource.cpp b/dom/media/BaseMediaResource.cpp new file mode 100644 index 0000000000..e5ba50109a --- /dev/null +++ b/dom/media/BaseMediaResource.cpp @@ -0,0 +1,171 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "BaseMediaResource.h" + +#include "ChannelMediaResource.h" +#include "CloneableWithRangeMediaResource.h" +#include "FileMediaResource.h" +#include "MediaContainerType.h" +#include "mozilla/dom/BlobImpl.h" +#include "mozilla/dom/BlobURLProtocolHandler.h" +#include "mozilla/dom/HTMLMediaElement.h" +#include "mozilla/InputStreamLengthHelper.h" +#include "nsDebug.h" +#include "nsError.h" +#include "nsICloneableInputStream.h" +#include "nsIFile.h" +#include "nsIFileChannel.h" +#include "nsIInputStream.h" +#include "nsNetUtil.h" + +namespace mozilla { + +already_AddRefed BaseMediaResource::Create( + MediaResourceCallback* aCallback, nsIChannel* aChannel, + bool aIsPrivateBrowsing) { + NS_ASSERTION(NS_IsMainThread(), + "MediaResource::Open called on non-main thread"); + + // If the channel was redirected, we want the post-redirect URI; + // but if the URI scheme was expanded, say from chrome: to jar:file:, + // we want the original URI. 
+ nsCOMPtr uri; + nsresult rv = NS_GetFinalChannelURI(aChannel, getter_AddRefs(uri)); + NS_ENSURE_SUCCESS(rv, nullptr); + + nsAutoCString contentTypeString; + aChannel->GetContentType(contentTypeString); + Maybe containerType = + MakeMediaContainerType(contentTypeString); + if (!containerType) { + return nullptr; + } + + // Let's try to create a FileMediaResource in case the channel is a nsIFile + nsCOMPtr fc = do_QueryInterface(aChannel); + if (fc) { + RefPtr resource = + new FileMediaResource(aCallback, aChannel, uri); + return resource.forget(); + } + + int64_t streamLength = -1; + + RefPtr blobImpl; + if (dom::IsBlobURI(uri) && + NS_SUCCEEDED(NS_GetBlobForBlobURI(uri, getter_AddRefs(blobImpl))) && + blobImpl) { + IgnoredErrorResult rv; + + nsCOMPtr stream; + blobImpl->CreateInputStream(getter_AddRefs(stream), rv); + if (NS_WARN_IF(rv.Failed())) { + return nullptr; + } + + // If this stream knows its own size synchronously, we can still use + // FileMediaResource. If the size is known, it means that the reading + // doesn't require any async operation. This is required because + // FileMediaResource doesn't work with nsIAsyncInputStreams. + int64_t length; + if (InputStreamLengthHelper::GetSyncLength(stream, &length) && + length >= 0) { + RefPtr resource = + new FileMediaResource(aCallback, aChannel, uri, length); + return resource.forget(); + } + + // Also if the stream doesn't know its own size synchronously, we can still + // read the length from the blob. + uint64_t size = blobImpl->GetSize(rv); + if (NS_WARN_IF(rv.Failed())) { + return nullptr; + } + + // Maybe this blob URL can be cloned with a range. + nsCOMPtr cloneableWithRange = + do_QueryInterface(stream); + if (cloneableWithRange) { + RefPtr resource = new CloneableWithRangeMediaResource( + aCallback, aChannel, uri, stream, size); + return resource.forget(); + } + + // We know the size of the stream for blobURLs, let's use it. + streamLength = size; + } + + RefPtr resource = new ChannelMediaResource( + aCallback, aChannel, uri, streamLength, aIsPrivateBrowsing); + return resource.forget(); +} + +void BaseMediaResource::SetLoadInBackground(bool aLoadInBackground) { + if (aLoadInBackground == mLoadInBackground) { + return; + } + mLoadInBackground = aLoadInBackground; + if (!mChannel) { + // No channel, resource is probably already loaded. 
+ return; + } + + MediaDecoderOwner* owner = mCallback->GetMediaOwner(); + if (!owner) { + NS_WARNING("Null owner in MediaResource::SetLoadInBackground()"); + return; + } + RefPtr element = owner->GetMediaElement(); + if (!element) { + NS_WARNING("Null element in MediaResource::SetLoadInBackground()"); + return; + } + + bool isPending = false; + if (NS_SUCCEEDED(mChannel->IsPending(&isPending)) && isPending) { + nsLoadFlags loadFlags; + DebugOnly rv = mChannel->GetLoadFlags(&loadFlags); + NS_ASSERTION(NS_SUCCEEDED(rv), "GetLoadFlags() failed!"); + + if (aLoadInBackground) { + loadFlags |= nsIRequest::LOAD_BACKGROUND; + } else { + loadFlags &= ~nsIRequest::LOAD_BACKGROUND; + } + Unused << NS_WARN_IF(NS_FAILED(ModifyLoadFlags(loadFlags))); + } +} + +nsresult BaseMediaResource::ModifyLoadFlags(nsLoadFlags aFlags) { + nsCOMPtr loadGroup; + nsresult rv = mChannel->GetLoadGroup(getter_AddRefs(loadGroup)); + MOZ_ASSERT(NS_SUCCEEDED(rv), "GetLoadGroup() failed!"); + + bool inLoadGroup = false; + if (loadGroup) { + nsresult status; + mChannel->GetStatus(&status); + + rv = loadGroup->RemoveRequest(mChannel, nullptr, status); + if (NS_WARN_IF(NS_FAILED(rv))) { + return rv; + } + inLoadGroup = true; + } + + rv = mChannel->SetLoadFlags(aFlags); + MOZ_ASSERT(NS_SUCCEEDED(rv), "SetLoadFlags() failed!"); + + if (inLoadGroup) { + rv = loadGroup->AddRequest(mChannel, nullptr); + MOZ_ASSERT(NS_SUCCEEDED(rv), "AddRequest() failed!"); + } + + return NS_OK; +} + +} // namespace mozilla diff --git a/dom/media/BaseMediaResource.h b/dom/media/BaseMediaResource.h new file mode 100644 index 0000000000..29cde01e8c --- /dev/null +++ b/dom/media/BaseMediaResource.h @@ -0,0 +1,151 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef BaseMediaResource_h +#define BaseMediaResource_h + +#include "MediaResource.h" +#include "MediaResourceCallback.h" +#include "MediaCache.h" +#include "nsIChannel.h" +#include "nsIURI.h" +#include "nsIStreamListener.h" +#include "mozilla/dom/MediaDebugInfoBinding.h" + +class nsIPrincipal; + +namespace mozilla { + +DDLoggedTypeDeclNameAndBase(BaseMediaResource, MediaResource); + +class BaseMediaResource : public MediaResource, + public DecoderDoctorLifeLogger { + public: + /** + * Create a resource, reading data from the channel. Call on main thread only. + * The caller must follow up by calling resource->Open(). + */ + static already_AddRefed Create( + MediaResourceCallback* aCallback, nsIChannel* aChannel, + bool aIsPrivateBrowsing); + + // Pass true to limit the amount of readahead data (specified by + // "media.cache_readahead_limit") or false to read as much as the + // cache size allows. + virtual void ThrottleReadahead(bool bThrottle) {} + + // This is the client's estimate of the playback rate assuming + // the media plays continuously. The cache can't guess this itself + // because it doesn't know when the decoder was paused, buffering, etc. + virtual void SetPlaybackRate(uint32_t aBytesPerSecond) = 0; + + // Get the estimated download rate in bytes per second (assuming no + // pausing of the channel is requested by Gecko). + // *aIsReliable is set to true if we think the estimate is useful. + virtual double GetDownloadRate(bool* aIsReliable) = 0; + + // Moves any existing channel loads into or out of background. Background + // loads don't block the load event. 
This also determines whether or not any + // new loads initiated (for example to seek) will be in the background. + void SetLoadInBackground(bool aLoadInBackground); + + // Suspend any downloads that are in progress. + // If aCloseImmediately is set, resources should be released immediately + // since we don't expect to resume again any time soon. Otherwise we + // may resume again soon so resources should be held for a little + // while. + virtual void Suspend(bool aCloseImmediately) = 0; + + // Resume any downloads that have been suspended. + virtual void Resume() = 0; + + // The mode is initially MODE_METADATA. + virtual void SetReadMode(MediaCacheStream::ReadMode aMode) = 0; + + // Returns true if the resource can be seeked to unbuffered ranges, i.e. + // for an HTTP network stream this returns true if HTTP1.1 Byte Range + // requests are supported by the connection/server. + virtual bool IsTransportSeekable() = 0; + + // Get the current principal for the channel + virtual already_AddRefed GetCurrentPrincipal() = 0; + + // Return true if the loading of this resource required cross-origin + // redirects. + virtual bool HadCrossOriginRedirects() = 0; + + /** + * Open the stream. This creates a stream listener and returns it in + * aStreamListener; this listener needs to be notified of incoming data. + */ + virtual nsresult Open(nsIStreamListener** aStreamListener) = 0; + + // If this returns false, then we shouldn't try to clone this MediaResource + // because its underlying resources are not suitable for reuse (e.g. + // because the underlying connection has been lost, or this resource + // just can't be safely cloned). If this returns true, CloneData could + // still fail. If this returns false, CloneData should not be called. + virtual bool CanClone() { return false; } + + // Create a new stream of the same type that refers to the same URI + // with a new channel. Any cached data associated with the original + // stream should be accessible in the new stream too. + virtual already_AddRefed CloneData( + MediaResourceCallback* aCallback) { + return nullptr; + } + + // Returns true if the resource is a live stream. + virtual bool IsLiveStream() const { return false; } + + virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { + // Might be useful to track in the future: + // - mChannel + // - mURI (possibly owned, looks like just a ref from mChannel) + // Not owned: + // - mCallback + return 0; + } + + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + virtual void GetDebugInfo(dom::MediaResourceDebugInfo& aInfo) {} + + protected: + BaseMediaResource(MediaResourceCallback* aCallback, nsIChannel* aChannel, + nsIURI* aURI) + : mCallback(aCallback), + mChannel(aChannel), + mURI(aURI), + mLoadInBackground(false) {} + virtual ~BaseMediaResource() = default; + + // Set the request's load flags to aFlags. If the request is part of a + // load group, the request is removed from the group, the flags are set, and + // then the request is added back to the load group. + nsresult ModifyLoadFlags(nsLoadFlags aFlags); + + RefPtr mCallback; + + // Channel used to download the media data. Must be accessed + // from the main thread only. + nsCOMPtr mChannel; + + // URI in case the stream needs to be re-opened. Access from + // main thread only. + nsCOMPtr mURI; + + // True if SetLoadInBackground() has been called with + // aLoadInBackground = true, i.e. 
when the document load event is not + // blocked by this resource, and all channel loads will be in the + // background. + bool mLoadInBackground; +}; + +} // namespace mozilla + +#endif // BaseMediaResource_h diff --git a/dom/media/Benchmark.cpp b/dom/media/Benchmark.cpp new file mode 100644 index 0000000000..1bc6d833d9 --- /dev/null +++ b/dom/media/Benchmark.cpp @@ -0,0 +1,395 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "Benchmark.h" + +#include "BufferMediaResource.h" +#include "MediaData.h" +#include "MediaDataDecoderProxy.h" +#include "PDMFactory.h" +#include "VideoUtils.h" +#include "WebMDemuxer.h" +#include "mozilla/AbstractThread.h" +#include "mozilla/Components.h" +#include "mozilla/Preferences.h" +#include "mozilla/SharedThreadPool.h" +#include "mozilla/StaticMutex.h" +#include "mozilla/StaticPrefs_media.h" +#include "mozilla/TaskQueue.h" +#include "mozilla/Telemetry.h" +#include "mozilla/dom/ContentChild.h" +#include "mozilla/gfx/gfxVars.h" +#include "nsGkAtoms.h" +#include "nsIGfxInfo.h" + +#ifndef MOZ_WIDGET_ANDROID +# include "WebMSample.h" +#endif + +using namespace mozilla::gfx; + +namespace mozilla { + +// Update this version number to force re-running the benchmark. Such as when +// an improvement to FFVP9 or LIBVPX is deemed worthwhile. +const uint32_t VP9Benchmark::sBenchmarkVersionID = 5; + +const char* VP9Benchmark::sBenchmarkFpsPref = "media.benchmark.vp9.fps"; +const char* VP9Benchmark::sBenchmarkFpsVersionCheck = + "media.benchmark.vp9.versioncheck"; +bool VP9Benchmark::sHasRunTest = false; + +// static +bool VP9Benchmark::ShouldRun() { +#if defined(MOZ_WIDGET_ANDROID) + // Assume that the VP9 software decoder will always be too slow. + return false; +#else +# if defined(MOZ_APPLEMEDIA) + const nsCOMPtr gfxInfo = components::GfxInfo::Service(); + nsString vendorID, deviceID; + gfxInfo->GetAdapterVendorID(vendorID); + // We won't run the VP9 benchmark on mac using an Intel GPU as performance are + // poor, see bug 1404042. + if (vendorID.EqualsLiteral("0x8086")) { + return false; + } + // Fall Through +# endif + return true; +#endif +} + +// static +uint32_t VP9Benchmark::MediaBenchmarkVp9Fps() { + if (!ShouldRun()) { + return 0; + } + return StaticPrefs::media_benchmark_vp9_fps(); +} + +// static +bool VP9Benchmark::IsVP9DecodeFast(bool aDefault) { +#if defined(MOZ_WIDGET_ANDROID) + return false; +#else + if (!ShouldRun()) { + return false; + } + static StaticMutex sMutex MOZ_UNANNOTATED; + uint32_t decodeFps = StaticPrefs::media_benchmark_vp9_fps(); + uint32_t hadRecentUpdate = StaticPrefs::media_benchmark_vp9_versioncheck(); + bool needBenchmark; + { + StaticMutexAutoLock lock(sMutex); + needBenchmark = !sHasRunTest && + (decodeFps == 0 || hadRecentUpdate != sBenchmarkVersionID); + sHasRunTest = true; + } + + if (needBenchmark) { + RefPtr demuxer = new WebMDemuxer( + new BufferMediaResource(sWebMSample, sizeof(sWebMSample))); + RefPtr estimiser = new Benchmark( + demuxer, + {StaticPrefs::media_benchmark_frames(), // frames to measure + 1, // start benchmarking after decoding this frame. + 8, // loop after decoding that many frames. 
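         // The next parameter below is the benchmark timeout, taken from
         // StaticPrefs::media_benchmark_timeout() (milliseconds); it bounds
         // the benchmark's total run time if decoding turns out to be very
         // slow.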
+ TimeDuration::FromMilliseconds( + StaticPrefs::media_benchmark_timeout())}); + estimiser->Run()->Then( + AbstractThread::MainThread(), __func__, + [](uint32_t aDecodeFps) { + if (XRE_IsContentProcess()) { + dom::ContentChild* contentChild = dom::ContentChild::GetSingleton(); + if (contentChild) { + contentChild->SendNotifyBenchmarkResult(u"VP9"_ns, aDecodeFps); + } + } else { + Preferences::SetUint(sBenchmarkFpsPref, aDecodeFps); + Preferences::SetUint(sBenchmarkFpsVersionCheck, + sBenchmarkVersionID); + } + }, + []() {}); + } + + if (decodeFps == 0) { + return aDefault; + } + + return decodeFps >= StaticPrefs::media_benchmark_vp9_threshold(); +#endif +} + +Benchmark::Benchmark(MediaDataDemuxer* aDemuxer, const Parameters& aParameters) + : QueueObject( + TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR), + "Benchmark::QueueObject")), + mParameters(aParameters), + mPlaybackState(this, aDemuxer) { + MOZ_COUNT_CTOR(Benchmark); +} + +Benchmark::~Benchmark() { MOZ_COUNT_DTOR(Benchmark); } + +RefPtr Benchmark::Run() { + RefPtr self = this; + mKeepAliveUntilComplete = this; + return InvokeAsync(Thread(), __func__, [self] { + RefPtr p = self->mPromise.Ensure(__func__); + self->mPlaybackState.Dispatch(NS_NewRunnableFunction( + "Benchmark::Run", [self]() { self->mPlaybackState.DemuxSamples(); })); + return p; + }); +} + +void Benchmark::ReturnResult(uint32_t aDecodeFps) { + MOZ_ASSERT(OnThread()); + + mPromise.ResolveIfExists(aDecodeFps, __func__); +} + +void Benchmark::ReturnError(const MediaResult& aError) { + MOZ_ASSERT(OnThread()); + + mPromise.RejectIfExists(aError, __func__); +} + +void Benchmark::Dispose() { + MOZ_ASSERT(OnThread()); + + mKeepAliveUntilComplete = nullptr; +} + +void Benchmark::Init() { + MOZ_ASSERT(NS_IsMainThread()); + gfxVars::Initialize(); +} + +BenchmarkPlayback::BenchmarkPlayback(Benchmark* aGlobalState, + MediaDataDemuxer* aDemuxer) + : QueueObject( + TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR), + "BenchmarkPlayback::QueueObject")), + mGlobalState(aGlobalState), + mDecoderTaskQueue(TaskQueue::Create( + GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER), + "BenchmarkPlayback::mDecoderTaskQueue")), + mDemuxer(aDemuxer), + mSampleIndex(0), + mFrameCount(0), + mFinished(false), + mDrained(false) {} + +void BenchmarkPlayback::DemuxSamples() { + MOZ_ASSERT(OnThread()); + + RefPtr ref(mGlobalState); + mDemuxer->Init()->Then( + Thread(), __func__, + [this, ref](nsresult aResult) { + MOZ_ASSERT(OnThread()); + if (mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack)) { + mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0); + } else if (mDemuxer->GetNumberTracks(TrackInfo::kAudioTrack)) { + mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0); + } + if (!mTrackDemuxer) { + Error(MediaResult(NS_ERROR_FAILURE, "Can't create track demuxer")); + return; + } + DemuxNextSample(); + }, + [this, ref](const MediaResult& aError) { Error(aError); }); +} + +void BenchmarkPlayback::DemuxNextSample() { + MOZ_ASSERT(OnThread()); + + RefPtr ref(mGlobalState); + RefPtr promise = + mTrackDemuxer->GetSamples(); + promise->Then( + Thread(), __func__, + [this, ref](RefPtr aHolder) { + mSamples.AppendElements(std::move(aHolder->GetMovableSamples())); + if (ref->mParameters.mStopAtFrame && + mSamples.Length() == ref->mParameters.mStopAtFrame.ref()) { + InitDecoder(mTrackDemuxer->GetInfo()); + } else { + Dispatch( + NS_NewRunnableFunction("BenchmarkPlayback::DemuxNextSample", + [this, ref]() { DemuxNextSample(); })); + } + }, + 
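      // Reject handler below: demuxing past the last packet rejects with
      // NS_ERROR_DOM_MEDIA_END_OF_STREAM, which is the expected way to detect
      // the end of the clip and move on to decoder initialization; any other
      // error aborts the benchmark.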
[this, ref](const MediaResult& aError) { + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + InitDecoder(mTrackDemuxer->GetInfo()); + break; + default: + Error(aError); + break; + } + }); +} + +void BenchmarkPlayback::InitDecoder(UniquePtr&& aInfo) { + MOZ_ASSERT(OnThread()); + + if (!aInfo) { + Error(MediaResult(NS_ERROR_FAILURE, "Invalid TrackInfo")); + return; + } + + RefPtr platform = new PDMFactory(); + mInfo = std::move(aInfo); + RefPtr ref(mGlobalState); + platform->CreateDecoder(CreateDecoderParams{*mInfo}) + ->Then( + Thread(), __func__, + [this, ref](RefPtr&& aDecoder) { + mDecoder = new MediaDataDecoderProxy( + aDecoder.forget(), do_AddRef(mDecoderTaskQueue.get())); + mDecoder->Init()->Then( + Thread(), __func__, + [this, ref](TrackInfo::TrackType aTrackType) { + InputExhausted(); + }, + [this, ref](const MediaResult& aError) { Error(aError); }); + }, + [this, ref](const MediaResult& aError) { Error(aError); }); +} + +void BenchmarkPlayback::FinalizeShutdown() { + MOZ_ASSERT(OnThread()); + + MOZ_ASSERT(mFinished, "GlobalShutdown must have been run"); + MOZ_ASSERT(!mDecoder, "mDecoder must have been shutdown already"); + MOZ_ASSERT(!mDemuxer, "mDemuxer must have been shutdown already"); + MOZ_DIAGNOSTIC_ASSERT(mDecoderTaskQueue->IsEmpty()); + mDecoderTaskQueue = nullptr; + + RefPtr ref(mGlobalState); + ref->Thread()->Dispatch(NS_NewRunnableFunction( + "BenchmarkPlayback::FinalizeShutdown", [ref]() { ref->Dispose(); })); +} + +void BenchmarkPlayback::GlobalShutdown() { + MOZ_ASSERT(OnThread()); + + MOZ_ASSERT(!mFinished, "We've already shutdown"); + + mFinished = true; + + if (mTrackDemuxer) { + mTrackDemuxer->Reset(); + mTrackDemuxer->BreakCycles(); + mTrackDemuxer = nullptr; + } + mDemuxer = nullptr; + + if (mDecoder) { + RefPtr ref(mGlobalState); + mDecoder->Flush()->Then( + Thread(), __func__, + [ref, this]() { + mDecoder->Shutdown()->Then( + Thread(), __func__, [ref, this]() { FinalizeShutdown(); }, + []() { MOZ_CRASH("not reached"); }); + mDecoder = nullptr; + mInfo = nullptr; + }, + []() { MOZ_CRASH("not reached"); }); + } else { + FinalizeShutdown(); + } +} + +void BenchmarkPlayback::Output(MediaDataDecoder::DecodedData&& aResults) { + MOZ_ASSERT(OnThread()); + MOZ_ASSERT(!mFinished); + + RefPtr ref(mGlobalState); + mFrameCount += aResults.Length(); + if (!mDecodeStartTime && mFrameCount >= ref->mParameters.mStartupFrame) { + mDecodeStartTime = Some(TimeStamp::Now()); + } + TimeStamp now = TimeStamp::Now(); + uint32_t frames = mFrameCount - ref->mParameters.mStartupFrame; + TimeDuration elapsedTime = now - mDecodeStartTime.refOr(now); + if (((frames == ref->mParameters.mFramesToMeasure) && + mFrameCount > ref->mParameters.mStartupFrame && frames > 0) || + elapsedTime >= ref->mParameters.mTimeout || mDrained) { + uint32_t decodeFps = frames / elapsedTime.ToSeconds(); + GlobalShutdown(); + ref->Dispatch(NS_NewRunnableFunction( + "BenchmarkPlayback::Output", + [ref, decodeFps]() { ref->ReturnResult(decodeFps); })); + } +} + +void BenchmarkPlayback::Error(const MediaResult& aError) { + MOZ_ASSERT(OnThread()); + + RefPtr ref(mGlobalState); + GlobalShutdown(); + ref->Dispatch( + NS_NewRunnableFunction("BenchmarkPlayback::Error", + [ref, aError]() { ref->ReturnError(aError); })); +} + +void BenchmarkPlayback::InputExhausted() { + MOZ_ASSERT(OnThread()); + MOZ_ASSERT(!mFinished); + + if (mSampleIndex >= mSamples.Length()) { + Error(MediaResult(NS_ERROR_FAILURE, "Nothing left to decode")); + return; + } + + RefPtr sample = mSamples[mSampleIndex]; + RefPtr 
ref(mGlobalState); + RefPtr p = mDecoder->Decode(sample); + + mSampleIndex++; + if (mSampleIndex == mSamples.Length() && !ref->mParameters.mStopAtFrame) { + // Complete current frame decode then drain if still necessary. + p->Then( + Thread(), __func__, + [ref, this](MediaDataDecoder::DecodedData&& aResults) { + Output(std::move(aResults)); + if (!mFinished) { + mDecoder->Drain()->Then( + Thread(), __func__, + [ref, this](MediaDataDecoder::DecodedData&& aResults) { + mDrained = true; + Output(std::move(aResults)); + MOZ_ASSERT(mFinished, "We must be done now"); + }, + [ref, this](const MediaResult& aError) { Error(aError); }); + } + }, + [ref, this](const MediaResult& aError) { Error(aError); }); + } else { + if (mSampleIndex == mSamples.Length() && ref->mParameters.mStopAtFrame) { + mSampleIndex = 0; + } + // Continue decoding + p->Then( + Thread(), __func__, + [ref, this](MediaDataDecoder::DecodedData&& aResults) { + Output(std::move(aResults)); + if (!mFinished) { + InputExhausted(); + } + }, + [ref, this](const MediaResult& aError) { Error(aError); }); + } +} + +} // namespace mozilla diff --git a/dom/media/Benchmark.h b/dom/media/Benchmark.h new file mode 100644 index 0000000000..b76942edc5 --- /dev/null +++ b/dom/media/Benchmark.h @@ -0,0 +1,119 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZILLA_BENCHMARK_H +#define MOZILLA_BENCHMARK_H + +#include "MediaDataDemuxer.h" +#include "PlatformDecoderModule.h" +#include "QueueObject.h" +#include "mozilla/Maybe.h" +#include "mozilla/RefPtr.h" +#include "mozilla/TimeStamp.h" +#include "mozilla/UniquePtr.h" +#include "nsCOMPtr.h" + +namespace mozilla { + +class TaskQueue; +class Benchmark; + +class BenchmarkPlayback : public QueueObject { + friend class Benchmark; + BenchmarkPlayback(Benchmark* aGlobalState, MediaDataDemuxer* aDemuxer); + void DemuxSamples(); + void DemuxNextSample(); + void GlobalShutdown(); + void InitDecoder(UniquePtr&& aInfo); + + void Output(MediaDataDecoder::DecodedData&& aResults); + void Error(const MediaResult& aError); + void InputExhausted(); + + // Shutdown trackdemuxer and demuxer if any and shutdown the task queues. + void FinalizeShutdown(); + + Atomic mGlobalState; + + RefPtr mDecoderTaskQueue; + RefPtr mDecoder; + + // Object only accessed on Thread() + RefPtr mDemuxer; + RefPtr mTrackDemuxer; + nsTArray> mSamples; + UniquePtr mInfo; + size_t mSampleIndex; + Maybe mDecodeStartTime; + uint32_t mFrameCount; + bool mFinished; + bool mDrained; +}; + +// Init() must have been called at least once prior on the +// main thread. 
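// A minimal usage sketch (an illustration under assumptions, not normative API
// documentation): given an already-created MediaDataDemuxer, a caller on the
// main thread could do roughly the following; `demuxer` and the lambda bodies
// are hypothetical.
//
//   Benchmark::Init();
//   RefPtr<Benchmark> benchmark =
//       new Benchmark(demuxer, Benchmark::Parameters());
//   benchmark->Run()->Then(
//       AbstractThread::MainThread(), __func__,
//       [](uint32_t aDecodeFps) { /* consume the measured frame rate */ },
//       [](const MediaResult& aError) { /* benchmark failed */ });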
+class Benchmark : public QueueObject { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Benchmark) + + struct Parameters { + Parameters() + : mFramesToMeasure(UINT32_MAX), + mStartupFrame(1), + mTimeout(TimeDuration::Forever()) {} + + Parameters(uint32_t aFramesToMeasure, uint32_t aStartupFrame, + uint32_t aStopAtFrame, const TimeDuration& aTimeout) + : mFramesToMeasure(aFramesToMeasure), + mStartupFrame(aStartupFrame), + mStopAtFrame(Some(aStopAtFrame)), + mTimeout(aTimeout) {} + + const uint32_t mFramesToMeasure; + const uint32_t mStartupFrame; + const Maybe mStopAtFrame; + const TimeDuration mTimeout; + }; + + typedef MozPromise + BenchmarkPromise; + + explicit Benchmark(MediaDataDemuxer* aDemuxer, + const Parameters& aParameters = Parameters()); + RefPtr Run(); + + // Must be called on the main thread. + static void Init(); + + private: + friend class BenchmarkPlayback; + virtual ~Benchmark(); + void ReturnResult(uint32_t aDecodeFps); + void ReturnError(const MediaResult& aError); + void Dispose(); + const Parameters mParameters; + RefPtr mKeepAliveUntilComplete; + BenchmarkPlayback mPlaybackState; + MozPromiseHolder mPromise; +}; + +class VP9Benchmark { + public: + static bool IsVP9DecodeFast(bool aDefault = false); + static const char* sBenchmarkFpsPref; + static const char* sBenchmarkFpsVersionCheck; + static const uint32_t sBenchmarkVersionID; + static bool sHasRunTest; + // Return the value of media.benchmark.vp9.fps preference (which will be 0 if + // not known) + static uint32_t MediaBenchmarkVp9Fps(); + + private: + static bool ShouldRun(); +}; +} // namespace mozilla + +#endif diff --git a/dom/media/BitReader.cpp b/dom/media/BitReader.cpp new file mode 100644 index 0000000000..d5ae26eba7 --- /dev/null +++ b/dom/media/BitReader.cpp @@ -0,0 +1,197 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// Derived from Stagefright's ABitReader. + +#include "BitReader.h" + +namespace mozilla { + +BitReader::BitReader(const mozilla::MediaByteBuffer* aBuffer) + : BitReader(aBuffer->Elements(), aBuffer->Length() * 8) {} + +BitReader::BitReader(const mozilla::MediaByteBuffer* aBuffer, size_t aBits) + : BitReader(aBuffer->Elements(), aBits) {} + +BitReader::BitReader(const uint8_t* aBuffer, size_t aBits) + : mData(aBuffer), + mOriginalBitSize(aBits), + mTotalBitsLeft(aBits), + mSize((aBits + 7) / 8), + mReservoir(0), + mNumBitsLeft(0) {} + +BitReader::~BitReader() = default; + +uint32_t BitReader::ReadBits(size_t aNum) { + MOZ_ASSERT(aNum <= 32); + if (mTotalBitsLeft < aNum) { + NS_WARNING("Reading past end of buffer"); + return 0; + } + uint32_t result = 0; + while (aNum > 0) { + if (mNumBitsLeft == 0) { + FillReservoir(); + } + + size_t m = aNum; + if (m > mNumBitsLeft) { + m = mNumBitsLeft; + } + + if (m == 32) { + result = mReservoir; + mReservoir = 0; + } else { + result = (result << m) | (mReservoir >> (32 - m)); + mReservoir <<= m; + } + mNumBitsLeft -= m; + mTotalBitsLeft -= m; + + aNum -= m; + } + + return result; +} + +// Read unsigned integer Exp-Golomb-coded. +uint32_t BitReader::ReadUE() { + uint32_t i = 0; + + while (ReadBit() == 0 && i < 32) { + i++; + } + if (i == 32) { + // This can happen if the data is invalid, or if it's + // short, since ReadBit() will return 0 when it runs + // off the end of the buffer. 
+ NS_WARNING("Invalid H.264 data"); + return 0; + } + uint32_t r = ReadBits(i); + r += (uint32_t(1) << i) - 1; + + return r; +} + +// Read signed integer Exp-Golomb-coded. +int32_t BitReader::ReadSE() { + int32_t r = ReadUE(); + if (r & 1) { + return (r + 1) / 2; + } else { + return -r / 2; + } +} + +uint64_t BitReader::ReadU64() { + uint64_t hi = ReadU32(); + uint32_t lo = ReadU32(); + return (hi << 32) | lo; +} + +CheckedUint64 BitReader::ReadULEB128() { + // See https://en.wikipedia.org/wiki/LEB128#Decode_unsigned_integer + CheckedUint64 value = 0; + for (size_t i = 0; i < sizeof(uint64_t) * 8 / 7; i++) { + bool more = ReadBit(); + value += static_cast(ReadBits(7)) << (i * 7); + if (!more) { + break; + } + } + return value; +} + +uint64_t BitReader::ReadUTF8() { + int64_t val = ReadBits(8); + uint32_t top = (val & 0x80) >> 1; + + if ((val & 0xc0) == 0x80 || val >= 0xFE) { + // error. + return -1; + } + while (val & top) { + int tmp = ReadBits(8) - 128; + if (tmp >> 6) { + // error. + return -1; + } + val = (val << 6) + tmp; + top <<= 5; + } + val &= (top << 1) - 1; + return val; +} + +size_t BitReader::BitCount() const { return mOriginalBitSize - mTotalBitsLeft; } + +size_t BitReader::BitsLeft() const { return mTotalBitsLeft; } + +void BitReader::FillReservoir() { + if (mSize == 0) { + NS_ASSERTION(false, "Attempting to fill reservoir from past end of data"); + return; + } + + mReservoir = 0; + size_t i; + for (i = 0; mSize > 0 && i < 4; i++) { + mReservoir = (mReservoir << 8) | *mData; + mData++; + mSize--; + } + + mNumBitsLeft = 8 * i; + mReservoir <<= 32 - mNumBitsLeft; +} + +/* static */ +uint32_t BitReader::GetBitLength(const mozilla::MediaByteBuffer* aNAL) { + size_t size = aNAL->Length(); + + while (size > 0 && aNAL->ElementAt(size - 1) == 0) { + size--; + } + + if (!size) { + return 0; + } + + if (size > UINT32_MAX / 8) { + // We can't represent it, we'll use as much as we can. + return UINT32_MAX; + } + + uint8_t v = aNAL->ElementAt(size - 1); + size *= 8; + + // Remove the stop bit and following trailing zeros. + if (v) { + // Count the consecutive zero bits (trailing) on the right by binary search. + // Adapted from Matt Whitlock algorithm to only bother with 8 bits integers. + uint32_t c; + if (v & 1) { + // Special case for odd v (assumed to happen half of the time). + c = 0; + } else { + c = 1; + if ((v & 0xf) == 0) { + v >>= 4; + c += 4; + } + if ((v & 0x3) == 0) { + v >>= 2; + c += 2; + } + c -= v & 0x1; + } + size -= c + 1; + } + return size; +} + +} // namespace mozilla diff --git a/dom/media/BitReader.h b/dom/media/BitReader.h new file mode 100644 index 0000000000..21c28f2c8c --- /dev/null +++ b/dom/media/BitReader.h @@ -0,0 +1,54 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef BIT_READER_H_ +#define BIT_READER_H_ + +#include "MediaData.h" + +namespace mozilla { + +class BitReader { + public: + explicit BitReader(const MediaByteBuffer* aBuffer); + BitReader(const MediaByteBuffer* aBuffer, size_t aBits); + BitReader(const uint8_t* aBuffer, size_t aBits); + ~BitReader(); + uint32_t ReadBits(size_t aNum); + bool ReadBit() { return ReadBits(1) != 0; } + uint32_t ReadU32() { return ReadBits(32); } + uint64_t ReadU64(); + + // Read the UTF-8 sequence and convert it to its 64-bit UCS-4 encoded form. + // Return 0xfffffffffffffff if sequence was invalid. 
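  // (Worked example, for illustration: the two-byte sequence 0xC3 0xA9
  //  decodes to 0xE9, i.e. U+00E9 "é".)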
+ uint64_t ReadUTF8(); + // Read unsigned integer Exp-Golomb-coded. + uint32_t ReadUE(); + // Read signed integer Exp-Golomb-coded. + int32_t ReadSE(); + // Read unsigned integer Little Endian Base 128 coded. + // Limited to unsigned 64 bits. + CheckedUint64 ReadULEB128(); + + // Return the number of bits parsed so far; + size_t BitCount() const; + // Return the number of bits left. + size_t BitsLeft() const; + + // Return RBSP bit length. + static uint32_t GetBitLength(const MediaByteBuffer* aNAL); + + private: + void FillReservoir(); + const uint8_t* mData; + const size_t mOriginalBitSize; + size_t mTotalBitsLeft; + size_t mSize; // Size left in bytes + uint32_t mReservoir; // Left-aligned bits + size_t mNumBitsLeft; // Number of bits left in reservoir. +}; + +} // namespace mozilla + +#endif // BIT_READER_H_ diff --git a/dom/media/BitWriter.cpp b/dom/media/BitWriter.cpp new file mode 100644 index 0000000000..e1d513f191 --- /dev/null +++ b/dom/media/BitWriter.cpp @@ -0,0 +1,104 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "BitWriter.h" + +#include + +#include "MediaData.h" +#include "mozilla/MathAlgorithms.h" + +namespace mozilla { + +constexpr uint8_t golombLen[256] = { + 1, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 17, +}; + +BitWriter::BitWriter(MediaByteBuffer* aBuffer) : mBuffer(aBuffer) {} + +BitWriter::~BitWriter() = default; + +void BitWriter::WriteBits(uint64_t aValue, size_t aBits) { + MOZ_ASSERT(aBits <= sizeof(uint64_t) * 8); + + while (aBits) { + if (mBitIndex == 0) { + mBuffer->AppendElement(0); + } + + const uint8_t clearMask = ~(~0u << (8 - mBitIndex)); + uint8_t mask = 0; + + if (mBitIndex + aBits > 8) { + // Not enough bits in the current byte to write all the bits + // required, we'll process what we can and continue with the left over. 
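        // Worked example (illustrative): with mBitIndex == 6 and aBits == 5,
        // leftOverBits is 3; the top 2 bits of aValue finish the current byte
        // and the remaining 3 bits are written at the start of the next byte
        // on the following loop iteration.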
+ const uint8_t leftOverBits = mBitIndex + aBits - 8; + const uint64_t leftOver = aValue & (~uint64_t(0) >> (8 - mBitIndex)); + mask = aValue >> leftOverBits; + + mBitIndex = 8; + aValue = leftOver; + aBits = leftOverBits; + } else { + const uint8_t offset = 8 - mBitIndex - aBits; + mask = aValue << offset; + + mBitIndex += aBits; + aBits = 0; + } + + mBuffer->ElementAt(mPosition) |= mask & clearMask; + + if (mBitIndex == 8) { + mPosition++; + mBitIndex = 0; + } + } +} + +void BitWriter::WriteUE(uint32_t aValue) { + MOZ_ASSERT(aValue <= (UINT32_MAX - 1)); + + if (aValue < 256) { + WriteBits(aValue + 1, golombLen[aValue]); + } else { + const uint32_t e = FloorLog2(aValue + 1); + WriteBits(aValue + 1, e * 2 + 1); + } +} + +void BitWriter::WriteULEB128(uint64_t aValue) { + // See https://en.wikipedia.org/wiki/LEB128#Encode_unsigned_integer + do { + uint8_t byte = aValue & 0x7F; + aValue >>= 7; + WriteBit(aValue != 0); + WriteBits(byte, 7); + } while (aValue != 0); +} + +void BitWriter::CloseWithRbspTrailing() { + WriteBit(true); + WriteBits(0, (8 - mBitIndex) & 7); +} + +void BitWriter::AdvanceBytes(uint32_t aByteOffset) { + MOZ_DIAGNOSTIC_ASSERT(mBitIndex == 0); + mPosition += aByteOffset; +} + +} // namespace mozilla diff --git a/dom/media/BitWriter.h b/dom/media/BitWriter.h new file mode 100644 index 0000000000..6abf7bcde3 --- /dev/null +++ b/dom/media/BitWriter.h @@ -0,0 +1,49 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef BIT_WRITER_H_ +#define BIT_WRITER_H_ + +#include "mozilla/RefPtr.h" + +namespace mozilla { + +class MediaByteBuffer; + +class BitWriter { + public: + explicit BitWriter(MediaByteBuffer* aBuffer); + virtual ~BitWriter(); + void WriteBits(uint64_t aValue, size_t aBits); + void WriteBit(bool aValue) { WriteBits(aValue, 1); } + void WriteU8(uint8_t aValue) { WriteBits(aValue, 8); } + void WriteU32(uint32_t aValue) { WriteBits(aValue, 32); } + void WriteU64(uint64_t aValue) { WriteBits(aValue, 64); } + + // Write unsigned integer into Exp-Golomb-coded. 2^32-2 at most + void WriteUE(uint32_t aValue); + // Write unsigned integer Little Endian Base 128 coded. + void WriteULEB128(uint64_t aValue); + + // Write RBSP trailing bits. + void CloseWithRbspTrailing(); + + // Advance position forward without modifying buffer, which is usually used + // along with the case when directly appending a byte array to the + // MediaByteBuffer for the efficiency, instead of writing bits one by one. + // So this can only be called when the bit index is zero. + void AdvanceBytes(uint32_t aByteOffset); + + // Return the number of bits written so far; + size_t BitCount() const { return mPosition * 8 + mBitIndex; } + + private: + RefPtr mBuffer; + size_t mPosition = 0; + uint8_t mBitIndex = 0; +}; + +} // namespace mozilla + +#endif // BIT_WRITER_H_ diff --git a/dom/media/BufferMediaResource.h b/dom/media/BufferMediaResource.h new file mode 100644 index 0000000000..693704a2ae --- /dev/null +++ b/dom/media/BufferMediaResource.h @@ -0,0 +1,76 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#if !defined(BufferMediaResource_h_) +# define BufferMediaResource_h_ + +# include "MediaResource.h" +# include "nsISeekableStream.h" +# include + +namespace mozilla { + +DDLoggedTypeDeclNameAndBase(BufferMediaResource, MediaResource); + +// A simple MediaResource based on an in memory buffer. This class accepts +// the address and the length of the buffer, and simulates a read/seek API +// on top of it. The Read implementation involves copying memory, which is +// unfortunate, but the MediaResource interface mandates that. +class BufferMediaResource + : public MediaResource, + public DecoderDoctorLifeLogger { + public: + BufferMediaResource(const uint8_t* aBuffer, uint32_t aLength) + : mBuffer(aBuffer), mLength(aLength) {} + + protected: + virtual ~BufferMediaResource() = default; + + private: + // These methods are called off the main thread. + nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount, + uint32_t* aBytes) override { + if (aOffset < 0 || aOffset > mLength) { + return NS_ERROR_FAILURE; + } + *aBytes = std::min(mLength - static_cast(aOffset), aCount); + memcpy(aBuffer, mBuffer + aOffset, *aBytes); + return NS_OK; + } + // Memory-based and no locks, caching discouraged. + bool ShouldCacheReads() override { return false; } + + void Pin() override {} + void Unpin() override {} + int64_t GetLength() override { return mLength; } + int64_t GetNextCachedData(int64_t aOffset) override { return aOffset; } + int64_t GetCachedDataEnd(int64_t aOffset) override { + return std::max(aOffset, int64_t(mLength)); + } + bool IsDataCachedToEndOfResource(int64_t aOffset) override { return true; } + nsresult ReadFromCache(char* aBuffer, int64_t aOffset, + uint32_t aCount) override { + if (aOffset < 0) { + return NS_ERROR_FAILURE; + } + + uint32_t bytes = std::min(mLength - static_cast(aOffset), aCount); + memcpy(aBuffer, mBuffer + aOffset, bytes); + return NS_OK; + } + + nsresult GetCachedRanges(MediaByteRangeSet& aRanges) override { + aRanges += MediaByteRange(0, int64_t(mLength)); + return NS_OK; + } + + private: + const uint8_t* mBuffer; + uint32_t mLength; +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/BufferReader.h b/dom/media/BufferReader.h new file mode 100644 index 0000000000..a7508b20c3 --- /dev/null +++ b/dom/media/BufferReader.h @@ -0,0 +1,335 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef BUFFER_READER_H_ +#define BUFFER_READER_H_ + +#include +#include "mozilla/EndianUtils.h" +#include "nscore.h" +#include "nsTArray.h" +#include "MediaData.h" +#include "MediaSpan.h" +#include "mozilla/Logging.h" +#include "mozilla/Result.h" + +namespace mozilla { + +extern mozilla::LazyLogModule gMP4MetadataLog; + +class MOZ_RAII BufferReader { + public: + BufferReader() : mPtr(nullptr), mRemaining(0), mLength(0) {} + BufferReader(const uint8_t* aData, size_t aSize) + : mPtr(aData), mRemaining(aSize), mLength(aSize) {} + template + explicit BufferReader(const AutoTArray& aData) + : mPtr(aData.Elements()), + mRemaining(aData.Length()), + mLength(aData.Length()) {} + explicit BufferReader(const nsTArray& aData) + : mPtr(aData.Elements()), + mRemaining(aData.Length()), + mLength(aData.Length()) {} + explicit BufferReader(const mozilla::MediaByteBuffer* aData) + : mPtr(aData->Elements()), + mRemaining(aData->Length()), + mLength(aData->Length()) {} + explicit BufferReader(const mozilla::MediaSpan& aData) + : mPtr(aData.Elements()), + mRemaining(aData.Length()), + mLength(aData.Length()) {} + explicit BufferReader(const Span& aData) + : mPtr(aData.Elements()), + mRemaining(aData.Length()), + mLength(aData.Length()) {} + + void SetData(const nsTArray& aData) { + MOZ_ASSERT(!mPtr && !mRemaining); + mPtr = aData.Elements(); + mRemaining = aData.Length(); + mLength = mRemaining; + } + + ~BufferReader() = default; + + size_t Offset() const { return mLength - mRemaining; } + + size_t Remaining() const { return mRemaining; } + + mozilla::Result ReadU8() { + auto ptr = Read(1); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return *ptr; + } + + mozilla::Result ReadU16() { + auto ptr = Read(2); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return mozilla::BigEndian::readUint16(ptr); + } + + mozilla::Result ReadLE16() { + auto ptr = Read(2); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return mozilla::LittleEndian::readInt16(ptr); + } + + mozilla::Result ReadU24() { + auto ptr = Read(3); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return ptr[0] << 16 | ptr[1] << 8 | ptr[2]; + } + + mozilla::Result Read24() { + return ReadU24().map([](uint32_t x) { return (int32_t)x; }); + } + + mozilla::Result ReadLE24() { + auto ptr = Read(3); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + int32_t result = int32_t(ptr[2] << 16 | ptr[1] << 8 | ptr[0]); + if (result & 0x00800000u) { + result -= 0x1000000; + } + return result; + } + + mozilla::Result ReadU32() { + auto ptr = Read(4); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return mozilla::BigEndian::readUint32(ptr); + } + + mozilla::Result Read32() { + auto ptr = Read(4); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return mozilla::BigEndian::readInt32(ptr); + } + + mozilla::Result ReadLEU32() { + auto ptr = Read(4); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, 
mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return mozilla::LittleEndian::readUint32(ptr); + } + + mozilla::Result ReadU64() { + auto ptr = Read(8); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return mozilla::BigEndian::readUint64(ptr); + } + + mozilla::Result Read64() { + auto ptr = Read(8); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return mozilla::BigEndian::readInt64(ptr); + } + + const uint8_t* Read(size_t aCount) { + if (aCount > mRemaining) { + mPtr += mRemaining; + mRemaining = 0; + return nullptr; + } + mRemaining -= aCount; + + const uint8_t* result = mPtr; + mPtr += aCount; + + return result; + } + + const uint8_t* Rewind(size_t aCount) { + MOZ_ASSERT(aCount <= Offset()); + size_t rewind = Offset(); + if (aCount < rewind) { + rewind = aCount; + } + mRemaining += rewind; + mPtr -= rewind; + return mPtr; + } + + mozilla::Result PeekU8() const { + auto ptr = Peek(1); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return *ptr; + } + + mozilla::Result PeekU16() const { + auto ptr = Peek(2); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return mozilla::BigEndian::readUint16(ptr); + } + + mozilla::Result PeekU24() const { + auto ptr = Peek(3); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return ptr[0] << 16 | ptr[1] << 8 | ptr[2]; + } + + mozilla::Result Peek24() const { + return PeekU24().map([](uint32_t x) { return (int32_t)x; }); + } + + mozilla::Result PeekU32() { + auto ptr = Peek(4); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return mozilla::BigEndian::readUint32(ptr); + } + + const uint8_t* Peek(size_t aCount) const { + if (aCount > mRemaining) { + return nullptr; + } + return mPtr; + } + + const uint8_t* Seek(size_t aOffset) { + if (aOffset >= mLength) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure, offset: %zu", __func__, aOffset)); + return nullptr; + } + + mPtr = mPtr - Offset() + aOffset; + mRemaining = mLength - aOffset; + return mPtr; + } + + const uint8_t* Reset() { + mPtr -= Offset(); + mRemaining = mLength; + return mPtr; + } + + uint32_t Align() const { return 4 - ((intptr_t)mPtr & 3); } + + template + bool CanReadType() const { + return mRemaining >= sizeof(T); + } + + template + T ReadType() { + auto ptr = Read(sizeof(T)); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return 0; + } + // handle unaligned accesses by memcpying + T ret; + memcpy(&ret, ptr, sizeof(T)); + return ret; + } + + template + [[nodiscard]] bool ReadArray(nsTArray& aDest, size_t aLength) { + auto ptr = Read(aLength * sizeof(T)); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return false; + } + + aDest.Clear(); + aDest.AppendElements(reinterpret_cast(ptr), aLength); + return true; + } + + template + [[nodiscard]] bool ReadArray(FallibleTArray& aDest, size_t aLength) { + auto ptr = Read(aLength * sizeof(T)); + if (!ptr) { + 
MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return false; + } + + aDest.Clear(); + if (!aDest.SetCapacity(aLength, mozilla::fallible)) { + return false; + } + MOZ_ALWAYS_TRUE(aDest.AppendElements(reinterpret_cast(ptr), + aLength, mozilla::fallible)); + return true; + } + + template + mozilla::Result, nsresult> ReadSpan(size_t aLength) { + auto ptr = Read(aLength * sizeof(T)); + if (!ptr) { + MOZ_LOG(gMP4MetadataLog, mozilla::LogLevel::Error, + ("%s: failure", __func__)); + return mozilla::Err(NS_ERROR_FAILURE); + } + return Span(reinterpret_cast(ptr), aLength); + } + + private: + const uint8_t* mPtr; + size_t mRemaining; + size_t mLength; +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/ByteWriter.h b/dom/media/ByteWriter.h new file mode 100644 index 0000000000..cc7f5ecf3f --- /dev/null +++ b/dom/media/ByteWriter.h @@ -0,0 +1,61 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef BYTE_WRITER_H_ +#define BYTE_WRITER_H_ + +#include "mozilla/EndianUtils.h" +#include "nsTArray.h" + +namespace mozilla { + +template +class ByteWriter { + public: + explicit ByteWriter(nsTArray& aData) : mPtr(aData) {} + ~ByteWriter() = default; + + [[nodiscard]] bool WriteU8(uint8_t aByte) { return Write(&aByte, 1); } + + [[nodiscard]] bool WriteU16(uint16_t aShort) { + uint8_t c[2]; + Endianess::writeUint16(&c[0], aShort); + return Write(&c[0], 2); + } + + [[nodiscard]] bool WriteU32(uint32_t aLong) { + uint8_t c[4]; + Endianess::writeUint32(&c[0], aLong); + return Write(&c[0], 4); + } + + [[nodiscard]] bool Write32(int32_t aLong) { + uint8_t c[4]; + Endianess::writeInt32(&c[0], aLong); + return Write(&c[0], 4); + } + + [[nodiscard]] bool WriteU64(uint64_t aLongLong) { + uint8_t c[8]; + Endianess::writeUint64(&c[0], aLongLong); + return Write(&c[0], 8); + } + + [[nodiscard]] bool Write64(int64_t aLongLong) { + uint8_t c[8]; + Endianess::writeInt64(&c[0], aLongLong); + return Write(&c[0], 8); + } + + [[nodiscard]] bool Write(const uint8_t* aSrc, size_t aCount) { + return mPtr.AppendElements(aSrc, aCount, mozilla::fallible); + } + + private: + nsTArray& mPtr; +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/CallbackThreadRegistry.cpp b/dom/media/CallbackThreadRegistry.cpp new file mode 100644 index 0000000000..f4d2af5bd1 --- /dev/null +++ b/dom/media/CallbackThreadRegistry.cpp @@ -0,0 +1,101 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "CallbackThreadRegistry.h" +#include "mozilla/ClearOnShutdown.h" + +namespace mozilla { +struct CallbackThreadRegistrySingleton { + CallbackThreadRegistrySingleton() + : mRegistry(MakeUnique()) { + NS_DispatchToMainThread( + NS_NewRunnableFunction(__func__, [registry = &mRegistry] { + const auto phase = ShutdownPhase::XPCOMShutdownFinal; + MOZ_DIAGNOSTIC_ASSERT(!PastShutdownPhase(phase)); + ClearOnShutdown(registry, phase); + })); + } + + UniquePtr mRegistry; +}; + +CallbackThreadRegistry::CallbackThreadRegistry() + : mThreadIds("CallbackThreadRegistry::mThreadIds") {} + +/* static */ +CallbackThreadRegistry* CallbackThreadRegistry::Get() { + static CallbackThreadRegistrySingleton sSingleton; + return sSingleton.mRegistry.get(); +} + +static bool CanLeak() { +#ifdef NS_BUILD_REFCNT_LOGGING + static const bool logging = + getenv("XPCOM_MEM_LEAK_LOG") || getenv("XPCOM_MEM_BLOAT_LOG") || + getenv("XPCOM_MEM_REFCNT_LOG") || getenv("XPCOM_MEM_ALLOC_LOG") || + getenv("XPCOM_MEM_COMPTR_LOG"); + return logging; +#else + return false; +#endif +} + +void CallbackThreadRegistry::Register(ProfilerThreadId aThreadId, + const char* aName) { + if (!aThreadId.IsSpecified()) { + // profiler_current_thread_id is unspecified on unsupported platforms. + return; + } + + if (CanLeak()) { + NS_WARNING( + "Not registering callback thread due to refcount logging; it may show " + "up as a leak of the TLS-backed nsThread wrapper if the thread " + "outlives xpcom shutdown."); + return; + } + + auto threadIds = mThreadIds.Lock(); + for (uint32_t i = 0; i < threadIds->Length(); i++) { + if ((*threadIds)[i].mId == aThreadId) { + (*threadIds)[i].mUserCount++; + return; + } + } + ThreadUserCount tuc; + tuc.mId = aThreadId; + tuc.mUserCount = 1; + threadIds->AppendElement(tuc); + PROFILER_REGISTER_THREAD(aName); +} + +void CallbackThreadRegistry::Unregister(ProfilerThreadId aThreadId) { + if (!aThreadId.IsSpecified()) { + // profiler_current_thread_id is unspedified on unsupported platforms. + return; + } + + if (CanLeak()) { + return; + } + + auto threadIds = mThreadIds.Lock(); + for (uint32_t i = 0; i < threadIds->Length(); i++) { + if ((*threadIds)[i].mId == aThreadId) { + MOZ_ASSERT((*threadIds)[i].mUserCount > 0); + (*threadIds)[i].mUserCount--; + + if ((*threadIds)[i].mUserCount == 0) { + PROFILER_UNREGISTER_THREAD(); + threadIds->RemoveElementAt(i); + } + return; + } + } + MOZ_ASSERT_UNREACHABLE("Current thread was not registered"); +} + +} // namespace mozilla diff --git a/dom/media/CallbackThreadRegistry.h b/dom/media/CallbackThreadRegistry.h new file mode 100644 index 0000000000..a154b3a35f --- /dev/null +++ b/dom/media/CallbackThreadRegistry.h @@ -0,0 +1,60 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef CALLBACKTHREADREGISTRY_H +#define CALLBACKTHREADREGISTRY_H + +#include +#include +#include +#include +#include + +namespace mozilla { + +// This class is a singleton that tracks various callback threads and makes +// sure they are registered or unregistered to the profiler safely and +// consistently. +// +// Register and Unregister are fairly expensive and shouldn't be used in a hot +// path. 
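// A rough usage sketch (hedged; the surrounding audio callback and its name
// are hypothetical, only Register/Unregister and profiler_current_thread_id()
// come from this interface):
//
//   // From the first invocation of a real-time audio callback:
//   CallbackThreadRegistry::Get()->Register(profiler_current_thread_id(),
//                                           "NativeAudioCallback");
//   // ... many callbacks later, when this thread stops being used ...
//   CallbackThreadRegistry::Get()->Unregister(profiler_current_thread_id());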
+class CallbackThreadRegistry final { + public: + CallbackThreadRegistry(); + + ~CallbackThreadRegistry() { + // It would be nice to be able to assert that all threads have been + // unregistered, but we can't: it's legal to suspend an audio stream, so + // that the callback isn't called, and then immediately destroy it. + } + + CallbackThreadRegistry(const CallbackThreadRegistry&) = delete; + CallbackThreadRegistry& operator=(const CallbackThreadRegistry&) = delete; + CallbackThreadRegistry(CallbackThreadRegistry&&) = delete; + CallbackThreadRegistry& operator=(CallbackThreadRegistry&&) = delete; + + // Returns the global instance of CallbackThreadRegistry. Safe from all + // threads. + static CallbackThreadRegistry* Get(); + + // This is intended to be called in the first callback of a callback + // thread. + void Register(ProfilerThreadId aThreadId, const char* aName); + + // This is intended to be called when an object stops an audio callback thread + void Unregister(ProfilerThreadId aThreadId); + + private: + struct ThreadUserCount { + ProfilerThreadId mId; // from profiler_current_thread_id + int mUserCount = 0; + }; + DataMutex> mThreadIds; +}; + +} // namespace mozilla + +#endif // CALLBACKTHREADREGISTRY_H diff --git a/dom/media/CanvasCaptureMediaStream.cpp b/dom/media/CanvasCaptureMediaStream.cpp new file mode 100644 index 0000000000..377243540c --- /dev/null +++ b/dom/media/CanvasCaptureMediaStream.cpp @@ -0,0 +1,212 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "CanvasCaptureMediaStream.h" + +#include "DOMMediaStream.h" +#include "ImageContainer.h" +#include "MediaTrackGraph.h" +#include "Tracing.h" +#include "VideoSegment.h" +#include "gfxPlatform.h" +#include "mozilla/Atomics.h" +#include "mozilla/dom/CanvasCaptureMediaStreamBinding.h" +#include "mozilla/gfx/2D.h" +#include "nsContentUtils.h" + +using namespace mozilla::layers; +using namespace mozilla::gfx; + +namespace mozilla::dom { + +OutputStreamDriver::OutputStreamDriver(SourceMediaTrack* aSourceStream, + const PrincipalHandle& aPrincipalHandle) + : mSourceStream(aSourceStream), mPrincipalHandle(aPrincipalHandle) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mSourceStream); +} + +OutputStreamDriver::~OutputStreamDriver() { + MOZ_ASSERT(NS_IsMainThread()); + EndTrack(); +} + +void OutputStreamDriver::EndTrack() { + MOZ_ASSERT(NS_IsMainThread()); + if (!mSourceStream->IsDestroyed()) { + mSourceStream->Destroy(); + } +} + +void OutputStreamDriver::SetImage(RefPtr&& aImage, + const TimeStamp& aTime) { + MOZ_ASSERT(NS_IsMainThread()); + + VideoSegment segment; + const auto size = aImage->GetSize(); + segment.AppendFrame(aImage.forget(), size, mPrincipalHandle, false, aTime); + mSourceStream->AppendData(&segment); +} + +// ---------------------------------------------------------------------- + +class TimerDriver : public OutputStreamDriver { + public: + explicit TimerDriver(SourceMediaTrack* aSourceStream, const double& aFPS, + const PrincipalHandle& aPrincipalHandle) + : OutputStreamDriver(aSourceStream, aPrincipalHandle), + mFrameInterval(aFPS == 0.0 ? 
TimeDuration::Forever() + : TimeDuration::FromSeconds(1.0 / aFPS)) {} + + void RequestFrameCapture() override { mExplicitCaptureRequested = true; } + + bool FrameCaptureRequested(const TimeStamp& aTime) const override { + if (mLastFrameTime.IsNull()) { + // All CanvasCaptureMediaStreams shall at least get one frame. + return true; + } + + if (mExplicitCaptureRequested) { + return true; + } + + if ((aTime - mLastFrameTime) >= mFrameInterval) { + return true; + } + + return false; + } + + void NewFrame(already_AddRefed aImage, + const TimeStamp& aTime) override { + nsCString str; + if (profiler_thread_is_being_profiled_for_markers()) { + TimeDuration sinceLast = + aTime - (mLastFrameTime.IsNull() ? aTime : mLastFrameTime); + str.AppendPrintf( + "TimerDriver %staking frame (%sexplicitly requested; after %.2fms; " + "interval cap %.2fms)", + sinceLast >= mFrameInterval ? "" : "NOT ", + mExplicitCaptureRequested ? "" : "NOT ", sinceLast.ToMilliseconds(), + mFrameInterval.ToMilliseconds()); + } + AUTO_PROFILER_MARKER_TEXT("Canvas CaptureStream", MEDIA_RT, {}, str); + + RefPtr image = aImage; + + if (!FrameCaptureRequested(aTime)) { + return; + } + + mLastFrameTime = aTime; + mExplicitCaptureRequested = false; + SetImage(std::move(image), aTime); + } + + protected: + virtual ~TimerDriver() = default; + + private: + const TimeDuration mFrameInterval; + bool mExplicitCaptureRequested = false; + TimeStamp mLastFrameTime; +}; + +// ---------------------------------------------------------------------- + +class AutoDriver : public OutputStreamDriver { + public: + explicit AutoDriver(SourceMediaTrack* aSourceStream, + const PrincipalHandle& aPrincipalHandle) + : OutputStreamDriver(aSourceStream, aPrincipalHandle) {} + + void RequestFrameCapture() override {} + + bool FrameCaptureRequested(const TimeStamp& aTime) const override { + return true; + } + + void NewFrame(already_AddRefed aImage, + const TimeStamp& aTime) override { + AUTO_PROFILER_MARKER_TEXT("Canvas CaptureStream", MEDIA_RT, {}, + "AutoDriver taking frame"_ns); + + RefPtr image = aImage; + SetImage(std::move(image), aTime); + } + + protected: + virtual ~AutoDriver() = default; +}; + +// ---------------------------------------------------------------------- + +NS_IMPL_CYCLE_COLLECTION_INHERITED(CanvasCaptureMediaStream, DOMMediaStream, + mCanvas) + +NS_IMPL_ADDREF_INHERITED(CanvasCaptureMediaStream, DOMMediaStream) +NS_IMPL_RELEASE_INHERITED(CanvasCaptureMediaStream, DOMMediaStream) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(CanvasCaptureMediaStream) +NS_INTERFACE_MAP_END_INHERITING(DOMMediaStream) + +CanvasCaptureMediaStream::CanvasCaptureMediaStream(nsPIDOMWindowInner* aWindow, + HTMLCanvasElement* aCanvas) + : DOMMediaStream(aWindow), mCanvas(aCanvas) {} + +CanvasCaptureMediaStream::~CanvasCaptureMediaStream() = default; + +JSObject* CanvasCaptureMediaStream::WrapObject( + JSContext* aCx, JS::Handle aGivenProto) { + return dom::CanvasCaptureMediaStream_Binding::Wrap(aCx, this, aGivenProto); +} + +void CanvasCaptureMediaStream::RequestFrame() { + if (mOutputStreamDriver) { + mOutputStreamDriver->RequestFrameCapture(); + } +} + +nsresult CanvasCaptureMediaStream::Init(const dom::Optional& aFPS, + nsIPrincipal* aPrincipal) { + MediaTrackGraph* graph = MediaTrackGraph::GetInstance( + MediaTrackGraph::SYSTEM_THREAD_DRIVER, GetOwner(), + MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE, + MediaTrackGraph::DEFAULT_OUTPUT_DEVICE); + SourceMediaTrack* source = graph->CreateSourceTrack(MediaSegment::VIDEO); + PrincipalHandle principalHandle = 
MakePrincipalHandle(aPrincipal); + if (!aFPS.WasPassed()) { + mOutputStreamDriver = new AutoDriver(source, principalHandle); + } else if (aFPS.Value() < 0) { + return NS_ERROR_ILLEGAL_VALUE; + } else { + // Cap frame rate to 60 FPS for sanity + double fps = std::min(60.0, aFPS.Value()); + mOutputStreamDriver = new TimerDriver(source, fps, principalHandle); + } + return NS_OK; +} + +FrameCaptureListener* CanvasCaptureMediaStream::FrameCaptureListener() { + return mOutputStreamDriver; +} + +void CanvasCaptureMediaStream::StopCapture() { + if (!mOutputStreamDriver) { + return; + } + + mOutputStreamDriver->EndTrack(); + mOutputStreamDriver = nullptr; +} + +SourceMediaTrack* CanvasCaptureMediaStream::GetSourceStream() const { + if (!mOutputStreamDriver) { + return nullptr; + } + return mOutputStreamDriver->mSourceStream; +} + +} // namespace mozilla::dom diff --git a/dom/media/CanvasCaptureMediaStream.h b/dom/media/CanvasCaptureMediaStream.h new file mode 100644 index 0000000000..faa5972142 --- /dev/null +++ b/dom/media/CanvasCaptureMediaStream.h @@ -0,0 +1,132 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_dom_CanvasCaptureMediaStream_h_ +#define mozilla_dom_CanvasCaptureMediaStream_h_ + +#include "DOMMediaStream.h" +#include "mozilla/dom/HTMLCanvasElement.h" +#include "PrincipalHandle.h" + +class nsIPrincipal; + +namespace mozilla { +class DOMMediaStream; +class SourceMediaTrack; + +namespace layers { +class Image; +} // namespace layers + +namespace dom { +class CanvasCaptureMediaStream; +class HTMLCanvasElement; +class OutputStreamFrameListener; + +/* + * The CanvasCaptureMediaStream is a MediaStream subclass that provides a video + * track containing frames from a canvas. See an architectural overview below. + * + * ---------------------------------------------------------------------------- + * === Main Thread === __________________________ + * | | + * | CanvasCaptureMediaStream | + * |__________________________| + * | + * | RequestFrame() + * v + * ________________________ + * ________ FrameCaptureRequested? | | + * | | ------------------------> | OutputStreamDriver | + * | Canvas | SetFrameCapture() | (FrameCaptureListener) | + * |________| ------------------------> |________________________| + * | + * | SetImage() - + * | AppendToTrack() + * | + * v + * __________________________ + * | | + * | MTG / SourceMediaTrack | + * |__________________________| + * ---------------------------------------------------------------------------- + */ + +/* + * Base class for drivers of the output stream. + * It is up to each sub class to implement the NewFrame() callback of + * FrameCaptureListener. + */ +class OutputStreamDriver : public FrameCaptureListener { + public: + OutputStreamDriver(SourceMediaTrack* aSourceStream, + const PrincipalHandle& aPrincipalHandle); + + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OutputStreamDriver); + + /* + * Called from js' requestFrame() when it wants the next painted frame to be + * explicitly captured. + */ + virtual void RequestFrameCapture() = 0; + + /* + * Sub classes can SetImage() to update the image being appended to the + * output stream. It will be appended on the next NotifyPull from MTG. 
+ */ + void SetImage(RefPtr&& aImage, const TimeStamp& aTime); + + /* + * Ends the track in mSourceStream when we know there won't be any more images + * requested for it. + */ + void EndTrack(); + + const RefPtr mSourceStream; + const PrincipalHandle mPrincipalHandle; + + protected: + virtual ~OutputStreamDriver(); +}; + +class CanvasCaptureMediaStream : public DOMMediaStream { + public: + CanvasCaptureMediaStream(nsPIDOMWindowInner* aWindow, + HTMLCanvasElement* aCanvas); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(CanvasCaptureMediaStream, + DOMMediaStream) + + nsresult Init(const dom::Optional& aFPS, nsIPrincipal* aPrincipal); + + JSObject* WrapObject(JSContext* aCx, + JS::Handle aGivenProto) override; + + // WebIDL + HTMLCanvasElement* Canvas() const { return mCanvas; } + void RequestFrame(); + + dom::FrameCaptureListener* FrameCaptureListener(); + + /** + * Stops capturing for this stream at mCanvas. + */ + void StopCapture(); + + SourceMediaTrack* GetSourceStream() const; + + protected: + ~CanvasCaptureMediaStream(); + + private: + RefPtr mCanvas; + RefPtr mOutputStreamDriver; +}; + +} // namespace dom +} // namespace mozilla + +#endif /* mozilla_dom_CanvasCaptureMediaStream_h_ */ diff --git a/dom/media/ChannelMediaDecoder.cpp b/dom/media/ChannelMediaDecoder.cpp new file mode 100644 index 0000000000..c6da221f94 --- /dev/null +++ b/dom/media/ChannelMediaDecoder.cpp @@ -0,0 +1,567 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ChannelMediaDecoder.h" +#include "ChannelMediaResource.h" +#include "DecoderTraits.h" +#include "ExternalEngineStateMachine.h" +#include "MediaDecoderStateMachine.h" +#include "MediaFormatReader.h" +#include "BaseMediaResource.h" +#include "MediaShutdownManager.h" +#include "base/process_util.h" +#include "mozilla/Preferences.h" +#include "mozilla/StaticPrefs_media.h" +#include "VideoUtils.h" + +namespace mozilla { + +using TimeUnit = media::TimeUnit; + +extern LazyLogModule gMediaDecoderLog; +#define LOG(x, ...) 
\ + DDMOZ_LOG(gMediaDecoderLog, LogLevel::Debug, x, ##__VA_ARGS__) + +ChannelMediaDecoder::ResourceCallback::ResourceCallback( + AbstractThread* aMainThread) + : mAbstractMainThread(aMainThread) { + MOZ_ASSERT(aMainThread); + DecoderDoctorLogger::LogConstructionAndBase( + "ChannelMediaDecoder::ResourceCallback", this, + static_cast(this)); +} + +ChannelMediaDecoder::ResourceCallback::~ResourceCallback() { + DecoderDoctorLogger::LogDestruction("ChannelMediaDecoder::ResourceCallback", + this); +} + +void ChannelMediaDecoder::ResourceCallback::Connect( + ChannelMediaDecoder* aDecoder) { + MOZ_ASSERT(NS_IsMainThread()); + mDecoder = aDecoder; + DecoderDoctorLogger::LinkParentAndChild( + "ChannelMediaDecoder::ResourceCallback", this, "decoder", mDecoder); + mTimer = NS_NewTimer(mAbstractMainThread->AsEventTarget()); +} + +void ChannelMediaDecoder::ResourceCallback::Disconnect() { + MOZ_ASSERT(NS_IsMainThread()); + if (mDecoder) { + DecoderDoctorLogger::UnlinkParentAndChild( + "ChannelMediaDecoder::ResourceCallback", this, mDecoder); + mDecoder = nullptr; + mTimer->Cancel(); + mTimer = nullptr; + } +} + +AbstractThread* ChannelMediaDecoder::ResourceCallback::AbstractMainThread() + const { + return mAbstractMainThread; +} + +MediaDecoderOwner* ChannelMediaDecoder::ResourceCallback::GetMediaOwner() + const { + MOZ_ASSERT(NS_IsMainThread()); + return mDecoder ? mDecoder->GetOwner() : nullptr; +} + +void ChannelMediaDecoder::ResourceCallback::NotifyNetworkError( + const MediaResult& aError) { + MOZ_ASSERT(NS_IsMainThread()); + DDLOGEX2("ChannelMediaDecoder::ResourceCallback", this, DDLogCategory::Log, + "network_error", aError); + if (mDecoder) { + mDecoder->NetworkError(aError); + } +} + +/* static */ +void ChannelMediaDecoder::ResourceCallback::TimerCallback(nsITimer* aTimer, + void* aClosure) { + MOZ_ASSERT(NS_IsMainThread()); + ResourceCallback* thiz = static_cast(aClosure); + MOZ_ASSERT(thiz->mDecoder); + thiz->mDecoder->NotifyReaderDataArrived(); + thiz->mTimerArmed = false; +} + +void ChannelMediaDecoder::ResourceCallback::NotifyDataArrived() { + MOZ_ASSERT(NS_IsMainThread()); + DDLOGEX2("ChannelMediaDecoder::ResourceCallback", this, DDLogCategory::Log, + "data_arrived", true); + + if (!mDecoder) { + return; + } + + mDecoder->DownloadProgressed(); + + if (mTimerArmed) { + return; + } + // In situations where these notifications come from stochastic network + // activity, we can save significant computation by throttling the + // calls to MediaDecoder::NotifyDataArrived() which will update the buffer + // ranges of the reader. 
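  // The timer below is a one-shot: while it is armed, further notifications
  // only reach DownloadProgressed() above, and the reader is notified once
  // when TimerCallback fires and clears mTimerArmed.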
+ mTimerArmed = true; + mTimer->InitWithNamedFuncCallback( + TimerCallback, this, sDelay, nsITimer::TYPE_ONE_SHOT, + "ChannelMediaDecoder::ResourceCallback::TimerCallback"); +} + +void ChannelMediaDecoder::ResourceCallback::NotifyDataEnded(nsresult aStatus) { + DDLOGEX2("ChannelMediaDecoder::ResourceCallback", this, DDLogCategory::Log, + "data_ended", aStatus); + MOZ_ASSERT(NS_IsMainThread()); + if (mDecoder) { + mDecoder->NotifyDownloadEnded(aStatus); + } +} + +void ChannelMediaDecoder::ResourceCallback::NotifyPrincipalChanged() { + MOZ_ASSERT(NS_IsMainThread()); + DDLOGEX2("ChannelMediaDecoder::ResourceCallback", this, DDLogCategory::Log, + "principal_changed", true); + if (mDecoder) { + mDecoder->NotifyPrincipalChanged(); + } +} + +void ChannelMediaDecoder::NotifyPrincipalChanged() { + MOZ_ASSERT(NS_IsMainThread()); + MediaDecoder::NotifyPrincipalChanged(); + if (!mInitialChannelPrincipalKnown) { + // We'll receive one notification when the channel's initial principal + // is known, after all HTTP redirects have resolved. This isn't really a + // principal change, so return here to avoid the mSameOriginMedia check + // below. + mInitialChannelPrincipalKnown = true; + return; + } + if (!mSameOriginMedia) { + // Block mid-flight redirects to non CORS same origin destinations. + // See bugs 1441153, 1443942. + LOG("ChannnelMediaDecoder prohibited cross origin redirect blocked."); + NetworkError(MediaResult(NS_ERROR_DOM_BAD_URI, + "Prohibited cross origin redirect blocked")); + } +} + +void ChannelMediaDecoder::ResourceCallback::NotifySuspendedStatusChanged( + bool aSuspendedByCache) { + MOZ_ASSERT(NS_IsMainThread()); + DDLOGEX2("ChannelMediaDecoder::ResourceCallback", this, DDLogCategory::Log, + "suspended_status_changed", aSuspendedByCache); + MediaDecoderOwner* owner = GetMediaOwner(); + if (owner) { + owner->NotifySuspendedByCache(aSuspendedByCache); + } +} + +ChannelMediaDecoder::ChannelMediaDecoder(MediaDecoderInit& aInit) + : MediaDecoder(aInit), + mResourceCallback( + new ResourceCallback(aInit.mOwner->AbstractMainThread())) { + mResourceCallback->Connect(this); +} + +/* static */ +already_AddRefed ChannelMediaDecoder::Create( + MediaDecoderInit& aInit, DecoderDoctorDiagnostics* aDiagnostics) { + MOZ_ASSERT(NS_IsMainThread()); + RefPtr decoder; + if (DecoderTraits::CanHandleContainerType(aInit.mContainerType, + aDiagnostics) != CANPLAY_NO) { + decoder = new ChannelMediaDecoder(aInit); + return decoder.forget(); + } + + return nullptr; +} + +bool ChannelMediaDecoder::CanClone() { + MOZ_ASSERT(NS_IsMainThread()); + return mResource && mResource->CanClone(); +} + +already_AddRefed ChannelMediaDecoder::Clone( + MediaDecoderInit& aInit) { + if (!mResource || DecoderTraits::CanHandleContainerType( + aInit.mContainerType, nullptr) == CANPLAY_NO) { + return nullptr; + } + RefPtr decoder = new ChannelMediaDecoder(aInit); + nsresult rv = decoder->Load(mResource); + if (NS_FAILED(rv)) { + decoder->Shutdown(); + return nullptr; + } + return decoder.forget(); +} + +MediaDecoderStateMachineBase* ChannelMediaDecoder::CreateStateMachine( + bool aDisableExternalEngine) { + MOZ_ASSERT(NS_IsMainThread()); + MediaFormatReaderInit init; + init.mVideoFrameContainer = GetVideoFrameContainer(); + init.mKnowsCompositor = GetCompositor(); + init.mCrashHelper = GetOwner()->CreateGMPCrashHelper(); + init.mFrameStats = mFrameStats; + init.mResource = mResource; + init.mMediaDecoderOwnerID = mOwner; + static Atomic sTrackingIdCounter(0); + init.mTrackingId.emplace(TrackingId::Source::ChannelDecoder, + 
sTrackingIdCounter++, + TrackingId::TrackAcrossProcesses::Yes); + mReader = DecoderTraits::CreateReader(ContainerType(), init); + +#ifdef MOZ_WMF_MEDIA_ENGINE + // TODO : Only for testing development for now. In the future this should be + // used for encrypted content only. + if (StaticPrefs::media_wmf_media_engine_enabled() && + StaticPrefs::media_wmf_media_engine_channel_decoder_enabled() && + !aDisableExternalEngine) { + return new ExternalEngineStateMachine(this, mReader); + } +#endif + return new MediaDecoderStateMachine(this, mReader); +} + +void ChannelMediaDecoder::Shutdown() { + mResourceCallback->Disconnect(); + MediaDecoder::Shutdown(); + + if (mResource) { + // Force any outstanding seek and byterange requests to complete + // to prevent shutdown from deadlocking. + mResourceClosePromise = mResource->Close(); + } +} + +void ChannelMediaDecoder::ShutdownInternal() { + if (!mResourceClosePromise) { + MediaShutdownManager::Instance().Unregister(this); + return; + } + + mResourceClosePromise->Then( + AbstractMainThread(), __func__, + [self = RefPtr(this)] { + MediaShutdownManager::Instance().Unregister(self); + }); +} + +nsresult ChannelMediaDecoder::Load(nsIChannel* aChannel, + bool aIsPrivateBrowsing, + nsIStreamListener** aStreamListener) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(!mResource); + MOZ_ASSERT(aStreamListener); + + mResource = BaseMediaResource::Create(mResourceCallback, aChannel, + aIsPrivateBrowsing); + if (!mResource) { + return NS_ERROR_FAILURE; + } + DDLINKCHILD("resource", mResource.get()); + + nsresult rv = MediaShutdownManager::Instance().Register(this); + if (NS_WARN_IF(NS_FAILED(rv))) { + return rv; + } + + rv = mResource->Open(aStreamListener); + NS_ENSURE_SUCCESS(rv, rv); + return CreateAndInitStateMachine(mResource->IsLiveStream()); +} + +nsresult ChannelMediaDecoder::Load(BaseMediaResource* aOriginal) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(!mResource); + + mResource = aOriginal->CloneData(mResourceCallback); + if (!mResource) { + return NS_ERROR_FAILURE; + } + DDLINKCHILD("resource", mResource.get()); + + nsresult rv = MediaShutdownManager::Instance().Register(this); + if (NS_WARN_IF(NS_FAILED(rv))) { + return rv; + } + return CreateAndInitStateMachine(mResource->IsLiveStream()); +} + +void ChannelMediaDecoder::NotifyDownloadEnded(nsresult aStatus) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + LOG("NotifyDownloadEnded, status=%" PRIx32, static_cast(aStatus)); + + if (NS_SUCCEEDED(aStatus)) { + // Download ends successfully. This is a stream with a finite length. + GetStateMachine()->DispatchIsLiveStream(false); + } + + MediaDecoderOwner* owner = GetOwner(); + if (NS_SUCCEEDED(aStatus) || aStatus == NS_BASE_STREAM_CLOSED) { + nsCOMPtr r = NS_NewRunnableFunction( + "ChannelMediaDecoder::UpdatePlaybackRate", + [stats = mPlaybackStatistics, + res = RefPtr(mResource), duration = mDuration]() { + auto rate = ComputePlaybackRate(stats, res, + duration.match(DurationToTimeUnit())); + UpdatePlaybackRate(rate, res); + }); + nsresult rv = GetStateMachine()->OwnerThread()->Dispatch(r.forget()); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; + owner->DownloadSuspended(); + // NotifySuspendedStatusChanged will tell the element that download + // has been suspended "by the cache", which is true since we never + // download anything. The element can then transition to HAVE_ENOUGH_DATA. 
+ owner->NotifySuspendedByCache(true); + } else if (aStatus == NS_BINDING_ABORTED) { + // Download has been cancelled by user. + owner->LoadAborted(); + } else { + NetworkError(MediaResult(aStatus, "Download aborted")); + } +} + +bool ChannelMediaDecoder::CanPlayThroughImpl() { + MOZ_ASSERT(NS_IsMainThread()); + return mCanPlayThrough; +} + +void ChannelMediaDecoder::OnPlaybackEvent(MediaPlaybackEvent&& aEvent) { + MOZ_ASSERT(NS_IsMainThread()); + switch (aEvent.mType) { + case MediaPlaybackEvent::PlaybackStarted: + mPlaybackPosition = aEvent.mData.as(); + mPlaybackStatistics.Start(); + break; + case MediaPlaybackEvent::PlaybackProgressed: { + int64_t newPos = aEvent.mData.as(); + mPlaybackStatistics.AddBytes(newPos - mPlaybackPosition); + mPlaybackPosition = newPos; + break; + } + case MediaPlaybackEvent::PlaybackStopped: { + int64_t newPos = aEvent.mData.as(); + mPlaybackStatistics.AddBytes(newPos - mPlaybackPosition); + mPlaybackPosition = newPos; + mPlaybackStatistics.Stop(); + break; + } + default: + break; + } + MediaDecoder::OnPlaybackEvent(std::move(aEvent)); +} + +void ChannelMediaDecoder::DurationChanged() { + MOZ_ASSERT(NS_IsMainThread()); + MediaDecoder::DurationChanged(); + // Duration has changed so we should recompute playback rate + nsCOMPtr r = NS_NewRunnableFunction( + "ChannelMediaDecoder::UpdatePlaybackRate", + [stats = mPlaybackStatistics, res = RefPtr(mResource), + duration = mDuration]() { + auto rate = ComputePlaybackRate(stats, res, + duration.match(DurationToTimeUnit())); + UpdatePlaybackRate(rate, res); + }); + nsresult rv = GetStateMachine()->OwnerThread()->Dispatch(r.forget()); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; +} + +void ChannelMediaDecoder::DownloadProgressed() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + GetOwner()->DownloadProgressed(); + + using StatsPromise = MozPromise; + InvokeAsync(GetStateMachine()->OwnerThread(), __func__, + [playbackStats = mPlaybackStatistics, + res = RefPtr(mResource), duration = mDuration, + pos = mPlaybackPosition]() { + auto rate = ComputePlaybackRate( + playbackStats, res, duration.match(DurationToTimeUnit())); + UpdatePlaybackRate(rate, res); + MediaStatistics stats = GetStatistics(rate, res, pos); + return StatsPromise::CreateAndResolve(stats, __func__); + }) + ->Then( + mAbstractMainThread, __func__, + [=, + self = RefPtr(this)](MediaStatistics aStats) { + if (IsShutdown()) { + return; + } + mCanPlayThrough = aStats.CanPlayThrough(); + GetStateMachine()->DispatchCanPlayThrough(mCanPlayThrough); + mResource->ThrottleReadahead(ShouldThrottleDownload(aStats)); + // Update readyState since mCanPlayThrough might have changed. 
+ GetOwner()->UpdateReadyState(); + }, + []() { MOZ_ASSERT_UNREACHABLE("Promise not resolved"); }); +} + +/* static */ ChannelMediaDecoder::PlaybackRateInfo +ChannelMediaDecoder::ComputePlaybackRate(const MediaChannelStatistics& aStats, + BaseMediaResource* aResource, + const TimeUnit& aDuration) { + MOZ_ASSERT(!NS_IsMainThread()); + + int64_t length = aResource->GetLength(); + if (aDuration.IsValid() && !aDuration.IsInfinite() && + aDuration.IsPositive() && length >= 0 && + length / aDuration.ToSeconds() < UINT32_MAX) { + return {uint32_t(length / aDuration.ToSeconds()), true}; + } + + bool reliable = false; + uint32_t rate = aStats.GetRate(&reliable); + return {rate, reliable}; +} + +/* static */ +void ChannelMediaDecoder::UpdatePlaybackRate(const PlaybackRateInfo& aInfo, + BaseMediaResource* aResource) { + MOZ_ASSERT(!NS_IsMainThread()); + + uint32_t rate = aInfo.mRate; + + if (aInfo.mReliable) { + // Avoid passing a zero rate + rate = std::max(rate, 1u); + } else { + // Set a minimum rate of 10,000 bytes per second ... sometimes we just + // don't have good data + rate = std::max(rate, 10000u); + } + + aResource->SetPlaybackRate(rate); +} + +/* static */ +MediaStatistics ChannelMediaDecoder::GetStatistics( + const PlaybackRateInfo& aInfo, BaseMediaResource* aRes, + int64_t aPlaybackPosition) { + MOZ_ASSERT(!NS_IsMainThread()); + + MediaStatistics result; + result.mDownloadRate = aRes->GetDownloadRate(&result.mDownloadRateReliable); + result.mDownloadPosition = aRes->GetCachedDataEnd(aPlaybackPosition); + result.mTotalBytes = aRes->GetLength(); + result.mPlaybackRate = aInfo.mRate; + result.mPlaybackRateReliable = aInfo.mReliable; + result.mPlaybackPosition = aPlaybackPosition; + return result; +} + +bool ChannelMediaDecoder::ShouldThrottleDownload( + const MediaStatistics& aStats) { + // We throttle the download if either the throttle override pref is set + // (so that we always throttle at the readahead limit on mobile if using + // a cellular network) or if the download is fast enough that there's no + // concern about playback being interrupted. + MOZ_ASSERT(NS_IsMainThread()); + NS_ENSURE_TRUE(GetStateMachine(), false); + + int64_t length = aStats.mTotalBytes; + if (length > 0 && + length <= int64_t(StaticPrefs::media_memory_cache_max_size()) * 1024) { + // Don't throttle the download of small resources. This is to speed + // up seeking, as seeks into unbuffered ranges would require starting + // up a new HTTP transaction, which adds latency. + return false; + } + + if (OnCellularConnection() && + Preferences::GetBool( + "media.throttle-cellular-regardless-of-download-rate", false)) { + return true; + } + + if (!aStats.mDownloadRateReliable || !aStats.mPlaybackRateReliable) { + return false; + } + uint32_t factor = + std::max(2u, Preferences::GetUint("media.throttle-factor", 2)); + return aStats.mDownloadRate > factor * aStats.mPlaybackRate; +} + +void ChannelMediaDecoder::AddSizeOfResources(ResourceSizes* aSizes) { + MOZ_ASSERT(NS_IsMainThread()); + if (mResource) { + aSizes->mByteSize += mResource->SizeOfIncludingThis(aSizes->mMallocSizeOf); + } +} + +already_AddRefed ChannelMediaDecoder::GetCurrentPrincipal() { + MOZ_ASSERT(NS_IsMainThread()); + return mResource ? mResource->GetCurrentPrincipal() : nullptr; +} + +bool ChannelMediaDecoder::HadCrossOriginRedirects() { + MOZ_ASSERT(NS_IsMainThread()); + return mResource ? 
mResource->HadCrossOriginRedirects() : false; +} + +bool ChannelMediaDecoder::IsTransportSeekable() { + MOZ_ASSERT(NS_IsMainThread()); + return mResource->IsTransportSeekable(); +} + +void ChannelMediaDecoder::SetLoadInBackground(bool aLoadInBackground) { + MOZ_ASSERT(NS_IsMainThread()); + if (mResource) { + mResource->SetLoadInBackground(aLoadInBackground); + } +} + +void ChannelMediaDecoder::Suspend() { + MOZ_ASSERT(NS_IsMainThread()); + if (mResource) { + mResource->Suspend(true); + } + MediaDecoder::Suspend(); +} + +void ChannelMediaDecoder::Resume() { + MOZ_ASSERT(NS_IsMainThread()); + if (mResource) { + mResource->Resume(); + } + MediaDecoder::Resume(); +} + +void ChannelMediaDecoder::MetadataLoaded( + UniquePtr aInfo, UniquePtr aTags, + MediaDecoderEventVisibility aEventVisibility) { + MediaDecoder::MetadataLoaded(std::move(aInfo), std::move(aTags), + aEventVisibility); + // Set mode to PLAYBACK after reading metadata. + mResource->SetReadMode(MediaCacheStream::MODE_PLAYBACK); +} + +void ChannelMediaDecoder::GetDebugInfo(dom::MediaDecoderDebugInfo& aInfo) { + MediaDecoder::GetDebugInfo(aInfo); + if (mResource) { + mResource->GetDebugInfo(aInfo.mResource); + } +} + +} // namespace mozilla + +// avoid redefined macro in unified build +#undef LOG diff --git a/dom/media/ChannelMediaDecoder.h b/dom/media/ChannelMediaDecoder.h new file mode 100644 index 0000000000..47bf5a08b9 --- /dev/null +++ b/dom/media/ChannelMediaDecoder.h @@ -0,0 +1,169 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ChannelMediaDecoder_h_ +#define ChannelMediaDecoder_h_ + +#include "MediaDecoder.h" +#include "MediaResourceCallback.h" +#include "MediaChannelStatistics.h" + +class nsIChannel; +class nsIStreamListener; + +namespace mozilla { + +class BaseMediaResource; + +DDLoggedTypeDeclNameAndBase(ChannelMediaDecoder, MediaDecoder); + +class ChannelMediaDecoder + : public MediaDecoder, + public DecoderDoctorLifeLogger { + // Used to register with MediaResource to receive notifications which will + // be forwarded to MediaDecoder. + class ResourceCallback : public MediaResourceCallback { + // Throttle calls to MediaDecoder::NotifyDataArrived() + // to be at most once per 500ms. + static const uint32_t sDelay = 500; + + public: + explicit ResourceCallback(AbstractThread* aMainThread); + // Start to receive notifications from ResourceCallback. + void Connect(ChannelMediaDecoder* aDecoder); + // Called upon shutdown to stop receiving notifications. + void Disconnect(); + + private: + ~ResourceCallback(); + + /* MediaResourceCallback functions */ + AbstractThread* AbstractMainThread() const override; + MediaDecoderOwner* GetMediaOwner() const override; + void NotifyNetworkError(const MediaResult& aError) override; + void NotifyDataArrived() override; + void NotifyDataEnded(nsresult aStatus) override; + void NotifyPrincipalChanged() override; + void NotifySuspendedStatusChanged(bool aSuspendedByCache) override; + + static void TimerCallback(nsITimer* aTimer, void* aClosure); + + // The decoder to send notifications. Main-thread only. 
+ ChannelMediaDecoder* mDecoder = nullptr; + nsCOMPtr mTimer; + bool mTimerArmed = false; + const RefPtr mAbstractMainThread; + }; + + protected: + void ShutdownInternal() override; + void OnPlaybackEvent(MediaPlaybackEvent&& aEvent) override; + void DurationChanged() override; + void MetadataLoaded(UniquePtr aInfo, UniquePtr aTags, + MediaDecoderEventVisibility aEventVisibility) override; + void NotifyPrincipalChanged() override; + + RefPtr mResourceCallback; + RefPtr mResource; + + explicit ChannelMediaDecoder(MediaDecoderInit& aInit); + + void GetDebugInfo(dom::MediaDecoderDebugInfo& aInfo); + + public: + // Create a decoder for the given aType. Returns null if we were unable + // to create the decoder, for example because the requested MIME type in + // the init struct was unsupported. + static already_AddRefed Create( + MediaDecoderInit& aInit, DecoderDoctorDiagnostics* aDiagnostics); + + void Shutdown() override; + + bool CanClone(); + + // Create a new decoder of the same type as this one. + already_AddRefed Clone(MediaDecoderInit& aInit); + + nsresult Load(nsIChannel* aChannel, bool aIsPrivateBrowsing, + nsIStreamListener** aStreamListener); + + void AddSizeOfResources(ResourceSizes* aSizes) override; + already_AddRefed GetCurrentPrincipal() override; + bool HadCrossOriginRedirects() override; + bool IsTransportSeekable() override; + void SetLoadInBackground(bool aLoadInBackground) override; + void Suspend() override; + void Resume() override; + + private: + void DownloadProgressed(); + + // Create a new state machine to run this decoder. + MediaDecoderStateMachineBase* CreateStateMachine( + bool aDisableExternalEngine) override; + + nsresult Load(BaseMediaResource* aOriginal); + + // Called by MediaResource when the download has ended. + // Called on the main thread only. aStatus is the result from OnStopRequest. + void NotifyDownloadEnded(nsresult aStatus); + + // Called by the MediaResource to keep track of the number of bytes read + // from the resource. Called on the main by an event runner dispatched + // by the MediaResource read functions. + void NotifyBytesConsumed(int64_t aBytes, int64_t aOffset); + + bool CanPlayThroughImpl() final; + + struct PlaybackRateInfo { + uint32_t mRate; // Estimate of the current playback rate (bytes/second). + bool mReliable; // True if mRate is a reliable estimate. + }; + // The actual playback rate computation. + static PlaybackRateInfo ComputePlaybackRate( + const MediaChannelStatistics& aStats, BaseMediaResource* aResource, + const media::TimeUnit& aDuration); + + // Something has changed that could affect the computed playback rate, + // so recompute it. + static void UpdatePlaybackRate(const PlaybackRateInfo& aInfo, + BaseMediaResource* aResource); + + // Return statistics. This is used for progress events and other things. + // This can be called from any thread. It's only a snapshot of the + // current state, since other threads might be changing the state + // at any time. + static MediaStatistics GetStatistics(const PlaybackRateInfo& aInfo, + BaseMediaResource* aRes, + int64_t aPlaybackPosition); + + bool ShouldThrottleDownload(const MediaStatistics& aStats); + + // Data needed to estimate playback data rate. The timeline used for + // this estimate is "decode time" (where the "current time" is the + // time of the last decoded video frame). + MediaChannelStatistics mPlaybackStatistics; + + // Current playback position in the stream. This is (approximately) + // where we're up to playing back the stream. 
This is not adjusted + // during decoder seek operations, but it's updated at the end when we + // start playing back again. + int64_t mPlaybackPosition = 0; + + bool mCanPlayThrough = false; + + // True if we've been notified that the ChannelMediaResource has + // a principal. + bool mInitialChannelPrincipalKnown = false; + + // Set in Shutdown() when we start closing mResource, if mResource is set. + // Must resolve before we unregister the shutdown blocker. + RefPtr mResourceClosePromise; +}; + +} // namespace mozilla + +#endif // ChannelMediaDecoder_h_ diff --git a/dom/media/ChannelMediaResource.cpp b/dom/media/ChannelMediaResource.cpp new file mode 100644 index 0000000000..aefedb37d1 --- /dev/null +++ b/dom/media/ChannelMediaResource.cpp @@ -0,0 +1,1057 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ChannelMediaResource.h" + +#include "mozilla/Preferences.h" +#include "mozilla/dom/HTMLMediaElement.h" +#include "mozilla/net/OpaqueResponseUtils.h" +#include "nsIAsyncVerifyRedirectCallback.h" +#include "nsICachingChannel.h" +#include "nsIClassOfService.h" +#include "nsIHttpChannel.h" +#include "nsIInputStream.h" +#include "nsIThreadRetargetableRequest.h" +#include "nsITimedChannel.h" +#include "nsHttp.h" +#include "nsNetUtil.h" + +static const uint32_t HTTP_PARTIAL_RESPONSE_CODE = 206; +static const uint32_t HTTP_OK_CODE = 200; +static const uint32_t HTTP_REQUESTED_RANGE_NOT_SATISFIABLE_CODE = 416; + +mozilla::LazyLogModule gMediaResourceLog("MediaResource"); +// Debug logging macro with object pointer and class name. +#define LOG(msg, ...) \ + DDMOZ_LOG(gMediaResourceLog, mozilla::LogLevel::Debug, msg, ##__VA_ARGS__) + +namespace mozilla { + +ChannelMediaResource::ChannelMediaResource(MediaResourceCallback* aCallback, + nsIChannel* aChannel, nsIURI* aURI, + int64_t aStreamLength, + bool aIsPrivateBrowsing) + : BaseMediaResource(aCallback, aChannel, aURI), + mCacheStream(this, aIsPrivateBrowsing), + mSuspendAgent(mCacheStream), + mKnownStreamLength(aStreamLength) {} + +ChannelMediaResource::~ChannelMediaResource() { + MOZ_ASSERT(mClosed); + MOZ_ASSERT(!mChannel); + MOZ_ASSERT(!mListener); + if (mSharedInfo) { + mSharedInfo->mResources.RemoveElement(this); + } +} + +// ChannelMediaResource::Listener just observes the channel and +// forwards notifications to the ChannelMediaResource. We use multiple +// listener objects so that when we open a new stream for a seek we can +// disconnect the old listener from the ChannelMediaResource and hook up +// a new listener, so notifications from the old channel are discarded +// and don't confuse us. 
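// A minimal, self-contained sketch of the revocable-listener pattern described
// above, using plain C++ and hypothetical names (Resource, Listener) rather
// than the Gecko classes. It only illustrates why Revoke() plus a per-load ID
// makes late notifications from an abandoned channel harmless; the real
// Listener additionally handles thread retargeting and channel redirects.
#include <cstdint>
#include <cstdio>
#include <mutex>

class Resource {
 public:
  // Data is accepted only if it carries the ID of the current load.
  void OnData(uint32_t aLoadID, int aBytes) {
    if (aLoadID != mCurrentLoadID) {
      std::printf("dropping %d stale bytes from load %u\n", aBytes,
                  static_cast<unsigned>(aLoadID));
      return;
    }
    std::printf("accepting %d bytes from load %u\n", aBytes,
                static_cast<unsigned>(aLoadID));
  }
  uint32_t StartNewLoad() { return ++mCurrentLoadID; }

 private:
  uint32_t mCurrentLoadID = 0;
};

class Listener {
 public:
  Listener(Resource* aResource, uint32_t aLoadID)
      : mResource(aResource), mLoadID(aLoadID) {}

  // Forwards a notification unless the resource has been detached.
  void OnDataAvailable(int aBytes) {
    std::lock_guard<std::mutex> lock(mMutex);
    if (mResource) {
      mResource->OnData(mLoadID, aBytes);
    }
  }

  // Detaches the listener so notifications from an old channel are discarded.
  void Revoke() {
    std::lock_guard<std::mutex> lock(mMutex);
    mResource = nullptr;
  }

 private:
  std::mutex mMutex;
  Resource* mResource;  // Cleared by Revoke().
  const uint32_t mLoadID;
};

int main() {
  Resource res;
  Listener first(&res, res.StartNewLoad());
  first.OnDataAvailable(100);   // accepted
  first.Revoke();               // e.g. a seek is about to open a new channel
  Listener second(&res, res.StartNewLoad());
  first.OnDataAvailable(50);    // silently dropped: the listener is revoked
  second.OnDataAvailable(200);  // accepted under the new load ID
  return 0;
}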
+NS_IMPL_ISUPPORTS(ChannelMediaResource::Listener, nsIRequestObserver, + nsIStreamListener, nsIChannelEventSink, nsIInterfaceRequestor, + nsIThreadRetargetableStreamListener) + +nsresult ChannelMediaResource::Listener::OnStartRequest(nsIRequest* aRequest) { + mMutex.AssertOnWritingThread(); // Writing thread is MainThread + if (!mResource) return NS_OK; + return mResource->OnStartRequest(aRequest, mOffset); +} + +nsresult ChannelMediaResource::Listener::OnStopRequest(nsIRequest* aRequest, + nsresult aStatus) { + mMutex.AssertOnWritingThread(); + if (!mResource) return NS_OK; + return mResource->OnStopRequest(aRequest, aStatus); +} + +nsresult ChannelMediaResource::Listener::OnDataAvailable( + nsIRequest* aRequest, nsIInputStream* aStream, uint64_t aOffset, + uint32_t aCount) { + // This might happen off the main thread. + RefPtr res; + { + MutexSingleWriterAutoLock lock(mMutex); + res = mResource; + } + // Note Rekove() might happen at the same time to reset mResource. We check + // the load ID to determine if the data is from an old channel. + return res ? res->OnDataAvailable(mLoadID, aStream, aCount) : NS_OK; +} + +nsresult ChannelMediaResource::Listener::AsyncOnChannelRedirect( + nsIChannel* aOld, nsIChannel* aNew, uint32_t aFlags, + nsIAsyncVerifyRedirectCallback* cb) { + mMutex.AssertOnWritingThread(); + + nsresult rv = NS_OK; + if (mResource) { + rv = mResource->OnChannelRedirect(aOld, aNew, aFlags, mOffset); + } + + if (NS_FAILED(rv)) { + return rv; + } + + cb->OnRedirectVerifyCallback(NS_OK); + return NS_OK; +} + +nsresult ChannelMediaResource::Listener::CheckListenerChain() { return NS_OK; } + +NS_IMETHODIMP +ChannelMediaResource::Listener::OnDataFinished(nsresult) { return NS_OK; } + +nsresult ChannelMediaResource::Listener::GetInterface(const nsIID& aIID, + void** aResult) { + return QueryInterface(aIID, aResult); +} + +void ChannelMediaResource::Listener::Revoke() { + MOZ_ASSERT(NS_IsMainThread()); + MutexSingleWriterAutoLock lock(mMutex); + mResource = nullptr; +} + +static bool IsPayloadCompressed(nsIHttpChannel* aChannel) { + nsAutoCString encoding; + Unused << aChannel->GetResponseHeader("Content-Encoding"_ns, encoding); + return encoding.Length() > 0; +} + +nsresult ChannelMediaResource::OnStartRequest(nsIRequest* aRequest, + int64_t aRequestOffset) { + NS_ASSERTION(mChannel.get() == aRequest, "Wrong channel!"); + MOZ_DIAGNOSTIC_ASSERT(!mClosed); + + MediaDecoderOwner* owner = mCallback->GetMediaOwner(); + MOZ_DIAGNOSTIC_ASSERT(owner); + dom::HTMLMediaElement* element = owner->GetMediaElement(); + MOZ_DIAGNOSTIC_ASSERT(element); + + nsresult status; + nsresult rv = aRequest->GetStatus(&status); + NS_ENSURE_SUCCESS(rv, rv); + + if (status == NS_BINDING_ABORTED) { + // Request was aborted before we had a chance to receive any data, or + // even an OnStartRequest(). Close the channel. This is important, as + // we don't want to mess up our state, as if we're cloned that would + // cause the clone to copy incorrect metadata (like whether we're + // infinite for example). + CloseChannel(); + return status; + } + + if (element->ShouldCheckAllowOrigin()) { + // If the request was cancelled by nsCORSListenerProxy due to failing + // the CORS security check, send an error through to the media element. 
+ if (status == NS_ERROR_DOM_BAD_URI) { + mCallback->NotifyNetworkError(MediaResult(status, "CORS not allowed")); + return NS_ERROR_DOM_BAD_URI; + } + } + + nsCOMPtr hc = do_QueryInterface(aRequest); + bool seekable = false; + int64_t length = -1; + int64_t startOffset = aRequestOffset; + + if (hc) { + uint32_t responseStatus = 0; + Unused << hc->GetResponseStatus(&responseStatus); + bool succeeded = false; + Unused << hc->GetRequestSucceeded(&succeeded); + + if (!succeeded && NS_SUCCEEDED(status)) { + // HTTP-level error (e.g. 4xx); treat this as a fatal network-level error. + // We might get this on a seek. + // (Note that lower-level errors indicated by NS_FAILED(status) are + // handled in OnStopRequest.) + // A 416 error should treated as EOF here... it's possible + // that we don't get Content-Length, we read N bytes, then we + // suspend and resume, the resume reopens the channel and we seek to + // offset N, but there are no more bytes, so we get a 416 + // "Requested Range Not Satisfiable". + if (responseStatus == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE_CODE) { + // OnStopRequest will not be fired, so we need to do some of its + // work here. Note we need to pass the load ID first so the following + // NotifyDataEnded() can pass the ID check. + mCacheStream.NotifyLoadID(mLoadID); + mCacheStream.NotifyDataEnded(mLoadID, status); + } else { + mCallback->NotifyNetworkError( + MediaResult(NS_ERROR_FAILURE, "HTTP error")); + } + + // This disconnects our listener so we don't get any more data. We + // certainly don't want an error page to end up in our cache! + CloseChannel(); + return NS_OK; + } + + nsAutoCString ranges; + Unused << hc->GetResponseHeader("Accept-Ranges"_ns, ranges); + bool acceptsRanges = + net::nsHttp::FindToken(ranges.get(), "bytes", HTTP_HEADER_VALUE_SEPS); + + int64_t contentLength = -1; + const bool isCompressed = IsPayloadCompressed(hc); + if (!isCompressed) { + hc->GetContentLength(&contentLength); + } + + // Check response code for byte-range requests (seeking, chunk requests). + // We don't expect to get a 206 response for a compressed stream, but + // double check just to be sure. + if (!isCompressed && responseStatus == HTTP_PARTIAL_RESPONSE_CODE) { + // Parse Content-Range header. + int64_t rangeStart = 0; + int64_t rangeEnd = 0; + int64_t rangeTotal = 0; + rv = ParseContentRangeHeader(hc, rangeStart, rangeEnd, rangeTotal); + + // We received 'Content-Range', so the server accepts range requests. + bool gotRangeHeader = NS_SUCCEEDED(rv); + + if (gotRangeHeader) { + startOffset = rangeStart; + // We received 'Content-Range', so the server accepts range requests. + // Notify media cache about the length and start offset of data + // received. Note: If aRangeTotal == -1, then the total bytes is unknown + // at this stage. + // For now, tell the decoder that the stream is infinite. + if (rangeTotal != -1) { + contentLength = std::max(contentLength, rangeTotal); + } + } + acceptsRanges = gotRangeHeader; + } else if (responseStatus == HTTP_OK_CODE) { + // HTTP_OK_CODE means data will be sent from the start of the stream. + startOffset = 0; + + if (aRequestOffset > 0) { + // If HTTP_OK_CODE is responded for a non-zero range request, we have + // to assume seeking doesn't work. 
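+        // For example, a server that answers "Range: bytes=1000-" with
+        // "200 OK" and the full body has ignored the range request entirely.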
+ acceptsRanges = false; + } + } + if (aRequestOffset == 0 && contentLength >= 0 && + (responseStatus == HTTP_OK_CODE || + responseStatus == HTTP_PARTIAL_RESPONSE_CODE)) { + length = contentLength; + } + // XXX we probably should examine the Content-Range header in case + // the server gave us a range which is not quite what we asked for + + // If we get an HTTP_OK_CODE response to our byte range request, + // and the server isn't sending Accept-Ranges:bytes then we don't + // support seeking. We also can't seek in compressed streams. + seekable = !isCompressed && acceptsRanges; + } else { + // Not an HTTP channel. Assume data will be sent from position zero. + startOffset = 0; + } + + // Update principals before OnDataAvailable() putting the data in the cache. + // This is important, we want to make sure all principals are updated before + // any consumer can see the new data. + UpdatePrincipal(); + if (owner->HasError()) { + // Updating the principal resulted in an error. Abort the load. + CloseChannel(); + return NS_OK; + } + + mCacheStream.NotifyDataStarted(mLoadID, startOffset, seekable, length); + mIsTransportSeekable = seekable; + if (mFirstReadLength < 0) { + mFirstReadLength = length; + } + + mSuspendAgent.Delegate(mChannel); + + // Fires an initial progress event. + owner->DownloadProgressed(); + + nsCOMPtr retarget; + if ((retarget = do_QueryInterface(aRequest))) { + // Note this will not always succeed. We need to handle the case where + // all resources sharing the same cache might run their data callbacks + // on different threads. + retarget->RetargetDeliveryTo(mCacheStream.OwnerThread()); + } + + return NS_OK; +} + +bool ChannelMediaResource::IsTransportSeekable() { + MOZ_ASSERT(NS_IsMainThread()); + // We Report the transport as seekable if we know we will never seek into + // the underlying transport. As the MediaCache reads content by block of + // BLOCK_SIZE bytes, so the content length is less it will always be fully + // read from offset = 0 and we can then always successfully seek within this + // buffered content. + return mIsTransportSeekable || + (mFirstReadLength > 0 && + mFirstReadLength < MediaCacheStream::BLOCK_SIZE); +} + +nsresult ChannelMediaResource::ParseContentRangeHeader( + nsIHttpChannel* aHttpChan, int64_t& aRangeStart, int64_t& aRangeEnd, + int64_t& aRangeTotal) const { + NS_ENSURE_ARG(aHttpChan); + + nsAutoCString rangeStr; + nsresult rv = aHttpChan->GetResponseHeader("Content-Range"_ns, rangeStr); + NS_ENSURE_SUCCESS(rv, rv); + NS_ENSURE_FALSE(rangeStr.IsEmpty(), NS_ERROR_ILLEGAL_VALUE); + + auto rangeOrErr = net::ParseContentRangeHeaderString(rangeStr); + NS_ENSURE_FALSE(rangeOrErr.isErr(), rangeOrErr.unwrapErr()); + + aRangeStart = std::get<0>(rangeOrErr.inspect()); + aRangeEnd = std::get<1>(rangeOrErr.inspect()); + aRangeTotal = std::get<2>(rangeOrErr.inspect()); + + LOG("Received bytes [%" PRId64 "] to [%" PRId64 "] of [%" PRId64 + "] for decoder[%p]", + aRangeStart, aRangeEnd, aRangeTotal, mCallback.get()); + + return NS_OK; +} + +nsresult ChannelMediaResource::OnStopRequest(nsIRequest* aRequest, + nsresult aStatus) { + NS_ASSERTION(mChannel.get() == aRequest, "Wrong channel!"); + NS_ASSERTION(!mSuspendAgent.IsSuspended(), + "How can OnStopRequest fire while we're suspended?"); + MOZ_DIAGNOSTIC_ASSERT(!mClosed); + + // Move this request back into the foreground. This is necessary for + // requests owned by video documents to ensure the load group fires + // OnStopRequest when restoring from session history. 
+ nsLoadFlags loadFlags; + DebugOnly rv = mChannel->GetLoadFlags(&loadFlags); + NS_ASSERTION(NS_SUCCEEDED(rv), "GetLoadFlags() failed!"); + + if (loadFlags & nsIRequest::LOAD_BACKGROUND) { + Unused << NS_WARN_IF( + NS_FAILED(ModifyLoadFlags(loadFlags & ~nsIRequest::LOAD_BACKGROUND))); + } + + // Note that aStatus might have succeeded --- this might be a normal close + // --- even in situations where the server cut us off because we were + // suspended. It is also possible that the server sends us fewer bytes than + // requested. So we need to "reopen on error" in that case too. The only + // cases where we don't need to reopen are when *we* closed the stream. + // But don't reopen if we need to seek and we don't think we can... that would + // cause us to just re-read the stream, which would be really bad. + /* + * | length | offset | reopen | + * +--------+-----------+----------+ + * | -1 | 0 | yes | + * +--------+-----------+----------+ + * | -1 | > 0 | seekable | + * +--------+-----------+----------+ + * | 0 | X | no | + * +--------+-----------+----------+ + * | > 0 | 0 | yes | + * +--------+-----------+----------+ + * | > 0 | != length | seekable | + * +--------+-----------+----------+ + * | > 0 | == length | no | + */ + if (aStatus != NS_ERROR_PARSED_DATA_CACHED && aStatus != NS_BINDING_ABORTED) { + auto lengthAndOffset = mCacheStream.GetLengthAndOffset(); + int64_t length = lengthAndOffset.mLength; + int64_t offset = lengthAndOffset.mOffset; + if ((offset == 0 || mIsTransportSeekable) && offset != length) { + // If the stream did close normally, restart the channel if we're either + // at the start of the resource, or if the server is seekable and we're + // not at the end of stream. We don't restart the stream if we're at the + // end because not all web servers handle this case consistently; see: + // https://bugzilla.mozilla.org/show_bug.cgi?id=1373618#c36 + nsresult rv = Seek(offset, false); + if (NS_SUCCEEDED(rv)) { + return rv; + } + // Close the streams that failed due to error. This will cause all + // client Read and Seek operations on those streams to fail. Blocked + // Reads will also be woken up. + Close(); + } + } + + mCacheStream.NotifyDataEnded(mLoadID, aStatus); + return NS_OK; +} + +nsresult ChannelMediaResource::OnChannelRedirect(nsIChannel* aOld, + nsIChannel* aNew, + uint32_t aFlags, + int64_t aOffset) { + // OnChannelRedirect() is followed by OnStartRequest() where we will + // call mSuspendAgent.Delegate(). 
+ mChannel = aNew; + return SetupChannelHeaders(aOffset); +} + +nsresult ChannelMediaResource::CopySegmentToCache( + nsIInputStream* aInStream, void* aClosure, const char* aFromSegment, + uint32_t aToOffset, uint32_t aCount, uint32_t* aWriteCount) { + *aWriteCount = aCount; + Closure* closure = static_cast(aClosure); + MediaCacheStream* cacheStream = &closure->mResource->mCacheStream; + if (cacheStream->OwnerThread()->IsOnCurrentThread()) { + cacheStream->NotifyDataReceived( + closure->mLoadID, aCount, + reinterpret_cast(aFromSegment)); + return NS_OK; + } + + RefPtr self = closure->mResource; + uint32_t loadID = closure->mLoadID; + UniquePtr data = MakeUnique(aCount); + memcpy(data.get(), aFromSegment, aCount); + cacheStream->OwnerThread()->Dispatch(NS_NewRunnableFunction( + "MediaCacheStream::NotifyDataReceived", + [self, loadID, data = std::move(data), aCount]() { + self->mCacheStream.NotifyDataReceived(loadID, aCount, data.get()); + })); + + return NS_OK; +} + +nsresult ChannelMediaResource::OnDataAvailable(uint32_t aLoadID, + nsIInputStream* aStream, + uint32_t aCount) { + // This might happen off the main thread. + Closure closure{aLoadID, this}; + uint32_t count = aCount; + while (count > 0) { + uint32_t read; + nsresult rv = + aStream->ReadSegments(CopySegmentToCache, &closure, count, &read); + if (NS_FAILED(rv)) return rv; + NS_ASSERTION(read > 0, "Read 0 bytes while data was available?"); + count -= read; + } + + return NS_OK; +} + +int64_t ChannelMediaResource::CalculateStreamLength() const { + if (!mChannel) { + return -1; + } + + nsCOMPtr hc = do_QueryInterface(mChannel); + if (!hc) { + return -1; + } + + bool succeeded = false; + Unused << hc->GetRequestSucceeded(&succeeded); + if (!succeeded) { + return -1; + } + + // We can't determine the length of uncompressed payload. + const bool isCompressed = IsPayloadCompressed(hc); + if (isCompressed) { + return -1; + } + + int64_t contentLength = -1; + if (NS_FAILED(hc->GetContentLength(&contentLength))) { + return -1; + } + + uint32_t responseStatus = 0; + Unused << hc->GetResponseStatus(&responseStatus); + if (responseStatus != HTTP_PARTIAL_RESPONSE_CODE) { + return contentLength; + } + + // We have an HTTP Byte Range response. The Content-Length is the length + // of the response, not the resource. We need to parse the Content-Range + // header and extract the range total in order to get the stream length. + int64_t rangeStart = 0; + int64_t rangeEnd = 0; + int64_t rangeTotal = 0; + bool gotRangeHeader = NS_SUCCEEDED( + ParseContentRangeHeader(hc, rangeStart, rangeEnd, rangeTotal)); + if (gotRangeHeader && rangeTotal != -1) { + contentLength = std::max(contentLength, rangeTotal); + } + return contentLength; +} + +nsresult ChannelMediaResource::Open(nsIStreamListener** aStreamListener) { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + MOZ_ASSERT(aStreamListener); + MOZ_ASSERT(mChannel); + + int64_t streamLength = + mKnownStreamLength < 0 ? 
CalculateStreamLength() : mKnownStreamLength; + nsresult rv = mCacheStream.Init(streamLength); + if (NS_FAILED(rv)) { + return rv; + } + + mSharedInfo = new SharedInfo; + mSharedInfo->mResources.AppendElement(this); + + mIsLiveStream = streamLength < 0; + mListener = new Listener(this, 0, ++mLoadID); + *aStreamListener = mListener; + NS_ADDREF(*aStreamListener); + return NS_OK; +} + +dom::HTMLMediaElement* ChannelMediaResource::MediaElement() const { + MOZ_ASSERT(NS_IsMainThread()); + MediaDecoderOwner* owner = mCallback->GetMediaOwner(); + MOZ_DIAGNOSTIC_ASSERT(owner); + dom::HTMLMediaElement* element = owner->GetMediaElement(); + MOZ_DIAGNOSTIC_ASSERT(element); + return element; +} + +nsresult ChannelMediaResource::OpenChannel(int64_t aOffset) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!mClosed); + MOZ_ASSERT(mChannel); + MOZ_ASSERT(!mListener, "Listener should have been removed by now"); + + mListener = new Listener(this, aOffset, ++mLoadID); + nsresult rv = mChannel->SetNotificationCallbacks(mListener.get()); + NS_ENSURE_SUCCESS(rv, rv); + + rv = SetupChannelHeaders(aOffset); + NS_ENSURE_SUCCESS(rv, rv); + + rv = mChannel->AsyncOpen(mListener); + NS_ENSURE_SUCCESS(rv, rv); + + // Tell the media element that we are fetching data from a channel. + MediaElement()->DownloadResumed(); + + return NS_OK; +} + +nsresult ChannelMediaResource::SetupChannelHeaders(int64_t aOffset) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!mClosed); + + // Always use a byte range request even if we're reading from the start + // of the resource. + // This enables us to detect if the stream supports byte range + // requests, and therefore seeking, early. + nsCOMPtr hc = do_QueryInterface(mChannel); + if (hc) { + // Use |mOffset| if seeking in a complete file download. + nsAutoCString rangeString("bytes="); + rangeString.AppendInt(aOffset); + rangeString.Append('-'); + nsresult rv = hc->SetRequestHeader("Range"_ns, rangeString, false); + NS_ENSURE_SUCCESS(rv, rv); + + // Send Accept header for video and audio types only (Bug 489071) + MediaElement()->SetRequestHeaders(hc); + } else { + NS_ASSERTION(aOffset == 0, "Don't know how to seek on this channel type"); + return NS_ERROR_FAILURE; + } + return NS_OK; +} + +RefPtr ChannelMediaResource::Close() { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + if (!mClosed) { + CloseChannel(); + mClosed = true; + return mCacheStream.Close(); + } + return GenericPromise::CreateAndResolve(true, __func__); +} + +already_AddRefed ChannelMediaResource::GetCurrentPrincipal() { + MOZ_ASSERT(NS_IsMainThread()); + return do_AddRef(mSharedInfo->mPrincipal); +} + +bool ChannelMediaResource::HadCrossOriginRedirects() { + MOZ_ASSERT(NS_IsMainThread()); + return mSharedInfo->mHadCrossOriginRedirects; +} + +bool ChannelMediaResource::CanClone() { + return !mClosed && mCacheStream.IsAvailableForSharing(); +} + +already_AddRefed ChannelMediaResource::CloneData( + MediaResourceCallback* aCallback) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(CanClone(), "Stream can't be cloned"); + + RefPtr resource = + new ChannelMediaResource(aCallback, nullptr, mURI, mKnownStreamLength); + + resource->mIsLiveStream = mIsLiveStream; + resource->mIsTransportSeekable = mIsTransportSeekable; + resource->mSharedInfo = mSharedInfo; + mSharedInfo->mResources.AppendElement(resource.get()); + + // Initially the clone is treated as suspended by the cache, because + // we don't have a channel. 
If the cache needs to read data from the clone + // it will call CacheClientResume (or CacheClientSeek with aResume true) + // which will recreate the channel. This way, if all of the media data + // is already in the cache we don't create an unnecessary HTTP channel + // and perform a useless HTTP transaction. + resource->mCacheStream.InitAsClone(&mCacheStream); + return resource.forget(); +} + +void ChannelMediaResource::CloseChannel() { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + // Revoking listener should be done before canceling the channel, because + // canceling the channel might cause the input stream to release its buffer. + // If we don't do revoke first, it's possible that `OnDataAvailable` would be + // called later and then incorrectly access that released buffer. + if (mListener) { + mListener->Revoke(); + mListener = nullptr; + } + + if (mChannel) { + mSuspendAgent.Revoke(); + // The status we use here won't be passed to the decoder, since + // we've already revoked the listener. It can however be passed + // to nsDocumentViewer::LoadComplete if our channel is the one + // that kicked off creation of a video document. We don't want that + // document load to think there was an error. + // NS_ERROR_PARSED_DATA_CACHED is the best thing we have for that + // at the moment. + mChannel->Cancel(NS_ERROR_PARSED_DATA_CACHED); + mChannel = nullptr; + } +} + +nsresult ChannelMediaResource::ReadFromCache(char* aBuffer, int64_t aOffset, + uint32_t aCount) { + return mCacheStream.ReadFromCache(aBuffer, aOffset, aCount); +} + +nsresult ChannelMediaResource::ReadAt(int64_t aOffset, char* aBuffer, + uint32_t aCount, uint32_t* aBytes) { + NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread"); + return mCacheStream.ReadAt(aOffset, aBuffer, aCount, aBytes); +} + +void ChannelMediaResource::ThrottleReadahead(bool bThrottle) { + mCacheStream.ThrottleReadahead(bThrottle); +} + +nsresult ChannelMediaResource::GetCachedRanges(MediaByteRangeSet& aRanges) { + return mCacheStream.GetCachedRanges(aRanges); +} + +void ChannelMediaResource::Suspend(bool aCloseImmediately) { + NS_ASSERTION(NS_IsMainThread(), "Don't call on non-main thread"); + + if (mClosed) { + // Nothing to do when we are closed. + return; + } + + dom::HTMLMediaElement* element = MediaElement(); + + if (mChannel && aCloseImmediately && mIsTransportSeekable) { + CloseChannel(); + } + + if (mSuspendAgent.Suspend()) { + element->DownloadSuspended(); + } +} + +void ChannelMediaResource::Resume() { + NS_ASSERTION(NS_IsMainThread(), "Don't call on non-main thread"); + + if (mClosed) { + // Nothing to do when we are closed. + return; + } + + dom::HTMLMediaElement* element = MediaElement(); + + if (mSuspendAgent.Resume()) { + if (mChannel) { + // Just wake up our existing channel + element->DownloadResumed(); + } else { + mCacheStream.NotifyResume(); + } + } +} + +nsresult ChannelMediaResource::RecreateChannel() { + MOZ_DIAGNOSTIC_ASSERT(!mClosed); + + nsLoadFlags loadFlags = nsICachingChannel::LOAD_BYPASS_LOCAL_CACHE_IF_BUSY | + (mLoadInBackground ? nsIRequest::LOAD_BACKGROUND : 0); + + dom::HTMLMediaElement* element = MediaElement(); + + nsCOMPtr loadGroup = element->GetDocumentLoadGroup(); + NS_ENSURE_TRUE(loadGroup, NS_ERROR_NULL_POINTER); + + nsSecurityFlags securityFlags = + element->ShouldCheckAllowOrigin() + ? 
nsILoadInfo::SEC_REQUIRE_CORS_INHERITS_SEC_CONTEXT + : nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_INHERITS_SEC_CONTEXT; + + if (element->GetCORSMode() == CORS_USE_CREDENTIALS) { + securityFlags |= nsILoadInfo::SEC_COOKIES_INCLUDE; + } + + MOZ_ASSERT(element->IsAnyOfHTMLElements(nsGkAtoms::audio, nsGkAtoms::video)); + nsContentPolicyType contentPolicyType = + element->IsHTMLElement(nsGkAtoms::audio) + ? nsIContentPolicy::TYPE_INTERNAL_AUDIO + : nsIContentPolicy::TYPE_INTERNAL_VIDEO; + + // If element has 'triggeringprincipal' attribute, we will use the value as + // triggeringPrincipal for the channel, otherwise it will default to use + // aElement->NodePrincipal(). + // This function returns true when element has 'triggeringprincipal', so if + // setAttrs is true we will override the origin attributes on the channel + // later. + nsCOMPtr triggeringPrincipal; + bool setAttrs = nsContentUtils::QueryTriggeringPrincipal( + element, getter_AddRefs(triggeringPrincipal)); + + nsresult rv = NS_NewChannelWithTriggeringPrincipal( + getter_AddRefs(mChannel), mURI, element, triggeringPrincipal, + securityFlags, contentPolicyType, + nullptr, // aPerformanceStorage + loadGroup, + nullptr, // aCallbacks + loadFlags); + NS_ENSURE_SUCCESS(rv, rv); + + nsCOMPtr loadInfo = mChannel->LoadInfo(); + if (setAttrs) { + // The function simply returns NS_OK, so we ignore the return value. + Unused << loadInfo->SetOriginAttributes( + triggeringPrincipal->OriginAttributesRef()); + } + + Unused << loadInfo->SetIsMediaRequest(true); + + nsCOMPtr cos(do_QueryInterface(mChannel)); + if (cos) { + // Unconditionally disable throttling since we want the media to fluently + // play even when we switch the tab to background. + cos->AddClassFlags(nsIClassOfService::DontThrottle); + } + + return rv; +} + +void ChannelMediaResource::CacheClientNotifyDataReceived() { + mCallback->AbstractMainThread()->Dispatch(NewRunnableMethod( + "MediaResourceCallback::NotifyDataArrived", mCallback.get(), + &MediaResourceCallback::NotifyDataArrived)); +} + +void ChannelMediaResource::CacheClientNotifyDataEnded(nsresult aStatus) { + mCallback->AbstractMainThread()->Dispatch(NS_NewRunnableFunction( + "ChannelMediaResource::CacheClientNotifyDataEnded", + [self = RefPtr(this), aStatus]() { + if (NS_SUCCEEDED(aStatus)) { + self->mIsLiveStream = false; + } + self->mCallback->NotifyDataEnded(aStatus); + })); +} + +void ChannelMediaResource::CacheClientNotifyPrincipalChanged() { + NS_ASSERTION(NS_IsMainThread(), "Don't call on non-main thread"); + + mCallback->NotifyPrincipalChanged(); +} + +void ChannelMediaResource::UpdatePrincipal() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mChannel); + nsIScriptSecurityManager* secMan = nsContentUtils::GetSecurityManager(); + if (!secMan) { + return; + } + bool hadData = mSharedInfo->mPrincipal != nullptr; + // Channels created from a media element (in RecreateChannel() or + // HTMLMediaElement::ChannelLoader) do not have SANDBOXED_ORIGIN set in the + // LoadInfo. Document loads for a sandboxed iframe, however, may have + // SANDBOXED_ORIGIN set. Ignore sandboxing so that on such loads the result + // principal is not replaced with a null principal but describes the source + // of the data and is the same as would be obtained from a load from the + // media host element. 
+ nsCOMPtr principal; + secMan->GetChannelResultPrincipalIfNotSandboxed(mChannel, + getter_AddRefs(principal)); + if (nsContentUtils::CombineResourcePrincipals(&mSharedInfo->mPrincipal, + principal)) { + for (auto* r : mSharedInfo->mResources) { + r->CacheClientNotifyPrincipalChanged(); + } + if (!mChannel) { // Sometimes cleared during NotifyPrincipalChanged() + return; + } + } + nsCOMPtr loadInfo = mChannel->LoadInfo(); + auto mode = loadInfo->GetSecurityMode(); + if (mode != nsILoadInfo::SEC_REQUIRE_CORS_INHERITS_SEC_CONTEXT) { + MOZ_ASSERT( + mode == nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_INHERITS_SEC_CONTEXT || + mode == nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL, + "no-cors request"); + MOZ_ASSERT(!hadData || !mChannel->IsDocument(), + "Only the initial load may be a document load"); + bool finalResponseIsOpaque = + // NS_GetFinalChannelURI() and GetChannelResultPrincipal() return the + // original request URI for null-origin Responses from ServiceWorker, + // in which case the URI does not necessarily indicate the real source + // of data. Such null-origin Responses have Basic LoadTainting, and + // so can be distinguished from true cross-origin responses when the + // channel is not a document load. + // + // When the channel is a document load, LoadTainting indicates opacity + // wrt the parent document and so does not indicate whether the + // response is cross-origin wrt to the media element. However, + // ServiceWorkers for document loads are always same-origin with the + // channel URI and so there is no need to distinguish null-origin + // ServiceWorker responses to document loads. + // + // CORS filtered Responses from ServiceWorker also cannot be mixed + // with no-cors cross-origin responses. + (mChannel->IsDocument() || + loadInfo->GetTainting() == LoadTainting::Opaque) && + // Although intermediate cross-origin redirects back to URIs with + // loadingPrincipal will have LoadTainting::Opaque and will taint the + // media element, they are not considered opaque when verifying + // network responses; they can be mixed with non-opaque responses from + // subsequent loads on the same-origin finalURI. + !nsContentUtils::CheckMayLoad(MediaElement()->NodePrincipal(), mChannel, + /*allowIfInheritsPrincipal*/ true); + if (!hadData) { // First response with data + mSharedInfo->mFinalResponsesAreOpaque = finalResponseIsOpaque; + } else if (mSharedInfo->mFinalResponsesAreOpaque != finalResponseIsOpaque) { + for (auto* r : mSharedInfo->mResources) { + r->mCallback->NotifyNetworkError(MediaResult( + NS_ERROR_CONTENT_BLOCKED, "opaque and non-opaque responses")); + } + // Our caller, OnStartRequest() will CloseChannel() on discovering the + // error, so no data will be read from the channel. + return; + } + } + // ChannelMediaResource can recreate the channel. When this happens, we don't + // want to overwrite mHadCrossOriginRedirects because the new channel could + // skip intermediate redirects. 
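+  // For example, a first load that redirected A -> B -> A is cross-origin,
+  // even though a later recreated channel that fetches A directly would
+  // report all of its redirects as same-origin.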
+ if (!mSharedInfo->mHadCrossOriginRedirects) { + nsCOMPtr timedChannel = do_QueryInterface(mChannel); + if (timedChannel) { + bool allRedirectsSameOrigin = false; + mSharedInfo->mHadCrossOriginRedirects = + NS_SUCCEEDED(timedChannel->GetAllRedirectsSameOrigin( + &allRedirectsSameOrigin)) && + !allRedirectsSameOrigin; + } + } +} + +void ChannelMediaResource::CacheClientNotifySuspendedStatusChanged( + bool aSuspended) { + mCallback->AbstractMainThread()->Dispatch(NewRunnableMethod( + "MediaResourceCallback::NotifySuspendedStatusChanged", mCallback.get(), + &MediaResourceCallback::NotifySuspendedStatusChanged, aSuspended)); +} + +nsresult ChannelMediaResource::Seek(int64_t aOffset, bool aResume) { + MOZ_ASSERT(NS_IsMainThread()); + + if (mClosed) { + // Nothing to do when we are closed. + return NS_OK; + } + + LOG("Seek requested for aOffset [%" PRId64 "]", aOffset); + + CloseChannel(); + + if (aResume) { + mSuspendAgent.Resume(); + } + + // Don't create a new channel if we are still suspended. The channel will + // be recreated when we are resumed. + if (mSuspendAgent.IsSuspended()) { + return NS_OK; + } + + nsresult rv = RecreateChannel(); + NS_ENSURE_SUCCESS(rv, rv); + + return OpenChannel(aOffset); +} + +void ChannelMediaResource::CacheClientSeek(int64_t aOffset, bool aResume) { + RefPtr self = this; + nsCOMPtr r = NS_NewRunnableFunction( + "ChannelMediaResource::Seek", [self, aOffset, aResume]() { + nsresult rv = self->Seek(aOffset, aResume); + if (NS_FAILED(rv)) { + // Close the streams that failed due to error. This will cause all + // client Read and Seek operations on those streams to fail. Blocked + // Reads will also be woken up. + self->Close(); + } + }); + mCallback->AbstractMainThread()->Dispatch(r.forget()); +} + +void ChannelMediaResource::CacheClientSuspend() { + mCallback->AbstractMainThread()->Dispatch( + NewRunnableMethod("ChannelMediaResource::Suspend", this, + &ChannelMediaResource::Suspend, false)); +} + +void ChannelMediaResource::CacheClientResume() { + mCallback->AbstractMainThread()->Dispatch(NewRunnableMethod( + "ChannelMediaResource::Resume", this, &ChannelMediaResource::Resume)); +} + +int64_t ChannelMediaResource::GetNextCachedData(int64_t aOffset) { + return mCacheStream.GetNextCachedData(aOffset); +} + +int64_t ChannelMediaResource::GetCachedDataEnd(int64_t aOffset) { + return mCacheStream.GetCachedDataEnd(aOffset); +} + +bool ChannelMediaResource::IsDataCachedToEndOfResource(int64_t aOffset) { + return mCacheStream.IsDataCachedToEndOfStream(aOffset); +} + +bool ChannelMediaResource::IsSuspended() { return mSuspendAgent.IsSuspended(); } + +void ChannelMediaResource::SetReadMode(MediaCacheStream::ReadMode aMode) { + mCacheStream.SetReadMode(aMode); +} + +void ChannelMediaResource::SetPlaybackRate(uint32_t aBytesPerSecond) { + mCacheStream.SetPlaybackRate(aBytesPerSecond); +} + +void ChannelMediaResource::Pin() { mCacheStream.Pin(); } + +void ChannelMediaResource::Unpin() { mCacheStream.Unpin(); } + +double ChannelMediaResource::GetDownloadRate(bool* aIsReliable) { + return mCacheStream.GetDownloadRate(aIsReliable); +} + +int64_t ChannelMediaResource::GetLength() { return mCacheStream.GetLength(); } + +void ChannelMediaResource::GetDebugInfo(dom::MediaResourceDebugInfo& aInfo) { + mCacheStream.GetDebugInfo(aInfo.mCacheStream); +} + +// ChannelSuspendAgent + +bool ChannelSuspendAgent::Suspend() { + MOZ_ASSERT(NS_IsMainThread()); + SuspendInternal(); + if (++mSuspendCount == 1) { + mCacheStream.NotifyClientSuspended(true); + return true; + } + return false; +} + 
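// A minimal sketch of the counting contract used by ChannelSuspendAgent:
// Suspend() reports a state change only on the 0 -> 1 transition and Resume()
// only on the 1 -> 0 transition, so nested suspend/resume pairs from different
// callers do not repeatedly toggle the underlying channel. Plain C++ with a
// hypothetical class name; not the Gecko implementation itself.
#include <cassert>

class CountedSuspend {
 public:
  // Returns true only when we go from "running" to "suspended".
  bool Suspend() { return ++mCount == 1; }
  // Returns true only when the last outstanding suspend is released.
  bool Resume() {
    assert(mCount > 0 && "Resume without suspend!");
    return --mCount == 0;
  }
  bool IsSuspended() const { return mCount > 0; }

 private:
  unsigned mCount = 0;
};

int main() {
  CountedSuspend agent;
  assert(agent.Suspend());     // 0 -> 1: actually suspend the channel
  assert(!agent.Suspend());    // already suspended, only the count grows
  assert(!agent.Resume());     // one suspend is still outstanding
  assert(agent.Resume());      // 1 -> 0: actually resume the channel
  assert(!agent.IsSuspended());
  return 0;
}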
+void ChannelSuspendAgent::SuspendInternal() { + MOZ_ASSERT(NS_IsMainThread()); + if (mChannel) { + bool isPending = false; + nsresult rv = mChannel->IsPending(&isPending); + if (NS_SUCCEEDED(rv) && isPending && !mIsChannelSuspended) { + mChannel->Suspend(); + mIsChannelSuspended = true; + } + } +} + +bool ChannelSuspendAgent::Resume() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(IsSuspended(), "Resume without suspend!"); + + if (--mSuspendCount == 0) { + if (mChannel && mIsChannelSuspended) { + mChannel->Resume(); + mIsChannelSuspended = false; + } + mCacheStream.NotifyClientSuspended(false); + return true; + } + return false; +} + +void ChannelSuspendAgent::Delegate(nsIChannel* aChannel) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(aChannel); + MOZ_ASSERT(!mChannel, "The previous channel not closed."); + MOZ_ASSERT(!mIsChannelSuspended); + + mChannel = aChannel; + // Ensure the suspend status of the channel matches our suspend count. + if (IsSuspended()) { + SuspendInternal(); + } +} + +void ChannelSuspendAgent::Revoke() { + MOZ_ASSERT(NS_IsMainThread()); + + if (!mChannel) { + // Channel already revoked. Nothing to do. + return; + } + + // Before closing the channel, it needs to be resumed to make sure its + // internal state is correct. Besides, We need to suspend the channel after + // recreating. + if (mIsChannelSuspended) { + mChannel->Resume(); + mIsChannelSuspended = false; + } + mChannel = nullptr; +} + +bool ChannelSuspendAgent::IsSuspended() { + MOZ_ASSERT(NS_IsMainThread()); + return (mSuspendCount > 0); +} + +} // namespace mozilla + +#undef LOG diff --git a/dom/media/ChannelMediaResource.h b/dom/media/ChannelMediaResource.h new file mode 100644 index 0000000000..73494b4653 --- /dev/null +++ b/dom/media/ChannelMediaResource.h @@ -0,0 +1,275 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_dom_media_ChannelMediaResource_h +#define mozilla_dom_media_ChannelMediaResource_h + +#include "BaseMediaResource.h" +#include "MediaCache.h" +#include "mozilla/Mutex.h" +#include "nsIChannelEventSink.h" +#include "nsIInterfaceRequestor.h" +#include "nsIThreadRetargetableStreamListener.h" + +class nsIHttpChannel; + +namespace mozilla { + +/** + * This class is responsible for managing the suspend count and report suspend + * status of channel. + **/ +class ChannelSuspendAgent { + public: + explicit ChannelSuspendAgent(MediaCacheStream& aCacheStream) + : mCacheStream(aCacheStream) {} + + // True when the channel has been suspended or needs to be suspended. + bool IsSuspended(); + + // Return true when the channel is logically suspended, i.e. the suspend + // count goes from 0 to 1. + bool Suspend(); + + // Return true only when the suspend count is equal to zero. + bool Resume(); + + // Tell the agent to manage the suspend status of the channel. + void Delegate(nsIChannel* aChannel); + // Stop the management of the suspend status of the channel. + void Revoke(); + + private: + // Only suspends channel but not changes the suspend count. + void SuspendInternal(); + + nsIChannel* mChannel = nullptr; + MediaCacheStream& mCacheStream; + uint32_t mSuspendCount = 0; + bool mIsChannelSuspended = false; +}; + +DDLoggedTypeDeclNameAndBase(ChannelMediaResource, BaseMediaResource); + +/** + * This is the MediaResource implementation that wraps Necko channels. 
+ * Much of its functionality is actually delegated to MediaCache via + * an underlying MediaCacheStream. + * + * All synchronization is performed by MediaCacheStream; all off-main- + * thread operations are delegated directly to that object. + */ +class ChannelMediaResource + : public BaseMediaResource, + public DecoderDoctorLifeLogger { + // Store information shared among resources. Main thread only. + struct SharedInfo { + NS_INLINE_DECL_REFCOUNTING(SharedInfo); + + nsTArray mResources; + // Null if there is not yet any data from any origin. + nsCOMPtr mPrincipal; + // Meaningful only when mPrincipal is non-null, + // unaffected by intermediate cross-origin redirects. + bool mFinalResponsesAreOpaque = false; + + bool mHadCrossOriginRedirects = false; + + private: + ~SharedInfo() = default; + }; + RefPtr mSharedInfo; + + public: + ChannelMediaResource(MediaResourceCallback* aDecoder, nsIChannel* aChannel, + nsIURI* aURI, int64_t aStreamLength, + bool aIsPrivateBrowsing = false); + ~ChannelMediaResource(); + + // These are called on the main thread by MediaCache. These must + // not block or grab locks, because the media cache is holding its lock. + // Notify that data is available from the cache. This can happen even + // if this stream didn't read any data, since another stream might have + // received data for the same resource. + void CacheClientNotifyDataReceived(); + // Notify that we reached the end of the stream. This can happen even + // if this stream didn't read any data, since another stream might have + // received data for the same resource. + void CacheClientNotifyDataEnded(nsresult aStatus); + // Notify that the principal for the cached resource changed. + void CacheClientNotifyPrincipalChanged(); + // Notify the decoder that the cache suspended status changed. + void CacheClientNotifySuspendedStatusChanged(bool aSuspended); + + // These are called on the main thread by MediaCache. These shouldn't block, + // but they may grab locks --- the media cache is not holding its lock + // when these are called. + // Start a new load at the given aOffset. The old load is cancelled + // and no more data from the old load will be notified via + // MediaCacheStream::NotifyDataReceived/Ended. + void CacheClientSeek(int64_t aOffset, bool aResume); + // Suspend the current load since data is currently not wanted + void CacheClientSuspend(); + // Resume the current load since data is wanted again + void CacheClientResume(); + + bool IsSuspended(); + + void ThrottleReadahead(bool bThrottle) override; + + // Main thread + nsresult Open(nsIStreamListener** aStreamListener) override; + RefPtr Close() override; + void Suspend(bool aCloseImmediately) override; + void Resume() override; + already_AddRefed GetCurrentPrincipal() override; + bool HadCrossOriginRedirects() override; + bool CanClone() override; + already_AddRefed CloneData( + MediaResourceCallback* aDecoder) override; + nsresult ReadFromCache(char* aBuffer, int64_t aOffset, + uint32_t aCount) override; + + // Other thread + void SetReadMode(MediaCacheStream::ReadMode aMode) override; + void SetPlaybackRate(uint32_t aBytesPerSecond) override; + nsresult ReadAt(int64_t offset, char* aBuffer, uint32_t aCount, + uint32_t* aBytes) override; + // Data stored in IO&lock-encumbered MediaCacheStream, caching recommended. 
+ bool ShouldCacheReads() override { return true; } + + // Any thread + void Pin() override; + void Unpin() override; + double GetDownloadRate(bool* aIsReliable) override; + int64_t GetLength() override; + int64_t GetNextCachedData(int64_t aOffset) override; + int64_t GetCachedDataEnd(int64_t aOffset) override; + bool IsDataCachedToEndOfResource(int64_t aOffset) override; + bool IsTransportSeekable() override; + bool IsLiveStream() const override { return mIsLiveStream; } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override { + // Might be useful to track in the future: + // - mListener (seems minor) + size_t size = BaseMediaResource::SizeOfExcludingThis(aMallocSizeOf); + size += mCacheStream.SizeOfExcludingThis(aMallocSizeOf); + + return size; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + void GetDebugInfo(dom::MediaResourceDebugInfo& aInfo) override; + + class Listener final : public nsIInterfaceRequestor, + public nsIChannelEventSink, + public nsIThreadRetargetableStreamListener, + public SingleWriterLockOwner { + ~Listener() = default; + + public: + Listener(ChannelMediaResource* aResource, int64_t aOffset, uint32_t aLoadID) + : mMutex("Listener.mMutex", this), + mResource(aResource), + mOffset(aOffset), + mLoadID(aLoadID) {} + + NS_DECL_THREADSAFE_ISUPPORTS + NS_DECL_NSIREQUESTOBSERVER + NS_DECL_NSISTREAMLISTENER + NS_DECL_NSICHANNELEVENTSINK + NS_DECL_NSIINTERFACEREQUESTOR + NS_DECL_NSITHREADRETARGETABLESTREAMLISTENER + + void Revoke(); + + bool OnWritingThread() const override { return NS_IsMainThread(); } + + private: + MutexSingleWriter mMutex; + // mResource should only be modified on the main thread with the lock. + // So it can be read without lock on the main thread or on other threads + // with the lock. + RefPtr mResource MOZ_GUARDED_BY(mMutex); + + const int64_t mOffset; + const uint32_t mLoadID; + }; + friend class Listener; + + nsresult GetCachedRanges(MediaByteRangeSet& aRanges) override; + + protected: + nsresult Seek(int64_t aOffset, bool aResume); + + // These are called on the main thread by Listener. + nsresult OnStartRequest(nsIRequest* aRequest, int64_t aRequestOffset); + nsresult OnStopRequest(nsIRequest* aRequest, nsresult aStatus); + nsresult OnDataAvailable(uint32_t aLoadID, nsIInputStream* aStream, + uint32_t aCount); + nsresult OnChannelRedirect(nsIChannel* aOld, nsIChannel* aNew, + uint32_t aFlags, int64_t aOffset); + + // Use only before MediaDecoder shutdown. Main thread only. + dom::HTMLMediaElement* MediaElement() const; + // Opens the channel, using an HTTP byte range request to start at aOffset + // if possible. Main thread only. + nsresult OpenChannel(int64_t aOffset); + nsresult RecreateChannel(); + // Add headers to HTTP request. Main thread only. + nsresult SetupChannelHeaders(int64_t aOffset); + // Closes the channel. Main thread only. + void CloseChannel(); + // Update the principal for the resource. Main thread only. + void UpdatePrincipal(); + + // Parses 'Content-Range' header and returns results via parameters. + // Returns error if header is not available, values are not parse-able or + // values are out of range. + nsresult ParseContentRangeHeader(nsIHttpChannel* aHttpChan, + int64_t& aRangeStart, int64_t& aRangeEnd, + int64_t& aRangeTotal) const; + + // Calculates the length of the resource using HTTP headers, if this + // is an HTTP channel. Returns -1 on failure, or for non HTTP channels. 
+ int64_t CalculateStreamLength() const; + + struct Closure { + uint32_t mLoadID; + ChannelMediaResource* mResource; + }; + + static nsresult CopySegmentToCache(nsIInputStream* aInStream, void* aClosure, + const char* aFromSegment, + uint32_t aToOffset, uint32_t aCount, + uint32_t* aWriteCount); + + // Main thread access only + // True if Close() has been called. + bool mClosed = false; + // The last reported seekability state for the underlying channel + bool mIsTransportSeekable = false; + // Length of the content first reported. + int64_t mFirstReadLength = -1; + RefPtr mListener; + // A mono-increasing integer to uniquely identify the channel we are loading. + uint32_t mLoadID = 0; + bool mIsLiveStream = false; + + // Any thread access + MediaCacheStream mCacheStream; + + ChannelSuspendAgent mSuspendAgent; + + // The size of the stream if known at construction time (such as with blob) + const int64_t mKnownStreamLength; +}; + +} // namespace mozilla + +#endif // mozilla_dom_media_ChannelMediaResource_h diff --git a/dom/media/CloneableWithRangeMediaResource.cpp b/dom/media/CloneableWithRangeMediaResource.cpp new file mode 100644 index 0000000000..4faac6125f --- /dev/null +++ b/dom/media/CloneableWithRangeMediaResource.cpp @@ -0,0 +1,228 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "CloneableWithRangeMediaResource.h" + +#include "mozilla/AbstractThread.h" +#include "mozilla/Monitor.h" +#include "nsContentUtils.h" +#include "nsIAsyncInputStream.h" +#include "nsITimedChannel.h" +#include "nsNetCID.h" +#include "nsServiceManagerUtils.h" + +namespace mozilla { + +namespace { + +class InputStreamReader final : public nsIInputStreamCallback { + public: + NS_DECL_THREADSAFE_ISUPPORTS + + static already_AddRefed Create( + nsICloneableInputStreamWithRange* aStream, int64_t aStart, + uint32_t aLength) { + MOZ_ASSERT(aStream); + + nsCOMPtr stream; + nsresult rv = + aStream->CloneWithRange(aStart, aLength, getter_AddRefs(stream)); + if (NS_WARN_IF(NS_FAILED(rv))) { + return nullptr; + } + + RefPtr reader = new InputStreamReader(stream); + return reader.forget(); + } + + nsresult Read(char* aBuffer, uint32_t aSize, uint32_t* aRead) { + uint32_t done = 0; + do { + uint32_t read; + nsresult rv = SyncRead(aBuffer + done, aSize - done, &read); + if (NS_SUCCEEDED(rv) && read == 0) { + break; + } + if (NS_WARN_IF(NS_FAILED(rv))) { + return rv; + } + done += read; + } while (done != aSize); + + *aRead = done; + return NS_OK; + } + + NS_IMETHOD + OnInputStreamReady(nsIAsyncInputStream* aStream) override { + // Let's continue with SyncRead(). + MonitorAutoLock lock(mMonitor); + lock.Notify(); + return NS_OK; + } + + private: + explicit InputStreamReader(nsIInputStream* aStream) + : mStream(aStream), mMonitor("InputStreamReader::mMonitor") { + MOZ_ASSERT(aStream); + } + + ~InputStreamReader() = default; + + nsresult SyncRead(char* aBuffer, uint32_t aSize, uint32_t* aRead) { + while (1) { + nsresult rv = mStream->Read(aBuffer, aSize, aRead); + // All good. + if (rv == NS_BASE_STREAM_CLOSED || NS_SUCCEEDED(rv)) { + return NS_OK; + } + + // An error. + if (NS_FAILED(rv) && rv != NS_BASE_STREAM_WOULD_BLOCK) { + return rv; + } + + // We need to proceed async. 
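// (At this point Read() reported NS_BASE_STREAM_WOULD_BLOCK: the stream is
// non-blocking and simply has no data yet. The code below re-queries the
// stream as an async input stream, asks the stream transport service to call
// OnInputStreamReady() once data becomes available, and parks this thread on
// mMonitor until that callback fires. This is how SyncRead() presents a
// blocking interface over a non-blocking stream.)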
+ if (!mAsyncStream) { + mAsyncStream = do_QueryInterface(mStream); + } + + if (!mAsyncStream) { + return NS_ERROR_FAILURE; + } + + nsCOMPtr target = + do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID); + MOZ_ASSERT(target); + + { + // We wait for ::OnInputStreamReady() to be called. + MonitorAutoLock lock(mMonitor); + + rv = mAsyncStream->AsyncWait(this, 0, aSize, target); + if (NS_WARN_IF(NS_FAILED(rv))) { + return rv; + } + + lock.Wait(); + } + } + } + + nsCOMPtr mStream; + nsCOMPtr mAsyncStream; + Monitor mMonitor MOZ_UNANNOTATED; +}; + +NS_IMPL_ADDREF(InputStreamReader); +NS_IMPL_RELEASE(InputStreamReader); + +NS_INTERFACE_MAP_BEGIN(InputStreamReader) + NS_INTERFACE_MAP_ENTRY(nsIInputStreamCallback) + NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIInputStreamCallback) +NS_INTERFACE_MAP_END + +} // namespace + +void CloneableWithRangeMediaResource::MaybeInitialize() { + if (!mInitialized) { + mInitialized = true; + mCallback->AbstractMainThread()->Dispatch(NewRunnableMethod( + "MediaResourceCallback::NotifyDataEnded", mCallback.get(), + &MediaResourceCallback::NotifyDataEnded, NS_OK)); + } +} + +nsresult CloneableWithRangeMediaResource::GetCachedRanges( + MediaByteRangeSet& aRanges) { + MaybeInitialize(); + aRanges += MediaByteRange(0, (int64_t)mSize); + return NS_OK; +} + +nsresult CloneableWithRangeMediaResource::Open( + nsIStreamListener** aStreamListener) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(aStreamListener); + + *aStreamListener = nullptr; + return NS_OK; +} + +RefPtr CloneableWithRangeMediaResource::Close() { + return GenericPromise::CreateAndResolve(true, __func__); +} + +already_AddRefed +CloneableWithRangeMediaResource::GetCurrentPrincipal() { + MOZ_ASSERT(NS_IsMainThread()); + + nsCOMPtr principal; + nsIScriptSecurityManager* secMan = nsContentUtils::GetSecurityManager(); + if (!secMan || !mChannel) { + return nullptr; + } + + secMan->GetChannelResultPrincipal(mChannel, getter_AddRefs(principal)); + return principal.forget(); +} + +bool CloneableWithRangeMediaResource::HadCrossOriginRedirects() { + MOZ_ASSERT(NS_IsMainThread()); + + nsCOMPtr timedChannel = do_QueryInterface(mChannel); + if (!timedChannel) { + return false; + } + + bool allRedirectsSameOrigin = false; + return NS_SUCCEEDED(timedChannel->GetAllRedirectsSameOrigin( + &allRedirectsSameOrigin)) && + !allRedirectsSameOrigin; +} + +nsresult CloneableWithRangeMediaResource::ReadFromCache(char* aBuffer, + int64_t aOffset, + uint32_t aCount) { + MaybeInitialize(); + if (!aCount) { + return NS_OK; + } + + RefPtr reader = + InputStreamReader::Create(mStream, aOffset, aCount); + if (!reader) { + return NS_ERROR_FAILURE; + } + + uint32_t bytes = 0; + nsresult rv = reader->Read(aBuffer, aCount, &bytes); + if (NS_WARN_IF(NS_FAILED(rv))) { + return rv; + } + + return bytes == aCount ? 
NS_OK : NS_ERROR_FAILURE; +} + +nsresult CloneableWithRangeMediaResource::ReadAt(int64_t aOffset, char* aBuffer, + uint32_t aCount, + uint32_t* aBytes) { + MOZ_ASSERT(!NS_IsMainThread()); + + RefPtr reader = + InputStreamReader::Create(mStream, aOffset, aCount); + if (!reader) { + return NS_ERROR_FAILURE; + } + + nsresult rv = reader->Read(aBuffer, aCount, aBytes); + if (NS_WARN_IF(NS_FAILED(rv))) { + return rv; + } + + return NS_OK; +} + +} // namespace mozilla diff --git a/dom/media/CloneableWithRangeMediaResource.h b/dom/media/CloneableWithRangeMediaResource.h new file mode 100644 index 0000000000..1c0bab3c1b --- /dev/null +++ b/dom/media/CloneableWithRangeMediaResource.h @@ -0,0 +1,101 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_dom_media_CloneableWithRangeMediaResource_h +#define mozilla_dom_media_CloneableWithRangeMediaResource_h + +#include "BaseMediaResource.h" +#include "nsICloneableInputStream.h" + +namespace mozilla { + +class CloneableWithRangeMediaResource : public BaseMediaResource { + public: + CloneableWithRangeMediaResource(MediaResourceCallback* aCallback, + nsIChannel* aChannel, nsIURI* aURI, + nsIInputStream* aStream, uint64_t aSize) + : BaseMediaResource(aCallback, aChannel, aURI), + mStream(do_QueryInterface(aStream)), + mSize(aSize), + mInitialized(false) { + MOZ_ASSERT(mStream); + } + + ~CloneableWithRangeMediaResource() = default; + + // Main thread + nsresult Open(nsIStreamListener** aStreamListener) override; + RefPtr Close() override; + void Suspend(bool aCloseImmediately) override {} + void Resume() override {} + already_AddRefed GetCurrentPrincipal() override; + bool HadCrossOriginRedirects() override; + nsresult ReadFromCache(char* aBuffer, int64_t aOffset, + uint32_t aCount) override; + + // These methods are called off the main thread. + + // Other thread + void SetReadMode(MediaCacheStream::ReadMode aMode) override {} + void SetPlaybackRate(uint32_t aBytesPerSecond) override {} + nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount, + uint32_t* aBytes) override; + // (Probably) file-based, caching recommended. + bool ShouldCacheReads() override { return true; } + + // Any thread + void Pin() override {} + void Unpin() override {} + + double GetDownloadRate(bool* aIsReliable) override { + // The data's all already here + *aIsReliable = true; + return 100 * 1024 * 1024; // arbitray, use 100MB/s + } + + int64_t GetLength() override { + MaybeInitialize(); + return mSize; + } + + int64_t GetNextCachedData(int64_t aOffset) override { + MaybeInitialize(); + return (aOffset < (int64_t)mSize) ? aOffset : -1; + } + + int64_t GetCachedDataEnd(int64_t aOffset) override { + MaybeInitialize(); + return std::max(aOffset, (int64_t)mSize); + } + + bool IsDataCachedToEndOfResource(int64_t aOffset) override { return true; } + bool IsTransportSeekable() override { return true; } + + nsresult GetCachedRanges(MediaByteRangeSet& aRanges) override; + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override { + return BaseMediaResource::SizeOfExcludingThis(aMallocSizeOf); + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + private: + void MaybeInitialize(); + + // Input stream for the media data. 
This can be used from any + // thread. + nsCOMPtr mStream; + + // The stream size. + uint64_t mSize; + + bool mInitialized; +}; + +} // namespace mozilla + +#endif // mozilla_dom_media_CloneableWithRangeMediaResource_h diff --git a/dom/media/CrossGraphPort.cpp b/dom/media/CrossGraphPort.cpp new file mode 100644 index 0000000000..25d3a2b505 --- /dev/null +++ b/dom/media/CrossGraphPort.cpp @@ -0,0 +1,163 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "CrossGraphPort.h" + +#include "AudioDeviceInfo.h" +#include "AudioStreamTrack.h" +#include "MediaTrackGraph.h" +#include "mozilla/Logging.h" +#include "mozilla/Preferences.h" + +namespace mozilla { + +#ifdef LOG +# undef LOG +#endif +#ifdef LOG_TEST +# undef LOG_TEST +#endif + +extern LazyLogModule gMediaTrackGraphLog; +#define LOG(type, msg) MOZ_LOG(gMediaTrackGraphLog, type, msg) +#define LOG_TEST(type) MOZ_LOG_TEST(gMediaTrackGraphLog, type) + +UniquePtr CrossGraphPort::Connect( + const RefPtr& aStreamTrack, + MediaTrackGraph* aPartnerGraph) { + MOZ_ASSERT(aStreamTrack); + MOZ_ASSERT(aPartnerGraph); + if (aStreamTrack->Graph() == aPartnerGraph) { + // Primary graph the same as partner graph, just remove the existing cross + // graph connection + return nullptr; + } + + RefPtr receiver = aPartnerGraph->CreateCrossGraphReceiver( + aStreamTrack->Graph()->GraphRate()); + + RefPtr transmitter = + aStreamTrack->Graph()->CreateCrossGraphTransmitter(receiver); + + RefPtr port = + aStreamTrack->ForwardTrackContentsTo(transmitter); + + return WrapUnique(new CrossGraphPort(std::move(port), std::move(transmitter), + std::move(receiver))); +} + +CrossGraphPort::~CrossGraphPort() { + mTransmitter->Destroy(); + mReceiver->Destroy(); + mTransmitterPort->Destroy(); +} + +/** CrossGraphTransmitter **/ + +CrossGraphTransmitter::CrossGraphTransmitter( + TrackRate aSampleRate, RefPtr aReceiver) + : ProcessedMediaTrack(aSampleRate, MediaSegment::AUDIO, + nullptr /* aSegment */), + mReceiver(std::move(aReceiver)) {} + +void CrossGraphTransmitter::ProcessInput(GraphTime aFrom, GraphTime aTo, + uint32_t aFlags) { + MOZ_ASSERT(!mInputs.IsEmpty()); + MOZ_ASSERT(mDisabledMode == DisabledTrackMode::ENABLED); + + MediaTrack* input = mInputs[0]->GetSource(); + + if (input->Ended() && + (input->GetEnd() <= input->GraphTimeToTrackTimeWithBlocking(aFrom))) { + mEnded = true; + return; + } + + LOG(LogLevel::Verbose, + ("Transmitter (%p) from %" PRId64 ", to %" PRId64 ", ticks %" PRId64 "", + this, aFrom, aTo, aTo - aFrom)); + + AudioSegment audio; + GraphTime next; + for (GraphTime t = aFrom; t < aTo; t = next) { + MediaInputPort::InputInterval interval = + MediaInputPort::GetNextInputInterval(mInputs[0], t); + interval.mEnd = std::min(interval.mEnd, aTo); + + TrackTime ticks = interval.mEnd - interval.mStart; + next = interval.mEnd; + + if (interval.mStart >= interval.mEnd) { + break; + } + + if (interval.mInputIsBlocked) { + audio.AppendNullData(ticks); + } else if (input->IsSuspended()) { + audio.AppendNullData(ticks); + } else { + MOZ_ASSERT(GetEnd() == GraphTimeToTrackTimeWithBlocking(interval.mStart), + "Samples missing"); + TrackTime inputStart = + input->GraphTimeToTrackTimeWithBlocking(interval.mStart); + TrackTime inputEnd = + input->GraphTimeToTrackTimeWithBlocking(interval.mEnd); + audio.AppendSlice(*input->GetData(), inputStart, inputEnd); + } + } + + mStartTime = aTo; + + for 
(AudioSegment::ChunkIterator iter(audio); !iter.IsEnded(); iter.Next()) { + Unused << mReceiver->EnqueueAudio(*iter); + } +} + +/** CrossGraphReceiver **/ + +CrossGraphReceiver::CrossGraphReceiver(TrackRate aSampleRate, + TrackRate aTransmitterRate) + : ProcessedMediaTrack(aSampleRate, MediaSegment::AUDIO, + static_cast(new AudioSegment())), + mDriftCorrection(aTransmitterRate, aSampleRate, PRINCIPAL_HANDLE_NONE) {} + +uint32_t CrossGraphReceiver::NumberOfChannels() const { + return GetData()->MaxChannelCount(); +} + +void CrossGraphReceiver::ProcessInput(GraphTime aFrom, GraphTime aTo, + uint32_t aFlags) { + LOG(LogLevel::Verbose, + ("Receiver (%p) mSegment: duration: %" PRId64 ", from %" PRId64 + ", to %" PRId64 ", ticks %" PRId64 "", + this, mSegment->GetDuration(), aFrom, aTo, aTo - aFrom)); + + AudioSegment transmittedAudio; + while (mCrossThreadFIFO.AvailableRead()) { + AudioChunk chunk; + Unused << mCrossThreadFIFO.Dequeue(&chunk, 1); + transmittedAudio.AppendAndConsumeChunk(std::move(chunk)); + mTransmitterHasStarted = true; + } + + if (mTransmitterHasStarted) { + // If it does not have enough frames the result will be silence. + AudioSegment audioCorrected = + mDriftCorrection.RequestFrames(transmittedAudio, aTo - aFrom); + if (LOG_TEST(LogLevel::Verbose) && audioCorrected.IsNull()) { + LOG(LogLevel::Verbose, + ("Receiver(%p): Silence has been added, not enough input", this)); + } + mSegment->AppendFrom(&audioCorrected); + } else { + mSegment->AppendNullData(aTo - aFrom); + } +} + +int CrossGraphReceiver::EnqueueAudio(AudioChunk& aChunk) { + // This will take place on transmitter graph thread only. + return mCrossThreadFIFO.Enqueue(aChunk); +} + +} // namespace mozilla diff --git a/dom/media/CrossGraphPort.h b/dom/media/CrossGraphPort.h new file mode 100644 index 0000000000..da9630ae42 --- /dev/null +++ b/dom/media/CrossGraphPort.h @@ -0,0 +1,100 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZILLA_CROSS_GRAPH_TRACK_H_ +#define MOZILLA_CROSS_GRAPH_TRACK_H_ + +#include "AudioDriftCorrection.h" +#include "AudioSegment.h" +#include "ForwardedInputTrack.h" +#include "mozilla/SPSCQueue.h" +#include "mozilla/UniquePtr.h" + +namespace mozilla { +class CrossGraphReceiver; +} + +namespace mozilla::dom { +class AudioStreamTrack; +} + +namespace mozilla { + +/** + * CrossGraphTransmitter and CrossGraphPort are currently unused, but intended + * for connecting MediaTracks of different MediaTrackGraphs with different + * sample rates or clock sources for bug 1674892. + * + * Create with MediaTrackGraph::CreateCrossGraphTransmitter() + */ +class CrossGraphTransmitter : public ProcessedMediaTrack { + public: + CrossGraphTransmitter(TrackRate aSampleRate, + RefPtr aReceiver); + CrossGraphTransmitter* AsCrossGraphTransmitter() override { return this; } + + uint32_t NumberOfChannels() const override { + MOZ_CRASH("CrossGraphTransmitter has no segment. 
It cannot be played out."); + } + void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override; + + private: + const RefPtr mReceiver; +}; + +/** + * Create with MediaTrackGraph::CreateCrossGraphReceiver() + */ +class CrossGraphReceiver : public ProcessedMediaTrack { + public: + CrossGraphReceiver(TrackRate aSampleRate, TrackRate aTransmitterRate); + CrossGraphReceiver* AsCrossGraphReceiver() override { return this; } + + uint32_t NumberOfChannels() const override; + void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override; + + int EnqueueAudio(AudioChunk& aChunk); + + private: + SPSCQueue mCrossThreadFIFO{30}; + // Indicates that tre CrossGraphTransmitter has started sending frames. It + // is false untill the point, transmitter has sent the first valid frame. + // Accessed in GraphThread only. + bool mTransmitterHasStarted = false; + // Correct the drift between transmitter and receiver. Reciever (this class) + // is considered as the master clock. + // Accessed in GraphThread only. + AudioDriftCorrection mDriftCorrection; +}; + +class CrossGraphPort final { + public: + static UniquePtr Connect( + const RefPtr& aStreamTrack, + MediaTrackGraph* aPartnerGraph); + ~CrossGraphPort(); + + const RefPtr mTransmitter; + const RefPtr mReceiver; + + private: + explicit CrossGraphPort(RefPtr aTransmitterPort, + RefPtr aTransmitter, + RefPtr aReceiver) + : mTransmitter(std::move(aTransmitter)), + mReceiver(std::move(aReceiver)), + mTransmitterPort(std::move(aTransmitterPort)) { + MOZ_ASSERT(mTransmitter); + MOZ_ASSERT(mReceiver); + MOZ_ASSERT(mTransmitterPort); + } + + // The port that connects the input track to the transmitter. + const RefPtr mTransmitterPort; +}; + +} // namespace mozilla + +#endif /* MOZILLA_CROSS_GRAPH_TRACK_H_ */ diff --git a/dom/media/CubebInputStream.cpp b/dom/media/CubebInputStream.cpp new file mode 100644 index 0000000000..38b66315f9 --- /dev/null +++ b/dom/media/CubebInputStream.cpp @@ -0,0 +1,178 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +#include "CubebInputStream.h" + +#include "AudioSampleFormat.h" +#include "mozilla/Logging.h" + +namespace mozilla { + +extern mozilla::LazyLogModule gMediaTrackGraphLog; + +#ifdef LOG_INTERNAL +# undef LOG_INTERNAL +#endif // LOG_INTERNAL +#define LOG_INTERNAL(level, msg, ...) \ + MOZ_LOG(gMediaTrackGraphLog, LogLevel::level, (msg, ##__VA_ARGS__)) + +#ifdef LOG +# undef LOG +#endif // LOG +#define LOG(msg, ...) LOG_INTERNAL(Debug, msg, ##__VA_ARGS__) + +#ifdef LOGE +# undef LOGE +#endif // LOGE +#define LOGE(msg, ...) LOG_INTERNAL(Error, msg, ##__VA_ARGS__) + +#define InvokeCubebWithLog(func, ...) \ + ({ \ + int _retval; \ + _retval = InvokeCubeb(func, ##__VA_ARGS__); \ + if (_retval == CUBEB_OK) { \ + LOG("CubebInputStream %p: %s for stream %p was successful", this, #func, \ + mStream.get()); \ + } else { \ + LOGE("CubebInputStream %p: %s for stream %p was failed. 
Error %d", this, \ + #func, mStream.get(), _retval); \ + } \ + _retval; \ + }) + +static cubeb_stream_params CreateStreamInitParams(uint32_t aChannels, + uint32_t aRate, + bool aIsVoice) { + cubeb_stream_params params; + params.format = CubebUtils::ToCubebFormat::value; + params.rate = aRate; + params.channels = aChannels; + params.layout = CUBEB_LAYOUT_UNDEFINED; + params.prefs = CubebUtils::GetDefaultStreamPrefs(CUBEB_DEVICE_TYPE_INPUT); + + if (aIsVoice) { + params.prefs |= static_cast(CUBEB_STREAM_PREF_VOICE); + } + + return params; +} + +void CubebInputStream::CubebDestroyPolicy::operator()( + cubeb_stream* aStream) const { + int r = cubeb_stream_register_device_changed_callback(aStream, nullptr); + if (r == CUBEB_OK) { + LOG("Unregister device changed callback for %p successfully", aStream); + } else { + LOGE("Fail to unregister device changed callback for %p. Error %d", aStream, + r); + } + cubeb_stream_destroy(aStream); +} + +/* static */ +UniquePtr CubebInputStream::Create(cubeb_devid aDeviceId, + uint32_t aChannels, + uint32_t aRate, + bool aIsVoice, + Listener* aListener) { + if (!aListener) { + LOGE("No available listener"); + return nullptr; + } + + RefPtr handle = CubebUtils::GetCubeb(); + if (!handle) { + LOGE("No valid cubeb context"); + CubebUtils::ReportCubebStreamInitFailure(CubebUtils::GetFirstStream()); + return nullptr; + } + + cubeb_stream_params params = + CreateStreamInitParams(aChannels, aRate, aIsVoice); + uint32_t latencyFrames = CubebUtils::GetCubebMTGLatencyInFrames(¶ms); + + cubeb_stream* cubebStream = nullptr; + + RefPtr listener(aListener); + if (int r = CubebUtils::CubebStreamInit( + handle->Context(), &cubebStream, "input-only stream", aDeviceId, + ¶ms, nullptr, nullptr, latencyFrames, DataCallback_s, + StateCallback_s, listener.get()); + r != CUBEB_OK) { + CubebUtils::ReportCubebStreamInitFailure(CubebUtils::GetFirstStream()); + LOGE("Fail to create a cubeb stream. Error %d", r); + return nullptr; + } + + UniquePtr inputStream(cubebStream); + + LOG("Create a cubeb stream %p successfully", inputStream.get()); + + UniquePtr stream( + new CubebInputStream(listener.forget(), std::move(inputStream))); + stream->Init(); + return stream; +} + +CubebInputStream::CubebInputStream( + already_AddRefed&& aListener, + UniquePtr&& aStream) + : mListener(aListener), + mCubeb(CubebUtils::GetCubeb()), + mStream(std::move(aStream)) { + MOZ_ASSERT(mListener); + MOZ_ASSERT(mStream); +} + +void CubebInputStream::Init() { + // cubeb_stream_register_device_changed_callback is only supported on macOS + // platform and MockCubebfor now. + InvokeCubebWithLog(cubeb_stream_register_device_changed_callback, + CubebInputStream::DeviceChangedCallback_s); +} + +int CubebInputStream::Start() { return InvokeCubebWithLog(cubeb_stream_start); } + +int CubebInputStream::Stop() { return InvokeCubebWithLog(cubeb_stream_stop); } + +int CubebInputStream::Latency(uint32_t* aLatencyFrames) { + return InvokeCubebWithLog(cubeb_stream_get_input_latency, aLatencyFrames); +} + +template +int CubebInputStream::InvokeCubeb(Function aFunction, Args&&... 
aArgs) { + MOZ_ASSERT(mStream); + return aFunction(mStream.get(), std::forward(aArgs)...); +} + +/* static */ +long CubebInputStream::DataCallback_s(cubeb_stream* aStream, void* aUser, + const void* aInputBuffer, + void* aOutputBuffer, long aFrames) { + MOZ_ASSERT(aUser); + MOZ_ASSERT(aInputBuffer); + MOZ_ASSERT(!aOutputBuffer); + return static_cast(aUser)->DataCallback(aInputBuffer, aFrames); +} + +/* static */ +void CubebInputStream::StateCallback_s(cubeb_stream* aStream, void* aUser, + cubeb_state aState) { + MOZ_ASSERT(aUser); + static_cast(aUser)->StateCallback(aState); +} + +/* static */ +void CubebInputStream::DeviceChangedCallback_s(void* aUser) { + MOZ_ASSERT(aUser); + static_cast(aUser)->DeviceChangedCallback(); +} + +#undef LOG_INTERNAL +#undef LOG +#undef LOGE + +} // namespace mozilla diff --git a/dom/media/CubebInputStream.h b/dom/media/CubebInputStream.h new file mode 100644 index 0000000000..a35924a976 --- /dev/null +++ b/dom/media/CubebInputStream.h @@ -0,0 +1,86 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +#ifndef DOM_MEDIA_CUBEBINPUTSTREAM_H_ +#define DOM_MEDIA_CUBEBINPUTSTREAM_H_ + +#include "CubebUtils.h" +#include "mozilla/Maybe.h" +#include "mozilla/RefPtr.h" +#include "mozilla/UniquePtr.h" +#include "nsISupportsImpl.h" + +namespace mozilla { + +// A light-weight wrapper to operate the C style Cubeb APIs for an input-only +// audio stream in a C++-friendly way. +// Limitation: Do not call these APIs in an audio callback thread. Otherwise we +// may get a deadlock. +class CubebInputStream final { + public: + ~CubebInputStream() = default; + + class Listener { + public: + NS_INLINE_DECL_PURE_VIRTUAL_REFCOUNTING; + + // This will be fired on audio callback thread. + virtual long DataCallback(const void* aBuffer, long aFrames) = 0; + // This can be fired on any thread. + virtual void StateCallback(cubeb_state aState) = 0; + // This can be fired on any thread. + virtual void DeviceChangedCallback() = 0; + + protected: + Listener() = default; + virtual ~Listener() = default; + }; + + // Return a non-null pointer if the stream has been initialized + // successfully. Otherwise return a null pointer. + static UniquePtr Create(cubeb_devid aDeviceId, + uint32_t aChannels, uint32_t aRate, + bool aIsVoice, Listener* aListener); + + // Start producing audio data. + int Start(); + + // Stop producing audio data. + int Stop(); + + // Gets the approximate stream latency in frames. + int Latency(uint32_t* aLatencyFrames); + + private: + struct CubebDestroyPolicy { + void operator()(cubeb_stream* aStream) const; + }; + CubebInputStream(already_AddRefed&& aListener, + UniquePtr&& aStream); + + void Init(); + + template + int InvokeCubeb(Function aFunction, Args&&... aArgs); + + // Static wrapper function cubeb callbacks. + static long DataCallback_s(cubeb_stream* aStream, void* aUser, + const void* aInputBuffer, void* aOutputBuffer, + long aFrames); + static void StateCallback_s(cubeb_stream* aStream, void* aUser, + cubeb_state aState); + static void DeviceChangedCallback_s(void* aUser); + + // mListener must outlive the life time of the mStream. + const RefPtr mListener; + // So must mCubeb (mStream has a bare pointer to cubeb). 
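// (Declaration order doubles as a lifetime guarantee here: non-static data
// members are destroyed in reverse declaration order, so mStream, declared
// last, is torn down first, while mListener and mCubeb are still alive.)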
+ const RefPtr mCubeb; + const UniquePtr mStream; +}; + +} // namespace mozilla + +#endif // DOM_MEDIA_CUBEBINPUTSTREAM_H_ diff --git a/dom/media/CubebUtils.cpp b/dom/media/CubebUtils.cpp new file mode 100644 index 0000000000..bad1ab649d --- /dev/null +++ b/dom/media/CubebUtils.cpp @@ -0,0 +1,908 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "CubebUtils.h" + +#include "audio_thread_priority.h" +#include "mozilla/AbstractThread.h" +#include "mozilla/dom/ContentChild.h" +#include "mozilla/glean/GleanMetrics.h" +#include "mozilla/ipc/FileDescriptor.h" +#include "mozilla/Logging.h" +#include "mozilla/Preferences.h" +#include "mozilla/Components.h" +#include "mozilla/Sprintf.h" +#include "mozilla/StaticMutex.h" +#include "mozilla/StaticPtr.h" +#include "mozilla/Telemetry.h" +#include "mozilla/UnderrunHandler.h" +#include "nsContentUtils.h" +#include "nsDebug.h" +#include "nsIStringBundle.h" +#include "nsString.h" +#include "nsThreadUtils.h" +#include "prdtoa.h" +#include +#include +#ifdef MOZ_WIDGET_ANDROID +# include "mozilla/java/GeckoAppShellWrappers.h" +#endif +#ifdef XP_WIN +# include "mozilla/mscom/EnsureMTA.h" +#endif +#include "audioipc2_server_ffi_generated.h" +#include "audioipc2_client_ffi_generated.h" +#include +#include +#include "CallbackThreadRegistry.h" +#include "mozilla/StaticPrefs_media.h" + +#define AUDIOIPC_STACK_SIZE_DEFAULT (64 * 4096) + +#define PREF_VOLUME_SCALE "media.volume_scale" +#define PREF_CUBEB_BACKEND "media.cubeb.backend" +#define PREF_CUBEB_OUTPUT_DEVICE "media.cubeb.output_device" +#define PREF_CUBEB_LATENCY_PLAYBACK "media.cubeb_latency_playback_ms" +#define PREF_CUBEB_LATENCY_MTG "media.cubeb_latency_mtg_frames" +// Allows to get something non-default for the preferred sample-rate, to allow +// troubleshooting in the field and testing. +#define PREF_CUBEB_FORCE_SAMPLE_RATE "media.cubeb.force_sample_rate" +#define PREF_CUBEB_LOGGING_LEVEL "logging.cubeb" +// Hidden pref used by tests to force failure to obtain cubeb context +#define PREF_CUBEB_FORCE_NULL_CONTEXT "media.cubeb.force_null_context" +#define PREF_CUBEB_OUTPUT_VOICE_ROUTING "media.cubeb.output_voice_routing" +#define PREF_CUBEB_SANDBOX "media.cubeb.sandbox" +#define PREF_AUDIOIPC_STACK_SIZE "media.audioipc.stack_size" +#define PREF_AUDIOIPC_SHM_AREA_SIZE "media.audioipc.shm_area_size" + +#if defined(XP_LINUX) || defined(XP_MACOSX) || defined(XP_WIN) +# define MOZ_CUBEB_REMOTING +#endif + +namespace mozilla { + +namespace { + +using Telemetry::LABELS_MEDIA_AUDIO_BACKEND; +using Telemetry::LABELS_MEDIA_AUDIO_INIT_FAILURE; + +LazyLogModule gCubebLog("cubeb"); + +void CubebLogCallback(const char* aFmt, ...) { + char buffer[1024]; + + va_list arglist; + va_start(arglist, aFmt); + VsprintfLiteral(buffer, aFmt, arglist); + MOZ_LOG(gCubebLog, LogLevel::Error, ("%s", buffer)); + va_end(arglist); +} + +// This mutex protects the variables below. 
+StaticMutex sMutex; +enum class CubebState { + Uninitialized = 0, + Initialized, + Shutdown +} sCubebState = CubebState::Uninitialized; +StaticRefPtr sCubebHandle; +double sVolumeScale = 1.0; +uint32_t sCubebPlaybackLatencyInMilliseconds = 100; +uint32_t sCubebMTGLatencyInFrames = 512; +// If sCubebForcedSampleRate is zero, PreferredSampleRate will return the +// preferred sample-rate for the audio backend in use. Otherwise, it will be +// used as the preferred sample-rate. +Atomic sCubebForcedSampleRate{0}; +bool sCubebPlaybackLatencyPrefSet = false; +bool sCubebMTGLatencyPrefSet = false; +bool sAudioStreamInitEverSucceeded = false; +bool sCubebForceNullContext = false; +bool sRouteOutputAsVoice = false; +#ifdef MOZ_CUBEB_REMOTING +bool sCubebSandbox = false; +size_t sAudioIPCStackSize; +size_t sAudioIPCShmAreaSize; +#endif +StaticAutoPtr sBrandName; +StaticAutoPtr sCubebBackendName; +StaticAutoPtr sCubebOutputDeviceName; +#ifdef MOZ_WIDGET_ANDROID +// Counts the number of time a request for switching to global "communication +// mode" has been received. If this is > 0, global communication mode is to be +// enabled. If it is 0, the global communication mode is to be disabled. +// This allows to correctly track the global behaviour to adopt accross +// asynchronous GraphDriver changes, on Android. +int sInCommunicationCount = 0; +#endif + +const char kBrandBundleURL[] = "chrome://branding/locale/brand.properties"; + +std::unordered_map + kTelemetryBackendLabel = { + {"audiounit", LABELS_MEDIA_AUDIO_BACKEND::audiounit}, + {"audiounit-rust", LABELS_MEDIA_AUDIO_BACKEND::audiounit_rust}, + {"aaudio", LABELS_MEDIA_AUDIO_BACKEND::aaudio}, + {"opensl", LABELS_MEDIA_AUDIO_BACKEND::opensl}, + {"wasapi", LABELS_MEDIA_AUDIO_BACKEND::wasapi}, + {"winmm", LABELS_MEDIA_AUDIO_BACKEND::winmm}, + {"alsa", LABELS_MEDIA_AUDIO_BACKEND::alsa}, + {"jack", LABELS_MEDIA_AUDIO_BACKEND::jack}, + {"oss", LABELS_MEDIA_AUDIO_BACKEND::oss}, + {"pulse", LABELS_MEDIA_AUDIO_BACKEND::pulse}, + {"pulse-rust", LABELS_MEDIA_AUDIO_BACKEND::pulse_rust}, + {"sndio", LABELS_MEDIA_AUDIO_BACKEND::sndio}, + {"sun", LABELS_MEDIA_AUDIO_BACKEND::sunaudio}, +}; + +// Prefered samplerate, in Hz (characteristic of the hardware, mixer, platform, +// and API used). +// +// sMutex protects *initialization* of this, which must be performed from each +// thread before fetching, after which it is safe to fetch without holding the +// mutex because it is only written once per process execution (by the first +// initialization to complete). Since the init must have been called on a +// given thread before fetching the value, it's guaranteed (via the mutex) that +// sufficient memory barriers have occurred to ensure the correct value is +// visible on the querying thread/CPU. +static Atomic sPreferredSampleRate{0}; + +#ifdef MOZ_CUBEB_REMOTING +// AudioIPC server handle +void* sServerHandle = nullptr; + +// Initialized during early startup, protected by sMutex. 
+StaticAutoPtr sIPCConnection; + +static bool StartAudioIPCServer() { + if (sCubebSandbox) { + audioipc2::AudioIpcServerInitParams initParams{}; + initParams.mThreadCreateCallback = [](const char* aName) { + PROFILER_REGISTER_THREAD(aName); + }; + initParams.mThreadDestroyCallback = []() { PROFILER_UNREGISTER_THREAD(); }; + + sServerHandle = audioipc2::audioipc2_server_start( + sBrandName, sCubebBackendName, &initParams); + } + return sServerHandle != nullptr; +} + +static void ShutdownAudioIPCServer() { + if (!sServerHandle) { + return; + } + + audioipc2::audioipc2_server_stop(sServerHandle); + sServerHandle = nullptr; +} +#endif // MOZ_CUBEB_REMOTING +} // namespace + +static const uint32_t CUBEB_NORMAL_LATENCY_MS = 100; +// Consevative default that can work on all platforms. +static const uint32_t CUBEB_NORMAL_LATENCY_FRAMES = 1024; + +namespace CubebUtils { +RefPtr GetCubebUnlocked(); + +void GetPrefAndSetString(const char* aPref, StaticAutoPtr& aStorage) { + nsAutoCString value; + Preferences::GetCString(aPref, value); + if (value.IsEmpty()) { + aStorage = nullptr; + } else { + aStorage = new char[value.Length() + 1]; + PodCopy(aStorage.get(), value.get(), value.Length()); + aStorage[value.Length()] = 0; + } +} + +void PrefChanged(const char* aPref, void* aClosure) { + if (strcmp(aPref, PREF_VOLUME_SCALE) == 0) { + nsAutoCString value; + Preferences::GetCString(aPref, value); + StaticMutexAutoLock lock(sMutex); + if (value.IsEmpty()) { + sVolumeScale = 1.0; + } else { + sVolumeScale = std::max(0, PR_strtod(value.get(), nullptr)); + } + } else if (strcmp(aPref, PREF_CUBEB_LATENCY_PLAYBACK) == 0) { + StaticMutexAutoLock lock(sMutex); + // Arbitrary default stream latency of 100ms. The higher this + // value, the longer stream volume changes will take to become + // audible. + sCubebPlaybackLatencyPrefSet = Preferences::HasUserValue(aPref); + uint32_t value = Preferences::GetUint(aPref, CUBEB_NORMAL_LATENCY_MS); + sCubebPlaybackLatencyInMilliseconds = + std::min(std::max(value, 1), 1000); + } else if (strcmp(aPref, PREF_CUBEB_LATENCY_MTG) == 0) { + StaticMutexAutoLock lock(sMutex); + sCubebMTGLatencyPrefSet = Preferences::HasUserValue(aPref); + uint32_t value = Preferences::GetUint(aPref, CUBEB_NORMAL_LATENCY_FRAMES); + // 128 is the block size for the Web Audio API, which limits how low the + // latency can be here. + // We don't want to limit the upper limit too much, so that people can + // experiment. 
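    // For scale, latency in seconds is frames / sample-rate: at 48 kHz, the
    // 128-frame floor (one Web Audio render quantum) is ~2.7 ms, 512 frames
    // is ~10.7 ms, and 1024 frames is ~21.3 ms.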
+ sCubebMTGLatencyInFrames = + std::min(std::max(value, 128), 1e6); + } else if (strcmp(aPref, PREF_CUBEB_FORCE_SAMPLE_RATE) == 0) { + StaticMutexAutoLock lock(sMutex); + sCubebForcedSampleRate = Preferences::GetUint(aPref); + } else if (strcmp(aPref, PREF_CUBEB_LOGGING_LEVEL) == 0) { + LogLevel value = + ToLogLevel(Preferences::GetInt(aPref, 0 /* LogLevel::Disabled */)); + if (value == LogLevel::Verbose) { + cubeb_set_log_callback(CUBEB_LOG_VERBOSE, CubebLogCallback); + } else if (value == LogLevel::Debug) { + cubeb_set_log_callback(CUBEB_LOG_NORMAL, CubebLogCallback); + } else if (value == LogLevel::Disabled) { + cubeb_set_log_callback(CUBEB_LOG_DISABLED, nullptr); + } + } else if (strcmp(aPref, PREF_CUBEB_BACKEND) == 0) { + StaticMutexAutoLock lock(sMutex); + GetPrefAndSetString(aPref, sCubebBackendName); + } else if (strcmp(aPref, PREF_CUBEB_OUTPUT_DEVICE) == 0) { + StaticMutexAutoLock lock(sMutex); + GetPrefAndSetString(aPref, sCubebOutputDeviceName); + } else if (strcmp(aPref, PREF_CUBEB_FORCE_NULL_CONTEXT) == 0) { + StaticMutexAutoLock lock(sMutex); + sCubebForceNullContext = Preferences::GetBool(aPref, false); + MOZ_LOG(gCubebLog, LogLevel::Verbose, + ("%s: %s", PREF_CUBEB_FORCE_NULL_CONTEXT, + sCubebForceNullContext ? "true" : "false")); + } +#ifdef MOZ_CUBEB_REMOTING + else if (strcmp(aPref, PREF_CUBEB_SANDBOX) == 0) { + StaticMutexAutoLock lock(sMutex); + sCubebSandbox = Preferences::GetBool(aPref); + MOZ_LOG(gCubebLog, LogLevel::Verbose, + ("%s: %s", PREF_CUBEB_SANDBOX, sCubebSandbox ? "true" : "false")); + } else if (strcmp(aPref, PREF_AUDIOIPC_STACK_SIZE) == 0) { + StaticMutexAutoLock lock(sMutex); + sAudioIPCStackSize = Preferences::GetUint(PREF_AUDIOIPC_STACK_SIZE, + AUDIOIPC_STACK_SIZE_DEFAULT); + } else if (strcmp(aPref, PREF_AUDIOIPC_SHM_AREA_SIZE) == 0) { + StaticMutexAutoLock lock(sMutex); + sAudioIPCShmAreaSize = Preferences::GetUint(PREF_AUDIOIPC_SHM_AREA_SIZE); + } +#endif + else if (strcmp(aPref, PREF_CUBEB_OUTPUT_VOICE_ROUTING) == 0) { + StaticMutexAutoLock lock(sMutex); + sRouteOutputAsVoice = Preferences::GetBool(aPref); + MOZ_LOG(gCubebLog, LogLevel::Verbose, + ("%s: %s", PREF_CUBEB_OUTPUT_VOICE_ROUTING, + sRouteOutputAsVoice ? "true" : "false")); + } +} + +bool GetFirstStream() { + static bool sFirstStream = true; + + StaticMutexAutoLock lock(sMutex); + bool result = sFirstStream; + sFirstStream = false; + return result; +} + +double GetVolumeScale() { + StaticMutexAutoLock lock(sMutex); + return sVolumeScale; +} + +RefPtr GetCubeb() { + StaticMutexAutoLock lock(sMutex); + return GetCubebUnlocked(); +} + +// This is only exported when running tests. 
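// (Passing a non-null context installs it under sMutex so that subsequent
// GetCubeb() calls return it; passing nullptr simulates the "no usable audio
// backend" case. Either way sCubebState becomes Initialized, which keeps the
// normal lazy-initialization path in GetCubebUnlocked() from running later.)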
+void ForceSetCubebContext(cubeb* aCubebContext) { + StaticMutexAutoLock lock(sMutex); + if (aCubebContext) { + sCubebHandle = new CubebHandle(aCubebContext); + } else { + sCubebHandle = nullptr; + } + sCubebState = CubebState::Initialized; +} + +void SetInCommunication(bool aInCommunication) { +#ifdef MOZ_WIDGET_ANDROID + StaticMutexAutoLock lock(sMutex); + if (aInCommunication) { + sInCommunicationCount++; + } else { + MOZ_ASSERT(sInCommunicationCount > 0); + sInCommunicationCount--; + } + + if (sInCommunicationCount == 1) { + java::GeckoAppShell::SetCommunicationAudioModeOn(true); + } else if (sInCommunicationCount == 0) { + java::GeckoAppShell::SetCommunicationAudioModeOn(false); + } +#endif +} + +bool InitPreferredSampleRate() { + sMutex.AssertCurrentThreadOwns(); + if (sPreferredSampleRate != 0) { + return true; + } +#ifdef MOZ_WIDGET_ANDROID + int rate = AndroidGetAudioOutputSampleRate(); + if (rate > 0) { + sPreferredSampleRate = rate; + return true; + } else { + return false; + } +#else + RefPtr handle = GetCubebUnlocked(); + if (!handle) { + return false; + } + uint32_t rate; + { + StaticMutexAutoUnlock unlock(sMutex); + if (cubeb_get_preferred_sample_rate(handle->Context(), &rate) != CUBEB_OK) { + return false; + } + } + sPreferredSampleRate = rate; +#endif + MOZ_ASSERT(sPreferredSampleRate); + return true; +} + +uint32_t PreferredSampleRate(bool aShouldResistFingerprinting) { + StaticMutexAutoLock lock(sMutex); + if (sCubebForcedSampleRate) { + return sCubebForcedSampleRate; + } + if (aShouldResistFingerprinting) { + return 44100; + } + if (!InitPreferredSampleRate()) { + return 44100; + } + MOZ_ASSERT(sPreferredSampleRate); + return sPreferredSampleRate; +} + +int CubebStreamInit(cubeb* context, cubeb_stream** stream, + char const* stream_name, cubeb_devid input_device, + cubeb_stream_params* input_stream_params, + cubeb_devid output_device, + cubeb_stream_params* output_stream_params, + uint32_t latency_frames, cubeb_data_callback data_callback, + cubeb_state_callback state_callback, void* user_ptr) { + uint32_t ms = StaticPrefs::media_cubeb_slow_stream_init_ms(); + if (ms) { + std::this_thread::sleep_for(std::chrono::milliseconds(ms)); + } + return cubeb_stream_init(context, stream, stream_name, input_device, + input_stream_params, output_device, + output_stream_params, latency_frames, data_callback, + state_callback, user_ptr); +} + +void InitBrandName() { + if (sBrandName) { + return; + } + nsAutoString brandName; + nsCOMPtr stringBundleService = + mozilla::components::StringBundle::Service(); + if (stringBundleService) { + nsCOMPtr brandBundle; + nsresult rv = stringBundleService->CreateBundle( + kBrandBundleURL, getter_AddRefs(brandBundle)); + if (NS_SUCCEEDED(rv)) { + rv = brandBundle->GetStringFromName("brandShortName", brandName); + NS_WARNING_ASSERTION( + NS_SUCCEEDED(rv), + "Could not get the program name for a cubeb stream."); + } + } + NS_LossyConvertUTF16toASCII ascii(brandName); + sBrandName = new char[ascii.Length() + 1]; + PodCopy(sBrandName.get(), ascii.get(), ascii.Length()); + sBrandName[ascii.Length()] = 0; +} + +#ifdef MOZ_CUBEB_REMOTING +void InitAudioIPCConnection() { + MOZ_ASSERT(NS_IsMainThread()); + auto contentChild = dom::ContentChild::GetSingleton(); + auto promise = contentChild->SendCreateAudioIPCConnection(); + promise->Then( + AbstractThread::MainThread(), __func__, + [](dom::FileDescOrError&& aFD) { + StaticMutexAutoLock lock(sMutex); + MOZ_ASSERT(!sIPCConnection); + if (aFD.type() == dom::FileDescOrError::Type::TFileDescriptor) { + 
sIPCConnection = new ipc::FileDescriptor(std::move(aFD)); + } else { + MOZ_LOG(gCubebLog, LogLevel::Error, + ("SendCreateAudioIPCConnection failed: invalid FD")); + } + }, + [](mozilla::ipc::ResponseRejectReason&& aReason) { + MOZ_LOG(gCubebLog, LogLevel::Error, + ("SendCreateAudioIPCConnection rejected: %d", int(aReason))); + }); +} +#endif + +#ifdef MOZ_CUBEB_REMOTING +ipc::FileDescriptor CreateAudioIPCConnectionUnlocked() { + MOZ_ASSERT(sCubebSandbox && XRE_IsParentProcess()); + if (!sServerHandle) { + MOZ_LOG(gCubebLog, LogLevel::Debug, ("Starting cubeb server...")); + if (!StartAudioIPCServer()) { + MOZ_LOG(gCubebLog, LogLevel::Error, ("audioipc_server_start failed")); + return ipc::FileDescriptor(); + } + } + MOZ_LOG(gCubebLog, LogLevel::Debug, + ("%s: %d", PREF_AUDIOIPC_SHM_AREA_SIZE, (int)sAudioIPCShmAreaSize)); + MOZ_ASSERT(sServerHandle); + ipc::FileDescriptor::PlatformHandleType rawFD; + rawFD = audioipc2::audioipc2_server_new_client(sServerHandle, + sAudioIPCShmAreaSize); + ipc::FileDescriptor fd(rawFD); + if (!fd.IsValid()) { + MOZ_LOG(gCubebLog, LogLevel::Error, ("audioipc_server_new_client failed")); + return ipc::FileDescriptor(); + } + // Close rawFD since FileDescriptor's ctor cloned it. + // TODO: Find cleaner cross-platform way to close rawFD. +# ifdef XP_WIN + CloseHandle(rawFD); +# else + close(rawFD); +# endif + return fd; +} +#endif + +ipc::FileDescriptor CreateAudioIPCConnection() { +#ifdef MOZ_CUBEB_REMOTING + StaticMutexAutoLock lock(sMutex); + return CreateAudioIPCConnectionUnlocked(); +#else + return ipc::FileDescriptor(); +#endif +} + +RefPtr GetCubebUnlocked() { + sMutex.AssertCurrentThreadOwns(); + if (sCubebForceNullContext) { + // Pref set such that we should return a null context + MOZ_LOG(gCubebLog, LogLevel::Debug, + ("%s: returning null context due to %s!", __func__, + PREF_CUBEB_FORCE_NULL_CONTEXT)); + return nullptr; + } + if (sCubebState != CubebState::Uninitialized) { + // If we have already passed the initialization point (below), just return + // the current context, which may be null (e.g., after error or shutdown.) + return sCubebHandle; + } + + if (!sBrandName && NS_IsMainThread()) { + InitBrandName(); + } else { + NS_WARNING_ASSERTION( + sBrandName, + "Did not initialize sbrandName, and not on the main thread?"); + } + + int rv = CUBEB_ERROR; +#ifdef MOZ_CUBEB_REMOTING + MOZ_LOG(gCubebLog, LogLevel::Info, + ("%s: %s", PREF_CUBEB_SANDBOX, sCubebSandbox ? "true" : "false")); + + if (sCubebSandbox) { + if (XRE_IsParentProcess() && !sIPCConnection) { + // TODO: Don't use audio IPC when within the same process. + auto fd = CreateAudioIPCConnectionUnlocked(); + if (fd.IsValid()) { + sIPCConnection = new ipc::FileDescriptor(fd); + } + } + if (NS_WARN_IF(!sIPCConnection)) { + // Either the IPC connection failed to init or we're still waiting for + // InitAudioIPCConnection to complete (bug 1454782). 
+ return nullptr; + } + + MOZ_LOG(gCubebLog, LogLevel::Debug, + ("%s: %d", PREF_AUDIOIPC_STACK_SIZE, (int)sAudioIPCStackSize)); + + audioipc2::AudioIpcInitParams initParams{}; + initParams.mStackSize = sAudioIPCStackSize; + initParams.mServerConnection = + sIPCConnection->ClonePlatformHandle().release(); + initParams.mThreadCreateCallback = [](const char* aName) { + PROFILER_REGISTER_THREAD(aName); + }; + initParams.mThreadDestroyCallback = []() { PROFILER_UNREGISTER_THREAD(); }; + + cubeb* temp = nullptr; + rv = audioipc2::audioipc2_client_init(&temp, sBrandName, &initParams); + if (temp) { + sCubebHandle = new CubebHandle(temp); + } + } else { +#endif // MOZ_CUBEB_REMOTING +#ifdef XP_WIN + mozilla::mscom::EnsureMTA([&]() -> void { +#endif + cubeb* temp = nullptr; + rv = cubeb_init(&temp, sBrandName, sCubebBackendName); + if (temp) { + sCubebHandle = new CubebHandle(temp); + } +#ifdef XP_WIN + }); +#endif +#ifdef MOZ_CUBEB_REMOTING + } + sIPCConnection = nullptr; +#endif // MOZ_CUBEB_REMOTING + NS_WARNING_ASSERTION(rv == CUBEB_OK, "Could not get a cubeb context."); + sCubebState = + (rv == CUBEB_OK) ? CubebState::Initialized : CubebState::Uninitialized; + + return sCubebHandle; +} + +void ReportCubebBackendUsed() { + RefPtr handle; + { + StaticMutexAutoLock lock(sMutex); + sAudioStreamInitEverSucceeded = true; + handle = sCubebHandle; + } + + MOZ_RELEASE_ASSERT(handle.get()); + + LABELS_MEDIA_AUDIO_BACKEND label = LABELS_MEDIA_AUDIO_BACKEND::unknown; + auto backend = + kTelemetryBackendLabel.find(cubeb_get_backend_id(handle->Context())); + if (backend != kTelemetryBackendLabel.end()) { + label = backend->second; + } + AccumulateCategorical(label); + + mozilla::glean::media_audio::backend + .Get(backend != kTelemetryBackendLabel.end() + ? nsDependentCString(cubeb_get_backend_id(handle->Context())) + : nsCString("unknown"_ns)) + .Add(); +} + +void ReportCubebStreamInitFailure(bool aIsFirst) { + StaticMutexAutoLock lock(sMutex); + if (!aIsFirst && !sAudioStreamInitEverSucceeded) { + // This machine has no audio hardware, or it's in really bad shape, don't + // send this info, since we want CUBEB_BACKEND_INIT_FAILURE_OTHER to detect + // failures to open multiple streams in a process over time. + return; + } + AccumulateCategorical(aIsFirst ? LABELS_MEDIA_AUDIO_INIT_FAILURE::first + : LABELS_MEDIA_AUDIO_INIT_FAILURE::other); + mozilla::glean::media_audio::init_failure + .EnumGet(aIsFirst ? 
mozilla::glean::media_audio::InitFailureLabel::eFirst + : mozilla::glean::media_audio::InitFailureLabel::eOther) + .Add(); +} + +uint32_t GetCubebPlaybackLatencyInMilliseconds() { + StaticMutexAutoLock lock(sMutex); + return sCubebPlaybackLatencyInMilliseconds; +} + +bool CubebPlaybackLatencyPrefSet() { + StaticMutexAutoLock lock(sMutex); + return sCubebPlaybackLatencyPrefSet; +} + +bool CubebMTGLatencyPrefSet() { + StaticMutexAutoLock lock(sMutex); + return sCubebMTGLatencyPrefSet; +} + +uint32_t GetCubebMTGLatencyInFrames(cubeb_stream_params* params) { + StaticMutexAutoLock lock(sMutex); + if (sCubebMTGLatencyPrefSet) { + MOZ_ASSERT(sCubebMTGLatencyInFrames > 0); + return sCubebMTGLatencyInFrames; + } + +#ifdef MOZ_WIDGET_ANDROID + int frames = AndroidGetAudioOutputFramesPerBuffer(); + if (frames > 0) { + return frames; + } else { + return 512; + } +#else + RefPtr handle = GetCubebUnlocked(); + if (!handle) { + return sCubebMTGLatencyInFrames; // default 512 + } + uint32_t latency_frames = 0; + int cubeb_result = CUBEB_OK; + + { + StaticMutexAutoUnlock unlock(sMutex); + cubeb_result = + cubeb_get_min_latency(handle->Context(), params, &latency_frames); + } + + if (cubeb_result != CUBEB_OK) { + NS_WARNING("Could not get minimal latency from cubeb."); + return sCubebMTGLatencyInFrames; // default 512 + } + return latency_frames; +#endif +} + +static const char* gInitCallbackPrefs[] = { + PREF_VOLUME_SCALE, PREF_CUBEB_OUTPUT_DEVICE, + PREF_CUBEB_LATENCY_PLAYBACK, PREF_CUBEB_LATENCY_MTG, + PREF_CUBEB_BACKEND, PREF_CUBEB_FORCE_NULL_CONTEXT, + PREF_CUBEB_SANDBOX, PREF_AUDIOIPC_STACK_SIZE, + PREF_AUDIOIPC_SHM_AREA_SIZE, nullptr, +}; + +static const char* gCallbackPrefs[] = { + PREF_CUBEB_FORCE_SAMPLE_RATE, + // We don't want to call the callback on startup, because the pref is the + // empty string by default ("", which means "logging disabled"). Because the + // logging can be enabled via environment variables (MOZ_LOG="module:5"), + // calling this callback on init would immediately re-disable the logging. + PREF_CUBEB_LOGGING_LEVEL, + nullptr, +}; + +void InitLibrary() { + Preferences::RegisterCallbacksAndCall(PrefChanged, gInitCallbackPrefs); + Preferences::RegisterCallbacks(PrefChanged, gCallbackPrefs); + + if (MOZ_LOG_TEST(gCubebLog, LogLevel::Verbose)) { + cubeb_set_log_callback(CUBEB_LOG_VERBOSE, CubebLogCallback); + } else if (MOZ_LOG_TEST(gCubebLog, LogLevel::Error)) { + cubeb_set_log_callback(CUBEB_LOG_NORMAL, CubebLogCallback); + } + +#ifndef MOZ_WIDGET_ANDROID + NS_DispatchToMainThread( + NS_NewRunnableFunction("CubebUtils::InitLibrary", &InitBrandName)); +#endif +#ifdef MOZ_CUBEB_REMOTING + if (sCubebSandbox && XRE_IsContentProcess()) { +# if defined(XP_LINUX) && !defined(MOZ_WIDGET_ANDROID) + if (atp_set_real_time_limit(0, 48000)) { + NS_WARNING("could not set real-time limit in CubebUtils::InitLibrary"); + } + InstallSoftRealTimeLimitHandler(); +# endif + InitAudioIPCConnection(); + } +#endif + + // Ensure the CallbackThreadRegistry is not created in an audio callback by + // creating it now. + Unused << CallbackThreadRegistry::Get(); +} + +void ShutdownLibrary() { + Preferences::UnregisterCallbacks(PrefChanged, gInitCallbackPrefs); + Preferences::UnregisterCallbacks(PrefChanged, gCallbackPrefs); + + cubeb_set_log_callback(CUBEB_LOG_DISABLED, nullptr); + RefPtr trash; + StaticMutexAutoLock lock(sMutex); + trash = sCubebHandle.forget(); + sBrandName = nullptr; + sCubebBackendName = nullptr; + // This will ensure we don't try to re-create a context. 
+ sCubebState = CubebState::Shutdown; + + if (trash) { + StaticMutexAutoUnlock unlock(sMutex); + nsrefcnt count = trash.forget().take()->Release(); + MOZ_RELEASE_ASSERT(!count, + "ShutdownLibrary should be releasing the last reference " + "to the cubeb ctx!"); + } + +#ifdef MOZ_CUBEB_REMOTING + sIPCConnection = nullptr; + ShutdownAudioIPCServer(); +#endif +} + +bool SandboxEnabled() { +#ifdef MOZ_CUBEB_REMOTING + StaticMutexAutoLock lock(sMutex); + return !!sCubebSandbox; +#else + return false; +#endif +} + +uint32_t MaxNumberOfChannels() { + RefPtr handle = GetCubeb(); + uint32_t maxNumberOfChannels; + if (handle && cubeb_get_max_channel_count(handle->Context(), + &maxNumberOfChannels) == CUBEB_OK) { + return maxNumberOfChannels; + } + + return 0; +} + +void GetCurrentBackend(nsAString& aBackend) { + RefPtr handle = GetCubeb(); + if (handle) { + const char* backend = cubeb_get_backend_id(handle->Context()); + if (backend) { + aBackend.AssignASCII(backend); + return; + } + } + aBackend.AssignLiteral("unknown"); +} + +char* GetForcedOutputDevice() { + StaticMutexAutoLock lock(sMutex); + return sCubebOutputDeviceName; +} + +cubeb_stream_prefs GetDefaultStreamPrefs(cubeb_device_type aType) { + cubeb_stream_prefs prefs = CUBEB_STREAM_PREF_NONE; +#ifdef XP_WIN + if (StaticPrefs::media_cubeb_wasapi_raw() & static_cast(aType)) { + prefs |= CUBEB_STREAM_PREF_RAW; + } +#endif + return prefs; +} + +bool RouteOutputAsVoice() { return sRouteOutputAsVoice; } + +long datacb(cubeb_stream*, void*, const void*, void* out_buffer, long nframes) { + PodZero(static_cast(out_buffer), nframes * 2); + return nframes; +} + +void statecb(cubeb_stream*, void*, cubeb_state) {} + +bool EstimatedRoundTripLatencyDefaultDevices(double* aMean, double* aStdDev) { + RefPtr handle = GetCubeb(); + if (!handle) { + MOZ_LOG(gCubebLog, LogLevel::Error, ("No cubeb context, bailing.")); + return false; + } + nsTArray roundtripLatencies; + // Create a cubeb stream with the correct latency and default input/output + // devices (mono/stereo channels). Wait for two seconds, get the latency a few + // times. 
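  // Concretely: the loop below polls cubeb every 50 ms for roughly two seconds
  // (40 iterations). Each successful poll contributes one round-trip sample in
  // seconds, (outputLatency + inputLatency) / rate, and the function reports
  // the arithmetic mean of those samples together with the standard deviation
  // (the square root of the average squared deviation from that mean).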
+ int rv; + uint32_t rate; + uint32_t latencyFrames; + rv = cubeb_get_preferred_sample_rate(handle->Context(), &rate); + if (rv != CUBEB_OK) { + MOZ_LOG(gCubebLog, LogLevel::Error, ("Could not get preferred rate")); + return false; + } + + cubeb_stream_params output_params; + output_params.format = CUBEB_SAMPLE_FLOAT32NE; + output_params.rate = rate; + output_params.channels = 2; + output_params.layout = CUBEB_LAYOUT_UNDEFINED; + output_params.prefs = GetDefaultStreamPrefs(CUBEB_DEVICE_TYPE_OUTPUT); + + latencyFrames = GetCubebMTGLatencyInFrames(&output_params); + + cubeb_stream_params input_params; + input_params.format = CUBEB_SAMPLE_FLOAT32NE; + input_params.rate = rate; + input_params.channels = 1; + input_params.layout = CUBEB_LAYOUT_UNDEFINED; + input_params.prefs = GetDefaultStreamPrefs(CUBEB_DEVICE_TYPE_INPUT); + + cubeb_stream* stm; + rv = cubeb_stream_init(handle->Context(), &stm, + "about:support latency estimation", NULL, + &input_params, NULL, &output_params, latencyFrames, + datacb, statecb, NULL); + if (rv != CUBEB_OK) { + MOZ_LOG(gCubebLog, LogLevel::Error, ("Could not get init stream")); + return false; + } + + rv = cubeb_stream_start(stm); + if (rv != CUBEB_OK) { + MOZ_LOG(gCubebLog, LogLevel::Error, ("Could not start stream")); + return false; + } + // +-2s + for (uint32_t i = 0; i < 40; i++) { + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + uint32_t inputLatency, outputLatency, rvIn, rvOut; + rvOut = cubeb_stream_get_latency(stm, &outputLatency); + if (rvOut) { + MOZ_LOG(gCubebLog, LogLevel::Error, ("Could not get output latency")); + } + rvIn = cubeb_stream_get_input_latency(stm, &inputLatency); + if (rvIn) { + MOZ_LOG(gCubebLog, LogLevel::Error, ("Could not get input latency")); + } + if (rvIn != CUBEB_OK || rvOut != CUBEB_OK) { + continue; + } + + double roundTrip = static_cast(outputLatency + inputLatency) / rate; + roundtripLatencies.AppendElement(roundTrip); + } + rv = cubeb_stream_stop(stm); + if (rv != CUBEB_OK) { + MOZ_LOG(gCubebLog, LogLevel::Error, ("Could not stop the stream")); + } + + *aMean = 0.0; + *aStdDev = 0.0; + double variance = 0.0; + for (uint32_t i = 0; i < roundtripLatencies.Length(); i++) { + *aMean += roundtripLatencies[i]; + } + + *aMean /= roundtripLatencies.Length(); + + for (uint32_t i = 0; i < roundtripLatencies.Length(); i++) { + variance += pow(roundtripLatencies[i] - *aMean, 2.); + } + variance /= roundtripLatencies.Length(); + + *aStdDev = sqrt(variance); + + MOZ_LOG(gCubebLog, LogLevel::Debug, + ("Default device roundtrip latency in seconds %lf (stddev: %lf)", + *aMean, *aStdDev)); + + cubeb_stream_destroy(stm); + + return true; +} + +#ifdef MOZ_WIDGET_ANDROID +int32_t AndroidGetAudioOutputSampleRate() { +# if defined(MOZ_ANDROID_CONTENT_SERVICE_ISOLATED_PROCESS) + return 44100; // TODO: Remote value; will be handled in following patch. +# else + int32_t sample_rate = java::GeckoAppShell::GetAudioOutputSampleRate(); + return sample_rate; +# endif +} +int32_t AndroidGetAudioOutputFramesPerBuffer() { +# if defined(MOZ_ANDROID_CONTENT_SERVICE_ISOLATED_PROCESS) + return 512; // TODO: Remote value; will be handled in following patch. 
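// Rough arithmetic for context (the figures are only examples): one audio
// callback period follows from these two helpers, e.g.
//
//   int32_t rate = CubebUtils::AndroidGetAudioOutputSampleRate();        // e.g. 48000
//   int32_t frames = CubebUtils::AndroidGetAudioOutputFramesPerBuffer(); // e.g. 512
//   double callbackMs = 1000.0 * frames / rate;                          // ~10.7 ms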
+# else + int32_t frames = java::GeckoAppShell::GetAudioOutputFramesPerBuffer(); + return frames; +# endif +} +#endif + +} // namespace CubebUtils +} // namespace mozilla diff --git a/dom/media/CubebUtils.h b/dom/media/CubebUtils.h new file mode 100644 index 0000000000..c05c8d2449 --- /dev/null +++ b/dom/media/CubebUtils.h @@ -0,0 +1,120 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#if !defined(CubebUtils_h_) +# define CubebUtils_h_ + +# include "cubeb/cubeb.h" + +# include "AudioSampleFormat.h" +# include "nsString.h" +# include "nsISupportsImpl.h" + +class AudioDeviceInfo; + +MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(cubeb_stream_prefs) + +namespace mozilla { + +class CallbackThreadRegistry; + +namespace CubebUtils { + +typedef cubeb_devid AudioDeviceID; + +template +struct ToCubebFormat { + static const cubeb_sample_format value = CUBEB_SAMPLE_FLOAT32NE; +}; + +template <> +struct ToCubebFormat { + static const cubeb_sample_format value = CUBEB_SAMPLE_S16NE; +}; + +class CubebHandle { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CubebHandle) + explicit CubebHandle(cubeb* aCubeb) : mCubeb(aCubeb) { + MOZ_RELEASE_ASSERT(mCubeb); + }; + CubebHandle(const CubebHandle&) = delete; + cubeb* Context() const { return mCubeb.get(); } + + private: + struct CubebDeletePolicy { + void operator()(cubeb* aCubeb) { cubeb_destroy(aCubeb); } + }; + const UniquePtr mCubeb; + ~CubebHandle() = default; +}; + +// Initialize Audio Library. Some Audio backends require initializing the +// library before using it. +void InitLibrary(); + +// Shutdown Audio Library. Some Audio backends require shutting down the +// library after using it. +void ShutdownLibrary(); + +bool SandboxEnabled(); + +// Returns the maximum number of channels supported by the audio hardware. +uint32_t MaxNumberOfChannels(); + +// Get the sample rate the hardware/mixer runs at. Thread safe. +uint32_t PreferredSampleRate(bool aShouldResistFingerprinting); + +// Initialize a cubeb stream. A pass through wrapper for cubeb_stream_init, +// that can simulate streams that are very slow to start, by setting the pref +// media.cubeb.slow_stream_init_ms. +int CubebStreamInit(cubeb* context, cubeb_stream** stream, + char const* stream_name, cubeb_devid input_device, + cubeb_stream_params* input_stream_params, + cubeb_devid output_device, + cubeb_stream_params* output_stream_params, + uint32_t latency_frames, cubeb_data_callback data_callback, + cubeb_state_callback state_callback, void* user_ptr); + +enum Side { Input, Output }; + +double GetVolumeScale(); +bool GetFirstStream(); +RefPtr GetCubeb(); +void ReportCubebStreamInitFailure(bool aIsFirstStream); +void ReportCubebBackendUsed(); +uint32_t GetCubebPlaybackLatencyInMilliseconds(); +uint32_t GetCubebMTGLatencyInFrames(cubeb_stream_params* params); +bool CubebLatencyPrefSet(); +void GetCurrentBackend(nsAString& aBackend); +cubeb_stream_prefs GetDefaultStreamPrefs(cubeb_device_type aType); +char* GetForcedOutputDevice(); +// No-op on all platforms but Android, where it tells the device's AudioManager +// to switch to "communication mode", which might change audio routing, +// bluetooth communication type, etc. 
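// Illustrative use (an assumption, not part of this patch): callers would
// bracket a call with it, e.g.
//
//   CubebUtils::SetInCommunication(true);   // a WebRTC call becomes active
//   /* ... audio for the call ... */
//   CubebUtils::SetInCommunication(false);  // the call is torn down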
+void SetInCommunication(bool aInCommunication); +// Returns true if the output streams should be routed like a stream containing +// voice data, and not generic audio. This can influence audio processing and +// device selection. +bool RouteOutputAsVoice(); +// Returns, in seconds, the roundtrip latency Gecko thinks there is between the +// default input and output devices. This is for diagnosing purposes, the +// latency figures are best used directly from the cubeb streams themselves, as +// the devices being used matter. This is blocking. +bool EstimatedRoundTripLatencyDefaultDevices(double* aMean, double* aStdDev); + +# ifdef MOZ_WIDGET_ANDROID +int32_t AndroidGetAudioOutputSampleRate(); +int32_t AndroidGetAudioOutputFramesPerBuffer(); +# endif + +# ifdef ENABLE_SET_CUBEB_BACKEND +void ForceSetCubebContext(cubeb* aCubebContext); +# endif +} // namespace CubebUtils +} // namespace mozilla + +#endif // CubebUtils_h_ diff --git a/dom/media/DOMMediaStream.cpp b/dom/media/DOMMediaStream.cpp new file mode 100644 index 0000000000..5031882c19 --- /dev/null +++ b/dom/media/DOMMediaStream.cpp @@ -0,0 +1,545 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "DOMMediaStream.h" + +#include "AudioCaptureTrack.h" +#include "AudioChannelAgent.h" +#include "AudioStreamTrack.h" +#include "MediaTrackGraph.h" +#include "MediaTrackGraphImpl.h" +#include "MediaTrackListener.h" +#include "Tracing.h" +#include "VideoStreamTrack.h" +#include "mozilla/dom/AudioTrack.h" +#include "mozilla/dom/AudioTrackList.h" +#include "mozilla/dom/DocGroup.h" +#include "mozilla/dom/HTMLCanvasElement.h" +#include "mozilla/dom/MediaStreamBinding.h" +#include "mozilla/dom/MediaStreamTrackEvent.h" +#include "mozilla/dom/Promise.h" +#include "mozilla/dom/VideoTrack.h" +#include "mozilla/dom/VideoTrackList.h" +#include "mozilla/media/MediaUtils.h" +#include "nsContentUtils.h" +#include "nsGlobalWindowInner.h" +#include "nsIUUIDGenerator.h" +#include "nsPIDOMWindow.h" +#include "nsProxyRelease.h" +#include "nsRFPService.h" +#include "nsServiceManagerUtils.h" + +#ifdef LOG +# undef LOG +#endif + +using namespace mozilla; +using namespace mozilla::dom; +using namespace mozilla::layers; +using namespace mozilla::media; + +static LazyLogModule gMediaStreamLog("MediaStream"); +#define LOG(type, msg) MOZ_LOG(gMediaStreamLog, type, msg) + +static bool ContainsLiveTracks( + const nsTArray>& aTracks) { + for (const auto& track : aTracks) { + if (track->ReadyState() == MediaStreamTrackState::Live) { + return true; + } + } + + return false; +} + +static bool ContainsLiveAudioTracks( + const nsTArray>& aTracks) { + for (const auto& track : aTracks) { + if (track->AsAudioStreamTrack() && + track->ReadyState() == MediaStreamTrackState::Live) { + return true; + } + } + + return false; +} + +class DOMMediaStream::PlaybackTrackListener : public MediaStreamTrackConsumer { + public: + NS_INLINE_DECL_REFCOUNTING(PlaybackTrackListener) + + explicit PlaybackTrackListener(DOMMediaStream* aStream) : mStream(aStream) {} + + void NotifyEnded(MediaStreamTrack* aTrack) override { + if (!mStream) { + return; + } + + if (!aTrack) { + MOZ_ASSERT(false); + return; + } + + MOZ_ASSERT(mStream->HasTrack(*aTrack)); + mStream->NotifyTrackRemoved(aTrack); + } + + protected: + virtual ~PlaybackTrackListener() = default; + 
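// Holding the stream weakly is deliberate: DOMMediaStream keeps this listener
// alive through a RefPtr (mPlaybackTrackListener), so a strong back-reference
// here would form a reference cycle; DOMMediaStream enables the weak reference
// by deriving from SupportsWeakPtr.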
+ WeakPtr mStream; +}; + +NS_IMPL_CYCLE_COLLECTION_CLASS(DOMMediaStream) + +NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(DOMMediaStream, + DOMEventTargetHelper) + tmp->Destroy(); + NS_IMPL_CYCLE_COLLECTION_UNLINK(mTracks) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mConsumersToKeepAlive) + NS_IMPL_CYCLE_COLLECTION_UNLINK_WEAK_PTR +NS_IMPL_CYCLE_COLLECTION_UNLINK_END + +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(DOMMediaStream, + DOMEventTargetHelper) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mTracks) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mConsumersToKeepAlive) +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END + +NS_IMPL_ADDREF_INHERITED(DOMMediaStream, DOMEventTargetHelper) +NS_IMPL_RELEASE_INHERITED(DOMMediaStream, DOMEventTargetHelper) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(DOMMediaStream) + NS_INTERFACE_MAP_ENTRY_CONCRETE(DOMMediaStream) +NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper) + +DOMMediaStream::DOMMediaStream(nsPIDOMWindowInner* aWindow) + : DOMEventTargetHelper(aWindow), + mPlaybackTrackListener(MakeAndAddRef(this)) { + nsresult rv; + nsCOMPtr uuidgen = + do_GetService("@mozilla.org/uuid-generator;1", &rv); + + if (NS_SUCCEEDED(rv) && uuidgen) { + nsID uuid; + memset(&uuid, 0, sizeof(uuid)); + rv = uuidgen->GenerateUUIDInPlace(&uuid); + if (NS_SUCCEEDED(rv)) { + char buffer[NSID_LENGTH]; + uuid.ToProvidedString(buffer); + mID = NS_ConvertASCIItoUTF16(buffer); + } + } +} + +DOMMediaStream::~DOMMediaStream() { Destroy(); } + +void DOMMediaStream::Destroy() { + LOG(LogLevel::Debug, ("DOMMediaStream %p Being destroyed.", this)); + for (const auto& track : mTracks) { + // We must remove ourselves from each track's principal change observer list + // before we die. + if (!track->Ended()) { + track->RemoveConsumer(mPlaybackTrackListener); + } + } + mTrackListeners.Clear(); +} + +JSObject* DOMMediaStream::WrapObject(JSContext* aCx, + JS::Handle aGivenProto) { + return dom::MediaStream_Binding::Wrap(aCx, this, aGivenProto); +} + +/* static */ +already_AddRefed DOMMediaStream::Constructor( + const GlobalObject& aGlobal, ErrorResult& aRv) { + Sequence> emptyTrackSeq; + return Constructor(aGlobal, emptyTrackSeq, aRv); +} + +/* static */ +already_AddRefed DOMMediaStream::Constructor( + const GlobalObject& aGlobal, const DOMMediaStream& aStream, + ErrorResult& aRv) { + nsTArray> tracks; + aStream.GetTracks(tracks); + + Sequence> nonNullTrackSeq; + if (!nonNullTrackSeq.SetLength(tracks.Length(), fallible)) { + MOZ_ASSERT(false); + aRv.Throw(NS_ERROR_OUT_OF_MEMORY); + return nullptr; + } + + for (size_t i = 0; i < tracks.Length(); ++i) { + nonNullTrackSeq[i] = tracks[i]; + } + + return Constructor(aGlobal, nonNullTrackSeq, aRv); +} + +/* static */ +already_AddRefed DOMMediaStream::Constructor( + const GlobalObject& aGlobal, + const Sequence>& aTracks, + ErrorResult& aRv) { + nsCOMPtr ownerWindow = + do_QueryInterface(aGlobal.GetAsSupports()); + if (!ownerWindow) { + aRv.Throw(NS_ERROR_FAILURE); + return nullptr; + } + + auto newStream = MakeRefPtr(ownerWindow); + for (MediaStreamTrack& track : aTracks) { + newStream->AddTrack(track); + } + return newStream.forget(); +} + +already_AddRefed DOMMediaStream::CountUnderlyingStreams( + const GlobalObject& aGlobal, ErrorResult& aRv) { + nsCOMPtr window = + do_QueryInterface(aGlobal.GetAsSupports()); + if (!window) { + aRv.Throw(NS_ERROR_UNEXPECTED); + return nullptr; + } + + nsCOMPtr go = do_QueryInterface(aGlobal.GetAsSupports()); + if (!go) { + aRv.Throw(NS_ERROR_UNEXPECTED); + return nullptr; + } + + RefPtr p = Promise::Create(go, aRv); + if 
(aRv.Failed()) { + return nullptr; + } + + MediaTrackGraph* graph = MediaTrackGraph::GetInstanceIfExists( + window, MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE, + MediaTrackGraph::DEFAULT_OUTPUT_DEVICE); + if (!graph) { + p->MaybeResolve(0); + return p.forget(); + } + + auto* graphImpl = static_cast(graph); + + class Counter : public ControlMessage { + public: + Counter(MediaTrackGraphImpl* aGraph, const RefPtr& aPromise) + : ControlMessage(nullptr), mGraph(aGraph), mPromise(aPromise) { + MOZ_ASSERT(NS_IsMainThread()); + } + + void Run() override { + TRACE("DOMMediaStream::Counter") + uint32_t streams = + mGraph->mTracks.Length() + mGraph->mSuspendedTracks.Length(); + mGraph->DispatchToMainThreadStableState(NS_NewRunnableFunction( + "DOMMediaStream::CountUnderlyingStreams (stable state)", + [promise = std::move(mPromise), streams]() mutable { + NS_DispatchToMainThread(NS_NewRunnableFunction( + "DOMMediaStream::CountUnderlyingStreams", + [promise = std::move(promise), streams]() { + promise->MaybeResolve(streams); + })); + })); + } + + // mPromise can only be AddRefed/Released on main thread. + // In case of shutdown, Run() does not run, so we dispatch mPromise to be + // released on main thread here. + void RunDuringShutdown() override { + NS_ReleaseOnMainThread( + "DOMMediaStream::CountUnderlyingStreams::Counter::RunDuringShutdown", + mPromise.forget()); + } + + private: + // mGraph owns this Counter instance and decides its lifetime. + MediaTrackGraphImpl* mGraph; + RefPtr mPromise; + }; + graphImpl->AppendMessage(MakeUnique(graphImpl, p)); + + return p.forget(); +} + +void DOMMediaStream::GetId(nsAString& aID) const { aID = mID; } + +void DOMMediaStream::GetAudioTracks( + nsTArray>& aTracks) const { + for (const auto& track : mTracks) { + if (AudioStreamTrack* t = track->AsAudioStreamTrack()) { + aTracks.AppendElement(t); + } + } +} + +void DOMMediaStream::GetAudioTracks( + nsTArray>& aTracks) const { + for (const auto& track : mTracks) { + if (track->AsAudioStreamTrack()) { + aTracks.AppendElement(track); + } + } +} + +void DOMMediaStream::GetVideoTracks( + nsTArray>& aTracks) const { + for (const auto& track : mTracks) { + if (VideoStreamTrack* t = track->AsVideoStreamTrack()) { + aTracks.AppendElement(t); + } + } +} + +void DOMMediaStream::GetVideoTracks( + nsTArray>& aTracks) const { + for (const auto& track : mTracks) { + if (track->AsVideoStreamTrack()) { + aTracks.AppendElement(track); + } + } +} + +void DOMMediaStream::GetTracks( + nsTArray>& aTracks) const { + for (const auto& track : mTracks) { + aTracks.AppendElement(track); + } +} + +void DOMMediaStream::AddTrack(MediaStreamTrack& aTrack) { + LOG(LogLevel::Info, ("DOMMediaStream %p Adding track %p (from track %p)", + this, &aTrack, aTrack.GetTrack())); + + if (HasTrack(aTrack)) { + LOG(LogLevel::Debug, + ("DOMMediaStream %p already contains track %p", this, &aTrack)); + return; + } + + mTracks.AppendElement(&aTrack); + + if (!aTrack.Ended()) { + NotifyTrackAdded(&aTrack); + } +} + +void DOMMediaStream::RemoveTrack(MediaStreamTrack& aTrack) { + if (static_cast(gMediaStreamLog)->ShouldLog(LogLevel::Info)) { + if (aTrack.Ended()) { + LOG(LogLevel::Info, + ("DOMMediaStream %p Removing (ended) track %p", this, &aTrack)); + } else { + LOG(LogLevel::Info, + ("DOMMediaStream %p Removing track %p (from track %p)", this, &aTrack, + aTrack.GetTrack())); + } + } + + if (!mTracks.RemoveElement(&aTrack)) { + LOG(LogLevel::Debug, + ("DOMMediaStream %p does not contain track %p", this, &aTrack)); + return; + } + + if (!aTrack.Ended()) { + 
NotifyTrackRemoved(&aTrack); + } +} + +already_AddRefed DOMMediaStream::Clone() { + auto newStream = MakeRefPtr(GetOwner()); + + LOG(LogLevel::Info, + ("DOMMediaStream %p created clone %p", this, newStream.get())); + + for (const auto& track : mTracks) { + LOG(LogLevel::Debug, + ("DOMMediaStream %p forwarding external track %p to clone %p", this, + track.get(), newStream.get())); + RefPtr clone = track->Clone(); + newStream->AddTrack(*clone); + } + + return newStream.forget(); +} + +bool DOMMediaStream::Active() const { return mActive; } +bool DOMMediaStream::Audible() const { return mAudible; } + +MediaStreamTrack* DOMMediaStream::GetTrackById(const nsAString& aId) const { + for (const auto& track : mTracks) { + nsString id; + track->GetId(id); + if (id == aId) { + return track; + } + } + return nullptr; +} + +bool DOMMediaStream::HasTrack(const MediaStreamTrack& aTrack) const { + return mTracks.Contains(&aTrack); +} + +void DOMMediaStream::AddTrackInternal(MediaStreamTrack* aTrack) { + LOG(LogLevel::Debug, + ("DOMMediaStream %p Adding owned track %p", this, aTrack)); + AddTrack(*aTrack); + DispatchTrackEvent(u"addtrack"_ns, aTrack); +} + +void DOMMediaStream::RemoveTrackInternal(MediaStreamTrack* aTrack) { + LOG(LogLevel::Debug, + ("DOMMediaStream %p Removing owned track %p", this, aTrack)); + if (!HasTrack(*aTrack)) { + return; + } + RemoveTrack(*aTrack); + DispatchTrackEvent(u"removetrack"_ns, aTrack); +} + +already_AddRefed DOMMediaStream::GetPrincipal() { + if (!GetOwner()) { + return nullptr; + } + nsCOMPtr principal = + nsGlobalWindowInner::Cast(GetOwner())->GetPrincipal(); + for (const auto& t : mTracks) { + if (t->Ended()) { + continue; + } + nsContentUtils::CombineResourcePrincipals(&principal, t->GetPrincipal()); + } + return principal.forget(); +} + +void DOMMediaStream::NotifyActive() { + LOG(LogLevel::Info, ("DOMMediaStream %p NotifyActive(). ", this)); + + MOZ_ASSERT(mActive); + for (int32_t i = mTrackListeners.Length() - 1; i >= 0; --i) { + mTrackListeners[i]->NotifyActive(); + } +} + +void DOMMediaStream::NotifyInactive() { + LOG(LogLevel::Info, ("DOMMediaStream %p NotifyInactive(). ", this)); + + MOZ_ASSERT(!mActive); + for (int32_t i = mTrackListeners.Length() - 1; i >= 0; --i) { + mTrackListeners[i]->NotifyInactive(); + } +} + +void DOMMediaStream::NotifyAudible() { + LOG(LogLevel::Info, ("DOMMediaStream %p NotifyAudible(). ", this)); + + MOZ_ASSERT(mAudible); + for (int32_t i = mTrackListeners.Length() - 1; i >= 0; --i) { + mTrackListeners[i]->NotifyAudible(); + } +} + +void DOMMediaStream::NotifyInaudible() { + LOG(LogLevel::Info, ("DOMMediaStream %p NotifyInaudible(). ", this)); + + MOZ_ASSERT(!mAudible); + for (int32_t i = mTrackListeners.Length() - 1; i >= 0; --i) { + mTrackListeners[i]->NotifyInaudible(); + } +} + +void DOMMediaStream::RegisterTrackListener(TrackListener* aListener) { + MOZ_ASSERT(NS_IsMainThread()); + + mTrackListeners.AppendElement(aListener); +} + +void DOMMediaStream::UnregisterTrackListener(TrackListener* aListener) { + MOZ_ASSERT(NS_IsMainThread()); + mTrackListeners.RemoveElement(aListener); +} + +void DOMMediaStream::NotifyTrackAdded(const RefPtr& aTrack) { + MOZ_ASSERT(NS_IsMainThread()); + + aTrack->AddConsumer(mPlaybackTrackListener); + + for (int32_t i = mTrackListeners.Length() - 1; i >= 0; --i) { + mTrackListeners[i]->NotifyTrackAdded(aTrack); + } + + if (!mActive) { + // Check if we became active. + if (ContainsLiveTracks(mTracks)) { + mActive = true; + NotifyActive(); + } + } + + if (!mAudible) { + // Check if we became audible. 
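// (The inverse transitions live in NotifyTrackRemoved() below, which re-checks
// both flags and fires NotifyInaudible()/NotifyInactive() once the last live
// audio track, respectively the last live track, has gone away.)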
+ if (ContainsLiveAudioTracks(mTracks)) { + mAudible = true; + NotifyAudible(); + } + } +} + +void DOMMediaStream::NotifyTrackRemoved( + const RefPtr& aTrack) { + MOZ_ASSERT(NS_IsMainThread()); + + if (aTrack) { + // aTrack may be null to allow HTMLMediaElement::MozCaptureStream streams + // to be played until the source media element has ended. The source media + // element will then call NotifyTrackRemoved(nullptr) to signal that we can + // go inactive, regardless of the timing of the last track ending. + + aTrack->RemoveConsumer(mPlaybackTrackListener); + + for (int32_t i = mTrackListeners.Length() - 1; i >= 0; --i) { + mTrackListeners[i]->NotifyTrackRemoved(aTrack); + } + + if (!mActive) { + NS_ASSERTION(false, "Shouldn't remove a live track if already inactive"); + return; + } + } + + if (mAudible) { + // Check if we became inaudible. + if (!ContainsLiveAudioTracks(mTracks)) { + mAudible = false; + NotifyInaudible(); + } + } + + // Check if we became inactive. + if (!ContainsLiveTracks(mTracks)) { + mActive = false; + NotifyInactive(); + } +} + +nsresult DOMMediaStream::DispatchTrackEvent( + const nsAString& aName, const RefPtr& aTrack) { + MediaStreamTrackEventInit init; + init.mTrack = aTrack; + + RefPtr event = + MediaStreamTrackEvent::Constructor(this, aName, init); + + return DispatchTrustedEvent(event); +} diff --git a/dom/media/DOMMediaStream.h b/dom/media/DOMMediaStream.h new file mode 100644 index 0000000000..b0a9f895bb --- /dev/null +++ b/dom/media/DOMMediaStream.h @@ -0,0 +1,252 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef NSDOMMEDIASTREAM_H_ +#define NSDOMMEDIASTREAM_H_ + +#include "ImageContainer.h" + +#include "nsCycleCollectionParticipant.h" +#include "nsWrapperCache.h" +#include "nsIPrincipal.h" +#include "MediaTrackConstraints.h" +#include "mozilla/DOMEventTargetHelper.h" +#include "mozilla/RelativeTimeline.h" +#include "mozilla/WeakPtr.h" + +namespace mozilla { + +class AbstractThread; +class DOMMediaStream; + +enum class BlockingMode; + +namespace dom { +class HTMLCanvasElement; +class MediaStreamTrack; +class MediaStreamTrackSource; +class AudioStreamTrack; +class VideoStreamTrack; +} // namespace dom + +namespace layers { +class ImageContainer; +class OverlayImage; +} // namespace layers + +#define NS_DOMMEDIASTREAM_IID \ + { \ + 0x8cb65468, 0x66c0, 0x444e, { \ + 0x89, 0x9f, 0x89, 0x1d, 0x9e, 0xd2, 0xbe, 0x7c \ + } \ + } + +/** + * DOMMediaStream is the implementation of the js-exposed MediaStream interface. + * + * This is a thin main-thread class grouping MediaStreamTracks together. + */ +class DOMMediaStream : public DOMEventTargetHelper, + public RelativeTimeline, + public SupportsWeakPtr { + typedef dom::MediaStreamTrack MediaStreamTrack; + typedef dom::AudioStreamTrack AudioStreamTrack; + typedef dom::VideoStreamTrack VideoStreamTrack; + typedef dom::MediaStreamTrackSource MediaStreamTrackSource; + + public: + typedef dom::MediaTrackConstraints MediaTrackConstraints; + + class TrackListener { + public: + virtual ~TrackListener() = default; + + /** + * Called when the DOMMediaStream has a live track added, either by + * script (addTrack()) or the source creating one. 
+ */ + virtual void NotifyTrackAdded(const RefPtr& aTrack){}; + + /** + * Called when the DOMMediaStream removes a live track from playback, either + * by script (removeTrack(), track.stop()) or the source ending it. + */ + virtual void NotifyTrackRemoved(const RefPtr& aTrack){}; + + /** + * Called when the DOMMediaStream has become active. + */ + virtual void NotifyActive(){}; + + /** + * Called when the DOMMediaStream has become inactive. + */ + virtual void NotifyInactive(){}; + + /** + * Called when the DOMMediaStream has become audible. + */ + virtual void NotifyAudible(){}; + + /** + * Called when the DOMMediaStream has become inaudible. + */ + virtual void NotifyInaudible(){}; + }; + + explicit DOMMediaStream(nsPIDOMWindowInner* aWindow); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(DOMMediaStream, DOMEventTargetHelper) + NS_DECLARE_STATIC_IID_ACCESSOR(NS_DOMMEDIASTREAM_IID) + + virtual JSObject* WrapObject(JSContext* aCx, + JS::Handle aGivenProto) override; + + // WebIDL + + static already_AddRefed Constructor( + const dom::GlobalObject& aGlobal, ErrorResult& aRv); + + static already_AddRefed Constructor( + const dom::GlobalObject& aGlobal, const DOMMediaStream& aStream, + ErrorResult& aRv); + + static already_AddRefed Constructor( + const dom::GlobalObject& aGlobal, + const dom::Sequence>& aTracks, + ErrorResult& aRv); + + static already_AddRefed CountUnderlyingStreams( + const dom::GlobalObject& aGlobal, ErrorResult& aRv); + + void GetId(nsAString& aID) const; + + void GetAudioTracks(nsTArray>& aTracks) const; + void GetAudioTracks(nsTArray>& aTracks) const; + void GetVideoTracks(nsTArray>& aTracks) const; + void GetVideoTracks(nsTArray>& aTracks) const; + void GetTracks(nsTArray>& aTracks) const; + MediaStreamTrack* GetTrackById(const nsAString& aId) const; + void AddTrack(MediaStreamTrack& aTrack); + void RemoveTrack(MediaStreamTrack& aTrack); + already_AddRefed Clone(); + + bool Active() const; + + IMPL_EVENT_HANDLER(addtrack) + IMPL_EVENT_HANDLER(removetrack) + + // NON-WebIDL + + // Returns true if this stream contains a live audio track. + bool Audible() const; + + /** + * Returns true if this DOMMediaStream has aTrack in mTracks. + */ + bool HasTrack(const MediaStreamTrack& aTrack) const; + + /** + * Returns a principal indicating who may access this stream. The stream + * contents can only be accessed by principals subsuming this principal. + */ + already_AddRefed GetPrincipal(); + + // Webrtc allows the remote side to name a stream whatever it wants, and we + // need to surface this to content. + void AssignId(const nsAString& aID) { mID = aID; } + + /** + * Adds a MediaStreamTrack to mTracks and raises "addtrack". + * + * Note that "addtrack" is raised synchronously and only has an effect if + * this MediaStream is already exposed to script. For spec compliance this is + * to be called from an async task. + */ + void AddTrackInternal(MediaStreamTrack* aTrack); + + /** + * Removes a MediaStreamTrack from mTracks and fires "removetrack" if it + * was removed. + * + * Note that "removetrack" is raised synchronously and only has an effect if + * this MediaStream is already exposed to script. For spec compliance this is + * to be called from an async task. + */ + void RemoveTrackInternal(MediaStreamTrack* aTrack); + + /** + * Add an nsISupports object that this stream will keep alive as long as + * the stream itself is alive. 
+ */ + void AddConsumerToKeepAlive(nsISupports* aConsumer) { + mConsumersToKeepAlive.AppendElement(aConsumer); + } + + // Registers a track listener to this MediaStream, for listening to changes + // to our track set. The caller must call UnregisterTrackListener before + // being destroyed, so we don't hold on to a dead pointer. Main thread only. + void RegisterTrackListener(TrackListener* aListener); + + // Unregisters a track listener from this MediaStream. The caller must call + // UnregisterTrackListener before being destroyed, so we don't hold on to + // a dead pointer. Main thread only. + void UnregisterTrackListener(TrackListener* aListener); + + protected: + virtual ~DOMMediaStream(); + + void Destroy(); + + // Dispatches NotifyActive() to all registered track listeners. + void NotifyActive(); + + // Dispatches NotifyInactive() to all registered track listeners. + void NotifyInactive(); + + // Dispatches NotifyAudible() to all registered track listeners. + void NotifyAudible(); + + // Dispatches NotifyInaudible() to all registered track listeners. + void NotifyInaudible(); + + // Dispatches NotifyTrackAdded() to all registered track listeners. + void NotifyTrackAdded(const RefPtr& aTrack); + + // Dispatches NotifyTrackRemoved() to all registered track listeners. + void NotifyTrackRemoved(const RefPtr& aTrack); + + // Dispatches "addtrack" or "removetrack". + nsresult DispatchTrackEvent(const nsAString& aName, + const RefPtr& aTrack); + + // MediaStreamTracks contained by this DOMMediaStream. + nsTArray> mTracks; + + // Listener tracking when live MediaStreamTracks in mTracks end. + class PlaybackTrackListener; + RefPtr mPlaybackTrackListener; + + nsString mID; + + // Keep these alive while the stream is alive. + nsTArray> mConsumersToKeepAlive; + + // The track listeners subscribe to changes in this stream's track set. + nsTArray mTrackListeners; + + // True if this stream has live tracks. + bool mActive = false; + + // True if this stream has live audio tracks. + bool mAudible = false; +}; + +NS_DEFINE_STATIC_IID_ACCESSOR(DOMMediaStream, NS_DOMMEDIASTREAM_IID) + +} // namespace mozilla + +#endif /* NSDOMMEDIASTREAM_H_ */ diff --git a/dom/media/DecoderTraits.cpp b/dom/media/DecoderTraits.cpp new file mode 100644 index 0000000000..af4d08ae4b --- /dev/null +++ b/dom/media/DecoderTraits.cpp @@ -0,0 +1,309 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "DecoderTraits.h" +#include "MediaContainerType.h" +#include "mozilla/Preferences.h" + +#include "OggDecoder.h" +#include "OggDemuxer.h" + +#include "WebMDecoder.h" +#include "WebMDemuxer.h" + +#ifdef MOZ_ANDROID_HLS_SUPPORT +# include "HLSDecoder.h" +#endif +#include "MP4Decoder.h" +#include "MP4Demuxer.h" +#include "MediaFormatReader.h" + +#include "MP3Decoder.h" +#include "MP3Demuxer.h" + +#include "WaveDecoder.h" +#include "WaveDemuxer.h" + +#include "ADTSDecoder.h" +#include "ADTSDemuxer.h" + +#include "FlacDecoder.h" +#include "FlacDemuxer.h" + +namespace mozilla { + +/* static */ +bool DecoderTraits::IsHttpLiveStreamingType(const MediaContainerType& aType) { + const auto& mimeType = aType.Type(); + return // For m3u8. 
+ // https://tools.ietf.org/html/draft-pantos-http-live-streaming-19#section-10 + mimeType == MEDIAMIMETYPE("application/vnd.apple.mpegurl") || + // Some sites serve these as the informal m3u type. + mimeType == MEDIAMIMETYPE("application/x-mpegurl") || + mimeType == MEDIAMIMETYPE("audio/mpegurl") || + mimeType == MEDIAMIMETYPE("audio/x-mpegurl"); +} + +/* static */ +bool DecoderTraits::IsMatroskaType(const MediaContainerType& aType) { + const auto& mimeType = aType.Type(); + // https://matroska.org/technical/specs/notes.html + return mimeType == MEDIAMIMETYPE("audio/x-matroska") || + mimeType == MEDIAMIMETYPE("video/x-matroska"); +} + +static CanPlayStatus CanHandleCodecsType( + const MediaContainerType& aType, DecoderDoctorDiagnostics* aDiagnostics) { + // We should have been given a codecs string, though it may be empty. + MOZ_ASSERT(aType.ExtendedType().HaveCodecs()); + + // Container type with the MIME type, no codecs. + const MediaContainerType mimeType(aType.Type()); + + if (OggDecoder::IsSupportedType(mimeType)) { + if (OggDecoder::IsSupportedType(aType)) { + return CANPLAY_YES; + } + // We can only reach this position if a particular codec was requested, + // ogg is supported and working: the codec must be invalid. + return CANPLAY_NO; + } + if (WaveDecoder::IsSupportedType(MediaContainerType(mimeType))) { + if (WaveDecoder::IsSupportedType(aType)) { + return CANPLAY_YES; + } + // We can only reach this position if a particular codec was requested, wave + // is supported and working: the codec must be invalid or not supported. + return CANPLAY_NO; + } + if (WebMDecoder::IsSupportedType(mimeType)) { + if (WebMDecoder::IsSupportedType(aType)) { + return CANPLAY_YES; + } + // We can only reach this position if a particular codec was requested, + // webm is supported and working: the codec must be invalid. + return CANPLAY_NO; + } + if (MP4Decoder::IsSupportedType(mimeType, + /* DecoderDoctorDiagnostics* */ nullptr)) { + if (MP4Decoder::IsSupportedType(aType, aDiagnostics)) { + return CANPLAY_YES; + } + // We can only reach this position if a particular codec was requested, + // fmp4 is supported and working: the codec must be invalid. + return CANPLAY_NO; + } + if (MP3Decoder::IsSupportedType(mimeType)) { + if (MP3Decoder::IsSupportedType(aType)) { + return CANPLAY_YES; + } + // We can only reach this position if a particular codec was requested, + // mp3 is supported and working: the codec must be invalid. + return CANPLAY_NO; + } + if (ADTSDecoder::IsSupportedType(mimeType)) { + if (ADTSDecoder::IsSupportedType(aType)) { + return CANPLAY_YES; + } + // We can only reach this position if a particular codec was requested, + // adts is supported and working: the codec must be invalid. + return CANPLAY_NO; + } + if (FlacDecoder::IsSupportedType(mimeType)) { + if (FlacDecoder::IsSupportedType(aType)) { + return CANPLAY_YES; + } + // We can only reach this position if a particular codec was requested, + // flac is supported and working: the codec must be invalid. 
+ return CANPLAY_NO; + } + + return CANPLAY_MAYBE; +} + +static CanPlayStatus CanHandleMediaType( + const MediaContainerType& aType, DecoderDoctorDiagnostics* aDiagnostics) { + if (DecoderTraits::IsHttpLiveStreamingType(aType)) { + Telemetry::Accumulate(Telemetry::MEDIA_HLS_CANPLAY_REQUESTED, true); + } +#ifdef MOZ_ANDROID_HLS_SUPPORT + if (HLSDecoder::IsSupportedType(aType)) { + Telemetry::Accumulate(Telemetry::MEDIA_HLS_CANPLAY_SUPPORTED, true); + return CANPLAY_MAYBE; + } +#endif + + if (DecoderTraits::IsMatroskaType(aType)) { + Telemetry::Accumulate(Telemetry::MEDIA_MKV_CANPLAY_REQUESTED, true); + } + + if (aType.ExtendedType().HaveCodecs()) { + CanPlayStatus result = CanHandleCodecsType(aType, aDiagnostics); + if (result == CANPLAY_NO || result == CANPLAY_YES) { + return result; + } + } + + // Container type with just the MIME type/subtype, no codecs. + const MediaContainerType mimeType(aType.Type()); + + if (OggDecoder::IsSupportedType(mimeType)) { + return CANPLAY_MAYBE; + } + if (WaveDecoder::IsSupportedType(mimeType)) { + return CANPLAY_MAYBE; + } + if (MP4Decoder::IsSupportedType(mimeType, aDiagnostics)) { + return CANPLAY_MAYBE; + } + if (WebMDecoder::IsSupportedType(mimeType)) { + return CANPLAY_MAYBE; + } + if (MP3Decoder::IsSupportedType(mimeType)) { + return CANPLAY_MAYBE; + } + if (ADTSDecoder::IsSupportedType(mimeType)) { + return CANPLAY_MAYBE; + } + if (FlacDecoder::IsSupportedType(mimeType)) { + return CANPLAY_MAYBE; + } + return CANPLAY_NO; +} + +/* static */ +CanPlayStatus DecoderTraits::CanHandleContainerType( + const MediaContainerType& aContainerType, + DecoderDoctorDiagnostics* aDiagnostics) { + return CanHandleMediaType(aContainerType, aDiagnostics); +} + +/* static */ +bool DecoderTraits::ShouldHandleMediaType( + const char* aMIMEType, DecoderDoctorDiagnostics* aDiagnostics) { + Maybe containerType = MakeMediaContainerType(aMIMEType); + if (!containerType) { + return false; + } + + if (WaveDecoder::IsSupportedType(*containerType)) { + // We should not return true for Wave types, since there are some + // Wave codecs actually in use in the wild that we don't support, and + // we should allow those to be handled by plugins or helper apps. + // Furthermore people can play Wave files on most platforms by other + // means. 
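// (Note the asymmetry with CanHandleMediaType() above, which still reports
// CANPLAY_MAYBE for a bare Wave container type; only <object> and top-level
// document handling is opted out here.)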
+ return false;
+ }
+
+ return CanHandleMediaType(*containerType, aDiagnostics) != CANPLAY_NO;
+}
+
+/* static */
+already_AddRefed<MediaDataDemuxer> DecoderTraits::CreateDemuxer(
+ const MediaContainerType& aType, MediaResource* aResource) {
+ MOZ_ASSERT(NS_IsMainThread());
+ RefPtr<MediaDataDemuxer> demuxer;
+
+ if (MP4Decoder::IsSupportedType(aType,
+ /* DecoderDoctorDiagnostics* */ nullptr)) {
+ demuxer = new MP4Demuxer(aResource);
+ } else if (MP3Decoder::IsSupportedType(aType)) {
+ demuxer = new MP3Demuxer(aResource);
+ } else if (ADTSDecoder::IsSupportedType(aType)) {
+ demuxer = new ADTSDemuxer(aResource);
+ } else if (WaveDecoder::IsSupportedType(aType)) {
+ demuxer = new WAVDemuxer(aResource);
+ } else if (FlacDecoder::IsSupportedType(aType)) {
+ demuxer = new FlacDemuxer(aResource);
+ } else if (OggDecoder::IsSupportedType(aType)) {
+ demuxer = new OggDemuxer(aResource);
+ } else if (WebMDecoder::IsSupportedType(aType)) {
+ demuxer = new WebMDemuxer(aResource);
+ }
+
+ return demuxer.forget();
+}
+
+/* static */
+MediaFormatReader* DecoderTraits::CreateReader(const MediaContainerType& aType,
+ MediaFormatReaderInit& aInit) {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ RefPtr<MediaDataDemuxer> demuxer = CreateDemuxer(aType, aInit.mResource);
+ if (!demuxer) {
+ return nullptr;
+ }
+
+ MediaFormatReader* decoderReader = new MediaFormatReader(aInit, demuxer);
+
+ if (OggDecoder::IsSupportedType(aType)) {
+ static_cast<OggDemuxer*>(demuxer.get())
+ ->SetChainingEvents(&decoderReader->TimedMetadataProducer(),
+ &decoderReader->MediaNotSeekableProducer());
+ }
+
+ return decoderReader;
+}
+
+/* static */
+bool DecoderTraits::IsSupportedInVideoDocument(const nsACString& aType) {
+ // Forbid playing media in video documents if the user has opted
+ // not to, using either the legacy WMF specific pref, or the newer
+ // catch-all pref.
+ if (!Preferences::GetBool("media.wmf.play-stand-alone", true) ||
+ !Preferences::GetBool("media.play-stand-alone", true)) {
+ return false;
+ }
+
+ Maybe<MediaContainerType> type = MakeMediaContainerType(aType);
+ if (!type) {
+ return false;
+ }
+
+ return OggDecoder::IsSupportedType(*type) ||
+ WebMDecoder::IsSupportedType(*type) ||
+ MP4Decoder::IsSupportedType(*type,
+ /* DecoderDoctorDiagnostics* */ nullptr) ||
+ MP3Decoder::IsSupportedType(*type) ||
+ ADTSDecoder::IsSupportedType(*type) ||
+ FlacDecoder::IsSupportedType(*type) ||
+#ifdef MOZ_ANDROID_HLS_SUPPORT
+ HLSDecoder::IsSupportedType(*type) ||
+#endif
+ false;
+}
+
+/* static */
+nsTArray<UniquePtr<TrackInfo>> DecoderTraits::GetTracksInfo(
+ const MediaContainerType& aType) {
+ // Container type with just the MIME type/subtype, no codecs.
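// Illustrative (an assumption, not part of this patch): callers typically pair
// this with CanHandleContainerType(), e.g.
//
//   Maybe<MediaContainerType> type = MakeMediaContainerType("audio/flac"_ns);
//   if (type &&
//       DecoderTraits::CanHandleContainerType(*type, nullptr) != CANPLAY_NO) {
//     nsTArray<UniquePtr<TrackInfo>> tracks =
//         DecoderTraits::GetTracksInfo(*type);
//   }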
+ const MediaContainerType mimeType(aType.Type()); + + if (OggDecoder::IsSupportedType(mimeType)) { + return OggDecoder::GetTracksInfo(aType); + } + if (WaveDecoder::IsSupportedType(mimeType)) { + return WaveDecoder::GetTracksInfo(aType); + } + if (MP4Decoder::IsSupportedType(mimeType, nullptr)) { + return MP4Decoder::GetTracksInfo(aType); + } + if (WebMDecoder::IsSupportedType(mimeType)) { + return WebMDecoder::GetTracksInfo(aType); + } + if (MP3Decoder::IsSupportedType(mimeType)) { + return MP3Decoder::GetTracksInfo(aType); + } + if (ADTSDecoder::IsSupportedType(mimeType)) { + return ADTSDecoder::GetTracksInfo(aType); + } + if (FlacDecoder::IsSupportedType(mimeType)) { + return FlacDecoder::GetTracksInfo(aType); + } + return nsTArray>(); +} + +} // namespace mozilla diff --git a/dom/media/DecoderTraits.h b/dom/media/DecoderTraits.h new file mode 100644 index 0000000000..6416c13d80 --- /dev/null +++ b/dom/media/DecoderTraits.h @@ -0,0 +1,68 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef DecoderTraits_h_ +#define DecoderTraits_h_ + +#include "mozilla/UniquePtr.h" +#include "nsStringFwd.h" +#include "nsTArray.h" + +namespace mozilla { + +class DecoderDoctorDiagnostics; +class MediaContainerType; +class MediaDataDemuxer; +struct MediaFormatReaderInit; +class MediaFormatReader; +class MediaResource; +class TrackInfo; + +enum CanPlayStatus { CANPLAY_NO, CANPLAY_MAYBE, CANPLAY_YES }; + +class DecoderTraits { + public: + // Returns the CanPlayStatus indicating if we can handle this container type. + static CanPlayStatus CanHandleContainerType( + const MediaContainerType& aContainerType, + DecoderDoctorDiagnostics* aDiagnostics); + + // Returns true if we should handle this MIME type when it appears + // as an or as a toplevel page. If, in practice, our support + // for the type is more limited than appears in the wild, we should return + // false here even if CanHandleMediaType would return true. + static bool ShouldHandleMediaType(const char* aMIMEType, + DecoderDoctorDiagnostics* aDiagnostics); + + // Create a demuxer for the given MIME type aType. Returns null if we + // were unable to create the demuxer. + static already_AddRefed CreateDemuxer( + const MediaContainerType& aType, MediaResource* aResource); + + // Create a reader for thew given MIME type aType. Returns null + // if we were unable to create the reader. + static MediaFormatReader* CreateReader(const MediaContainerType& aType, + MediaFormatReaderInit& aInit); + + // Returns true if MIME type aType is supported in video documents, + // or false otherwise. Not all platforms support all MIME types, and + // vice versa. + static bool IsSupportedInVideoDocument(const nsACString& aType); + + // Returns true if aType is MIME type of hls. + static bool IsHttpLiveStreamingType(const MediaContainerType& aType); + + // Returns true if aType is matroska type. + static bool IsMatroskaType(const MediaContainerType& aType); + + // Returns an array of all TrackInfo objects described by this type. 
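// An unrecognized container type yields an empty array rather than an error
// (see the implementation above).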
+ static nsTArray> GetTracksInfo( + const MediaContainerType& aType); +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/DeviceInputTrack.cpp b/dom/media/DeviceInputTrack.cpp new file mode 100644 index 0000000000..87d1ae73ab --- /dev/null +++ b/dom/media/DeviceInputTrack.cpp @@ -0,0 +1,639 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +#include "DeviceInputTrack.h" + +#include "Tracing.h" + +namespace mozilla { + +#ifdef LOG_INTERNAL +# undef LOG_INTERNAL +#endif // LOG_INTERNAL +#define LOG_INTERNAL(level, msg, ...) \ + MOZ_LOG(gMediaTrackGraphLog, LogLevel::level, (msg, ##__VA_ARGS__)) + +#ifdef LOG +# undef LOG +#endif // LOG +#define LOG(msg, ...) LOG_INTERNAL(Debug, msg, ##__VA_ARGS__) + +#ifdef LOGE +# undef LOGE +#endif // LOGE +#define LOGE(msg, ...) LOG_INTERNAL(Error, msg, ##__VA_ARGS__) + +// This can only be called in graph thread since mGraph->CurrentDriver() is +// graph thread only +#ifdef TRACK_GRAPH_LOG_INTERNAL +# undef TRACK_GRAPH_LOG_INTERNAL +#endif // TRACK_GRAPH_LOG_INTERNAL +#define TRACK_GRAPH_LOG_INTERNAL(level, msg, ...) \ + LOG_INTERNAL(level, "(Graph %p, Driver %p) DeviceInputTrack %p, " msg, \ + this->mGraph, this->mGraph->CurrentDriver(), this, \ + ##__VA_ARGS__) + +#ifdef TRACK_GRAPH_LOG +# undef TRACK_GRAPH_LOG +#endif // TRACK_GRAPH_LOG +#define TRACK_GRAPH_LOG(msg, ...) \ + TRACK_GRAPH_LOG_INTERNAL(Debug, msg, ##__VA_ARGS__) + +#ifdef TRACK_GRAPH_LOGV +# undef TRACK_GRAPH_LOGV +#endif // TRACK_GRAPH_LOGV +#define TRACK_GRAPH_LOGV(msg, ...) \ + TRACK_GRAPH_LOG_INTERNAL(Verbose, msg, ##__VA_ARGS__) + +#ifdef TRACK_GRAPH_LOGE +# undef TRACK_GRAPH_LOGE +#endif // TRACK_GRAPH_LOGE +#define TRACK_GRAPH_LOGE(msg, ...) \ + TRACK_GRAPH_LOG_INTERNAL(Error, msg, ##__VA_ARGS__) + +#ifdef CONSUMER_GRAPH_LOG_INTERNAL +# undef CONSUMER_GRAPH_LOG_INTERNAL +#endif // CONSUMER_GRAPH_LOG_INTERNAL +#define CONSUMER_GRAPH_LOG_INTERNAL(level, msg, ...) \ + LOG_INTERNAL( \ + level, "(Graph %p, Driver %p) DeviceInputConsumerTrack %p, " msg, \ + this->mGraph, this->mGraph->CurrentDriver(), this, ##__VA_ARGS__) + +#ifdef CONSUMER_GRAPH_LOGV +# undef CONSUMER_GRAPH_LOGV +#endif // CONSUMER_GRAPH_LOGV +#define CONSUMER_GRAPH_LOGV(msg, ...) 
\ + CONSUMER_GRAPH_LOG_INTERNAL(Verbose, msg, ##__VA_ARGS__) + +DeviceInputConsumerTrack::DeviceInputConsumerTrack(TrackRate aSampleRate) + : ProcessedMediaTrack(aSampleRate, MediaSegment::AUDIO, + new AudioSegment()) {} + +void DeviceInputConsumerTrack::ConnectDeviceInput( + CubebUtils::AudioDeviceID aId, AudioDataListener* aListener, + const PrincipalHandle& aPrincipal) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(Graph()); + MOZ_ASSERT(aListener); + MOZ_ASSERT(!mListener); + MOZ_ASSERT(!mDeviceInputTrack); + MOZ_ASSERT(mDeviceId.isNothing()); + MOZ_ASSERT(!mDeviceInputTrack, + "Must disconnect a device input before connecting a new one"); + + mListener = aListener; + mDeviceId.emplace(aId); + + mDeviceInputTrack = + DeviceInputTrack::OpenAudio(Graph(), aId, aPrincipal, this); + LOG("Open device %p (DeviceInputTrack %p) for consumer %p", aId, + mDeviceInputTrack.get(), this); + mPort = AllocateInputPort(mDeviceInputTrack.get()); +} + +void DeviceInputConsumerTrack::DisconnectDeviceInput() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(Graph()); + + if (!mListener) { + MOZ_ASSERT(mDeviceId.isNothing()); + MOZ_ASSERT(!mDeviceInputTrack); + return; + } + + MOZ_ASSERT(mPort); + MOZ_ASSERT(mDeviceInputTrack); + MOZ_ASSERT(mDeviceId.isSome()); + + LOG("Close device %p (DeviceInputTrack %p) for consumer %p ", *mDeviceId, + mDeviceInputTrack.get(), this); + mPort->Destroy(); + DeviceInputTrack::CloseAudio(mDeviceInputTrack.forget(), this); + mListener = nullptr; + mDeviceId = Nothing(); +} + +Maybe DeviceInputConsumerTrack::DeviceId() const { + MOZ_ASSERT(NS_IsMainThread()); + return mDeviceId; +} + +NotNull DeviceInputConsumerTrack::GetAudioDataListener() + const { + MOZ_ASSERT(NS_IsMainThread()); + return WrapNotNull(mListener.get()); +} + +bool DeviceInputConsumerTrack::ConnectToNativeDevice() const { + MOZ_ASSERT(NS_IsMainThread()); + return mDeviceInputTrack && mDeviceInputTrack->AsNativeInputTrack(); +} + +bool DeviceInputConsumerTrack::ConnectToNonNativeDevice() const { + MOZ_ASSERT(NS_IsMainThread()); + return mDeviceInputTrack && mDeviceInputTrack->AsNonNativeInputTrack(); +} + +void DeviceInputConsumerTrack::GetInputSourceData(AudioSegment& aOutput, + const MediaInputPort* aPort, + GraphTime aFrom, + GraphTime aTo) const { + AssertOnGraphThread(); + MOZ_ASSERT(aOutput.IsEmpty()); + + MediaTrack* source = aPort->GetSource(); + GraphTime next; + for (GraphTime t = aFrom; t < aTo; t = next) { + MediaInputPort::InputInterval interval = + MediaInputPort::GetNextInputInterval(aPort, t); + interval.mEnd = std::min(interval.mEnd, aTo); + + const bool inputEnded = + source->Ended() && + source->GetEnd() <= + source->GraphTimeToTrackTimeWithBlocking(interval.mStart); + + TrackTime ticks = interval.mEnd - interval.mStart; + next = interval.mEnd; + + if (interval.mStart >= interval.mEnd) { + break; + } + + if (inputEnded) { + aOutput.AppendNullData(ticks); + CONSUMER_GRAPH_LOGV( + "Getting %" PRId64 + " ticks of null data from input port source (ended input)", + ticks); + } else if (interval.mInputIsBlocked) { + aOutput.AppendNullData(ticks); + CONSUMER_GRAPH_LOGV( + "Getting %" PRId64 + " ticks of null data from input port source (blocked input)", + ticks); + } else if (source->IsSuspended()) { + aOutput.AppendNullData(ticks); + CONSUMER_GRAPH_LOGV( + "Getting %" PRId64 + " ticks of null data from input port source (source is suspended)", + ticks); + } else { + TrackTime start = + source->GraphTimeToTrackTimeWithBlocking(interval.mStart); + TrackTime end = 
source->GraphTimeToTrackTimeWithBlocking(interval.mEnd); + MOZ_ASSERT(source->GetData()->GetDuration() >= end); + aOutput.AppendSlice(*source->GetData(), start, end); + CONSUMER_GRAPH_LOGV("Getting %" PRId64 + " ticks of real data from input port source %p", + end - start, source); + } + } +} + +/* static */ +NotNull> DeviceInputTrack::OpenAudio( + MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId, + const PrincipalHandle& aPrincipalHandle, + DeviceInputConsumerTrack* aConsumer) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(aConsumer); + MOZ_ASSERT(aGraph == aConsumer->Graph()); + + RefPtr track = + aGraph->GetDeviceInputTrackMainThread(aDeviceId); + if (track) { + MOZ_ASSERT(!track->mConsumerTracks.IsEmpty()); + track->AddDataListener(aConsumer->GetAudioDataListener()); + } else { + // Create a NativeInputTrack or NonNativeInputTrack, depending on whether + // the given graph already has a native device or not. + if (aGraph->GetNativeInputTrackMainThread()) { + // A native device is already in use. This device will be a non-native + // device. + track = new NonNativeInputTrack(aGraph->GraphRate(), aDeviceId, + aPrincipalHandle); + } else { + // No native device is in use. This device will be the native device. + track = new NativeInputTrack(aGraph->GraphRate(), aDeviceId, + aPrincipalHandle); + } + LOG("Create %sNativeInputTrack %p in MTG %p for device %p", + (track->AsNativeInputTrack() ? "" : "Non"), track.get(), aGraph, + aDeviceId); + aGraph->AddTrack(track); + // Add the listener before opening the device so the device passed to + // OpenAudioInput always has a non-zero input channel count. + track->AddDataListener(aConsumer->GetAudioDataListener()); + aGraph->OpenAudioInput(track); + } + MOZ_ASSERT(track->AsNativeInputTrack() || track->AsNonNativeInputTrack()); + MOZ_ASSERT(track->mDeviceId == aDeviceId); + + MOZ_ASSERT(!track->mConsumerTracks.Contains(aConsumer)); + track->mConsumerTracks.AppendElement(aConsumer); + + LOG("DeviceInputTrack %p (device %p: %snative) in MTG %p has %zu users now", + track.get(), track->mDeviceId, + (track->AsNativeInputTrack() ? 
"" : "non-"), aGraph, + track->mConsumerTracks.Length()); + if (track->mConsumerTracks.Length() > 1) { + track->ReevaluateInputDevice(); + } + + return WrapNotNull(track); +} + +/* static */ +void DeviceInputTrack::CloseAudio(already_AddRefed aTrack, + DeviceInputConsumerTrack* aConsumer) { + MOZ_ASSERT(NS_IsMainThread()); + + RefPtr track = aTrack; + MOZ_ASSERT(track); + + track->RemoveDataListener(aConsumer->GetAudioDataListener()); + DebugOnly removed = track->mConsumerTracks.RemoveElement(aConsumer); + MOZ_ASSERT(removed); + LOG("DeviceInputTrack %p (device %p) in MTG %p has %zu users now", + track.get(), track->mDeviceId, track->Graph(), + track->mConsumerTracks.Length()); + if (track->mConsumerTracks.IsEmpty()) { + track->Graph()->CloseAudioInput(track); + track->Destroy(); + } else { + track->ReevaluateInputDevice(); + } +} + +const nsTArray>& +DeviceInputTrack::GetConsumerTracks() const { + MOZ_ASSERT(NS_IsMainThread()); + return mConsumerTracks; +} + +DeviceInputTrack::DeviceInputTrack(TrackRate aSampleRate, + CubebUtils::AudioDeviceID aDeviceId, + const PrincipalHandle& aPrincipalHandle) + : ProcessedMediaTrack(aSampleRate, MediaSegment::AUDIO, new AudioSegment()), + mDeviceId(aDeviceId), + mPrincipalHandle(aPrincipalHandle) {} + +uint32_t DeviceInputTrack::MaxRequestedInputChannels() const { + AssertOnGraphThreadOrNotRunning(); + uint32_t maxInputChannels = 0; + for (const auto& listener : mListeners) { + maxInputChannels = std::max(maxInputChannels, + listener->RequestedInputChannelCount(mGraph)); + } + return maxInputChannels; +} + +bool DeviceInputTrack::HasVoiceInput() const { + AssertOnGraphThreadOrNotRunning(); + for (const auto& listener : mListeners) { + if (listener->IsVoiceInput(mGraph)) { + return true; + } + } + return false; +} + +void DeviceInputTrack::DeviceChanged(MediaTrackGraph* aGraph) const { + AssertOnGraphThreadOrNotRunning(); + MOZ_ASSERT(aGraph == mGraph, + "Receive device changed signal from another graph"); + TRACK_GRAPH_LOG("DeviceChanged"); + for (const auto& listener : mListeners) { + listener->DeviceChanged(aGraph); + } +} + +void DeviceInputTrack::ReevaluateInputDevice() { + MOZ_ASSERT(NS_IsMainThread()); + QueueControlMessageWithNoShutdown([self = RefPtr{this}, this] { + TRACE("DeviceInputTrack::ReevaluateInputDevice ControlMessage"); + Graph()->ReevaluateInputDevice(mDeviceId); + }); +} + +void DeviceInputTrack::AddDataListener(AudioDataListener* aListener) { + MOZ_ASSERT(NS_IsMainThread()); + QueueControlMessageWithNoShutdown( + [self = RefPtr{this}, this, listener = RefPtr{aListener}] { + TRACE("DeviceInputTrack::AddDataListener ControlMessage"); + MOZ_ASSERT(!mListeners.Contains(listener.get()), + "Don't add a listener twice."); + mListeners.AppendElement(listener.get()); + }); +} + +void DeviceInputTrack::RemoveDataListener(AudioDataListener* aListener) { + MOZ_ASSERT(NS_IsMainThread()); + QueueControlMessageWithNoShutdown( + [self = RefPtr{this}, this, listener = RefPtr{aListener}] { + TRACE("DeviceInputTrack::RemoveDataListener ControlMessage"); + DebugOnly wasPresent = mListeners.RemoveElement(listener.get()); + MOZ_ASSERT(wasPresent, "Remove an unknown listener"); + listener->Disconnect(Graph()); + }); +} + +NativeInputTrack::NativeInputTrack(TrackRate aSampleRate, + CubebUtils::AudioDeviceID aDeviceId, + const PrincipalHandle& aPrincipalHandle) + : DeviceInputTrack(aSampleRate, aDeviceId, aPrincipalHandle), + mIsBufferingAppended(false), + mInputChannels(0) {} + +void NativeInputTrack::DestroyImpl() { + AssertOnGraphThreadOrNotRunning(); 
+ mPendingData.Clear(); + ProcessedMediaTrack::DestroyImpl(); +} + +void NativeInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo, + uint32_t aFlags) { + AssertOnGraphThread(); + TRACE_COMMENT("NativeInputTrack::ProcessInput", "%p", this); + + TRACK_GRAPH_LOGV("(Native) ProcessInput from %" PRId64 " to %" PRId64 + ", needs %" PRId64 " frames", + aFrom, aTo, aTo - aFrom); + + TrackTime from = GraphTimeToTrackTime(aFrom); + TrackTime to = GraphTimeToTrackTime(aTo); + if (from >= to) { + return; + } + + MOZ_ASSERT_IF(!mIsBufferingAppended, mPendingData.IsEmpty()); + + TrackTime need = to - from; + TrackTime dataNeed = std::min(mPendingData.GetDuration(), need); + TrackTime silenceNeed = std::max(need - dataNeed, (TrackTime)0); + + // TODO (bug 1879353): Reenable assertion. + // MOZ_ASSERT_IF(dataNeed > 0, silenceNeed == 0); + + GetData()->AppendSlice(mPendingData, 0, dataNeed); + mPendingData.RemoveLeading(dataNeed); + GetData()->AppendNullData(silenceNeed); + + // TODO (bug 1879353): Remove as assertion above will hold. + if (dataNeed > 0 && silenceNeed > 0) { + NotifyInputStopped(mGraph); + } +} + +uint32_t NativeInputTrack::NumberOfChannels() const { + AssertOnGraphThreadOrNotRunning(); + return mInputChannels; +} + +void NativeInputTrack::NotifyInputStopped(MediaTrackGraph* aGraph) { + AssertOnGraphThreadOrNotRunning(); + MOZ_ASSERT(aGraph == mGraph, + "Receive input stopped signal from another graph"); + TRACK_GRAPH_LOG("(Native) NotifyInputStopped"); + mInputChannels = 0; + mIsBufferingAppended = false; + mPendingData.Clear(); +} + +void NativeInputTrack::NotifyInputData(MediaTrackGraph* aGraph, + const AudioDataValue* aBuffer, + size_t aFrames, TrackRate aRate, + uint32_t aChannels, + uint32_t aAlreadyBuffered) { + AssertOnGraphThread(); + MOZ_ASSERT(aGraph == mGraph, "Receive input data from another graph"); + TRACK_GRAPH_LOGV( + "NotifyInputData: frames=%zu, rate=%d, channel=%u, alreadyBuffered=%u", + aFrames, aRate, aChannels, aAlreadyBuffered); + + if (!mIsBufferingAppended) { + // First time we see live frames getting added. Use what's already buffered + // in the driver's scratch buffer as a starting point. 
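// Worked example (figures assumed, not from the patch): with
// WEBAUDIO_BLOCK_SIZE == 128 and aAlreadyBuffered == 32, `remaining` below is
// 96, so 96 frames of silence are pre-appended and the first block pulled by
// the graph is complete without waiting for further input callbacks.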
+ MOZ_ASSERT(mPendingData.IsEmpty()); + constexpr TrackTime buffering = WEBAUDIO_BLOCK_SIZE; + const TrackTime remaining = + buffering - static_cast(aAlreadyBuffered); + mPendingData.AppendNullData(remaining); + mIsBufferingAppended = true; + TRACK_GRAPH_LOG("Set mIsBufferingAppended by appending %" PRId64 " frames.", + remaining); + } + + MOZ_ASSERT(aChannels); + if (!mInputChannels) { + mInputChannels = aChannels; + } + mPendingData.AppendFromInterleavedBuffer(aBuffer, aFrames, aChannels, + mPrincipalHandle); +} + +NonNativeInputTrack::NonNativeInputTrack( + TrackRate aSampleRate, CubebUtils::AudioDeviceID aDeviceId, + const PrincipalHandle& aPrincipalHandle) + : DeviceInputTrack(aSampleRate, aDeviceId, aPrincipalHandle), + mAudioSource(nullptr), + mSourceIdNumber(0) {} + +void NonNativeInputTrack::DestroyImpl() { + AssertOnGraphThreadOrNotRunning(); + if (mAudioSource) { + mAudioSource->Stop(); + mAudioSource = nullptr; + } + ProcessedMediaTrack::DestroyImpl(); +} + +void NonNativeInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo, + uint32_t aFlags) { + AssertOnGraphThread(); + TRACE_COMMENT("NonNativeInputTrack::ProcessInput", "%p", this); + + TRACK_GRAPH_LOGV("(NonNative) ProcessInput from %" PRId64 " to %" PRId64 + ", needs %" PRId64 " frames", + aFrom, aTo, aTo - aFrom); + + TrackTime from = GraphTimeToTrackTime(aFrom); + TrackTime to = GraphTimeToTrackTime(aTo); + if (from >= to) { + return; + } + + TrackTime delta = to - from; + if (!mAudioSource) { + GetData()->AppendNullData(delta); + return; + } + + AudioInputSource::Consumer consumer = AudioInputSource::Consumer::Same; + // GraphRunner keeps the same thread. + MOZ_ASSERT(!HasGraphThreadChanged()); + + AudioSegment data = mAudioSource->GetAudioSegment(delta, consumer); + MOZ_ASSERT(data.GetDuration() == delta); + GetData()->AppendFrom(&data); +} + +uint32_t NonNativeInputTrack::NumberOfChannels() const { + AssertOnGraphThreadOrNotRunning(); + return mAudioSource ? mAudioSource->mChannelCount : 0; +} + +void NonNativeInputTrack::StartAudio( + RefPtr&& aAudioInputSource) { + AssertOnGraphThread(); + MOZ_ASSERT(aAudioInputSource->mPrincipalHandle == mPrincipalHandle); + MOZ_ASSERT(aAudioInputSource->mDeviceId == mDeviceId); + + TRACK_GRAPH_LOG("StartAudio with source %p", aAudioInputSource.get()); +#ifdef DEBUG + mGraphThreadId = std::this_thread::get_id(); +#endif + mAudioSource = std::move(aAudioInputSource); + mAudioSource->Start(); +} + +void NonNativeInputTrack::StopAudio() { + AssertOnGraphThread(); + + TRACK_GRAPH_LOG("StopAudio from source %p", mAudioSource.get()); + if (!mAudioSource) { + return; + } + mAudioSource->Stop(); + mAudioSource = nullptr; +#ifdef DEBUG + mGraphThreadId = std::thread::id(); +#endif +} + +AudioInputType NonNativeInputTrack::DevicePreference() const { + AssertOnGraphThreadOrNotRunning(); + return mAudioSource && mAudioSource->mIsVoice ? AudioInputType::Voice + : AudioInputType::Unknown; +} + +void NonNativeInputTrack::NotifyDeviceChanged(uint32_t aSourceId) { + AssertOnGraphThreadOrNotRunning(); + + // No need to forward the notification if the audio input has been stopped or + // restarted by it users. + if (!mAudioSource || mAudioSource->mId != aSourceId) { + TRACK_GRAPH_LOG("(NonNative) NotifyDeviceChanged: No need to forward"); + return; + } + + TRACK_GRAPH_LOG("(NonNative) NotifyDeviceChanged"); + // Forward the notification. 
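// "Forwarding" means calling the base-class DeviceInputTrack::DeviceChanged(),
// which fans the signal out to every AudioDataListener registered on this
// track.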
+ DeviceInputTrack::DeviceChanged(mGraph); +} + +void NonNativeInputTrack::NotifyInputStopped(uint32_t aSourceId) { + AssertOnGraphThreadOrNotRunning(); + + // No need to forward the notification if the audio input has been stopped or + // restarted by it users. + if (!mAudioSource || mAudioSource->mId != aSourceId) { + TRACK_GRAPH_LOG("(NonNative) NotifyInputStopped: No need to forward"); + return; + } + + TRACK_GRAPH_LOGE( + "(NonNative) NotifyInputStopped: audio unexpectedly stopped"); + // Destory the underlying audio stream if it's stopped unexpectedly. + mAudioSource->Stop(); +} + +AudioInputSource::Id NonNativeInputTrack::GenerateSourceId() { + AssertOnGraphThread(); + return mSourceIdNumber++; +} + +#ifdef DEBUG +bool NonNativeInputTrack::HasGraphThreadChanged() { + AssertOnGraphThread(); + + std::thread::id currentId = std::this_thread::get_id(); + if (mGraphThreadId == currentId) { + return false; + } + mGraphThreadId = currentId; + return true; +} +#endif // DEBUG + +AudioInputSourceListener::AudioInputSourceListener(NonNativeInputTrack* aOwner) + : mOwner(aOwner) {} + +void AudioInputSourceListener::AudioDeviceChanged( + AudioInputSource::Id aSourceId) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mOwner); + + if (mOwner->IsDestroyed()) { + LOG("NonNativeInputTrack %p has been destroyed. No need to forward the " + "audio device-changed notification", + mOwner.get()); + return; + } + + MOZ_DIAGNOSTIC_ASSERT(mOwner->Graph()); + mOwner->QueueControlMessageWithNoShutdown([inputTrack = mOwner, aSourceId] { + TRACE("NonNativeInputTrack::AudioDeviceChanged ControlMessage"); + inputTrack->NotifyDeviceChanged(aSourceId); + }); +} + +void AudioInputSourceListener::AudioStateCallback( + AudioInputSource::Id aSourceId, + AudioInputSource::EventListener::State aState) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mOwner); + + const char* state = + aState == AudioInputSource::EventListener::State::Started ? "started" + : aState == AudioInputSource::EventListener::State::Stopped ? "stopped" + : aState == AudioInputSource::EventListener::State::Drained ? "drained" + : "error"; + + if (mOwner->IsDestroyed()) { + LOG("NonNativeInputTrack %p has been destroyed. No need to forward the " + "audio state-changed(%s) notification", + mOwner.get(), state); + return; + } + + if (aState == AudioInputSource::EventListener::State::Started) { + LOG("We can ignore %s notification for NonNativeInputTrack %p", state, + mOwner.get()); + return; + } + + LOG("Notify audio stopped due to entering %s state", state); + + MOZ_DIAGNOSTIC_ASSERT(mOwner->Graph()); + mOwner->QueueControlMessageWithNoShutdown([inputTrack = mOwner, aSourceId] { + TRACE("NonNativeInputTrack::AudioStateCallback ControlMessage"); + inputTrack->NotifyInputStopped(aSourceId); + }); +} + +#undef LOG_INTERNAL +#undef LOG +#undef LOGE +#undef TRACK_GRAPH_LOG_INTERNAL +#undef TRACK_GRAPH_LOG +#undef TRACK_GRAPH_LOGV +#undef TRACK_GRAPH_LOGE +#undef CONSUMER_GRAPH_LOG_INTERNAL +#undef CONSUMER_GRAPH_LOGV + +} // namespace mozilla diff --git a/dom/media/DeviceInputTrack.h b/dom/media/DeviceInputTrack.h new file mode 100644 index 0000000000..6206dc0dfc --- /dev/null +++ b/dom/media/DeviceInputTrack.h @@ -0,0 +1,303 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/
+
+#ifndef DOM_MEDIA_DEVICEINPUTTRACK_H_
+#define DOM_MEDIA_DEVICEINPUTTRACK_H_
+
+#include <thread>
+
+#include "AudioDriftCorrection.h"
+#include "AudioSegment.h"
+#include "AudioInputSource.h"
+#include "MediaTrackGraph.h"
+#include "GraphDriver.h"
+#include "mozilla/NotNull.h"
+
+namespace mozilla {
+
+class NativeInputTrack;
+class NonNativeInputTrack;
+
+// Any MediaTrack that needs the audio data from a certain device should
+// inherit this class and get the raw audio data on the graph thread via
+// GetInputSourceData(), after calling ConnectDeviceInput() and before
+// DisconnectDeviceInput() on the main thread. See more examples in
+// TestAudioTrackGraph.cpp
+//
+// Example:
+//
+// class RawAudioDataTrack : public DeviceInputConsumerTrack {
+//  public:
+//   ...
+//
+//   void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override
+//   {
+//     if (aFrom >= aTo) {
+//       return;
+//     }
+//
+//     if (mInputs.IsEmpty()) {
+//       GetData()->AppendNullData(aTo - aFrom);
+//     } else {
+//       MOZ_ASSERT(mInputs.Length() == 1);
+//       AudioSegment data;
+//       DeviceInputConsumerTrack::GetInputSourceData(data, mInputs[0], aFrom,
+//                                                    aTo);
+//       // You can do audio data processing before appending to mSegment here.
+//       GetData()->AppendFrom(&data);
+//     }
+//   };
+//
+//   uint32_t NumberOfChannels() const override {
+//     if (mInputs.IsEmpty()) {
+//       return 0;
+//     }
+//     DeviceInputTrack* t = mInputs[0]->GetSource()->AsDeviceInputTrack();
+//     MOZ_ASSERT(t);
+//     return t->NumberOfChannels();
+//   }
+//
+//   ...
+//
+//  private:
+//   explicit RawAudioDataTrack(TrackRate aSampleRate)
+//       : DeviceInputConsumerTrack(aSampleRate) {}
+// };
+class DeviceInputConsumerTrack : public ProcessedMediaTrack {
+ public:
+  explicit DeviceInputConsumerTrack(TrackRate aSampleRate);
+
+  // Main Thread APIs:
+  void ConnectDeviceInput(CubebUtils::AudioDeviceID aId,
+                          AudioDataListener* aListener,
+                          const PrincipalHandle& aPrincipal);
+  void DisconnectDeviceInput();
+  Maybe<CubebUtils::AudioDeviceID> DeviceId() const;
+  NotNull<AudioDataListener*> GetAudioDataListener() const;
+  bool ConnectToNativeDevice() const;
+  bool ConnectToNonNativeDevice() const;
+
+  // Any thread:
+  DeviceInputConsumerTrack* AsDeviceInputConsumerTrack() override {
+    return this;
+  }
+
+ protected:
+  // Graph thread API:
+  // Get the data in [aFrom, aTo) from aPort->GetSource() to aOutput. aOutput
+  // needs to be empty.
+  void GetInputSourceData(AudioSegment& aOutput, const MediaInputPort* aPort,
+                          GraphTime aFrom, GraphTime aTo) const;
+
+  // Main Thread variables:
+  RefPtr<MediaInputPort> mPort;
+  RefPtr<DeviceInputTrack> mDeviceInputTrack;
+  RefPtr<AudioDataListener> mListener;
+  Maybe<CubebUtils::AudioDeviceID> mDeviceId;
+};
+
+class DeviceInputTrack : public ProcessedMediaTrack {
+ public:
+  // Main Thread APIs:
+  // Any MediaTrack that needs the audio data from a certain device should
+  // inherit the DeviceInputConsumerTrack class and call GetInputSourceData to
+  // get the data instead of using the APIs below.
+  //
+  // The following two APIs can create and destroy a DeviceInputTrack reference
+  // on the main thread, then open and close the underlying audio device
+  // accordingly on the graph thread. A user who wants to read the audio input
+  // from a certain device should use these APIs to obtain a DeviceInputTrack
+  // reference and release that reference once it no longer needs the audio
+  // data.
+  //
+  // Once the DeviceInputTrack is created on the main thread, the paired device
+  // will start producing data, so its users can read the data immediately on
+  // the graph thread, once they obtain the reference.
+  // The lifetime of a DeviceInputTrack is managed by the MediaTrackGraph
+  // itself: when the DeviceInputTrack has no users any more, the
+  // MediaTrackGraph destroys it, which happens once the last reference has
+  // been released via the API below.
+  //
+  // A DeviceInputTrack is either a NativeInputTrack or a NonNativeInputTrack.
+  // We can have only one NativeInputTrack per MediaTrackGraph, but multiple
+  // NonNativeInputTracks per graph. The audio device paired with the
+  // NativeInputTrack is called the "native device", and a device paired with a
+  // NonNativeInputTrack is called a "non-native device". In other words, we
+  // can have only one native device per MediaTrackGraph, but many non-native
+  // devices per graph.
+  //
+  // The native device is the first input device created in the
+  // MediaTrackGraph. All other devices created after it will be non-native
+  // devices. Once the native device is destroyed, the first non-native device
+  // will be promoted to the new native device. The switch is started by the
+  // MediaTrackGraph, which forces DeviceInputTrack's users to re-configure
+  // their DeviceInputTrack connections with the APIs below to execute the
+  // switch.
+  //
+  // The native device is also the audio input device serving the
+  // AudioCallbackDriver, which drives the MediaTrackGraph periodically from
+  // the audio callback thread. The audio data produced by the native device
+  // and the non-native devices is stored in NativeInputTrack and
+  // NonNativeInputTrack respectively, and then accessed by their users. The
+  // only difference between the two is that the data from a non-native device
+  // is clock-drift-corrected, since the non-native device may run on a
+  // different clock than the native device.
+  //
+  // Example:
+  //   // On main thread
+  //   RefPtr<DeviceInputTrack> track = DeviceInputTrack::OpenAudio(...);
+  //   ...
+  //   // On graph thread
+  //   AudioSegment* data = track->GetData();
+  //   ...
+  //   // On main thread
+  //   DeviceInputTrack::CloseAudio(track.forget(), ...);
+  //
+  // Returns a reference to a DeviceInputTrack in the given MediaTrackGraph,
+  // storing the input audio data from the given device. The paired audio
+  // device will be opened accordingly. The DeviceInputTrack will access its
+  // user's audio settings via the attached AudioDataListener, and deliver
+  // notifications when needed.
+  static NotNull<RefPtr<DeviceInputTrack>> OpenAudio(
+      MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId,
+      const PrincipalHandle& aPrincipalHandle,
+      DeviceInputConsumerTrack* aConsumer);
+  // Destroy the DeviceInputTrack reference obtained by the above API. The
+  // paired audio device will be closed accordingly.
+  static void CloseAudio(already_AddRefed<DeviceInputTrack> aTrack,
+                         DeviceInputConsumerTrack* aConsumer);
+
+  // Main thread API:
+  const nsTArray<RefPtr<DeviceInputConsumerTrack>>& GetConsumerTracks() const;
+
+  // Graph thread APIs:
+  // Query audio settings from its users.
+  uint32_t MaxRequestedInputChannels() const;
+  bool HasVoiceInput() const;
+  // Deliver notifications to its users.
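+  // (Usage sketch: the graph can, for example, size its audio input stream
+  // with MaxRequestedInputChannels() and pick a voice-processing path when
+  // HasVoiceInput() returns true; the exact call sites live in the
+  // MediaTrackGraph/GraphDriver code.)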
+  void DeviceChanged(MediaTrackGraph* aGraph) const;
+
+  // Any thread:
+  DeviceInputTrack* AsDeviceInputTrack() override { return this; }
+  virtual NativeInputTrack* AsNativeInputTrack() { return nullptr; }
+  virtual NonNativeInputTrack* AsNonNativeInputTrack() { return nullptr; }
+
+  // Any thread:
+  const CubebUtils::AudioDeviceID mDeviceId;
+  const PrincipalHandle mPrincipalHandle;
+
+ protected:
+  DeviceInputTrack(TrackRate aSampleRate, CubebUtils::AudioDeviceID aDeviceId,
+                   const PrincipalHandle& aPrincipalHandle);
+  ~DeviceInputTrack() = default;
+
+ private:
+  // Main thread APIs:
+  void ReevaluateInputDevice();
+  void AddDataListener(AudioDataListener* aListener);
+  void RemoveDataListener(AudioDataListener* aListener);
+
+  // Only accessed on the main thread.
+  // When this becomes empty, this DeviceInputTrack is no longer needed.
+  nsTArray<RefPtr<DeviceInputConsumerTrack>> mConsumerTracks;
+
+  // Only accessed on the graph thread.
+  nsTArray<RefPtr<AudioDataListener>> mListeners;
+};
+
+class NativeInputTrack final : public DeviceInputTrack {
+ public:
+  // Do not call this directly. This can only be called in DeviceInputTrack or
+  // tests.
+  NativeInputTrack(TrackRate aSampleRate, CubebUtils::AudioDeviceID aDeviceId,
+                   const PrincipalHandle& aPrincipalHandle);
+
+  // Graph Thread APIs, for ProcessedMediaTrack.
+  void DestroyImpl() override;
+  void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
+  uint32_t NumberOfChannels() const override;
+
+  // Graph thread APIs: Get input audio data and event from graph.
+  void NotifyInputStopped(MediaTrackGraph* aGraph);
+  void NotifyInputData(MediaTrackGraph* aGraph, const AudioDataValue* aBuffer,
+                       size_t aFrames, TrackRate aRate, uint32_t aChannels,
+                       uint32_t aAlreadyBuffered);
+
+  // Any thread
+  NativeInputTrack* AsNativeInputTrack() override { return this; }
+
+ private:
+  ~NativeInputTrack() = default;
+
+  // Graph thread only members:
+  // Indicate whether we append extra frames in mPendingData. The extra number
+  // of frames is in [0, WEBAUDIO_BLOCK_SIZE] range.
+  bool mIsBufferingAppended = false;
+  // Queue the audio input data coming from NotifyInputData.
+  AudioSegment mPendingData;
+  // The input channel count for the audio data.
+  uint32_t mInputChannels = 0;
+};
+
+class NonNativeInputTrack final : public DeviceInputTrack {
+ public:
+  // Do not call this directly. This can only be called in DeviceInputTrack or
+  // tests.
+  NonNativeInputTrack(TrackRate aSampleRate,
+                      CubebUtils::AudioDeviceID aDeviceId,
+                      const PrincipalHandle& aPrincipalHandle);
+
+  // Graph Thread APIs, for ProcessedMediaTrack
+  void DestroyImpl() override;
+  void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
+  uint32_t NumberOfChannels() const override;
+
+  // Any thread
+  NonNativeInputTrack* AsNonNativeInputTrack() override { return this; }
+
+  // Graph thread APIs:
+  void StartAudio(RefPtr<AudioInputSource>&& aAudioInputSource);
+  void StopAudio();
+  AudioInputType DevicePreference() const;
+  void NotifyDeviceChanged(AudioInputSource::Id aSourceId);
+  void NotifyInputStopped(AudioInputSource::Id aSourceId);
+  AudioInputSource::Id GenerateSourceId();
+
+ private:
+  ~NonNativeInputTrack() = default;
+
+  // Graph thread only.
+  RefPtr<AudioInputSource> mAudioSource;
+  AudioInputSource::Id mSourceIdNumber;
+
+#ifdef DEBUG
+  // Graph thread only.
+  bool HasGraphThreadChanged();
+  // Graph thread only. Identifies a thread only between StartAudio()
+  // and StopAudio(), to track the thread used with mAudioSource.
+  std::thread::id mGraphThreadId;
+#endif
+};
+
+class AudioInputSourceListener : public AudioInputSource::EventListener {
+ public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInputSourceListener, override);
+
+  explicit AudioInputSourceListener(NonNativeInputTrack* aOwner);
+
+  // Main thread APIs:
+  void AudioDeviceChanged(AudioInputSource::Id aSourceId) override;
+  void AudioStateCallback(
+      AudioInputSource::Id aSourceId,
+      AudioInputSource::EventListener::State aState) override;
+
+ private:
+  ~AudioInputSourceListener() = default;
+  const RefPtr<NonNativeInputTrack> mOwner;
+};
+
+}  // namespace mozilla
+
+#endif  // DOM_MEDIA_DEVICEINPUTTRACK_H_
diff --git a/dom/media/DriftCompensation.h b/dom/media/DriftCompensation.h
new file mode 100644
index 0000000000..ef22f7106f
--- /dev/null
+++ b/dom/media/DriftCompensation.h
@@ -0,0 +1,137 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef DriftCompensation_h_
+#define DriftCompensation_h_
+
+#include "MediaSegment.h"
+#include "VideoUtils.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Unused.h"
+
+namespace mozilla {
+
+static LazyLogModule gDriftCompensatorLog("DriftCompensator");
+#define LOG(type, ...) MOZ_LOG(gDriftCompensatorLog, type, (__VA_ARGS__))
+
+/**
+ * DriftCompensator can be used to handle drift between audio and video tracks
+ * from the MediaTrackGraph.
+ *
+ * Drift can occur because audio is driven by a MediaTrackGraph running off an
+ * audio callback, thus it's progressed by the clock of one of the audio output
+ * devices on the user's machine. Video on the other hand is always expressed
+ * in wall-clock TimeStamps, i.e., it's progressed by the system clock. These
+ * clocks will, over time, drift apart.
+ *
+ * Do not use the DriftCompensator across multiple audio tracks, as it will
+ * automatically record the start time of the first audio samples, and all
+ * samples for the same audio track on the same audio clock will have to be
+ * processed to retain accuracy.
+ *
+ * DriftCompensator is designed to be used from two threads:
+ * - The audio thread for notifications of audio samples.
+ * - The video thread for compensating drift of video frames to match the audio
+ *   clock.
+ */
+class DriftCompensator {
+  const RefPtr<nsIEventTarget> mVideoThread;
+  const TrackRate mAudioRate;
+
+  // Number of audio samples produced. Any thread.
+  Atomic<TrackTime> mAudioSamples{0};
+
+  // Time the first audio samples were added. mVideoThread only.
+ TimeStamp mAudioStartTime; + + void SetAudioStartTime(TimeStamp aTime) { + MOZ_ASSERT(mVideoThread->IsOnCurrentThread()); + MOZ_ASSERT(mAudioStartTime.IsNull()); + mAudioStartTime = aTime; + } + + protected: + virtual ~DriftCompensator() = default; + + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DriftCompensator) + + DriftCompensator(RefPtr aVideoThread, TrackRate aAudioRate) + : mVideoThread(std::move(aVideoThread)), mAudioRate(aAudioRate) { + MOZ_ASSERT(mAudioRate > 0); + } + + void NotifyAudioStart(TimeStamp aStart) { + MOZ_ASSERT(mAudioSamples == 0); + LOG(LogLevel::Info, "DriftCompensator %p at rate %d started", this, + mAudioRate); + nsresult rv = mVideoThread->Dispatch(NewRunnableMethod( + "DriftCompensator::SetAudioStartTime", this, + &DriftCompensator::SetAudioStartTime, aStart)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; + } + + /** + * aSamples is the number of samples fed by an AudioStream. + */ + void NotifyAudio(TrackTime aSamples) { + MOZ_ASSERT(aSamples > 0); + mAudioSamples += aSamples; + + LOG(LogLevel::Verbose, + "DriftCompensator %p Processed another %" PRId64 + " samples; now %.3fs audio", + this, aSamples, static_cast(mAudioSamples) / mAudioRate); + } + + /** + * Drift compensates a video TimeStamp based on historical audio data. + */ + virtual TimeStamp GetVideoTime(TimeStamp aNow, TimeStamp aTime) { + MOZ_ASSERT(mVideoThread->IsOnCurrentThread()); + TrackTime samples = mAudioSamples; + + if (samples / mAudioRate < 10) { + // We don't apply compensation for the first 10 seconds because of the + // higher inaccuracy during this time. + LOG(LogLevel::Debug, "DriftCompensator %p %" PRId64 "ms so far; ignoring", + this, samples * 1000 / mAudioRate); + return aTime; + } + + if (aNow == mAudioStartTime) { + LOG(LogLevel::Warning, + "DriftCompensator %p video scale 0, assuming no drift", this); + return aTime; + } + + double videoScaleUs = (aNow - mAudioStartTime).ToMicroseconds(); + double audioScaleUs = FramesToUsecs(samples, mAudioRate).value(); + double videoDurationUs = (aTime - mAudioStartTime).ToMicroseconds(); + + TimeStamp reclocked = + mAudioStartTime + TimeDuration::FromMicroseconds( + videoDurationUs * audioScaleUs / videoScaleUs); + + LOG(LogLevel::Debug, + "DriftCompensator %p GetVideoTime, v-now: %.3fs, a-now: %.3fs; %.3fs " + "-> %.3fs (d %.3fms)", + this, (aNow - mAudioStartTime).ToSeconds(), + TimeDuration::FromMicroseconds(audioScaleUs).ToSeconds(), + (aTime - mAudioStartTime).ToSeconds(), + (reclocked - mAudioStartTime).ToSeconds(), + (reclocked - aTime).ToMilliseconds()); + + return reclocked; + } +}; + +#undef LOG + +} // namespace mozilla + +#endif /* DriftCompensation_h_ */ diff --git a/dom/media/EncoderTraits.cpp b/dom/media/EncoderTraits.cpp new file mode 100644 index 0000000000..ba6d43f826 --- /dev/null +++ b/dom/media/EncoderTraits.cpp @@ -0,0 +1,17 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "PEMFactory.h" + +namespace mozilla::EncodeTraits { + +// Returns true if it is possible to encode to a particular configuration, false +// otherwise. 
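+// (Usage sketch, not taken from this patch: a caller such as the WebCodecs
+// encoder path would fill out an EncoderConfig with the codec, frame size and
+// bitrate it wants and probe it here before creating an actual encoder.)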
+bool Supports(const EncoderConfig& aConfig) { + RefPtr pem = new PEMFactory(); + return pem->Supports(aConfig); +} +} diff --git a/dom/media/EncoderTraits.h b/dom/media/EncoderTraits.h new file mode 100644 index 0000000000..d96bc37e4a --- /dev/null +++ b/dom/media/EncoderTraits.h @@ -0,0 +1,23 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef EncoderTraits_h_ +#define EncoderTraits_h_ + +#include "mozilla/dom/EncoderTypes.h" +#include "PEMFactory.h" + +namespace mozilla::EncoderSupport { + +bool Supports(const RefPtr& aEncoderConfigInternal) { + RefPtr factory = new PEMFactory(); + EncoderConfig config = aEncoderConfigInternal->ToEncoderConfig(); + return factory->Supports(config); +} + +} // namespace mozilla + +#endif diff --git a/dom/media/ExternalEngineStateMachine.cpp b/dom/media/ExternalEngineStateMachine.cpp new file mode 100644 index 0000000000..68fb053b83 --- /dev/null +++ b/dom/media/ExternalEngineStateMachine.cpp @@ -0,0 +1,1277 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ExternalEngineStateMachine.h" + +#include "PerformanceRecorder.h" +#ifdef MOZ_WMF_MEDIA_ENGINE +# include "MFMediaEngineDecoderModule.h" +# include "mozilla/MFMediaEngineChild.h" +# include "mozilla/StaticPrefs_media.h" +#endif +#include "mozilla/Atomics.h" +#include "mozilla/ClearOnShutdown.h" +#include "mozilla/ProfilerLabels.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/StaticMutex.h" +#include "mozilla/glean/GleanMetrics.h" +#include "nsThreadUtils.h" +#include "VideoUtils.h" + +namespace mozilla { + +extern LazyLogModule gMediaDecoderLog; + +#define FMT(x, ...) \ + "Decoder=%p, State=%s, " x, mDecoderID, GetStateStr(), ##__VA_ARGS__ +#define LOG(x, ...) \ + DDMOZ_LOG(gMediaDecoderLog, LogLevel::Debug, "Decoder=%p, State=%s, " x, \ + mDecoderID, GetStateStr(), ##__VA_ARGS__) +#define LOGV(x, ...) \ + DDMOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, "Decoder=%p, State=%s, " x, \ + mDecoderID, GetStateStr(), ##__VA_ARGS__) +#define LOGW(x, ...) NS_WARNING(nsPrintfCString(FMT(x, ##__VA_ARGS__)).get()) +#define LOGE(x, ...) \ + NS_DebugBreak(NS_DEBUG_WARNING, \ + nsPrintfCString(FMT(x, ##__VA_ARGS__)).get(), nullptr, \ + __FILE__, __LINE__) + +const char* ExternalEngineEventToStr(ExternalEngineEvent aEvent) { +#define EVENT_TO_STR(event) \ + case ExternalEngineEvent::event: \ + return #event + switch (aEvent) { + EVENT_TO_STR(LoadedMetaData); + EVENT_TO_STR(LoadedFirstFrame); + EVENT_TO_STR(LoadedData); + EVENT_TO_STR(Waiting); + EVENT_TO_STR(Playing); + EVENT_TO_STR(Seeked); + EVENT_TO_STR(BufferingStarted); + EVENT_TO_STR(BufferingEnded); + EVENT_TO_STR(Timeupdate); + EVENT_TO_STR(Ended); + EVENT_TO_STR(RequestForAudio); + EVENT_TO_STR(RequestForVideo); + EVENT_TO_STR(AudioEnough); + EVENT_TO_STR(VideoEnough); + default: + MOZ_ASSERT_UNREACHABLE("Undefined event!"); + return "Undefined"; + } +#undef EVENT_TO_STR +} + +/** + * This class monitors the amount of crash happened for a remote engine + * process. 
+ * If the number of crashes of the remote process exceeds the defined
+ * threshold, then `ShouldRecoverProcess()` will return false to indicate that
+ * we should not keep spawning that remote process because it's too easy to
+ * crash.
+ *
+ * In addition, we also have another mechanism in the media format reader
+ * (MFR) to detect the number of crashes of remote processes, but that would
+ * only happen during the decoding process. The main reason to use this simple
+ * monitor, instead of the mechanism in the MFR, is that the MFR's mechanism
+ * can't detect every crash happening in the remote process, such as a crash
+ * happening while initializing the remote engine, or while setting up the CDM
+ * pipeline, which can happen prior to decoding.
+ */
+class ProcessCrashMonitor final {
+ public:
+  static void NotifyCrash() {
+    StaticMutexAutoLock lock(sMutex);
+    auto* monitor = ProcessCrashMonitor::EnsureInstance();
+    if (!monitor) {
+      return;
+    }
+    monitor->mCrashNums++;
+  }
+  static bool ShouldRecoverProcess() {
+    StaticMutexAutoLock lock(sMutex);
+    auto* monitor = ProcessCrashMonitor::EnsureInstance();
+    if (!monitor) {
+      return false;
+    }
+    return monitor->mCrashNums <= monitor->mMaxCrashes;
+  }
+
+ private:
+  ProcessCrashMonitor() : mCrashNums(0) {
+#ifdef MOZ_WMF_MEDIA_ENGINE
+    mMaxCrashes = StaticPrefs::media_wmf_media_engine_max_crashes();
+#else
+    mMaxCrashes = 0;
+#endif
+  };
+  ProcessCrashMonitor(const ProcessCrashMonitor&) = delete;
+  ProcessCrashMonitor& operator=(const ProcessCrashMonitor&) = delete;
+
+  static ProcessCrashMonitor* EnsureInstance() {
+    if (sIsShutdown) {
+      return nullptr;
+    }
+    if (!sCrashMonitor) {
+      sCrashMonitor.reset(new ProcessCrashMonitor());
+      GetMainThreadSerialEventTarget()->Dispatch(
+          NS_NewRunnableFunction("ProcessCrashMonitor::EnsureInstance", [&] {
+            RunOnShutdown(
+                [&] {
+                  StaticMutexAutoLock lock(sMutex);
+                  sCrashMonitor.reset();
+                  sIsShutdown = true;
+                },
+                ShutdownPhase::XPCOMShutdown);
+          }));
+    }
+    return sCrashMonitor.get();
+  }
+
+  static inline StaticMutex sMutex;
+  static inline UniquePtr<ProcessCrashMonitor> sCrashMonitor;
+  static inline Atomic<bool> sIsShutdown{false};
+
+  uint32_t mCrashNums;
+  uint32_t mMaxCrashes;
+};
+
+/* static */
+const char* ExternalEngineStateMachine::StateToStr(State aNextState) {
+#define STATE_TO_STR(state) \
+  case State::state:        \
+    return #state
+  switch (aNextState) {
+    STATE_TO_STR(InitEngine);
+    STATE_TO_STR(ReadingMetadata);
+    STATE_TO_STR(RunningEngine);
+    STATE_TO_STR(SeekingData);
+    STATE_TO_STR(ShutdownEngine);
+    STATE_TO_STR(RecoverEngine);
+    default:
+      MOZ_ASSERT_UNREACHABLE("Undefined state!");
+      return "Undefined";
+  }
+#undef STATE_TO_STR
+}
+
+const char* ExternalEngineStateMachine::GetStateStr() const {
+  return StateToStr(mState.mName);
+}
+
+void ExternalEngineStateMachine::ChangeStateTo(State aNextState) {
+  LOG("Change state : '%s' -> '%s' (play-state=%d)", StateToStr(mState.mName),
+      StateToStr(aNextState), mPlayState.Ref());
+  // Assert the possible state transitions.
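+  // Roughly, the allowed transitions encoded by the assertions below are:
+  //   InitEngine      -> ReadingMetadata | ShutdownEngine
+  //   ReadingMetadata -> RunningEngine   | ShutdownEngine
+  //   RunningEngine   -> SeekingData | RecoverEngine | ShutdownEngine
+  //   SeekingData     -> RunningEngine | RecoverEngine | ShutdownEngine
+  //   RecoverEngine   -> SeekingData | ShutdownEngine
+  //   ShutdownEngine  -> ShutdownEngine (terminal)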
+ MOZ_ASSERT_IF(mState.IsInitEngine(), aNextState == State::ReadingMetadata || + aNextState == State::ShutdownEngine); + MOZ_ASSERT_IF(mState.IsReadingMetadata(), + aNextState == State::RunningEngine || + aNextState == State::ShutdownEngine); + MOZ_ASSERT_IF(mState.IsRunningEngine(), + aNextState == State::SeekingData || + aNextState == State::ShutdownEngine || + aNextState == State::RecoverEngine); + MOZ_ASSERT_IF(mState.IsSeekingData(), + aNextState == State::RunningEngine || + aNextState == State::ShutdownEngine || + aNextState == State::RecoverEngine); + MOZ_ASSERT_IF(mState.IsShutdownEngine(), aNextState == State::ShutdownEngine); + MOZ_ASSERT_IF( + mState.IsRecoverEngine(), + aNextState == State::SeekingData || aNextState == State::ShutdownEngine); + if (aNextState == State::SeekingData) { + mState = StateObject({StateObject::SeekingData()}); + } else if (aNextState == State::ReadingMetadata) { + mState = StateObject({StateObject::ReadingMetadata()}); + } else if (aNextState == State::RunningEngine) { + mState = StateObject({StateObject::RunningEngine()}); + } else if (aNextState == State::ShutdownEngine) { + mState = StateObject({StateObject::ShutdownEngine()}); + } else if (aNextState == State::RecoverEngine) { + mState = StateObject({StateObject::RecoverEngine()}); + } else { + MOZ_ASSERT_UNREACHABLE("Wrong state!"); + } +} + +ExternalEngineStateMachine::ExternalEngineStateMachine( + MediaDecoder* aDecoder, MediaFormatReader* aReader) + : MediaDecoderStateMachineBase(aDecoder, aReader) { + LOG("Created ExternalEngineStateMachine"); + MOZ_ASSERT(mState.IsInitEngine()); + InitEngine(); +} + +void ExternalEngineStateMachine::InitEngine() { + MOZ_ASSERT(mState.IsInitEngine() || mState.IsRecoverEngine()); +#ifdef MOZ_WMF_MEDIA_ENGINE + mEngine.reset(new MFMediaEngineWrapper(this, mFrameStats)); +#endif + if (mEngine) { + auto* state = mState.AsInitEngine(); + state->mInitPromise = mEngine->Init(!mMinimizePreroll); + state->mInitPromise + ->Then(OwnerThread(), __func__, this, + &ExternalEngineStateMachine::OnEngineInitSuccess, + &ExternalEngineStateMachine::OnEngineInitFailure) + ->Track(state->mEngineInitRequest); + } +} + +void ExternalEngineStateMachine::OnEngineInitSuccess() { + AssertOnTaskQueue(); + AUTO_PROFILER_LABEL("ExternalEngineStateMachine::OnEngineInitSuccess", + MEDIA_PLAYBACK); + MOZ_ASSERT(mState.IsInitEngine() || mState.IsRecoverEngine()); + LOG("Initialized the external playback engine %" PRIu64, mEngine->Id()); + auto* state = mState.AsInitEngine(); + state->mEngineInitRequest.Complete(); + mReader->UpdateMediaEngineId(mEngine->Id()); + state->mInitPromise = nullptr; + if (mState.IsInitEngine()) { + ChangeStateTo(State::ReadingMetadata); + ReadMetadata(); + return; + } + // We just recovered from CDM process crash, so we need to update the media + // info to the new CDM process. + MOZ_ASSERT(mInfo); + mEngine->SetMediaInfo(*mInfo); + SeekTarget target(mCurrentPosition.Ref(), SeekTarget::Type::Accurate); + Seek(target); +} + +void ExternalEngineStateMachine::OnEngineInitFailure() { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsInitEngine() || mState.IsRecoverEngine()); + LOGE("Failed to initialize the external playback engine"); + auto* state = mState.AsInitEngine(); + state->mEngineInitRequest.Complete(); + state->mInitPromise = nullptr; + // TODO : Should fallback to the normal playback with media engine. 
+ ReportTelemetry(NS_ERROR_DOM_MEDIA_FATAL_ERR); + DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); +} + +void ExternalEngineStateMachine::ReadMetadata() { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsReadingMetadata()); + mReader->ReadMetadata() + ->Then(OwnerThread(), __func__, this, + &ExternalEngineStateMachine::OnMetadataRead, + &ExternalEngineStateMachine::OnMetadataNotRead) + ->Track(mState.AsReadingMetadata()->mMetadataRequest); +} + +void ExternalEngineStateMachine::OnMetadataRead(MetadataHolder&& aMetadata) { + AssertOnTaskQueue(); + AUTO_PROFILER_LABEL("ExternalEngineStateMachine::OnMetadataRead", + MEDIA_PLAYBACK); + MOZ_ASSERT(mState.IsReadingMetadata()); + LOG("OnMetadataRead"); + + mState.AsReadingMetadata()->mMetadataRequest.Complete(); + mInfo.emplace(*aMetadata.mInfo); + mMediaSeekable = Info().mMediaSeekable; + mMediaSeekableOnlyInBufferedRanges = + Info().mMediaSeekableOnlyInBufferedRanges; + + if (!IsFormatSupportedByExternalEngine(*mInfo)) { + // The external engine doesn't support the type, try to notify the decoder + // to use our own state machine again. Not a real "error", because it would + // fallback to another state machine. + DecodeError( + MediaResult(NS_ERROR_DOM_MEDIA_EXTERNAL_ENGINE_NOT_SUPPORTED_ERR)); + return; + } + +#ifdef MOZ_WMF_MEDIA_ENGINE + // Only support encrypted playback. Not a real "error", because it would + // fallback to another state machine. + if (!mInfo->IsEncrypted() && + StaticPrefs::media_wmf_media_engine_enabled() == 2) { + LOG("External engine only supports encrypted playback by the pref"); + DecodeError( + MediaResult(NS_ERROR_DOM_MEDIA_EXTERNAL_ENGINE_NOT_SUPPORTED_ERR)); + return; + } +#endif + + mEngine->SetMediaInfo(*mInfo); + + if (Info().mMetadataDuration.isSome()) { + mDuration = Info().mMetadataDuration; + } else if (Info().mUnadjustedMetadataEndTime.isSome()) { + const media::TimeUnit unadjusted = Info().mUnadjustedMetadataEndTime.ref(); + const media::TimeUnit adjustment = Info().mStartTime; + mInfo->mMetadataDuration.emplace(unadjusted - adjustment); + mDuration = Info().mMetadataDuration; + } + + // If we don't know the duration by this point, we assume infinity, per spec. + if (mDuration.Ref().isNothing()) { + mDuration = Some(media::TimeUnit::FromInfinity()); + } + MOZ_ASSERT(mDuration.Ref().isSome()); + + if (mInfo->HasVideo()) { + mVideoDisplay = mInfo->mVideo.mDisplay; + } + + LOG("Metadata loaded : a=%s, v=%s, size=[%dx%d], duration=%s", + mInfo->HasAudio() ? mInfo->mAudio.mMimeType.get() : "none", + mInfo->HasVideo() ? 
mInfo->mVideo.mMimeType.get() : "none", + mVideoDisplay.width, mVideoDisplay.height, + mDuration.Ref()->ToString().get()); + + mMetadataLoadedEvent.Notify(std::move(aMetadata.mInfo), + std::move(aMetadata.mTags), + MediaDecoderEventVisibility::Observable); + StartRunningEngine(); +} + +void ExternalEngineStateMachine::OnMetadataNotRead(const MediaResult& aError) { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsReadingMetadata()); + LOGE("Decode metadata failed, shutting down decoder"); + mState.AsReadingMetadata()->mMetadataRequest.Complete(); + ReportTelemetry(aError); + DecodeError(aError); +} + +bool ExternalEngineStateMachine::IsFormatSupportedByExternalEngine( + const MediaInfo& aInfo) { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsReadingMetadata()); +#ifdef MOZ_WMF_MEDIA_ENGINE + const bool audioSupported = + !aInfo.HasAudio() || + MFMediaEngineDecoderModule::SupportsConfig(aInfo.mAudio); + const bool videoSupported = + !aInfo.HasVideo() || + MFMediaEngineDecoderModule::SupportsConfig(aInfo.mVideo); + LOG("audio=%s (supported=%d), video=%s(supported=%d)", + aInfo.HasAudio() ? aInfo.mAudio.mMimeType.get() : "none", audioSupported, + aInfo.HasVideo() ? aInfo.mVideo.mMimeType.get() : "none", videoSupported); + return audioSupported && videoSupported; +#else + return false; +#endif +} + +RefPtr ExternalEngineStateMachine::Seek( + const SeekTarget& aTarget) { + AssertOnTaskQueue(); + if (!mState.IsRunningEngine() && !mState.IsSeekingData() && + !mState.IsRecoverEngine()) { + MOZ_ASSERT(false, "Can't seek due to unsupported state."); + return MediaDecoder::SeekPromise::CreateAndReject(true, __func__); + } + // We don't support these type of seek, because they're depending on the + // implementation of the external engine, which might not be supported. + if (aTarget.IsNextFrame() || aTarget.IsVideoOnly()) { + return MediaDecoder::SeekPromise::CreateAndReject(true, __func__); + } + + LOG("Start seeking to %" PRId64, aTarget.GetTime().ToMicroseconds()); + auto* state = mState.AsSeekingData(); + if (!state) { + // We're in other states, so change the state to seeking. + ChangeStateTo(State::SeekingData); + state = mState.AsSeekingData(); + } + state->SetTarget(aTarget); + + // Update related status. + mSentPlaybackEndedEvent = false; + mOnPlaybackEvent.Notify(MediaPlaybackEvent::SeekStarted); + mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING); + + // Notify the external playback engine about seeking. After the engine changes + // its current time, it would send `seeked` event. + mEngine->Seek(aTarget.GetTime()); + state->mWaitingEngineSeeked = true; + SeekReader(); + return state->mSeekJob.mPromise.Ensure(__func__); +} + +void ExternalEngineStateMachine::SeekReader() { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsSeekingData()); + auto* state = mState.AsSeekingData(); + + // Reset the reader first and ask it to perform a demuxer seek. 
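+  // ResetDecode() (defined below) drops any in-flight RequestAudioData(),
+  // RequestVideoData() and WaitForData() requests, so the demuxer seek starts
+  // from a clean state.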
+ ResetDecode(); + state->mWaitingReaderSeeked = true; + LOG("Seek reader to %" PRId64, state->GetTargetTime().ToMicroseconds()); + mReader->Seek(state->mSeekJob.mTarget.ref()) + ->Then(OwnerThread(), __func__, this, + &ExternalEngineStateMachine::OnSeekResolved, + &ExternalEngineStateMachine::OnSeekRejected) + ->Track(state->mSeekRequest); +} + +void ExternalEngineStateMachine::OnSeekResolved(const media::TimeUnit& aUnit) { + AUTO_PROFILER_LABEL("ExternalEngineStateMachine::OnSeekResolved", + MEDIA_PLAYBACK); + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsSeekingData()); + auto* state = mState.AsSeekingData(); + + LOG("OnReaderSeekResolved"); + state->mSeekRequest.Complete(); + state->mWaitingReaderSeeked = false; + + // Start sending new data to the external playback engine. + if (HasAudio()) { + mHasEnoughAudio = false; + OnRequestAudio(); + } + if (HasVideo()) { + mHasEnoughVideo = false; + OnRequestVideo(); + } + CheckIfSeekCompleted(); +} + +void ExternalEngineStateMachine::OnSeekRejected( + const SeekRejectValue& aReject) { + AUTO_PROFILER_LABEL("ExternalEngineStateMachine::OnSeekRejected", + MEDIA_PLAYBACK); + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsSeekingData()); + auto* state = mState.AsSeekingData(); + + LOG("OnReaderSeekRejected"); + state->mSeekRequest.Complete(); + if (aReject.mError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) { + LOG("OnSeekRejected reason=WAITING_FOR_DATA type=%s", + MediaData::TypeToStr(aReject.mType)); + MOZ_ASSERT_IF(aReject.mType == MediaData::Type::AUDIO_DATA, + !IsRequestingAudioData()); + MOZ_ASSERT_IF(aReject.mType == MediaData::Type::VIDEO_DATA, + !IsRequestingVideoData()); + MOZ_ASSERT_IF(aReject.mType == MediaData::Type::AUDIO_DATA, + !IsWaitingAudioData()); + MOZ_ASSERT_IF(aReject.mType == MediaData::Type::VIDEO_DATA, + !IsWaitingVideoData()); + + // Fire 'waiting' to notify the player that we are waiting for data. + mOnNextFrameStatus.Notify( + MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING); + WaitForData(aReject.mType); + return; + } + + if (aReject.mError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) { + EndOfStream(aReject.mType); + return; + } + + MOZ_ASSERT(NS_FAILED(aReject.mError), + "Cancels should also disconnect mSeekRequest"); + state->RejectIfExists(__func__); + ReportTelemetry(aReject.mError); + DecodeError(aReject.mError); +} + +bool ExternalEngineStateMachine::IsSeeking() { + AssertOnTaskQueue(); + const auto* state = mState.AsSeekingData(); + return state && state->IsSeeking(); +} + +void ExternalEngineStateMachine::CheckIfSeekCompleted() { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsSeekingData()); + auto* state = mState.AsSeekingData(); + if (state->mWaitingEngineSeeked || state->mWaitingReaderSeeked) { + LOG("Seek hasn't been completed yet, waitEngineSeeked=%d, " + "waitReaderSeeked=%d", + state->mWaitingEngineSeeked, state->mWaitingReaderSeeked); + return; + } + + // As seeking should be accurate and we can't control the exact timing inside + // the external media engine. We always set the newCurrentTime = seekTime + // so that the updated HTMLMediaElement.currentTime will always be the seek + // target. 
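+  // For example, a seek to 10.000s for which the engine actually lands at
+  // 10.023s still reports HTMLMediaElement.currentTime == 10.000s here;
+  // playback then simply continues from wherever the engine really is.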
+ if (state->GetTargetTime() != mCurrentPosition) { + LOG("Force adjusting current time (%" PRId64 + ") to match to target (%" PRId64 ")", + mCurrentPosition.Ref().ToMicroseconds(), + state->GetTargetTime().ToMicroseconds()); + mCurrentPosition = state->GetTargetTime(); + } + + LOG("Seek completed"); + state->Resolve(__func__); + mOnPlaybackEvent.Notify(MediaPlaybackEvent::Invalidate); + mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_AVAILABLE); + StartRunningEngine(); +} + +void ExternalEngineStateMachine::ResetDecode() { + AssertOnTaskQueue(); + if (!mInfo) { + return; + } + + LOG("ResetDecode"); + MediaFormatReader::TrackSet tracks; + if (HasVideo()) { + mVideoDataRequest.DisconnectIfExists(); + mVideoWaitRequest.DisconnectIfExists(); + tracks += TrackInfo::kVideoTrack; + } + if (HasAudio()) { + mAudioDataRequest.DisconnectIfExists(); + mAudioWaitRequest.DisconnectIfExists(); + tracks += TrackInfo::kAudioTrack; + } + mReader->ResetDecode(tracks); +} + +RefPtr ExternalEngineStateMachine::InvokeSetSink( + const RefPtr& aSink) { + MOZ_ASSERT(NS_IsMainThread()); + // TODO : can media engine support this? + return GenericPromise::CreateAndReject(NS_ERROR_FAILURE, __func__); +} + +RefPtr ExternalEngineStateMachine::Shutdown() { + AssertOnTaskQueue(); + if (mState.IsShutdownEngine()) { + LOG("Already shutdown"); + return mState.AsShutdownEngine()->mShutdown; + } + + LOG("Shutdown"); + ChangeStateTo(State::ShutdownEngine); + ResetDecode(); + + mAudioDataRequest.DisconnectIfExists(); + mVideoDataRequest.DisconnectIfExists(); + mAudioWaitRequest.DisconnectIfExists(); + mVideoWaitRequest.DisconnectIfExists(); + + mDuration.DisconnectAll(); + mCurrentPosition.DisconnectAll(); + // TODO : implement audible check + mIsAudioDataAudible.DisconnectAll(); + + mMetadataManager.Disconnect(); + + mSetCDMProxyPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_ABORT_ERR, __func__); + mSetCDMProxyRequest.DisconnectIfExists(); + + mEngine->Shutdown(); + + auto* state = mState.AsShutdownEngine(); + state->mShutdown = mReader->Shutdown()->Then( + OwnerThread(), __func__, [self = RefPtr{this}, this]() { + LOG("Shutting down state machine task queue"); + return OwnerThread()->BeginShutdown(); + }); + return state->mShutdown; +} + +void ExternalEngineStateMachine::BufferedRangeUpdated() { + AssertOnTaskQueue(); + AUTO_PROFILER_LABEL("ExternalEngineStateMachine::BufferedRangeUpdated", + MEDIA_PLAYBACK); + + // While playing an unseekable stream of unknown duration, mDuration + // is updated as we play. But if data is being downloaded + // faster than played, mDuration won't reflect the end of playable data + // since we haven't played the frame at the end of buffered data. So update + // mDuration here as new data is downloaded to prevent such a lag. + if (mBuffered.Ref().IsInvalid()) { + return; + } + + bool exists; + media::TimeUnit end{mBuffered.Ref().GetEnd(&exists)}; + if (!exists) { + return; + } + + // Use estimated duration from buffer ranges when mDuration is unknown or + // the estimated duration is larger. + if (mDuration.Ref().isNothing() || mDuration.Ref()->IsInfinite() || + end > mDuration.Ref().ref()) { + mDuration = Some(end); + DDLOG(DDLogCategory::Property, "duration_us", + mDuration.Ref()->ToMicroseconds()); + } +} + +// Note: the variadic only supports passing member variables. +#define PERFORM_WHEN_ALLOW(Func, ...) 
\ + do { \ + /* Initialzation is not done yet, postpone the operation */ \ + if ((mState.IsInitEngine() || mState.IsRecoverEngine()) && \ + mState.AsInitEngine()->mInitPromise) { \ + LOG("%s is called before init", __func__); \ + mState.AsInitEngine()->mInitPromise->Then( \ + OwnerThread(), __func__, \ + [self = RefPtr{this}, this]( \ + const GenericNonExclusivePromise::ResolveOrRejectValue& aVal) { \ + if (aVal.IsResolve()) { \ + Func(__VA_ARGS__); \ + } \ + }); \ + return; \ + } else if (mState.IsShutdownEngine()) { \ + return; \ + } \ + } while (false) + +void ExternalEngineStateMachine::SetPlaybackRate(double aPlaybackRate) { + AssertOnTaskQueue(); + mPlaybackRate = aPlaybackRate; + PERFORM_WHEN_ALLOW(SetPlaybackRate, mPlaybackRate); + mEngine->SetPlaybackRate(aPlaybackRate); +} + +void ExternalEngineStateMachine::VolumeChanged() { + AssertOnTaskQueue(); + PERFORM_WHEN_ALLOW(VolumeChanged); + mEngine->SetVolume(mVolume); +} + +void ExternalEngineStateMachine::PreservesPitchChanged() { + AssertOnTaskQueue(); + PERFORM_WHEN_ALLOW(PreservesPitchChanged); + mEngine->SetPreservesPitch(mPreservesPitch); +} + +void ExternalEngineStateMachine::PlayStateChanged() { + AssertOnTaskQueue(); + PERFORM_WHEN_ALLOW(PlayStateChanged); + if (mPlayState == MediaDecoder::PLAY_STATE_PLAYING) { + mEngine->Play(); + } else if (mPlayState == MediaDecoder::PLAY_STATE_PAUSED) { + mEngine->Pause(); + } +} + +void ExternalEngineStateMachine::LoopingChanged() { + AssertOnTaskQueue(); + PERFORM_WHEN_ALLOW(LoopingChanged); + mEngine->SetLooping(mLooping); +} + +#undef PERFORM_WHEN_ALLOW + +void ExternalEngineStateMachine::EndOfStream(MediaData::Type aType) { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsRunningEngine() || mState.IsSeekingData()); + static auto DataTypeToTrackType = [](const MediaData::Type& aType) { + if (aType == MediaData::Type::VIDEO_DATA) { + return TrackInfo::TrackType::kVideoTrack; + } + if (aType == MediaData::Type::AUDIO_DATA) { + return TrackInfo::TrackType::kAudioTrack; + } + return TrackInfo::TrackType::kUndefinedTrack; + }; + mEngine->NotifyEndOfStream(DataTypeToTrackType(aType)); +} + +void ExternalEngineStateMachine::WaitForData(MediaData::Type aType) { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsRunningEngine() || mState.IsSeekingData()); + AUTO_PROFILER_LABEL("ExternalEngineStateMachine::WaitForData", + MEDIA_PLAYBACK); + MOZ_ASSERT(aType == MediaData::Type::AUDIO_DATA || + aType == MediaData::Type::VIDEO_DATA); + + LOG("WaitForData"); + RefPtr self = this; + if (aType == MediaData::Type::AUDIO_DATA) { + MOZ_ASSERT(HasAudio()); + mReader->WaitForData(MediaData::Type::AUDIO_DATA) + ->Then( + OwnerThread(), __func__, + [self, this](MediaData::Type aType) { + AUTO_PROFILER_LABEL( + "ExternalEngineStateMachine::WaitForData:AudioResolved", + MEDIA_PLAYBACK); + MOZ_ASSERT(aType == MediaData::Type::AUDIO_DATA); + LOG("Done waiting for audio data"); + mAudioWaitRequest.Complete(); + MaybeFinishWaitForData(); + }, + [self, this](const WaitForDataRejectValue& aRejection) { + AUTO_PROFILER_LABEL( + "ExternalEngineStateMachine::WaitForData:AudioRejected", + MEDIA_PLAYBACK); + mAudioWaitRequest.Complete(); + DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA); + }) + ->Track(mAudioWaitRequest); + } else { + MOZ_ASSERT(HasVideo()); + mReader->WaitForData(MediaData::Type::VIDEO_DATA) + ->Then( + OwnerThread(), __func__, + [self, this](MediaData::Type aType) { + AUTO_PROFILER_LABEL( + "ExternalEngineStateMachine::WaitForData:VideoResolved", + MEDIA_PLAYBACK); + MOZ_ASSERT(aType == 
MediaData::Type::VIDEO_DATA); + LOG("Done waiting for video data"); + mVideoWaitRequest.Complete(); + MaybeFinishWaitForData(); + }, + [self, this](const WaitForDataRejectValue& aRejection) { + AUTO_PROFILER_LABEL( + "ExternalEngineStateMachine::WaitForData:VideoRejected", + MEDIA_PLAYBACK); + mVideoWaitRequest.Complete(); + DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA); + }) + ->Track(mVideoWaitRequest); + } +} + +void ExternalEngineStateMachine::MaybeFinishWaitForData() { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsRunningEngine() || mState.IsSeekingData()); + + bool isWaitingForAudio = HasAudio() && mAudioWaitRequest.Exists(); + bool isWaitingForVideo = HasVideo() && mVideoWaitRequest.Exists(); + if (isWaitingForAudio || isWaitingForVideo) { + LOG("Still waiting for data (waitAudio=%d, waitVideo=%d)", + isWaitingForAudio, isWaitingForVideo); + return; + } + + LOG("Finished waiting for data"); + if (mState.IsSeekingData()) { + SeekReader(); + return; + } + if (HasAudio()) { + RunningEngineUpdate(MediaData::Type::AUDIO_DATA); + } + if (HasVideo()) { + RunningEngineUpdate(MediaData::Type::VIDEO_DATA); + } +} + +void ExternalEngineStateMachine::StartRunningEngine() { + ChangeStateTo(State::RunningEngine); + // Manually check the play state because the engine might be recovered from + // crash or just get recreated, so PlayStateChanged() won't be triggered. + if (mPlayState == MediaDecoder::PLAY_STATE_PLAYING) { + mEngine->Play(); + } + if (HasAudio()) { + RunningEngineUpdate(MediaData::Type::AUDIO_DATA); + } + if (HasVideo()) { + RunningEngineUpdate(MediaData::Type::VIDEO_DATA); + } +} + +void ExternalEngineStateMachine::RunningEngineUpdate(MediaData::Type aType) { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsRunningEngine() || mState.IsSeekingData()); + if (aType == MediaData::Type::AUDIO_DATA && !mHasEnoughAudio) { + OnRequestAudio(); + } + if (aType == MediaData::Type::VIDEO_DATA && !mHasEnoughVideo) { + OnRequestVideo(); + } +} + +void ExternalEngineStateMachine::OnRequestAudio() { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsRunningEngine() || mState.IsSeekingData()); + LOGV("OnRequestAudio"); + + if (!HasAudio()) { + return; + } + + if (IsRequestingAudioData() || mAudioWaitRequest.Exists() || IsSeeking()) { + LOGV( + "No need to request audio, isRequesting=%d, waitingAudio=%d, " + "isSeeking=%d", + IsRequestingAudioData(), mAudioWaitRequest.Exists(), IsSeeking()); + return; + } + + LOGV("Start requesting audio"); + PerformanceRecorder perfRecorder(MediaStage::RequestData); + RefPtr self = this; + mReader->RequestAudioData() + ->Then( + OwnerThread(), __func__, + [this, self, perfRecorder(std::move(perfRecorder))]( + const RefPtr& aAudio) mutable { + perfRecorder.Record(); + mAudioDataRequest.Complete(); + LOGV("Completed requesting audio"); + AUTO_PROFILER_LABEL( + "ExternalEngineStateMachine::OnRequestAudio:Resolved", + MEDIA_PLAYBACK); + MOZ_ASSERT(aAudio); + RunningEngineUpdate(MediaData::Type::AUDIO_DATA); + }, + [this, self](const MediaResult& aError) { + mAudioDataRequest.Complete(); + AUTO_PROFILER_LABEL( + "ExternalEngineStateMachine::OnRequestAudio:Rejected", + MEDIA_PLAYBACK); + LOG("OnRequestAudio ErrorName=%s Message=%s", + aError.ErrorName().get(), aError.Message().get()); + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: + WaitForData(MediaData::Type::AUDIO_DATA); + break; + case NS_ERROR_DOM_MEDIA_CANCELED: + OnRequestAudio(); + break; + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + LOG("Reach to the end, no more audio data"); + 
EndOfStream(MediaData::Type::AUDIO_DATA); + break; + case NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_MF_CDM_ERR: + // We will handle the process crash in `NotifyErrorInternal()` + // so here just silently ignore this. + break; + default: + ReportTelemetry(aError); + DecodeError(aError); + } + }) + ->Track(mAudioDataRequest); +} + +void ExternalEngineStateMachine::OnRequestVideo() { + AssertOnTaskQueue(); + MOZ_ASSERT(mState.IsRunningEngine() || mState.IsSeekingData()); + LOGV("OnRequestVideo"); + + if (!HasVideo()) { + return; + } + + if (IsRequestingVideoData() || mVideoWaitRequest.Exists() || IsSeeking()) { + LOGV( + "No need to request video, isRequesting=%d, waitingVideo=%d, " + "isSeeking=%d", + IsRequestingVideoData(), mVideoWaitRequest.Exists(), IsSeeking()); + return; + } + + LOGV("Start requesting video"); + PerformanceRecorder perfRecorder(MediaStage::RequestData, + Info().mVideo.mImage.height); + RefPtr self = this; + mReader->RequestVideoData(GetVideoThreshold(), false) + ->Then( + OwnerThread(), __func__, + [this, self, perfRecorder(std::move(perfRecorder))]( + const RefPtr& aVideo) mutable { + perfRecorder.Record(); + mVideoDataRequest.Complete(); + LOGV("Completed requesting video"); + AUTO_PROFILER_LABEL( + "ExternalEngineStateMachine::OnRequestVideo:Resolved", + MEDIA_PLAYBACK); + MOZ_ASSERT(aVideo); + if (!mHasReceivedFirstDecodedVideoFrame) { + mHasReceivedFirstDecodedVideoFrame = true; + OnLoadedFirstFrame(); + } + RunningEngineUpdate(MediaData::Type::VIDEO_DATA); + // Send image to PIP window. + if (mSecondaryVideoContainer.Ref()) { + mSecondaryVideoContainer.Ref()->SetCurrentFrame( + mVideoDisplay, aVideo->mImage, TimeStamp::Now()); + } else { + mVideoFrameContainer->SetCurrentFrame( + mVideoDisplay, aVideo->mImage, TimeStamp::Now()); + } + }, + [this, self](const MediaResult& aError) { + mVideoDataRequest.Complete(); + AUTO_PROFILER_LABEL( + "ExternalEngineStateMachine::OnRequestVideo:Rejected", + MEDIA_PLAYBACK); + LOG("OnRequestVideo ErrorName=%s Message=%s", + aError.ErrorName().get(), aError.Message().get()); + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: + WaitForData(MediaData::Type::VIDEO_DATA); + break; + case NS_ERROR_DOM_MEDIA_CANCELED: + OnRequestVideo(); + break; + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + LOG("Reach to the end, no more video data"); + EndOfStream(MediaData::Type::VIDEO_DATA); + break; + case NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_MF_CDM_ERR: + // We will handle the process crash in `NotifyErrorInternal()` + // so here just silently ignore this. + break; + default: + ReportTelemetry(aError); + DecodeError(aError); + } + }) + ->Track(mVideoDataRequest); +} + +void ExternalEngineStateMachine::OnLoadedFirstFrame() { + AssertOnTaskQueue(); + // We will wait until receive the first video frame. + if (mInfo->HasVideo() && !mHasReceivedFirstDecodedVideoFrame) { + LOGV("Hasn't received first decoded video frame"); + return; + } + LOGV("OnLoadedFirstFrame"); + MediaDecoderEventVisibility visibility = + mSentFirstFrameLoadedEvent ? MediaDecoderEventVisibility::Suppressed + : MediaDecoderEventVisibility::Observable; + mSentFirstFrameLoadedEvent = true; + mFirstFrameLoadedEvent.Notify(UniquePtr(new MediaInfo(Info())), + visibility); + mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_AVAILABLE); +} + +void ExternalEngineStateMachine::OnLoadedData() { + AssertOnTaskQueue(); + // In case the external engine doesn't send the first frame loaded event + // correctly. 
+ if (!mSentFirstFrameLoadedEvent) { + OnLoadedFirstFrame(); + } + mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_AVAILABLE); +} + +void ExternalEngineStateMachine::OnWaiting() { + AssertOnTaskQueue(); + mOnNextFrameStatus.Notify( + MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING); +} + +void ExternalEngineStateMachine::OnPlaying() { + AssertOnTaskQueue(); + mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_AVAILABLE); +} + +void ExternalEngineStateMachine::OnSeeked() { + AssertOnTaskQueue(); + if (!mState.IsSeekingData()) { + LOG("Engine Seeking has been completed, ignore the event"); + return; + } + MOZ_ASSERT(mState.IsSeekingData()); + + const auto currentTime = mEngine->GetCurrentPosition(); + auto* state = mState.AsSeekingData(); + LOG("OnEngineSeeked, target=%" PRId64 ", currentTime=%" PRId64, + state->GetTargetTime().ToMicroseconds(), currentTime.ToMicroseconds()); + // It's possible to receive multiple seeked event if we seek the engine + // before the previous seeking finishes, so we would wait until the last + // seeking is finished. + if (currentTime >= state->GetTargetTime()) { + state->mWaitingEngineSeeked = false; + CheckIfSeekCompleted(); + } +} + +void ExternalEngineStateMachine::OnBufferingStarted() { + AssertOnTaskQueue(); + mOnNextFrameStatus.Notify( + MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING); + if (HasAudio()) { + WaitForData(MediaData::Type::AUDIO_DATA); + } + if (HasVideo()) { + WaitForData(MediaData::Type::VIDEO_DATA); + } +} + +void ExternalEngineStateMachine::OnBufferingEnded() { + AssertOnTaskQueue(); + mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_AVAILABLE); +} + +void ExternalEngineStateMachine::OnEnded() { + AssertOnTaskQueue(); + if (mSentPlaybackEndedEvent) { + return; + } + LOG("Playback is ended"); + mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE); + mOnPlaybackEvent.Notify(MediaPlaybackEvent::PlaybackEnded); + mSentPlaybackEndedEvent = true; +} + +void ExternalEngineStateMachine::OnTimeupdate() { + AssertOnTaskQueue(); + if (IsSeeking()) { + return; + } + mCurrentPosition = mEngine->GetCurrentPosition(); + if (mDuration.Ref().ref() < mCurrentPosition.Ref()) { + mDuration = Some(mCurrentPosition.Ref()); + } +} + +void ExternalEngineStateMachine::NotifyEventInternal( + ExternalEngineEvent aEvent) { + AssertOnTaskQueue(); + AUTO_PROFILER_LABEL("ExternalEngineStateMachine::NotifyEventInternal", + MEDIA_PLAYBACK); + LOG("Receive event %s", ExternalEngineEventToStr(aEvent)); + if (mState.IsShutdownEngine()) { + return; + } + switch (aEvent) { + case ExternalEngineEvent::LoadedMetaData: + // We read metadata by ourselves, ignore this if there is any. 
+ MOZ_ASSERT(mInfo); + break; + case ExternalEngineEvent::LoadedFirstFrame: + OnLoadedFirstFrame(); + break; + case ExternalEngineEvent::LoadedData: + OnLoadedData(); + break; + case ExternalEngineEvent::Waiting: + OnWaiting(); + break; + case ExternalEngineEvent::Playing: + OnPlaying(); + break; + case ExternalEngineEvent::Seeked: + OnSeeked(); + break; + case ExternalEngineEvent::BufferingStarted: + OnBufferingStarted(); + break; + case ExternalEngineEvent::BufferingEnded: + OnBufferingEnded(); + break; + case ExternalEngineEvent::Timeupdate: + OnTimeupdate(); + break; + case ExternalEngineEvent::Ended: + OnEnded(); + break; + case ExternalEngineEvent::RequestForAudio: + mHasEnoughAudio = false; + if (ShouldRunEngineUpdateForRequest()) { + RunningEngineUpdate(MediaData::Type::AUDIO_DATA); + } + break; + case ExternalEngineEvent::RequestForVideo: + mHasEnoughVideo = false; + if (ShouldRunEngineUpdateForRequest()) { + RunningEngineUpdate(MediaData::Type::VIDEO_DATA); + } + break; + case ExternalEngineEvent::AudioEnough: + mHasEnoughAudio = true; + break; + case ExternalEngineEvent::VideoEnough: + mHasEnoughVideo = true; + break; + default: + MOZ_ASSERT_UNREACHABLE("Undefined event!"); + break; + } +} + +bool ExternalEngineStateMachine::ShouldRunEngineUpdateForRequest() { + // Running engine update will request new data, which could be run on + // `RunningEngine` or `SeekingData` state. However, in `SeekingData` we should + // only request new data after finishing reader seek, otherwise the reader + // would start requesting data from a wrong position. + return mState.IsRunningEngine() || + (mState.AsSeekingData() && + !mState.AsSeekingData()->mWaitingReaderSeeked); +} + +void ExternalEngineStateMachine::NotifyErrorInternal( + const MediaResult& aError) { + AssertOnTaskQueue(); + LOG("Engine error: %s", aError.Description().get()); + if (aError == NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR) { + // The external engine doesn't support the type, try to notify the decoder + // to use our own state machine again. 
+ ReportTelemetry(NS_ERROR_DOM_MEDIA_EXTERNAL_ENGINE_NOT_SUPPORTED_ERR); + DecodeError( + MediaResult(NS_ERROR_DOM_MEDIA_EXTERNAL_ENGINE_NOT_SUPPORTED_ERR)); + } else if (aError == NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_MF_CDM_ERR) { + ReportTelemetry(NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_MF_CDM_ERR); + RecoverFromCDMProcessCrashIfNeeded(); + } else { + ReportTelemetry(aError); + DecodeError(aError); + } +} + +void ExternalEngineStateMachine::NotifyResizingInternal(uint32_t aWidth, + uint32_t aHeight) { + LOG("video resize from [%d,%d] to [%d,%d]", mVideoDisplay.width, + mVideoDisplay.height, aWidth, aHeight); + mVideoDisplay = gfx::IntSize{aWidth, aHeight}; +} + +void ExternalEngineStateMachine::RecoverFromCDMProcessCrashIfNeeded() { + AssertOnTaskQueue(); + if (mState.IsRecoverEngine()) { + return; + } + ProcessCrashMonitor::NotifyCrash(); + if (!ProcessCrashMonitor::ShouldRecoverProcess()) { + LOG("CDM process has crashed too many times, abort recovery"); + DecodeError( + MediaResult(NS_ERROR_DOM_MEDIA_EXTERNAL_ENGINE_NOT_SUPPORTED_ERR)); + return; + } + + LOG("CDM process crashed, recover the engine again (last time=%" PRId64 ")", + mCurrentPosition.Ref().ToMicroseconds()); + ChangeStateTo(State::RecoverEngine); + if (HasVideo()) { + mVideoDataRequest.DisconnectIfExists(); + mVideoWaitRequest.DisconnectIfExists(); + } + if (HasAudio()) { + mAudioDataRequest.DisconnectIfExists(); + mAudioWaitRequest.DisconnectIfExists(); + } + // Ask the reader to shutdown current decoders which are no longer available + // due to the remote process crash. + mReader->ReleaseResources(); + InitEngine(); +} + +media::TimeUnit ExternalEngineStateMachine::GetVideoThreshold() { + AssertOnTaskQueue(); + if (auto* state = mState.AsSeekingData()) { + return state->GetTargetTime(); + } + return mCurrentPosition.Ref(); +} + +void ExternalEngineStateMachine::UpdateSecondaryVideoContainer() { + AssertOnTaskQueue(); + LOG("UpdateSecondaryVideoContainer=%p", mSecondaryVideoContainer.Ref().get()); + mOnSecondaryVideoContainerInstalled.Notify(mSecondaryVideoContainer.Ref()); +} + +RefPtr ExternalEngineStateMachine::SetCDMProxy( + CDMProxy* aProxy) { + if (mState.IsShutdownEngine()) { + return SetCDMPromise::CreateAndReject(NS_ERROR_FAILURE, __func__); + } + + if (mState.IsInitEngine() && mState.AsInitEngine()->mInitPromise) { + LOG("SetCDMProxy is called before init"); + mState.AsInitEngine()->mInitPromise->Then( + OwnerThread(), __func__, + [self = RefPtr{this}, proxy = RefPtr{aProxy}, + this](const GenericNonExclusivePromise::ResolveOrRejectValue& aVal) { + SetCDMProxy(proxy) + ->Then(OwnerThread(), __func__, + [self = RefPtr{this}, + this](const SetCDMPromise::ResolveOrRejectValue& aVal) { + mSetCDMProxyRequest.Complete(); + if (aVal.IsResolve()) { + mSetCDMProxyPromise.Resolve(true, __func__); + } else { + mSetCDMProxyPromise.Reject(NS_ERROR_DOM_MEDIA_CDM_ERR, + __func__); + } + }) + ->Track(mSetCDMProxyRequest); + }); + return mSetCDMProxyPromise.Ensure(__func__); + } + + // TODO : set CDM proxy again if we recreate the media engine after crash. 
+ mKeySystem = NS_ConvertUTF16toUTF8(aProxy->KeySystem()); + LOG("SetCDMProxy=%p (key-system=%s)", aProxy, mKeySystem.get()); + MOZ_DIAGNOSTIC_ASSERT(mEngine); + if (!mEngine->SetCDMProxy(aProxy)) { + LOG("Failed to set CDM proxy on the engine"); + return SetCDMPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CDM_ERR, __func__); + } + return MediaDecoderStateMachineBase::SetCDMProxy(aProxy); +} + +bool ExternalEngineStateMachine::IsCDMProxySupported(CDMProxy* aProxy) { +#ifdef MOZ_WMF_CDM + MOZ_ASSERT(aProxy); + // 1=enabled encrypted and clear, 2=enabled encrytped + if (StaticPrefs::media_wmf_media_engine_enabled() != 1 && + StaticPrefs::media_wmf_media_engine_enabled() != 2) { + return false; + } + + // The CDM needs to be hosted in the same process of the external engine, and + // only WMFCDM meets this requirement. + return aProxy->AsWMFCDMProxy(); +#else + return false; +#endif +} + +void ExternalEngineStateMachine::ReportTelemetry(const MediaResult& aError) { + glean::mfcdm::ErrorExtra extraData; + extraData.errorName = Some(aError.ErrorName()); + nsAutoCString resolution; + if (mInfo) { + if (mInfo->HasAudio()) { + extraData.audioCodec = Some(mInfo->mAudio.mMimeType); + } + if (mInfo->HasVideo()) { + extraData.videoCodec = Some(mInfo->mVideo.mMimeType); + DetermineResolutionForTelemetry(*mInfo, resolution); + extraData.resolution = Some(resolution); + } + } + if (!mKeySystem.IsEmpty()) { + extraData.keySystem = Some(mKeySystem); + } + glean::mfcdm::error.Record(Some(extraData)); + if (MOZ_LOG_TEST(gMediaDecoderLog, LogLevel::Debug)) { + nsPrintfCString logMessage{"MFCDM Error event, error=%s", + aError.ErrorName().get()}; + if (mInfo) { + if (mInfo->HasAudio()) { + logMessage.Append( + nsPrintfCString{", audio=%s", mInfo->mAudio.mMimeType.get()}); + } + if (mInfo->HasVideo()) { + logMessage.Append(nsPrintfCString{", video=%s, resolution=%s", + mInfo->mVideo.mMimeType.get(), + resolution.get()}); + } + } + if (!mKeySystem.IsEmpty()) { + logMessage.Append(nsPrintfCString{", keySystem=%s", mKeySystem.get()}); + } + LOG("%s", logMessage.get()); + } +} + +#undef FMT +#undef LOG +#undef LOGV +#undef LOGW +#undef LOGE + +} // namespace mozilla diff --git a/dom/media/ExternalEngineStateMachine.h b/dom/media/ExternalEngineStateMachine.h new file mode 100644 index 0000000000..84dedbe717 --- /dev/null +++ b/dom/media/ExternalEngineStateMachine.h @@ -0,0 +1,346 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef DOM_MEDIA_EXTERNALENGINESTATEMACHINE_H_ +#define DOM_MEDIA_EXTERNALENGINESTATEMACHINE_H_ + +#include "MediaDecoderStateMachineBase.h" +#include "SeekJob.h" +#include "mozilla/Variant.h" + +namespace mozilla { + +/** + * ExternalPlaybackEngine represents a media engine which is responsible for + * decoding and playback, which are not controlled by Gecko. + */ +class ExternalPlaybackEngine; + +enum class ExternalEngineEvent { + LoadedMetaData, + LoadedFirstFrame, + LoadedData, + Waiting, + Playing, + Seeked, + BufferingStarted, + BufferingEnded, + Timeupdate, + Ended, + RequestForAudio, + RequestForVideo, + AudioEnough, + VideoEnough, +}; +const char* ExternalEngineEventToStr(ExternalEngineEvent aEvent); + +/** + * When using ExternalEngineStateMachine, that means we use an external engine + * to control decoding and playback (including A/V sync). Eg. Media Foundation + * Media Engine on Windows. 
+ * + * The external engine does most of playback works, and uses ExternalEngineEvent + * to tell us its internal state. Therefore, this state machine is responsible + * to address those events from the engine and coordinate the format reader in + * order to provide data to the engine correctly. + */ +DDLoggedTypeDeclName(ExternalEngineStateMachine); + +class ExternalEngineStateMachine final + : public MediaDecoderStateMachineBase, + public DecoderDoctorLifeLogger { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ExternalEngineStateMachine, override) + + ExternalEngineStateMachine(MediaDecoder* aDecoder, + MediaFormatReader* aReader); + + RefPtr InvokeSetSink( + const RefPtr& aSink) override; + + // The media sample would be managed by the external engine so we won't store + // any samples in our side. + size_t SizeOfVideoQueue() const override { return 0; } + size_t SizeOfAudioQueue() const override { return 0; } + + // Not supported. + void SetVideoDecodeMode(VideoDecodeMode aMode) override {} + void InvokeSuspendMediaSink() override {} + void InvokeResumeMediaSink() override {} + RefPtr RequestDebugInfo( + dom::MediaDecoderStateMachineDebugInfo& aInfo) override { + // This debug info doesn't fit in this scenario because most decoding + // details are only visible inside the external engine. + return GenericPromise::CreateAndResolve(true, __func__); + } + + void NotifyEvent(ExternalEngineEvent aEvent) { + // On the engine manager thread. + Unused << OwnerThread()->Dispatch(NS_NewRunnableFunction( + "ExternalEngineStateMachine::NotifyEvent", + [self = RefPtr{this}, aEvent] { self->NotifyEventInternal(aEvent); })); + } + void NotifyError(const MediaResult& aError) { + // On the engine manager thread. + Unused << OwnerThread()->Dispatch(NS_NewRunnableFunction( + "ExternalEngineStateMachine::NotifyError", + [self = RefPtr{this}, aError] { self->NotifyErrorInternal(aError); })); + } + void NotifyResizing(uint32_t aWidth, uint32_t aHeight) { + // On the engine manager thread. + Unused << OwnerThread()->Dispatch( + NS_NewRunnableFunction("ExternalEngineStateMachine::NotifyResizing", + [self = RefPtr{this}, aWidth, aHeight] { + self->NotifyResizingInternal(aWidth, aHeight); + })); + } + + const char* GetStateStr() const; + + RefPtr SetCDMProxy(CDMProxy* aProxy) override; + + bool IsCDMProxySupported(CDMProxy* aProxy) override; + + private: + ~ExternalEngineStateMachine() = default; + + void AssertOnTaskQueue() const { MOZ_ASSERT(OnTaskQueue()); } + + // A light-weight state object that helps to store some variables which would + // only be used in a certain state. Also be able to do the cleaning for the + // state transition. Only modify on the task queue. 
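+  //
+  // Illustrative lifecycle (a sketch inferred from this header, not a
+  // normative contract): InitEngine -> ReadingMetadata -> RunningEngine, with
+  // SeekingData entered and left around Seek(), ShutdownEngine entered from
+  // Shutdown(), and RecoverEngine used to re-create the engine after an
+  // MF CDM process crash.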
+ struct StateObject final { + enum class State { + InitEngine, + ReadingMetadata, + RunningEngine, + SeekingData, + ShutdownEngine, + RecoverEngine, + }; + struct InitEngine { + InitEngine() = default; + ~InitEngine() { mEngineInitRequest.DisconnectIfExists(); } + MozPromiseRequestHolder mEngineInitRequest; + RefPtr mInitPromise; + }; + struct ReadingMetadata { + ReadingMetadata() = default; + ~ReadingMetadata() { mMetadataRequest.DisconnectIfExists(); } + MozPromiseRequestHolder + mMetadataRequest; + }; + struct RunningEngine {}; + struct SeekingData { + SeekingData() = default; + SeekingData(SeekingData&&) = default; + SeekingData(const SeekingData&) = delete; + SeekingData& operator=(const SeekingData&) = delete; + ~SeekingData() { + mSeekJob.RejectIfExists(__func__); + mSeekRequest.DisconnectIfExists(); + } + void SetTarget(const SeekTarget& aTarget) { + // If there is any promise for previous seeking, reject it first. + mSeekJob.RejectIfExists(__func__); + mSeekRequest.DisconnectIfExists(); + // Then create a new seek job. + mSeekJob = SeekJob(); + mSeekJob.mTarget = Some(aTarget); + } + void Resolve(const char* aCallSite) { + MOZ_ASSERT(mSeekJob.Exists()); + mSeekJob.Resolve(aCallSite); + mSeekJob = SeekJob(); + } + void RejectIfExists(const char* aCallSite) { + mSeekJob.RejectIfExists(aCallSite); + } + bool IsSeeking() const { return mSeekRequest.Exists(); } + media::TimeUnit GetTargetTime() const { + return mSeekJob.mTarget ? mSeekJob.mTarget->GetTime() + : media::TimeUnit::Invalid(); + } + // Set it to true when starting seeking, and would be set to false after + // receiving engine's `seeked` event. Used on thhe task queue only. + bool mWaitingEngineSeeked = false; + bool mWaitingReaderSeeked = false; + MozPromiseRequestHolder mSeekRequest; + SeekJob mSeekJob; + }; + struct ShutdownEngine { + RefPtr mShutdown; + }; + // This state is used to recover the media engine after the MF CDM process + // crashes. + struct RecoverEngine : public InitEngine {}; + + StateObject() : mData(InitEngine()), mName(State::InitEngine){}; + explicit StateObject(ReadingMetadata&& aArg) + : mData(std::move(aArg)), mName(State::ReadingMetadata){}; + explicit StateObject(RunningEngine&& aArg) + : mData(std::move(aArg)), mName(State::RunningEngine){}; + explicit StateObject(SeekingData&& aArg) + : mData(std::move(aArg)), mName(State::SeekingData){}; + explicit StateObject(ShutdownEngine&& aArg) + : mData(std::move(aArg)), mName(State::ShutdownEngine){}; + explicit StateObject(RecoverEngine&& aArg) + : mData(std::move(aArg)), mName(State::RecoverEngine){}; + + bool IsInitEngine() const { return mData.is(); } + bool IsReadingMetadata() const { return mData.is(); } + bool IsRunningEngine() const { return mData.is(); } + bool IsSeekingData() const { return mData.is(); } + bool IsShutdownEngine() const { return mData.is(); } + bool IsRecoverEngine() const { return mData.is(); } + + InitEngine* AsInitEngine() { + if (IsInitEngine()) { + return &mData.as(); + } + if (IsRecoverEngine()) { + return &mData.as(); + } + return nullptr; + } + ReadingMetadata* AsReadingMetadata() { + return IsReadingMetadata() ? &mData.as() : nullptr; + } + SeekingData* AsSeekingData() { + return IsSeekingData() ? &mData.as() : nullptr; + } + ShutdownEngine* AsShutdownEngine() { + return IsShutdownEngine() ? 
&mData.as() : nullptr; + } + + Variant + mData; + State mName; + } mState; + using State = StateObject::State; + + void NotifyEventInternal(ExternalEngineEvent aEvent); + void NotifyErrorInternal(const MediaResult& aError); + void NotifyResizingInternal(uint32_t aWidth, uint32_t aHeight); + + RefPtr Shutdown() override; + + void SetPlaybackRate(double aPlaybackRate) override; + void BufferedRangeUpdated() override; + void VolumeChanged() override; + void PreservesPitchChanged() override; + void PlayStateChanged() override; + void LoopingChanged() override; + + // Not supported. + void SetIsLiveStream(bool aIsLiveStream) override {} + void SetCanPlayThrough(bool aCanPlayThrough) override {} + void SetFragmentEndTime(const media::TimeUnit& aFragmentEndTime) override {} + + void InitEngine(); + void OnEngineInitSuccess(); + void OnEngineInitFailure(); + + void ReadMetadata(); + void OnMetadataRead(MetadataHolder&& aMetadata); + void OnMetadataNotRead(const MediaResult& aError); + bool IsFormatSupportedByExternalEngine(const MediaInfo& aInfo); + + // Functions for handling external engine event. + void OnLoadedFirstFrame(); + void OnLoadedData(); + void OnWaiting(); + void OnPlaying(); + void OnSeeked(); + void OnBufferingStarted(); + void OnBufferingEnded(); + void OnTimeupdate(); + void OnEnded(); + void OnRequestAudio(); + void OnRequestVideo(); + + void ResetDecode(); + + void EndOfStream(MediaData::Type aType); + void WaitForData(MediaData::Type aType); + + void StartRunningEngine(); + void RunningEngineUpdate(MediaData::Type aType); + + void ChangeStateTo(State aNextState); + static const char* StateToStr(State aState); + + RefPtr Seek(const SeekTarget& aTarget) override; + void SeekReader(); + void OnSeekResolved(const media::TimeUnit& aUnit); + void OnSeekRejected(const SeekRejectValue& aReject); + bool IsSeeking(); + void CheckIfSeekCompleted(); + + void MaybeFinishWaitForData(); + + void SetBlankVideoToVideoContainer(); + + media::TimeUnit GetVideoThreshold(); + + bool ShouldRunEngineUpdateForRequest(); + + void UpdateSecondaryVideoContainer() override; + + void RecoverFromCDMProcessCrashIfNeeded(); + + void ReportTelemetry(const MediaResult& aError); + + UniquePtr mEngine; + + bool mHasEnoughAudio = false; + bool mHasEnoughVideo = false; + bool mSentPlaybackEndedEvent = false; + bool mHasReceivedFirstDecodedVideoFrame = false; + + // Only used if setting CDM happens before the engine finishes initialization. + MozPromiseHolder mSetCDMProxyPromise; + MozPromiseRequestHolder mSetCDMProxyRequest; + + // It would be zero for audio-only playback. + gfx::IntSize mVideoDisplay; + + // It would be set if playback is encrypted. + nsCString mKeySystem; +}; + +class ExternalPlaybackEngine { + public: + explicit ExternalPlaybackEngine(ExternalEngineStateMachine* aOwner) + : mOwner(aOwner) {} + + virtual ~ExternalPlaybackEngine() = default; + + // Init the engine and specify the preload request. + virtual RefPtr Init(bool aShouldPreload) = 0; + virtual void Shutdown() = 0; + virtual uint64_t Id() const = 0; + + // Following methods should only be called after successfully initialize the + // external engine. 
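+  // A typical call sequence from the state machine looks roughly like the
+  // following (an illustrative sketch inferred from the methods below, not
+  // documented upstream):
+  //   Init(/* aShouldPreload */ true) -> SetMediaInfo(info) -> Play(),
+  //   then Seek()/SetVolume()/SetPlaybackRate() while running, and
+  //   NotifyEndOfStream(aType) once the reader has drained that track.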
+ virtual void Play() = 0; + virtual void Pause() = 0; + virtual void Seek(const media::TimeUnit& aTargetTime) = 0; + virtual void SetPlaybackRate(double aPlaybackRate) = 0; + virtual void SetVolume(double aVolume) = 0; + virtual void SetLooping(bool aLooping) = 0; + virtual void SetPreservesPitch(bool aPreservesPitch) = 0; + virtual media::TimeUnit GetCurrentPosition() = 0; + virtual void NotifyEndOfStream(TrackInfo::TrackType aType) = 0; + virtual void SetMediaInfo(const MediaInfo& aInfo) = 0; + virtual bool SetCDMProxy(CDMProxy* aProxy) = 0; + virtual void NotifyResizing(uint32_t aWidth, uint32_t aHeight) = 0; + + ExternalEngineStateMachine* const MOZ_NON_OWNING_REF mOwner; +}; + +} // namespace mozilla + +#endif // DOM_MEDIA_EXTERNALENGINESTATEMACHINE_H_ diff --git a/dom/media/FileBlockCache.cpp b/dom/media/FileBlockCache.cpp new file mode 100644 index 0000000000..3989c05833 --- /dev/null +++ b/dom/media/FileBlockCache.cpp @@ -0,0 +1,506 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "FileBlockCache.h" +#include "MediaCache.h" +#include "VideoUtils.h" +#include "prio.h" +#include +#include "nsAnonymousTemporaryFile.h" +#include "nsIThreadManager.h" +#include "mozilla/dom/ContentChild.h" +#include "mozilla/ScopeExit.h" +#include "nsXULAppAPI.h" + +namespace mozilla { + +#undef LOG +LazyLogModule gFileBlockCacheLog("FileBlockCache"); +#define LOG(x, ...) \ + MOZ_LOG(gFileBlockCacheLog, LogLevel::Debug, ("%p " x, this, ##__VA_ARGS__)) + +static void CloseFD(PRFileDesc* aFD) { + PRStatus prrc; + prrc = PR_Close(aFD); + if (prrc != PR_SUCCESS) { + NS_WARNING("PR_Close() failed."); + } +} + +void FileBlockCache::SetCacheFile(PRFileDesc* aFD) { + LOG("SetCacheFile aFD=%p", aFD); + if (!aFD) { + // Failed to get a temporary file. Shutdown. + Close(); + return; + } + { + MutexAutoLock lock(mFileMutex); + mFD = aFD; + } + { + MutexAutoLock lock(mDataMutex); + LOG("SetFileCache mBackgroundET=%p, mIsWriteScheduled %d", + mBackgroundET.get(), mIsWriteScheduled); + if (mBackgroundET) { + // Still open, complete the initialization. + mInitialized = true; + if (mIsWriteScheduled) { + // A write was scheduled while waiting for FD. We need to run/dispatch a + // task to service the request. + nsCOMPtr event = mozilla::NewRunnableMethod( + "FileBlockCache::SetCacheFile -> PerformBlockIOs", this, + &FileBlockCache::PerformBlockIOs); + mBackgroundET->Dispatch(event.forget(), NS_DISPATCH_EVENT_MAY_BLOCK); + } + return; + } + } + // We've been closed while waiting for the file descriptor. + // Close the file descriptor we've just received, if still there. 
+ MutexAutoLock lock(mFileMutex); + if (mFD) { + CloseFD(mFD); + mFD = nullptr; + } +} + +nsresult FileBlockCache::Init() { + LOG("Init()"); + MutexAutoLock mon(mDataMutex); + MOZ_ASSERT(!mBackgroundET); + nsresult rv = NS_CreateBackgroundTaskQueue("FileBlockCache", + getter_AddRefs(mBackgroundET)); + if (NS_FAILED(rv)) { + return rv; + } + + if (XRE_IsParentProcess()) { + RefPtr self = this; + rv = mBackgroundET->Dispatch( + NS_NewRunnableFunction("FileBlockCache::Init", + [self] { + PRFileDesc* fd = nullptr; + nsresult rv = + NS_OpenAnonymousTemporaryFile(&fd); + if (NS_SUCCEEDED(rv)) { + self->SetCacheFile(fd); + } else { + self->Close(); + } + }), + NS_DISPATCH_EVENT_MAY_BLOCK); + } else { + // We must request a temporary file descriptor from the parent process. + RefPtr self = this; + rv = dom::ContentChild::GetSingleton()->AsyncOpenAnonymousTemporaryFile( + [self](PRFileDesc* aFD) { self->SetCacheFile(aFD); }); + } + + if (NS_FAILED(rv)) { + Close(); + } + + return rv; +} + +void FileBlockCache::Flush() { + LOG("Flush()"); + MutexAutoLock mon(mDataMutex); + MOZ_ASSERT(mBackgroundET); + + // Dispatch a task so we won't clear the arrays while PerformBlockIOs() is + // dropping the data lock and cause InvalidArrayIndex. + RefPtr self = this; + mBackgroundET->Dispatch( + NS_NewRunnableFunction("FileBlockCache::Flush", [self]() { + MutexAutoLock mon(self->mDataMutex); + // Just discard pending changes, assume MediaCache won't read from + // blocks it hasn't written to. + self->mChangeIndexList.clear(); + self->mBlockChanges.Clear(); + })); +} + +size_t FileBlockCache::GetMaxBlocks(size_t aCacheSizeInKB) const { + // We look up the cache size every time. This means dynamic changes + // to the pref are applied. + // Ensure we can divide BLOCK_SIZE by 1024. + static_assert(MediaCacheStream::BLOCK_SIZE % 1024 == 0, + "BLOCK_SIZE should be a multiple of 1024"); + // Ensure BLOCK_SIZE/1024 is at least 2. + static_assert(MediaCacheStream::BLOCK_SIZE / 1024 >= 2, + "BLOCK_SIZE / 1024 should be at least 2"); + // Ensure we can convert BLOCK_SIZE/1024 to a uint32_t without truncation. + static_assert(MediaCacheStream::BLOCK_SIZE / 1024 <= int64_t(UINT32_MAX), + "BLOCK_SIZE / 1024 should be at most UINT32_MAX"); + // Since BLOCK_SIZE is a strict multiple of 1024, + // aCacheSizeInKB * 1024 / BLOCK_SIZE == aCacheSizeInKB / (BLOCK_SIZE / + // 1024), but the latter formula avoids a potential overflow from `* 1024`. + // And because BLOCK_SIZE/1024 is at least 2, the maximum cache size + // INT32_MAX*2 will give a maxBlocks that can fit in an int32_t. + constexpr size_t blockSizeKb = size_t(MediaCacheStream::BLOCK_SIZE / 1024); + const size_t maxBlocks = aCacheSizeInKB / blockSizeKb; + return std::max(maxBlocks, size_t(1)); +} + +FileBlockCache::FileBlockCache() + : mFileMutex("MediaCache.Writer.IO.Mutex"), + mFD(nullptr), + mFDCurrentPos(0), + mDataMutex("MediaCache.Writer.Data.Mutex"), + mIsWriteScheduled(false), + mIsReading(false) {} + +FileBlockCache::~FileBlockCache() { Close(); } + +void FileBlockCache::Close() { + LOG("Close()"); + + nsCOMPtr thread; + { + MutexAutoLock mon(mDataMutex); + if (!mBackgroundET) { + return; + } + thread.swap(mBackgroundET); + } + + PRFileDesc* fd; + { + MutexAutoLock lock(mFileMutex); + fd = mFD; + mFD = nullptr; + } + + // Let the thread close the FD, and then trigger its own shutdown. + // Note that mBackgroundET is now empty, so no other task will be posted + // there. Also mBackgroundET and mFD are empty and therefore can be reused + // immediately. 
+  nsresult rv = thread->Dispatch(NS_NewRunnableFunction("FileBlockCache::Close",
+                                                        [thread, fd] {
+                                                          if (fd) {
+                                                            CloseFD(fd);
+                                                          }
+                                                          // No need to shutdown
+                                                          // background task
+                                                          // queues.
+                                                        }),
+                                 NS_DISPATCH_EVENT_MAY_BLOCK);
+  NS_ENSURE_SUCCESS_VOID(rv);
+}
+
+template <typename Container, typename Value>
+bool ContainerContains(const Container& aContainer, const Value& value) {
+  return std::find(aContainer.begin(), aContainer.end(), value) !=
+         aContainer.end();
+}
+
+nsresult FileBlockCache::WriteBlock(uint32_t aBlockIndex,
+                                    Span<const uint8_t> aData1,
+                                    Span<const uint8_t> aData2) {
+  MutexAutoLock mon(mDataMutex);
+
+  if (!mBackgroundET) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // Check if we've already got a pending write scheduled for this block.
+  mBlockChanges.EnsureLengthAtLeast(aBlockIndex + 1);
+  bool blockAlreadyHadPendingChange = mBlockChanges[aBlockIndex] != nullptr;
+  mBlockChanges[aBlockIndex] = new BlockChange(aData1, aData2);
+
+  if (!blockAlreadyHadPendingChange ||
+      !ContainerContains(mChangeIndexList, aBlockIndex)) {
+    // We either didn't already have a pending change for this block, or we
+    // did but we didn't have an entry for it in mChangeIndexList (we're in the
+    // process of writing it and have removed the block's index out of
+    // mChangeIndexList in Run() but not finished writing the block to file
+    // yet). Add the block's index to the end of mChangeIndexList to ensure the
+    // block is written as soon as possible.
+    mChangeIndexList.push_back(aBlockIndex);
+  }
+  NS_ASSERTION(ContainerContains(mChangeIndexList, aBlockIndex),
+               "Must have entry for new block");
+
+  EnsureWriteScheduled();
+
+  return NS_OK;
+}
+
+void FileBlockCache::EnsureWriteScheduled() {
+  mDataMutex.AssertCurrentThreadOwns();
+  MOZ_ASSERT(mBackgroundET);
+
+  if (mIsWriteScheduled || mIsReading) {
+    return;
+  }
+  mIsWriteScheduled = true;
+  if (!mInitialized) {
+    // We're still waiting on a file descriptor. When it arrives,
+    // the write will be scheduled.
+ return; + } + nsCOMPtr event = mozilla::NewRunnableMethod( + "FileBlockCache::EnsureWriteScheduled -> PerformBlockIOs", this, + &FileBlockCache::PerformBlockIOs); + mBackgroundET->Dispatch(event.forget(), NS_DISPATCH_EVENT_MAY_BLOCK); +} + +nsresult FileBlockCache::Seek(int64_t aOffset) { + mFileMutex.AssertCurrentThreadOwns(); + + if (mFDCurrentPos != aOffset) { + MOZ_ASSERT(mFD); + int64_t result = PR_Seek64(mFD, aOffset, PR_SEEK_SET); + if (result != aOffset) { + NS_WARNING("Failed to seek media cache file"); + return NS_ERROR_FAILURE; + } + mFDCurrentPos = result; + } + return NS_OK; +} + +nsresult FileBlockCache::ReadFromFile(int64_t aOffset, uint8_t* aDest, + int32_t aBytesToRead, + int32_t& aBytesRead) { + LOG("ReadFromFile(offset=%" PRIu64 ", len=%u)", aOffset, aBytesToRead); + mFileMutex.AssertCurrentThreadOwns(); + MOZ_ASSERT(mFD); + + nsresult res = Seek(aOffset); + if (NS_FAILED(res)) return res; + + aBytesRead = PR_Read(mFD, aDest, aBytesToRead); + if (aBytesRead <= 0) return NS_ERROR_FAILURE; + mFDCurrentPos += aBytesRead; + + return NS_OK; +} + +nsresult FileBlockCache::WriteBlockToFile(int32_t aBlockIndex, + const uint8_t* aBlockData) { + LOG("WriteBlockToFile(index=%u)", aBlockIndex); + + mFileMutex.AssertCurrentThreadOwns(); + MOZ_ASSERT(mFD); + + nsresult rv = Seek(BlockIndexToOffset(aBlockIndex)); + if (NS_FAILED(rv)) return rv; + + int32_t amount = PR_Write(mFD, aBlockData, BLOCK_SIZE); + if (amount < BLOCK_SIZE) { + NS_WARNING("Failed to write media cache block!"); + return NS_ERROR_FAILURE; + } + mFDCurrentPos += BLOCK_SIZE; + + return NS_OK; +} + +nsresult FileBlockCache::MoveBlockInFile(int32_t aSourceBlockIndex, + int32_t aDestBlockIndex) { + LOG("MoveBlockInFile(src=%u, dest=%u)", aSourceBlockIndex, aDestBlockIndex); + + mFileMutex.AssertCurrentThreadOwns(); + + uint8_t buf[BLOCK_SIZE]; + int32_t bytesRead = 0; + if (NS_FAILED(ReadFromFile(BlockIndexToOffset(aSourceBlockIndex), buf, + BLOCK_SIZE, bytesRead))) { + return NS_ERROR_FAILURE; + } + return WriteBlockToFile(aDestBlockIndex, buf); +} + +void FileBlockCache::PerformBlockIOs() { + MutexAutoLock mon(mDataMutex); + MOZ_ASSERT(mBackgroundET->IsOnCurrentThread()); + NS_ASSERTION(mIsWriteScheduled, "Should report write running or scheduled."); + + LOG("Run() mFD=%p mBackgroundET=%p", mFD, mBackgroundET.get()); + + while (!mChangeIndexList.empty()) { + if (!mBackgroundET) { + // We've been closed, abort, discarding unwritten changes. + mIsWriteScheduled = false; + return; + } + + if (mIsReading) { + // We're trying to read; postpone all writes. (Reader will resume writes.) + mIsWriteScheduled = false; + return; + } + + // Process each pending change. We pop the index out of the change + // list, but leave the BlockChange in mBlockChanges until the change + // is written to file. This is so that any read which happens while + // we drop mDataMutex to write will refer to the data's source in + // memory, rather than the not-yet up to date data written to file. + // This also ensures we will insert a new index into mChangeIndexList + // when this happens. + + // Hold a reference to the change, in case another change + // overwrites the mBlockChanges entry for this block while we drop + // mDataMutex to take mFileMutex. 
+ int32_t blockIndex = mChangeIndexList.front(); + RefPtr change = mBlockChanges[blockIndex]; + MOZ_ASSERT(change, + "Change index list should only contain entries for blocks " + "with changes"); + { + MutexAutoUnlock unlock(mDataMutex); + MutexAutoLock lock(mFileMutex); + if (!mFD) { + // We may be here if mFD has been reset because we're closing, so we + // don't care anymore about writes. + return; + } + if (change->IsWrite()) { + WriteBlockToFile(blockIndex, change->mData.get()); + } else if (change->IsMove()) { + MoveBlockInFile(change->mSourceBlockIndex, blockIndex); + } + } + mChangeIndexList.pop_front(); // MonitorAutoUnlock above + // If a new change has not been made to the block while we dropped + // mDataMutex, clear reference to the old change. Otherwise, the old + // reference has been cleared already. + if (mBlockChanges[blockIndex] == change) { // MonitorAutoUnlock above + mBlockChanges[blockIndex] = nullptr; // MonitorAutoUnlock above + } + } + + mIsWriteScheduled = false; +} + +nsresult FileBlockCache::Read(int64_t aOffset, uint8_t* aData, int32_t aLength, + int32_t* aBytes) { + MutexAutoLock mon(mDataMutex); + + if (!mBackgroundET || (aOffset / BLOCK_SIZE) > INT32_MAX) { + return NS_ERROR_FAILURE; + } + + mIsReading = true; + auto exitRead = MakeScopeExit([&] { + mDataMutex.AssertCurrentThreadOwns(); + mIsReading = false; + if (!mChangeIndexList.empty()) { + // mReading has stopped or prevented pending writes, resume them. + EnsureWriteScheduled(); + } + }); + + int32_t bytesToRead = aLength; + int64_t offset = aOffset; + uint8_t* dst = aData; + while (bytesToRead > 0) { + int32_t blockIndex = static_cast(offset / BLOCK_SIZE); + int32_t start = offset % BLOCK_SIZE; + int32_t amount = std::min(BLOCK_SIZE - start, bytesToRead); + + // If the block is not yet written to file, we can just read from + // the memory buffer, otherwise we need to read from file. + int32_t bytesRead = 0; + MOZ_ASSERT(!mBlockChanges.IsEmpty()); + MOZ_ASSERT(blockIndex >= 0 && + static_cast(blockIndex) < mBlockChanges.Length()); + RefPtr change = mBlockChanges.SafeElementAt(blockIndex); + if (change && change->IsWrite()) { + // Block isn't yet written to file. Read from memory buffer. + const uint8_t* blockData = change->mData.get(); + memcpy(dst, blockData + start, amount); + bytesRead = amount; + } else { + if (change && change->IsMove()) { + // The target block is the destination of a not-yet-completed move + // action, so read from the move's source block from file. Note we + // *don't* follow a chain of moves here, as a move's source index + // is resolved when MoveBlock() is called, and the move's source's + // block could be have itself been subject to a move (or write) + // which happened *after* this move was recorded. + blockIndex = change->mSourceBlockIndex; + } + // Block has been written to file, either as the source block of a move, + // or as a stable (all changes made) block. Read the data directly + // from file. + nsresult res; + { + MutexAutoUnlock unlock(mDataMutex); + MutexAutoLock lock(mFileMutex); + if (!mFD) { + // Not initialized yet, or closed. 
+ return NS_ERROR_FAILURE; + } + res = ReadFromFile(BlockIndexToOffset(blockIndex) + start, dst, amount, + bytesRead); + } + NS_ENSURE_SUCCESS(res, res); + } + dst += bytesRead; + offset += bytesRead; + bytesToRead -= bytesRead; + } + *aBytes = aLength - bytesToRead; + return NS_OK; +} + +nsresult FileBlockCache::MoveBlock(int32_t aSourceBlockIndex, + int32_t aDestBlockIndex) { + MutexAutoLock mon(mDataMutex); + + if (!mBackgroundET) { + return NS_ERROR_FAILURE; + } + + mBlockChanges.EnsureLengthAtLeast( + std::max(aSourceBlockIndex, aDestBlockIndex) + 1); + + // The source block's contents may be the destination of another pending + // move, which in turn can be the destination of another pending move, + // etc. Resolve the final source block, so that if one of the blocks in + // the chain of moves is overwritten, we don't lose the reference to the + // contents of the destination block. + int32_t sourceIndex = aSourceBlockIndex; + BlockChange* sourceBlock = nullptr; + while ((sourceBlock = mBlockChanges[sourceIndex]) && sourceBlock->IsMove()) { + sourceIndex = sourceBlock->mSourceBlockIndex; + } + + if (mBlockChanges[aDestBlockIndex] == nullptr || + !ContainerContains(mChangeIndexList, aDestBlockIndex)) { + // Only add another entry to the change index list if we don't already + // have one for this block. We won't have an entry when either there's + // no pending change for this block, or if there is a pending change for + // this block and we're in the process of writing it (we've popped the + // block's index out of mChangeIndexList in Run() but not finished writing + // the block to file yet. + mChangeIndexList.push_back(aDestBlockIndex); + } + + // If the source block hasn't yet been written to file then the dest block + // simply contains that same write. Resolve this as a write instead. + if (sourceBlock && sourceBlock->IsWrite()) { + mBlockChanges[aDestBlockIndex] = new BlockChange(sourceBlock->mData.get()); + } else { + mBlockChanges[aDestBlockIndex] = new BlockChange(sourceIndex); + } + + EnsureWriteScheduled(); + + NS_ASSERTION(ContainerContains(mChangeIndexList, aDestBlockIndex), + "Should have scheduled block for change"); + + return NS_OK; +} + +} // End namespace mozilla. + +// avoid redefined macro in unified build +#undef LOG diff --git a/dom/media/FileBlockCache.h b/dom/media/FileBlockCache.h new file mode 100644 index 0000000000..3a1daf0794 --- /dev/null +++ b/dom/media/FileBlockCache.h @@ -0,0 +1,193 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef FILE_BLOCK_CACHE_H_ +#define FILE_BLOCK_CACHE_H_ + +#include "mozilla/Attributes.h" +#include "mozilla/MozPromise.h" +#include "mozilla/Mutex.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/AbstractThread.h" +#include "nsTArray.h" +#include "MediaBlockCacheBase.h" +#include "nsDeque.h" +#include "nsThreadUtils.h" +#include + +struct PRFileDesc; + +namespace mozilla { + +// Manages file I/O for the media cache. Data comes in over the network +// via callbacks on the main thread, however we don't want to write the +// incoming data to the media cache on the main thread, as this could block +// causing UI jank. 
+// +// So FileBlockCache provides an abstraction for a temporary file accessible +// as an array of blocks, which supports a block move operation, and +// allows synchronous reading and writing from any thread, with writes being +// buffered so as not to block. +// +// Writes and cache block moves (which require reading) are deferred to +// their own non-main thread. This object also ensures that data which has +// been scheduled to be written, but hasn't actually *been* written, is read +// as if it had, i.e. pending writes are cached in readable memory until +// they're flushed to file. +// +// To improve efficiency, writes can only be done at block granularity, +// whereas reads can be done with byte granularity. +// +// Note it's also recommended not to read from the media cache from the main +// thread to prevent jank. +// +// When WriteBlock() or MoveBlock() are called, data about how to complete +// the block change is added to mBlockChanges, indexed by block index, and +// the block index is appended to the mChangeIndexList. This enables +// us to quickly tell if a block has been changed, and ensures we can perform +// the changes in the correct order. An event is dispatched to perform the +// changes listed in mBlockChanges to file. Read() checks mBlockChanges and +// determines the current data to return, reading from file or from +// mBlockChanges as necessary. +class FileBlockCache : public MediaBlockCacheBase { + public: + FileBlockCache(); + + protected: + virtual ~FileBlockCache(); + + public: + // Launch thread and open temporary file. + nsresult Init() override; + + // Will discard pending changes if any. + void Flush() override; + + // Maximum number of blocks allowed in this block cache. + // Calculated from "media.cache_size" pref. + size_t GetMaxBlocks(size_t aCacheSizeInKB) const override; + + // Can be called on any thread. This defers to a non-main thread. + nsresult WriteBlock(uint32_t aBlockIndex, Span aData1, + Span aData2) override; + + // Synchronously reads data from file. May read from file or memory + // depending on whether written blocks have been flushed to file yet. + // Not recommended to be called from the main thread, as can cause jank. + nsresult Read(int64_t aOffset, uint8_t* aData, int32_t aLength, + int32_t* aBytes) override; + + // Moves a block asynchronously. Can be called on any thread. + // This defers file I/O to a non-main thread. + nsresult MoveBlock(int32_t aSourceBlockIndex, + int32_t aDestBlockIndex) override; + + // Represents a change yet to be made to a block in the file. The change + // is either a write (and the data to be written is stored in this struct) + // or a move (and the index of the source block is stored instead). + struct BlockChange final { + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(BlockChange) + + // This block is waiting in memory to be written. + // Stores a copy of the block, so we can write it asynchronously. + explicit BlockChange(const uint8_t* aData) : mSourceBlockIndex(-1) { + mData = MakeUnique(BLOCK_SIZE); + memcpy(mData.get(), aData, BLOCK_SIZE); + } + + BlockChange(Span aData1, Span aData2) + : mSourceBlockIndex(-1) { + MOZ_ASSERT(aData1.Length() + aData2.Length() == BLOCK_SIZE); + mData = MakeUnique(BLOCK_SIZE); + memcpy(mData.get(), aData1.Elements(), aData1.Length()); + memcpy(mData.get() + aData1.Length(), aData2.Elements(), aData2.Length()); + } + + // This block's contents are located in another file + // block, i.e. this block has been moved. 
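+    //
+    // For example (illustrative): after MoveBlock(3, 7), where block 3 is
+    // already on file with no pending change, mBlockChanges[7] records
+    // mSourceBlockIndex == 3, so a Read() of block 7 is redirected to block
+    // 3's data on file until the move has been flushed.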
+    explicit BlockChange(int32_t aSourceBlockIndex)
+        : mSourceBlockIndex(aSourceBlockIndex) {}
+
+    UniquePtr<uint8_t[]> mData;
+    const int32_t mSourceBlockIndex;
+
+    bool IsMove() const { return mSourceBlockIndex != -1; }
+    bool IsWrite() const {
+      return mSourceBlockIndex == -1 && mData.get() != nullptr;
+    }
+
+   private:
+    // Private destructor, to discourage deletion outside of Release():
+    ~BlockChange() = default;
+  };
+
+ private:
+  int64_t BlockIndexToOffset(int32_t aBlockIndex) {
+    return static_cast<int64_t>(aBlockIndex) * BLOCK_SIZE;
+  }
+
+  void SetCacheFile(PRFileDesc* aFD);
+
+  // Close file in thread and terminate thread.
+  void Close();
+
+  // Performs block writes and block moves on its own thread.
+  void PerformBlockIOs();
+
+  // Mutex which controls access to mFD and mFDCurrentPos. Don't hold
+  // mDataMutex while holding mFileMutex! mFileMutex must be owned
+  // while accessing any of the following data fields or methods.
+  Mutex mFileMutex;
+  // Moves a block already committed to file.
+  nsresult MoveBlockInFile(int32_t aSourceBlockIndex, int32_t aDestBlockIndex);
+  // Seeks file pointer.
+  nsresult Seek(int64_t aOffset);
+  // Reads data from file offset.
+  nsresult ReadFromFile(int64_t aOffset, uint8_t* aDest, int32_t aBytesToRead,
+                        int32_t& aBytesRead);
+  nsresult WriteBlockToFile(int32_t aBlockIndex, const uint8_t* aBlockData);
+  // File descriptor we're writing to. This is created externally, but
+  // shutdown by us.
+  PRFileDesc* mFD MOZ_PT_GUARDED_BY(mFileMutex);
+  // The current file offset in the file.
+  int64_t mFDCurrentPos MOZ_GUARDED_BY(mFileMutex);
+
+  // Mutex which controls access to all data in this class, except mFD
+  // and mFDCurrentPos. Don't hold mDataMutex while holding mFileMutex!
+  // mDataMutex must be owned while accessing any of the following data
+  // fields or methods.
+  Mutex mDataMutex;
+  // Ensures we are either running the event to perform the IO, or that an
+  // event has been dispatched to perform the IO.
+  // mDataMutex must be owned while calling this.
+  void EnsureWriteScheduled();
+
+  // Array of block changes to be made. If mBlockChanges[offset/BLOCK_SIZE] ==
+  // nullptr, then the block has no pending changes to be written, but if
+  // mBlockChanges[offset/BLOCK_SIZE] != nullptr, then either there's a block
+  // cached in memory waiting to be written, or this block is the target of a
+  // block move.
+  nsTArray<RefPtr<BlockChange>> mBlockChanges MOZ_GUARDED_BY(mDataMutex);
+  // Event target upon which block writes and block moves are performed. This
+  // is created upon open, and dropped on close.
+  nsCOMPtr<nsISerialEventTarget> mBackgroundET MOZ_GUARDED_BY(mDataMutex);
+  // Queue of pending block indexes that need to be written or moved.
+  std::deque<int32_t> mChangeIndexList MOZ_GUARDED_BY(mDataMutex);
+  // True if we've dispatched an event to commit all pending block changes
+  // to file on mBackgroundET.
+  bool mIsWriteScheduled MOZ_GUARDED_BY(mDataMutex);
+  // True when a read is happening. Pending writes may be postponed, to give
+  // higher priority to reads (which may be blocking the caller).
+  bool mIsReading MOZ_GUARDED_BY(mDataMutex);
+  // True if we've got a temporary file descriptor. Note: we don't use mFD
+  // directly as that's synchronized via mFileMutex and we need to make
+  // decisions about whether we can write while holding mDataMutex.
+  bool mInitialized MOZ_GUARDED_BY(mDataMutex) = false;
+};
+
+}  // End namespace mozilla.
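+
+// Usage sketch (illustrative only; in practice MediaCache drives this class
+// and the exact call sites differ):
+//
+//   RefPtr<FileBlockCache> cache = new FileBlockCache();
+//   if (NS_SUCCEEDED(cache->Init())) {
+//     uint8_t block[MediaCacheStream::BLOCK_SIZE] = {};
+//     // Queue a full block; it is flushed to the temporary file on the
+//     // background task queue.
+//     cache->WriteBlock(0, Span<const uint8_t>(block, sizeof(block)),
+//                       Span<const uint8_t>());
+//     int32_t bytesRead = 0;
+//     // Served from the pending in-memory change until the write has landed.
+//     cache->Read(0, block, sizeof(block), &bytesRead);
+//   }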
+ +#endif /* FILE_BLOCK_CACHE_H_ */ diff --git a/dom/media/FileMediaResource.cpp b/dom/media/FileMediaResource.cpp new file mode 100644 index 0000000000..16bcc5c3de --- /dev/null +++ b/dom/media/FileMediaResource.cpp @@ -0,0 +1,223 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "FileMediaResource.h" + +#include "mozilla/AbstractThread.h" +#include "mozilla/dom/BlobImpl.h" +#include "mozilla/dom/BlobURLProtocolHandler.h" +#include "nsContentUtils.h" +#include "nsIFile.h" +#include "nsIFileChannel.h" +#include "nsIFileStreams.h" +#include "nsITimedChannel.h" +#include "nsNetUtil.h" + +namespace mozilla { + +void FileMediaResource::EnsureSizeInitialized() { + mLock.AssertCurrentThreadOwns(); + NS_ASSERTION(mInput, "Must have file input stream"); + if (mSizeInitialized && mNotifyDataEndedProcessed) { + return; + } + + if (!mSizeInitialized) { + // Get the file size and inform the decoder. + uint64_t size; + nsresult res = mInput->Available(&size); + if (NS_SUCCEEDED(res) && size <= INT64_MAX) { + mSize = (int64_t)size; + } + } + mSizeInitialized = true; + if (!mNotifyDataEndedProcessed && mSize >= 0) { + mCallback->AbstractMainThread()->Dispatch(NewRunnableMethod( + "MediaResourceCallback::NotifyDataEnded", mCallback.get(), + &MediaResourceCallback::NotifyDataEnded, NS_OK)); + } + mNotifyDataEndedProcessed = true; +} + +nsresult FileMediaResource::GetCachedRanges(MediaByteRangeSet& aRanges) { + MutexAutoLock lock(mLock); + + EnsureSizeInitialized(); + if (mSize == -1) { + return NS_ERROR_FAILURE; + } + aRanges += MediaByteRange(0, mSize); + return NS_OK; +} + +nsresult FileMediaResource::Open(nsIStreamListener** aStreamListener) { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + MOZ_ASSERT(aStreamListener); + + *aStreamListener = nullptr; + nsresult rv = NS_OK; + + MutexAutoLock lock(mLock); + // The channel is already open. We need a synchronous stream that + // implements nsISeekableStream, so we have to find the underlying + // file and reopen it + nsCOMPtr fc(do_QueryInterface(mChannel)); + if (fc) { + nsCOMPtr file; + rv = fc->GetFile(getter_AddRefs(file)); + NS_ENSURE_SUCCESS(rv, rv); + + rv = NS_NewLocalFileInputStream(getter_AddRefs(mInput), file, -1, -1, + nsIFileInputStream::SHARE_DELETE); + NS_ENSURE_SUCCESS(rv, rv); + } else if (dom::IsBlobURI(mURI)) { + RefPtr blobImpl; + rv = NS_GetBlobForBlobURI(mURI, getter_AddRefs(blobImpl)); + NS_ENSURE_SUCCESS(rv, rv); + MOZ_ASSERT(blobImpl); + + ErrorResult err; + blobImpl->CreateInputStream(getter_AddRefs(mInput), err); + if (NS_WARN_IF(err.Failed())) { + return err.StealNSResult(); + } + } + + mSeekable = do_QueryInterface(mInput); + if (!mSeekable) { + // XXX The file may just be a .url or similar + // shortcut that points to a Web site. We need to fix this by + // doing an async open and waiting until we locate the real resource, + // then using that (if it's still a file!). + return NS_ERROR_FAILURE; + } + + return NS_OK; +} + +RefPtr FileMediaResource::Close() { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + // Since mChennel is only accessed by main thread, there is no necessary to + // take the lock. 
+ if (mChannel) { + mChannel->Cancel(NS_ERROR_PARSED_DATA_CACHED); + mChannel = nullptr; + } + + return GenericPromise::CreateAndResolve(true, __func__); +} + +already_AddRefed FileMediaResource::GetCurrentPrincipal() { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + nsCOMPtr principal; + nsIScriptSecurityManager* secMan = nsContentUtils::GetSecurityManager(); + if (!secMan || !mChannel) return nullptr; + secMan->GetChannelResultPrincipal(mChannel, getter_AddRefs(principal)); + return principal.forget(); +} + +bool FileMediaResource::HadCrossOriginRedirects() { + MOZ_ASSERT(NS_IsMainThread()); + + nsCOMPtr timedChannel = do_QueryInterface(mChannel); + if (!timedChannel) { + return false; + } + + bool allRedirectsSameOrigin = false; + return NS_SUCCEEDED(timedChannel->GetAllRedirectsSameOrigin( + &allRedirectsSameOrigin)) && + !allRedirectsSameOrigin; +} + +nsresult FileMediaResource::ReadFromCache(char* aBuffer, int64_t aOffset, + uint32_t aCount) { + MutexAutoLock lock(mLock); + + EnsureSizeInitialized(); + if (!aCount) { + return NS_OK; + } + int64_t offset = 0; + nsresult res = mSeekable->Tell(&offset); + NS_ENSURE_SUCCESS(res, res); + res = mSeekable->Seek(nsISeekableStream::NS_SEEK_SET, aOffset); + NS_ENSURE_SUCCESS(res, res); + uint32_t bytesRead = 0; + do { + uint32_t x = 0; + uint32_t bytesToRead = aCount - bytesRead; + res = mInput->Read(aBuffer, bytesToRead, &x); + bytesRead += x; + if (!x) { + res = NS_ERROR_FAILURE; + } + } while (bytesRead != aCount && res == NS_OK); + + // Reset read head to original position so we don't disturb any other + // reading thread. + nsresult seekres = mSeekable->Seek(nsISeekableStream::NS_SEEK_SET, offset); + + // If a read failed in the loop above, we want to return its failure code. + NS_ENSURE_SUCCESS(res, res); + + // Else we succeed if the reset-seek succeeds. 
+ return seekres; +} + +nsresult FileMediaResource::UnsafeRead(char* aBuffer, uint32_t aCount, + uint32_t* aBytes) { + EnsureSizeInitialized(); + return mInput->Read(aBuffer, aCount, aBytes); +} + +nsresult FileMediaResource::ReadAt(int64_t aOffset, char* aBuffer, + uint32_t aCount, uint32_t* aBytes) { + NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread"); + + nsresult rv; + { + MutexAutoLock lock(mLock); + rv = UnsafeSeek(nsISeekableStream::NS_SEEK_SET, aOffset); + if (NS_FAILED(rv)) return rv; + rv = UnsafeRead(aBuffer, aCount, aBytes); + } + return rv; +} + +already_AddRefed FileMediaResource::UnsafeMediaReadAt( + int64_t aOffset, uint32_t aCount) { + RefPtr bytes = new MediaByteBuffer(); + bool ok = bytes->SetLength(aCount, fallible); + NS_ENSURE_TRUE(ok, nullptr); + nsresult rv = UnsafeSeek(nsISeekableStream::NS_SEEK_SET, aOffset); + NS_ENSURE_SUCCESS(rv, nullptr); + char* curr = reinterpret_cast(bytes->Elements()); + const char* start = curr; + while (aCount > 0) { + uint32_t bytesRead; + rv = UnsafeRead(curr, aCount, &bytesRead); + NS_ENSURE_SUCCESS(rv, nullptr); + if (!bytesRead) { + break; + } + aCount -= bytesRead; + curr += bytesRead; + } + bytes->SetLength(curr - start); + return bytes.forget(); +} + +nsresult FileMediaResource::UnsafeSeek(int32_t aWhence, int64_t aOffset) { + NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread"); + + if (!mSeekable) return NS_ERROR_FAILURE; + EnsureSizeInitialized(); + return mSeekable->Seek(aWhence, aOffset); +} + +} // namespace mozilla diff --git a/dom/media/FileMediaResource.h b/dom/media/FileMediaResource.h new file mode 100644 index 0000000000..7373a6fd37 --- /dev/null +++ b/dom/media/FileMediaResource.h @@ -0,0 +1,136 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_dom_media_FileMediaResource_h +#define mozilla_dom_media_FileMediaResource_h + +#include "BaseMediaResource.h" +#include "mozilla/Mutex.h" + +namespace mozilla { + +class FileMediaResource : public BaseMediaResource { + public: + FileMediaResource(MediaResourceCallback* aCallback, nsIChannel* aChannel, + nsIURI* aURI, int64_t aSize = -1 /* unknown size */) + : BaseMediaResource(aCallback, aChannel, aURI), + mSize(aSize), + mLock("FileMediaResource.mLock"), + mSizeInitialized(aSize != -1) {} + ~FileMediaResource() = default; + + // Main thread + nsresult Open(nsIStreamListener** aStreamListener) override; + RefPtr Close() override; + void Suspend(bool aCloseImmediately) override {} + void Resume() override {} + already_AddRefed GetCurrentPrincipal() override; + bool HadCrossOriginRedirects() override; + nsresult ReadFromCache(char* aBuffer, int64_t aOffset, + uint32_t aCount) override; + + // These methods are called off the main thread. + + // Other thread + void SetReadMode(MediaCacheStream::ReadMode aMode) override {} + void SetPlaybackRate(uint32_t aBytesPerSecond) override {} + nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount, + uint32_t* aBytes) override; + // (Probably) file-based, caching recommended. 
+ bool ShouldCacheReads() override { return true; } + + // Any thread + void Pin() override {} + void Unpin() override {} + double GetDownloadRate(bool* aIsReliable) override { + // The data's all already here + *aIsReliable = true; + return 100 * 1024 * 1024; // arbitray, use 100MB/s + } + + int64_t GetLength() override { + MutexAutoLock lock(mLock); + + EnsureSizeInitialized(); + return mSizeInitialized ? mSize : 0; + } + + int64_t GetNextCachedData(int64_t aOffset) override { + MutexAutoLock lock(mLock); + + EnsureSizeInitialized(); + return (aOffset < mSize) ? aOffset : -1; + } + + int64_t GetCachedDataEnd(int64_t aOffset) override { + MutexAutoLock lock(mLock); + + EnsureSizeInitialized(); + return std::max(aOffset, mSize); + } + bool IsDataCachedToEndOfResource(int64_t aOffset) override { return true; } + bool IsTransportSeekable() override { return true; } + + nsresult GetCachedRanges(MediaByteRangeSet& aRanges) override; + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override { + // Might be useful to track in the future: + // - mInput + return BaseMediaResource::SizeOfExcludingThis(aMallocSizeOf); + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + protected: + // These Unsafe variants of Read and Seek perform their operations + // without acquiring mLock. The caller must obtain the lock before + // calling. The implmentation of Read, Seek and ReadAt obtains the + // lock before calling these Unsafe variants to read or seek. + nsresult UnsafeRead(char* aBuffer, uint32_t aCount, uint32_t* aBytes) + MOZ_REQUIRES(mLock); + nsresult UnsafeSeek(int32_t aWhence, int64_t aOffset) MOZ_REQUIRES(mLock); + + private: + // Ensures mSize is initialized, if it can be. + // mLock must be held when this is called, and mInput must be non-null. + void EnsureSizeInitialized() MOZ_REQUIRES(mLock); + already_AddRefed UnsafeMediaReadAt(int64_t aOffset, + uint32_t aCount) + MOZ_REQUIRES(mLock); + + // The file size, or -1 if not known. Immutable after Open(). + // Can be used from any thread. + // XXX FIX? is this under mLock? comments are contradictory + int64_t mSize MOZ_GUARDED_BY(mLock); + + // This lock handles synchronisation between calls to Close() and + // the Read, Seek, etc calls. Close must not be called while a + // Read or Seek is in progress since it resets various internal + // values to null. + // This lock protects mSeekable, mInput, mSize, and mSizeInitialized. + Mutex mLock; + + // Seekable stream interface to file. This can be used from any + // thread. + nsCOMPtr mSeekable MOZ_GUARDED_BY(mLock); + + // Input stream for the media data. This can be used from any + // thread. + nsCOMPtr mInput MOZ_GUARDED_BY(mLock); + + // Whether we've attempted to initialize mSize. Note that mSize can be -1 + // when mSizeInitialized is true if we tried and failed to get the size + // of the file. + bool mSizeInitialized MOZ_GUARDED_BY(mLock); + // Set to true if NotifyDataEnded callback has been processed (which only + // occurs if resource size is known) + bool mNotifyDataEndedProcessed = false; +}; + +} // namespace mozilla + +#endif // mozilla_dom_media_FileMediaResource_h diff --git a/dom/media/ForwardedInputTrack.cpp b/dom/media/ForwardedInputTrack.cpp new file mode 100644 index 0000000000..2f71f0e12a --- /dev/null +++ b/dom/media/ForwardedInputTrack.cpp @@ -0,0 +1,291 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ForwardedInputTrack.h" + +#include +#include "AudioChannelService.h" +#include "AudioNodeEngine.h" +#include "AudioNodeExternalInputTrack.h" +#include "AudioNodeTrack.h" +#include "AudioSegment.h" +#include "DOMMediaStream.h" +#include "GeckoProfiler.h" +#include "ImageContainer.h" +#include "MediaTrackGraph.h" +#include "mozilla/Attributes.h" +#include "mozilla/Logging.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Unused.h" +#include "nsContentUtils.h" +#include "nsPrintfCString.h" +#include "nsServiceManagerUtils.h" +#include "nsWidgetsCID.h" +#include "prerror.h" +#include "Tracing.h" +#include "VideoSegment.h" +#include "webaudio/MediaStreamAudioDestinationNode.h" + +using namespace mozilla::layers; +using namespace mozilla::dom; +using namespace mozilla::gfx; + +namespace mozilla { + +#ifdef TRACK_LOG +# undef TRACK_LOG +#endif + +LazyLogModule gForwardedInputTrackLog("ForwardedInputTrack"); +#define TRACK_LOG(type, msg) MOZ_LOG(gForwardedInputTrackLog, type, msg) + +ForwardedInputTrack::ForwardedInputTrack(TrackRate aSampleRate, + MediaSegment::Type aType) + : ProcessedMediaTrack( + aSampleRate, aType, + aType == MediaSegment::AUDIO + ? static_cast(new AudioSegment()) + : static_cast(new VideoSegment())) {} + +void ForwardedInputTrack::AddInput(MediaInputPort* aPort) { + SetInput(aPort); + ProcessedMediaTrack::AddInput(aPort); +} + +void ForwardedInputTrack::RemoveInput(MediaInputPort* aPort) { + TRACK_LOG(LogLevel::Debug, + ("ForwardedInputTrack %p removing input %p", this, aPort)); + MOZ_ASSERT(aPort == mInputPort); + + for (const auto& listener : mOwnedDirectListeners) { + MediaTrack* source = mInputPort->GetSource(); + TRACK_LOG(LogLevel::Debug, + ("ForwardedInputTrack %p removing direct listener " + "%p. Forwarding to input track %p.", + this, listener.get(), aPort->GetSource())); + source->RemoveDirectListenerImpl(listener); + } + + DisabledTrackMode oldMode = CombinedDisabledMode(); + mInputDisabledMode = DisabledTrackMode::ENABLED; + NotifyIfDisabledModeChangedFrom(oldMode); + + mInputPort = nullptr; + ProcessedMediaTrack::RemoveInput(aPort); +} + +void ForwardedInputTrack::SetInput(MediaInputPort* aPort) { + MOZ_ASSERT(aPort); + MOZ_ASSERT(aPort->GetSource()); + MOZ_ASSERT(aPort->GetSource()->GetData()); + MOZ_ASSERT(!mInputPort); + MOZ_ASSERT(mInputDisabledMode == DisabledTrackMode::ENABLED); + + mInputPort = aPort; + + for (const auto& listener : mOwnedDirectListeners) { + MediaTrack* source = mInputPort->GetSource(); + TRACK_LOG(LogLevel::Debug, ("ForwardedInputTrack %p adding direct listener " + "%p. 
Forwarding to input track %p.", + this, listener.get(), aPort->GetSource())); + source->AddDirectListenerImpl(do_AddRef(listener)); + } + + DisabledTrackMode oldMode = CombinedDisabledMode(); + mInputDisabledMode = mInputPort->GetSource()->CombinedDisabledMode(); + NotifyIfDisabledModeChangedFrom(oldMode); +} + +void ForwardedInputTrack::ProcessInputImpl(MediaTrack* aSource, + MediaSegment* aSegment, + GraphTime aFrom, GraphTime aTo, + uint32_t aFlags) { + GraphTime next; + for (GraphTime t = aFrom; t < aTo; t = next) { + MediaInputPort::InputInterval interval = + MediaInputPort::GetNextInputInterval(mInputPort, t); + interval.mEnd = std::min(interval.mEnd, aTo); + + const bool inputEnded = + !aSource || + (aSource->Ended() && + aSource->GetEnd() <= + aSource->GraphTimeToTrackTimeWithBlocking(interval.mEnd)); + + TrackTime ticks = interval.mEnd - interval.mStart; + next = interval.mEnd; + + if (interval.mStart >= interval.mEnd) { + break; + } + + if (inputEnded) { + if (mAutoend && (aFlags & ALLOW_END)) { + mEnded = true; + break; + } + aSegment->AppendNullData(ticks); + TRACK_LOG(LogLevel::Verbose, + ("ForwardedInputTrack %p appending %lld ticks " + "of null data (ended input)", + this, (long long)ticks)); + } else if (interval.mInputIsBlocked) { + aSegment->AppendNullData(ticks); + TRACK_LOG(LogLevel::Verbose, + ("ForwardedInputTrack %p appending %lld ticks " + "of null data (blocked input)", + this, (long long)ticks)); + } else if (InMutedCycle()) { + aSegment->AppendNullData(ticks); + } else if (aSource->IsSuspended()) { + aSegment->AppendNullData(ticks); + } else { + MOZ_ASSERT(GetEnd() == GraphTimeToTrackTimeWithBlocking(interval.mStart), + "Samples missing"); + TrackTime inputStart = + aSource->GraphTimeToTrackTimeWithBlocking(interval.mStart); + TrackTime inputEnd = + aSource->GraphTimeToTrackTimeWithBlocking(interval.mEnd); + aSegment->AppendSlice(*aSource->GetData(), inputStart, inputEnd); + } + ApplyTrackDisabling(aSegment); + for (const auto& listener : mTrackListeners) { + listener->NotifyQueuedChanges(Graph(), GetEnd(), *aSegment); + } + mSegment->AppendFrom(aSegment); + } +} + +void ForwardedInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo, + uint32_t aFlags) { + TRACE_COMMENT("ForwardedInputTrack::ProcessInput", "ForwardedInputTrack %p", + this); + if (mEnded) { + return; + } + + MediaInputPort* input = mInputPort; + MediaTrack* source = input ? input->GetSource() : nullptr; + if (mType == MediaSegment::AUDIO) { + AudioSegment audio; + ProcessInputImpl(source, &audio, aFrom, aTo, aFlags); + } else if (mType == MediaSegment::VIDEO) { + VideoSegment video; + ProcessInputImpl(source, &video, aFrom, aTo, aFlags); + } else { + MOZ_CRASH("Unknown segment type"); + } + + if (mEnded) { + RemoveAllDirectListenersImpl(); + } +} + +DisabledTrackMode ForwardedInputTrack::CombinedDisabledMode() const { + if (mDisabledMode == DisabledTrackMode::SILENCE_BLACK || + mInputDisabledMode == DisabledTrackMode::SILENCE_BLACK) { + return DisabledTrackMode::SILENCE_BLACK; + } + if (mDisabledMode == DisabledTrackMode::SILENCE_FREEZE || + mInputDisabledMode == DisabledTrackMode::SILENCE_FREEZE) { + return DisabledTrackMode::SILENCE_FREEZE; + } + return DisabledTrackMode::ENABLED; +} + +void ForwardedInputTrack::SetDisabledTrackModeImpl(DisabledTrackMode aMode) { + bool enabled = aMode == DisabledTrackMode::ENABLED; + TRACK_LOG(LogLevel::Info, ("ForwardedInputTrack %p was explicitly %s", this, + enabled ? 
"enabled" : "disabled")); + for (DirectMediaTrackListener* listener : mOwnedDirectListeners) { + DisabledTrackMode oldMode = mDisabledMode; + bool oldEnabled = oldMode == DisabledTrackMode::ENABLED; + if (!oldEnabled && enabled) { + TRACK_LOG(LogLevel::Debug, ("ForwardedInputTrack %p setting " + "direct listener enabled", + this)); + listener->DecreaseDisabled(oldMode); + } else if (oldEnabled && !enabled) { + TRACK_LOG(LogLevel::Debug, ("ForwardedInputTrack %p setting " + "direct listener disabled", + this)); + listener->IncreaseDisabled(aMode); + } + } + MediaTrack::SetDisabledTrackModeImpl(aMode); +} + +void ForwardedInputTrack::OnInputDisabledModeChanged( + DisabledTrackMode aInputMode) { + MOZ_ASSERT(mInputs.Length() == 1); + MOZ_ASSERT(mInputs[0]->GetSource()); + DisabledTrackMode oldMode = CombinedDisabledMode(); + if (mInputDisabledMode == DisabledTrackMode::SILENCE_BLACK && + aInputMode == DisabledTrackMode::SILENCE_FREEZE) { + // Don't allow demoting from SILENCE_BLACK to SILENCE_FREEZE. Frames will + // remain black so we shouldn't notify that the track got enabled. + aInputMode = DisabledTrackMode::SILENCE_BLACK; + } + mInputDisabledMode = aInputMode; + NotifyIfDisabledModeChangedFrom(oldMode); +} + +uint32_t ForwardedInputTrack::NumberOfChannels() const { + MOZ_DIAGNOSTIC_ASSERT(mSegment->GetType() == MediaSegment::AUDIO); + if (!mInputPort || !mInputPort->GetSource()) { + return GetData()->MaxChannelCount(); + } + return mInputPort->GetSource()->NumberOfChannels(); +} + +void ForwardedInputTrack::AddDirectListenerImpl( + already_AddRefed aListener) { + RefPtr listener = aListener; + mOwnedDirectListeners.AppendElement(listener); + + DisabledTrackMode currentMode = mDisabledMode; + if (currentMode != DisabledTrackMode::ENABLED) { + listener->IncreaseDisabled(currentMode); + } + + if (mInputPort) { + MediaTrack* source = mInputPort->GetSource(); + TRACK_LOG(LogLevel::Debug, ("ForwardedInputTrack %p adding direct listener " + "%p. Forwarding to input track %p.", + this, listener.get(), source)); + source->AddDirectListenerImpl(listener.forget()); + } +} + +void ForwardedInputTrack::RemoveDirectListenerImpl( + DirectMediaTrackListener* aListener) { + for (size_t i = 0; i < mOwnedDirectListeners.Length(); ++i) { + if (mOwnedDirectListeners[i] == aListener) { + TRACK_LOG(LogLevel::Debug, + ("ForwardedInputTrack %p removing direct listener %p", this, + aListener)); + DisabledTrackMode currentMode = mDisabledMode; + if (currentMode != DisabledTrackMode::ENABLED) { + // Reset the listener's state. + aListener->DecreaseDisabled(currentMode); + } + mOwnedDirectListeners.RemoveElementAt(i); + break; + } + } + if (mInputPort) { + // Forward to the input + MediaTrack* source = mInputPort->GetSource(); + source->RemoveDirectListenerImpl(aListener); + } +} + +void ForwardedInputTrack::RemoveAllDirectListenersImpl() { + for (const auto& listener : mOwnedDirectListeners.Clone()) { + RemoveDirectListenerImpl(listener); + } + MOZ_DIAGNOSTIC_ASSERT(mOwnedDirectListeners.IsEmpty()); +} + +} // namespace mozilla diff --git a/dom/media/ForwardedInputTrack.h b/dom/media/ForwardedInputTrack.h new file mode 100644 index 0000000000..2aaa30ca8f --- /dev/null +++ b/dom/media/ForwardedInputTrack.h @@ -0,0 +1,68 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef MOZILLA_FORWARDEDINPUTTRACK_H_ +#define MOZILLA_FORWARDEDINPUTTRACK_H_ + +#include "MediaTrackGraph.h" +#include "MediaTrackListener.h" +#include + +namespace mozilla { + +/** + * See MediaTrackGraph::CreateForwardedInputTrack. + */ +class ForwardedInputTrack : public ProcessedMediaTrack { + public: + ForwardedInputTrack(TrackRate aSampleRate, MediaSegment::Type aType); + + virtual ForwardedInputTrack* AsForwardedInputTrack() override { return this; } + friend class DOMMediaStream; + + void AddInput(MediaInputPort* aPort) override; + void RemoveInput(MediaInputPort* aPort) override; + void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override; + + DisabledTrackMode CombinedDisabledMode() const override; + void SetDisabledTrackModeImpl(DisabledTrackMode aMode) override; + void OnInputDisabledModeChanged(DisabledTrackMode aInputMode) override; + + uint32_t NumberOfChannels() const override; + + friend class MediaTrackGraphImpl; + + protected: + // Set up this track from a specific input. + void SetInput(MediaInputPort* aPort); + + // MediaSegment-agnostic ProcessInput. + void ProcessInputImpl(MediaTrack* aSource, MediaSegment* aSegment, + GraphTime aFrom, GraphTime aTo, uint32_t aFlags); + + void AddDirectListenerImpl( + already_AddRefed aListener) override; + void RemoveDirectListenerImpl(DirectMediaTrackListener* aListener) override; + void RemoveAllDirectListenersImpl() override; + + // These are direct track listeners that have been added to this + // ForwardedInputTrack-track. While an input is set, these are forwarded to + // the input track. We will update these when this track's disabled status + // changes. + nsTArray> mOwnedDirectListeners; + + // Set if an input has been added, nullptr otherwise. Adding more than one + // input is an error. + MediaInputPort* mInputPort = nullptr; + + // This track's input's associated disabled mode. ENABLED if there is no + // input. This is used with MediaTrackListener::NotifyEnabledStateChanged(), + // which affects only video tracks. This is set only on ForwardedInputTracks. + DisabledTrackMode mInputDisabledMode = DisabledTrackMode::ENABLED; +}; + +} // namespace mozilla + +#endif /* MOZILLA_FORWARDEDINPUTTRACK_H_ */ diff --git a/dom/media/FrameStatistics.h b/dom/media/FrameStatistics.h new file mode 100644 index 0000000000..c0063bd1bc --- /dev/null +++ b/dom/media/FrameStatistics.h @@ -0,0 +1,196 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef FrameStatistics_h_ +#define FrameStatistics_h_ + +#include "mozilla/ReentrantMonitor.h" + +namespace mozilla { + +struct FrameStatisticsData { + // Number of frames parsed and demuxed from media. + // Access protected by mReentrantMonitor. + uint64_t mParsedFrames = 0; + + // Number of parsed frames which were actually decoded. + // Access protected by mReentrantMonitor. + uint64_t mDecodedFrames = 0; + + // Number of parsed frames which were dropped in the decoder. + // Access protected by mReentrantMonitor. + uint64_t mDroppedDecodedFrames = 0; + + // Number of decoded frames which were dropped in the sink + // Access protected by mReentrantMonitor. 
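+ // (Dropped by the media sink before the frames reach the compositor; + // compare mDroppedCompositorFrames below.)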
+ uint64_t mDroppedSinkFrames = 0; + + // Number of sinked frames which were dropped in the compositor + // Access protected by mReentrantMonitor. + uint64_t mDroppedCompositorFrames = 0; + + // Number of decoded frames which were actually sent down the rendering + // pipeline to be painted ("presented"). Access protected by + // mReentrantMonitor. + uint64_t mPresentedFrames = 0; + + // Sum of all inter-keyframe segment durations, in microseconds. + // Dividing by count will give the average inter-keyframe time. + uint64_t mInterKeyframeSum_us = 0; + // Number of inter-keyframe segments summed so far. + size_t mInterKeyframeCount = 0; + + // Maximum inter-keyframe segment duration, in microseconds. + uint64_t mInterKeyFrameMax_us = 0; + + FrameStatisticsData() = default; + FrameStatisticsData(uint64_t aParsed, uint64_t aDecoded, uint64_t aPresented, + uint64_t aDroppedDecodedFrames, + uint64_t aDroppedSinkFrames, + uint64_t aDroppedCompositorFrames) + : mParsedFrames(aParsed), + mDecodedFrames(aDecoded), + mDroppedDecodedFrames(aDroppedDecodedFrames), + mDroppedSinkFrames(aDroppedSinkFrames), + mDroppedCompositorFrames(aDroppedCompositorFrames), + mPresentedFrames(aPresented) {} + + void Accumulate(const FrameStatisticsData& aStats) { + mParsedFrames += aStats.mParsedFrames; + mDecodedFrames += aStats.mDecodedFrames; + mPresentedFrames += aStats.mPresentedFrames; + mDroppedDecodedFrames += aStats.mDroppedDecodedFrames; + mDroppedSinkFrames += aStats.mDroppedSinkFrames; + mDroppedCompositorFrames += aStats.mDroppedCompositorFrames; + mInterKeyframeSum_us += aStats.mInterKeyframeSum_us; + mInterKeyframeCount += aStats.mInterKeyframeCount; + // It doesn't make sense to add max numbers, instead keep the bigger one. + if (mInterKeyFrameMax_us < aStats.mInterKeyFrameMax_us) { + mInterKeyFrameMax_us = aStats.mInterKeyFrameMax_us; + } + } +}; + +// Frame decoding/painting related performance counters. +// Threadsafe. +class FrameStatistics { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(FrameStatistics); + + FrameStatistics() : mReentrantMonitor("FrameStats") {} + + // Returns a copy of all frame statistics data. + // Can be called on any thread. + FrameStatisticsData GetFrameStatisticsData() const { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + return mFrameStatisticsData; + } + + // Returns number of frames which have been parsed from the media. + // Can be called on any thread. + uint64_t GetParsedFrames() const { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + return mFrameStatisticsData.mParsedFrames; + } + + // Returns the number of parsed frames which have been decoded. + // Can be called on any thread. + uint64_t GetDecodedFrames() const { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + return mFrameStatisticsData.mDecodedFrames; + } + + // Returns the number of decoded frames which have been sent to the rendering + // pipeline for painting ("presented"). + // Can be called on any thread. + uint64_t GetPresentedFrames() const { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + return mFrameStatisticsData.mPresentedFrames; + } + + // Returns the number of presented and dropped frames + // Can be called on any thread. 
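+ // (Total = presented frames plus every category of dropped frames; see + // GetDroppedFrames() below.)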
+ uint64_t GetTotalFrames() const { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + return GetTotalFrames(mFrameStatisticsData); + } + + static uint64_t GetTotalFrames(const FrameStatisticsData& aData) { + return aData.mPresentedFrames + GetDroppedFrames(aData); + } + + // Returns the number of frames that have been skipped because they have + // missed their composition deadline. + uint64_t GetDroppedFrames() const { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + return GetDroppedFrames(mFrameStatisticsData); + } + + static uint64_t GetDroppedFrames(const FrameStatisticsData& aData) { + return aData.mDroppedDecodedFrames + aData.mDroppedSinkFrames + + aData.mDroppedCompositorFrames; + } + + uint64_t GetDroppedDecodedFrames() const { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + return mFrameStatisticsData.mDroppedDecodedFrames; + } + + uint64_t GetDroppedSinkFrames() const { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + return mFrameStatisticsData.mDroppedSinkFrames; + } + + uint64_t GetDroppedCompositorFrames() const { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + return mFrameStatisticsData.mDroppedCompositorFrames; + } + + // Increments the parsed and decoded frame counters by the passed in counts. + // Can be called on any thread. + void Accumulate(const FrameStatisticsData& aStats) { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + mFrameStatisticsData.Accumulate(aStats); + } + + // Increments the presented frame counters. + // Can be called on any thread. + void NotifyPresentedFrame() { + ReentrantMonitorAutoEnter mon(mReentrantMonitor); + ++mFrameStatisticsData.mPresentedFrames; + } + + // Stack based class to assist in notifying the frame statistics of + // parsed and decoded frames. Use inside video demux & decode functions + // to ensure all parsed and decoded frames are reported on all return paths. + class AutoNotifyDecoded { + public: + explicit AutoNotifyDecoded(FrameStatistics* aFrameStats) + : mFrameStats(aFrameStats) {} + ~AutoNotifyDecoded() { + if (mFrameStats) { + mFrameStats->Accumulate(mStats); + } + } + + FrameStatisticsData mStats; + + private: + FrameStatistics* mFrameStats; + }; + + private: + ~FrameStatistics() = default; + + // ReentrantMonitor to protect access of playback statistics. + mutable ReentrantMonitor mReentrantMonitor MOZ_UNANNOTATED; + + FrameStatisticsData mFrameStatisticsData; +}; + +} // namespace mozilla + +#endif // FrameStatistics_h_ diff --git a/dom/media/GetUserMediaRequest.cpp b/dom/media/GetUserMediaRequest.cpp new file mode 100644 index 0000000000..84c9eba32a --- /dev/null +++ b/dom/media/GetUserMediaRequest.cpp @@ -0,0 +1,127 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "GetUserMediaRequest.h" + +#include "base/basictypes.h" +#include "MediaManager.h" +#include "mozilla/dom/MediaDevicesBinding.h" +#include "mozilla/dom/MediaStreamBinding.h" +#include "mozilla/dom/GetUserMediaRequestBinding.h" +#include "nsIMediaDevice.h" +#include "nsIScriptGlobalObject.h" +#include "nsPIDOMWindow.h" + +namespace mozilla::dom { + +GetUserMediaRequest::GetUserMediaRequest( + nsPIDOMWindowInner* aInnerWindow, const nsAString& aCallID, + RefPtr aMediaDeviceSet, + const MediaStreamConstraints& aConstraints, bool aIsSecure, + bool aIsHandlingUserInput) + : mInnerWindowID(aInnerWindow->WindowID()), + mOuterWindowID(aInnerWindow->GetOuterWindow()->WindowID()), + mCallID(aCallID), + mMediaDeviceSet(std::move(aMediaDeviceSet)), + mConstraints(new MediaStreamConstraints(aConstraints)), + mType(GetUserMediaRequestType::Getusermedia), + mIsSecure(aIsSecure), + mIsHandlingUserInput(aIsHandlingUserInput) {} + +GetUserMediaRequest::GetUserMediaRequest( + nsPIDOMWindowInner* aInnerWindow, const nsAString& aCallID, + RefPtr aMediaDeviceSet, + const AudioOutputOptions& aAudioOutputOptions, bool aIsSecure, + bool aIsHandlingUserInput) + : mInnerWindowID(aInnerWindow->WindowID()), + mOuterWindowID(aInnerWindow->GetOuterWindow()->WindowID()), + mCallID(aCallID), + mMediaDeviceSet(std::move(aMediaDeviceSet)), + mAudioOutputOptions(new AudioOutputOptions(aAudioOutputOptions)), + mType(GetUserMediaRequestType::Selectaudiooutput), + mIsSecure(aIsSecure), + mIsHandlingUserInput(aIsHandlingUserInput) {} + +GetUserMediaRequest::GetUserMediaRequest(nsPIDOMWindowInner* aInnerWindow, + const nsAString& aRawId, + const nsAString& aMediaSource, + bool aIsHandlingUserInput) + : mInnerWindowID(0), + mOuterWindowID(0), + mRawID(aRawId), + mMediaSource(aMediaSource), + mType(GetUserMediaRequestType::Recording_device_stopped), + mIsSecure(false), + mIsHandlingUserInput(aIsHandlingUserInput) { + if (aInnerWindow && aInnerWindow->GetOuterWindow()) { + mOuterWindowID = aInnerWindow->GetOuterWindow()->WindowID(); + } +} + +GetUserMediaRequest::~GetUserMediaRequest() = default; + +NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_0(GetUserMediaRequest) +NS_IMPL_CYCLE_COLLECTING_ADDREF(GetUserMediaRequest) +NS_IMPL_CYCLE_COLLECTING_RELEASE(GetUserMediaRequest) +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(GetUserMediaRequest) + NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY + NS_INTERFACE_MAP_ENTRY(nsISupports) +NS_INTERFACE_MAP_END + +JSObject* GetUserMediaRequest::WrapObject(JSContext* aCx, + JS::Handle aGivenProto) { + return GetUserMediaRequest_Binding::Wrap(aCx, this, aGivenProto); +} + +nsISupports* GetUserMediaRequest::GetParentObject() { return nullptr; } + +GetUserMediaRequestType GetUserMediaRequest::Type() { return mType; } + +void GetUserMediaRequest::GetCallID(nsString& retval) { retval = mCallID; } + +void GetUserMediaRequest::GetRawID(nsString& retval) { retval = mRawID; } + +void GetUserMediaRequest::GetMediaSource(nsString& retval) { + retval = mMediaSource; +} + +uint64_t GetUserMediaRequest::WindowID() { return mOuterWindowID; } + +uint64_t GetUserMediaRequest::InnerWindowID() { return mInnerWindowID; } + +bool GetUserMediaRequest::IsSecure() { return mIsSecure; } + +bool GetUserMediaRequest::IsHandlingUserInput() const { + return mIsHandlingUserInput; +} + +void GetUserMediaRequest::GetDevices( + nsTArray>& retval) const { + MOZ_ASSERT(retval.Length() == 0); + if (!mMediaDeviceSet) { + return; + } + for (const auto& device : *mMediaDeviceSet) { + retval.AppendElement(device); + } +} + +void 
GetUserMediaRequest::GetConstraints(MediaStreamConstraints& result) { + MOZ_ASSERT(result.mAudio.IsBoolean() && !result.mAudio.GetAsBoolean() && + result.mVideo.IsBoolean() && !result.mVideo.GetAsBoolean(), + "result should be default initialized"); + if (mConstraints) { + result = *mConstraints; + } +} + +void GetUserMediaRequest::GetAudioOutputOptions(AudioOutputOptions& result) { + MOZ_ASSERT(result.mDeviceId.IsEmpty(), + "result should be default initialized"); + if (mAudioOutputOptions) { + result = *mAudioOutputOptions; + } +} + +} // namespace mozilla::dom diff --git a/dom/media/GetUserMediaRequest.h b/dom/media/GetUserMediaRequest.h new file mode 100644 index 0000000000..a2b69bf1d6 --- /dev/null +++ b/dom/media/GetUserMediaRequest.h @@ -0,0 +1,93 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef GetUserMediaRequest_h__ +#define GetUserMediaRequest_h__ + +#include +#include "js/TypeDecls.h" +#include "mozilla/Assertions.h" +#include "mozilla/UniquePtr.h" +#include "nsCycleCollectionParticipant.h" +#include "nsISupports.h" +#include "nsString.h" +#include "nsWrapperCache.h" + +class nsIMediaDevice; +class nsPIDOMWindowInner; + +namespace mozilla { + +class LocalMediaDevice; + +namespace media { +template +class Refcountable; +} + +namespace dom { + +struct AudioOutputOptions; +struct MediaStreamConstraints; +enum class GetUserMediaRequestType : uint8_t; + +class GetUserMediaRequest : public nsISupports, public nsWrapperCache { + public: + using LocalMediaDeviceSetRefCnt = + media::Refcountable>>; + + // For getUserMedia "getUserMedia:request" + GetUserMediaRequest(nsPIDOMWindowInner* aInnerWindow, + const nsAString& aCallID, + RefPtr aMediaDeviceSet, + const MediaStreamConstraints& aConstraints, + bool aIsSecure, bool aIsHandlingUserInput); + // For selectAudioOutput "getUserMedia:request" + GetUserMediaRequest(nsPIDOMWindowInner* aInnerWindow, + const nsAString& aCallID, + RefPtr aMediaDeviceSet, + const AudioOutputOptions& aAudioOutputOptions, + bool aIsSecure, bool aIsHandlingUserInput); + // For "recording-device-stopped" + GetUserMediaRequest(nsPIDOMWindowInner* aInnerWindow, const nsAString& aRawId, + const nsAString& aMediaSource, bool aIsHandlingUserInput); + + NS_DECL_CYCLE_COLLECTING_ISUPPORTS + NS_DECL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(GetUserMediaRequest) + + JSObject* WrapObject(JSContext* cx, + JS::Handle aGivenProto) override; + nsISupports* GetParentObject(); + + GetUserMediaRequestType Type(); + uint64_t WindowID(); + uint64_t InnerWindowID(); + bool IsSecure(); + bool IsHandlingUserInput() const; + void GetCallID(nsString& retval); + void GetRawID(nsString& retval); + void GetMediaSource(nsString& retval); + void GetDevices(nsTArray>& retval) const; + void GetConstraints(MediaStreamConstraints& result); + void GetAudioOutputOptions(AudioOutputOptions& result); + + private: + virtual ~GetUserMediaRequest(); + + uint64_t mInnerWindowID, mOuterWindowID; + const nsString mCallID; + const nsString mRawID; + const nsString mMediaSource; + const RefPtr mMediaDeviceSet; + UniquePtr mConstraints; + UniquePtr mAudioOutputOptions; + GetUserMediaRequestType mType; + bool mIsSecure; + bool mIsHandlingUserInput; +}; + +} // namespace dom +} // namespace mozilla + +#endif // GetUserMediaRequest_h__ diff --git a/dom/media/GraphDriver.cpp b/dom/media/GraphDriver.cpp new file mode 100644 index 
0000000000..36c5b58864 --- /dev/null +++ b/dom/media/GraphDriver.cpp @@ -0,0 +1,1379 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "GraphDriver.h" + +#include "AudioNodeEngine.h" +#include "cubeb/cubeb.h" +#include "mozilla/dom/AudioContext.h" +#include "mozilla/dom/AudioDeviceInfo.h" +#include "mozilla/dom/BaseAudioContextBinding.h" +#include "mozilla/SchedulerGroup.h" +#include "mozilla/SharedThreadPool.h" +#include "mozilla/ClearOnShutdown.h" +#include "mozilla/Unused.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/StaticPrefs_media.h" +#include "CubebDeviceEnumerator.h" +#include "MediaTrackGraphImpl.h" +#include "CallbackThreadRegistry.h" +#include "Tracing.h" + +#ifdef MOZ_WEBRTC +# include "webrtc/MediaEngineWebRTC.h" +#endif + +#ifdef XP_MACOSX +# include +# include "nsCocoaFeatures.h" +#endif + +extern mozilla::LazyLogModule gMediaTrackGraphLog; +#ifdef LOG +# undef LOG +#endif // LOG +#define LOG(type, msg) MOZ_LOG(gMediaTrackGraphLog, type, msg) + +namespace mozilla { + +GraphDriver::GraphDriver(GraphInterface* aGraphInterface, + GraphDriver* aPreviousDriver, uint32_t aSampleRate) + : mGraphInterface(aGraphInterface), + mSampleRate(aSampleRate), + mPreviousDriver(aPreviousDriver) {} + +void GraphDriver::SetStreamName(const nsACString& aStreamName) { + MOZ_ASSERT(InIteration() || (!ThreadRunning() && NS_IsMainThread())); + mStreamName = aStreamName; + LOG(LogLevel::Debug, ("%p: GraphDriver::SetStreamName driver=%p %s", Graph(), + this, mStreamName.get())); +} + +void GraphDriver::SetState(const nsACString& aStreamName, + GraphTime aIterationEnd, + GraphTime aStateComputedTime) { + MOZ_ASSERT(InIteration() || !ThreadRunning()); + + mStreamName = aStreamName; + mIterationEnd = aIterationEnd; + mStateComputedTime = aStateComputedTime; +} + +#ifdef DEBUG +bool GraphDriver::InIteration() const { + return OnThread() || Graph()->InDriverIteration(this); +} +#endif + +GraphDriver* GraphDriver::PreviousDriver() { + MOZ_ASSERT(InIteration() || !ThreadRunning()); + return mPreviousDriver; +} + +void GraphDriver::SetPreviousDriver(GraphDriver* aPreviousDriver) { + MOZ_ASSERT(InIteration() || !ThreadRunning()); + mPreviousDriver = aPreviousDriver; +} + +ThreadedDriver::ThreadedDriver(GraphInterface* aGraphInterface, + GraphDriver* aPreviousDriver, + uint32_t aSampleRate) + : GraphDriver(aGraphInterface, aPreviousDriver, aSampleRate), + mThreadRunning(false) {} + +class MediaTrackGraphShutdownThreadRunnable : public Runnable { + public: + explicit MediaTrackGraphShutdownThreadRunnable( + already_AddRefed aThread) + : Runnable("MediaTrackGraphShutdownThreadRunnable"), mThread(aThread) {} + NS_IMETHOD Run() override { + TRACE("MediaTrackGraphShutdownThreadRunnable"); + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mThread); + + mThread->AsyncShutdown(); + mThread = nullptr; + return NS_OK; + } + + private: + nsCOMPtr mThread; +}; + +ThreadedDriver::~ThreadedDriver() { + if (mThread) { + nsCOMPtr event = + new MediaTrackGraphShutdownThreadRunnable(mThread.forget()); + SchedulerGroup::Dispatch(event.forget()); + } +} + +class MediaTrackGraphInitThreadRunnable : public Runnable { + public: + explicit MediaTrackGraphInitThreadRunnable(ThreadedDriver* aDriver) + : 
Runnable("MediaTrackGraphInitThreadRunnable"), mDriver(aDriver) {} + NS_IMETHOD Run() override { + TRACE("MediaTrackGraphInitThreadRunnable"); + MOZ_ASSERT(!mDriver->ThreadRunning()); + LOG(LogLevel::Debug, ("Starting a new system driver for graph %p", + mDriver->mGraphInterface.get())); + + if (GraphDriver* previousDriver = mDriver->PreviousDriver()) { + LOG(LogLevel::Debug, + ("%p releasing an AudioCallbackDriver(%p), for graph %p", + mDriver.get(), previousDriver, mDriver->Graph())); + MOZ_ASSERT(!mDriver->AsAudioCallbackDriver()); + AudioCallbackDriver* audioCallbackDriver = + previousDriver->AsAudioCallbackDriver(); + MOZ_ALWAYS_SUCCEEDS(audioCallbackDriver->mCubebOperationThread->Dispatch( + NS_NewRunnableFunction( + "ThreadedDriver previousDriver::Stop()", + [audioCallbackDriver = RefPtr{audioCallbackDriver}] { + audioCallbackDriver->Stop(); + }))); + mDriver->SetPreviousDriver(nullptr); + } + + mDriver->RunThread(); + return NS_OK; + } + + private: + RefPtr mDriver; +}; + +void ThreadedDriver::Start() { + MOZ_ASSERT(!ThreadRunning()); + LOG(LogLevel::Debug, + ("Starting thread for a SystemClockDriver %p", mGraphInterface.get())); + Unused << NS_WARN_IF(mThread); + MOZ_ASSERT(!mThread); // Ensure we haven't already started it + + nsCOMPtr event = new MediaTrackGraphInitThreadRunnable(this); + // Note: mThread may be null during event->Run() if we pass to NewNamedThread! + // See AudioInitTask + nsresult rv = NS_NewNamedThread("MediaTrackGrph", getter_AddRefs(mThread)); + if (NS_SUCCEEDED(rv)) { + mThread->Dispatch(event.forget(), NS_DISPATCH_NORMAL); + } +} + +void ThreadedDriver::Shutdown() { + NS_ASSERTION(NS_IsMainThread(), "Must be called on main thread"); + // mGraph's thread is not running so it's OK to do whatever here + LOG(LogLevel::Debug, ("Stopping threads for MediaTrackGraph %p", this)); + + if (mThread) { + LOG(LogLevel::Debug, + ("%p: Stopping ThreadedDriver's %p thread", Graph(), this)); + mThread->AsyncShutdown(); + mThread = nullptr; + } +} + +SystemClockDriver::SystemClockDriver(GraphInterface* aGraphInterface, + GraphDriver* aPreviousDriver, + uint32_t aSampleRate) + : ThreadedDriver(aGraphInterface, aPreviousDriver, aSampleRate), + mInitialTimeStamp(TimeStamp::Now()), + mCurrentTimeStamp(TimeStamp::Now()), + mLastTimeStamp(TimeStamp::Now()) {} + +SystemClockDriver::~SystemClockDriver() = default; + +void ThreadedDriver::RunThread() { + mThreadRunning = true; + while (true) { + auto iterationStart = mIterationEnd; + mIterationEnd += GetIntervalForIteration(); + + if (mStateComputedTime < mIterationEnd) { + LOG(LogLevel::Warning, ("%p: Global underrun detected", Graph())); + mIterationEnd = mStateComputedTime; + } + + if (iterationStart >= mIterationEnd) { + NS_ASSERTION(iterationStart == mIterationEnd, "Time can't go backwards!"); + // This could happen due to low clock resolution, maybe? + LOG(LogLevel::Debug, ("%p: Time did not advance", Graph())); + } + + GraphTime nextStateComputedTime = + MediaTrackGraphImpl::RoundUpToEndOfAudioBlock( + mIterationEnd + MillisecondsToMediaTime(AUDIO_TARGET_MS)); + if (nextStateComputedTime < mStateComputedTime) { + // A previous driver may have been processing further ahead of + // iterationEnd. + LOG(LogLevel::Warning, + ("%p: Prevent state from going backwards. 
interval[%ld; %ld] " + "state[%ld; " + "%ld]", + Graph(), (long)iterationStart, (long)mIterationEnd, + (long)mStateComputedTime, (long)nextStateComputedTime)); + nextStateComputedTime = mStateComputedTime; + } + LOG(LogLevel::Verbose, + ("%p: interval[%ld; %ld] state[%ld; %ld]", Graph(), + (long)iterationStart, (long)mIterationEnd, (long)mStateComputedTime, + (long)nextStateComputedTime)); + + mStateComputedTime = nextStateComputedTime; + IterationResult result = + Graph()->OneIteration(mStateComputedTime, mIterationEnd, nullptr); + + if (result.IsStop()) { + // Signal that we're done stopping. + result.Stopped(); + break; + } + WaitForNextIteration(); + if (GraphDriver* nextDriver = result.NextDriver()) { + LOG(LogLevel::Debug, ("%p: Switching to AudioCallbackDriver", Graph())); + result.Switched(); + nextDriver->SetState(mStreamName, mIterationEnd, mStateComputedTime); + nextDriver->Start(); + break; + } + MOZ_ASSERT(result.IsStillProcessing()); + } + mThreadRunning = false; +} + +MediaTime SystemClockDriver::GetIntervalForIteration() { + TimeStamp now = TimeStamp::Now(); + MediaTime interval = + SecondsToMediaTime((now - mCurrentTimeStamp).ToSeconds()); + mCurrentTimeStamp = now; + + MOZ_LOG(gMediaTrackGraphLog, LogLevel::Verbose, + ("%p: Updating current time to %f (real %f, StateComputedTime() %f)", + Graph(), MediaTimeToSeconds(mIterationEnd + interval), + (now - mInitialTimeStamp).ToSeconds(), + MediaTimeToSeconds(mStateComputedTime))); + + return interval; +} + +void ThreadedDriver::EnsureNextIteration() { + mWaitHelper.EnsureNextIteration(); +} + +void ThreadedDriver::WaitForNextIteration() { + MOZ_ASSERT(mThread); + MOZ_ASSERT(OnThread()); + mWaitHelper.WaitForNextIterationAtLeast(WaitInterval()); +} + +TimeDuration SystemClockDriver::WaitInterval() { + MOZ_ASSERT(mThread); + MOZ_ASSERT(OnThread()); + TimeStamp now = TimeStamp::Now(); + int64_t timeoutMS = MEDIA_GRAPH_TARGET_PERIOD_MS - + int64_t((now - mCurrentTimeStamp).ToMilliseconds()); + // Make sure timeoutMS doesn't overflow 32 bits by waking up at + // least once a minute, if we need to wake up at all + timeoutMS = std::max(0, std::min(timeoutMS, 60 * 1000)); + LOG(LogLevel::Verbose, + ("%p: Waiting for next iteration; at %f, timeout=%f", Graph(), + (now - mInitialTimeStamp).ToSeconds(), timeoutMS / 1000.0)); + + return TimeDuration::FromMilliseconds(timeoutMS); +} + +OfflineClockDriver::OfflineClockDriver(GraphInterface* aGraphInterface, + uint32_t aSampleRate, GraphTime aSlice) + : ThreadedDriver(aGraphInterface, nullptr, aSampleRate), mSlice(aSlice) {} + +OfflineClockDriver::~OfflineClockDriver() = default; + +void OfflineClockDriver::RunThread() { + nsCOMPtr threadInternal = do_QueryInterface(mThread); + nsCOMPtr observer = do_QueryInterface(Graph()); + threadInternal->SetObserver(observer); + + ThreadedDriver::RunThread(); +} + +MediaTime OfflineClockDriver::GetIntervalForIteration() { + return MillisecondsToMediaTime(mSlice); +} + +/* Helper to proxy the GraphInterface methods used by a running + * mFallbackDriver. 
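+ * The wrapper forwards OneIteration() to the real graph while the audio + * stream is starting, then hands control back to the AudioCallbackDriver + * (or stops) once the fallback driver finishes.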
*/ +class AudioCallbackDriver::FallbackWrapper : public GraphInterface { + public: + FallbackWrapper(RefPtr aGraph, + RefPtr aOwner, uint32_t aSampleRate, + const nsACString& aStreamName, GraphTime aIterationEnd, + GraphTime aStateComputedTime) + : mGraph(std::move(aGraph)), + mOwner(std::move(aOwner)), + mFallbackDriver( + MakeRefPtr(this, nullptr, aSampleRate)) { + mFallbackDriver->SetState(aStreamName, aIterationEnd, aStateComputedTime); + } + + NS_DECL_THREADSAFE_ISUPPORTS + + /* Proxied SystemClockDriver methods */ + void Start() { mFallbackDriver->Start(); } + MOZ_CAN_RUN_SCRIPT void Shutdown() { + RefPtr driver = mFallbackDriver; + driver->Shutdown(); + } + void SetStreamName(const nsACString& aStreamName) { + mFallbackDriver->SetStreamName(aStreamName); + } + void EnsureNextIteration() { mFallbackDriver->EnsureNextIteration(); } +#ifdef DEBUG + bool InIteration() { return mFallbackDriver->InIteration(); } +#endif + bool OnThread() { return mFallbackDriver->OnThread(); } + + /* GraphInterface methods */ + void NotifyInputStopped() override { + MOZ_CRASH("Unexpected NotifyInputStopped from fallback SystemClockDriver"); + } + void NotifyInputData(const AudioDataValue* aBuffer, size_t aFrames, + TrackRate aRate, uint32_t aChannels, + uint32_t aAlreadyBuffered) override { + MOZ_CRASH("Unexpected NotifyInputData from fallback SystemClockDriver"); + } + void DeviceChanged() override { + MOZ_CRASH("Unexpected DeviceChanged from fallback SystemClockDriver"); + } +#ifdef DEBUG + bool InDriverIteration(const GraphDriver* aDriver) const override { + return mGraph->InDriverIteration(mOwner) && mOwner->OnFallback(); + } +#endif + IterationResult OneIteration(GraphTime aStateComputedEnd, + GraphTime aIterationEnd, + MixerCallbackReceiver* aMixerReceiver) override { + MOZ_ASSERT(!aMixerReceiver); + +#ifdef DEBUG + AutoInCallback aic(mOwner); +#endif + + IterationResult result = + mGraph->OneIteration(aStateComputedEnd, aIterationEnd, aMixerReceiver); + + AudioStreamState audioState = mOwner->mAudioStreamState; + + MOZ_ASSERT(audioState != AudioStreamState::Stopping, + "The audio driver can only enter stopping if it iterated the " + "graph, which it can only do if there's no fallback driver"); + + // After a devicechange event from the audio driver, wait for a five + // millisecond grace period before handing control to the audio driver. We + // do this because cubeb leaves no guarantee on audio callbacks coming in + // after a device change event. + if (audioState == AudioStreamState::ChangingDevice && + mOwner->mChangingDeviceStartTime + TimeDuration::FromMilliseconds(5) < + TimeStamp::Now()) { + mOwner->mChangingDeviceStartTime = TimeStamp(); + if (mOwner->mAudioStreamState.compareExchange( + AudioStreamState::ChangingDevice, AudioStreamState::Starting)) { + audioState = AudioStreamState::Starting; + LOG(LogLevel::Debug, ("%p: Fallback driver has started. Waiting for " + "audio driver to start.", + mOwner.get())); + } + } + + if (audioState != AudioStreamState::Running && result.IsStillProcessing()) { + mOwner->MaybeStartAudioStream(); + return result; + } + + MOZ_ASSERT(result.IsStillProcessing() || result.IsStop() || + result.IsSwitchDriver()); + + // Proxy the release of the fallback driver to a background thread, so it + // doesn't perform unexpected suicide. 
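+ // (I.e. avoid dropping the fallback driver's last reference on its own + // thread while its iteration loop is still on the stack.)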
+ IterationResult stopFallback = + IterationResult::CreateStop(NS_NewRunnableFunction( + "AudioCallbackDriver::FallbackDriverStopped", + [self = RefPtr(this), this, aIterationEnd, + aStateComputedEnd, result = std::move(result)]() mutable { + FallbackDriverState fallbackState = + result.IsStillProcessing() ? FallbackDriverState::None + : FallbackDriverState::Stopped; + mOwner->FallbackDriverStopped(aIterationEnd, aStateComputedEnd, + fallbackState); + + if (fallbackState == FallbackDriverState::Stopped) { +#ifdef DEBUG + // The AudioCallbackDriver may not iterate the graph, but we'll + // call into it so we need to be regarded as "in iteration". + AutoInCallback aic(mOwner); +#endif + if (GraphDriver* nextDriver = result.NextDriver()) { + LOG(LogLevel::Debug, + ("%p: Switching from fallback to other driver.", + mOwner.get())); + result.Switched(); + nextDriver->SetState(mOwner->mStreamName, aIterationEnd, + aStateComputedEnd); + nextDriver->Start(); + } else if (result.IsStop()) { + LOG(LogLevel::Debug, + ("%p: Stopping fallback driver.", mOwner.get())); + result.Stopped(); + } + } + mOwner = nullptr; + NS_DispatchBackgroundTask(NS_NewRunnableFunction( + "AudioCallbackDriver::FallbackDriverStopped::Release", + [fallback = std::move(self->mFallbackDriver)] {})); + })); + + return stopFallback; + } + + private: + virtual ~FallbackWrapper() = default; + + const RefPtr mGraph; + // Valid until mFallbackDriver has finished its last iteration. + RefPtr mOwner; + RefPtr mFallbackDriver; +}; + +NS_IMPL_ISUPPORTS0(AudioCallbackDriver::FallbackWrapper) + +/* static */ +already_AddRefed AudioCallbackDriver::CreateTaskQueue() { + RefPtr pool = CUBEB_TASK_THREAD; + const uint32_t kIdleThreadTimeoutMs = 2000; + pool->SetIdleThreadTimeout(PR_MillisecondsToInterval(kIdleThreadTimeoutMs)); + + RefPtr queue = + TaskQueue::Create(pool.forget(), "AudioCallbackDriver cubeb task queue"); + return queue.forget(); +} + +AudioCallbackDriver::AudioCallbackDriver( + GraphInterface* aGraphInterface, GraphDriver* aPreviousDriver, + uint32_t aSampleRate, uint32_t aOutputChannelCount, + uint32_t aInputChannelCount, CubebUtils::AudioDeviceID aOutputDeviceID, + CubebUtils::AudioDeviceID aInputDeviceID, AudioInputType aAudioInputType) + : GraphDriver(aGraphInterface, aPreviousDriver, aSampleRate), + mOutputChannelCount(aOutputChannelCount), + mInputChannelCount(aInputChannelCount), + mOutputDeviceID(aOutputDeviceID), + mInputDeviceID(aInputDeviceID), + mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS), + mCubebOperationThread(CreateTaskQueue()), + mAudioThreadId(ProfilerThreadId{}), + mAudioThreadIdInCb(std::thread::id()), + mFallback("AudioCallbackDriver::mFallback"), + mSandboxed(CubebUtils::SandboxEnabled()) { + LOG(LogLevel::Debug, ("%p: AudioCallbackDriver %p ctor - input: device %p, " + "channel %d, output: device %p, channel %d", + Graph(), this, mInputDeviceID, mInputChannelCount, + mOutputDeviceID, mOutputChannelCount)); + + NS_WARNING_ASSERTION(mOutputChannelCount != 0, + "Invalid output channel count"); + MOZ_ASSERT(mOutputChannelCount <= 8); + + bool allowVoice = StaticPrefs:: + media_getusermedia_microphone_prefer_voice_stream_with_processing_enabled(); +#ifdef MOZ_WIDGET_COCOA + // Using the VoiceProcessingIO audio unit on MacOS 12 causes crashes in + // OS code. 
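+ // (With voice disallowed, the non-voice branch below is taken, i.e. + // CUBEB_DEVICE_PREF_ALL instead of CUBEB_DEVICE_PREF_VOICE.)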
+ allowVoice = allowVoice && nsCocoaFeatures::macOSVersionMajor() != 12; +#endif + + if (aAudioInputType == AudioInputType::Voice && allowVoice) { + LOG(LogLevel::Debug, ("VOICE.")); + mInputDevicePreference = CUBEB_DEVICE_PREF_VOICE; + CubebUtils::SetInCommunication(true); + } else { + mInputDevicePreference = CUBEB_DEVICE_PREF_ALL; + } +} + +AudioCallbackDriver::~AudioCallbackDriver() { + if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) { + CubebUtils::SetInCommunication(false); + } +} + +bool IsMacbookOrMacbookAir() { +#ifdef XP_MACOSX + size_t len = 0; + sysctlbyname("hw.model", NULL, &len, NULL, 0); + if (len) { + UniquePtr model(new char[len]); + // This string can be + // MacBook%d,%d for a normal MacBook + // MacBookAir%d,%d for a Macbook Air + sysctlbyname("hw.model", model.get(), &len, NULL, 0); + char* substring = strstr(model.get(), "MacBook"); + if (substring) { + const size_t offset = strlen("MacBook"); + if (!strncmp(model.get() + offset, "Air", 3) || + isdigit(model[offset + 1])) { + return true; + } + } + } +#endif + return false; +} + +void AudioCallbackDriver::Init(const nsCString& aStreamName) { + LOG(LogLevel::Debug, + ("%p: AudioCallbackDriver::Init driver=%p", Graph(), this)); + TRACE("AudioCallbackDriver::Init"); + MOZ_ASSERT(OnCubebOperationThread()); + MOZ_ASSERT(mAudioStreamState == AudioStreamState::Pending); + if (mFallbackDriverState == FallbackDriverState::Stopped) { + // The graph has already stopped us. + return; + } + RefPtr handle = CubebUtils::GetCubeb(); + if (!handle) { + NS_WARNING("Could not get cubeb context."); + LOG(LogLevel::Warning, ("%s: Could not get cubeb context", __func__)); + mAudioStreamState = AudioStreamState::None; + if (TryStartingFallbackDriver().isOk()) { + CubebUtils::ReportCubebStreamInitFailure(true); + } + return; + } + + cubeb_stream_params output; + cubeb_stream_params input; + bool firstStream = CubebUtils::GetFirstStream(); + + MOZ_ASSERT(!NS_IsMainThread(), + "This is blocking and should never run on the main thread."); + + output.rate = mSampleRate; + output.format = CUBEB_SAMPLE_FLOAT32NE; + + if (!mOutputChannelCount) { + LOG(LogLevel::Warning, ("Output number of channels is 0.")); + mAudioStreamState = AudioStreamState::None; + if (TryStartingFallbackDriver().isOk()) { + CubebUtils::ReportCubebStreamInitFailure(firstStream); + } + return; + } + + CubebUtils::AudioDeviceID forcedOutputDeviceId = nullptr; + + char* forcedOutputDeviceName = CubebUtils::GetForcedOutputDevice(); + if (forcedOutputDeviceName) { + RefPtr enumerator = Enumerator::GetInstance(); + RefPtr device = enumerator->DeviceInfoFromName( + NS_ConvertUTF8toUTF16(forcedOutputDeviceName), EnumeratorSide::OUTPUT); + if (device && device->DeviceID()) { + forcedOutputDeviceId = device->DeviceID(); + } + } + + mBuffer = AudioCallbackBufferWrapper(mOutputChannelCount); + mScratchBuffer = + SpillBuffer(mOutputChannelCount); + + output.channels = mOutputChannelCount; + AudioConfig::ChannelLayout::ChannelMap channelMap = + AudioConfig::ChannelLayout(mOutputChannelCount).Map(); + + output.layout = static_cast(channelMap); + output.prefs = CubebUtils::GetDefaultStreamPrefs(CUBEB_DEVICE_TYPE_OUTPUT); + if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE && + CubebUtils::RouteOutputAsVoice()) { + output.prefs |= static_cast(CUBEB_STREAM_PREF_VOICE); + } + + uint32_t latencyFrames = CubebUtils::GetCubebMTGLatencyInFrames(&output); + + LOG(LogLevel::Debug, ("Minimum latency in frames: %d", latencyFrames)); + + // Macbook and MacBook air don't have enough CPU to 
run very low latency + // MediaTrackGraphs, cap the minimal latency to 512 frames int this case. + if (IsMacbookOrMacbookAir()) { + latencyFrames = std::max((uint32_t)512, latencyFrames); + LOG(LogLevel::Debug, + ("Macbook or macbook air, new latency: %d", latencyFrames)); + } + + // Buffer sizes lower than 10ms are nowadays common. It's not very useful + // when doing voice, because all the WebRTC code that does audio input + // processing deals in 10ms chunks of audio. Take the first power of two + // above 10ms at the current rate in this case. It's probably 512, for common + // rates. + if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) { + if (latencyFrames < mSampleRate / 100) { + latencyFrames = mozilla::RoundUpPow2(mSampleRate / 100); + LOG(LogLevel::Debug, + ("AudioProcessing enabled, new latency %d", latencyFrames)); + } + } + + // It's not useful for the graph to run with a block size lower than the Web + // Audio API block size, but increasingly devices report that they can do + // audio latencies lower than that. + if (latencyFrames < WEBAUDIO_BLOCK_SIZE) { + LOG(LogLevel::Debug, + ("Latency clamped to %d from %d", WEBAUDIO_BLOCK_SIZE, latencyFrames)); + latencyFrames = WEBAUDIO_BLOCK_SIZE; + } + LOG(LogLevel::Debug, ("Effective latency in frames: %d", latencyFrames)); + + input = output; + input.channels = mInputChannelCount; + input.layout = CUBEB_LAYOUT_UNDEFINED; + input.prefs = CubebUtils::GetDefaultStreamPrefs(CUBEB_DEVICE_TYPE_INPUT); + if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) { + input.prefs |= static_cast(CUBEB_STREAM_PREF_VOICE); + } + + cubeb_stream* stream = nullptr; + const char* streamName = + aStreamName.IsEmpty() ? "AudioCallbackDriver" : aStreamName.get(); + bool inputWanted = mInputChannelCount > 0; + CubebUtils::AudioDeviceID outputId = mOutputDeviceID; + CubebUtils::AudioDeviceID inputId = mInputDeviceID; + + if (CubebUtils::CubebStreamInit( + handle->Context(), &stream, streamName, inputId, + inputWanted ? &input : nullptr, + forcedOutputDeviceId ? forcedOutputDeviceId : outputId, &output, + latencyFrames, DataCallback_s, StateCallback_s, this) == CUBEB_OK) { + mCubeb = handle; + mAudioStream.own(stream); + DebugOnly rv = + cubeb_stream_set_volume(mAudioStream, CubebUtils::GetVolumeScale()); + NS_WARNING_ASSERTION( + rv == CUBEB_OK, + "Could not set the audio stream volume in GraphDriver.cpp"); + CubebUtils::ReportCubebBackendUsed(); + } else { + NS_WARNING( + "Could not create a cubeb stream for MediaTrackGraph, falling " + "back to a SystemClockDriver"); + mAudioStreamState = AudioStreamState::None; + // Only report failures when we're not coming from a driver that was + // created itself as a fallback driver because of a previous audio driver + // failure. + if (TryStartingFallbackDriver().isOk()) { + CubebUtils::ReportCubebStreamInitFailure(firstStream); + } + return; + } + +#ifdef XP_MACOSX + PanOutputIfNeeded(inputWanted); +#endif + + cubeb_stream_register_device_changed_callback( + mAudioStream, AudioCallbackDriver::DeviceChangedCallback_s); + + // No-op if MOZ_DUMP_AUDIO is not defined as an environment variable. This + // is intended for diagnosing issues, and only works if the content sandbox is + // disabled. 
+ mInputStreamFile.Open("GraphDriverInput", input.channels, input.rate); + mOutputStreamFile.Open("GraphDriverOutput", output.channels, output.rate); + + if (NS_WARN_IF(!StartStream())) { + LOG(LogLevel::Warning, + ("%p: AudioCallbackDriver couldn't start a cubeb stream.", Graph())); + return; + } + + LOG(LogLevel::Debug, ("%p: AudioCallbackDriver started.", Graph())); +} + +void AudioCallbackDriver::SetCubebStreamName(const nsCString& aStreamName) { + MOZ_ASSERT(OnCubebOperationThread()); + MOZ_ASSERT(mAudioStream); + cubeb_stream_set_name(mAudioStream, aStreamName.get()); +} + +void AudioCallbackDriver::Start() { + MOZ_ASSERT(!IsStarted()); + MOZ_ASSERT(mAudioStreamState == AudioStreamState::None); + MOZ_ASSERT_IF(PreviousDriver(), PreviousDriver()->InIteration()); + mAudioStreamState = AudioStreamState::Pending; + + // Starting an audio driver could take a while. We start a system driver in + // the meantime so that the graph is kept running. + (void)TryStartingFallbackDriver(); + + if (mPreviousDriver) { + if (AudioCallbackDriver* previousAudioCallback = + mPreviousDriver->AsAudioCallbackDriver()) { + LOG(LogLevel::Debug, ("Releasing audio driver off main thread.")); + MOZ_ALWAYS_SUCCEEDS( + previousAudioCallback->mCubebOperationThread->Dispatch( + NS_NewRunnableFunction( + "AudioCallbackDriver previousDriver::Stop()", + [previousDriver = RefPtr{previousAudioCallback}] { + previousDriver->Stop(); + }))); + } else { + LOG(LogLevel::Debug, + ("Dropping driver reference for SystemClockDriver.")); + MOZ_ASSERT(mPreviousDriver->AsSystemClockDriver()); + } + mPreviousDriver = nullptr; + } + + LOG(LogLevel::Debug, ("Starting new audio driver off main thread, " + "to ensure it runs after previous shutdown.")); + MOZ_ALWAYS_SUCCEEDS(mCubebOperationThread->Dispatch( + NS_NewRunnableFunction("AudioCallbackDriver Init()", + [self = RefPtr{this}, streamName = mStreamName] { + self->Init(streamName); + }))); +} + +bool AudioCallbackDriver::StartStream() { + TRACE("AudioCallbackDriver::StartStream"); + MOZ_ASSERT(!IsStarted() && OnCubebOperationThread()); + // Set STARTING before cubeb_stream_start, since starting the cubeb stream + // can result in a callback (that may read mAudioStreamState) before + // mAudioStreamState would otherwise be set. 
+ mAudioStreamState = AudioStreamState::Starting; + if (cubeb_stream_start(mAudioStream) != CUBEB_OK) { + NS_WARNING("Could not start cubeb stream for MTG."); + return false; + } + + return true; +} + +void AudioCallbackDriver::Stop() { + LOG(LogLevel::Debug, + ("%p: AudioCallbackDriver::Stop driver=%p", Graph(), this)); + TRACE("AudioCallbackDriver::Stop"); + MOZ_ASSERT(OnCubebOperationThread()); + cubeb_stream_register_device_changed_callback(mAudioStream, nullptr); + if (cubeb_stream_stop(mAudioStream) != CUBEB_OK) { + NS_WARNING("Could not stop cubeb stream for MTG."); + } else { + mAudioStreamState = AudioStreamState::None; + } +} + +void AudioCallbackDriver::Shutdown() { + MOZ_ASSERT(NS_IsMainThread()); + RefPtr fallback; + { + auto fallbackLock = mFallback.Lock(); + fallback = fallbackLock.ref(); + fallbackLock.ref() = nullptr; + } + if (fallback) { + LOG(LogLevel::Debug, + ("%p: Releasing fallback driver %p.", Graph(), fallback.get())); + fallback->Shutdown(); + } + + LOG(LogLevel::Debug, + ("%p: Releasing audio driver off main thread (GraphDriver::Shutdown).", + Graph())); + + nsLiteralCString reason("AudioCallbackDriver::Shutdown"); + NS_DispatchAndSpinEventLoopUntilComplete( + reason, mCubebOperationThread, + NS_NewRunnableFunction(reason.get(), + [self = RefPtr{this}] { self->Stop(); })); +} + +void AudioCallbackDriver::SetStreamName(const nsACString& aStreamName) { + MOZ_ASSERT(InIteration() || !ThreadRunning()); + if (aStreamName == mStreamName) { + return; + } + // Record the stream name, which will be passed onto the next driver, if + // any, either from this driver or the fallback driver. + GraphDriver::SetStreamName(aStreamName); + { + auto fallbackLock = mFallback.Lock(); + FallbackWrapper* fallback = fallbackLock.ref().get(); + if (fallback) { + MOZ_ASSERT(fallback->InIteration()); + fallback->SetStreamName(aStreamName); + } + } + AudioStreamState streamState = mAudioStreamState; + if (streamState != AudioStreamState::None && + streamState != AudioStreamState::Stopping) { + MOZ_ALWAYS_SUCCEEDS(mCubebOperationThread->Dispatch( + NS_NewRunnableFunction("AudioCallbackDriver SetStreamName()", + [self = RefPtr{this}, streamName = mStreamName] { + self->SetCubebStreamName(streamName); + }))); + } +} + +/* static */ +long AudioCallbackDriver::DataCallback_s(cubeb_stream* aStream, void* aUser, + const void* aInputBuffer, + void* aOutputBuffer, long aFrames) { + AudioCallbackDriver* driver = reinterpret_cast(aUser); + return driver->DataCallback(static_cast(aInputBuffer), + static_cast(aOutputBuffer), + aFrames); +} + +/* static */ +void AudioCallbackDriver::StateCallback_s(cubeb_stream* aStream, void* aUser, + cubeb_state aState) { + AudioCallbackDriver* driver = reinterpret_cast(aUser); + driver->StateCallback(aState); +} + +/* static */ +void AudioCallbackDriver::DeviceChangedCallback_s(void* aUser) { + AudioCallbackDriver* driver = reinterpret_cast(aUser); + driver->DeviceChangedCallback(); +} + +AudioCallbackDriver::AutoInCallback::AutoInCallback( + AudioCallbackDriver* aDriver) + : mDriver(aDriver) { + MOZ_ASSERT(mDriver->mAudioThreadIdInCb == std::thread::id()); + mDriver->mAudioThreadIdInCb = std::this_thread::get_id(); +} + +AudioCallbackDriver::AutoInCallback::~AutoInCallback() { + MOZ_ASSERT(mDriver->mAudioThreadIdInCb == std::this_thread::get_id()); + mDriver->mAudioThreadIdInCb = std::thread::id(); +} + +bool AudioCallbackDriver::CheckThreadIdChanged() { + ProfilerThreadId id = profiler_current_thread_id(); + if (id != mAudioThreadId) { + mAudioThreadId = id; + 
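// (Returning true lets DataCallback() register the new thread id with the + // CallbackThreadRegistry when not sandboxed.) +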
return true; + } + return false; +} + +long AudioCallbackDriver::DataCallback(const AudioDataValue* aInputBuffer, + AudioDataValue* aOutputBuffer, + long aFrames) { + if (!mSandboxed && CheckThreadIdChanged()) { + CallbackThreadRegistry::Get()->Register(mAudioThreadId, + "NativeAudioCallback"); + } + + if (mAudioStreamState.compareExchange(AudioStreamState::Starting, + AudioStreamState::Running)) { + LOG(LogLevel::Verbose, ("%p: AudioCallbackDriver %p First audio callback " + "close the Fallback driver", + Graph(), this)); + } + + FallbackDriverState fallbackState = mFallbackDriverState; + if (MOZ_UNLIKELY(fallbackState == FallbackDriverState::Stopped)) { + // We're supposed to stop. + PodZero(aOutputBuffer, aFrames * mOutputChannelCount); + if (!mSandboxed) { + CallbackThreadRegistry::Get()->Unregister(mAudioThreadId); + } + return aFrames - 1; + } + + AudioStreamState audioStreamState = mAudioStreamState; + if (MOZ_UNLIKELY(audioStreamState == AudioStreamState::ChangingDevice || + fallbackState == FallbackDriverState::Running)) { + // Wait for the fallback driver to stop. Wake it up so it can stop if it's + // sleeping. + LOG(LogLevel::Verbose, + ("%p: AudioCallbackDriver %p Waiting for the Fallback driver to stop", + Graph(), this)); + EnsureNextIteration(); + PodZero(aOutputBuffer, aFrames * mOutputChannelCount); + return aFrames; + } + + MOZ_ASSERT(audioStreamState == AudioStreamState::Running); + TRACE_AUDIO_CALLBACK_BUDGET("AudioCallbackDriver real-time budget", aFrames, + mSampleRate); + TRACE("AudioCallbackDriver::DataCallback"); + +#ifdef DEBUG + AutoInCallback aic(this); +#endif + + uint32_t durationMS = aFrames * 1000 / mSampleRate; + + // For now, simply average the duration with the previous + // duration so there is some damping against sudden changes. + if (!mIterationDurationMS) { + mIterationDurationMS = durationMS; + } else { + mIterationDurationMS = (mIterationDurationMS * 3) + durationMS; + mIterationDurationMS /= 4; + } + + mBuffer.SetBuffer(aOutputBuffer, aFrames); + // fill part or all with leftover data from last iteration (since we + // align to Audio blocks) + uint32_t alreadyBuffered = mScratchBuffer.Empty(mBuffer); + + // State computed time is decided by the audio callback's buffer length. We + // compute the iteration start and end from there, trying to keep the amount + // of buffering in the graph constant. + GraphTime nextStateComputedTime = + MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(mStateComputedTime + + mBuffer.Available()); + + auto iterationStart = mIterationEnd; + // inGraph is the number of audio frames there is between the state time and + // the current time, i.e. the maximum theoretical length of the interval we + // could use as [iterationStart; mIterationEnd]. + GraphTime inGraph = mStateComputedTime - iterationStart; + // We want the interval [iterationStart; mIterationEnd] to be before the + // interval [mStateComputedTime; nextStateComputedTime]. We also want + // the distance between these intervals to be roughly equivalent each time, to + // ensure there is no clock drift between current time and state time. Since + // we can't act on the state time because we have to fill the audio buffer, we + // reclock the current time against the state time, here. 
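+ // (Advancing by 80% of inGraph keeps [iterationStart; mIterationEnd] safely + // behind [mStateComputedTime; nextStateComputedTime].)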
+ mIterationEnd = iterationStart + 0.8 * inGraph; + + LOG(LogLevel::Verbose, + ("%p: interval[%ld; %ld] state[%ld; %ld] (frames: %ld) (durationMS: %u) " + "(duration ticks: %ld)", + Graph(), (long)iterationStart, (long)mIterationEnd, + (long)mStateComputedTime, (long)nextStateComputedTime, (long)aFrames, + (uint32_t)durationMS, + (long)(nextStateComputedTime - mStateComputedTime))); + + if (mStateComputedTime < mIterationEnd) { + LOG(LogLevel::Error, ("%p: Media graph global underrun detected", Graph())); + MOZ_ASSERT_UNREACHABLE("We should not underrun in full duplex"); + mIterationEnd = mStateComputedTime; + } + + // Process mic data if any/needed + if (aInputBuffer && mInputChannelCount > 0) { + Graph()->NotifyInputData(aInputBuffer, static_cast(aFrames), + mSampleRate, mInputChannelCount, alreadyBuffered); + } + + IterationResult result = + Graph()->OneIteration(nextStateComputedTime, mIterationEnd, this); + + mStateComputedTime = nextStateComputedTime; + + MOZ_ASSERT(mBuffer.Available() == 0, + "The graph should have filled the buffer"); + + mBuffer.BufferFilled(); + +#ifdef MOZ_SAMPLE_TYPE_FLOAT32 + // Prevent returning NaN to the OS mixer, and propagating NaN into the reverse + // stream of the AEC. + NaNToZeroInPlace(aOutputBuffer, aFrames * mOutputChannelCount); +#endif + +#ifdef XP_MACOSX + // This only happens when the output is on a macbookpro's external speaker, + // that are stereo, but let's just be safe. + if (mNeedsPanning && mOutputChannelCount == 2) { + // hard pan to the right + for (uint32_t i = 0; i < aFrames * 2; i += 2) { + aOutputBuffer[i + 1] += aOutputBuffer[i]; + aOutputBuffer[i] = 0.0; + } + } +#endif + + // No-op if MOZ_DUMP_AUDIO is not defined as an environment variable + if (aInputBuffer) { + mInputStreamFile.Write(static_cast(aInputBuffer), + aFrames * mInputChannelCount); + } + mOutputStreamFile.Write(static_cast(aOutputBuffer), + aFrames * mOutputChannelCount); + + if (result.IsStop()) { + if (mInputDeviceID) { + mGraphInterface->NotifyInputStopped(); + } + // Signal that we have stopped. + result.Stopped(); + // Update the flag before handing over the graph and going to drain. + mAudioStreamState = AudioStreamState::Stopping; + if (!mSandboxed) { + CallbackThreadRegistry::Get()->Unregister(mAudioThreadId); + } + return aFrames - 1; + } + + if (GraphDriver* nextDriver = result.NextDriver()) { + LOG(LogLevel::Debug, + ("%p: Switching to %s driver.", Graph(), + nextDriver->AsAudioCallbackDriver() ? "audio" : "system")); + if (mInputDeviceID) { + mGraphInterface->NotifyInputStopped(); + } + result.Switched(); + mAudioStreamState = AudioStreamState::Stopping; + nextDriver->SetState(mStreamName, mIterationEnd, mStateComputedTime); + nextDriver->Start(); + if (!mSandboxed) { + CallbackThreadRegistry::Get()->Unregister(mAudioThreadId); + } + // Returning less than aFrames starts the draining and eventually stops the + // audio thread. This function will never get called again. 
+ return aFrames - 1; + } + + MOZ_ASSERT(result.IsStillProcessing()); + return aFrames; +} + +static const char* StateToString(cubeb_state aState) { + switch (aState) { + case CUBEB_STATE_STARTED: + return "STARTED"; + case CUBEB_STATE_STOPPED: + return "STOPPED"; + case CUBEB_STATE_DRAINED: + return "DRAINED"; + case CUBEB_STATE_ERROR: + return "ERROR"; + default: + MOZ_CRASH("Unexpected state!"); + } +} + +void AudioCallbackDriver::StateCallback(cubeb_state aState) { + MOZ_ASSERT(!InIteration()); + LOG(LogLevel::Debug, + ("AudioCallbackDriver(%p) State: %s", this, StateToString(aState))); + + if (aState == CUBEB_STATE_STARTED || aState == CUBEB_STATE_STOPPED) { + // Nothing to do for STARTED. + // + // For STOPPED, don't reset mAudioStreamState until after + // cubeb_stream_stop() returns, as wasapi_stream_stop() dispatches + // CUBEB_STATE_STOPPED before ensuring that data callbacks have finished. + // https://searchfox.org/mozilla-central/rev/f9beb753a84aa297713d1565dcd0c5e3c66e4174/media/libcubeb/src/cubeb_wasapi.cpp#3009,3012 + return; + } + + AudioStreamState streamState = mAudioStreamState; + if (streamState < AudioStreamState::Starting) { + // mAudioStream has already entered STOPPED, DRAINED, or ERROR. + // Don't reset a Pending state indicating that a task to destroy + // mAudioStream and init a new cubeb_stream has already been triggered. + return; + } + + // Reset for DRAINED or ERROR. + streamState = mAudioStreamState.exchange(AudioStreamState::None); + + if (aState == CUBEB_STATE_ERROR) { + // About to hand over control of the graph. Do not start a new driver if + // StateCallback() receives an error for this stream while the main thread + // or another driver has control of the graph. + if (streamState == AudioStreamState::Starting || + streamState == AudioStreamState::ChangingDevice || + streamState == AudioStreamState::Running) { + if (mFallbackDriverState.compareExchange(FallbackDriverState::None, + FallbackDriverState::Running)) { + // Only switch to fallback if it's not already running. It could be + // running with the callback driver having started but not seen a single + // callback yet. I.e., handover from fallback to callback is not done. + if (mInputDeviceID) { +#ifdef DEBUG + // No audio callback after an error. We're calling into the graph here + // so we need to be regarded as "in iteration". 
+ AutoInCallback aic(this); +#endif + mGraphInterface->NotifyInputStopped(); + } + FallbackToSystemClockDriver(); + } + } + } +} + +void AudioCallbackDriver::MixerCallback(AudioChunk* aMixedBuffer, + uint32_t aSampleRate) { + MOZ_ASSERT(InIteration()); + uint32_t toWrite = mBuffer.Available(); + + TrackTime frameCount = aMixedBuffer->mDuration; + if (!mBuffer.Available() && frameCount > 0) { + NS_WARNING("DataCallback buffer full, expect frame drops."); + } + + MOZ_ASSERT(mBuffer.Available() <= frameCount); + + mBuffer.WriteFrames(*aMixedBuffer, mBuffer.Available()); + MOZ_ASSERT(mBuffer.Available() == 0, + "Missing frames to fill audio callback's buffer."); + if (toWrite == frameCount) { + return; + } + + aMixedBuffer->SliceTo(toWrite, frameCount); + DebugOnly written = mScratchBuffer.Fill(*aMixedBuffer); + NS_WARNING_ASSERTION(written == frameCount - toWrite, "Dropping frames."); +}; + +void AudioCallbackDriver::PanOutputIfNeeded(bool aMicrophoneActive) { +#ifdef XP_MACOSX + TRACE("AudioCallbackDriver::PanOutputIfNeeded"); + cubeb_device* out = nullptr; + int rv; + char name[128]; + size_t length = sizeof(name); + + rv = sysctlbyname("hw.model", name, &length, NULL, 0); + if (rv) { + return; + } + + int major, minor; + for (uint32_t i = 0; i < length; i++) { + // skip the model name + if (isalpha(name[i])) { + continue; + } + sscanf(name + i, "%d,%d", &major, &minor); + break; + } + + enum MacbookModel { MacBook, MacBookPro, MacBookAir, NotAMacbook }; + + MacbookModel model; + + if (!strncmp(name, "MacBookPro", length)) { + model = MacBookPro; + } else if (strncmp(name, "MacBookAir", length)) { + model = MacBookAir; + } else if (strncmp(name, "MacBook", length)) { + model = MacBook; + } else { + model = NotAMacbook; + } + // For macbook pro before 2016 model (change of chassis), hard pan the audio + // to the right if the speakers are in use to avoid feedback. + if (model == MacBookPro && major <= 12) { + if (cubeb_stream_get_current_device(mAudioStream, &out) == CUBEB_OK) { + MOZ_ASSERT(out); + // Check if we are currently outputing sound on external speakers. + if (out->output_name && !strcmp(out->output_name, "ispk")) { + // Pan everything to the right speaker. + LOG(LogLevel::Debug, ("Using the built-in speakers, with%s audio input", + aMicrophoneActive ? "" : "out")); + mNeedsPanning = aMicrophoneActive; + } else { + LOG(LogLevel::Debug, ("Using an external output device")); + mNeedsPanning = false; + } + cubeb_stream_device_destroy(mAudioStream, out); + } + } +#endif +} + +void AudioCallbackDriver::DeviceChangedCallback() { + MOZ_ASSERT(!InIteration()); + // Set this before the atomic write. + mChangingDeviceStartTime = TimeStamp::Now(); + + if (mAudioStreamState.compareExchange(AudioStreamState::Running, + AudioStreamState::ChangingDevice)) { + // Change to ChangingDevice only if we're running, i.e. there has been a + // data callback and no state callback saying otherwise. + // - If the audio stream is not running, it has either been stopped or it is + // starting. In the latter case we assume there will be no data callback + // coming until after the device change is done. + // - If the audio stream is running here, there is no guarantee from the + // cubeb mac backend that no more data callback will occur before the + // device change takes place. They will however stop *soon*, and we hope + // they stop before the first callback from the fallback driver. 
+void AudioCallbackDriver::DeviceChangedCallback() {
+  MOZ_ASSERT(!InIteration());
+  // Set this before the atomic write.
+  mChangingDeviceStartTime = TimeStamp::Now();
+
+  if (mAudioStreamState.compareExchange(AudioStreamState::Running,
+                                        AudioStreamState::ChangingDevice)) {
+    // Change to ChangingDevice only if we're running, i.e. there has been a
+    // data callback and no state callback saying otherwise.
+    // - If the audio stream is not running, it has either been stopped or it is
+    //   starting. In the latter case we assume there will be no data callback
+    //   coming until after the device change is done.
+    // - If the audio stream is running here, there is no guarantee from the
+    //   cubeb mac backend that no more data callback will occur before the
+    //   device change takes place. They will however stop *soon*, and we hope
+    //   they stop before the first callback from the fallback driver. If the
+    //   fallback driver callback occurs before the last data callback before
+    //   the device switch, the worst case is that a long period of time
+    //   (seconds) may pass without the graph getting iterated at all.
+    Result<bool, FallbackDriverState> res = TryStartingFallbackDriver();
+
+    LOG(LogLevel::Info,
+        ("%p: AudioCallbackDriver %p underlying default device is changing. "
+         "Fallback %s.",
+         Graph(), this,
+         res.isOk() ? "started"
+                    : (res.inspectErr() == FallbackDriverState::Running
+                           ? "already running"
+                           : "has been stopped")));
+
+    if (res.isErr() && res.inspectErr() == FallbackDriverState::Stopped) {
+      mChangingDeviceStartTime = TimeStamp();
+    }
+  }
+
+  // Tell the audio engine the device has changed, it might want to reset some
+  // state.
+  Graph()->DeviceChanged();
+#ifdef XP_MACOSX
+  RefPtr<AudioCallbackDriver> self(this);
+  bool hasInput = mInputChannelCount;
+  NS_DispatchBackgroundTask(NS_NewRunnableFunction(
+      "PanOutputIfNeeded", [self{std::move(self)}, hasInput]() {
+        self->PanOutputIfNeeded(hasInput);
+      }));
+#endif
+}
+
+uint32_t AudioCallbackDriver::IterationDuration() {
+  MOZ_ASSERT(InIteration());
+  // The real fix would be to have an API in cubeb to give us the number. Short
+  // of that, we approximate it here. bug 1019507
+  return mIterationDurationMS;
+}
+
+void AudioCallbackDriver::EnsureNextIteration() {
+  if (mFallbackDriverState == FallbackDriverState::Running) {
+    auto fallback = mFallback.Lock();
+    if (fallback.ref()) {
+      fallback.ref()->EnsureNextIteration();
+    }
+  }
+}
+
+TimeDuration AudioCallbackDriver::AudioOutputLatency() {
+  TRACE("AudioCallbackDriver::AudioOutputLatency");
+  uint32_t latencyFrames;
+  int rv = cubeb_stream_get_latency(mAudioStream, &latencyFrames);
+  if (rv || mSampleRate == 0) {
+    return TimeDuration::FromSeconds(0.0);
+  }
+
+  return TimeDuration::FromSeconds(static_cast<double>(latencyFrames) /
+                                   mSampleRate);
+}
+
+bool AudioCallbackDriver::OnFallback() const {
+  MOZ_ASSERT(InIteration());
+  return mFallbackDriverState == FallbackDriverState::Running;
+}
+
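Editorial aside: DeviceChangedCallback() above claims the Running -> ChangingDevice transition with a compare-exchange so that only the winning path starts the fallback driver. A reduced sketch of that claim pattern with std::atomic (hypothetical names, not the mozilla::Atomic API):

```cpp
#include <atomic>
#include <cstdio>

enum class StreamState { None, Starting, ChangingDevice, Running };

static std::atomic<StreamState> gState{StreamState::Running};

void OnDeviceChanged() {
  StreamState expected = StreamState::Running;
  if (gState.compare_exchange_strong(expected, StreamState::ChangingDevice)) {
    // We won the Running -> ChangingDevice transition: safe to start the
    // fallback exactly once.
    std::puts("claimed Running -> ChangingDevice");
  } else {
    // Someone else already moved the state (e.g. it was Starting or None).
    std::printf("not running (state=%d), nothing to do\n",
                static_cast<int>(expected));
  }
}
```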
+Result<bool, AudioCallbackDriver::FallbackDriverState>
+AudioCallbackDriver::TryStartingFallbackDriver() {
+  FallbackDriverState oldState =
+      mFallbackDriverState.exchange(FallbackDriverState::Running);
+  switch (oldState) {
+    case FallbackDriverState::None:
+      // None -> Running: we can start the fallback.
+      FallbackToSystemClockDriver();
+      return true;
+    case FallbackDriverState::Stopped:
+      // Stopped -> Running: Invalid edge, the graph has told us to stop.
+      // Restore the state.
+      mFallbackDriverState = oldState;
+      [[fallthrough]];
+    case FallbackDriverState::Running:
+      // Nothing to do, return the state.
+      return Err(oldState);
+  }
+  MOZ_CRASH("Unexpected fallback state");
+}
+
+void AudioCallbackDriver::FallbackToSystemClockDriver() {
+  MOZ_ASSERT(mFallbackDriverState == FallbackDriverState::Running);
+  DebugOnly<AudioStreamState> audioStreamState =
+      static_cast<AudioStreamState>(mAudioStreamState);
+  MOZ_ASSERT(audioStreamState == AudioStreamState::None ||
+             audioStreamState == AudioStreamState::Pending ||
+             audioStreamState == AudioStreamState::ChangingDevice);
+  LOG(LogLevel::Debug,
+      ("%p: AudioCallbackDriver %p Falling back to SystemClockDriver.", Graph(),
+       this));
+  mNextReInitBackoffStep =
+      TimeDuration::FromMilliseconds(AUDIO_INITIAL_FALLBACK_BACKOFF_STEP_MS);
+  mNextReInitAttempt = TimeStamp::Now() + mNextReInitBackoffStep;
+  auto fallback =
+      MakeRefPtr<FallbackWrapper>(Graph(), this, mSampleRate, mStreamName,
+                                  mIterationEnd, mStateComputedTime);
+  {
+    auto driver = mFallback.Lock();
+    MOZ_RELEASE_ASSERT(!driver.ref());
+    driver.ref() = fallback;
+  }
+  fallback->Start();
+}
+
+void AudioCallbackDriver::FallbackDriverStopped(GraphTime aIterationEnd,
+                                                GraphTime aStateComputedTime,
+                                                FallbackDriverState aState) {
+  mIterationEnd = aIterationEnd;
+  mStateComputedTime = aStateComputedTime;
+  mNextReInitAttempt = TimeStamp();
+  mNextReInitBackoffStep = TimeDuration();
+  {
+    auto fallback = mFallback.Lock();
+    MOZ_ASSERT(fallback.ref()->OnThread());
+    fallback.ref() = nullptr;
+  }
+
+  MOZ_ASSERT(aState == FallbackDriverState::None ||
+             aState == FallbackDriverState::Stopped);
+  mFallbackDriverState = aState;
+  AudioStreamState audioState = mAudioStreamState;
+  LOG(LogLevel::Debug,
+      ("%p: AudioCallbackDriver %p Fallback driver stopped.%s%s", Graph(), this,
+       aState == FallbackDriverState::Stopped ? " Draining." : "",
+       aState == FallbackDriverState::None &&
+               audioState == AudioStreamState::ChangingDevice
+           ? " Starting another due to device change."
+           : ""));
+
+  if (aState == FallbackDriverState::None) {
+    MOZ_ASSERT(audioState == AudioStreamState::Running ||
+               audioState == AudioStreamState::ChangingDevice);
+    if (audioState == AudioStreamState::ChangingDevice) {
+      MOZ_ALWAYS_OK(TryStartingFallbackDriver());
+    }
+  }
+}
+
+void AudioCallbackDriver::MaybeStartAudioStream() {
+  AudioStreamState streamState = mAudioStreamState;
+  if (streamState != AudioStreamState::None) {
+    LOG(LogLevel::Verbose,
+        ("%p: AudioCallbackDriver %p Cannot re-init.", Graph(), this));
+    return;
+  }
+
+  TimeStamp now = TimeStamp::Now();
+  if (now < mNextReInitAttempt) {
+    LOG(LogLevel::Verbose,
+        ("%p: AudioCallbackDriver %p Not time to re-init yet. %.3fs left.",
+         Graph(), this, (mNextReInitAttempt - now).ToSeconds()));
+    return;
+  }
+
+  LOG(LogLevel::Debug, ("%p: AudioCallbackDriver %p Attempting to re-init "
+                        "audio stream from fallback driver.",
+                        Graph(), this));
+  mNextReInitBackoffStep =
+      std::min(mNextReInitBackoffStep * 2,
+               TimeDuration::FromMilliseconds(
+                   StaticPrefs::media_audio_device_retry_ms()));
+  mNextReInitAttempt = now + mNextReInitBackoffStep;
+  Start();
+}
+
+} // namespace mozilla
+
+// avoid redefined macro in unified build
+#undef LOG
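Editorial aside: FallbackToSystemClockDriver() seeds a 10 ms re-init backoff and MaybeStartAudioStream() doubles it on every attempt up to a pref-controlled cap. A self-contained sketch of that capped exponential backoff using std::chrono; the ReInitBackoff helper is hypothetical, the constants mirror AUDIO_INITIAL_FALLBACK_BACKOFF_STEP_MS and the 1 s cap.

```cpp
#include <algorithm>
#include <chrono>

using Clock = std::chrono::steady_clock;
using Ms = std::chrono::milliseconds;

struct ReInitBackoff {
  Ms step{10};   // initial step (10 ms)
  Ms cap{1000};  // maximum step (1 s)
  Clock::time_point nextTry = Clock::now() + Ms{10};

  // Returns true when a re-init attempt should be made now; each attempt
  // doubles the wait, up to the cap.
  bool ShouldTryNow() {
    auto now = Clock::now();
    if (now < nextTry) {
      return false;
    }
    step = std::min(step * 2, cap);
    nextTry = now + step;
    return true;
  }
};
```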
diff --git a/dom/media/GraphDriver.h b/dom/media/GraphDriver.h new file mode 100644 index 0000000000..9ada03e7e6 --- /dev/null +++ b/dom/media/GraphDriver.h @@ -0,0 +1,793 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GRAPHDRIVER_H_
+#define GRAPHDRIVER_H_
+
+#include "nsAutoRef.h"
+#include "nsIThread.h"
+#include "AudioBufferUtils.h"
+#include "AudioMixer.h"
+#include "AudioSegment.h"
+#include "SelfRef.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/dom/AudioContext.h"
+#include "mozilla/DataMutex.h"
+#include "mozilla/TaskQueue.h"
+#include "mozilla/StaticPtr.h"
+#include "WavDumper.h"
+
+#include <thread>
+
+struct cubeb_stream;
+
+template <>
+class nsAutoRefTraits<cubeb_stream> : public nsPointerRefTraits<cubeb_stream> {
+ public:
+  static void Release(cubeb_stream* aStream) { cubeb_stream_destroy(aStream); }
+};
+
+namespace mozilla {
+
+// A thread pool containing only one thread to execute the cubeb operations. We
+// should always use this thread to init, destroy, start, or stop cubeb streams,
+// to avoid data racing or deadlock issues across platforms.
+#define CUBEB_TASK_THREAD SharedThreadPool::Get("CubebOperation"_ns, 1)
+
+/**
+ * Assume we can run an iteration of the MediaTrackGraph loop in this much time
+ * or less.
+ * We try to run the control loop at this rate.
+ */
+static const int MEDIA_GRAPH_TARGET_PERIOD_MS = 10;
+
+/**
+ * Assume that we might miss our scheduled wakeup of the MediaTrackGraph by
+ * this much.
+ */
+static const int SCHEDULE_SAFETY_MARGIN_MS = 10;
+
+/**
+ * Try to have this much audio buffered in streams and queued to the hardware.
+ * The maximum delay to the end of the next control loop
+ * is 2*MEDIA_GRAPH_TARGET_PERIOD_MS + SCHEDULE_SAFETY_MARGIN_MS.
+ * There is no point in buffering more audio than this in a stream at any
+ * given time (until we add processing).
+ * This is not optimal yet.
+ */
+static const int AUDIO_TARGET_MS =
+    2 * MEDIA_GRAPH_TARGET_PERIOD_MS + SCHEDULE_SAFETY_MARGIN_MS;
+
+/**
+ * After starting a fallback driver, wait this long before attempting to re-init
+ * the audio stream the first time.
+ */
+static const int AUDIO_INITIAL_FALLBACK_BACKOFF_STEP_MS = 10;
+
+/**
+ * The backoff step duration for when to next attempt to re-init the audio
+ * stream is capped at this value.
+ */
+static const int AUDIO_MAX_FALLBACK_BACKOFF_STEP_MS = 1000;
+
+class AudioCallbackDriver;
+class GraphDriver;
+class MediaTrack;
+class OfflineClockDriver;
+class SystemClockDriver;
+
+namespace dom {
+enum class AudioContextOperation : uint8_t;
+}
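Editorial aside: the GraphInterface declaration just below documents the OneIteration() contract (StillProcessing keeps iterating; Stop and SwitchDriver must each be acknowledged exactly once). A hypothetical sketch of a driver loop consuming such a result, using std::variant rather than mozilla::Variant:

```cpp
#include <functional>
#include <variant>

struct StillProcessing {};
struct Stop { std::function<void()> stopped; };
struct SwitchDriver { std::function<void()> startNext, switched; };
using Result = std::variant<StillProcessing, Stop, SwitchDriver>;

// One turn of a simplified driver loop; returns false when the loop must end.
bool HandleIteration(const Result& result) {
  if (std::holds_alternative<StillProcessing>(result)) {
    return true;  // keep the iterations coming
  }
  if (const auto* stop = std::get_if<Stop>(&result)) {
    stop->stopped();  // must be signalled exactly once
    return false;
  }
  const auto& sw = std::get<SwitchDriver>(result);
  sw.switched();   // acknowledge the switch, then hand over
  sw.startNext();  // start the next driver
  return false;
}
```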
+
+struct GraphInterface : public nsISupports {
+  /**
+   * Object returned from OneIteration() instructing the iterating GraphDriver
+   * what to do.
+   *
+   * - If the result is StillProcessing: keep the iterations coming.
+   * - If the result is Stop: the driver potentially updates its internal state
+   *   and interacts with the graph (e.g., NotifyOutputData), then it must call
+   *   Stopped() exactly once.
+   * - If the result is SwitchDriver: the driver updates internal state as for
+   *   the Stop result, then it must call Switched() exactly once and start
+   *   NextDriver().
+   */
+  class IterationResult final {
+    struct Undefined {};
+    struct StillProcessing {};
+    struct Stop {
+      explicit Stop(RefPtr<Runnable> aStoppedRunnable)
+          : mStoppedRunnable(std::move(aStoppedRunnable)) {}
+      Stop(const Stop&) = delete;
+      Stop(Stop&& aOther) noexcept
+          : mStoppedRunnable(std::move(aOther.mStoppedRunnable)) {}
+      ~Stop() { MOZ_ASSERT(!mStoppedRunnable); }
+      RefPtr<Runnable> mStoppedRunnable;
+      void Stopped() {
+        mStoppedRunnable->Run();
+        mStoppedRunnable = nullptr;
+      }
+    };
+    struct SwitchDriver {
+      SwitchDriver(RefPtr<GraphDriver> aDriver,
+                   RefPtr<Runnable> aSwitchedRunnable)
+          : mDriver(std::move(aDriver)),
+            mSwitchedRunnable(std::move(aSwitchedRunnable)) {}
+      SwitchDriver(const SwitchDriver&) = delete;
+      SwitchDriver(SwitchDriver&& aOther) noexcept
+          : mDriver(std::move(aOther.mDriver)),
+            mSwitchedRunnable(std::move(aOther.mSwitchedRunnable)) {}
+      ~SwitchDriver() { MOZ_ASSERT(!mSwitchedRunnable); }
+      RefPtr<GraphDriver> mDriver;
+      RefPtr<Runnable> mSwitchedRunnable;
+      void Switched() {
+        mSwitchedRunnable->Run();
+        mSwitchedRunnable = nullptr;
+      }
+    };
+    Variant<Undefined, StillProcessing, Stop, SwitchDriver> mResult;
+
+    explicit IterationResult(StillProcessing&& aArg)
+        : mResult(std::move(aArg)) {}
+    explicit IterationResult(Stop&& aArg) : mResult(std::move(aArg)) {}
+    explicit IterationResult(SwitchDriver&& aArg) : mResult(std::move(aArg)) {}
+
+   public:
+    IterationResult() : mResult(Undefined()) {}
+    IterationResult(const IterationResult&) = delete;
+    IterationResult(IterationResult&&) = default;
+
+    IterationResult& operator=(const IterationResult&) = delete;
+    IterationResult& operator=(IterationResult&&) = default;
+
+    static IterationResult CreateStillProcessing() {
+      return IterationResult(StillProcessing());
+    }
+    static IterationResult CreateStop(RefPtr<Runnable> aStoppedRunnable) {
+      return IterationResult(Stop(std::move(aStoppedRunnable)));
+    }
+    static IterationResult CreateSwitchDriver(
+        RefPtr<GraphDriver> aDriver, RefPtr<Runnable> aSwitchedRunnable) {
+      return IterationResult(
+          SwitchDriver(std::move(aDriver), std::move(aSwitchedRunnable)));
+    }
+
+    bool IsStillProcessing() const { return mResult.is<StillProcessing>(); }
+    bool IsStop() const { return mResult.is<Stop>(); }
+    bool IsSwitchDriver() const { return mResult.is<SwitchDriver>(); }
+
+    void Stopped() {
+      MOZ_ASSERT(IsStop());
+      mResult.as<Stop>().Stopped();
+    }
+
+    GraphDriver* NextDriver() const {
+      if (!IsSwitchDriver()) {
+        return nullptr;
+      }
+      return mResult.as<SwitchDriver>().mDriver;
+    }
+
+    void Switched() {
+      MOZ_ASSERT(IsSwitchDriver());
+      mResult.as<SwitchDriver>().Switched();
+    }
+  };
+
+  /* Called on the graph thread after an AudioCallbackDriver with an input
+   * stream has stopped. */
+  virtual void NotifyInputStopped() = 0;
+  /* Called on the graph thread when there is new input data for listeners. This
+   * is the raw audio input for this MediaTrackGraph. */
+  virtual void NotifyInputData(const AudioDataValue* aBuffer, size_t aFrames,
+                               TrackRate aRate, uint32_t aChannels,
+                               uint32_t aAlreadyBuffered) = 0;
+  /* Called every time there are changes to input/output audio devices like
+   * plug/unplug etc. This can be called on any thread, and posts a message to
+   * the main thread so that it can post a message to the graph thread. */
+  virtual void DeviceChanged() = 0;
+  /* Called by GraphDriver to iterate the graph. Mixed audio output from the
+   * graph is passed into aMixerReceiver, if it is non-null. */
+  virtual IterationResult OneIteration(
+      GraphTime aStateComputedEnd, GraphTime aIterationEnd,
+      MixerCallbackReceiver* aMixerReceiver) = 0;
+#ifdef DEBUG
+  /* True if we're on aDriver's thread, or if we're on mGraphRunner's thread
+   * and mGraphRunner is currently run by aDriver.
*/ + virtual bool InDriverIteration(const GraphDriver* aDriver) const = 0; +#endif +}; + +/** + * A driver is responsible for the scheduling of the processing, the thread + * management, and give the different clocks to a MediaTrackGraph. This is an + * abstract base class. A MediaTrackGraph can be driven by an + * OfflineClockDriver, if the graph is offline, or a SystemClockDriver or an + * AudioCallbackDriver, if the graph is real time. + * A MediaTrackGraph holds an owning reference to its driver. + * + * The lifetime of drivers is a complicated affair. Here are the different + * scenarii that can happen: + * + * Starting a MediaTrackGraph with an AudioCallbackDriver + * - A new thread T is created, from the main thread. + * - On this thread T, cubeb is initialized if needed, and a cubeb_stream is + * created and started + * - The thread T posts a message to the main thread to terminate itself. + * - The graph runs off the audio thread + * + * Starting a MediaTrackGraph with a SystemClockDriver: + * - A new thread T is created from the main thread. + * - The graph runs off this thread. + * + * Switching from a SystemClockDriver to an AudioCallbackDriver: + * - At the end of the MTG iteration, the graph tells the current driver to + * switch to an AudioCallbackDriver, which is created and initialized on the + * graph thread. + * - At the end of the MTG iteration, the SystemClockDriver transfers its timing + * info and a reference to itself to the AudioCallbackDriver. It then starts + * the AudioCallbackDriver. + * - When the AudioCallbackDriver starts, it: + * - Starts a fallback SystemClockDriver that runs until the + * AudioCallbackDriver is running, in case it takes a long time to start (it + * could block on I/O, e.g., negotiating a bluetooth connection). + * - Checks if it has been switched from a SystemClockDriver, and if that is + * the case, sends a message to the main thread to shut the + * SystemClockDriver thread down. + * - When the AudioCallbackDriver is running, data callbacks are blocked. The + * fallback driver detects this in its callback and stops itself. The first + * DataCallback after the fallback driver had stopped goes through. + * - The graph now runs off an audio callback. + * + * Switching from an AudioCallbackDriver to a SystemClockDriver: + * - At the end of the MTG iteration, the graph tells the current driver to + * switch to a SystemClockDriver. + * - the AudioCallbackDriver transfers its timing info and a reference to itself + * to the SystemClockDriver. A new SystemClockDriver is started from the + * current audio thread. + * - When starting, the SystemClockDriver checks if it has been switched from an + * AudioCallbackDriver. If yes, it creates a new temporary thread to release + * the cubeb_streams. This temporary thread closes the cubeb_stream, and then + * dispatches a message to the main thread to be terminated. + * - The graph now runs off a normal thread. + * + * Two drivers cannot run at the same time for the same graph. The thread safety + * of the different members of drivers, and their access pattern is documented + * next to the members themselves. + */ +class GraphDriver { + public: + using IterationResult = GraphInterface::IterationResult; + + GraphDriver(GraphInterface* aGraphInterface, GraphDriver* aPreviousDriver, + uint32_t aSampleRate); + + NS_INLINE_DECL_PURE_VIRTUAL_REFCOUNTING + + /* Start the graph, init the driver, start the thread. + * A driver cannot be started twice, it must be shutdown + * before being started again. 
*/ + virtual void Start() = 0; + /* Shutdown GraphDriver */ + MOZ_CAN_RUN_SCRIPT virtual void Shutdown() = 0; + /* Set the UTF-8 name for system audio streams. + * Graph thread, or main thread if the graph is not running. */ + virtual void SetStreamName(const nsACString& aStreamName); + /* Rate at which the GraphDriver runs, in ms. This can either be user + * controlled (because we are using a {System,Offline}ClockDriver, and decide + * how often we want to wakeup/how much we want to process per iteration), or + * it can be indirectly set by the latency of the audio backend, and the + * number of buffers of this audio backend: say we have four buffers, and 40ms + * latency, we will get a callback approximately every 10ms. */ + virtual uint32_t IterationDuration() = 0; + /* + * Signaled by the graph when it needs another iteration. Goes unhandled for + * GraphDrivers that are not able to sleep indefinitely (i.e., all drivers but + * ThreadedDriver). Can be called on any thread. + */ + virtual void EnsureNextIteration() = 0; + + // Those are simply for accessing the associated pointer. Graph thread only, + // or if one is not running, main thread. + GraphDriver* PreviousDriver(); + void SetPreviousDriver(GraphDriver* aPreviousDriver); + + virtual AudioCallbackDriver* AsAudioCallbackDriver() { return nullptr; } + virtual const AudioCallbackDriver* AsAudioCallbackDriver() const { + return nullptr; + } + + virtual OfflineClockDriver* AsOfflineClockDriver() { return nullptr; } + virtual const OfflineClockDriver* AsOfflineClockDriver() const { + return nullptr; + } + + virtual SystemClockDriver* AsSystemClockDriver() { return nullptr; } + virtual const SystemClockDriver* AsSystemClockDriver() const { + return nullptr; + } + + /** + * Set the state of the driver so it can start at the right point in time, + * after switching from another driver. + */ + void SetState(const nsACString& aStreamName, GraphTime aIterationEnd, + GraphTime aStateComputedTime); + + GraphInterface* Graph() const { return mGraphInterface; } + +#ifdef DEBUG + // True if the current thread is currently iterating the MTG. + bool InIteration() const; +#endif + // True if the current thread is the GraphDriver's thread. + virtual bool OnThread() const = 0; + // GraphDriver's thread has started and the thread is running. + virtual bool ThreadRunning() const = 0; + + double MediaTimeToSeconds(GraphTime aTime) const { + NS_ASSERTION(aTime > -TRACK_TIME_MAX && aTime <= TRACK_TIME_MAX, + "Bad time"); + return static_cast(aTime) / mSampleRate; + } + + GraphTime SecondsToMediaTime(double aS) const { + NS_ASSERTION(0 <= aS && aS <= TRACK_TICKS_MAX / TRACK_RATE_MAX, + "Bad seconds"); + return mSampleRate * aS; + } + + GraphTime MillisecondsToMediaTime(int32_t aMS) const { + return RateConvertTicksRoundDown(mSampleRate, 1000, aMS); + } + + protected: + // The UTF-8 name for system audio streams. Graph thread. + nsCString mStreamName; + // Time of the end of this graph iteration. + GraphTime mIterationEnd = 0; + // Time until which the graph has processed data. + GraphTime mStateComputedTime = 0; + // The GraphInterface this driver is currently iterating. + const RefPtr mGraphInterface; + // The sample rate for the graph, and in case of an audio driver, also for the + // cubeb stream. + const uint32_t mSampleRate; + + // This is non-null only when this driver has recently switched from an other + // driver, and has not cleaned it up yet (for example because the audio stream + // is currently calling the callback during initialization). 
+ // + // This is written to when changing driver, from the previous driver's thread, + // or a thread created for the occasion. This is read each time we need to + // check whether we're changing driver (in Switching()), from the graph + // thread. + // This must be accessed using the {Set,Get}PreviousDriver methods. + RefPtr mPreviousDriver; + + virtual ~GraphDriver() = default; +}; + +class MediaTrackGraphInitThreadRunnable; + +/** + * This class is a driver that manages its own thread. + */ +class ThreadedDriver : public GraphDriver { + class IterationWaitHelper { + Monitor mMonitor MOZ_UNANNOTATED; + // The below members are guarded by mMonitor. + bool mNeedAnotherIteration = false; + TimeStamp mWakeTime; + + public: + IterationWaitHelper() : mMonitor("IterationWaitHelper::mMonitor") {} + + /** + * If another iteration is needed we wait for aDuration, otherwise we wait + * for a wake-up. If a wake-up occurs before aDuration time has passed, we + * wait for aDuration nonetheless. + */ + void WaitForNextIterationAtLeast(TimeDuration aDuration) { + MonitorAutoLock lock(mMonitor); + TimeStamp now = TimeStamp::Now(); + mWakeTime = now + aDuration; + while (true) { + if (mNeedAnotherIteration && now >= mWakeTime) { + break; + } + if (mNeedAnotherIteration) { + lock.Wait(mWakeTime - now); + } else { + lock.Wait(TimeDuration::Forever()); + } + now = TimeStamp::Now(); + } + mWakeTime = TimeStamp(); + mNeedAnotherIteration = false; + } + + /** + * Sets mNeedAnotherIteration to true and notifies the monitor, in case a + * driver is currently waiting. + */ + void EnsureNextIteration() { + MonitorAutoLock lock(mMonitor); + mNeedAnotherIteration = true; + lock.Notify(); + } + }; + + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ThreadedDriver, override); + + ThreadedDriver(GraphInterface* aGraphInterface, GraphDriver* aPreviousDriver, + uint32_t aSampleRate); + + void EnsureNextIteration() override; + void Start() override; + MOZ_CAN_RUN_SCRIPT void Shutdown() override; + /** + * Runs main control loop on the graph thread. Normally a single invocation + * of this runs for the entire lifetime of the graph thread. + */ + virtual void RunThread(); + friend class MediaTrackGraphInitThreadRunnable; + uint32_t IterationDuration() override { return MEDIA_GRAPH_TARGET_PERIOD_MS; } + + nsIThread* Thread() const { return mThread; } + + bool OnThread() const override { + return !mThread || mThread->IsOnCurrentThread(); + } + + bool ThreadRunning() const override { return mThreadRunning; } + + protected: + /* Waits until it's time to process more data. */ + void WaitForNextIteration(); + /* Implementation dependent time the ThreadedDriver should wait between + * iterations. */ + virtual TimeDuration WaitInterval() = 0; + /* When the graph wakes up to do an iteration, implementations return the + * range of time that will be processed. This is called only once per + * iteration; it may determine the interval from state in a previous + * call. */ + virtual MediaTime GetIntervalForIteration() = 0; + + virtual ~ThreadedDriver(); + + nsCOMPtr mThread; + + private: + // This is true if the thread is running. It is false + // before starting the thread and after stopping it. + Atomic mThreadRunning; + + // Any thread. + IterationWaitHelper mWaitHelper; +}; + +/** + * A SystemClockDriver drives a GraphInterface using a system clock, and waits + * using a monitor, between each iteration. 
+ */ +class SystemClockDriver : public ThreadedDriver { + public: + SystemClockDriver(GraphInterface* aGraphInterface, + GraphDriver* aPreviousDriver, uint32_t aSampleRate); + virtual ~SystemClockDriver(); + SystemClockDriver* AsSystemClockDriver() override { return this; } + const SystemClockDriver* AsSystemClockDriver() const override { return this; } + + protected: + /* Return the TimeDuration to wait before the next rendering iteration. */ + TimeDuration WaitInterval() override; + MediaTime GetIntervalForIteration() override; + + private: + // Those are only modified (after initialization) on the graph thread. The + // graph thread does not run during the initialization. + TimeStamp mInitialTimeStamp; + TimeStamp mCurrentTimeStamp; + TimeStamp mLastTimeStamp; +}; + +/** + * An OfflineClockDriver runs the graph as fast as possible, without waiting + * between iteration. + */ +class OfflineClockDriver : public ThreadedDriver { + public: + OfflineClockDriver(GraphInterface* aGraphInterface, uint32_t aSampleRate, + GraphTime aSlice); + virtual ~OfflineClockDriver(); + OfflineClockDriver* AsOfflineClockDriver() override { return this; } + const OfflineClockDriver* AsOfflineClockDriver() const override { + return this; + } + + void RunThread() override; + + protected: + TimeDuration WaitInterval() override { return TimeDuration(); } + MediaTime GetIntervalForIteration() override; + + private: + // Time, in GraphTime, for each iteration + GraphTime mSlice; +}; + +enum class AudioInputType { Unknown, Voice }; + +/** + * This is a graph driver that is based on callback functions called by the + * audio api. This ensures minimal audio latency, because it means there is no + * buffering happening: the audio is generated inside the callback. + * + * This design is less flexible than running our own thread: + * - We have no control over the thread: + * - It cannot block, and it has to run for a shorter amount of time than the + * buffer it is going to fill, or an under-run is going to occur (short burst + * of silence in the final audio output). + * - We can't know for sure when the callback function is going to be called + * (although we compute an estimation so we can schedule video frames) + * - Creating and shutting the thread down is a blocking operation, that can + * take _seconds_ in some cases (because IPC has to be set up, and + * sometimes hardware components are involved and need to be warmed up) + * - We have no control on how much audio we generate, we have to return exactly + * the number of frames asked for by the callback. Since for the Web Audio + * API, we have to do block processing at 128 frames per block, we need to + * keep a little spill buffer to store the extra frames. + */ +class AudioCallbackDriver : public GraphDriver, public MixerCallbackReceiver { + using IterationResult = GraphInterface::IterationResult; + enum class FallbackDriverState; + class FallbackWrapper; + + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING_WITH_DELETE_ON_EVENT_TARGET( + AudioCallbackDriver, mCubebOperationThread, override); + + /** If aInputChannelCount is zero, then this driver is output-only. 
*/ + AudioCallbackDriver(GraphInterface* aGraphInterface, + GraphDriver* aPreviousDriver, uint32_t aSampleRate, + uint32_t aOutputChannelCount, uint32_t aInputChannelCount, + CubebUtils::AudioDeviceID aOutputDeviceID, + CubebUtils::AudioDeviceID aInputDeviceID, + AudioInputType aAudioInputType); + + void Start() override; + MOZ_CAN_RUN_SCRIPT void Shutdown() override; + void SetStreamName(const nsACString& aStreamName) override; + + /* Static wrapper function cubeb calls back. */ + static long DataCallback_s(cubeb_stream* aStream, void* aUser, + const void* aInputBuffer, void* aOutputBuffer, + long aFrames); + static void StateCallback_s(cubeb_stream* aStream, void* aUser, + cubeb_state aState); + static void DeviceChangedCallback_s(void* aUser); + + /* This function is called by the underlying audio backend when a refill is + * needed. This is what drives the whole graph when it is used to output + * audio. If the return value is exactly aFrames, this function will get + * called again. If it is less than aFrames, the stream will go in draining + * mode, and this function will not be called again. */ + long DataCallback(const AudioDataValue* aInputBuffer, + AudioDataValue* aOutputBuffer, long aFrames); + /* This function is called by the underlying audio backend, but is only used + * for informational purposes at the moment. */ + void StateCallback(cubeb_state aState); + /* This is an approximation of the number of millisecond there are between two + * iterations of the graph. */ + uint32_t IterationDuration() override; + /* If the audio stream has started, this does nothing. There will be another + * iteration. If there is an active fallback driver, we forward the call so it + * can wake up. */ + void EnsureNextIteration() override; + + /* This function gets called when the graph has produced the audio frames for + * this iteration. */ + void MixerCallback(AudioChunk* aMixedBuffer, uint32_t aSampleRate) override; + + AudioCallbackDriver* AsAudioCallbackDriver() override { return this; } + const AudioCallbackDriver* AsAudioCallbackDriver() const override { + return this; + } + + uint32_t OutputChannelCount() { return mOutputChannelCount; } + + uint32_t InputChannelCount() { return mInputChannelCount; } + + AudioInputType InputDevicePreference() { + if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) { + return AudioInputType::Voice; + } + return AudioInputType::Unknown; + } + + std::thread::id ThreadId() const { return mAudioThreadIdInCb.load(); } + + /* Called when the thread servicing the callback has changed. This can be + * fairly expensive */ + void OnThreadIdChanged(); + /* Called at the beginning of the audio callback to check if the thread id has + * changed. */ + bool CheckThreadIdChanged(); + + bool OnThread() const override { + return mAudioThreadIdInCb.load() == std::this_thread::get_id(); + } + + /* Returns true if this driver has started (perhaps with a fallback driver) + * and not yet stopped. */ + bool ThreadRunning() const override { + return mAudioStreamState == AudioStreamState::Running || + mFallbackDriverState == FallbackDriverState::Running; + } + + /* Whether the underlying cubeb stream has been started and has not stopped + * or errored. */ + bool IsStarted() { return mAudioStreamState > AudioStreamState::Starting; }; + + // Returns the output latency for the current audio output stream. + TimeDuration AudioOutputLatency(); + + /* Returns true if this driver is currently driven by the fallback driver. 
*/ + bool OnFallback() const; + + private: + /** + * On certain MacBookPro, the microphone is located near the left speaker. + * We need to pan the sound output to the right speaker if we are using the + * mic and the built-in speaker, or we will have terrible echo. */ + void PanOutputIfNeeded(bool aMicrophoneActive); + /** + * This is called when the output device used by the cubeb stream changes. */ + void DeviceChangedCallback(); + /* Start the cubeb stream */ + bool StartStream(); + friend class MediaTrackGraphInitThreadRunnable; + void Init(const nsCString& aStreamName); + void SetCubebStreamName(const nsCString& aStreamName); + void Stop(); + /* Calls FallbackToSystemClockDriver() if in FallbackDriverState::None. + * Returns Ok(true) if the fallback driver was started, or the old + * FallbackDriverState in an Err otherwise. */ + Result TryStartingFallbackDriver(); + /* Fall back to a SystemClockDriver using a normal thread. If needed, the + * graph will try to re-open an audio stream later. */ + void FallbackToSystemClockDriver(); + /* Called by the fallback driver when it has fully stopped, after finishing + * its last iteration. If it stopped after the audio stream started, aState + * will be None. If it stopped after the graph told it to stop, or switch, + * aState will be Stopped. Hands over state to the audio driver that may + * iterate the graph after this has been called. */ + void FallbackDriverStopped(GraphTime aIterationEnd, + GraphTime aStateComputedTime, + FallbackDriverState aState); + + /* Called at the end of the fallback driver's iteration to see whether we + * should attempt to start the AudioStream again. */ + void MaybeStartAudioStream(); + + /* This is true when the method is executed on CubebOperation thread pool. */ + bool OnCubebOperationThread() { + return mCubebOperationThread->IsOnCurrentThreadInfallible(); + } + + /* MediaTrackGraphs are always down/up mixed to output channels. */ + const uint32_t mOutputChannelCount; + /* The size of this buffer comes from the fact that some audio backends can + * call back with a number of frames lower than one block (128 frames), so we + * need to keep at most two block in the SpillBuffer, because we always round + * up to block boundaries during an iteration. + * This is only ever accessed on the audio callback thread. */ + SpillBuffer mScratchBuffer; + /* Wrapper to ensure we write exactly the number of frames we need in the + * audio buffer cubeb passes us. This is only ever accessed on the audio + * callback thread. */ + AudioCallbackBufferWrapper mBuffer; + // mAudioStream (a cubeb_stream) has a bare pointer to the cubeb context, so + // we hold a strong reference on its behalf. + RefPtr mCubeb; + /* cubeb stream for this graph. This is non-null after a successful + * cubeb_stream_init(). CubebOperation thread only. */ + nsAutoRef mAudioStream; + /* The number of input channels from cubeb. Set before opening cubeb. If it is + * zero then the driver is output-only. */ + const uint32_t mInputChannelCount; + /** + * Devices to use for cubeb input & output, or nullptr for default device. + */ + const CubebUtils::AudioDeviceID mOutputDeviceID; + const CubebUtils::AudioDeviceID mInputDeviceID; + /* Approximation of the time between two callbacks. This is used to schedule + * video frames. This is in milliseconds. Only even used (after + * inizatialization) on the audio callback thread. 
 */
+  uint32_t mIterationDurationMS;
+
+  struct AutoInCallback {
+    explicit AutoInCallback(AudioCallbackDriver* aDriver);
+    ~AutoInCallback();
+    AudioCallbackDriver* mDriver;
+  };
+
+  static already_AddRefed CreateTaskQueue();
+
+  /* Shared thread pool with up to one thread for off-main-thread
+   * initialization and shutdown of the audio stream and for other tasks that
+   * must run serially for access to mAudioStream. */
+  const RefPtr mCubebOperationThread;
+  cubeb_device_pref mInputDevicePreference;
+  /* Contains the id of the audio thread, from profiler_current_thread_id. */
+  std::atomic<ProfilerThreadId> mAudioThreadId;
+  /* This allows implementing AutoInCallback. This is equal to the current
+   * thread id when in an audio callback, and is an invalid thread id otherwise.
+   */
+  std::atomic<std::thread::id> mAudioThreadIdInCb;
+  /* State of the audio stream, see inline comments. */
+  enum class AudioStreamState {
+    /* There is no cubeb_stream or mAudioStream is in CUBEB_STATE_ERROR or
+     * CUBEB_STATE_STOPPED and no pending task exists to Init() a new
+     * cubeb_stream. */
+    None,
+    /* A task to Init() a new cubeb_stream is pending. */
+    Pending,
+    /* cubeb_stream_start() is about to be or has been called on mAudioStream.
+     * Any previous cubeb_streams have been destroyed. */
+    Starting,
+    /* mAudioStream has advertised it will change device. In this state we
+       ignore all data callbacks until the fallback driver has started. */
+    ChangingDevice,
+    /* mAudioStream is running. */
+    Running,
+    /* mAudioStream is draining, and will soon stop. */
+    Stopping
+  };
+  Atomic<AudioStreamState> mAudioStreamState{AudioStreamState::None};
+  /* State of the fallback driver, see inline comments. */
+  enum class FallbackDriverState {
+    /* There is no fallback driver. */
+    None,
+    /* There is a fallback driver trying to iterate us. */
+    Running,
+    /* There was a fallback driver and the graph stopped it. No audio callback
+       may iterate the graph. */
+    Stopped,
+  };
+  Atomic<FallbackDriverState> mFallbackDriverState{FallbackDriverState::None};
+  /* SystemClockDriver used as fallback if this AudioCallbackDriver fails to
+   * init or start. */
+  DataMutex<RefPtr<FallbackWrapper>> mFallback;
+  /* If using a fallback driver, this is the duration to wait after failing to
+   * start it before attempting to start it again. */
+  TimeDuration mNextReInitBackoffStep;
+  /* If using a fallback driver, this is the next time we'll try to start the
+   * audio stream. */
+  TimeStamp mNextReInitAttempt;
+  /* The time mAudioStreamState was changed to ChangingDevice.
+   * Synchronized by the mAudioStreamState atomic, i.e. written *before* writing
+   * the atomic, and read *after* reading the atomic. */
+  TimeStamp mChangingDeviceStartTime;
+#ifdef XP_MACOSX
+  /* When using the built-in speakers on macbook pro (13 and 15, all models),
+   * it's best to hard pan the audio on the right, to avoid feedback into the
+   * microphone that is located next to the left speaker. */
+  Atomic<bool> mNeedsPanning;
+#endif
+
+  WavDumper mInputStreamFile;
+  WavDumper mOutputStreamFile;
+
+  virtual ~AudioCallbackDriver();
+  const bool mSandboxed = false;
+};
+
+} // namespace mozilla
+
+#endif // GRAPHDRIVER_H_
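Editorial aside: GraphRunner.cpp, which follows, iterates the graph on a dedicated thread by ping-ponging a monitor between the driver thread (publish work, wait for the result) and the runner thread (wait for work, publish the result). A reduced sketch of that handoff in standard C++; the names and the int "work"/"result" stand-ins are hypothetical.

```cpp
#include <condition_variable>
#include <mutex>
#include <optional>

class Handoff {
  std::mutex mMutex;
  std::condition_variable mCond;
  enum class State { Wait, Run, Shutdown } mState = State::Wait;
  std::optional<int> mWork;  // stand-in for IterationState
  int mResult = 0;           // stand-in for IterationResult

 public:
  // Driver side: hand work over and block until the runner has finished it.
  int RunOne(int work) {
    std::unique_lock lock(mMutex);
    mWork = work;
    mState = State::Run;
    mCond.notify_all();
    mCond.wait(lock, [&] { return mState == State::Wait; });
    return mResult;
  }

  // Runner side: process work items until shutdown.
  void RunnerLoop() {
    std::unique_lock lock(mMutex);
    while (true) {
      mCond.wait(lock, [&] { return mState != State::Wait; });
      if (mState == State::Shutdown) {
        return;
      }
      mResult = *mWork * 2;  // "iterate the graph"
      mWork.reset();
      mState = State::Wait;
      mCond.notify_all();
    }
  }

  void Shutdown() {
    std::lock_guard lock(mMutex);
    mState = State::Shutdown;
    mCond.notify_all();
  }
};
```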
diff --git a/dom/media/GraphRunner.cpp b/dom/media/GraphRunner.cpp new file mode 100644 index 0000000000..eef05c6fe3 --- /dev/null +++ b/dom/media/GraphRunner.cpp @@ -0,0 +1,178 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "GraphRunner.h"
+
+#include "GraphDriver.h"
+#include "MediaTrackGraph.h"
+#include "MediaTrackGraphImpl.h"
+#include "nsISupportsImpl.h"
+#include "nsISupportsPriority.h"
+#include "prthread.h"
+#include "Tracing.h"
+#include "audio_thread_priority.h"
+#ifdef MOZ_WIDGET_ANDROID
+#  include "AndroidProcess.h"
+#endif  // MOZ_WIDGET_ANDROID
+
+namespace mozilla {
+
+GraphRunner::GraphRunner(MediaTrackGraphImpl* aGraph,
+                         already_AddRefed<nsIThread> aThread)
+    : Runnable("GraphRunner"),
+      mMonitor("GraphRunner::mMonitor"),
+      mGraph(aGraph),
+      mThreadState(ThreadState::Wait),
+      mThread(aThread) {
+  mThread->Dispatch(do_AddRef(this));
+}
+
+GraphRunner::~GraphRunner() {
+  MOZ_ASSERT(mThreadState == ThreadState::Shutdown);
+}
+
+/* static */
+already_AddRefed<GraphRunner> GraphRunner::Create(MediaTrackGraphImpl* aGraph) {
+  nsCOMPtr<nsIThread> thread;
+  if (NS_WARN_IF(NS_FAILED(
+          NS_NewNamedThread("GraphRunner", getter_AddRefs(thread))))) {
+    return nullptr;
+  }
+  nsCOMPtr<nsISupportsPriority> supportsPriority = do_QueryInterface(thread);
+  MOZ_ASSERT(supportsPriority);
+  MOZ_ALWAYS_SUCCEEDS(
+      supportsPriority->SetPriority(nsISupportsPriority::PRIORITY_HIGHEST));
+
+  return do_AddRef(new GraphRunner(aGraph, thread.forget()));
+}
+
+void GraphRunner::Shutdown() {
+  {
+    MonitorAutoLock lock(mMonitor);
+    MOZ_ASSERT(mThreadState == ThreadState::Wait);
+    mThreadState = ThreadState::Shutdown;
+    mMonitor.Notify();
+  }
+  mThread->Shutdown();
+}
+
+auto GraphRunner::OneIteration(GraphTime aStateTime, GraphTime aIterationEnd,
+                               MixerCallbackReceiver* aMixerReceiver)
+    -> IterationResult {
+  TRACE("GraphRunner::OneIteration");
+
+  MonitorAutoLock lock(mMonitor);
+  MOZ_ASSERT(mThreadState == ThreadState::Wait);
+  mIterationState =
+      Some(IterationState(aStateTime, aIterationEnd, aMixerReceiver));
+
+#ifdef DEBUG
+  if (const auto* audioDriver =
+          mGraph->CurrentDriver()->AsAudioCallbackDriver()) {
+    mAudioDriverThreadId = audioDriver->ThreadId();
+  } else if (const auto* clockDriver =
+                 mGraph->CurrentDriver()->AsSystemClockDriver()) {
+    mClockDriverThread = clockDriver->Thread();
+  } else {
+    MOZ_CRASH("Unknown GraphDriver");
+  }
+#endif
+  // Signal that mIterationState was updated
+  mThreadState = ThreadState::Run;
+  mMonitor.Notify();
+  // Wait for mIterationResult to update
+  do {
+    mMonitor.Wait();
+  } while (mThreadState == ThreadState::Run);
+
+#ifdef DEBUG
+  mAudioDriverThreadId = std::thread::id();
+  mClockDriverThread = nullptr;
+#endif
+
+  mIterationState = Nothing();
+
+  IterationResult result = std::move(mIterationResult);
+  mIterationResult = IterationResult();
+  return result;
+}
+
+#ifdef MOZ_WIDGET_ANDROID
+namespace {
+void PromoteRenderingThreadAndroid() {
+  MOZ_LOG(gMediaTrackGraphLog, LogLevel::Debug,
+          ("GraphRunner default thread priority: %d",
+           java::sdk::Process::GetThreadPriority(java::sdk::Process::MyTid())));
+  java::sdk::Process::SetThreadPriority(
+      java::sdk::Process::THREAD_PRIORITY_URGENT_AUDIO);
+  MOZ_LOG(gMediaTrackGraphLog, LogLevel::Debug,
+          ("GraphRunner promoted thread priority: %d",
+           java::sdk::Process::GetThreadPriority(java::sdk::Process::MyTid())));
+}
+};  // namespace
+#endif  // MOZ_WIDGET_ANDROID
+
+NS_IMETHODIMP GraphRunner::Run() {
+#ifndef XP_LINUX
+  atp_handle* handle =
+      atp_promote_current_thread_to_real_time(0, mGraph->GraphRate());
+#endif
+
+#ifdef MOZ_WIDGET_ANDROID
+  PromoteRenderingThreadAndroid();
+#endif  // MOZ_WIDGET_ANDROID
+
+  nsCOMPtr<nsIThreadInternal> threadInternal =
do_QueryInterface(mThread); + threadInternal->SetObserver(mGraph); + + MonitorAutoLock lock(mMonitor); + while (true) { + while (mThreadState == ThreadState::Wait) { + mMonitor.Wait(); // Wait for mIterationState to update or for shutdown + } + if (mThreadState == ThreadState::Shutdown) { + break; + } + MOZ_DIAGNOSTIC_ASSERT(mIterationState.isSome()); + TRACE("GraphRunner::Run"); + mIterationResult = mGraph->OneIterationImpl( + mIterationState->StateTime(), mIterationState->IterationEnd(), + mIterationState->MixerReceiver()); + // Signal that mIterationResult was updated + mThreadState = ThreadState::Wait; + mMonitor.Notify(); + } + +#ifndef XP_LINUX + if (handle) { + atp_demote_current_thread_from_real_time(handle); + } +#endif + + return NS_OK; +} + +bool GraphRunner::OnThread() const { return mThread->IsOnCurrentThread(); } + +#ifdef DEBUG +bool GraphRunner::InDriverIteration(const GraphDriver* aDriver) const { + if (!OnThread()) { + return false; + } + + if (const auto* audioDriver = aDriver->AsAudioCallbackDriver()) { + return audioDriver->ThreadId() == mAudioDriverThreadId; + } + + if (const auto* clockDriver = aDriver->AsSystemClockDriver()) { + return clockDriver->Thread() == mClockDriverThread; + } + + MOZ_CRASH("Unknown driver"); +} +#endif + +} // namespace mozilla diff --git a/dom/media/GraphRunner.h b/dom/media/GraphRunner.h new file mode 100644 index 0000000000..8597f8c29e --- /dev/null +++ b/dom/media/GraphRunner.h @@ -0,0 +1,121 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_GraphRunner_h +#define mozilla_GraphRunner_h + +#include "GraphDriver.h" +#include "MediaSegment.h" +#include "mozilla/Monitor.h" + +#include + +struct PRThread; + +namespace mozilla { + +class AudioMixer; +class MediaTrackGraphImpl; + +class GraphRunner final : public Runnable { + using IterationResult = GraphInterface::IterationResult; + + public: + static already_AddRefed Create(MediaTrackGraphImpl* aGraph); + + /** + * Marks us as shut down and signals mThread, so that it runs until the end. + */ + MOZ_CAN_RUN_SCRIPT void Shutdown(); + + /** + * Signals one iteration of mGraph. Hands state over to mThread and runs + * the iteration there. + */ + IterationResult OneIteration(GraphTime aStateTime, GraphTime aIterationEnd, + MixerCallbackReceiver* aMixerReceiver); + + /** + * Runs mGraph until it shuts down. + */ + NS_IMETHOD Run() override; + + /** + * Returns true if called on mThread. + */ + bool OnThread() const; + +#ifdef DEBUG + /** + * Returns true if called on mThread, and aDriver was the driver that called + * OneIteration() last. 
+ */ + bool InDriverIteration(const GraphDriver* aDriver) const; +#endif + + private: + explicit GraphRunner(MediaTrackGraphImpl* aGraph, + already_AddRefed aThread); + ~GraphRunner(); + + class IterationState { + GraphTime mStateTime; + GraphTime mIterationEnd; + MixerCallbackReceiver* MOZ_NON_OWNING_REF mMixerReceiver; + + public: + IterationState(GraphTime aStateTime, GraphTime aIterationEnd, + MixerCallbackReceiver* aMixerReceiver) + : mStateTime(aStateTime), + mIterationEnd(aIterationEnd), + mMixerReceiver(aMixerReceiver) {} + IterationState& operator=(const IterationState& aOther) = default; + GraphTime StateTime() const { return mStateTime; } + GraphTime IterationEnd() const { return mIterationEnd; } + MixerCallbackReceiver* MixerReceiver() const { return mMixerReceiver; } + }; + + // Monitor used for yielding mThread through Wait(), and scheduling mThread + // through Signal() from a GraphDriver. + Monitor mMonitor; + // The MediaTrackGraph we're running. Weakptr beecause this graph owns us and + // guarantees that our lifetime will not go beyond that of itself. + MediaTrackGraphImpl* const mGraph; + // State being handed over to the graph through OneIteration. Protected by + // mMonitor. + Maybe mIterationState MOZ_GUARDED_BY(mMonitor); + // Result from mGraph's OneIteration. Protected by mMonitor. + IterationResult mIterationResult MOZ_GUARDED_BY(mMonitor); + + enum class ThreadState { + Wait, // Waiting for a message. This is the initial state. + // A transition from Run back to Wait occurs on the runner thread + // after it processes as far as mIterationState->mStateTime + // and sets mIterationResult. + Run, // Set on driver thread after each mIterationState update. + Shutdown, // Set when Shutdown() is called on main thread. + }; + // Protected by mMonitor until set to Shutdown, after which this is not + // modified. + ThreadState mThreadState MOZ_GUARDED_BY(mMonitor); + + // The thread running mGraph. Set on construction, after other members are + // initialized. Cleared at the end of Shutdown(). + const nsCOMPtr mThread; + +#ifdef DEBUG + // Set to mGraph's audio callback driver's thread id, if run by an + // AudioCallbackDriver, while OneIteration() is running. + std::thread::id mAudioDriverThreadId = std::thread::id(); + // Set to mGraph's system clock driver's thread, if run by a + // SystemClockDriver, while OneIteration() is running. + nsIThread* mClockDriverThread = nullptr; +#endif +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/IdpSandbox.sys.mjs b/dom/media/IdpSandbox.sys.mjs new file mode 100644 index 0000000000..8b94abf7dd --- /dev/null +++ b/dom/media/IdpSandbox.sys.mjs @@ -0,0 +1,284 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { NetUtil } from "resource://gre/modules/NetUtil.sys.mjs"; + +/** This little class ensures that redirects maintain an https:// origin */ +function RedirectHttpsOnly() {} + +RedirectHttpsOnly.prototype = { + asyncOnChannelRedirect(oldChannel, newChannel, flags, callback) { + if (newChannel.URI.scheme !== "https") { + callback.onRedirectVerifyCallback(Cr.NS_ERROR_ABORT); + } else { + callback.onRedirectVerifyCallback(Cr.NS_OK); + } + }, + + getInterface(iid) { + return this.QueryInterface(iid); + }, + QueryInterface: ChromeUtils.generateQI(["nsIChannelEventSink"]), +}; + +/** This class loads a resource into a single string. 
ResourceLoader.load() is + * the entry point. */ +function ResourceLoader(res, rej) { + this.resolve = res; + this.reject = rej; + this.data = ""; +} + +/** Loads the identified https:// URL. */ +ResourceLoader.load = function (uri, doc) { + return new Promise((resolve, reject) => { + let listener = new ResourceLoader(resolve, reject); + let ioChannel = NetUtil.newChannel({ + uri, + loadingNode: doc, + securityFlags: Ci.nsILoadInfo.SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL, + contentPolicyType: Ci.nsIContentPolicy.TYPE_INTERNAL_SCRIPT, + }); + + ioChannel.loadGroup = doc.documentLoadGroup.QueryInterface(Ci.nsILoadGroup); + ioChannel.notificationCallbacks = new RedirectHttpsOnly(); + ioChannel.asyncOpen(listener); + }); +}; + +ResourceLoader.prototype = { + onDataAvailable(request, input, offset, count) { + let stream = Cc["@mozilla.org/scriptableinputstream;1"].createInstance( + Ci.nsIScriptableInputStream + ); + stream.init(input); + this.data += stream.read(count); + }, + + onStartRequest(request) {}, + + onStopRequest(request, status) { + if (Components.isSuccessCode(status)) { + var statusCode = request.QueryInterface(Ci.nsIHttpChannel).responseStatus; + if (statusCode === 200) { + this.resolve({ request, data: this.data }); + } else { + this.reject(new Error("Non-200 response from server: " + statusCode)); + } + } else { + this.reject(new Error("Load failed: " + status)); + } + }, + + getInterface(iid) { + return this.QueryInterface(iid); + }, + QueryInterface: ChromeUtils.generateQI(["nsIStreamListener"]), +}; + +/** + * A simple implementation of the WorkerLocation interface. + */ +function createLocationFromURI(uri) { + return { + href: uri.spec, + protocol: uri.scheme + ":", + host: uri.host + (uri.port >= 0 ? ":" + uri.port : ""), + port: uri.port, + hostname: uri.host, + pathname: uri.pathQueryRef.replace(/[#\?].*/, ""), + search: uri.pathQueryRef.replace(/^[^\?]*/, "").replace(/#.*/, ""), + hash: uri.hasRef ? "#" + uri.ref : "", + origin: uri.prePath, + toString() { + return uri.spec; + }, + }; +} + +/** + * A javascript sandbox for running an IdP. + * + * @param domain (string) the domain of the IdP + * @param protocol (string?) the protocol of the IdP [default: 'default'] + * @param win (obj) the current window + * @throws if the domain or protocol aren't valid + */ +export function IdpSandbox(domain, protocol, win) { + this.source = IdpSandbox.createIdpUri(domain, protocol || "default"); + this.active = null; + this.sandbox = null; + this.window = win; +} + +IdpSandbox.checkDomain = function (domain) { + if (!domain || typeof domain !== "string") { + throw new Error( + "Invalid domain for identity provider: " + + "must be a non-zero length string" + ); + } +}; + +/** + * Checks that the IdP protocol is superficially sane. In particular, we don't + * want someone adding relative paths (e.g., '../../myuri'), which could be used + * to move outside of /.well-known/ and into space that they control. + */ +IdpSandbox.checkProtocol = function (protocol) { + let message = "Invalid protocol for identity provider: "; + if (!protocol || typeof protocol !== "string") { + throw new Error(message + "must be a non-zero length string"); + } + if (decodeURIComponent(protocol).match(/[\/\\]/)) { + throw new Error(message + "must not include '/' or '\\'"); + } +}; + +/** + * Turns a domain and protocol into a URI. This does some aggressive checking + * to make sure that we aren't being fooled somehow. Throws on fooling. 
+ */ +IdpSandbox.createIdpUri = function (domain, protocol) { + IdpSandbox.checkDomain(domain); + IdpSandbox.checkProtocol(protocol); + + let message = "Invalid IdP parameters: "; + try { + let wkIdp = "https://" + domain + "/.well-known/idp-proxy/" + protocol; + let uri = Services.io.newURI(wkIdp); + + if (uri.hostPort !== domain) { + throw new Error(message + "domain is invalid"); + } + if (uri.pathQueryRef.indexOf("/.well-known/idp-proxy/") !== 0) { + throw new Error(message + "must produce a /.well-known/idp-proxy/ URI"); + } + + return uri; + } catch (e) { + if ( + typeof e.result !== "undefined" && + e.result === Cr.NS_ERROR_MALFORMED_URI + ) { + throw new Error(message + "must produce a valid URI"); + } + throw e; + } +}; + +IdpSandbox.prototype = { + isSame(domain, protocol) { + return this.source.spec === IdpSandbox.createIdpUri(domain, protocol).spec; + }, + + start() { + if (!this.active) { + this.active = ResourceLoader.load(this.source, this.window.document).then( + result => this._createSandbox(result) + ); + } + return this.active; + }, + + // Provides the sandbox with some useful facilities. Initially, this is only + // a minimal set; it is far easier to add more as the need arises, than to + // take them back if we discover a mistake. + _populateSandbox(uri) { + this.sandbox.location = Cu.cloneInto( + createLocationFromURI(uri), + this.sandbox, + { cloneFunctions: true } + ); + }, + + _createSandbox(result) { + let principal = Services.scriptSecurityManager.getChannelResultPrincipal( + result.request + ); + + this.sandbox = Cu.Sandbox(principal, { + sandboxName: "IdP-" + this.source.host, + wantComponents: false, + wantExportHelpers: false, + wantGlobalProperties: [ + "indexedDB", + "XMLHttpRequest", + "TextEncoder", + "TextDecoder", + "URL", + "URLSearchParams", + "atob", + "btoa", + "Blob", + "crypto", + "rtcIdentityProvider", + "fetch", + ], + }); + let registrar = this.sandbox.rtcIdentityProvider; + if (!Cu.isXrayWrapper(registrar)) { + throw new Error("IdP setup failed"); + } + + // have to use the ultimate URI, not the starting one to avoid + // that origin stealing from the one that redirected to it + this._populateSandbox(result.request.URI); + try { + Cu.evalInSandbox( + result.data, + this.sandbox, + "latest", + result.request.URI.spec, + 1 + ); + } catch (e) { + // These can be passed straight on, because they are explicitly labelled + // as being IdP errors by the IdP and we drop line numbers as a result. + if (e.name === "IdpError" || e.name === "IdpLoginError") { + throw e; + } + this._logError(e); + throw new Error("Error in IdP, check console for details"); + } + + if (!registrar.hasIdp) { + throw new Error("IdP failed to call rtcIdentityProvider.register()"); + } + return registrar; + }, + + // Capture all the details from the error and log them to the console. This + // can't rethrow anything else because that could leak information about the + // internal workings of the IdP across origins. 
+ _logError(e) { + let winID = this.window.windowGlobalChild.innerWindowId; + let scriptError = Cc["@mozilla.org/scripterror;1"].createInstance( + Ci.nsIScriptError + ); + scriptError.initWithWindowID( + e.message, + e.fileName, + null, + e.lineNumber, + e.columnNumber, + Ci.nsIScriptError.errorFlag, + "content javascript", + winID + ); + Services.console.logMessage(scriptError); + }, + + stop() { + if (this.sandbox) { + Cu.nukeSandbox(this.sandbox); + } + this.sandbox = null; + this.active = null; + }, + + toString() { + return this.source.spec; + }, +}; diff --git a/dom/media/ImageToI420.cpp b/dom/media/ImageToI420.cpp new file mode 100644 index 0000000000..8fc5198b4a --- /dev/null +++ b/dom/media/ImageToI420.cpp @@ -0,0 +1,148 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ImageToI420.h" + +#include "ImageContainer.h" +#include "libyuv/convert.h" +#include "mozilla/dom/ImageBitmapBinding.h" +#include "mozilla/dom/ImageUtils.h" +#include "mozilla/gfx/Point.h" +#include "mozilla/RefPtr.h" +#include "nsThreadUtils.h" + +using mozilla::ImageFormat; +using mozilla::dom::ImageBitmapFormat; +using mozilla::dom::ImageUtils; +using mozilla::gfx::DataSourceSurface; +using mozilla::gfx::SourceSurface; +using mozilla::gfx::SurfaceFormat; +using mozilla::layers::Image; +using mozilla::layers::PlanarYCbCrData; +using mozilla::layers::PlanarYCbCrImage; + +static const PlanarYCbCrData* GetPlanarYCbCrData(Image* aImage) { + switch (aImage->GetFormat()) { + case ImageFormat::PLANAR_YCBCR: + return aImage->AsPlanarYCbCrImage()->GetData(); + case ImageFormat::NV_IMAGE: + return aImage->AsNVImage()->GetData(); + default: + return nullptr; + } +} + +static already_AddRefed GetSourceSurface(Image* aImage) { + if (!aImage->AsGLImage() || NS_IsMainThread()) { + return aImage->GetAsSourceSurface(); + } + + // GLImage::GetAsSourceSurface() only supports main thread + RefPtr surf; + NS_DispatchAndSpinEventLoopUntilComplete( + "ImageToI420::GLImage::GetSourceSurface"_ns, + mozilla::GetMainThreadSerialEventTarget(), + NS_NewRunnableFunction( + "ImageToI420::GLImage::GetSourceSurface", + [&aImage, &surf]() { surf = aImage->GetAsSourceSurface(); })); + + return surf.forget(); +} + +static nsresult MapRv(int aRv) { + // Docs for libyuv::ConvertToI420 say: + // Returns 0 for successful; -1 for invalid parameter. Non-zero for failure. 
+  switch (aRv) {
+    case 0:
+      return NS_OK;
+    case -1:
+      return NS_ERROR_INVALID_ARG;
+    default:
+      return NS_ERROR_FAILURE;
+  }
+}
+
+namespace mozilla {
+
+nsresult ConvertToI420(Image* aImage, uint8_t* aDestY, int aDestStrideY,
+                       uint8_t* aDestU, int aDestStrideU, uint8_t* aDestV,
+                       int aDestStrideV) {
+  if (!aImage->IsValid()) {
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  if (const PlanarYCbCrData* data = GetPlanarYCbCrData(aImage)) {
+    const ImageUtils imageUtils(aImage);
+    switch (imageUtils.GetFormat()) {
+      case ImageBitmapFormat::YUV420P:
+        return MapRv(libyuv::I420ToI420(
+            data->mYChannel, data->mYStride, data->mCbChannel,
+            data->mCbCrStride, data->mCrChannel, data->mCbCrStride, aDestY,
+            aDestStrideY, aDestU, aDestStrideU, aDestV, aDestStrideV,
+            aImage->GetSize().width, aImage->GetSize().height));
+      case ImageBitmapFormat::YUV422P:
+        return MapRv(libyuv::I422ToI420(
+            data->mYChannel, data->mYStride, data->mCbChannel,
+            data->mCbCrStride, data->mCrChannel, data->mCbCrStride, aDestY,
+            aDestStrideY, aDestU, aDestStrideU, aDestV, aDestStrideV,
+            aImage->GetSize().width, aImage->GetSize().height));
+      case ImageBitmapFormat::YUV444P:
+        return MapRv(libyuv::I444ToI420(
+            data->mYChannel, data->mYStride, data->mCbChannel,
+            data->mCbCrStride, data->mCrChannel, data->mCbCrStride, aDestY,
+            aDestStrideY, aDestU, aDestStrideU, aDestV, aDestStrideV,
+            aImage->GetSize().width, aImage->GetSize().height));
+      case ImageBitmapFormat::YUV420SP_NV12:
+        return MapRv(libyuv::NV12ToI420(
+            data->mYChannel, data->mYStride, data->mCbChannel,
+            data->mCbCrStride, aDestY, aDestStrideY, aDestU, aDestStrideU,
+            aDestV, aDestStrideV, aImage->GetSize().width,
+            aImage->GetSize().height));
+      case ImageBitmapFormat::YUV420SP_NV21:
+        return MapRv(libyuv::NV21ToI420(
+            data->mYChannel, data->mYStride, data->mCrChannel,
+            data->mCbCrStride, aDestY, aDestStrideY, aDestU, aDestStrideU,
+            aDestV, aDestStrideV, aImage->GetSize().width,
+            aImage->GetSize().height));
+      default:
+        MOZ_ASSERT_UNREACHABLE("YUV format conversion not implemented");
+        return NS_ERROR_NOT_IMPLEMENTED;
+    }
+  }
+
+  RefPtr<SourceSurface> surf = GetSourceSurface(aImage);
+  if (!surf) {
+    return NS_ERROR_FAILURE;
+  }
+
+  RefPtr<DataSourceSurface> data = surf->GetDataSurface();
+  if (!data) {
+    return NS_ERROR_FAILURE;
+  }
+
+  DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ);
+  if (!map.IsMapped()) {
+    return NS_ERROR_FAILURE;
+  }
+
+  switch (surf->GetFormat()) {
+    case SurfaceFormat::B8G8R8A8:
+    case SurfaceFormat::B8G8R8X8:
+      return MapRv(libyuv::ARGBToI420(
+          static_cast<uint8_t*>(map.GetData()), map.GetStride(), aDestY,
+          aDestStrideY, aDestU, aDestStrideU, aDestV, aDestStrideV,
+          aImage->GetSize().width, aImage->GetSize().height));
+    case SurfaceFormat::R5G6B5_UINT16:
+      return MapRv(libyuv::RGB565ToI420(
+          static_cast<uint8_t*>(map.GetData()), map.GetStride(), aDestY,
+          aDestStrideY, aDestU, aDestStrideU, aDestV, aDestStrideV,
+          aImage->GetSize().width, aImage->GetSize().height));
+    default:
+      MOZ_ASSERT_UNREACHABLE("Surface format conversion not implemented");
+      return NS_ERROR_NOT_IMPLEMENTED;
+  }
+}
+
+} // namespace mozilla
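Editorial aside: a caller of ConvertToI420() (implemented above, declared in the header that follows) has to supply correctly sized and strided I420 planes. Below is a hypothetical allocation helper showing the standard plane math; obtaining the source layers::Image is out of scope here.

```cpp
#include <cstdint>
#include <vector>

struct I420Buffer {
  int width = 0;
  int height = 0;
  std::vector<uint8_t> y, u, v;
  int strideY = 0, strideUV = 0;
};

I420Buffer AllocateI420(int width, int height) {
  I420Buffer b;
  b.width = width;
  b.height = height;
  b.strideY = width;
  b.strideUV = (width + 1) / 2;          // chroma is subsampled 2x horizontally
  const int chromaHeight = (height + 1) / 2;  // and 2x vertically
  b.y.resize(static_cast<size_t>(b.strideY) * height);
  b.u.resize(static_cast<size_t>(b.strideUV) * chromaHeight);
  b.v.resize(static_cast<size_t>(b.strideUV) * chromaHeight);
  return b;
}

// Usage (aImage obtained elsewhere):
//   I420Buffer buf = AllocateI420(size.width, size.height);
//   nsresult rv = ConvertToI420(aImage, buf.y.data(), buf.strideY,
//                               buf.u.data(), buf.strideUV,
//                               buf.v.data(), buf.strideUV);
```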
+ */
+
+#ifndef ImageToI420Converter_h
+#define ImageToI420Converter_h
+
+#include "nsError.h"
+
+namespace mozilla {
+
+namespace layers {
+class Image;
+}  // namespace layers
+
+/**
+ * Converts aImage to an I420 image and writes it to the given buffers.
+ */
+nsresult ConvertToI420(layers::Image* aImage, uint8_t* aDestY, int aDestStrideY,
+                       uint8_t* aDestU, int aDestStrideU, uint8_t* aDestV,
+                       int aDestStrideV);
+
+}  // namespace mozilla
+
+#endif /* ImageToI420Converter_h */
diff --git a/dom/media/Intervals.h b/dom/media/Intervals.h
new file mode 100644
index 0000000000..e433bbdf39
--- /dev/null
+++ b/dom/media/Intervals.h
@@ -0,0 +1,762 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_INTERVALS_H_
+#define DOM_MEDIA_INTERVALS_H_
+
+#include <algorithm>
+#include <type_traits>
+
+#include "nsTArray.h"
+#include "nsString.h"
+#include "nsPrintfCString.h"
+
+// Specialization for nsTArray CopyChooser.
+namespace mozilla::media {
+template <typename T>
+class IntervalSet;
+class TimeUnit;
+}  // namespace mozilla::media
+
+template <typename T>
+struct nsTArray_RelocationStrategy<mozilla::media::IntervalSet<T>> {
+  typedef nsTArray_RelocateUsingMoveConstructor<mozilla::media::IntervalSet<T>>
+      Type;
+};
+
+namespace mozilla::media {
+
+/* Interval defines an interval between two points. Unlike a traditional
+   interval [A,B] where A <= x <= B, the upper boundary B is exclusive: A <= x <
+   B (e.g. [A,B[ or [A,B) depending on where you're living). It provides basic
+   interval arithmetic and fuzzy edges. The type T must provide a default
+   constructor and +, -, <, <= and == operators.
+ */
+template <typename T>
+class Interval {
+ public:
+  typedef Interval<T> SelfType;
+
+  Interval() : mStart(T()), mEnd(T()), mFuzz(T()) {}
+
+  template <typename StartArg, typename EndArg>
+  Interval(StartArg&& aStart, EndArg&& aEnd)
+      : mStart(aStart), mEnd(aEnd), mFuzz() {
+    MOZ_DIAGNOSTIC_ASSERT(mStart <= mEnd, "Invalid Interval");
+  }
+
+  template <typename StartArg, typename EndArg, typename FuzzArg>
+  Interval(StartArg&& aStart, EndArg&& aEnd, FuzzArg&& aFuzz)
+      : mStart(aStart), mEnd(aEnd), mFuzz(aFuzz) {
+    MOZ_DIAGNOSTIC_ASSERT(mStart <= mEnd, "Invalid Interval");
+  }
+
+  Interval(const SelfType& aOther)
+      : mStart(aOther.mStart), mEnd(aOther.mEnd), mFuzz(aOther.mFuzz) {}
+
+  Interval(SelfType&& aOther)
+      : mStart(std::move(aOther.mStart)),
+        mEnd(std::move(aOther.mEnd)),
+        mFuzz(std::move(aOther.mFuzz)) {}
+
+  SelfType& operator=(const SelfType& aOther) {
+    mStart = aOther.mStart;
+    mEnd = aOther.mEnd;
+    mFuzz = aOther.mFuzz;
+    return *this;
+  }
+
+  SelfType& operator=(SelfType&& aOther) {
+    MOZ_ASSERT(&aOther != this, "self-moves are prohibited");
+    this->~Interval();
+    new (this) Interval(std::move(aOther));
+    return *this;
+  }
+
+  // Basic interval arithmetic operator definition.
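// A short worked example of the semantics above (a sketch using int; any T
// with the required operators behaves the same way, and the predicates shown
// are defined further down in this class):
//
//   Interval<int> a(2, 5);                 // the half-open range [2, 5)
//   Interval<int> b(4, 9);                 // [4, 9)
//   a.Contains(4);                         // true
//   a.Contains(5);                         // false, the upper bound is exclusive
//   a.Intersects(b);                       // true
//   Interval<int> i = a.Intersection(b);   // [4, 5)
//   Interval<int> s = a.Span(b);           // [2, 9)
//   Interval<int> c(6, 8, 1);              // [6, 8) with fuzz 1
//   c.Contains(5);                         // true, 5 falls inside the fuzz margin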
+ SelfType operator+(const SelfType& aOther) const { + return SelfType(mStart + aOther.mStart, mEnd + aOther.mEnd, + mFuzz + aOther.mFuzz); + } + + SelfType operator+(const T& aVal) const { + return SelfType(mStart + aVal, mEnd + aVal, mFuzz); + } + + SelfType operator-(const SelfType& aOther) const { + return SelfType(mStart - aOther.mEnd, mEnd - aOther.mStart, + mFuzz + aOther.mFuzz); + } + + SelfType operator-(const T& aVal) const { + return SelfType(mStart - aVal, mEnd - aVal, mFuzz); + } + + SelfType& operator+=(const SelfType& aOther) { + mStart += aOther.mStart; + mEnd += aOther.mEnd; + mFuzz += aOther.mFuzz; + return *this; + } + + SelfType& operator+=(const T& aVal) { + mStart += aVal; + mEnd += aVal; + return *this; + } + + SelfType& operator-=(const SelfType& aOther) { + mStart -= aOther.mStart; + mEnd -= aOther.mEnd; + mFuzz += aOther.mFuzz; + return *this; + } + + SelfType& operator-=(const T& aVal) { + mStart -= aVal; + mEnd -= aVal; + return *this; + } + + bool operator==(const SelfType& aOther) const { + return mStart == aOther.mStart && mEnd == aOther.mEnd; + } + + bool operator!=(const SelfType& aOther) const { return !(*this == aOther); } + + bool Contains(const T& aX) const { + return mStart - mFuzz <= aX && aX < mEnd + mFuzz; + } + + bool ContainsStrict(const T& aX) const { return mStart <= aX && aX < mEnd; } + + bool ContainsWithStrictEnd(const T& aX) const { + return mStart - mFuzz <= aX && aX < mEnd; + } + + bool Contains(const SelfType& aOther) const { + return (mStart - mFuzz <= aOther.mStart + aOther.mFuzz) && + (aOther.mEnd - aOther.mFuzz <= mEnd + mFuzz); + } + + bool ContainsStrict(const SelfType& aOther) const { + return mStart <= aOther.mStart && aOther.mEnd <= mEnd; + } + + bool ContainsWithStrictEnd(const SelfType& aOther) const { + return (mStart - mFuzz <= aOther.mStart + aOther.mFuzz) && + aOther.mEnd <= mEnd; + } + + bool Intersects(const SelfType& aOther) const { + return (mStart - mFuzz < aOther.mEnd + aOther.mFuzz) && + (aOther.mStart - aOther.mFuzz < mEnd + mFuzz); + } + + bool IntersectsStrict(const SelfType& aOther) const { + return mStart < aOther.mEnd && aOther.mStart < mEnd; + } + + // Same as Intersects, but including the boundaries. + bool Touches(const SelfType& aOther) const { + return (mStart - mFuzz <= aOther.mEnd + aOther.mFuzz) && + (aOther.mStart - aOther.mFuzz <= mEnd + mFuzz); + } + + // Returns true if aOther is strictly to the right of this and contiguous. + // This operation isn't commutative. + bool Contiguous(const SelfType& aOther) const { + return mEnd <= aOther.mStart && + aOther.mStart - mEnd <= mFuzz + aOther.mFuzz; + } + + bool RightOf(const SelfType& aOther) const { + return aOther.mEnd - aOther.mFuzz <= mStart + mFuzz; + } + + bool LeftOf(const SelfType& aOther) const { + return mEnd - mFuzz <= aOther.mStart + aOther.mFuzz; + } + + SelfType Span(const SelfType& aOther) const { + if (IsEmpty()) { + return aOther; + } + SelfType result(*this); + if (aOther.mStart < mStart) { + result.mStart = aOther.mStart; + } + if (mEnd < aOther.mEnd) { + result.mEnd = aOther.mEnd; + } + if (mFuzz < aOther.mFuzz) { + result.mFuzz = aOther.mFuzz; + } + return result; + } + + SelfType Intersection(const SelfType& aOther) const { + const T& s = std::max(mStart, aOther.mStart); + const T& e = std::min(mEnd, aOther.mEnd); + const T& f = std::max(mFuzz, aOther.mFuzz); + if (s < e) { + return SelfType(s, e, f); + } + // Return an empty interval. 
+ return SelfType(); + } + + T Length() const { return mEnd - mStart; } + + bool IsEmpty() const { return mStart == mEnd; } + + void SetFuzz(const T& aFuzz) { mFuzz = aFuzz; } + + // Returns true if the two intervals intersect with this being on the right + // of aOther + bool TouchesOnRight(const SelfType& aOther) const { + return aOther.mStart <= mStart && + (mStart - mFuzz <= aOther.mEnd + aOther.mFuzz) && + (aOther.mStart - aOther.mFuzz <= mEnd + mFuzz); + } + + // Returns true if the two intervals intersect with this being on the right + // of aOther, ignoring fuzz. + bool TouchesOnRightStrict(const SelfType& aOther) const { + return aOther.mStart <= mStart && mStart <= aOther.mEnd; + } + + nsCString ToString() const { + if constexpr (std::is_same_v) { + return nsPrintfCString("[%s, %s](%s)", mStart.ToString().get(), + mEnd.ToString().get(), mFuzz.ToString().get()); + } else if constexpr (std::is_same_v) { + return nsPrintfCString("[%lf, %lf](%lf)", mStart, mEnd, mFuzz); + } + } + + T mStart; + T mEnd; + T mFuzz; +}; + +// An IntervalSet in a collection of Intervals. The IntervalSet is always +// normalized. +template +class IntervalSet { + public: + typedef IntervalSet SelfType; + typedef Interval ElemType; + typedef AutoTArray ContainerType; + typedef typename ContainerType::index_type IndexType; + + IntervalSet() = default; + virtual ~IntervalSet() = default; + + IntervalSet(const SelfType& aOther) : mIntervals(aOther.mIntervals.Clone()) {} + + IntervalSet(SelfType&& aOther) { + mIntervals.AppendElements(std::move(aOther.mIntervals)); + } + + explicit IntervalSet(const ElemType& aOther) { + if (!aOther.IsEmpty()) { + mIntervals.AppendElement(aOther); + } + } + + explicit IntervalSet(ElemType&& aOther) { + if (!aOther.IsEmpty()) { + mIntervals.AppendElement(std::move(aOther)); + } + } + + bool operator==(const SelfType& aOther) const { + return mIntervals == aOther.mIntervals; + } + + bool operator!=(const SelfType& aOther) const { + return mIntervals != aOther.mIntervals; + } + + SelfType& operator=(const SelfType& aOther) { + mIntervals = aOther.mIntervals.Clone(); + return *this; + } + + SelfType& operator=(SelfType&& aOther) { + MOZ_ASSERT(&aOther != this, "self-moves are prohibited"); + this->~IntervalSet(); + new (this) IntervalSet(std::move(aOther)); + return *this; + } + + SelfType& operator=(const ElemType& aInterval) { + mIntervals.Clear(); + if (!aInterval.IsEmpty()) { + mIntervals.AppendElement(aInterval); + } + return *this; + } + + SelfType& operator=(ElemType&& aInterval) { + mIntervals.Clear(); + if (!aInterval.IsEmpty()) { + mIntervals.AppendElement(std::move(aInterval)); + } + return *this; + } + + SelfType& Add(const SelfType& aIntervals) { + if (aIntervals.mIntervals.Length() == 1) { + Add(aIntervals.mIntervals[0]); + } else { + mIntervals.AppendElements(aIntervals.mIntervals); + Normalize(); + } + return *this; + } + + SelfType& Add(const ElemType& aInterval) { + if (aInterval.IsEmpty()) { + return *this; + } + if (mIntervals.IsEmpty()) { + mIntervals.AppendElement(aInterval); + return *this; + } + ElemType& last = mIntervals.LastElement(); + if (aInterval.TouchesOnRight(last)) { + last = last.Span(aInterval); + return *this; + } + // Most of our actual usage is adding an interval that will be outside the + // range. We can speed up normalization here. 
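// For example (an illustrative sketch), repeated Add() calls keep the set
// normalized, merging anything that overlaps or touches:
//
//   IntervalSet<int> set;
//   set += Interval<int>(0, 2);
//   set += Interval<int>(5, 7);   // set is { [0,2), [5,7) }
//   set += Interval<int>(1, 6);   // bridges both -> set becomes { [0,7) }
//   set.Length();                 // 1
//   set.GetStart();               // 0
//   set.GetEnd();                 // 7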
+ if (aInterval.RightOf(last)) { + mIntervals.AppendElement(aInterval); + return *this; + } + + ContainerType normalized; + ElemType current(aInterval); + IndexType i = 0; + for (; i < mIntervals.Length(); i++) { + ElemType& interval = mIntervals[i]; + if (current.Touches(interval)) { + current = current.Span(interval); + } else if (current.LeftOf(interval)) { + break; + } else { + normalized.AppendElement(std::move(interval)); + } + } + normalized.AppendElement(std::move(current)); + for (; i < mIntervals.Length(); i++) { + normalized.AppendElement(std::move(mIntervals[i])); + } + mIntervals.Clear(); + mIntervals.AppendElements(std::move(normalized)); + + return *this; + } + + SelfType& operator+=(const SelfType& aIntervals) { + Add(aIntervals); + return *this; + } + + SelfType& operator+=(const ElemType& aInterval) { + Add(aInterval); + return *this; + } + + SelfType operator+(const SelfType& aIntervals) const { + SelfType intervals(*this); + intervals.Add(aIntervals); + return intervals; + } + + SelfType operator+(const ElemType& aInterval) const { + SelfType intervals(*this); + intervals.Add(aInterval); + return intervals; + } + + friend SelfType operator+(const ElemType& aInterval, + const SelfType& aIntervals) { + SelfType intervals; + intervals.Add(aInterval); + intervals.Add(aIntervals); + return intervals; + } + + // Excludes an interval from an IntervalSet. + SelfType& operator-=(const ElemType& aInterval) { + if (aInterval.IsEmpty() || mIntervals.IsEmpty()) { + return *this; + } + if (mIntervals.Length() == 1 && + mIntervals[0].TouchesOnRightStrict(aInterval)) { + // Fast path when we're removing from the front of a set with a + // single interval. This is common for the buffered time ranges + // we see on Twitch. + if (aInterval.mEnd >= mIntervals[0].mEnd) { + mIntervals.RemoveElementAt(0); + } else { + mIntervals[0].mStart = aInterval.mEnd; + mIntervals[0].mFuzz = std::max(mIntervals[0].mFuzz, aInterval.mFuzz); + } + return *this; + } + + // General case performed by inverting aInterval within the bounds of + // mIntervals and then doing the intersection. + T firstEnd = std::max(mIntervals[0].mStart, aInterval.mStart); + T secondStart = std::min(mIntervals.LastElement().mEnd, aInterval.mEnd); + ElemType startInterval(mIntervals[0].mStart, firstEnd); + ElemType endInterval(secondStart, mIntervals.LastElement().mEnd); + SelfType intervals(std::move(startInterval)); + intervals += std::move(endInterval); + return Intersection(intervals); + } + + SelfType& operator-=(const SelfType& aIntervals) { + for (const auto& interval : aIntervals.mIntervals) { + *this -= interval; + } + return *this; + } + + SelfType operator-(const SelfType& aInterval) const { + SelfType intervals(*this); + intervals -= aInterval; + return intervals; + } + + SelfType operator-(const ElemType& aInterval) const { + SelfType intervals(*this); + intervals -= aInterval; + return intervals; + } + + // Mutate this IntervalSet to be the union of this and aOther. + SelfType& Union(const SelfType& aOther) { + Add(aOther); + return *this; + } + + SelfType& Union(const ElemType& aInterval) { + Add(aInterval); + return *this; + } + + // Mutate this TimeRange to be the intersection of this and aOther. + SelfType& Intersection(const SelfType& aOther) { + ContainerType intersection; + + // Ensure the intersection has enough capacity to store the upper bound on + // the intersection size. This ensures that we don't spend time reallocating + // the storage as we append, at the expense of extra memory. 
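// For example (an illustrative sketch of what the sweep below produces):
//
//   IntervalSet<int> a;
//   a += Interval<int>(0, 3);
//   a += Interval<int>(5, 9);             // a is { [0,3), [5,9) }
//   a.Intersection(Interval<int>(2, 6));  // a becomes { [2,3), [5,6) }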
+ intersection.SetCapacity(std::max(aOther.Length(), mIntervals.Length())); + + const ContainerType& other = aOther.mIntervals; + IndexType i = 0, j = 0; + for (; i < mIntervals.Length() && j < other.Length();) { + if (mIntervals[i].IntersectsStrict(other[j])) { + intersection.AppendElement(mIntervals[i].Intersection(other[j])); + } + if (mIntervals[i].mEnd < other[j].mEnd) { + i++; + } else { + j++; + } + } + mIntervals = std::move(intersection); + return *this; + } + + SelfType& Intersection(const ElemType& aInterval) { + SelfType intervals(aInterval); + return Intersection(intervals); + } + + const ElemType& operator[](IndexType aIndex) const { + return mIntervals[aIndex]; + } + + // Returns the start boundary of the first interval. Or a default constructed + // T if IntervalSet is empty (and aExists if provided will be set to false). + T GetStart(bool* aExists = nullptr) const { + bool exists = !mIntervals.IsEmpty(); + + if (aExists) { + *aExists = exists; + } + + if (exists) { + return mIntervals[0].mStart; + } else { + return T(); + } + } + + // Returns the end boundary of the last interval. Or a default constructed T + // if IntervalSet is empty (and aExists if provided will be set to false). + T GetEnd(bool* aExists = nullptr) const { + bool exists = !mIntervals.IsEmpty(); + if (aExists) { + *aExists = exists; + } + + if (exists) { + return mIntervals.LastElement().mEnd; + } else { + return T(); + } + } + + IndexType Length() const { return mIntervals.Length(); } + + bool IsEmpty() const { return mIntervals.IsEmpty(); } + + T Start(IndexType aIndex) const { return mIntervals[aIndex].mStart; } + + T Start(IndexType aIndex, bool& aExists) const { + aExists = aIndex < mIntervals.Length(); + + if (aExists) { + return mIntervals[aIndex].mStart; + } else { + return T(); + } + } + + T End(IndexType aIndex) const { return mIntervals[aIndex].mEnd; } + + T End(IndexType aIndex, bool& aExists) const { + aExists = aIndex < mIntervals.Length(); + + if (aExists) { + return mIntervals[aIndex].mEnd; + } else { + return T(); + } + } + + bool Contains(const ElemType& aInterval) const { + for (const auto& interval : mIntervals) { + if (interval.Contains(aInterval)) { + return true; + } + } + return false; + } + + bool ContainsStrict(const ElemType& aInterval) const { + for (const auto& interval : mIntervals) { + if (interval.ContainsStrict(aInterval)) { + return true; + } + } + return false; + } + + bool Contains(const T& aX) const { + for (const auto& interval : mIntervals) { + if (interval.Contains(aX)) { + return true; + } + } + return false; + } + + bool ContainsStrict(const T& aX) const { + for (const auto& interval : mIntervals) { + if (interval.ContainsStrict(aX)) { + return true; + } + } + return false; + } + + bool ContainsWithStrictEnd(const T& aX) const { + for (const auto& interval : mIntervals) { + if (interval.ContainsWithStrictEnd(aX)) { + return true; + } + } + return false; + } + + bool ContainsWithStrictEnd(const ElemType& aInterval) const { + for (const auto& interval : mIntervals) { + if (interval.ContainsWithStrictEnd(aInterval)) { + return true; + } + } + return false; + } + + bool Intersects(const ElemType& aInterval) const { + for (const auto& interval : mIntervals) { + if (interval.Intersects(aInterval)) { + return true; + } + } + return false; + } + + bool IntersectsStrict(const ElemType& aInterval) const { + for (const auto& interval : mIntervals) { + if (interval.IntersectsStrict(aInterval)) { + return true; + } + } + return false; + } + + // Returns if there's any 
intersection between this and aOther. + bool IntersectsStrict(const SelfType& aOther) const { + const ContainerType& other = aOther.mIntervals; + IndexType i = 0, j = 0; + for (; i < mIntervals.Length() && j < other.Length();) { + if (mIntervals[i].IntersectsStrict(other[j])) { + return true; + } + if (mIntervals[i].mEnd < other[j].mEnd) { + i++; + } else { + j++; + } + } + return false; + } + + bool IntersectsWithStrictEnd(const ElemType& aInterval) const { + for (const auto& interval : mIntervals) { + if (interval.IntersectsWithStrictEnd(aInterval)) { + return true; + } + } + return false; + } + + // Shift all values by aOffset. + SelfType& Shift(const T& aOffset) { + for (auto& interval : mIntervals) { + interval.mStart = interval.mStart + aOffset; + interval.mEnd = interval.mEnd + aOffset; + } + return *this; + } + + void SetFuzz(const T& aFuzz) { + for (auto& interval : mIntervals) { + interval.SetFuzz(aFuzz); + } + MergeOverlappingIntervals(); + } + + static const IndexType NoIndex = IndexType(-1); + + IndexType Find(const T& aValue) const { + for (IndexType i = 0; i < mIntervals.Length(); i++) { + if (mIntervals[i].Contains(aValue)) { + return i; + } + } + return NoIndex; + } + + // Methods for range-based for loops. + typename ContainerType::iterator begin() { return mIntervals.begin(); } + + typename ContainerType::const_iterator begin() const { + return mIntervals.begin(); + } + + typename ContainerType::iterator end() { return mIntervals.end(); } + + typename ContainerType::const_iterator end() const { + return mIntervals.end(); + } + + ElemType& LastInterval() { + MOZ_ASSERT(!mIntervals.IsEmpty()); + return mIntervals.LastElement(); + } + + const ElemType& LastInterval() const { + MOZ_ASSERT(!mIntervals.IsEmpty()); + return mIntervals.LastElement(); + } + + void Clear() { mIntervals.Clear(); } + + protected: + ContainerType mIntervals; + + private: + void Normalize() { + if (mIntervals.Length() < 2) { + return; + } + mIntervals.Sort(CompareIntervals()); + MergeOverlappingIntervals(); + } + + void MergeOverlappingIntervals() { + if (mIntervals.Length() < 2) { + return; + } + + // This merges the intervals in place. + IndexType read = 0; + IndexType write = 0; + while (read < mIntervals.Length()) { + ElemType current(mIntervals[read]); + read++; + while (read < mIntervals.Length() && current.Touches(mIntervals[read])) { + current = current.Span(mIntervals[read]); + read++; + } + mIntervals[write] = current; + write++; + } + mIntervals.SetLength(write); + } + + struct CompareIntervals { + bool Equals(const ElemType& aT1, const ElemType& aT2) const { + return aT1.mStart == aT2.mStart && aT1.mEnd == aT2.mEnd; + } + + bool LessThan(const ElemType& aT1, const ElemType& aT2) const { + return aT1.mStart < aT2.mStart; + } + }; +}; + +// clang doesn't allow for this to be defined inline of IntervalSet. 
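// Usage sketch for the free helpers below (int for illustration; in dom/media
// these are more typically instantiated with media::TimeUnit):
//
//   IntervalSet<int> a(Interval<int>(0, 4));
//   IntervalSet<int> b(Interval<int>(2, 8));
//   IntervalSet<int> u = Union(a, b);          // { [0,8) }
//   IntervalSet<int> i = Intersection(a, b);   // { [2,4) }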
+template +IntervalSet Union(const IntervalSet& aIntervals1, + const IntervalSet& aIntervals2) { + IntervalSet intervals(aIntervals1); + intervals.Union(aIntervals2); + return intervals; +} + +template +IntervalSet Intersection(const IntervalSet& aIntervals1, + const IntervalSet& aIntervals2) { + IntervalSet intersection(aIntervals1); + intersection.Intersection(aIntervals2); + return intersection; +} + +} // namespace mozilla::media + +#endif // DOM_MEDIA_INTERVALS_H_ diff --git a/dom/media/MPSCQueue.h b/dom/media/MPSCQueue.h new file mode 100644 index 0000000000..ea7848154f --- /dev/null +++ b/dom/media/MPSCQueue.h @@ -0,0 +1,132 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_dom_MPSCQueue_h +#define mozilla_dom_MPSCQueue_h + +namespace mozilla { + +// This class implements a lock-free multiple producer single consumer queue of +// fixed size log messages, with the following characteristics: +// - Unbounded (uses a intrinsic linked list) +// - Allocates on Push. Push can be called on any thread. +// - Deallocates on Pop. Pop MUST always be called on the same thread for the +// life-time of the queue. +// +// In our scenario, the producer threads are real-time, they can't block. The +// consummer thread runs every now and then and empties the queue to a log +// file, on disk. +const size_t MPSC_MSG_RESERVED = sizeof(std::atomic); + +template +class MPSCQueue { + public: + struct Message { + Message() { mNext.store(nullptr, std::memory_order_relaxed); } + Message(const Message& aMessage) = delete; + void operator=(const Message& aMessage) = delete; + + std::atomic mNext; + T data; + }; + + // Creates a new MPSCQueue. Initially, the queue has a single sentinel node, + // pointed to by both mHead and mTail. + MPSCQueue() + // At construction, the initial message points to nullptr (it has no + // successor). It is a sentinel node, that does not contain meaningful + // data. + : mHead(new Message()), mTail(mHead.load(std::memory_order_relaxed)) {} + + ~MPSCQueue() { + Message dummy; + while (Pop(&dummy.data)) { + } + Message* front = mHead.load(std::memory_order_relaxed); + delete front; + } + + void Push(MPSCQueue::Message* aMessage) { + // The next two non-commented line are called A and B in this paragraph. + // Producer threads i, i-1, etc. are numbered in the order they reached + // A in time, thread i being the thread that has reached A first. + // Atomically, on line A the new `mHead` is set to be the node that was + // just allocated, with strong memory order. From now on, any thread + // that reaches A will see that the node just allocated is + // effectively the head of the list, and will make itself the new head + // of the list. + // In a bad case (when thread i executes A and then + // is not scheduled for a long time), it is possible that thread i-1 and + // subsequent threads create a seemingly disconnected set of nodes, but + // they all have the correct value for the next node to set as their + // mNext member on their respective stacks (in `prev`), and this is + // always correct. When the scheduler resumes, and line B is executed, + // the correct linkage is resumed. + // Before line B, since mNext for the node was the last element of + // the queue still has an mNext of nullptr, Pop will not see the node + // added. 
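// Aside: a minimal usage sketch for this queue, with a hypothetical payload
// type. Producers allocate a Message and Push() it from any thread; the single
// consumer thread Pop()s, and the queue deletes the nodes internally.
//
//   struct LogPayload { char mText[256]; };
//   MPSCQueue<LogPayload> queue;
//   // On any producer thread:
//   auto* msg = new MPSCQueue<LogPayload>::Message();
//   // ... fill msg->data.mText ...
//   queue.Push(msg);
//   // On the single consumer thread:
//   LogPayload payload;
//   while (queue.Pop(&payload)) { /* write payload.mText to the log file */ }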
+ // For line A, it's critical to have strong ordering both ways (since + // it's going to possibly be read and write repeatidly by multiple + // threads) + // Line B can have weaker guarantees, it's only going to be written by a + // single thread, and we just need to ensure it's read properly by a + // single other one. + Message* prev = mHead.exchange(aMessage, std::memory_order_acq_rel); + prev->mNext.store(aMessage, std::memory_order_release); + } + + // Copy the content of the first message of the queue to aOutput, and + // frees the message. Returns true if there was a message, in which case + // `aOutput` contains a valid value. If the queue was empty, returns false, + // in which case `aOutput` is left untouched. + bool Pop(T* aOutput) { + // Similarly, in this paragraph, the two following lines are called A + // and B, and threads are called thread i, i-1, etc. in order of + // execution of line A. + // On line A, the first element of the queue is acquired. It is simply a + // sentinel node. + // On line B, we acquire the node that has the data we want. If B is + // null, then only the sentinel node was present in the queue, we can + // safely return false. + // mTail can be loaded with relaxed ordering, since it's not written nor + // read by any other thread (this queue is single consumer). + // mNext can be written to by one of the producer, so it's necessary to + // ensure those writes are seen, hence the stricter ordering. + Message* tail = mTail.load(std::memory_order_relaxed); + Message* next = tail->mNext.load(std::memory_order_acquire); + + if (next == nullptr) { + return false; + } + + *aOutput = next->data; + + // Simply shift the queue one node further, so that the sentinel node is + // now pointing to the correct most ancient node. It contains stale data, + // but this data will never be read again. + // It's only necessary to ensure the previous load on this thread is not + // reordered past this line, so release ordering is sufficient here. + mTail.store(next, std::memory_order_release); + + // This thread is now the only thing that points to `tail`, it can be + // safely deleted. + delete tail; + + return true; + } + + private: + // An atomic pointer to the most recent message in the queue. + std::atomic mHead; + // An atomic pointer to a sentinel node, that points to the oldest message + // in the queue. + std::atomic mTail; + + MPSCQueue(const MPSCQueue&) = delete; + void operator=(const MPSCQueue&) = delete; +}; + +} // namespace mozilla + +#endif // mozilla_dom_MPSCQueue_h diff --git a/dom/media/MediaBlockCacheBase.h b/dom/media/MediaBlockCacheBase.h new file mode 100644 index 0000000000..d3cadf3dea --- /dev/null +++ b/dom/media/MediaBlockCacheBase.h @@ -0,0 +1,81 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MEDIA_BLOCK_CACHE_BASE_H_ +#define MEDIA_BLOCK_CACHE_BASE_H_ + +#include "MediaCache.h" +#include "mozilla/Span.h" + +namespace mozilla { + +// Manages block management for the media cache. Data comes in over the network +// via callbacks on the main thread, however we don't want to write the +// incoming data to the media cache on the main thread, as this could block +// causing UI jank. 
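// A hedged sketch of how a concrete block cache (see the virtual methods
// declared below) is typically driven: whole blocks are written, arbitrary
// byte ranges are read back. `cache` and `index` are assumed names.
//
//   RefPtr<MediaBlockCacheBase> cache = /* FileBlockCache or MemoryBlockCache */;
//   if (NS_SUCCEEDED(cache->Init())) {
//     uint8_t block[MediaBlockCacheBase::BLOCK_SIZE] = {};
//     uint32_t index = 0;
//     cache->WriteBlock(index, Span<const uint8_t>(block, sizeof(block)),
//                       Span<const uint8_t>());
//     int32_t bytesRead = 0;
//     cache->Read(int64_t(index) * MediaBlockCacheBase::BLOCK_SIZE + 100,
//                 block, 512, &bytesRead);
//   }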
+// +// So MediaBlockCacheBase provides an abstraction for a temporary memory buffer +// or file accessible as an array of blocks, which supports a block move +// operation, and allows synchronous reading and writing from any thread, with +// writes being buffered as needed so as not to block. +// +// Writes and cache block moves (which require reading) may be deferred to +// their own non-main thread. This object also ensures that data which has +// been scheduled to be written, but hasn't actually *been* written, is read +// as if it had, i.e. pending writes are cached in readable memory until +// they're flushed to file. +// +// To improve efficiency, writes can only be done at block granularity, +// whereas reads can be done with byte granularity. +// +// Note it's also recommended not to read from the media cache from the main +// thread to prevent jank. +class MediaBlockCacheBase { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaBlockCacheBase) + + static_assert( + MediaCacheStream::BLOCK_SIZE < + static_cast< + std::remove_const::type>( + INT32_MAX), + "MediaCacheStream::BLOCK_SIZE should fit in 31 bits"); + static const int32_t BLOCK_SIZE = MediaCacheStream::BLOCK_SIZE; + + protected: + virtual ~MediaBlockCacheBase() = default; + + public: + // Initialize this cache. + virtual nsresult Init() = 0; + + // Erase data and discard pending changes to reset the cache to its pristine + // state as it was after Init(). + virtual void Flush() = 0; + + // Maximum number of blocks expected in this block cache. (But allow overflow + // to accomodate incoming traffic before MediaCache can handle it.) + virtual size_t GetMaxBlocks(size_t aCacheSizeInKiB) const = 0; + + // Can be called on any thread. This defers to a non-main thread. + virtual nsresult WriteBlock(uint32_t aBlockIndex, Span aData1, + Span aData2) = 0; + + // Synchronously reads data from file. May read from file or memory + // depending on whether written blocks have been flushed to file yet. + // Not recommended to be called from the main thread, as can cause jank. + virtual nsresult Read(int64_t aOffset, uint8_t* aData, int32_t aLength, + int32_t* aBytes) = 0; + + // Moves a block asynchronously. Can be called on any thread. + // This defers file I/O to a non-main thread. + virtual nsresult MoveBlock(int32_t aSourceBlockIndex, + int32_t aDestBlockIndex) = 0; +}; + +} // End namespace mozilla. + +#endif /* MEDIA_BLOCK_CACHE_BASE_H_ */ diff --git a/dom/media/MediaCache.cpp b/dom/media/MediaCache.cpp new file mode 100644 index 0000000000..41d51a49cc --- /dev/null +++ b/dom/media/MediaCache.cpp @@ -0,0 +1,2816 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "MediaCache.h" + +#include "ChannelMediaResource.h" +#include "FileBlockCache.h" +#include "MediaBlockCacheBase.h" +#include "MediaResource.h" +#include "MemoryBlockCache.h" +#include "mozilla/Attributes.h" +#include "mozilla/ClearOnShutdown.h" +#include "mozilla/ErrorNames.h" +#include "mozilla/Logging.h" +#include "mozilla/Monitor.h" +#include "mozilla/Preferences.h" +#include "mozilla/Services.h" +#include "mozilla/StaticPtr.h" +#include "mozilla/StaticPrefs_browser.h" +#include "mozilla/StaticPrefs_media.h" +#include "mozilla/Telemetry.h" +#include "nsContentUtils.h" +#include "nsINetworkLinkService.h" +#include "nsIObserverService.h" +#include "nsPrintfCString.h" +#include "nsProxyRelease.h" +#include "nsTHashSet.h" +#include "nsThreadUtils.h" +#include "prio.h" +#include "VideoUtils.h" +#include + +namespace mozilla { + +#undef LOG +#undef LOGI +#undef LOGE +LazyLogModule gMediaCacheLog("MediaCache"); +#define LOG(...) MOZ_LOG(gMediaCacheLog, LogLevel::Debug, (__VA_ARGS__)) +#define LOGI(...) MOZ_LOG(gMediaCacheLog, LogLevel::Info, (__VA_ARGS__)) +#define LOGE(...) \ + NS_DebugBreak(NS_DEBUG_WARNING, nsPrintfCString(__VA_ARGS__).get(), nullptr, \ + __FILE__, __LINE__) + +// For HTTP seeking, if number of bytes needing to be +// seeked forward is less than this value then a read is +// done rather than a byte range request. +// +// If we assume a 100Mbit connection, and assume reissuing an HTTP seek causes +// a delay of 200ms, then in that 200ms we could have simply read ahead 2MB. So +// setting SEEK_VS_READ_THRESHOLD to 1MB sounds reasonable. +static const int64_t SEEK_VS_READ_THRESHOLD = 1 * 1024 * 1024; + +// Readahead blocks for non-seekable streams will be limited to this +// fraction of the cache space. We don't normally evict such blocks +// because replacing them requires a seek, but we need to make sure +// they don't monopolize the cache. +static const double NONSEEKABLE_READAHEAD_MAX = 0.5; + +// Data N seconds before the current playback position is given the same +// priority as data REPLAY_PENALTY_FACTOR*N seconds ahead of the current +// playback position. REPLAY_PENALTY_FACTOR is greater than 1 to reflect that +// data in the past is less likely to be played again than data in the future. +// We want to give data just behind the current playback position reasonably +// high priority in case codecs need to retrieve that data (e.g. because +// tracks haven't been muxed well or are being decoded at uneven rates). +// 1/REPLAY_PENALTY_FACTOR as much data will be kept behind the +// current playback position as will be kept ahead of the current playback +// position. +static const uint32_t REPLAY_PENALTY_FACTOR = 3; + +// When looking for a reusable block, scan forward this many blocks +// from the desired "best" block location to look for free blocks, +// before we resort to scanning the whole cache. The idea is to try to +// store runs of stream blocks close-to-consecutively in the cache if we +// can. 
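// A back-of-the-envelope check of SEEK_VS_READ_THRESHOLD above, using the
// numbers from its comment: 100 Mbit/s is roughly 12.5 MB/s, so 200 ms of
// reading covers about 2.5 MB, and skipping a 1 MiB forward gap by reading
// takes roughly 80 ms -- cheaper than reissuing the request.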
+static const uint32_t FREE_BLOCK_SCAN_LIMIT = 16; + +#ifdef DEBUG +// Turn this on to do very expensive cache state validation +// #define DEBUG_VERIFY_CACHE +#endif + +class MediaCacheFlusher final : public nsIObserver, + public nsSupportsWeakReference { + public: + NS_DECL_ISUPPORTS + NS_DECL_NSIOBSERVER + + static void RegisterMediaCache(MediaCache* aMediaCache); + static void UnregisterMediaCache(MediaCache* aMediaCache); + + private: + MediaCacheFlusher() = default; + ~MediaCacheFlusher() = default; + + // Singleton instance created when a first MediaCache is registered, and + // released when the last MediaCache is unregistered. + // The observer service will keep a weak reference to it, for notifications. + static StaticRefPtr gMediaCacheFlusher; + + nsTArray mMediaCaches; +}; + +/* static */ +StaticRefPtr MediaCacheFlusher::gMediaCacheFlusher; + +NS_IMPL_ISUPPORTS(MediaCacheFlusher, nsIObserver, nsISupportsWeakReference) + +/* static */ +void MediaCacheFlusher::RegisterMediaCache(MediaCache* aMediaCache) { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + if (!gMediaCacheFlusher) { + gMediaCacheFlusher = new MediaCacheFlusher(); + nsCOMPtr observerService = + mozilla::services::GetObserverService(); + if (observerService) { + observerService->AddObserver(gMediaCacheFlusher, "last-pb-context-exited", + true); + observerService->AddObserver(gMediaCacheFlusher, + "cacheservice:empty-cache", true); + observerService->AddObserver( + gMediaCacheFlusher, "contentchild:network-link-type-changed", true); + observerService->AddObserver(gMediaCacheFlusher, + NS_NETWORK_LINK_TYPE_TOPIC, true); + } + } + + gMediaCacheFlusher->mMediaCaches.AppendElement(aMediaCache); +} + +/* static */ +void MediaCacheFlusher::UnregisterMediaCache(MediaCache* aMediaCache) { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + gMediaCacheFlusher->mMediaCaches.RemoveElement(aMediaCache); + + if (gMediaCacheFlusher->mMediaCaches.Length() == 0) { + gMediaCacheFlusher = nullptr; + } +} + +class MediaCache { + using AutoLock = MonitorAutoLock; + + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaCache) + + friend class MediaCacheStream::BlockList; + typedef MediaCacheStream::BlockList BlockList; + static const int64_t BLOCK_SIZE = MediaCacheStream::BLOCK_SIZE; + + // Get an instance of a MediaCache (or nullptr if initialization failed). + // aContentLength is the content length if known already, otherwise -1. + // If the length is known and considered small enough, a discrete MediaCache + // with memory backing will be given. Otherwise the one MediaCache with + // file backing will be provided. + // If aIsPrivateBrowsing is true, only initialization of a memory backed + // MediaCache will be attempted, returning nullptr if that fails. + static RefPtr GetMediaCache(int64_t aContentLength, + bool aIsPrivateBrowsing); + + nsISerialEventTarget* OwnerThread() const { return sThread; } + + // Brutally flush the cache contents. Main thread only. + void Flush(); + + // Close all streams associated with private browsing windows. This will + // also remove the blocks from the cache since we don't want to leave any + // traces when PB is done. + void CloseStreamsForPrivateBrowsing(); + + // Cache-file access methods. These are the lowest-level cache methods. + // mMonitor must be held; these can be called on any thread. + // This can return partial reads. + // Note mMonitor will be dropped while doing IO. The caller need + // to handle changes happening when the monitor is not held. 
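// For example, a caller that needs a fixed number of bytes would loop over the
// partial reads (an illustrative sketch, not a caller that exists in this
// file), re-checking stream state each time around because the monitor may
// have been dropped during the IO:
//
//   int32_t done = 0;
//   while (done < aLength) {
//     int32_t bytes = 0;
//     nsresult rv = ReadCacheFile(aLock, aOffset + done,
//                                 static_cast<uint8_t*>(aData) + done,
//                                 aLength - done, &bytes);
//     if (NS_FAILED(rv)) return rv;
//     if (bytes == 0) break;  // EOF
//     // ... re-validate any state read before the call ...
//     done += bytes;
//   }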
+ nsresult ReadCacheFile(AutoLock&, int64_t aOffset, void* aData, + int32_t aLength, int32_t* aBytes); + + // The generated IDs are always positive. + int64_t AllocateResourceID(AutoLock&) { return ++mNextResourceID; } + + // mMonitor must be held, called on main thread. + // These methods are used by the stream to set up and tear down streams, + // and to handle reads and writes. + // Add aStream to the list of streams. + void OpenStream(AutoLock&, MediaCacheStream* aStream, bool aIsClone = false); + // Remove aStream from the list of streams. + void ReleaseStream(AutoLock&, MediaCacheStream* aStream); + // Free all blocks belonging to aStream. + void ReleaseStreamBlocks(AutoLock&, MediaCacheStream* aStream); + // Find a cache entry for this data, and write the data into it + void AllocateAndWriteBlock( + AutoLock&, MediaCacheStream* aStream, int32_t aStreamBlockIndex, + Span aData1, + Span aData2 = Span()); + + // mMonitor must be held; can be called on any thread + // Notify the cache that a seek has been requested. Some blocks may + // need to change their class between PLAYED_BLOCK and READAHEAD_BLOCK. + // This does not trigger channel seeks directly, the next Update() + // will do that if necessary. The caller will call QueueUpdate(). + void NoteSeek(AutoLock&, MediaCacheStream* aStream, int64_t aOldOffset); + // Notify the cache that a block has been read from. This is used + // to update last-use times. The block may not actually have a + // cache entry yet since Read can read data from a stream's + // in-memory mPartialBlockBuffer while the block is only partly full, + // and thus hasn't yet been committed to the cache. The caller will + // call QueueUpdate(). + void NoteBlockUsage(AutoLock&, MediaCacheStream* aStream, int32_t aBlockIndex, + int64_t aStreamOffset, MediaCacheStream::ReadMode aMode, + TimeStamp aNow); + // Mark aStream as having the block, adding it as an owner. + void AddBlockOwnerAsReadahead(AutoLock&, int32_t aBlockIndex, + MediaCacheStream* aStream, + int32_t aStreamBlockIndex); + + // This queues a call to Update() on the media cache thread. + void QueueUpdate(AutoLock&); + + // Notify all streams for the resource ID that the suspended status changed + // at the end of MediaCache::Update. + void QueueSuspendedStatusUpdate(AutoLock&, int64_t aResourceID); + + // Updates the cache state asynchronously on the media cache thread: + // -- try to trim the cache back to its desired size, if necessary + // -- suspend channels that are going to read data that's lower priority + // than anything currently cached + // -- resume channels that are going to read data that's higher priority + // than something currently cached + // -- seek channels that need to seek to a new location + void Update(); + +#ifdef DEBUG_VERIFY_CACHE + // Verify invariants, especially block list invariants + void Verify(AutoLock&); +#else + void Verify(AutoLock&) {} +#endif + + mozilla::Monitor& Monitor() { + // This method should only be called outside the main thread. + // The MOZ_DIAGNOSTIC_ASSERT(!NS_IsMainThread()) assertion should be + // re-added as part of bug 1464045 + return mMonitor; + } + + // Polls whether we're on a cellular network connection, and posts a task + // to the MediaCache thread to set the value of MediaCache::sOnCellular. + // Call on main thread only. + static void UpdateOnCellular(); + + /** + * An iterator that makes it easy to iterate through all streams that + * have a given resource ID and are not closed. + * Must be used while holding the media cache lock. 
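   * Typical use (a sketch; aLock is the AutoLock the caller already holds):
   *
   *   ResourceStreamIterator iter(this, aResourceID);
   *   while (MediaCacheStream* stream = iter.Next(aLock)) {
   *     // ... act on each open stream sharing this resource ...
   *   }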
+ */ + class ResourceStreamIterator { + public: + ResourceStreamIterator(MediaCache* aMediaCache, int64_t aResourceID) + : mMediaCache(aMediaCache), mResourceID(aResourceID), mNext(0) { + aMediaCache->mMonitor.AssertCurrentThreadOwns(); + } + MediaCacheStream* Next(AutoLock& aLock) { + while (mNext < mMediaCache->mStreams.Length()) { + MediaCacheStream* stream = mMediaCache->mStreams[mNext]; + ++mNext; + if (stream->GetResourceID() == mResourceID && !stream->IsClosed(aLock)) + return stream; + } + return nullptr; + } + + private: + MediaCache* mMediaCache; + int64_t mResourceID; + uint32_t mNext; + }; + + protected: + explicit MediaCache(MediaBlockCacheBase* aCache) + : mMonitor("MediaCache.mMonitor"), + mBlockCache(aCache), + mUpdateQueued(false) +#ifdef DEBUG + , + mInUpdate(false) +#endif + { + NS_ASSERTION(NS_IsMainThread(), "Only construct MediaCache on main thread"); + MOZ_COUNT_CTOR(MediaCache); + MediaCacheFlusher::RegisterMediaCache(this); + UpdateOnCellular(); + } + + ~MediaCache() { + NS_ASSERTION(NS_IsMainThread(), "Only destroy MediaCache on main thread"); + if (this == gMediaCache) { + LOG("~MediaCache(Global file-backed MediaCache)"); + // This is the file-backed MediaCache, reset the global pointer. + gMediaCache = nullptr; + } else { + LOG("~MediaCache(Memory-backed MediaCache %p)", this); + } + MediaCacheFlusher::UnregisterMediaCache(this); + NS_ASSERTION(mStreams.IsEmpty(), "Stream(s) still open!"); + Truncate(); + NS_ASSERTION(mIndex.Length() == 0, "Blocks leaked?"); + + MOZ_COUNT_DTOR(MediaCache); + } + + static size_t CacheSize() { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + return sOnCellular ? StaticPrefs::media_cache_size_cellular() + : StaticPrefs::media_cache_size(); + } + + static size_t ReadaheadLimit() { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + return sOnCellular ? StaticPrefs::media_cache_readahead_limit_cellular() + : StaticPrefs::media_cache_readahead_limit(); + } + + static size_t ResumeThreshold() { + return sOnCellular ? StaticPrefs::media_cache_resume_threshold_cellular() + : StaticPrefs::media_cache_resume_threshold(); + } + + // Find a free or reusable block and return its index. If there are no + // free blocks and no reusable blocks, add a new block to the cache + // and return it. Can return -1 on OOM. + int32_t FindBlockForIncomingData(AutoLock&, TimeStamp aNow, + MediaCacheStream* aStream, + int32_t aStreamBlockIndex); + // Find a reusable block --- a free block, if there is one, otherwise + // the reusable block with the latest predicted-next-use, or -1 if + // there aren't any freeable blocks. Only block indices less than + // aMaxSearchBlockIndex are considered. If aForStream is non-null, + // then aForStream and aForStreamBlock indicate what media data will + // be placed; FindReusableBlock will favour returning free blocks + // near other blocks for that point in the stream. + int32_t FindReusableBlock(AutoLock&, TimeStamp aNow, + MediaCacheStream* aForStream, + int32_t aForStreamBlock, + int32_t aMaxSearchBlockIndex); + bool BlockIsReusable(AutoLock&, int32_t aBlockIndex); + // Given a list of blocks sorted with the most reusable blocks at the + // end, find the last block whose stream is not pinned (if any) + // and whose cache entry index is less than aBlockIndexLimit + // and append it to aResult. 
+ void AppendMostReusableBlock(AutoLock&, BlockList* aBlockList, + nsTArray* aResult, + int32_t aBlockIndexLimit); + + enum BlockClass { + // block belongs to mMetadataBlockList because data has been consumed + // from it in "metadata mode" --- in particular blocks read during + // Ogg seeks go into this class. These blocks may have played data + // in them too. + METADATA_BLOCK, + // block belongs to mPlayedBlockList because its offset is + // less than the stream's current reader position + PLAYED_BLOCK, + // block belongs to the stream's mReadaheadBlockList because its + // offset is greater than or equal to the stream's current + // reader position + READAHEAD_BLOCK + }; + + struct BlockOwner { + constexpr BlockOwner() = default; + + // The stream that owns this block, or null if the block is free. + MediaCacheStream* mStream = nullptr; + // The block index in the stream. Valid only if mStream is non-null. + // Initialized to an insane value to highlight misuse. + uint32_t mStreamBlock = UINT32_MAX; + // Time at which this block was last used. Valid only if + // mClass is METADATA_BLOCK or PLAYED_BLOCK. + TimeStamp mLastUseTime; + BlockClass mClass = READAHEAD_BLOCK; + }; + + struct Block { + // Free blocks have an empty mOwners array + nsTArray mOwners; + }; + + // Get the BlockList that the block should belong to given its + // current owner + BlockList* GetListForBlock(AutoLock&, BlockOwner* aBlock); + // Get the BlockOwner for the given block index and owning stream + // (returns null if the stream does not own the block) + BlockOwner* GetBlockOwner(AutoLock&, int32_t aBlockIndex, + MediaCacheStream* aStream); + // Returns true iff the block is free + bool IsBlockFree(int32_t aBlockIndex) { + return mIndex[aBlockIndex].mOwners.IsEmpty(); + } + // Add the block to the free list and mark its streams as not having + // the block in cache + void FreeBlock(AutoLock&, int32_t aBlock); + // Mark aStream as not having the block, removing it as an owner. If + // the block has no more owners it's added to the free list. + void RemoveBlockOwner(AutoLock&, int32_t aBlockIndex, + MediaCacheStream* aStream); + // Swap all metadata associated with the two blocks. The caller + // is responsible for swapping up any cache file state. + void SwapBlocks(AutoLock&, int32_t aBlockIndex1, int32_t aBlockIndex2); + // Insert the block into the readahead block list for the stream + // at the right point in the list. + void InsertReadaheadBlock(AutoLock&, BlockOwner* aBlockOwner, + int32_t aBlockIndex); + + // Guess the duration until block aBlock will be next used + TimeDuration PredictNextUse(AutoLock&, TimeStamp aNow, int32_t aBlock); + // Guess the duration until the next incoming data on aStream will be used + TimeDuration PredictNextUseForIncomingData(AutoLock&, + MediaCacheStream* aStream); + + // Truncate the file and index array if there are free blocks at the + // end + void Truncate(); + + void FlushInternal(AutoLock&); + + // There is at most one file-backed media cache. + // It is owned by all MediaCacheStreams that use it. + // This is a raw pointer set by GetMediaCache(), and reset by ~MediaCache(), + // both on the main thread; and is not accessed anywhere else. + static inline MediaCache* gMediaCache = nullptr; + + // This member is main-thread only. It's used to allocate unique + // resource IDs to streams. + int64_t mNextResourceID = 0; + + // The monitor protects all the data members here. Also, off-main-thread + // readers that need to block will Wait() on this monitor. 
When new + // data becomes available in the cache, we NotifyAll() on this monitor. + mozilla::Monitor mMonitor MOZ_UNANNOTATED; + // This must always be accessed when the monitor is held. + nsTArray mStreams; + // The Blocks describing the cache entries. + nsTArray mIndex; + + RefPtr mBlockCache; + // The list of free blocks; they are not ordered. + BlockList mFreeBlocks; + // True if an event to run Update() has been queued but not processed + bool mUpdateQueued; +#ifdef DEBUG + bool mInUpdate; +#endif + // A list of resource IDs to notify about the change in suspended status. + nsTArray mSuspendedStatusToNotify; + // The thread on which we will run data callbacks from the channels. + // Note this thread is shared among all MediaCache instances. + static inline StaticRefPtr sThread; + // True if we've tried to init sThread. Note we try once only so it is safe + // to access sThread on all threads. + static inline bool sThreadInit = false; + + private: + // MediaCache thread only. True if we're on a cellular network connection. + static inline bool sOnCellular = false; + + // Try to trim the cache back to its desired size, if necessary. Return the + // amount of free block counts after trimming. + int32_t TrimCacheIfNeeded(AutoLock& aLock, const TimeStamp& aNow); + + struct StreamAction { + enum { NONE, SEEK, RESUME, SUSPEND } mTag = NONE; + // Members for 'SEEK' only. + bool mResume = false; + int64_t mSeekTarget = -1; + }; + // In each update, media cache would determine an action for each stream, + // possible actions are: keeping the stream unchanged, seeking to the new + // position, resuming its channel or suspending its channel. The action would + // be determined by considering a lot of different factors, eg. stream's data + // offset and length, how many free or reusable blocks are avaliable, the + // predicted time for the next block...e.t.c. This function will write the + // corresponding action for each stream in `mStreams` into `aActions`. + void DetermineActionsForStreams(AutoLock& aLock, const TimeStamp& aNow, + nsTArray& aActions, + int32_t aFreeBlockCount); + + // Used by MediaCacheStream::GetDebugInfo() only for debugging. + // Don't add new callers to this function. + friend void MediaCacheStream::GetDebugInfo( + dom::MediaCacheStreamDebugInfo& aInfo); + mozilla::Monitor& GetMonitorOnTheMainThread() { + MOZ_DIAGNOSTIC_ASSERT(NS_IsMainThread()); + return mMonitor; + } +}; + +void MediaCache::UpdateOnCellular() { + NS_ASSERTION(NS_IsMainThread(), + "Only call on main thread"); // JNI required on Android... 
+ bool onCellular = OnCellularConnection(); + LOG("MediaCache::UpdateOnCellular() onCellular=%d", onCellular); + nsCOMPtr r = NS_NewRunnableFunction( + "MediaCache::UpdateOnCellular", [=]() { sOnCellular = onCellular; }); + sThread->Dispatch(r.forget()); +} + +NS_IMETHODIMP +MediaCacheFlusher::Observe(nsISupports* aSubject, char const* aTopic, + char16_t const* aData) { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + if (strcmp(aTopic, "last-pb-context-exited") == 0) { + for (MediaCache* mc : mMediaCaches) { + mc->CloseStreamsForPrivateBrowsing(); + } + return NS_OK; + } + if (strcmp(aTopic, "cacheservice:empty-cache") == 0) { + for (MediaCache* mc : mMediaCaches) { + mc->Flush(); + } + return NS_OK; + } + if (strcmp(aTopic, "contentchild:network-link-type-changed") == 0 || + strcmp(aTopic, NS_NETWORK_LINK_TYPE_TOPIC) == 0) { + MediaCache::UpdateOnCellular(); + } + return NS_OK; +} + +MediaCacheStream::MediaCacheStream(ChannelMediaResource* aClient, + bool aIsPrivateBrowsing) + : mMediaCache(nullptr), + mClient(aClient), + mIsTransportSeekable(false), + mCacheSuspended(false), + mChannelEnded(false), + mStreamOffset(0), + mPlaybackBytesPerSecond(10000), + mPinCount(0), + mNotifyDataEndedStatus(NS_ERROR_NOT_INITIALIZED), + mIsPrivateBrowsing(aIsPrivateBrowsing) {} + +size_t MediaCacheStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { + AutoLock lock(mMediaCache->Monitor()); + + // Looks like these are not owned: + // - mClient + size_t size = mBlocks.ShallowSizeOfExcludingThis(aMallocSizeOf); + size += mReadaheadBlocks.SizeOfExcludingThis(aMallocSizeOf); + size += mMetadataBlocks.SizeOfExcludingThis(aMallocSizeOf); + size += mPlayedBlocks.SizeOfExcludingThis(aMallocSizeOf); + size += aMallocSizeOf(mPartialBlockBuffer.get()); + + return size; +} + +size_t MediaCacheStream::BlockList::SizeOfExcludingThis( + MallocSizeOf aMallocSizeOf) const { + return mEntries.ShallowSizeOfExcludingThis(aMallocSizeOf); +} + +void MediaCacheStream::BlockList::AddFirstBlock(int32_t aBlock) { + NS_ASSERTION(!mEntries.GetEntry(aBlock), "Block already in list"); + Entry* entry = mEntries.PutEntry(aBlock); + + if (mFirstBlock < 0) { + entry->mNextBlock = entry->mPrevBlock = aBlock; + } else { + entry->mNextBlock = mFirstBlock; + entry->mPrevBlock = mEntries.GetEntry(mFirstBlock)->mPrevBlock; + mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = aBlock; + mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = aBlock; + } + mFirstBlock = aBlock; + ++mCount; +} + +void MediaCacheStream::BlockList::AddAfter(int32_t aBlock, int32_t aBefore) { + NS_ASSERTION(!mEntries.GetEntry(aBlock), "Block already in list"); + Entry* entry = mEntries.PutEntry(aBlock); + + Entry* addAfter = mEntries.GetEntry(aBefore); + NS_ASSERTION(addAfter, "aBefore not in list"); + + entry->mNextBlock = addAfter->mNextBlock; + entry->mPrevBlock = aBefore; + mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = aBlock; + mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = aBlock; + ++mCount; +} + +void MediaCacheStream::BlockList::RemoveBlock(int32_t aBlock) { + Entry* entry = mEntries.GetEntry(aBlock); + MOZ_DIAGNOSTIC_ASSERT(entry, "Block not in list"); + + if (entry->mNextBlock == aBlock) { + MOZ_DIAGNOSTIC_ASSERT(entry->mPrevBlock == aBlock, + "Linked list inconsistency"); + MOZ_DIAGNOSTIC_ASSERT(mFirstBlock == aBlock, "Linked list inconsistency"); + mFirstBlock = -1; + } else { + if (mFirstBlock == aBlock) { + mFirstBlock = entry->mNextBlock; + } + mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = entry->mPrevBlock; + 
mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = entry->mNextBlock; + } + mEntries.RemoveEntry(entry); + --mCount; +} + +int32_t MediaCacheStream::BlockList::GetLastBlock() const { + if (mFirstBlock < 0) return -1; + return mEntries.GetEntry(mFirstBlock)->mPrevBlock; +} + +int32_t MediaCacheStream::BlockList::GetNextBlock(int32_t aBlock) const { + int32_t block = mEntries.GetEntry(aBlock)->mNextBlock; + if (block == mFirstBlock) return -1; + return block; +} + +int32_t MediaCacheStream::BlockList::GetPrevBlock(int32_t aBlock) const { + if (aBlock == mFirstBlock) return -1; + return mEntries.GetEntry(aBlock)->mPrevBlock; +} + +#ifdef DEBUG +void MediaCacheStream::BlockList::Verify() { + int32_t count = 0; + if (mFirstBlock >= 0) { + int32_t block = mFirstBlock; + do { + Entry* entry = mEntries.GetEntry(block); + NS_ASSERTION(mEntries.GetEntry(entry->mNextBlock)->mPrevBlock == block, + "Bad prev link"); + NS_ASSERTION(mEntries.GetEntry(entry->mPrevBlock)->mNextBlock == block, + "Bad next link"); + block = entry->mNextBlock; + ++count; + } while (block != mFirstBlock); + } + NS_ASSERTION(count == mCount, "Bad count"); +} +#endif + +static void UpdateSwappedBlockIndex(int32_t* aBlockIndex, int32_t aBlock1Index, + int32_t aBlock2Index) { + int32_t index = *aBlockIndex; + if (index == aBlock1Index) { + *aBlockIndex = aBlock2Index; + } else if (index == aBlock2Index) { + *aBlockIndex = aBlock1Index; + } +} + +void MediaCacheStream::BlockList::NotifyBlockSwapped(int32_t aBlockIndex1, + int32_t aBlockIndex2) { + Entry* e1 = mEntries.GetEntry(aBlockIndex1); + Entry* e2 = mEntries.GetEntry(aBlockIndex2); + int32_t e1Prev = -1, e1Next = -1, e2Prev = -1, e2Next = -1; + + // Fix mFirstBlock + UpdateSwappedBlockIndex(&mFirstBlock, aBlockIndex1, aBlockIndex2); + + // Fix mNextBlock/mPrevBlock links. First capture previous/next links + // so we don't get confused due to aliasing. + if (e1) { + e1Prev = e1->mPrevBlock; + e1Next = e1->mNextBlock; + } + if (e2) { + e2Prev = e2->mPrevBlock; + e2Next = e2->mNextBlock; + } + // Update the entries. + if (e1) { + mEntries.GetEntry(e1Prev)->mNextBlock = aBlockIndex2; + mEntries.GetEntry(e1Next)->mPrevBlock = aBlockIndex2; + } + if (e2) { + mEntries.GetEntry(e2Prev)->mNextBlock = aBlockIndex1; + mEntries.GetEntry(e2Next)->mPrevBlock = aBlockIndex1; + } + + // Fix hashtable keys. First remove stale entries. + if (e1) { + e1Prev = e1->mPrevBlock; + e1Next = e1->mNextBlock; + mEntries.RemoveEntry(e1); + // Refresh pointer after hashtable mutation. + e2 = mEntries.GetEntry(aBlockIndex2); + } + if (e2) { + e2Prev = e2->mPrevBlock; + e2Next = e2->mNextBlock; + mEntries.RemoveEntry(e2); + } + // Put new entries back. + if (e1) { + e1 = mEntries.PutEntry(aBlockIndex2); + e1->mNextBlock = e1Next; + e1->mPrevBlock = e1Prev; + } + if (e2) { + e2 = mEntries.PutEntry(aBlockIndex1); + e2->mNextBlock = e2Next; + e2->mPrevBlock = e2Prev; + } +} + +void MediaCache::FlushInternal(AutoLock& aLock) { + for (uint32_t blockIndex = 0; blockIndex < mIndex.Length(); ++blockIndex) { + FreeBlock(aLock, blockIndex); + } + + // Truncate index array. + Truncate(); + NS_ASSERTION(mIndex.Length() == 0, "Blocks leaked?"); + // Reset block cache to its pristine state. + mBlockCache->Flush(); +} + +void MediaCache::Flush() { + MOZ_ASSERT(NS_IsMainThread()); + nsCOMPtr r = NS_NewRunnableFunction( + "MediaCache::Flush", [self = RefPtr(this)]() mutable { + AutoLock lock(self->mMonitor); + self->FlushInternal(lock); + // Ensure MediaCache is deleted on the main thread. 
+ NS_ReleaseOnMainThread("MediaCache::Flush", self.forget()); + }); + sThread->Dispatch(r.forget()); +} + +void MediaCache::CloseStreamsForPrivateBrowsing() { + MOZ_ASSERT(NS_IsMainThread()); + sThread->Dispatch(NS_NewRunnableFunction( + "MediaCache::CloseStreamsForPrivateBrowsing", + [self = RefPtr(this)]() mutable { + AutoLock lock(self->mMonitor); + // Copy mStreams since CloseInternal() will change the array. + for (MediaCacheStream* s : self->mStreams.Clone()) { + if (s->mIsPrivateBrowsing) { + s->CloseInternal(lock); + } + } + // Ensure MediaCache is deleted on the main thread. + NS_ReleaseOnMainThread("MediaCache::CloseStreamsForPrivateBrowsing", + self.forget()); + })); +} + +/* static */ +RefPtr MediaCache::GetMediaCache(int64_t aContentLength, + bool aIsPrivateBrowsing) { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + if (!sThreadInit) { + sThreadInit = true; + nsCOMPtr thread; + nsresult rv = NS_NewNamedThread("MediaCache", getter_AddRefs(thread)); + if (NS_FAILED(rv)) { + NS_WARNING("Failed to create a thread for MediaCache."); + return nullptr; + } + sThread = ToRefPtr(std::move(thread)); + + static struct ClearThread { + // Called during shutdown to clear sThread. + void operator=(std::nullptr_t) { + MOZ_ASSERT(sThread, "We should only clear sThread once."); + sThread->Shutdown(); + sThread = nullptr; + } + } sClearThread; + ClearOnShutdown(&sClearThread, ShutdownPhase::XPCOMShutdownThreads); + } + + if (!sThread) { + return nullptr; + } + + const int64_t mediaMemoryCacheMaxSize = + static_cast(StaticPrefs::media_memory_cache_max_size()) * 1024; + + // Force usage of in-memory cache if we are in private browsing mode + // and the forceMediaMemoryCache pref is set + // We will not attempt to create an on-disk cache if this is the case + const bool forceMediaMemoryCache = + aIsPrivateBrowsing && + StaticPrefs::browser_privatebrowsing_forceMediaMemoryCache(); + + // Alternatively, use an in-memory cache if the media will fit entirely + // in memory + // aContentLength < 0 indicates we do not know content's actual size + const bool contentFitsInMediaMemoryCache = + (aContentLength > 0) && (aContentLength <= mediaMemoryCacheMaxSize); + + // Try to allocate a memory cache for our content + if (contentFitsInMediaMemoryCache || forceMediaMemoryCache) { + // Figure out how large our cache should be + int64_t cacheSize = 0; + if (contentFitsInMediaMemoryCache) { + cacheSize = aContentLength; + } else if (forceMediaMemoryCache) { + // Unknown content length, we'll give the maximum allowed cache size + // just to be sure. + if (aContentLength < 0) { + cacheSize = mediaMemoryCacheMaxSize; + } else { + // If the content length is less than the maximum allowed cache size, + // use that, otherwise we cap it to max size. + cacheSize = std::min(aContentLength, mediaMemoryCacheMaxSize); + } + } + + RefPtr bc = new MemoryBlockCache(cacheSize); + nsresult rv = bc->Init(); + if (NS_SUCCEEDED(rv)) { + RefPtr mc = new MediaCache(bc); + LOG("GetMediaCache(%" PRIi64 ") -> Memory MediaCache %p", aContentLength, + mc.get()); + return mc; + } + + // MemoryBlockCache initialization failed. + // If we require use of a memory media cache, we will bail here. + // Otherwise use a file-backed MediaCache below. 
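// To summarize the paths through this function (a descriptive note; pref names
// as referenced via StaticPrefs above):
//   - content length known and <= media.memory_cache_max_size
//       -> a per-resource memory-backed MediaCache
//   - private browsing with browser.privatebrowsing.forceMediaMemoryCache set
//       -> memory-backed only, or nullptr if that allocation fails
//   - everything else
//       -> the single shared file-backed MediaCache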
+ if (forceMediaMemoryCache) { + return nullptr; + } + } + + if (gMediaCache) { + LOG("GetMediaCache(%" PRIi64 ") -> Existing file-backed MediaCache", + aContentLength); + return gMediaCache; + } + + RefPtr bc = new FileBlockCache(); + nsresult rv = bc->Init(); + if (NS_SUCCEEDED(rv)) { + gMediaCache = new MediaCache(bc); + LOG("GetMediaCache(%" PRIi64 ") -> Created file-backed MediaCache", + aContentLength); + } else { + LOG("GetMediaCache(%" PRIi64 ") -> Failed to create file-backed MediaCache", + aContentLength); + } + + return gMediaCache; +} + +nsresult MediaCache::ReadCacheFile(AutoLock&, int64_t aOffset, void* aData, + int32_t aLength, int32_t* aBytes) { + if (!mBlockCache) { + return NS_ERROR_FAILURE; + } + return mBlockCache->Read(aOffset, reinterpret_cast(aData), aLength, + aBytes); +} + +// Allowed range is whatever can be accessed with an int32_t block index. +static bool IsOffsetAllowed(int64_t aOffset) { + return aOffset < (int64_t(INT32_MAX) + 1) * MediaCache::BLOCK_SIZE && + aOffset >= 0; +} + +// Convert 64-bit offset to 32-bit block index. +// Assumes offset range-check was already done. +static int32_t OffsetToBlockIndexUnchecked(int64_t aOffset) { + // Still check for allowed range in debug builds, to catch out-of-range + // issues early during development. + MOZ_ASSERT(IsOffsetAllowed(aOffset)); + return int32_t(aOffset / MediaCache::BLOCK_SIZE); +} + +// Convert 64-bit offset to 32-bit block index. -1 if out of allowed range. +static int32_t OffsetToBlockIndex(int64_t aOffset) { + return IsOffsetAllowed(aOffset) ? OffsetToBlockIndexUnchecked(aOffset) : -1; +} + +// Convert 64-bit offset to 32-bit offset inside a block. +// Will not fail (even if offset is outside allowed range), so there is no +// need to check for errors. +static int32_t OffsetInBlock(int64_t aOffset) { + // Still check for allowed range in debug builds, to catch out-of-range + // issues early during development. + MOZ_ASSERT(IsOffsetAllowed(aOffset)); + return int32_t(aOffset % MediaCache::BLOCK_SIZE); +} + +int32_t MediaCache::FindBlockForIncomingData(AutoLock& aLock, TimeStamp aNow, + MediaCacheStream* aStream, + int32_t aStreamBlockIndex) { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + + int32_t blockIndex = + FindReusableBlock(aLock, aNow, aStream, aStreamBlockIndex, INT32_MAX); + + if (blockIndex < 0 || !IsBlockFree(blockIndex)) { + // The block returned is already allocated. + // Don't reuse it if a) there's room to expand the cache or + // b) the data we're going to store in the free block is not higher + // priority than the data already stored in the free block. + // The latter can lead us to go over the cache limit a bit. + if ((mIndex.Length() < + uint32_t(mBlockCache->GetMaxBlocks(MediaCache::CacheSize())) || + blockIndex < 0 || + PredictNextUseForIncomingData(aLock, aStream) >= + PredictNextUse(aLock, aNow, blockIndex))) { + blockIndex = mIndex.Length(); + // XXX(Bug 1631371) Check if this should use a fallible operation as it + // pretended earlier. 
+ mIndex.AppendElement(); + mFreeBlocks.AddFirstBlock(blockIndex); + return blockIndex; + } + } + + return blockIndex; +} + +bool MediaCache::BlockIsReusable(AutoLock&, int32_t aBlockIndex) { + Block* block = &mIndex[aBlockIndex]; + for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { + MediaCacheStream* stream = block->mOwners[i].mStream; + if (stream->mPinCount > 0 || + uint32_t(OffsetToBlockIndex(stream->mStreamOffset)) == + block->mOwners[i].mStreamBlock) { + return false; + } + } + return true; +} + +void MediaCache::AppendMostReusableBlock(AutoLock& aLock, BlockList* aBlockList, + nsTArray* aResult, + int32_t aBlockIndexLimit) { + int32_t blockIndex = aBlockList->GetLastBlock(); + if (blockIndex < 0) return; + do { + // Don't consider blocks for pinned streams, or blocks that are + // beyond the specified limit, or a block that contains a stream's + // current read position (such a block contains both played data + // and readahead data) + if (blockIndex < aBlockIndexLimit && BlockIsReusable(aLock, blockIndex)) { + aResult->AppendElement(blockIndex); + return; + } + blockIndex = aBlockList->GetPrevBlock(blockIndex); + } while (blockIndex >= 0); +} + +int32_t MediaCache::FindReusableBlock(AutoLock& aLock, TimeStamp aNow, + MediaCacheStream* aForStream, + int32_t aForStreamBlock, + int32_t aMaxSearchBlockIndex) { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + + uint32_t length = + std::min(uint32_t(aMaxSearchBlockIndex), uint32_t(mIndex.Length())); + + if (aForStream && aForStreamBlock > 0 && + uint32_t(aForStreamBlock) <= aForStream->mBlocks.Length()) { + int32_t prevCacheBlock = aForStream->mBlocks[aForStreamBlock - 1]; + if (prevCacheBlock >= 0) { + uint32_t freeBlockScanEnd = + std::min(length, prevCacheBlock + FREE_BLOCK_SCAN_LIMIT); + for (uint32_t i = prevCacheBlock; i < freeBlockScanEnd; ++i) { + if (IsBlockFree(i)) return i; + } + } + } + + if (!mFreeBlocks.IsEmpty()) { + int32_t blockIndex = mFreeBlocks.GetFirstBlock(); + do { + if (blockIndex < aMaxSearchBlockIndex) return blockIndex; + blockIndex = mFreeBlocks.GetNextBlock(blockIndex); + } while (blockIndex >= 0); + } + + // Build a list of the blocks we should consider for the "latest + // predicted time of next use". We can exploit the fact that the block + // linked lists are ordered by increasing time of next use. This is + // actually the whole point of having the linked lists. + AutoTArray candidates; + for (uint32_t i = 0; i < mStreams.Length(); ++i) { + MediaCacheStream* stream = mStreams[i]; + if (stream->mPinCount > 0) { + // No point in even looking at this stream's blocks + continue; + } + + AppendMostReusableBlock(aLock, &stream->mMetadataBlocks, &candidates, + length); + AppendMostReusableBlock(aLock, &stream->mPlayedBlocks, &candidates, length); + + // Don't consider readahead blocks in non-seekable streams. If we + // remove the block we won't be able to seek back to read it later. 
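+    // (Readahead blocks of non-seekable streams are still counted against the
+    // NONSEEKABLE_READAHEAD_MAX limit in DetermineActionsForStreams().)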
+ if (stream->mIsTransportSeekable) { + AppendMostReusableBlock(aLock, &stream->mReadaheadBlocks, &candidates, + length); + } + } + + TimeDuration latestUse; + int32_t latestUseBlock = -1; + for (uint32_t i = 0; i < candidates.Length(); ++i) { + TimeDuration nextUse = PredictNextUse(aLock, aNow, candidates[i]); + if (nextUse > latestUse) { + latestUse = nextUse; + latestUseBlock = candidates[i]; + } + } + + return latestUseBlock; +} + +MediaCache::BlockList* MediaCache::GetListForBlock(AutoLock&, + BlockOwner* aBlock) { + switch (aBlock->mClass) { + case METADATA_BLOCK: + NS_ASSERTION(aBlock->mStream, "Metadata block has no stream?"); + return &aBlock->mStream->mMetadataBlocks; + case PLAYED_BLOCK: + NS_ASSERTION(aBlock->mStream, "Metadata block has no stream?"); + return &aBlock->mStream->mPlayedBlocks; + case READAHEAD_BLOCK: + NS_ASSERTION(aBlock->mStream, "Readahead block has no stream?"); + return &aBlock->mStream->mReadaheadBlocks; + default: + NS_ERROR("Invalid block class"); + return nullptr; + } +} + +MediaCache::BlockOwner* MediaCache::GetBlockOwner(AutoLock&, + int32_t aBlockIndex, + MediaCacheStream* aStream) { + Block* block = &mIndex[aBlockIndex]; + for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { + if (block->mOwners[i].mStream == aStream) return &block->mOwners[i]; + } + return nullptr; +} + +void MediaCache::SwapBlocks(AutoLock& aLock, int32_t aBlockIndex1, + int32_t aBlockIndex2) { + Block* block1 = &mIndex[aBlockIndex1]; + Block* block2 = &mIndex[aBlockIndex2]; + + block1->mOwners.SwapElements(block2->mOwners); + + // Now all references to block1 have to be replaced with block2 and + // vice versa. + // First update stream references to blocks via mBlocks. + const Block* blocks[] = {block1, block2}; + int32_t blockIndices[] = {aBlockIndex1, aBlockIndex2}; + for (int32_t i = 0; i < 2; ++i) { + for (uint32_t j = 0; j < blocks[i]->mOwners.Length(); ++j) { + const BlockOwner* b = &blocks[i]->mOwners[j]; + b->mStream->mBlocks[b->mStreamBlock] = blockIndices[i]; + } + } + + // Now update references to blocks in block lists. + mFreeBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); + + nsTHashSet visitedStreams; + + for (int32_t i = 0; i < 2; ++i) { + for (uint32_t j = 0; j < blocks[i]->mOwners.Length(); ++j) { + MediaCacheStream* stream = blocks[i]->mOwners[j].mStream; + // Make sure that we don't update the same stream twice --- that + // would result in swapping the block references back again! 
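+      // EnsureInserted() returns false when |stream| is already in the set,
+      // so each stream's block lists are updated exactly once.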
+ if (!visitedStreams.EnsureInserted(stream)) continue; + stream->mReadaheadBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); + stream->mPlayedBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); + stream->mMetadataBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); + } + } + + Verify(aLock); +} + +void MediaCache::RemoveBlockOwner(AutoLock& aLock, int32_t aBlockIndex, + MediaCacheStream* aStream) { + Block* block = &mIndex[aBlockIndex]; + for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { + BlockOwner* bo = &block->mOwners[i]; + if (bo->mStream == aStream) { + GetListForBlock(aLock, bo)->RemoveBlock(aBlockIndex); + bo->mStream->mBlocks[bo->mStreamBlock] = -1; + block->mOwners.RemoveElementAt(i); + if (block->mOwners.IsEmpty()) { + mFreeBlocks.AddFirstBlock(aBlockIndex); + } + return; + } + } +} + +void MediaCache::AddBlockOwnerAsReadahead(AutoLock& aLock, int32_t aBlockIndex, + MediaCacheStream* aStream, + int32_t aStreamBlockIndex) { + Block* block = &mIndex[aBlockIndex]; + if (block->mOwners.IsEmpty()) { + mFreeBlocks.RemoveBlock(aBlockIndex); + } + BlockOwner* bo = block->mOwners.AppendElement(); + bo->mStream = aStream; + bo->mStreamBlock = aStreamBlockIndex; + aStream->mBlocks[aStreamBlockIndex] = aBlockIndex; + bo->mClass = READAHEAD_BLOCK; + InsertReadaheadBlock(aLock, bo, aBlockIndex); +} + +void MediaCache::FreeBlock(AutoLock& aLock, int32_t aBlock) { + Block* block = &mIndex[aBlock]; + if (block->mOwners.IsEmpty()) { + // already free + return; + } + + LOG("Released block %d", aBlock); + + for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { + BlockOwner* bo = &block->mOwners[i]; + GetListForBlock(aLock, bo)->RemoveBlock(aBlock); + bo->mStream->mBlocks[bo->mStreamBlock] = -1; + } + block->mOwners.Clear(); + mFreeBlocks.AddFirstBlock(aBlock); + Verify(aLock); +} + +TimeDuration MediaCache::PredictNextUse(AutoLock&, TimeStamp aNow, + int32_t aBlock) { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + NS_ASSERTION(!IsBlockFree(aBlock), "aBlock is free"); + + Block* block = &mIndex[aBlock]; + // Blocks can be belong to multiple streams. The predicted next use + // time is the earliest time predicted by any of the streams. + TimeDuration result; + for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { + BlockOwner* bo = &block->mOwners[i]; + TimeDuration prediction; + switch (bo->mClass) { + case METADATA_BLOCK: + // This block should be managed in LRU mode. For metadata we predict + // that the time until the next use is the time since the last use. 
+ prediction = aNow - bo->mLastUseTime; + break; + case PLAYED_BLOCK: { + // This block should be managed in LRU mode, and we should impose + // a "replay delay" to reflect the likelihood of replay happening + NS_ASSERTION(static_cast(bo->mStreamBlock) * BLOCK_SIZE < + bo->mStream->mStreamOffset, + "Played block after the current stream position?"); + int64_t bytesBehind = + bo->mStream->mStreamOffset - + static_cast(bo->mStreamBlock) * BLOCK_SIZE; + int64_t millisecondsBehind = + bytesBehind * 1000 / bo->mStream->mPlaybackBytesPerSecond; + prediction = TimeDuration::FromMilliseconds(std::min( + millisecondsBehind * REPLAY_PENALTY_FACTOR, INT32_MAX)); + break; + } + case READAHEAD_BLOCK: { + int64_t bytesAhead = + static_cast(bo->mStreamBlock) * BLOCK_SIZE - + bo->mStream->mStreamOffset; + NS_ASSERTION(bytesAhead >= 0, + "Readahead block before the current stream position?"); + int64_t millisecondsAhead = + bytesAhead * 1000 / bo->mStream->mPlaybackBytesPerSecond; + prediction = TimeDuration::FromMilliseconds( + std::min(millisecondsAhead, INT32_MAX)); + break; + } + default: + NS_ERROR("Invalid class for predicting next use"); + return TimeDuration(0); + } + if (i == 0 || prediction < result) { + result = prediction; + } + } + return result; +} + +TimeDuration MediaCache::PredictNextUseForIncomingData( + AutoLock&, MediaCacheStream* aStream) { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + + int64_t bytesAhead = aStream->mChannelOffset - aStream->mStreamOffset; + if (bytesAhead <= -BLOCK_SIZE) { + // Hmm, no idea when data behind us will be used. Guess 24 hours. + return TimeDuration::FromSeconds(24 * 60 * 60); + } + if (bytesAhead <= 0) return TimeDuration(0); + int64_t millisecondsAhead = + bytesAhead * 1000 / aStream->mPlaybackBytesPerSecond; + return TimeDuration::FromMilliseconds( + std::min(millisecondsAhead, INT32_MAX)); +} + +void MediaCache::Update() { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + + AutoLock lock(mMonitor); + + mUpdateQueued = false; +#ifdef DEBUG + mInUpdate = true; +#endif + const TimeStamp now = TimeStamp::Now(); + const int32_t freeBlockCount = TrimCacheIfNeeded(lock, now); + + // The action to use for each stream. We store these so we can make + // decisions while holding the cache lock but implement those decisions + // without holding the cache lock, since we need to call out to + // stream, decoder and element code. + AutoTArray actions; + DetermineActionsForStreams(lock, now, actions, freeBlockCount); + +#ifdef DEBUG + mInUpdate = false; +#endif + + // First, update the mCacheSuspended/mCacheEnded flags so that they're all + // correct when we fire our CacheClient commands below. Those commands can + // rely on these flags being set correctly for all streams. 
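+  // (A SEEK action also clears mChannelEnded, since a new channel load will
+  // follow.)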
+ for (uint32_t i = 0; i < mStreams.Length(); ++i) { + MediaCacheStream* stream = mStreams[i]; + switch (actions[i].mTag) { + case StreamAction::SEEK: + stream->mCacheSuspended = false; + stream->mChannelEnded = false; + break; + case StreamAction::RESUME: + stream->mCacheSuspended = false; + break; + case StreamAction::SUSPEND: + stream->mCacheSuspended = true; + break; + default: + break; + } + } + + for (uint32_t i = 0; i < mStreams.Length(); ++i) { + MediaCacheStream* stream = mStreams[i]; + switch (actions[i].mTag) { + case StreamAction::SEEK: + LOG("Stream %p CacheSeek to %" PRId64 " (resume=%d)", stream, + actions[i].mSeekTarget, actions[i].mResume); + stream->mClient->CacheClientSeek(actions[i].mSeekTarget, + actions[i].mResume); + break; + case StreamAction::RESUME: + LOG("Stream %p Resumed", stream); + stream->mClient->CacheClientResume(); + QueueSuspendedStatusUpdate(lock, stream->mResourceID); + break; + case StreamAction::SUSPEND: + LOG("Stream %p Suspended", stream); + stream->mClient->CacheClientSuspend(); + QueueSuspendedStatusUpdate(lock, stream->mResourceID); + break; + default: + break; + } + } + + // Notify streams about the suspended status changes. + for (uint32_t i = 0; i < mSuspendedStatusToNotify.Length(); ++i) { + MediaCache::ResourceStreamIterator iter(this, mSuspendedStatusToNotify[i]); + while (MediaCacheStream* stream = iter.Next(lock)) { + stream->mClient->CacheClientNotifySuspendedStatusChanged( + stream->AreAllStreamsForResourceSuspended(lock)); + } + } + mSuspendedStatusToNotify.Clear(); +} + +int32_t MediaCache::TrimCacheIfNeeded(AutoLock& aLock, const TimeStamp& aNow) { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + + const int32_t maxBlocks = mBlockCache->GetMaxBlocks(MediaCache::CacheSize()); + + int32_t freeBlockCount = mFreeBlocks.GetCount(); + TimeDuration latestPredictedUseForOverflow = 0; + if (mIndex.Length() > uint32_t(maxBlocks)) { + // Try to trim back the cache to its desired maximum size. The cache may + // have overflowed simply due to data being received when we have + // no blocks in the main part of the cache that are free or lower + // priority than the new data. The cache can also be overflowing because + // the media.cache_size preference was reduced. + // First, figure out what the least valuable block in the cache overflow + // is. We don't want to replace any blocks in the main part of the + // cache whose expected time of next use is earlier or equal to that. + // If we allow that, we can effectively end up discarding overflowing + // blocks (by moving an overflowing block to the main part of the cache, + // and then overwriting it with another overflowing block), and we try + // to avoid that since it requires HTTP seeks. + // We also use this loop to eliminate overflowing blocks from + // freeBlockCount. + for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks; + --blockIndex) { + if (IsBlockFree(blockIndex)) { + // Don't count overflowing free blocks in our free block count + --freeBlockCount; + continue; + } + TimeDuration predictedUse = PredictNextUse(aLock, aNow, blockIndex); + latestPredictedUseForOverflow = + std::max(latestPredictedUseForOverflow, predictedUse); + } + } else { + freeBlockCount += maxBlocks - mIndex.Length(); + } + + // Now try to move overflowing blocks to the main part of the cache. 
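+  // Overflowing blocks are those at indices >= maxBlocks; walk them from the
+  // end of mIndex downwards.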
+ for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks; + --blockIndex) { + if (IsBlockFree(blockIndex)) continue; + + Block* block = &mIndex[blockIndex]; + // Try to relocate the block close to other blocks for the first stream. + // There is no point in trying to make it close to other blocks in + // *all* the streams it might belong to. + int32_t destinationBlockIndex = + FindReusableBlock(aLock, aNow, block->mOwners[0].mStream, + block->mOwners[0].mStreamBlock, maxBlocks); + if (destinationBlockIndex < 0) { + // Nowhere to place this overflow block. We won't be able to + // place any more overflow blocks. + break; + } + + // Don't evict |destinationBlockIndex| if it is within [cur, end) otherwise + // a new channel will be opened to download this block again which is bad. + bool inCurrentCachedRange = false; + for (BlockOwner& owner : mIndex[destinationBlockIndex].mOwners) { + MediaCacheStream* stream = owner.mStream; + int64_t end = OffsetToBlockIndexUnchecked( + stream->GetCachedDataEndInternal(aLock, stream->mStreamOffset)); + int64_t cur = OffsetToBlockIndexUnchecked(stream->mStreamOffset); + if (cur <= owner.mStreamBlock && owner.mStreamBlock < end) { + inCurrentCachedRange = true; + break; + } + } + if (inCurrentCachedRange) { + continue; + } + + if (IsBlockFree(destinationBlockIndex) || + PredictNextUse(aLock, aNow, destinationBlockIndex) > + latestPredictedUseForOverflow) { + // Reuse blocks in the main part of the cache that are less useful than + // the least useful overflow blocks + + nsresult rv = mBlockCache->MoveBlock(blockIndex, destinationBlockIndex); + + if (NS_SUCCEEDED(rv)) { + // We successfully copied the file data. + LOG("Swapping blocks %d and %d (trimming cache)", blockIndex, + destinationBlockIndex); + // Swapping the block metadata here lets us maintain the + // correct positions in the linked lists + SwapBlocks(aLock, blockIndex, destinationBlockIndex); + // Free the overflowing block even if the copy failed. + LOG("Released block %d (trimming cache)", blockIndex); + FreeBlock(aLock, blockIndex); + } + } else { + LOG("Could not trim cache block %d (destination %d, " + "predicted next use %f, latest predicted use for overflow %f", + blockIndex, destinationBlockIndex, + PredictNextUse(aLock, aNow, destinationBlockIndex).ToSeconds(), + latestPredictedUseForOverflow.ToSeconds()); + } + } + // Try chopping back the array of cache entries and the cache file. 
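+  // Truncate() drops trailing free blocks from both mIndex and mFreeBlocks.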
+ Truncate(); + return freeBlockCount; +} + +void MediaCache::DetermineActionsForStreams(AutoLock& aLock, + const TimeStamp& aNow, + nsTArray& aActions, + int32_t aFreeBlockCount) { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + + // Count the blocks allocated for readahead of non-seekable streams + // (these blocks can't be freed but we don't want them to monopolize the + // cache) + int32_t nonSeekableReadaheadBlockCount = 0; + for (uint32_t i = 0; i < mStreams.Length(); ++i) { + MediaCacheStream* stream = mStreams[i]; + if (!stream->mIsTransportSeekable) { + nonSeekableReadaheadBlockCount += stream->mReadaheadBlocks.GetCount(); + } + } + + // If freeBlockCount is zero, then compute the latest of + // the predicted next-uses for all blocks + TimeDuration latestNextUse; + const int32_t maxBlocks = mBlockCache->GetMaxBlocks(MediaCache::CacheSize()); + if (aFreeBlockCount == 0) { + const int32_t reusableBlock = + FindReusableBlock(aLock, aNow, nullptr, 0, maxBlocks); + if (reusableBlock >= 0) { + latestNextUse = PredictNextUse(aLock, aNow, reusableBlock); + } + } + + for (uint32_t i = 0; i < mStreams.Length(); ++i) { + aActions.AppendElement(StreamAction{}); + + MediaCacheStream* stream = mStreams[i]; + if (stream->mClosed) { + LOG("Stream %p closed", stream); + continue; + } + + // We make decisions based on mSeekTarget when there is a pending seek. + // Otherwise we will keep issuing seek requests until mChannelOffset + // is changed by NotifyDataStarted() which is bad. + const int64_t channelOffset = stream->mSeekTarget != -1 + ? stream->mSeekTarget + : stream->mChannelOffset; + + // Figure out where we should be reading from. It's the first + // uncached byte after the current mStreamOffset. + const int64_t dataOffset = + stream->GetCachedDataEndInternal(aLock, stream->mStreamOffset); + MOZ_ASSERT(dataOffset >= 0); + + // Compute where we'd actually seek to to read at readOffset + int64_t desiredOffset = dataOffset; + if (stream->mIsTransportSeekable) { + if (desiredOffset > channelOffset && + desiredOffset <= channelOffset + SEEK_VS_READ_THRESHOLD) { + // Assume it's more efficient to just keep reading up to the + // desired position instead of trying to seek + desiredOffset = channelOffset; + } + } else { + // We can't seek directly to the desired offset... + if (channelOffset > desiredOffset) { + // Reading forward won't get us anywhere, we need to go backwards. + // Seek back to 0 (the client will reopen the stream) and then + // read forward. + NS_WARNING("Can't seek backwards, so seeking to 0"); + desiredOffset = 0; + // Flush cached blocks out, since if this is a live stream + // the cached data may be completely different next time we + // read it. We have to assume that live streams don't + // advertise themselves as being seekable... + ReleaseStreamBlocks(aLock, stream); + } else { + // otherwise reading forward is looking good, so just stay where we + // are and don't trigger a channel seek! + desiredOffset = channelOffset; + } + } + + // Figure out if we should be reading data now or not. It's amazing + // how complex this is, but each decision is simple enough. + bool enableReading; + if (stream->mStreamLength >= 0 && dataOffset >= stream->mStreamLength) { + // We want data at the end of the stream, where there's nothing to + // read. 
We don't want to try to read if we're suspended, because that + // might create a new channel and seek unnecessarily (and incorrectly, + // since HTTP doesn't allow seeking to the actual EOF), and we don't want + // to suspend if we're not suspended and already reading at the end of + // the stream, since there just might be more data than the server + // advertised with Content-Length, and we may as well keep reading. + // But we don't want to seek to the end of the stream if we're not + // already there. + LOG("Stream %p at end of stream", stream); + enableReading = + !stream->mCacheSuspended && stream->mStreamLength == channelOffset; + } else if (desiredOffset < stream->mStreamOffset) { + // We're reading to try to catch up to where the current stream + // reader wants to be. Better not stop. + LOG("Stream %p catching up", stream); + enableReading = true; + } else if (desiredOffset < stream->mStreamOffset + BLOCK_SIZE) { + // The stream reader is waiting for us, or nearly so. Better feed it. + LOG("Stream %p feeding reader", stream); + enableReading = true; + } else if (!stream->mIsTransportSeekable && + nonSeekableReadaheadBlockCount >= + maxBlocks * NONSEEKABLE_READAHEAD_MAX) { + // This stream is not seekable and there are already too many blocks + // being cached for readahead for nonseekable streams (which we can't + // free). So stop reading ahead now. + LOG("Stream %p throttling non-seekable readahead", stream); + enableReading = false; + } else if (mIndex.Length() > uint32_t(maxBlocks)) { + // We're in the process of bringing the cache size back to the + // desired limit, so don't bring in more data yet + LOG("Stream %p throttling to reduce cache size", stream); + enableReading = false; + } else { + TimeDuration predictedNewDataUse = + PredictNextUseForIncomingData(aLock, stream); + + if (stream->mThrottleReadahead && stream->mCacheSuspended && + predictedNewDataUse.ToSeconds() > MediaCache::ResumeThreshold()) { + // Don't need data for a while, so don't bother waking up the stream + LOG("Stream %p avoiding wakeup since more data is not needed", stream); + enableReading = false; + } else if (stream->mThrottleReadahead && + predictedNewDataUse.ToSeconds() > + MediaCache::ReadaheadLimit()) { + // Don't read ahead more than this much + LOG("Stream %p throttling to avoid reading ahead too far", stream); + enableReading = false; + } else if (aFreeBlockCount > 0) { + // Free blocks in the cache, so keep reading + LOG("Stream %p reading since there are free blocks", stream); + enableReading = true; + } else if (latestNextUse <= TimeDuration(0)) { + // No reusable blocks, so can't read anything + LOG("Stream %p throttling due to no reusable blocks", stream); + enableReading = false; + } else { + // Read ahead if the data we expect to read is more valuable than + // the least valuable block in the main part of the cache + LOG("Stream %p predict next data in %f, current worst block is %f", + stream, predictedNewDataUse.ToSeconds(), latestNextUse.ToSeconds()); + enableReading = predictedNewDataUse < latestNextUse; + } + } + + if (enableReading) { + for (uint32_t j = 0; j < i; ++j) { + MediaCacheStream* other = mStreams[j]; + if (other->mResourceID == stream->mResourceID && !other->mClosed && + !other->mClientSuspended && !other->mChannelEnded && + OffsetToBlockIndexUnchecked(other->mSeekTarget != -1 + ? other->mSeekTarget + : other->mChannelOffset) == + OffsetToBlockIndexUnchecked(desiredOffset)) { + // This block is already going to be read by the other stream. 
+ // So don't try to read it from this stream as well. + enableReading = false; + LOG("Stream %p waiting on same block (%" PRId32 ") from stream %p", + stream, OffsetToBlockIndexUnchecked(desiredOffset), other); + break; + } + } + } + + if (channelOffset != desiredOffset && enableReading) { + // We need to seek now. + NS_ASSERTION(stream->mIsTransportSeekable || desiredOffset == 0, + "Trying to seek in a non-seekable stream!"); + // Round seek offset down to the start of the block. This is essential + // because we don't want to think we have part of a block already + // in mPartialBlockBuffer. + stream->mSeekTarget = + OffsetToBlockIndexUnchecked(desiredOffset) * BLOCK_SIZE; + aActions[i].mTag = StreamAction::SEEK; + aActions[i].mResume = stream->mCacheSuspended; + aActions[i].mSeekTarget = stream->mSeekTarget; + } else if (enableReading && stream->mCacheSuspended) { + aActions[i].mTag = StreamAction::RESUME; + } else if (!enableReading && !stream->mCacheSuspended) { + aActions[i].mTag = StreamAction::SUSPEND; + } + LOG("Stream %p, mCacheSuspended=%d, enableReading=%d, action=%s", stream, + stream->mCacheSuspended, enableReading, + aActions[i].mTag == StreamAction::SEEK ? "SEEK" + : aActions[i].mTag == StreamAction::RESUME ? "RESUME" + : aActions[i].mTag == StreamAction::SUSPEND ? "SUSPEND" + : "NONE"); + } +} + +void MediaCache::QueueUpdate(AutoLock&) { + // Queuing an update while we're in an update raises a high risk of + // triggering endless events + NS_ASSERTION(!mInUpdate, "Queuing an update while we're in an update"); + if (mUpdateQueued) { + return; + } + mUpdateQueued = true; + sThread->Dispatch(NS_NewRunnableFunction( + "MediaCache::QueueUpdate", [self = RefPtr(this)]() mutable { + self->Update(); + // Ensure MediaCache is deleted on the main thread. 
+ NS_ReleaseOnMainThread("UpdateEvent::mMediaCache", self.forget()); + })); +} + +void MediaCache::QueueSuspendedStatusUpdate(AutoLock&, int64_t aResourceID) { + if (!mSuspendedStatusToNotify.Contains(aResourceID)) { + mSuspendedStatusToNotify.AppendElement(aResourceID); + } +} + +#ifdef DEBUG_VERIFY_CACHE +void MediaCache::Verify(AutoLock&) { + mFreeBlocks.Verify(); + for (uint32_t i = 0; i < mStreams.Length(); ++i) { + MediaCacheStream* stream = mStreams[i]; + stream->mReadaheadBlocks.Verify(); + stream->mPlayedBlocks.Verify(); + stream->mMetadataBlocks.Verify(); + + // Verify that the readahead blocks are listed in stream block order + int32_t block = stream->mReadaheadBlocks.GetFirstBlock(); + int32_t lastStreamBlock = -1; + while (block >= 0) { + uint32_t j = 0; + while (mIndex[block].mOwners[j].mStream != stream) { + ++j; + } + int32_t nextStreamBlock = int32_t(mIndex[block].mOwners[j].mStreamBlock); + NS_ASSERTION(lastStreamBlock < nextStreamBlock, + "Blocks not increasing in readahead stream"); + lastStreamBlock = nextStreamBlock; + block = stream->mReadaheadBlocks.GetNextBlock(block); + } + } +} +#endif + +void MediaCache::InsertReadaheadBlock(AutoLock& aLock, BlockOwner* aBlockOwner, + int32_t aBlockIndex) { + // Find the last block whose stream block is before aBlockIndex's + // stream block, and insert after it + MediaCacheStream* stream = aBlockOwner->mStream; + int32_t readaheadIndex = stream->mReadaheadBlocks.GetLastBlock(); + while (readaheadIndex >= 0) { + BlockOwner* bo = GetBlockOwner(aLock, readaheadIndex, stream); + NS_ASSERTION(bo, "stream must own its blocks"); + if (bo->mStreamBlock < aBlockOwner->mStreamBlock) { + stream->mReadaheadBlocks.AddAfter(aBlockIndex, readaheadIndex); + return; + } + NS_ASSERTION(bo->mStreamBlock > aBlockOwner->mStreamBlock, + "Duplicated blocks??"); + readaheadIndex = stream->mReadaheadBlocks.GetPrevBlock(readaheadIndex); + } + + stream->mReadaheadBlocks.AddFirstBlock(aBlockIndex); + Verify(aLock); +} + +void MediaCache::AllocateAndWriteBlock(AutoLock& aLock, + MediaCacheStream* aStream, + int32_t aStreamBlockIndex, + Span aData1, + Span aData2) { + MOZ_ASSERT(sThread->IsOnCurrentThread()); + + // Remove all cached copies of this block + ResourceStreamIterator iter(this, aStream->mResourceID); + while (MediaCacheStream* stream = iter.Next(aLock)) { + while (aStreamBlockIndex >= int32_t(stream->mBlocks.Length())) { + stream->mBlocks.AppendElement(-1); + } + if (stream->mBlocks[aStreamBlockIndex] >= 0) { + // We no longer want to own this block + int32_t globalBlockIndex = stream->mBlocks[aStreamBlockIndex]; + LOG("Released block %d from stream %p block %d(%" PRId64 ")", + globalBlockIndex, stream, aStreamBlockIndex, + aStreamBlockIndex * BLOCK_SIZE); + RemoveBlockOwner(aLock, globalBlockIndex, stream); + } + } + + // Extend the mBlocks array as necessary + + TimeStamp now = TimeStamp::Now(); + int32_t blockIndex = + FindBlockForIncomingData(aLock, now, aStream, aStreamBlockIndex); + if (blockIndex >= 0) { + FreeBlock(aLock, blockIndex); + + Block* block = &mIndex[blockIndex]; + LOG("Allocated block %d to stream %p block %d(%" PRId64 ")", blockIndex, + aStream, aStreamBlockIndex, aStreamBlockIndex * BLOCK_SIZE); + + ResourceStreamIterator iter(this, aStream->mResourceID); + while (MediaCacheStream* stream = iter.Next(aLock)) { + BlockOwner* bo = block->mOwners.AppendElement(); + if (!bo) { + // Roll back mOwners if any allocation fails. 
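+        // (The block is still listed in mFreeBlocks at this point, so there
+        // is nothing else to undo.)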
+ block->mOwners.Clear(); + return; + } + bo->mStream = stream; + } + + if (block->mOwners.IsEmpty()) { + // This happens when all streams with the resource id are closed. We can + // just return here now and discard the data. + return; + } + + // Tell each stream using this resource about the new block. + for (auto& bo : block->mOwners) { + bo.mStreamBlock = aStreamBlockIndex; + bo.mLastUseTime = now; + bo.mStream->mBlocks[aStreamBlockIndex] = blockIndex; + if (aStreamBlockIndex * BLOCK_SIZE < bo.mStream->mStreamOffset) { + bo.mClass = PLAYED_BLOCK; + // This must be the most-recently-used block, since we + // marked it as used now (which may be slightly bogus, but we'll + // treat it as used for simplicity). + GetListForBlock(aLock, &bo)->AddFirstBlock(blockIndex); + Verify(aLock); + } else { + // This may not be the latest readahead block, although it usually + // will be. We may have to scan for the right place to insert + // the block in the list. + bo.mClass = READAHEAD_BLOCK; + InsertReadaheadBlock(aLock, &bo, blockIndex); + } + } + + // Invariant: block->mOwners.IsEmpty() iff we can find an entry + // in mFreeBlocks for a given blockIndex. + MOZ_DIAGNOSTIC_ASSERT(!block->mOwners.IsEmpty()); + mFreeBlocks.RemoveBlock(blockIndex); + + nsresult rv = mBlockCache->WriteBlock(blockIndex, aData1, aData2); + if (NS_FAILED(rv)) { + LOG("Released block %d from stream %p block %d(%" PRId64 ")", blockIndex, + aStream, aStreamBlockIndex, aStreamBlockIndex * BLOCK_SIZE); + FreeBlock(aLock, blockIndex); + } + } + + // Queue an Update since the cache state has changed (for example + // we might want to stop loading because the cache is full) + QueueUpdate(aLock); +} + +void MediaCache::OpenStream(AutoLock& aLock, MediaCacheStream* aStream, + bool aIsClone) { + LOG("Stream %p opened, aIsClone=%d, mCacheSuspended=%d, " + "mDidNotifyDataEnded=%d", + aStream, aIsClone, aStream->mCacheSuspended, + aStream->mDidNotifyDataEnded); + mStreams.AppendElement(aStream); + + // A cloned stream should've got the ID from its original. + if (!aIsClone) { + MOZ_ASSERT(aStream->mResourceID == 0, "mResourceID has been initialized."); + aStream->mResourceID = AllocateResourceID(aLock); + } + + // We should have a valid ID now no matter it is cloned or not. + MOZ_ASSERT(aStream->mResourceID > 0, "mResourceID is invalid"); + + // Queue an update since a new stream has been opened. + QueueUpdate(aLock); +} + +void MediaCache::ReleaseStream(AutoLock&, MediaCacheStream* aStream) { + MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); + LOG("Stream %p closed", aStream); + mStreams.RemoveElement(aStream); + // The caller needs to call QueueUpdate() to re-run Update(). 
+} + +void MediaCache::ReleaseStreamBlocks(AutoLock& aLock, + MediaCacheStream* aStream) { + // XXX scanning the entire stream doesn't seem great, if not much of it + // is cached, but the only easy alternative is to scan the entire cache + // which isn't better + uint32_t length = aStream->mBlocks.Length(); + for (uint32_t i = 0; i < length; ++i) { + int32_t blockIndex = aStream->mBlocks[i]; + if (blockIndex >= 0) { + LOG("Released block %d from stream %p block %d(%" PRId64 ")", blockIndex, + aStream, i, i * BLOCK_SIZE); + RemoveBlockOwner(aLock, blockIndex, aStream); + } + } +} + +void MediaCache::Truncate() { + uint32_t end; + for (end = mIndex.Length(); end > 0; --end) { + if (!IsBlockFree(end - 1)) break; + mFreeBlocks.RemoveBlock(end - 1); + } + + if (end < mIndex.Length()) { + mIndex.TruncateLength(end); + // XXX We could truncate the cache file here, but we don't seem + // to have a cross-platform API for doing that. At least when all + // streams are closed we shut down the cache, which erases the + // file at that point. + } +} + +void MediaCache::NoteBlockUsage(AutoLock& aLock, MediaCacheStream* aStream, + int32_t aBlockIndex, int64_t aStreamOffset, + MediaCacheStream::ReadMode aMode, + TimeStamp aNow) { + if (aBlockIndex < 0) { + // this block is not in the cache yet + return; + } + + BlockOwner* bo = GetBlockOwner(aLock, aBlockIndex, aStream); + if (!bo) { + // this block is not in the cache yet + return; + } + + // The following check has to be <= because the stream offset has + // not yet been updated for the data read from this block + NS_ASSERTION(bo->mStreamBlock * BLOCK_SIZE <= aStreamOffset, + "Using a block that's behind the read position?"); + + GetListForBlock(aLock, bo)->RemoveBlock(aBlockIndex); + bo->mClass = + (aMode == MediaCacheStream::MODE_METADATA || bo->mClass == METADATA_BLOCK) + ? METADATA_BLOCK + : PLAYED_BLOCK; + // Since this is just being used now, it can definitely be at the front + // of mMetadataBlocks or mPlayedBlocks + GetListForBlock(aLock, bo)->AddFirstBlock(aBlockIndex); + bo->mLastUseTime = aNow; + Verify(aLock); +} + +void MediaCache::NoteSeek(AutoLock& aLock, MediaCacheStream* aStream, + int64_t aOldOffset) { + if (aOldOffset < aStream->mStreamOffset) { + // We seeked forward. Convert blocks from readahead to played. + // Any readahead block that intersects the seeked-over range must + // be converted. + int32_t blockIndex = OffsetToBlockIndex(aOldOffset); + if (blockIndex < 0) { + return; + } + int32_t endIndex = + std::min(OffsetToBlockIndex(aStream->mStreamOffset + (BLOCK_SIZE - 1)), + int32_t(aStream->mBlocks.Length())); + if (endIndex < 0) { + return; + } + TimeStamp now = TimeStamp::Now(); + while (blockIndex < endIndex) { + int32_t cacheBlockIndex = aStream->mBlocks[blockIndex]; + if (cacheBlockIndex >= 0) { + // Marking the block used may not be exactly what we want but + // it's simple + NoteBlockUsage(aLock, aStream, cacheBlockIndex, aStream->mStreamOffset, + MediaCacheStream::MODE_PLAYBACK, now); + } + ++blockIndex; + } + } else { + // We seeked backward. Convert from played to readahead. + // Any played block that is entirely after the start of the seeked-over + // range must be converted. 
+ int32_t blockIndex = + OffsetToBlockIndex(aStream->mStreamOffset + (BLOCK_SIZE - 1)); + if (blockIndex < 0) { + return; + } + int32_t endIndex = + std::min(OffsetToBlockIndex(aOldOffset + (BLOCK_SIZE - 1)), + int32_t(aStream->mBlocks.Length())); + if (endIndex < 0) { + return; + } + while (blockIndex < endIndex) { + MOZ_ASSERT(endIndex > 0); + int32_t cacheBlockIndex = aStream->mBlocks[endIndex - 1]; + if (cacheBlockIndex >= 0) { + BlockOwner* bo = GetBlockOwner(aLock, cacheBlockIndex, aStream); + NS_ASSERTION(bo, "Stream doesn't own its blocks?"); + if (bo->mClass == PLAYED_BLOCK) { + aStream->mPlayedBlocks.RemoveBlock(cacheBlockIndex); + bo->mClass = READAHEAD_BLOCK; + // Adding this as the first block is sure to be OK since + // this must currently be the earliest readahead block + // (that's why we're proceeding backwards from the end of + // the seeked range to the start) + aStream->mReadaheadBlocks.AddFirstBlock(cacheBlockIndex); + Verify(aLock); + } + } + --endIndex; + } + } +} + +void MediaCacheStream::NotifyLoadID(uint32_t aLoadID) { + MOZ_ASSERT(aLoadID > 0); + + nsCOMPtr r = NS_NewRunnableFunction( + "MediaCacheStream::NotifyLoadID", + [client = RefPtr(mClient), this, aLoadID]() { + AutoLock lock(mMediaCache->Monitor()); + mLoadID = aLoadID; + }); + OwnerThread()->Dispatch(r.forget()); +} + +void MediaCacheStream::NotifyDataStartedInternal(uint32_t aLoadID, + int64_t aOffset, + bool aSeekable, + int64_t aLength) { + MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); + MOZ_ASSERT(aLoadID > 0); + LOG("Stream %p DataStarted: %" PRId64 " aLoadID=%u aLength=%" PRId64, this, + aOffset, aLoadID, aLength); + + AutoLock lock(mMediaCache->Monitor()); + NS_WARNING_ASSERTION(aOffset == mSeekTarget || aOffset == mChannelOffset, + "Server is giving us unexpected offset"); + MOZ_ASSERT(aOffset >= 0); + if (aLength >= 0) { + mStreamLength = aLength; + } + mChannelOffset = aOffset; + if (mStreamLength >= 0) { + // If we started reading at a certain offset, then for sure + // the stream is at least that long. + mStreamLength = std::max(mStreamLength, mChannelOffset); + } + mLoadID = aLoadID; + + MOZ_ASSERT(aOffset == 0 || aSeekable, + "channel offset must be zero when we become non-seekable"); + mIsTransportSeekable = aSeekable; + // Queue an Update since we may change our strategy for dealing + // with this stream + mMediaCache->QueueUpdate(lock); + + // Reset mSeekTarget since the seek is completed so MediaCache::Update() will + // make decisions based on mChannelOffset instead of mSeekTarget. + mSeekTarget = -1; + + // Reset these flags since a new load has begun. + mChannelEnded = false; + mDidNotifyDataEnded = false; + + UpdateDownloadStatistics(lock); +} + +void MediaCacheStream::NotifyDataStarted(uint32_t aLoadID, int64_t aOffset, + bool aSeekable, int64_t aLength) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(aLoadID > 0); + + nsCOMPtr r = NS_NewRunnableFunction( + "MediaCacheStream::NotifyDataStarted", + [=, client = RefPtr(mClient)]() { + NotifyDataStartedInternal(aLoadID, aOffset, aSeekable, aLength); + }); + OwnerThread()->Dispatch(r.forget()); +} + +void MediaCacheStream::NotifyDataReceived(uint32_t aLoadID, uint32_t aCount, + const uint8_t* aData) { + MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); + MOZ_ASSERT(aLoadID > 0); + + AutoLock lock(mMediaCache->Monitor()); + if (mClosed) { + // Nothing to do if the stream is closed. 
+ return; + } + + LOG("Stream %p DataReceived at %" PRId64 " count=%u aLoadID=%u", this, + mChannelOffset, aCount, aLoadID); + + if (mLoadID != aLoadID) { + // mChannelOffset is updated to a new position when loading a new channel. + // We should discard the data coming from the old channel so it won't be + // stored to the wrong positoin. + return; + } + + mDownloadStatistics.AddBytes(aCount); + + // True if we commit any blocks to the cache. + bool cacheUpdated = false; + + auto source = Span(aData, aCount); + + // We process the data one block (or part of a block) at a time + while (!source.IsEmpty()) { + // The data we've collected so far in the partial block. + auto partial = Span(mPartialBlockBuffer.get(), + OffsetInBlock(mChannelOffset)); + + // The number of bytes needed to complete the partial block. + size_t remaining = BLOCK_SIZE - partial.Length(); + + if (source.Length() >= remaining) { + // We have a whole block now to write it out. + mMediaCache->AllocateAndWriteBlock( + lock, this, OffsetToBlockIndexUnchecked(mChannelOffset), partial, + source.First(remaining)); + source = source.From(remaining); + mChannelOffset += remaining; + cacheUpdated = true; + } else { + // The buffer to be filled in the partial block. + auto buf = Span(mPartialBlockBuffer.get() + partial.Length(), + remaining); + memcpy(buf.Elements(), source.Elements(), source.Length()); + mChannelOffset += source.Length(); + break; + } + } + + MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID); + while (MediaCacheStream* stream = iter.Next(lock)) { + if (stream->mStreamLength >= 0) { + // The stream is at least as long as what we've read + stream->mStreamLength = std::max(stream->mStreamLength, mChannelOffset); + } + stream->mClient->CacheClientNotifyDataReceived(); + } + + // XXX it would be fairly easy to optimize things a lot more to + // avoid waking up reader threads unnecessarily + if (cacheUpdated) { + // Wake up the reader who is waiting for the committed blocks. + lock.NotifyAll(); + } +} + +void MediaCacheStream::FlushPartialBlockInternal(AutoLock& aLock, + bool aNotifyAll) { + MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); + + int32_t blockIndex = OffsetToBlockIndexUnchecked(mChannelOffset); + int32_t blockOffset = OffsetInBlock(mChannelOffset); + if (blockOffset > 0) { + LOG("Stream %p writing partial block: [%d] bytes; " + "mStreamOffset [%" PRId64 "] mChannelOffset[%" PRId64 + "] mStreamLength [%" PRId64 "] notifying: [%s]", + this, blockOffset, mStreamOffset, mChannelOffset, mStreamLength, + aNotifyAll ? "yes" : "no"); + + // Write back the partial block + memset(mPartialBlockBuffer.get() + blockOffset, 0, + BLOCK_SIZE - blockOffset); + auto data = Span(mPartialBlockBuffer.get(), BLOCK_SIZE); + mMediaCache->AllocateAndWriteBlock(aLock, this, blockIndex, data); + } + + // |mChannelOffset == 0| means download ends with no bytes received. + // We should also wake up those readers who are waiting for data + // that will never come. 
+ if ((blockOffset > 0 || mChannelOffset == 0) && aNotifyAll) { + // Wake up readers who may be waiting for this data + aLock.NotifyAll(); + } +} + +void MediaCacheStream::UpdateDownloadStatistics(AutoLock&) { + if (mChannelEnded || mClientSuspended) { + mDownloadStatistics.Stop(); + } else { + mDownloadStatistics.Start(); + } +} + +void MediaCacheStream::NotifyDataEndedInternal(uint32_t aLoadID, + nsresult aStatus) { + MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); + AutoLock lock(mMediaCache->Monitor()); + + if (mClosed || aLoadID != mLoadID) { + // Nothing to do if the stream is closed or a new load has begun. + return; + } + + // It is prudent to update channel/cache status before calling + // CacheClientNotifyDataEnded() which will read |mChannelEnded|. + mChannelEnded = true; + mMediaCache->QueueUpdate(lock); + + UpdateDownloadStatistics(lock); + + if (NS_FAILED(aStatus)) { + // Notify the client about this network error. + mDidNotifyDataEnded = true; + mNotifyDataEndedStatus = aStatus; + mClient->CacheClientNotifyDataEnded(aStatus); + // Wake up the readers so they can fail gracefully. + lock.NotifyAll(); + return; + } + + // Note we don't flush the partial block when download ends abnormally for + // the padding zeros will give wrong data to other streams. + FlushPartialBlockInternal(lock, true); + + MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID); + while (MediaCacheStream* stream = iter.Next(lock)) { + // We read the whole stream, so remember the true length + stream->mStreamLength = mChannelOffset; + if (!stream->mDidNotifyDataEnded) { + stream->mDidNotifyDataEnded = true; + stream->mNotifyDataEndedStatus = aStatus; + stream->mClient->CacheClientNotifyDataEnded(aStatus); + } + } +} + +void MediaCacheStream::NotifyDataEnded(uint32_t aLoadID, nsresult aStatus) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(aLoadID > 0); + + RefPtr client = mClient; + nsCOMPtr r = NS_NewRunnableFunction( + "MediaCacheStream::NotifyDataEnded", [client, this, aLoadID, aStatus]() { + NotifyDataEndedInternal(aLoadID, aStatus); + }); + OwnerThread()->Dispatch(r.forget()); +} + +void MediaCacheStream::NotifyClientSuspended(bool aSuspended) { + MOZ_ASSERT(NS_IsMainThread()); + + RefPtr client = mClient; + nsCOMPtr r = NS_NewRunnableFunction( + "MediaCacheStream::NotifyClientSuspended", [client, this, aSuspended]() { + AutoLock lock(mMediaCache->Monitor()); + if (!mClosed && mClientSuspended != aSuspended) { + mClientSuspended = aSuspended; + // mClientSuspended changes the decision of reading streams. + mMediaCache->QueueUpdate(lock); + UpdateDownloadStatistics(lock); + if (mClientSuspended) { + // Download is suspended. Wake up the readers that might be able to + // get data from the partial block. + lock.NotifyAll(); + } + } + }); + OwnerThread()->Dispatch(r.forget()); +} + +void MediaCacheStream::NotifyResume() { + MOZ_ASSERT(NS_IsMainThread()); + + nsCOMPtr r = NS_NewRunnableFunction( + "MediaCacheStream::NotifyResume", + [this, client = RefPtr(mClient)]() { + AutoLock lock(mMediaCache->Monitor()); + if (mClosed) { + return; + } + // Don't resume download if we are already at the end of the stream for + // seek will fail and be wasted anyway. + int64_t offset = mSeekTarget != -1 ? mSeekTarget : mChannelOffset; + if (mStreamLength < 0 || offset < mStreamLength) { + mClient->CacheClientSeek(offset, false); + // DownloadResumed() will be notified when a new channel is opened. + } + // The channel remains dead. 
If we want to read some other data in the + // future, CacheClientSeek() will be called to reopen the channel. + }); + OwnerThread()->Dispatch(r.forget()); +} + +MediaCacheStream::~MediaCacheStream() { + MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread"); + MOZ_ASSERT(!mPinCount, "Unbalanced Pin"); + MOZ_ASSERT(!mMediaCache || mClosed); + + uint32_t lengthKb = uint32_t(std::min( + std::max(mStreamLength, int64_t(0)) / 1024, int64_t(UINT32_MAX))); + LOG("MediaCacheStream::~MediaCacheStream(this=%p) " + "MEDIACACHESTREAM_LENGTH_KB=%" PRIu32, + this, lengthKb); +} + +bool MediaCacheStream::AreAllStreamsForResourceSuspended(AutoLock& aLock) { + MOZ_ASSERT(!NS_IsMainThread()); + + MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID); + // Look for a stream that's able to read the data we need + int64_t dataOffset = -1; + while (MediaCacheStream* stream = iter.Next(aLock)) { + if (stream->mCacheSuspended || stream->mChannelEnded || stream->mClosed) { + continue; + } + if (dataOffset < 0) { + dataOffset = GetCachedDataEndInternal(aLock, mStreamOffset); + } + // Ignore streams that are reading beyond the data we need + if (stream->mChannelOffset > dataOffset) { + continue; + } + return false; + } + + return true; +} + +RefPtr MediaCacheStream::Close() { + MOZ_ASSERT(NS_IsMainThread()); + if (!mMediaCache) { + return GenericPromise::CreateAndResolve(true, __func__); + } + + return InvokeAsync(OwnerThread(), "MediaCacheStream::Close", + [this, client = RefPtr(mClient)] { + AutoLock lock(mMediaCache->Monitor()); + CloseInternal(lock); + return GenericPromise::CreateAndResolve(true, __func__); + }); +} + +void MediaCacheStream::CloseInternal(AutoLock& aLock) { + MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); + + if (mClosed) { + return; + } + + // Closing a stream will change the return value of + // MediaCacheStream::AreAllStreamsForResourceSuspended as well as + // ChannelMediaResource::IsSuspendedByCache. Let's notify it. + mMediaCache->QueueSuspendedStatusUpdate(aLock, mResourceID); + + mClosed = true; + mMediaCache->ReleaseStreamBlocks(aLock, this); + mMediaCache->ReleaseStream(aLock, this); + // Wake up any blocked readers + aLock.NotifyAll(); + + // Queue an Update since we may have created more free space. 
+ mMediaCache->QueueUpdate(aLock); +} + +void MediaCacheStream::Pin() { + MOZ_ASSERT(!NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + ++mPinCount; + // Queue an Update since we may no longer want to read more into the + // cache, if this stream's block have become non-evictable + mMediaCache->QueueUpdate(lock); +} + +void MediaCacheStream::Unpin() { + MOZ_ASSERT(!NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + NS_ASSERTION(mPinCount > 0, "Unbalanced Unpin"); + --mPinCount; + // Queue an Update since we may be able to read more into the + // cache, if this stream's block have become evictable + mMediaCache->QueueUpdate(lock); +} + +int64_t MediaCacheStream::GetLength() const { + MOZ_ASSERT(!NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + return mStreamLength; +} + +MediaCacheStream::LengthAndOffset MediaCacheStream::GetLengthAndOffset() const { + MOZ_ASSERT(NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + return {mStreamLength, mChannelOffset}; +} + +int64_t MediaCacheStream::GetNextCachedData(int64_t aOffset) { + MOZ_ASSERT(!NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + return GetNextCachedDataInternal(lock, aOffset); +} + +int64_t MediaCacheStream::GetCachedDataEnd(int64_t aOffset) { + MOZ_ASSERT(!NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + return GetCachedDataEndInternal(lock, aOffset); +} + +bool MediaCacheStream::IsDataCachedToEndOfStream(int64_t aOffset) { + MOZ_ASSERT(!NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + if (mStreamLength < 0) return false; + return GetCachedDataEndInternal(lock, aOffset) >= mStreamLength; +} + +int64_t MediaCacheStream::GetCachedDataEndInternal(AutoLock&, int64_t aOffset) { + int32_t blockIndex = OffsetToBlockIndex(aOffset); + if (blockIndex < 0) { + return aOffset; + } + while (size_t(blockIndex) < mBlocks.Length() && mBlocks[blockIndex] != -1) { + ++blockIndex; + } + int64_t result = blockIndex * BLOCK_SIZE; + if (blockIndex == OffsetToBlockIndexUnchecked(mChannelOffset)) { + // The block containing mChannelOffset may be partially read but not + // yet committed to the main cache + result = mChannelOffset; + } + if (mStreamLength >= 0) { + // The last block in the cache may only be partially valid, so limit + // the cached range to the stream length + result = std::min(result, mStreamLength); + } + return std::max(result, aOffset); +} + +int64_t MediaCacheStream::GetNextCachedDataInternal(AutoLock&, + int64_t aOffset) { + if (aOffset == mStreamLength) return -1; + + int32_t startBlockIndex = OffsetToBlockIndex(aOffset); + if (startBlockIndex < 0) { + return -1; + } + int32_t channelBlockIndex = OffsetToBlockIndexUnchecked(mChannelOffset); + + if (startBlockIndex == channelBlockIndex && aOffset < mChannelOffset) { + // The block containing mChannelOffset is partially read, but not + // yet committed to the main cache. aOffset lies in the partially + // read portion, thus it is effectively cached. + return aOffset; + } + + if (size_t(startBlockIndex) >= mBlocks.Length()) return -1; + + // Is the current block cached? 
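+  // (mBlocks[i] holds the cache-wide block index for stream block i, or -1
+  // when that block is not cached.)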
+  if (mBlocks[startBlockIndex] != -1) return aOffset;
+
+  // Scan forward for the next block that has data.
+  bool hasPartialBlock = OffsetInBlock(mChannelOffset) != 0;
+  int32_t blockIndex = startBlockIndex + 1;
+  while (true) {
+    if ((hasPartialBlock && blockIndex == channelBlockIndex) ||
+        (size_t(blockIndex) < mBlocks.Length() && mBlocks[blockIndex] != -1)) {
+      // We're at the incoming channel block, which has data in it, or at a
+      // cached block. Return the offset of the block's start.
+      return blockIndex * BLOCK_SIZE;
+    }
+
+    // No more cached blocks?
+    if (size_t(blockIndex) >= mBlocks.Length()) return -1;
+
+    ++blockIndex;
+  }
+
+  MOZ_ASSERT_UNREACHABLE("Should return in loop");
+  return -1;
+}
+
+void MediaCacheStream::SetReadMode(ReadMode aMode) {
+  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
+      "MediaCacheStream::SetReadMode",
+      [this, client = RefPtr(mClient), aMode]() {
+        AutoLock lock(mMediaCache->Monitor());
+        if (!mClosed && mCurrentMode != aMode) {
+          mCurrentMode = aMode;
+          mMediaCache->QueueUpdate(lock);
+        }
+      });
+  OwnerThread()->Dispatch(r.forget());
+}
+
+void MediaCacheStream::SetPlaybackRate(uint32_t aBytesPerSecond) {
+  MOZ_ASSERT(!NS_IsMainThread());
+  MOZ_ASSERT(aBytesPerSecond > 0, "Zero playback rate not allowed");
+
+  AutoLock lock(mMediaCache->Monitor());
+  if (!mClosed && mPlaybackBytesPerSecond != aBytesPerSecond) {
+    mPlaybackBytesPerSecond = aBytesPerSecond;
+    mMediaCache->QueueUpdate(lock);
+  }
+}
+
+nsresult MediaCacheStream::Seek(AutoLock& aLock, int64_t aOffset) {
+  MOZ_ASSERT(!NS_IsMainThread());
+
+  if (!IsOffsetAllowed(aOffset)) {
+    return NS_ERROR_ILLEGAL_VALUE;
+  }
+  if (mClosed) {
+    return NS_ERROR_ABORT;
+  }
+
+  int64_t oldOffset = mStreamOffset;
+  mStreamOffset = aOffset;
+  LOG("Stream %p Seek to %" PRId64, this, mStreamOffset);
+  mMediaCache->NoteSeek(aLock, this, oldOffset);
+  mMediaCache->QueueUpdate(aLock);
+  return NS_OK;
+}
+
+void MediaCacheStream::ThrottleReadahead(bool bThrottle) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
+      "MediaCacheStream::ThrottleReadahead",
+      [client = RefPtr(mClient), this, bThrottle]() {
+        AutoLock lock(mMediaCache->Monitor());
+        if (!mClosed && mThrottleReadahead != bThrottle) {
+          LOGI("Stream %p ThrottleReadahead %d", this, bThrottle);
+          mThrottleReadahead = bThrottle;
+          mMediaCache->QueueUpdate(lock);
+        }
+      });
+  OwnerThread()->Dispatch(r.forget());
+}
+
+uint32_t MediaCacheStream::ReadPartialBlock(AutoLock&, int64_t aOffset,
+                                            Span<char> aBuffer) {
+  MOZ_ASSERT(IsOffsetAllowed(aOffset));
+
+  if (OffsetToBlockIndexUnchecked(mChannelOffset) !=
+          OffsetToBlockIndexUnchecked(aOffset) ||
+      aOffset >= mChannelOffset) {
+    // Not in the partial block or no data to read.
+    return 0;
+  }
+
+  auto source = Span<const uint8_t>(
+      mPartialBlockBuffer.get() + OffsetInBlock(aOffset),
+      OffsetInBlock(mChannelOffset) - OffsetInBlock(aOffset));
+  // We have |source.Length() <= BLOCK_SIZE < INT32_MAX| to guarantee
+  // that |bytesToRead| can fit into a uint32_t.
+  uint32_t bytesToRead = std::min(aBuffer.Length(), source.Length());
+  memcpy(aBuffer.Elements(), source.Elements(), bytesToRead);
+  return bytesToRead;
+}
+
+Result<uint32_t, nsresult> MediaCacheStream::ReadBlockFromCache(
+    AutoLock& aLock, int64_t aOffset, Span<char> aBuffer,
+    bool aNoteBlockUsage) {
+  MOZ_ASSERT(IsOffsetAllowed(aOffset));
+
+  // OffsetToBlockIndexUnchecked() is always non-negative.
+  uint32_t index = OffsetToBlockIndexUnchecked(aOffset);
+  int32_t cacheBlock = index < mBlocks.Length() ?
mBlocks[index] : -1; + if (cacheBlock < 0 || (mStreamLength >= 0 && aOffset >= mStreamLength)) { + // Not in the cache. + return 0; + } + + if (aBuffer.Length() > size_t(BLOCK_SIZE)) { + // Clamp the buffer to avoid overflow below since we will read at most + // BLOCK_SIZE bytes. + aBuffer = aBuffer.First(BLOCK_SIZE); + } + + if (mStreamLength >= 0 && + int64_t(aBuffer.Length()) > mStreamLength - aOffset) { + // Clamp reads to stream's length + aBuffer = aBuffer.First(mStreamLength - aOffset); + } + + // |BLOCK_SIZE - OffsetInBlock(aOffset)| <= BLOCK_SIZE + int32_t bytesToRead = + std::min(BLOCK_SIZE - OffsetInBlock(aOffset), aBuffer.Length()); + int32_t bytesRead = 0; + nsresult rv = mMediaCache->ReadCacheFile( + aLock, cacheBlock * BLOCK_SIZE + OffsetInBlock(aOffset), + aBuffer.Elements(), bytesToRead, &bytesRead); + + // Ensure |cacheBlock * BLOCK_SIZE + OffsetInBlock(aOffset)| won't overflow. + static_assert(INT64_MAX >= BLOCK_SIZE * (uint32_t(INT32_MAX) + 1), + "BLOCK_SIZE too large!"); + + if (NS_FAILED(rv)) { + nsCString name; + GetErrorName(rv, name); + LOGE("Stream %p ReadCacheFile failed, rv=%s", this, name.Data()); + return mozilla::Err(rv); + } + + if (aNoteBlockUsage) { + mMediaCache->NoteBlockUsage(aLock, this, cacheBlock, aOffset, mCurrentMode, + TimeStamp::Now()); + } + + return bytesRead; +} + +nsresult MediaCacheStream::Read(AutoLock& aLock, char* aBuffer, uint32_t aCount, + uint32_t* aBytes) { + MOZ_ASSERT(!NS_IsMainThread()); + + // Cache the offset in case it is changed again when we are waiting for the + // monitor to be notified to avoid reading at the wrong position. + auto streamOffset = mStreamOffset; + + // The buffer we are about to fill. + auto buffer = Span(aBuffer, aCount); + + // Read one block (or part of a block) at a time + while (!buffer.IsEmpty()) { + if (mClosed) { + return NS_ERROR_ABORT; + } + + if (!IsOffsetAllowed(streamOffset)) { + LOGE("Stream %p invalid offset=%" PRId64, this, streamOffset); + return NS_ERROR_ILLEGAL_VALUE; + } + + if (mStreamLength >= 0 && streamOffset >= mStreamLength) { + // Don't try to read beyond the end of the stream + break; + } + + Result rv = ReadBlockFromCache( + aLock, streamOffset, buffer, true /* aNoteBlockUsage */); + if (rv.isErr()) { + return rv.unwrapErr(); + } + + uint32_t bytes = rv.unwrap(); + if (bytes > 0) { + // Got data from the cache successfully. Read next block. + streamOffset += bytes; + buffer = buffer.From(bytes); + continue; + } + + // See if we can use the data in the partial block of any stream reading + // this resource. Note we use the partial block only when it is completed, + // that is reaching EOS. + bool foundDataInPartialBlock = false; + MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID); + while (MediaCacheStream* stream = iter.Next(aLock)) { + if (OffsetToBlockIndexUnchecked(stream->mChannelOffset) == + OffsetToBlockIndexUnchecked(streamOffset) && + stream->mChannelOffset == stream->mStreamLength) { + uint32_t bytes = stream->ReadPartialBlock(aLock, streamOffset, buffer); + streamOffset += bytes; + buffer = buffer.From(bytes); + foundDataInPartialBlock = true; + break; + } + } + if (foundDataInPartialBlock) { + // Break for we've reached EOS. + break; + } + + if (mDidNotifyDataEnded && NS_FAILED(mNotifyDataEndedStatus)) { + // Since download ends abnormally, there is no point in waiting for new + // data to come. We will check the partial block to read as many bytes as + // possible before exiting this function. 
+ bytes = ReadPartialBlock(aLock, streamOffset, buffer); + streamOffset += bytes; + buffer = buffer.From(bytes); + break; + } + + if (mStreamOffset != streamOffset) { + // Update mStreamOffset before we drop the lock. We need to run + // Update() again since stream reading strategy might have changed. + mStreamOffset = streamOffset; + mMediaCache->QueueUpdate(aLock); + } + + // No data to read, so block + aLock.Wait(); + } + + uint32_t count = buffer.Elements() - aBuffer; + *aBytes = count; + if (count == 0) { + return NS_OK; + } + + // Some data was read, so queue an update since block priorities may + // have changed + mMediaCache->QueueUpdate(aLock); + + LOG("Stream %p Read at %" PRId64 " count=%d", this, streamOffset - count, + count); + mStreamOffset = streamOffset; + return NS_OK; +} + +nsresult MediaCacheStream::ReadAt(int64_t aOffset, char* aBuffer, + uint32_t aCount, uint32_t* aBytes) { + MOZ_ASSERT(!NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + nsresult rv = Seek(lock, aOffset); + if (NS_FAILED(rv)) return rv; + return Read(lock, aBuffer, aCount, aBytes); +} + +nsresult MediaCacheStream::ReadFromCache(char* aBuffer, int64_t aOffset, + uint32_t aCount) { + MOZ_ASSERT(!NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + + // The buffer we are about to fill. + auto buffer = Span(aBuffer, aCount); + + // Read one block (or part of a block) at a time + int64_t streamOffset = aOffset; + while (!buffer.IsEmpty()) { + if (mClosed) { + // We need to check |mClosed| in each iteration which might be changed + // after calling |mMediaCache->ReadCacheFile|. + return NS_ERROR_FAILURE; + } + + if (!IsOffsetAllowed(streamOffset)) { + LOGE("Stream %p invalid offset=%" PRId64, this, streamOffset); + return NS_ERROR_ILLEGAL_VALUE; + } + + Result rv = + ReadBlockFromCache(lock, streamOffset, buffer); + if (rv.isErr()) { + return rv.unwrapErr(); + } + + uint32_t bytes = rv.unwrap(); + if (bytes > 0) { + // Read data from the cache successfully. Let's try next block. + streamOffset += bytes; + buffer = buffer.From(bytes); + continue; + } + + // The partial block is our last chance to get data. + bytes = ReadPartialBlock(lock, streamOffset, buffer); + if (bytes < buffer.Length()) { + // Not enough data to read. + return NS_ERROR_FAILURE; + } + + // Return for we've got all the requested bytes. + return NS_OK; + } + + return NS_OK; +} + +nsresult MediaCacheStream::Init(int64_t aContentLength) { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + MOZ_ASSERT(!mMediaCache, "Has been initialized."); + + if (aContentLength > 0) { + uint32_t length = uint32_t(std::min(aContentLength, int64_t(UINT32_MAX))); + LOG("MediaCacheStream::Init(this=%p) " + "MEDIACACHESTREAM_NOTIFIED_LENGTH=%" PRIu32, + this, length); + + mStreamLength = aContentLength; + } + + mMediaCache = MediaCache::GetMediaCache(aContentLength, mIsPrivateBrowsing); + if (!mMediaCache) { + return NS_ERROR_FAILURE; + } + + OwnerThread()->Dispatch(NS_NewRunnableFunction( + "MediaCacheStream::Init", + [this, res = RefPtr(mClient)]() { + AutoLock lock(mMediaCache->Monitor()); + mMediaCache->OpenStream(lock, this); + })); + + return NS_OK; +} + +void MediaCacheStream::InitAsClone(MediaCacheStream* aOriginal) { + MOZ_ASSERT(!mMediaCache, "Has been initialized."); + MOZ_ASSERT(aOriginal->mMediaCache, "Don't clone an uninitialized stream."); + + // Use the same MediaCache as our clone. 
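// Illustrative sketch (an editorial addition, not part of the original patch):
// how a caller on a non-main thread might drain ReadAt(). ReadAt() may return
// fewer bytes than requested, so callers loop until the buffer is full, the
// stream reports zero bytes, or an error is returned. The helper name is
// hypothetical; only ReadAt()'s signature and semantics come from
// MediaCacheStream itself.
static nsresult ReadExactlyAt(MediaCacheStream& aStream, int64_t aOffset,
                              char* aBuffer, uint32_t aCount,
                              uint32_t* aActuallyRead) {
  uint32_t total = 0;
  while (total < aCount) {
    uint32_t bytes = 0;
    nsresult rv = aStream.ReadAt(aOffset + total, aBuffer + total,
                                 aCount - total, &bytes);
    if (NS_FAILED(rv)) {
      return rv;  // e.g. NS_ERROR_ABORT once the stream has been closed
    }
    if (bytes == 0) {
      break;  // no more data is available (end of stream or aborted download)
    }
    total += bytes;
  }
  *aActuallyRead = total;
  return NS_OK;
}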
+ mMediaCache = aOriginal->mMediaCache; + OwnerThread()->Dispatch(NS_NewRunnableFunction( + "MediaCacheStream::InitAsClone", + [this, aOriginal, r1 = RefPtr(mClient), + r2 = RefPtr(aOriginal->mClient)]() { + InitAsCloneInternal(aOriginal); + })); +} + +void MediaCacheStream::InitAsCloneInternal(MediaCacheStream* aOriginal) { + MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); + AutoLock lock(mMediaCache->Monitor()); + LOG("MediaCacheStream::InitAsCloneInternal(this=%p, original=%p)", this, + aOriginal); + + // Download data and notify events if necessary. Note the order is important + // in order to mimic the behavior of data being downloaded from the channel. + + // Step 1: copy/download data from the original stream. + mResourceID = aOriginal->mResourceID; + mStreamLength = aOriginal->mStreamLength; + mIsTransportSeekable = aOriginal->mIsTransportSeekable; + mDownloadStatistics = aOriginal->mDownloadStatistics; + mDownloadStatistics.Stop(); + + // Grab cache blocks from aOriginal as readahead blocks for our stream + for (uint32_t i = 0; i < aOriginal->mBlocks.Length(); ++i) { + int32_t cacheBlockIndex = aOriginal->mBlocks[i]; + if (cacheBlockIndex < 0) continue; + + while (i >= mBlocks.Length()) { + mBlocks.AppendElement(-1); + } + // Every block is a readahead block for the clone because the clone's + // initial stream offset is zero + mMediaCache->AddBlockOwnerAsReadahead(lock, cacheBlockIndex, this, i); + } + + // Copy the partial block. + mChannelOffset = aOriginal->mChannelOffset; + memcpy(mPartialBlockBuffer.get(), aOriginal->mPartialBlockBuffer.get(), + BLOCK_SIZE); + + // Step 2: notify the client that we have new data so the decoder has a chance + // to compute 'canplaythrough' and buffer ranges. + mClient->CacheClientNotifyDataReceived(); + + // Step 3: notify download ended if necessary. + if (aOriginal->mDidNotifyDataEnded && + NS_SUCCEEDED(aOriginal->mNotifyDataEndedStatus)) { + mNotifyDataEndedStatus = aOriginal->mNotifyDataEndedStatus; + mDidNotifyDataEnded = true; + mClient->CacheClientNotifyDataEnded(mNotifyDataEndedStatus); + } + + // Step 4: notify download is suspended by the cache. + mClientSuspended = true; + mCacheSuspended = true; + mChannelEnded = true; + mClient->CacheClientSuspend(); + mMediaCache->QueueSuspendedStatusUpdate(lock, mResourceID); + + // Step 5: add the stream to be managed by the cache. + mMediaCache->OpenStream(lock, this, true /* aIsClone */); + // Wake up the reader which is waiting for the cloned data. + lock.NotifyAll(); +} + +nsISerialEventTarget* MediaCacheStream::OwnerThread() const { + return mMediaCache->OwnerThread(); +} + +nsresult MediaCacheStream::GetCachedRanges(MediaByteRangeSet& aRanges) { + MOZ_ASSERT(!NS_IsMainThread()); + // Take the monitor, so that the cached data ranges can't grow while we're + // trying to loop over them. + AutoLock lock(mMediaCache->Monitor()); + + // We must be pinned while running this, otherwise the cached data ranges may + // shrink while we're trying to loop over them. + NS_ASSERTION(mPinCount > 0, "Must be pinned"); + + int64_t startOffset = GetNextCachedDataInternal(lock, 0); + while (startOffset >= 0) { + int64_t endOffset = GetCachedDataEndInternal(lock, startOffset); + NS_ASSERTION(startOffset < endOffset, + "Buffered range must end after its start"); + // Bytes [startOffset..endOffset] are cached. 
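// Illustrative worked example (an editorial addition, not part of the original
// patch): what the walk in GetCachedRanges() produces for one hypothetical
// cache state. With BLOCK_SIZE = 32768, suppose blocks 0, 1 and 4 of this
// stream are cached, nothing beyond block 4 is cached, the partial block is
// empty, and the stream is longer than 163840 bytes. Then:
//   GetNextCachedDataInternal(lock, 0)      -> 0        (block 0 is cached)
//   GetCachedDataEndInternal(lock, 0)       -> 65536    (blocks 0-1 are contiguous)
//   GetNextCachedDataInternal(lock, 65536)  -> 131072   (start of block 4)
//   GetCachedDataEndInternal(lock, 131072)  -> 163840   (end of block 4)
//   GetNextCachedDataInternal(lock, 163840) -> -1       (no more cached data)
// so aRanges ends up describing bytes 0..65536 and 131072..163840.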
+ aRanges += MediaByteRange(startOffset, endOffset); + startOffset = GetNextCachedDataInternal(lock, endOffset); + NS_ASSERTION( + startOffset == -1 || startOffset > endOffset, + "Must have advanced to start of next range, or hit end of stream"); + } + return NS_OK; +} + +double MediaCacheStream::GetDownloadRate(bool* aIsReliable) { + MOZ_ASSERT(!NS_IsMainThread()); + AutoLock lock(mMediaCache->Monitor()); + return mDownloadStatistics.GetRate(aIsReliable); +} + +void MediaCacheStream::GetDebugInfo(dom::MediaCacheStreamDebugInfo& aInfo) { + AutoLock lock(mMediaCache->GetMonitorOnTheMainThread()); + aInfo.mStreamLength = mStreamLength; + aInfo.mChannelOffset = mChannelOffset; + aInfo.mCacheSuspended = mCacheSuspended; + aInfo.mChannelEnded = mChannelEnded; + aInfo.mLoadID = mLoadID; +} + +} // namespace mozilla + +// avoid redefined macro in unified build +#undef LOG +#undef LOGI diff --git a/dom/media/MediaCache.h b/dom/media/MediaCache.h new file mode 100644 index 0000000000..b4559c1fd1 --- /dev/null +++ b/dom/media/MediaCache.h @@ -0,0 +1,557 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MediaCache_h_ +#define MediaCache_h_ + +#include "DecoderDoctorLogger.h" +#include "Intervals.h" +#include "mozilla/Monitor.h" +#include "mozilla/Result.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/dom/MediaDebugInfoBinding.h" +#include "nsCOMPtr.h" +#include "nsHashKeys.h" +#include "nsTArray.h" +#include "nsTHashtable.h" + +#include "MediaChannelStatistics.h" + +class nsIEventTarget; +class nsIPrincipal; + +namespace mozilla { +// defined in MediaResource.h +class ChannelMediaResource; +typedef media::IntervalSet MediaByteRangeSet; +class MediaResource; + +/** + * Media applications want fast, "on demand" random access to media data, + * for pausing, seeking, etc. But we are primarily interested + * in transporting media data using HTTP over the Internet, which has + * high latency to open a connection, requires a new connection for every + * seek, may not even support seeking on some connections (especially + * live streams), and uses a push model --- data comes from the server + * and you don't have much control over the rate. Also, transferring data + * over the Internet can be slow and/or unpredictable, so we want to read + * ahead to buffer and cache as much data as possible. + * + * The job of the media cache is to resolve this impedance mismatch. + * The media cache reads data from Necko channels into file-backed storage, + * and offers a random-access file-like API to the stream data + * (MediaCacheStream). Along the way it solves several problems: + * -- The cache intelligently reads ahead to prefetch data that may be + * needed in the future + * -- The size of the cache is bounded so that we don't fill up + * storage with read-ahead data + * -- Cache replacement is managed globally so that the most valuable + * data (across all streams) is retained + * -- The cache can suspend Necko channels temporarily when their data is + * not wanted (yet) + * -- The cache translates file-like seek requests to HTTP seeks, + * including optimizations like not triggering a new seek if it would + * be faster to just keep reading until we reach the seek point. 
The + * "seek to EOF" idiom to determine file size is also handled efficiently + * (seeking to EOF and then seeking back to the previous offset does not + * trigger any Necko activity) + * -- The cache also handles the case where the server does not support + * seeking + * -- Necko can only send data to the main thread, but MediaCacheStream + * can distribute data to any thread + * -- The cache exposes APIs so clients can detect what data is + * currently held + * + * Note that although HTTP is the most important transport and we only + * support transport-level seeking via HTTP byte-ranges, the media cache + * works with any kind of Necko channels and provides random access to + * cached data even for, e.g., FTP streams. + * + * The media cache is not persistent. It does not currently allow + * data from one load to be used by other loads, either within the same + * browser session or across browser sessions. The media cache file + * is marked "delete on close" so it will automatically disappear in the + * event of a browser crash or shutdown. + * + * The media cache is block-based. Streams are divided into blocks of a + * fixed size (currently 4K) and we cache blocks. A single cache contains + * blocks for all streams. + * + * The cache size is controlled by the media.cache_size preference + * (which is in KB). The default size is 500MB. + * + * The replacement policy predicts a "time of next use" for each block + * in the cache. When we need to free a block, the block with the latest + * "time of next use" will be evicted. Blocks are divided into + * different classes, each class having its own predictor: + * FREE_BLOCK: these blocks are effectively infinitely far in the future; + * a free block will always be chosen for replacement before other classes + * of blocks. + * METADATA_BLOCK: these are blocks that contain data that has been read + * by the decoder in "metadata mode", e.g. while the decoder is searching + * the stream during a seek operation. These blocks are managed with an + * LRU policy; the "time of next use" is predicted to be as far in the + * future as the last use was in the past. + * PLAYED_BLOCK: these are blocks that have not been read in "metadata + * mode", and contain data behind the current decoder read point. (They + * may not actually have been read by the decoder, if the decoder seeked + * forward.) These blocks are managed with an LRU policy except that we add + * REPLAY_DELAY seconds of penalty to their predicted "time of next use", + * to reflect the uncertainty about whether replay will actually happen + * or not. + * READAHEAD_BLOCK: these are blocks that have not been read in + * "metadata mode" and that are entirely ahead of the current decoder + * read point. (They may actually have been read by the decoder in the + * past if the decoder has since seeked backward.) We predict the + * time of next use for these blocks by assuming steady playback and + * dividing the number of bytes between the block and the current decoder + * read point by the decoder's estimate of its playback rate in bytes + * per second. This ensures that the blocks farthest ahead are considered + * least valuable. + * For efficient prediction of the "latest time of next use", we maintain + * linked lists of blocks in each class, ordering blocks by time of + * next use. READAHEAD_BLOCKS have one linked list per stream, since their + * time of next use depends on stream parameters, but the other lists + * are global. 
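 * (Illustrative worked example, an editorial addition rather than part of the
 * original comment: with the decoder read point at byte 1,000,000, a playback
 * rate estimate of 250,000 bytes per second, and a readahead block starting at
 * byte 3,000,000, the predicted time of next use is
 * (3,000,000 - 1,000,000) / 250,000 = 8 seconds from now; a readahead block at
 * byte 5,000,000 predicts 16 seconds and is therefore the first of the two to
 * be evicted when space is needed.)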
+ * + * A block containing a current decoder read point can contain data + * both behind and ahead of the read point. It will be classified as a + * PLAYED_BLOCK but we will give it special treatment so it is never + * evicted --- it actually contains the highest-priority readahead data + * as well as played data. + * + * "Time of next use" estimates are also used for flow control. When + * reading ahead we can predict the time of next use for the data that + * will be read. If the predicted time of next use is later then the + * prediction for all currently cached blocks, and the cache is full, then + * we should suspend reading from the Necko channel. + * + * Unfortunately suspending the Necko channel can't immediately stop the + * flow of data from the server. First our desire to suspend has to be + * transmitted to the server (in practice, Necko stops reading from the + * socket, which causes the kernel to shrink its advertised TCP receive + * window size to zero). Then the server can stop sending the data, but + * we will receive data roughly corresponding to the product of the link + * bandwidth multiplied by the round-trip latency. We deal with this by + * letting the cache overflow temporarily and then trimming it back by + * moving overflowing blocks back into the body of the cache, replacing + * less valuable blocks as they become available. We try to avoid simply + * discarding overflowing readahead data. + * + * All changes to the actual contents of the cache happen on the main + * thread, since that's where Necko's notifications happen. + * + * The media cache maintains at most one Necko channel for each stream. + * (In the future it might be advantageous to relax this, e.g. so that a + * seek to near the end of the file can happen without disturbing + * the loading of data from the beginning of the file.) The Necko channel + * is managed through ChannelMediaResource; MediaCache does not + * depend on Necko directly. + * + * Every time something changes that might affect whether we want to + * read from a Necko channel, or whether we want to seek on the Necko + * channel --- such as data arriving or data being consumed by the + * decoder --- we asynchronously trigger MediaCache::Update on the main + * thread. That method implements most cache policy. It evaluates for + * each stream whether we want to suspend or resume the stream and what + * offset we should seek to, if any. It is also responsible for trimming + * back the cache size to its desired limit by moving overflowing blocks + * into the main part of the cache. + * + * Streams can be opened in non-seekable mode. In non-seekable mode, + * the cache will only call ChannelMediaResource::CacheClientSeek with + * a 0 offset. The cache tries hard not to discard readahead data + * for non-seekable streams, since that could trigger a potentially + * disastrous re-read of the entire stream. It's up to cache clients + * to try to avoid requesting seeks on such streams. + * + * MediaCache has a single internal monitor for all synchronization. + * This is treated as the lowest level monitor in the media code. So, + * we must not acquire any MediaDecoder locks or MediaResource locks + * while holding the MediaCache lock. But it's OK to hold those locks + * and then get the MediaCache lock. + * + * MediaCache associates a principal with each stream. CacheClientSeek + * can trigger new HTTP requests; due to redirects to other domains, + * each HTTP load can return data with a different principal. 
This + * principal must be passed to NotifyDataReceived, and MediaCache + * will detect when different principals are associated with data in the + * same stream, and replace them with a null principal. + */ +class MediaCache; + +DDLoggedTypeDeclName(MediaCacheStream); + +/** + * If the cache fails to initialize then Init will fail, so nonstatic + * methods of this class can assume gMediaCache is non-null. + * + * This class can be directly embedded as a value. + */ +class MediaCacheStream : public DecoderDoctorLifeLogger { + using AutoLock = MonitorAutoLock; + + public: + // This needs to be a power of two + static const int64_t BLOCK_SIZE = 32768; + + enum ReadMode { MODE_METADATA, MODE_PLAYBACK }; + + // aClient provides the underlying transport that cache will use to read + // data for this stream. + MediaCacheStream(ChannelMediaResource* aClient, bool aIsPrivateBrowsing); + ~MediaCacheStream(); + + // Set up this stream with the cache. Can fail on OOM. + // aContentLength is the content length if known, otherwise -1. + // Exactly one of InitAsClone or Init must be called before any other method + // on this class. Does nothing if already initialized. + nsresult Init(int64_t aContentLength); + + // Set up this stream with the cache, assuming it's for the same data + // as the aOriginal stream. + // Exactly one of InitAsClone or Init must be called before any other method + // on this class. + void InitAsClone(MediaCacheStream* aOriginal); + + nsISerialEventTarget* OwnerThread() const; + + // These are called on the main thread. + // This must be called (and resolve) before the ChannelMediaResource + // used to create this MediaCacheStream is deleted. + RefPtr Close(); + // This returns true when the stream has been closed. + bool IsClosed(AutoLock&) const { return mClosed; } + // Returns true when this stream is can be shared by a new resource load. + // Called on the main thread only. + bool IsAvailableForSharing() const { return !mIsPrivateBrowsing; } + + // These callbacks are called on the main thread by the client + // when data has been received via the channel. + + // Notifies the cache that a load has begun. We pass the offset + // because in some cases the offset might not be what the cache + // requested. In particular we might unexpectedly start providing + // data at offset 0. This need not be called if the offset is the + // offset that the cache requested in + // ChannelMediaResource::CacheClientSeek. This can be called at any + // time by the client, not just after a CacheClientSeek. + // + // aSeekable tells us whether the stream is seekable or not. Non-seekable + // streams will always pass 0 for aOffset to CacheClientSeek. This should only + // be called while the stream is at channel offset 0. Seekability can + // change during the lifetime of the MediaCacheStream --- every time + // we do an HTTP load the seekability may be different (and sometimes + // is, in practice, due to the effects of caching proxies). + // + // aLength tells the cache what the server said the data length is going to + // be. The actual data length may be greater (we receive more data than + // specified) or smaller (the stream ends before we reach the given + // length), because servers can lie. The server's reported data length + // *and* the actual data length can even vary over time because a + // misbehaving server may feed us a different stream after each seek + // operation. So this is really just a hint. 
The cache may however + // stop reading (suspend the channel) when it thinks we've read all the + // data available based on an incorrect reported length. Seeks relative + // EOF also depend on the reported length if we haven't managed to + // read the whole stream yet. + void NotifyDataStarted(uint32_t aLoadID, int64_t aOffset, bool aSeekable, + int64_t aLength); + // Notifies the cache that data has been received. The stream already + // knows the offset because data is received in sequence and + // the starting offset is known via NotifyDataStarted or because + // the cache requested the offset in + // ChannelMediaResource::CacheClientSeek, or because it defaulted to 0. + void NotifyDataReceived(uint32_t aLoadID, uint32_t aCount, + const uint8_t* aData); + + // Set the load ID so the following NotifyDataEnded() call can work properly. + // Used in some rare cases where NotifyDataEnded() is called without the + // preceding NotifyDataStarted(). + void NotifyLoadID(uint32_t aLoadID); + + // Notifies the cache that the channel has closed with the given status. + void NotifyDataEnded(uint32_t aLoadID, nsresult aStatus); + + // Notifies the stream that the suspend status of the client has changed. + // Main thread only. + void NotifyClientSuspended(bool aSuspended); + + // Notifies the stream to resume download at the current offset. + void NotifyResume(); + + // These methods can be called on any thread. + // Cached blocks associated with this stream will not be evicted + // while the stream is pinned. + void Pin(); + void Unpin(); + // See comments above for NotifyDataStarted about how the length + // can vary over time. Returns -1 if no length is known. Returns the + // reported length if we haven't got any better information. If + // the stream ended normally we return the length we actually got. + // If we've successfully read data beyond the originally reported length, + // we return the end of the data we've read. + int64_t GetLength() const; + // Return the length and offset where next channel data will write to. Main + // thread only. + // This method should be removed as part of bug 1464045. + struct LengthAndOffset { + int64_t mLength; + int64_t mOffset; + }; + LengthAndOffset GetLengthAndOffset() const; + // Returns the unique resource ID. Call only on the main thread or while + // holding the media cache lock. + int64_t GetResourceID() { return mResourceID; } + // Returns the end of the bytes starting at the given offset + // which are in cache. + int64_t GetCachedDataEnd(int64_t aOffset); + // Returns the offset of the first byte of cached data at or after aOffset, + // or -1 if there is no such cached data. + int64_t GetNextCachedData(int64_t aOffset); + // Fills aRanges with the ByteRanges representing the data which is currently + // cached. Locks the media cache while running, to prevent any ranges + // growing. The stream should be pinned while this runs and while its results + // are used, to ensure no data is evicted. + nsresult GetCachedRanges(MediaByteRangeSet& aRanges); + + double GetDownloadRate(bool* aIsReliable); + + // Reads from buffered data only. Will fail if not all data to be read is + // in the cache. Will not mark blocks as read. Can be called from the main + // thread. It's the caller's responsibility to wrap the call in a pin/unpin, + // and also to check that the range they want is cached before calling this. 
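  // Illustrative usage sketch (an editorial addition, not part of the original
  // header): the pin/check/read pattern described above, written as a
  // hypothetical caller-side helper. Only Pin(), Unpin(), GetCachedDataEnd()
  // and ReadFromCache() are taken from this class.
  //
  //   nsresult ReadCachedBytes(MediaCacheStream& aStream, int64_t aOffset,
  //                            char* aBuffer, uint32_t aCount) {
  //     aStream.Pin();
  //     nsresult rv = NS_ERROR_NOT_AVAILABLE;
  //     // Only issue the read if the whole range is already cached;
  //     // ReadFromCache() fails rather than blocking when data is missing.
  //     if (aStream.GetCachedDataEnd(aOffset) >= aOffset + aCount) {
  //       rv = aStream.ReadFromCache(aBuffer, aOffset, aCount);
  //     }
  //     aStream.Unpin();
  //     return rv;
  //   }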
+ nsresult ReadFromCache(char* aBuffer, int64_t aOffset, uint32_t aCount); + + // IsDataCachedToEndOfStream returns true if all the data from + // aOffset to the end of the stream (the server-reported end, if the + // real end is not known) is in cache. If we know nothing about the + // end of the stream, this returns false. + bool IsDataCachedToEndOfStream(int64_t aOffset); + // The mode is initially MODE_METADATA. + void SetReadMode(ReadMode aMode); + // This is the client's estimate of the playback rate assuming + // the media plays continuously. The cache can't guess this itself + // because it doesn't know when the decoder was paused, buffering, etc. + // Do not pass zero. + void SetPlaybackRate(uint32_t aBytesPerSecond); + + // Returns true when all streams for this resource are suspended or their + // channel has ended. + bool AreAllStreamsForResourceSuspended(AutoLock&); + + // These methods must be called on a different thread from the main + // thread. They should always be called on the same thread for a given + // stream. + // *aBytes gets the number of bytes that were actually read. This can + // be less than aCount. If the first byte of data is not in the cache, + // this will block until the data is available or the stream is + // closed, otherwise it won't block. + nsresult Read(AutoLock&, char* aBuffer, uint32_t aCount, uint32_t* aBytes); + // Seeks to aOffset in the stream then performs a Read operation. See + // 'Read' for argument and return details. + nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount, + uint32_t* aBytes); + + void ThrottleReadahead(bool bThrottle); + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const; + + void GetDebugInfo(dom::MediaCacheStreamDebugInfo& aInfo); + + private: + friend class MediaCache; + + /** + * A doubly-linked list of blocks. Add/Remove/Get methods are all + * constant time. We declare this here so that a stream can contain a + * BlockList of its read-ahead blocks. Blocks are referred to by index + * into the MediaCache::mIndex array. + * + * Blocks can belong to more than one list at the same time, because + * the next/prev pointers are not stored in the block. 
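 * (Illustrative usage sketch, an editorial addition rather than part of the
 * original comment: a typical walk over a BlockList relies on the -1 sentinel
 * returned by the getters, e.g.
 *   for (int32_t block = list.GetFirstBlock(); block >= 0;
 *        block = list.GetNextBlock(block)) {
 *     // ... inspect MediaCache::mIndex[block] ...
 *   }
 * which visits every block in the list exactly once, in list order.)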
+ */ + class BlockList { + public: + BlockList() : mFirstBlock(-1), mCount(0) {} + ~BlockList() { + NS_ASSERTION(mFirstBlock == -1 && mCount == 0, + "Destroying non-empty block list"); + } + void AddFirstBlock(int32_t aBlock); + void AddAfter(int32_t aBlock, int32_t aBefore); + void RemoveBlock(int32_t aBlock); + // Returns the first block in the list, or -1 if empty + int32_t GetFirstBlock() const { return mFirstBlock; } + // Returns the last block in the list, or -1 if empty + int32_t GetLastBlock() const; + // Returns the next block in the list after aBlock or -1 if + // aBlock is the last block + int32_t GetNextBlock(int32_t aBlock) const; + // Returns the previous block in the list before aBlock or -1 if + // aBlock is the first block + int32_t GetPrevBlock(int32_t aBlock) const; + bool IsEmpty() const { return mFirstBlock < 0; } + int32_t GetCount() const { return mCount; } + // The contents of aBlockIndex1 and aBlockIndex2 have been swapped + void NotifyBlockSwapped(int32_t aBlockIndex1, int32_t aBlockIndex2); +#ifdef DEBUG + // Verify linked-list invariants + void Verify(); +#else + void Verify() {} +#endif + + size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + + private: + struct Entry : public nsUint32HashKey { + explicit Entry(KeyTypePointer aKey) + : nsUint32HashKey(aKey), mNextBlock(0), mPrevBlock(0) {} + Entry(const Entry& toCopy) + : nsUint32HashKey(&toCopy.GetKey()), + mNextBlock(toCopy.mNextBlock), + mPrevBlock(toCopy.mPrevBlock) {} + + int32_t mNextBlock; + int32_t mPrevBlock; + }; + nsTHashtable mEntries; + + // The index of the first block in the list, or -1 if the list is empty. + int32_t mFirstBlock; + // The number of blocks in the list. + int32_t mCount; + }; + + // Read data from the partial block and return the number of bytes read + // successfully. 0 if aOffset is not an offset in the partial block or there + // is nothing to read. + uint32_t ReadPartialBlock(AutoLock&, int64_t aOffset, Span aBuffer); + + // Read data from the cache block specified by aOffset. Return the number of + // bytes read successfully or an error code if any failure. + Result ReadBlockFromCache(AutoLock&, int64_t aOffset, + Span aBuffer, + bool aNoteBlockUsage = false); + + // Non-main thread only. + nsresult Seek(AutoLock&, int64_t aOffset); + + // Returns the end of the bytes starting at the given offset + // which are in cache. + // This method assumes that the cache monitor is held and can be called on + // any thread. + int64_t GetCachedDataEndInternal(AutoLock&, int64_t aOffset); + // Returns the offset of the first byte of cached data at or after aOffset, + // or -1 if there is no such cached data. + // This method assumes that the cache monitor is held and can be called on + // any thread. + int64_t GetNextCachedDataInternal(AutoLock&, int64_t aOffset); + // Used by |NotifyDataEnded| to write |mPartialBlock| to disk. + // If |aNotifyAll| is true, this function will wake up readers who may be + // waiting on the media cache monitor. Called on the media cache thread only. + void FlushPartialBlockInternal(AutoLock&, bool aNotifyAll); + + void NotifyDataStartedInternal(uint32_t aLoadID, int64_t aOffset, + bool aSeekable, int64_t aLength); + + void NotifyDataEndedInternal(uint32_t aLoadID, nsresult aStatus); + + void UpdateDownloadStatistics(AutoLock&); + + void CloseInternal(AutoLock&); + void InitAsCloneInternal(MediaCacheStream* aOriginal); + + // Instance of MediaCache to use with this MediaCacheStream. 
+ RefPtr mMediaCache; + + ChannelMediaResource* const mClient; + + // The following fields must be written holding the cache's monitor and + // only on the main thread, thus can be read either on the main thread + // or while holding the cache's monitor. + + // Set to true when the stream has been closed either explicitly or + // due to an internal cache error + bool mClosed = false; + // This is a unique ID representing the resource we're loading. + // All streams with the same mResourceID are loading the same + // underlying resource and should share data. + // Initialized to 0 as invalid. Will be allocated a valid ID (always positive) + // from the cache. + int64_t mResourceID = 0; + // The last reported seekability state for the underlying channel + bool mIsTransportSeekable; + // True if the cache has suspended our channel because the cache is + // full and the priority of the data that would be received is lower + // than the priority of the data already in the cache + bool mCacheSuspended; + // True if the channel ended and we haven't seeked it again. + bool mChannelEnded; + + // The following fields are protected by the cache's monitor and can be + // written by any thread. + + // The reported or discovered length of the data, or -1 if nothing is known + int64_t mStreamLength = -1; + // The offset where the next data from the channel will arrive + int64_t mChannelOffset = 0; + // The offset where the reader is positioned in the stream + int64_t mStreamOffset; + // For each block in the stream data, maps to the cache entry for the + // block, or -1 if the block is not cached. + nsTArray mBlocks; + // The list of read-ahead blocks, ordered by stream offset; the first + // block is the earliest in the stream (so the last block will be the + // least valuable). + BlockList mReadaheadBlocks; + // The list of metadata blocks; the first block is the most recently used + BlockList mMetadataBlocks; + // The list of played-back blocks; the first block is the most recently used + BlockList mPlayedBlocks; + // The last reported estimate of the decoder's playback rate + uint32_t mPlaybackBytesPerSecond; + // The number of times this stream has been Pinned without a + // corresponding Unpin + uint32_t mPinCount; + // True if CacheClientNotifyDataEnded has been called for this stream. + bool mDidNotifyDataEnded = false; + // The status used when we did CacheClientNotifyDataEnded. Only valid + // when mDidNotifyDataEnded is true. + nsresult mNotifyDataEndedStatus; + // The last reported read mode + ReadMode mCurrentMode = MODE_METADATA; + // The load ID of the current channel. Used to check whether the data is + // coming from an old channel and should be discarded. + uint32_t mLoadID = 0; + // The seek target initiated by MediaCache. -1 if no seek is going on. + int64_t mSeekTarget = -1; + + bool mThrottleReadahead = false; + + // Data received for the block containing mChannelOffset. Data needs + // to wait here so we can write back a complete block. The first + // mChannelOffset%BLOCK_SIZE bytes have been filled in with good data, + // the rest are garbage. + // Heap allocate this buffer since the exact power-of-2 will cause allocation + // slop when combined with the rest of the object members. + // This partial buffer should always be read/write within the cache's monitor. + const UniquePtr mPartialBlockBuffer = + MakeUnique(BLOCK_SIZE); + + // True if associated with a private browsing window. + const bool mIsPrivateBrowsing; + + // True if the client is suspended. 
Accessed on the owner thread only. + bool mClientSuspended = false; + + MediaChannelStatistics mDownloadStatistics; +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/MediaChannelStatistics.h b/dom/media/MediaChannelStatistics.h new file mode 100644 index 0000000000..9a3636cef2 --- /dev/null +++ b/dom/media/MediaChannelStatistics.h @@ -0,0 +1,89 @@ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#if !defined(MediaChannelStatistics_h_) +# define MediaChannelStatistics_h_ + +# include "mozilla/TimeStamp.h" + +namespace mozilla { + +// Number of bytes we have accumulated before we assume the connection download +// rate can be reliably calculated. 57 Segments at IW=3 allows slow start to +// reach a CWND of 30 (See bug 831998) +static const int64_t RELIABLE_DATA_THRESHOLD = 57 * 1460; + +/** + * This class is useful for estimating rates of data passing through + * some channel. The idea is that activity on the channel "starts" + * and "stops" over time. At certain times data passes through the + * channel (usually while the channel is active; data passing through + * an inactive channel is ignored). The GetRate() function computes + * an estimate of the "current rate" of the channel, which is some + * kind of average of the data passing through over the time the + * channel is active. + * + * All methods take "now" as a parameter so the user of this class can + * control the timeline used. + */ +class MediaChannelStatistics { + public: + MediaChannelStatistics() = default; + MediaChannelStatistics(const MediaChannelStatistics&) = default; + MediaChannelStatistics& operator=(const MediaChannelStatistics&) = default; + + void Reset() { + mLastStartTime = TimeStamp(); + mAccumulatedTime = TimeDuration(0); + mAccumulatedBytes = 0; + mIsStarted = false; + } + void Start() { + if (mIsStarted) return; + mLastStartTime = TimeStamp::Now(); + mIsStarted = true; + } + void Stop() { + if (!mIsStarted) return; + mAccumulatedTime += TimeStamp::Now() - mLastStartTime; + mIsStarted = false; + } + void AddBytes(int64_t aBytes) { + if (!mIsStarted) { + // ignore this data, it may be related to seeking or some other + // operation we don't care about + return; + } + mAccumulatedBytes += aBytes; + } + double GetRateAtLastStop(bool* aReliable) const { + double seconds = mAccumulatedTime.ToSeconds(); + *aReliable = + (seconds >= 1.0) || (mAccumulatedBytes >= RELIABLE_DATA_THRESHOLD); + if (seconds <= 0.0) return 0.0; + return static_cast(mAccumulatedBytes) / seconds; + } + double GetRate(bool* aReliable) const { + TimeDuration time = mAccumulatedTime; + if (mIsStarted) { + time += TimeStamp::Now() - mLastStartTime; + } + double seconds = time.ToSeconds(); + *aReliable = + (seconds >= 3.0) || (mAccumulatedBytes >= RELIABLE_DATA_THRESHOLD); + if (seconds <= 0.0) return 0.0; + return static_cast(mAccumulatedBytes) / seconds; + } + + private: + int64_t mAccumulatedBytes = 0; + TimeDuration mAccumulatedTime; + TimeStamp mLastStartTime; + bool mIsStarted = false; +}; + +} // namespace mozilla + +#endif // MediaChannelStatistics_h_ diff --git a/dom/media/MediaContainerType.cpp b/dom/media/MediaContainerType.cpp new file mode 100644 index 0000000000..c657110e02 --- /dev/null +++ b/dom/media/MediaContainerType.cpp @@ -0,0 +1,35 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- 
*/ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "MediaContainerType.h" + +namespace mozilla { + +size_t MediaContainerType::SizeOfExcludingThis( + MallocSizeOf aMallocSizeOf) const { + return mExtendedMIMEType.SizeOfExcludingThis(aMallocSizeOf); +} + +Maybe MakeMediaContainerType(const nsAString& aType) { + Maybe mime = MakeMediaExtendedMIMEType(aType); + if (mime) { + return Some(MediaContainerType(std::move(*mime))); + } + return Nothing(); +} + +Maybe MakeMediaContainerType(const nsACString& aType) { + return MakeMediaContainerType(NS_ConvertUTF8toUTF16(aType)); +} + +Maybe MakeMediaContainerType(const char* aType) { + if (!aType) { + return Nothing(); + } + return MakeMediaContainerType(nsDependentCString(aType)); +} + +} // namespace mozilla diff --git a/dom/media/MediaContainerType.h b/dom/media/MediaContainerType.h new file mode 100644 index 0000000000..3b4d50f0be --- /dev/null +++ b/dom/media/MediaContainerType.h @@ -0,0 +1,51 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MediaContainerType_h_ +#define MediaContainerType_h_ + +#include "MediaMIMETypes.h" +#include "mozilla/Maybe.h" +#include "nsString.h" + +namespace mozilla { + +// Class containing media type information for containers. +class MediaContainerType { + public: + explicit MediaContainerType(const MediaMIMEType& aType) + : mExtendedMIMEType(aType) {} + explicit MediaContainerType(MediaMIMEType&& aType) + : mExtendedMIMEType(std::move(aType)) {} + explicit MediaContainerType(const MediaExtendedMIMEType& aType) + : mExtendedMIMEType(aType) {} + explicit MediaContainerType(MediaExtendedMIMEType&& aType) + : mExtendedMIMEType(std::move(aType)) {} + + const MediaMIMEType& Type() const { return mExtendedMIMEType.Type(); } + const MediaExtendedMIMEType& ExtendedType() const { + return mExtendedMIMEType; + } + + // Original string. Note that "type/subtype" may not be lowercase, + // use Type().AsString() instead to get the normalized "type/subtype". + const nsCString& OriginalString() const { + return mExtendedMIMEType.OriginalString(); + } + + size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + + private: + MediaExtendedMIMEType mExtendedMIMEType; +}; + +Maybe MakeMediaContainerType(const nsAString& aType); +Maybe MakeMediaContainerType(const nsACString& aType); +Maybe MakeMediaContainerType(const char* aType); + +} // namespace mozilla + +#endif // MediaContainerType_h_ diff --git a/dom/media/MediaData.cpp b/dom/media/MediaData.cpp new file mode 100644 index 0000000000..fa545604e6 --- /dev/null +++ b/dom/media/MediaData.cpp @@ -0,0 +1,602 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "MediaData.h" + +#include "ImageContainer.h" +#include "MediaInfo.h" +#include "MediaResult.h" +#include "PerformanceRecorder.h" +#include "VideoUtils.h" +#include "YCbCrUtils.h" +#include "mozilla/gfx/gfxVars.h" +#include "mozilla/layers/ImageBridgeChild.h" +#include "mozilla/layers/KnowsCompositor.h" +#include "mozilla/layers/SharedRGBImage.h" + +#include + +#ifdef XP_WIN +# include "mozilla/gfx/DeviceManagerDx.h" +# include "mozilla/layers/D3D11ShareHandleImage.h" +# include "mozilla/layers/D3D11YCbCrImage.h" +#elif XP_MACOSX +# include "MacIOSurfaceImage.h" +# include "mozilla/gfx/gfxVars.h" +#endif + +namespace mozilla { + +using namespace mozilla::gfx; +using layers::PlanarYCbCrData; +using layers::PlanarYCbCrImage; +using media::TimeUnit; + +const char* AudioData::sTypeName = "audio"; +const char* VideoData::sTypeName = "video"; + +AudioData::AudioData(int64_t aOffset, const media::TimeUnit& aTime, + AlignedAudioBuffer&& aData, uint32_t aChannels, + uint32_t aRate, uint32_t aChannelMap) + // Passing TimeUnit::Zero() here because we can't pass the result of an + // arithmetic operation to the CheckedInt ctor. We set the duration in the + // ctor body below. + : MediaData(sType, aOffset, aTime, TimeUnit::Zero()), + mChannels(aChannels), + mChannelMap(aChannelMap), + mRate(aRate), + mOriginalTime(aTime), + mAudioData(std::move(aData)), + mFrames(mAudioData.Length() / aChannels) { + MOZ_RELEASE_ASSERT(aChannels != 0, + "Can't create an AudioData with 0 channels."); + MOZ_RELEASE_ASSERT(aRate != 0, + "Can't create an AudioData with a sample-rate of 0."); + mDuration = TimeUnit(mFrames, aRate); +} + +Span AudioData::Data() const { + return Span{GetAdjustedData(), mFrames * mChannels}; +} + +void AudioData::SetOriginalStartTime(const media::TimeUnit& aStartTime) { + MOZ_ASSERT(mTime == mOriginalTime, + "Do not call this if data has been trimmed!"); + mTime = aStartTime; + mOriginalTime = aStartTime; +} + +bool AudioData::AdjustForStartTime(const media::TimeUnit& aStartTime) { + mOriginalTime -= aStartTime; + mTime -= aStartTime; + if (mTrimWindow) { + *mTrimWindow -= aStartTime; + } + if (mTime.IsNegative()) { + NS_WARNING("Negative audio start time after time-adjustment!"); + } + return mTime.IsValid() && mOriginalTime.IsValid(); +} + +bool AudioData::SetTrimWindow(const media::TimeInterval& aTrim) { + MOZ_DIAGNOSTIC_ASSERT(aTrim.mStart.IsValid() && aTrim.mEnd.IsValid(), + "An overflow occurred on the provided TimeInterval"); + if (!mAudioData) { + // MoveableData got called. Can no longer work on it. + return false; + } + if (aTrim.mStart < mOriginalTime || aTrim.mEnd > GetEndTime()) { + return false; + } + + auto trimBefore = aTrim.mStart - mOriginalTime; + auto trimAfter = aTrim.mEnd - mOriginalTime; + if (!trimBefore.IsValid() || !trimAfter.IsValid()) { + // Overflow. + return false; + } + if (!mTrimWindow && trimBefore.IsZero() && trimAfter == mDuration) { + // Nothing to change, abort early to prevent rounding errors. 
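  // Illustrative worked example (an editorial addition, not part of the
  // original file): for a stereo 48000 Hz AudioData holding 9600 interleaved
  // samples, mFrames is 9600 / 2 = 4800 and mDuration is 4800 / 48000 = 100 ms.
  // Trimming to [start + 20 ms, start + 60 ms] gives frameOffset = 960 below,
  // so mDataOffset = 960 * 2 = 1920 samples, mFrames = 2880 - 960 = 1920 and
  // the new mDuration is 1920 / 48000 = 40 ms.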
+ return true; + } + + size_t frameOffset = trimBefore.ToTicksAtRate(mRate); + mTrimWindow = Some(aTrim); + mDataOffset = frameOffset * mChannels; + MOZ_DIAGNOSTIC_ASSERT(mDataOffset <= mAudioData.Length(), + "Data offset outside original buffer"); + int64_t frameCountAfterTrim = (trimAfter - trimBefore).ToTicksAtRate(mRate); + if (frameCountAfterTrim > + AssertedCast(mAudioData.Length() / mChannels)) { + // Accept rounding error caused by an imprecise time_base in the container, + // that can cause a mismatch but not other kind of unexpected frame count. + MOZ_RELEASE_ASSERT(!trimBefore.IsBase(mRate)); + mFrames = 0; + } else { + mFrames = frameCountAfterTrim; + } + mTime = mOriginalTime + trimBefore; + mDuration = TimeUnit(mFrames, mRate); + + return true; +} + +AudioDataValue* AudioData::GetAdjustedData() const { + if (!mAudioData) { + return nullptr; + } + return mAudioData.Data() + mDataOffset; +} + +void AudioData::EnsureAudioBuffer() { + if (mAudioBuffer || !mAudioData) { + return; + } + const AudioDataValue* srcData = GetAdjustedData(); + CheckedInt bufferSize(sizeof(AudioDataValue)); + bufferSize *= mFrames; + bufferSize *= mChannels; + mAudioBuffer = SharedBuffer::Create(bufferSize); + + AudioDataValue* destData = static_cast(mAudioBuffer->Data()); + for (uint32_t i = 0; i < mFrames; ++i) { + for (uint32_t j = 0; j < mChannels; ++j) { + destData[j * mFrames + i] = srcData[i * mChannels + j]; + } + } +} + +size_t AudioData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + size_t size = + aMallocSizeOf(this) + mAudioData.SizeOfExcludingThis(aMallocSizeOf); + if (mAudioBuffer) { + size += mAudioBuffer->SizeOfIncludingThis(aMallocSizeOf); + } + return size; +} + +AlignedAudioBuffer AudioData::MoveableData() { + // Trim buffer according to trimming mask. + mAudioData.PopFront(mDataOffset); + mAudioData.SetLength(mFrames * mChannels); + mDataOffset = 0; + mFrames = 0; + mTrimWindow.reset(); + return std::move(mAudioData); +} + +static bool ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane) { + return aPlane.mWidth <= PlanarYCbCrImage::MAX_DIMENSION && + aPlane.mHeight <= PlanarYCbCrImage::MAX_DIMENSION && + aPlane.mWidth * aPlane.mHeight < MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT && + aPlane.mStride > 0 && aPlane.mWidth <= aPlane.mStride; +} + +static MediaResult ValidateBufferAndPicture( + const VideoData::YCbCrBuffer& aBuffer, const IntRect& aPicture) { + // The following situation should never happen unless there is a bug + // in the decoder + if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth || + aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) { + return MediaResult(NS_ERROR_INVALID_ARG, + "Chroma planes with different sizes"); + } + + // The following situations could be triggered by invalid input + if (aPicture.width <= 0 || aPicture.height <= 0) { + return MediaResult(NS_ERROR_INVALID_ARG, "Empty picture rect"); + } + if (!ValidatePlane(aBuffer.mPlanes[0]) || + !ValidatePlane(aBuffer.mPlanes[1]) || + !ValidatePlane(aBuffer.mPlanes[2])) { + return MediaResult(NS_ERROR_INVALID_ARG, "Invalid plane size"); + } + + // Ensure the picture size specified in the headers can be extracted out of + // the frame we've been supplied without indexing out of bounds. 
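  // Illustrative worked example (an editorial addition, not part of the
  // original file): with a luma plane of stride 1920 and height 1088, a
  // 1920x1080 picture rect at (0, 0) passes the checks below
  // (xLimit = 1920 <= stride, yLimit = 1080 <= height), while the same rect at
  // (200, 0) is rejected because xLimit = 200 + 1920 = 2120 exceeds the
  // stride, so copying it would read outside the supplied frame.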
+ CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width); + CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height); + if (!xLimit.isValid() || xLimit.value() > aBuffer.mPlanes[0].mStride || + !yLimit.isValid() || yLimit.value() > aBuffer.mPlanes[0].mHeight) { + // The specified picture dimensions can't be contained inside the video + // frame, we'll stomp memory if we try to copy it. Fail. + return MediaResult(NS_ERROR_INVALID_ARG, "Overflowing picture rect"); + } + return MediaResult(NS_OK); +} + +VideoData::VideoData(int64_t aOffset, const TimeUnit& aTime, + const TimeUnit& aDuration, bool aKeyframe, + const TimeUnit& aTimecode, IntSize aDisplay, + layers::ImageContainer::FrameID aFrameID) + : MediaData(Type::VIDEO_DATA, aOffset, aTime, aDuration), + mDisplay(aDisplay), + mFrameID(aFrameID), + mSentToCompositor(false), + mNextKeyFrameTime(TimeUnit::Invalid()) { + MOZ_ASSERT(!mDuration.IsNegative(), "Frame must have non-negative duration."); + mKeyframe = aKeyframe; + mTimecode = aTimecode; +} + +VideoData::~VideoData() = default; + +size_t VideoData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + size_t size = aMallocSizeOf(this); + + // Currently only PLANAR_YCBCR has a well defined function for determining + // it's size, so reporting is limited to that type. + if (mImage && mImage->GetFormat() == ImageFormat::PLANAR_YCBCR) { + const mozilla::layers::PlanarYCbCrImage* img = + static_cast(mImage.get()); + size += img->SizeOfIncludingThis(aMallocSizeOf); + } + + return size; +} + +ColorDepth VideoData::GetColorDepth() const { + if (!mImage) { + return ColorDepth::COLOR_8; + } + + return mImage->GetColorDepth(); +} + +void VideoData::UpdateDuration(const TimeUnit& aDuration) { + MOZ_ASSERT(!aDuration.IsNegative()); + mDuration = aDuration; +} + +void VideoData::UpdateTimestamp(const TimeUnit& aTimestamp) { + MOZ_ASSERT(!aTimestamp.IsNegative()); + + auto updatedDuration = GetEndTime() - aTimestamp; + MOZ_ASSERT(!updatedDuration.IsNegative()); + + mTime = aTimestamp; + mDuration = updatedDuration; +} + +bool VideoData::AdjustForStartTime(const media::TimeUnit& aStartTime) { + mTime -= aStartTime; + if (mTime.IsNegative()) { + NS_WARNING("Negative video start time after time-adjustment!"); + } + return mTime.IsValid(); +} + +PlanarYCbCrData ConstructPlanarYCbCrData(const VideoInfo& aInfo, + const VideoData::YCbCrBuffer& aBuffer, + const IntRect& aPicture) { + const VideoData::YCbCrBuffer::Plane& Y = aBuffer.mPlanes[0]; + const VideoData::YCbCrBuffer::Plane& Cb = aBuffer.mPlanes[1]; + const VideoData::YCbCrBuffer::Plane& Cr = aBuffer.mPlanes[2]; + + PlanarYCbCrData data; + data.mYChannel = Y.mData; + data.mYStride = AssertedCast(Y.mStride); + data.mYSkip = AssertedCast(Y.mSkip); + data.mCbChannel = Cb.mData; + data.mCrChannel = Cr.mData; + data.mCbCrStride = AssertedCast(Cb.mStride); + data.mCbSkip = AssertedCast(Cb.mSkip); + data.mCrSkip = AssertedCast(Cr.mSkip); + data.mPictureRect = aPicture; + data.mStereoMode = aInfo.mStereoMode; + data.mYUVColorSpace = aBuffer.mYUVColorSpace; + data.mColorPrimaries = aBuffer.mColorPrimaries; + data.mColorDepth = aBuffer.mColorDepth; + if (aInfo.mTransferFunction) { + data.mTransferFunction = *aInfo.mTransferFunction; + } + data.mColorRange = aBuffer.mColorRange; + data.mChromaSubsampling = aBuffer.mChromaSubsampling; + return data; +} + +/* static */ +MediaResult VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage, + const VideoInfo& aInfo, + const YCbCrBuffer& aBuffer, + const IntRect& aPicture, + bool 
aCopyData) { + MOZ_ASSERT(aVideoImage); + + PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture); + + if (aCopyData) { + return MediaResult(aVideoImage->CopyData(data), + RESULT_DETAIL("Failed to copy image data")); + } + return MediaResult(aVideoImage->AdoptData(data), + RESULT_DETAIL("Failed to adopt image data")); +} + +/* static */ +Result, MediaResult> VideoData::CreateAndCopyData( + const VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset, + const TimeUnit& aTime, const TimeUnit& aDuration, + const YCbCrBuffer& aBuffer, bool aKeyframe, const TimeUnit& aTimecode, + const IntRect& aPicture, layers::KnowsCompositor* aAllocator) { + if (!aContainer) { + // Create a dummy VideoData with no image. This gives us something to + // send to media streams if necessary. + RefPtr v(new VideoData(aOffset, aTime, aDuration, aKeyframe, + aTimecode, aInfo.mDisplay, 0)); + return v.forget(); + } + + if (MediaResult r = ValidateBufferAndPicture(aBuffer, aPicture); + NS_FAILED(r)) { + return Err(r); + } + + PerformanceRecorder perfRecorder(MediaStage::CopyDecodedVideo, + aInfo.mImage.height); + RefPtr v(new VideoData(aOffset, aTime, aDuration, aKeyframe, + aTimecode, aInfo.mDisplay, 0)); + + // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR + // format. +#if XP_MACOSX + if (aAllocator && aAllocator->GetWebRenderCompositorType() != + layers::WebRenderCompositor::SOFTWARE) { + RefPtr ioImage = + new layers::MacIOSurfaceImage(nullptr); + PlanarYCbCrData data = ConstructPlanarYCbCrData(aInfo, aBuffer, aPicture); + if (ioImage->SetData(aContainer, data)) { + v->mImage = ioImage; + perfRecorder.Record(); + return v.forget(); + } + } +#endif + if (!v->mImage) { + v->mImage = aContainer->CreatePlanarYCbCrImage(); + } + + if (!v->mImage) { + // TODO: Should other error like NS_ERROR_UNEXPECTED be used here to + // distinguish this error from the NS_ERROR_OUT_OF_MEMORY below? + return Err(MediaResult(NS_ERROR_OUT_OF_MEMORY, + "Failed to create a PlanarYCbCrImage")); + } + NS_ASSERTION(v->mImage->GetFormat() == ImageFormat::PLANAR_YCBCR, + "Wrong format?"); + PlanarYCbCrImage* videoImage = v->mImage->AsPlanarYCbCrImage(); + MOZ_ASSERT(videoImage); + + if (MediaResult r = VideoData::SetVideoDataToImage( + videoImage, aInfo, aBuffer, aPicture, true /* aCopyData */); + NS_FAILED(r)) { + return Err(r); + } + + perfRecorder.Record(); + return v.forget(); +} + +/* static */ +already_AddRefed VideoData::CreateAndCopyData( + const VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset, + const TimeUnit& aTime, const TimeUnit& aDuration, + const YCbCrBuffer& aBuffer, const YCbCrBuffer::Plane& aAlphaPlane, + bool aKeyframe, const TimeUnit& aTimecode, const IntRect& aPicture) { + if (!aContainer) { + // Create a dummy VideoData with no image. This gives us something to + // send to media streams if necessary. + RefPtr v(new VideoData(aOffset, aTime, aDuration, aKeyframe, + aTimecode, aInfo.mDisplay, 0)); + return v.forget(); + } + + if (MediaResult r = ValidateBufferAndPicture(aBuffer, aPicture); + NS_FAILED(r)) { + NS_ERROR(r.Message().get()); + return nullptr; + } + + RefPtr v(new VideoData(aOffset, aTime, aDuration, aKeyframe, + aTimecode, aInfo.mDisplay, 0)); + + // Convert from YUVA to BGRA format on the software side. 
+ RefPtr videoImage = + aContainer->CreateSharedRGBImage(); + v->mImage = videoImage; + + if (!v->mImage) { + return nullptr; + } + if (!videoImage->Allocate( + IntSize(aBuffer.mPlanes[0].mWidth, aBuffer.mPlanes[0].mHeight), + SurfaceFormat::B8G8R8A8)) { + return nullptr; + } + + RefPtr texture = + videoImage->GetTextureClient(/* aKnowsCompositor */ nullptr); + if (!texture) { + NS_WARNING("Failed to allocate TextureClient"); + return nullptr; + } + + layers::TextureClientAutoLock autoLock(texture, + layers::OpenMode::OPEN_WRITE_ONLY); + if (!autoLock.Succeeded()) { + NS_WARNING("Failed to lock TextureClient"); + return nullptr; + } + + layers::MappedTextureData buffer; + if (!texture->BorrowMappedData(buffer)) { + NS_WARNING("Failed to borrow mapped data"); + return nullptr; + } + + // The naming convention for libyuv and associated utils is word-order. + // The naming convention in the gfx stack is byte-order. + ConvertI420AlphaToARGB(aBuffer.mPlanes[0].mData, aBuffer.mPlanes[1].mData, + aBuffer.mPlanes[2].mData, aAlphaPlane.mData, + AssertedCast(aBuffer.mPlanes[0].mStride), + AssertedCast(aBuffer.mPlanes[1].mStride), + buffer.data, buffer.stride, buffer.size.width, + buffer.size.height); + + return v.forget(); +} + +/* static */ +already_AddRefed VideoData::CreateFromImage( + const IntSize& aDisplay, int64_t aOffset, const TimeUnit& aTime, + const TimeUnit& aDuration, const RefPtr& aImage, bool aKeyframe, + const TimeUnit& aTimecode) { + RefPtr v(new VideoData(aOffset, aTime, aDuration, aKeyframe, + aTimecode, aDisplay, 0)); + v->mImage = aImage; + return v.forget(); +} + +nsCString VideoData::ToString() const { + std::array ImageFormatStrings = { + "PLANAR_YCBCR", + "NV_IMAGE", + "SHARED_RGB", + "MOZ2D_SURFACE", + "MAC_IOSURFACE", + "SURFACE_TEXTURE", + "D3D9_RGB32_TEXTURE", + "OVERLAY_IMAGE", + "D3D11_SHARE_HANDLE_TEXTURE", + "D3D11_TEXTURE_IMF_SAMPLE", + "TEXTURE_WRAPPER", + "D3D11_YCBCR_IMAGE", + "GPU_VIDEO", + "DMABUF", + "DCOMP_SURFACE", + }; + + nsCString rv; + rv.AppendPrintf( + "VideoFrame [%s,%s] [%dx%d] format: %s", mTime.ToString().get(), + mDuration.ToString().get(), mDisplay.Width(), mDisplay.Height(), + mImage ? 
ImageFormatStrings[static_cast(mImage->GetFormat())] + : "null"); + return rv; +} + +MediaRawData::MediaRawData() + : MediaData(Type::RAW_DATA), mCrypto(mCryptoInternal) {} + +MediaRawData::MediaRawData(const uint8_t* aData, size_t aSize) + : MediaData(Type::RAW_DATA), + mCrypto(mCryptoInternal), + mBuffer(aData, aSize) {} + +MediaRawData::MediaRawData(const uint8_t* aData, size_t aSize, + const uint8_t* aAlphaData, size_t aAlphaSize) + : MediaData(Type::RAW_DATA), + mCrypto(mCryptoInternal), + mBuffer(aData, aSize), + mAlphaBuffer(aAlphaData, aAlphaSize) {} + +MediaRawData::MediaRawData(AlignedByteBuffer&& aData) + : MediaData(Type::RAW_DATA), + mCrypto(mCryptoInternal), + mBuffer(std::move(aData)) {} + +MediaRawData::MediaRawData(AlignedByteBuffer&& aData, + AlignedByteBuffer&& aAlphaData) + : MediaData(Type::RAW_DATA), + mCrypto(mCryptoInternal), + mBuffer(std::move(aData)), + mAlphaBuffer(std::move(aAlphaData)) {} + +already_AddRefed MediaRawData::Clone() const { + int32_t sampleHeight = 0; + if (mTrackInfo && mTrackInfo->GetAsVideoInfo()) { + sampleHeight = mTrackInfo->GetAsVideoInfo()->mImage.height; + } + PerformanceRecorder perfRecorder(MediaStage::CopyDemuxedData, + sampleHeight); + RefPtr s = new MediaRawData; + s->mTimecode = mTimecode; + s->mTime = mTime; + s->mDuration = mDuration; + s->mOffset = mOffset; + s->mKeyframe = mKeyframe; + s->mExtraData = mExtraData; + s->mCryptoInternal = mCryptoInternal; + s->mTrackInfo = mTrackInfo; + s->mEOS = mEOS; + s->mOriginalPresentationWindow = mOriginalPresentationWindow; + if (!s->mBuffer.Append(mBuffer.Data(), mBuffer.Length())) { + return nullptr; + } + if (!s->mAlphaBuffer.Append(mAlphaBuffer.Data(), mAlphaBuffer.Length())) { + return nullptr; + } + perfRecorder.Record(); + return s.forget(); +} + +MediaRawData::~MediaRawData() = default; + +size_t MediaRawData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + size_t size = aMallocSizeOf(this); + size += mBuffer.SizeOfExcludingThis(aMallocSizeOf); + return size; +} + +UniquePtr MediaRawData::CreateWriter() { + UniquePtr p(new MediaRawDataWriter(this)); + return p; +} + +MediaRawDataWriter::MediaRawDataWriter(MediaRawData* aMediaRawData) + : mCrypto(aMediaRawData->mCryptoInternal), mTarget(aMediaRawData) {} + +bool MediaRawDataWriter::SetSize(size_t aSize) { + return mTarget->mBuffer.SetLength(aSize); +} + +bool MediaRawDataWriter::Prepend(const uint8_t* aData, size_t aSize) { + return mTarget->mBuffer.Prepend(aData, aSize); +} + +bool MediaRawDataWriter::Append(const uint8_t* aData, size_t aSize) { + return mTarget->mBuffer.Append(aData, aSize); +} + +bool MediaRawDataWriter::Replace(const uint8_t* aData, size_t aSize) { + return mTarget->mBuffer.Replace(aData, aSize); +} + +void MediaRawDataWriter::Clear() { mTarget->mBuffer.Clear(); } + +uint8_t* MediaRawDataWriter::Data() { return mTarget->mBuffer.Data(); } + +size_t MediaRawDataWriter::Size() { return mTarget->Size(); } + +void MediaRawDataWriter::PopFront(size_t aSize) { + mTarget->mBuffer.PopFront(aSize); +} + +const char* CryptoSchemeToString(const CryptoScheme& aScheme) { + switch (aScheme) { + case CryptoScheme::None: + return "None"; + case CryptoScheme::Cenc: + return "Cenc"; + case CryptoScheme::Cbcs: + return "Cbcs"; + default: + MOZ_ASSERT_UNREACHABLE(); + return ""; + } +} + +} // namespace mozilla diff --git a/dom/media/MediaData.h b/dom/media/MediaData.h new file mode 100644 index 0000000000..ee1e204815 --- /dev/null +++ b/dom/media/MediaData.h @@ -0,0 +1,762 @@ +/* -*- Mode: C++; tab-width: 8; 
indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#if !defined(MediaData_h) +# define MediaData_h + +# include "AudioConfig.h" +# include "AudioSampleFormat.h" +# include "ImageTypes.h" +# include "MediaResult.h" +# include "SharedBuffer.h" +# include "TimeUnits.h" +# include "mozilla/CheckedInt.h" +# include "mozilla/Maybe.h" +# include "mozilla/PodOperations.h" +# include "mozilla/RefPtr.h" +# include "mozilla/Result.h" +# include "mozilla/Span.h" +# include "mozilla/UniquePtr.h" +# include "mozilla/UniquePtrExtensions.h" +# include "mozilla/gfx/Rect.h" +# include "nsString.h" +# include "nsTArray.h" + +namespace mozilla { + +namespace layers { +class Image; +class ImageContainer; +class KnowsCompositor; +} // namespace layers + +class MediaByteBuffer; +class TrackInfoSharedPtr; + +// AlignedBuffer: +// Memory allocations are fallibles. Methods return a boolean indicating if +// memory allocations were successful. Return values should always be checked. +// AlignedBuffer::mData will be nullptr if no memory has been allocated or if +// an error occurred during construction. +// Existing data is only ever modified if new memory allocation has succeeded +// and preserved if not. +// +// The memory referenced by mData will always be Alignment bytes aligned and the +// underlying buffer will always have a size such that Alignment bytes blocks +// can be used to read the content, regardless of the mSize value. Buffer is +// zeroed on creation, elements are not individually constructed. +// An Alignment value of 0 means that the data isn't aligned. +// +// Type must be trivially copyable. +// +// AlignedBuffer can typically be used in place of UniquePtr however +// care must be taken as all memory allocations are fallible. 
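+// Every mutating call (SetLength, Prepend, Append, Replace) follows the same
+// pattern and its return value must be checked, e.g.:
+//   AlignedFloatBuffer buffer;
+//   if (!buffer.SetLength(samples)) {
+//     return NS_ERROR_OUT_OF_MEMORY;
+//   }
+//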
+// Example: +// auto buffer = MakeUniqueFallible(samples) +// becomes: AlignedFloatBuffer buffer(samples) +// +// auto buffer = MakeUnique(samples) +// becomes: +// AlignedFloatBuffer buffer(samples); +// if (!buffer) { return NS_ERROR_OUT_OF_MEMORY; } +class InflatableShortBuffer; +template +class AlignedBuffer { + public: + friend InflatableShortBuffer; + AlignedBuffer() + : mData(nullptr), mLength(0), mBuffer(nullptr), mCapacity(0) {} + + explicit AlignedBuffer(size_t aLength) + : mData(nullptr), mLength(0), mBuffer(nullptr), mCapacity(0) { + if (EnsureCapacity(aLength)) { + mLength = aLength; + } + } + + AlignedBuffer(const Type* aData, size_t aLength) : AlignedBuffer(aLength) { + if (!mData) { + return; + } + PodCopy(mData, aData, aLength); + } + + AlignedBuffer(const AlignedBuffer& aOther) + : AlignedBuffer(aOther.Data(), aOther.Length()) {} + + AlignedBuffer(AlignedBuffer&& aOther) noexcept + : mData(aOther.mData), + mLength(aOther.mLength), + mBuffer(std::move(aOther.mBuffer)), + mCapacity(aOther.mCapacity) { + aOther.mData = nullptr; + aOther.mLength = 0; + aOther.mCapacity = 0; + } + + AlignedBuffer& operator=(AlignedBuffer&& aOther) noexcept { + this->~AlignedBuffer(); + new (this) AlignedBuffer(std::move(aOther)); + return *this; + } + + Type* Data() const { return mData; } + size_t Length() const { return mLength; } + size_t Size() const { return mLength * sizeof(Type); } + Type& operator[](size_t aIndex) { + MOZ_ASSERT(aIndex < mLength); + return mData[aIndex]; + } + const Type& operator[](size_t aIndex) const { + MOZ_ASSERT(aIndex < mLength); + return mData[aIndex]; + } + // Set length of buffer, allocating memory as required. + // If memory is allocated, additional buffer area is filled with 0. + bool SetLength(size_t aLength) { + if (aLength > mLength && !EnsureCapacity(aLength)) { + return false; + } + mLength = aLength; + return true; + } + // Add aData at the beginning of buffer. + bool Prepend(const Type* aData, size_t aLength) { + if (!EnsureCapacity(aLength + mLength)) { + return false; + } + + // Shift the data to the right by aLength to leave room for the new data. + PodMove(mData + aLength, mData, mLength); + PodCopy(mData, aData, aLength); + + mLength += aLength; + return true; + } + // Add aData at the end of buffer. + bool Append(const Type* aData, size_t aLength) { + if (!EnsureCapacity(aLength + mLength)) { + return false; + } + + PodCopy(mData + mLength, aData, aLength); + + mLength += aLength; + return true; + } + // Replace current content with aData. + bool Replace(const Type* aData, size_t aLength) { + // If aLength is smaller than our current length, we leave the buffer as is, + // only adjusting the reported length. + if (!EnsureCapacity(aLength)) { + return false; + } + + PodCopy(mData, aData, aLength); + mLength = aLength; + return true; + } + // Clear the memory buffer. Will set target mData and mLength to 0. + void Clear() { + mLength = 0; + mData = nullptr; + } + + // Methods for reporting memory. + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + size_t size = aMallocSizeOf(this); + size += aMallocSizeOf(mBuffer.get()); + return size; + } + // AlignedBuffer is typically allocated on the stack. 
As such, you likely + // want to use SizeOfExcludingThis + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { + return aMallocSizeOf(mBuffer.get()); + } + size_t ComputedSizeOfExcludingThis() const { return mCapacity; } + + // For backward compatibility with UniquePtr + Type* get() const { return mData; } + explicit operator bool() const { return mData != nullptr; } + + // Size in bytes of extra space allocated for padding. + static size_t AlignmentPaddingSize() { return AlignmentOffset() * 2; } + + void PopFront(size_t aCount) { + MOZ_DIAGNOSTIC_ASSERT(mLength >= aCount, "Popping too many elements."); + PodMove(mData, mData + aCount, mLength - aCount); + mLength -= aCount; + } + + void PopBack(size_t aCount) { + MOZ_DIAGNOSTIC_ASSERT(mLength >= aCount, "Popping too many elements."); + mLength -= aCount; + } + + private: + static size_t AlignmentOffset() { return Alignment ? Alignment - 1 : 0; } + + // Ensure that the backend buffer can hold aLength data. Will update mData. + // Will enforce that the start of allocated data is always Alignment bytes + // aligned and that it has sufficient end padding to allow for Alignment bytes + // block read as required by some data decoders. + // Returns false if memory couldn't be allocated. + bool EnsureCapacity(size_t aLength) { + if (!aLength) { + // No need to allocate a buffer yet. + return true; + } + const CheckedInt sizeNeeded = + CheckedInt(aLength) * sizeof(Type) + AlignmentPaddingSize(); + + if (!sizeNeeded.isValid() || sizeNeeded.value() >= INT32_MAX) { + // overflow or over an acceptable size. + return false; + } + if (mData && mCapacity >= sizeNeeded.value()) { + return true; + } + auto newBuffer = MakeUniqueFallible(sizeNeeded.value()); + if (!newBuffer) { + return false; + } + + // Find alignment address. + const uintptr_t alignmask = AlignmentOffset(); + Type* newData = reinterpret_cast( + (reinterpret_cast(newBuffer.get()) + alignmask) & + ~alignmask); + MOZ_ASSERT(uintptr_t(newData) % (AlignmentOffset() + 1) == 0); + + MOZ_ASSERT(!mLength || mData); + + PodZero(newData + mLength, aLength - mLength); + if (mLength) { + PodCopy(newData, mData, mLength); + } + + mBuffer = std::move(newBuffer); + mCapacity = sizeNeeded.value(); + mData = newData; + + return true; + } + Type* mData; + size_t mLength{}; // number of elements + UniquePtr mBuffer; + size_t mCapacity{}; // in bytes +}; + +using AlignedByteBuffer = AlignedBuffer; +using AlignedFloatBuffer = AlignedBuffer; +using AlignedShortBuffer = AlignedBuffer; +using AlignedAudioBuffer = AlignedBuffer; + +// A buffer in which int16_t audio can be written to, and then converted to +// float32 audio without reallocating. +// This class is useful when an API hands out int16_t audio but the samples +// need to be immediately converted to f32. +class InflatableShortBuffer { + public: + explicit InflatableShortBuffer(size_t aElementCount) + : mBuffer(aElementCount * 2) {} + AlignedFloatBuffer Inflate() { + // Convert the data from int16_t to f32 in place, in the same buffer. + // The reason this works is because the buffer has in fact twice the + // capacity, and the loop goes backward. 
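+    // For instance, with Length() == 4 the allocation holds four floats'
+    // worth of bytes, but only the first half carries int16_t samples on
+    // entry:
+    //   on entry : [s0][s1][s2][s3][ 8 unused bytes ]
+    //   on exit  : [   f0   ][   f1   ][   f2   ][   f3   ]
+    // Iterating from the last index down to 0 guarantees that every float
+    // store lands on bytes whose int16_t contents, if any, have already been
+    // consumed.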
+ float* output = reinterpret_cast(mBuffer.mData); + for (size_t i = Length(); i--;) { + output[i] = AudioSampleToFloat(mBuffer.mData[i]); + } + AlignedFloatBuffer rv; + rv.mBuffer = std::move(mBuffer.mBuffer); + rv.mCapacity = mBuffer.mCapacity; + rv.mLength = Length(); + rv.mData = output; + return rv; + } + size_t Length() const { return mBuffer.mLength / 2; } + int16_t* get() const { return mBuffer.get(); } + explicit operator bool() const { return mBuffer.mData != nullptr; } + + protected: + AlignedShortBuffer mBuffer; +}; + +// Container that holds media samples. +class MediaData { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaData) + + enum class Type : uint8_t { AUDIO_DATA = 0, VIDEO_DATA, RAW_DATA, NULL_DATA }; + static const char* TypeToStr(Type aType) { + switch (aType) { + case Type::AUDIO_DATA: + return "AUDIO_DATA"; + case Type::VIDEO_DATA: + return "VIDEO_DATA"; + case Type::RAW_DATA: + return "RAW_DATA"; + case Type::NULL_DATA: + return "NULL_DATA"; + default: + MOZ_CRASH("bad value"); + } + } + + MediaData(Type aType, int64_t aOffset, const media::TimeUnit& aTimestamp, + const media::TimeUnit& aDuration) + : mType(aType), + mOffset(aOffset), + mTime(aTimestamp), + mTimecode(aTimestamp), + mDuration(aDuration), + mKeyframe(false) {} + + // Type of contained data. + const Type mType; + + // Approximate byte offset where this data was demuxed from its media. + int64_t mOffset; + + // Start time of sample. + media::TimeUnit mTime; + + // Codec specific internal time code. For Ogg based codecs this is the + // granulepos. + media::TimeUnit mTimecode; + + // Duration of sample, in microseconds. + media::TimeUnit mDuration; + + bool mKeyframe; + + media::TimeUnit GetEndTime() const { return mTime + mDuration; } + + media::TimeUnit GetEndTimecode() const { return mTimecode + mDuration; } + + bool HasValidTime() const { + return mTime.IsValid() && mTimecode.IsValid() && mDuration.IsValid() && + GetEndTime().IsValid() && GetEndTimecode().IsValid(); + } + + template + const ReturnType* As() const { + MOZ_ASSERT(this->mType == ReturnType::sType); + return static_cast(this); + } + + template + ReturnType* As() { + MOZ_ASSERT(this->mType == ReturnType::sType); + return static_cast(this); + } + + protected: + explicit MediaData(Type aType) : mType(aType), mOffset(0), mKeyframe(false) {} + + virtual ~MediaData() = default; +}; + +// NullData is for decoder generating a sample which doesn't need to be +// rendered. +class NullData : public MediaData { + public: + NullData(int64_t aOffset, const media::TimeUnit& aTime, + const media::TimeUnit& aDuration) + : MediaData(Type::NULL_DATA, aOffset, aTime, aDuration) {} + + static const Type sType = Type::NULL_DATA; +}; + +// Holds chunk a decoded audio frames. +class AudioData : public MediaData { + public: + AudioData(int64_t aOffset, const media::TimeUnit& aTime, + AlignedAudioBuffer&& aData, uint32_t aChannels, uint32_t aRate, + uint32_t aChannelMap = AudioConfig::ChannelLayout::UNKNOWN_MAP); + + static const Type sType = Type::AUDIO_DATA; + static const char* sTypeName; + + // Access the buffer as a Span. + Span Data() const; + + // Amount of frames for contained data. + uint32_t Frames() const { return mFrames; } + + // Trim the audio buffer such that its apparent content fits within the aTrim + // interval. The actual data isn't removed from the buffer and a followup call + // to SetTrimWindow could restore the content. mDuration, mTime and mFrames + // will be adjusted accordingly. 
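+  // For example, a 2048-frame sample at 48 kHz spans roughly 42.7 ms;
+  // trimming it to a 10 ms window keeps the same backing buffer but makes
+  // Frames() report about 480 frames and moves mTime to the window start.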
+ // Warning: rounding may occurs, in which case the new start time of the audio + // sample may still be lesser than aTrim.mStart. + bool SetTrimWindow(const media::TimeInterval& aTrim); + + // Get the internal audio buffer to be moved. After this call the original + // AudioData will be emptied and can't be used again. + AlignedAudioBuffer MoveableData(); + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const; + + // If mAudioBuffer is null, creates it from mAudioData. + void EnsureAudioBuffer(); + + // Return true if the adjusted time is valid. Caller should handle error when + // the result is invalid. + bool AdjustForStartTime(const media::TimeUnit& aStartTime); + + // This method is used to adjust the original start time, which would change + // `mTime` and `mOriginalTime` together, and should only be used for data + // which hasn't been trimmed before. + void SetOriginalStartTime(const media::TimeUnit& aStartTime); + + const uint32_t mChannels; + // The AudioConfig::ChannelLayout map. Channels are ordered as per SMPTE + // definition. A value of UNKNOWN_MAP indicates unknown layout. + // ChannelMap is an unsigned bitmap compatible with Windows' WAVE and FFmpeg + // channel map. + const AudioConfig::ChannelLayout::ChannelMap mChannelMap; + const uint32_t mRate; + + // At least one of mAudioBuffer/mAudioData must be non-null. + // mChannels channels, each with mFrames frames + RefPtr mAudioBuffer; + + protected: + ~AudioData() = default; + + private: + friend class ArrayOfRemoteAudioData; + AudioDataValue* GetAdjustedData() const; + media::TimeUnit mOriginalTime; + // mFrames frames, each with mChannels values + AlignedAudioBuffer mAudioData; + Maybe mTrimWindow; + // Amount of frames for contained data. + uint32_t mFrames; + size_t mDataOffset = 0; +}; + +namespace layers { +class TextureClient; +class PlanarYCbCrImage; +} // namespace layers + +class VideoInfo; + +// Holds a decoded video frame, in YCbCr format. These are queued in the reader. +class VideoData : public MediaData { + public: + using IntRect = gfx::IntRect; + using IntSize = gfx::IntSize; + using ColorDepth = gfx::ColorDepth; + using ColorRange = gfx::ColorRange; + using YUVColorSpace = gfx::YUVColorSpace; + using ColorSpace2 = gfx::ColorSpace2; + using ChromaSubsampling = gfx::ChromaSubsampling; + using ImageContainer = layers::ImageContainer; + using Image = layers::Image; + using PlanarYCbCrImage = layers::PlanarYCbCrImage; + + static const Type sType = Type::VIDEO_DATA; + static const char* sTypeName; + + // YCbCr data obtained from decoding the video. The index's are: + // 0 = Y + // 1 = Cb + // 2 = Cr + struct YCbCrBuffer { + struct Plane { + uint8_t* mData; + uint32_t mWidth; + uint32_t mHeight; + uint32_t mStride; + uint32_t mSkip; + }; + + Plane mPlanes[3]{}; + YUVColorSpace mYUVColorSpace = YUVColorSpace::Identity; + ColorSpace2 mColorPrimaries = ColorSpace2::UNKNOWN; + ColorDepth mColorDepth = ColorDepth::COLOR_8; + ColorRange mColorRange = ColorRange::LIMITED; + ChromaSubsampling mChromaSubsampling = ChromaSubsampling::FULL; + }; + + // Constructs a VideoData object. If aImage is nullptr, creates a new Image + // holding a copy of the YCbCr data passed in aBuffer. If aImage is not + // nullptr, it's stored as the underlying video image and aBuffer is assumed + // to point to memory within aImage so no copy is made. aTimecode is a codec + // specific number representing the timestamp of the frame of video data. + // Returns nsnull if an error occurs. 
This may indicate that memory couldn't + // be allocated to create the VideoData object, or it may indicate some + // problem with the input data (e.g. negative stride). + + static bool UseUseNV12ForSoftwareDecodedVideoIfPossible( + layers::KnowsCompositor* aAllocator); + + // Creates a new VideoData containing a deep copy of aBuffer. May use + // aContainer to allocate an Image to hold the copied data. + static Result, MediaResult> CreateAndCopyData( + const VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset, + const media::TimeUnit& aTime, const media::TimeUnit& aDuration, + const YCbCrBuffer& aBuffer, bool aKeyframe, + const media::TimeUnit& aTimecode, const IntRect& aPicture, + layers::KnowsCompositor* aAllocator); + + static already_AddRefed CreateAndCopyData( + const VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset, + const media::TimeUnit& aTime, const media::TimeUnit& aDuration, + const YCbCrBuffer& aBuffer, const YCbCrBuffer::Plane& aAlphaPlane, + bool aKeyframe, const media::TimeUnit& aTimecode, + const IntRect& aPicture); + + static already_AddRefed CreateFromImage( + const IntSize& aDisplay, int64_t aOffset, const media::TimeUnit& aTime, + const media::TimeUnit& aDuration, const RefPtr& aImage, + bool aKeyframe, const media::TimeUnit& aTimecode); + + // Initialize PlanarYCbCrImage. Only When aCopyData is true, + // video data is copied to PlanarYCbCrImage. + static MediaResult SetVideoDataToImage(PlanarYCbCrImage* aVideoImage, + const VideoInfo& aInfo, + const YCbCrBuffer& aBuffer, + const IntRect& aPicture, + bool aCopyData); + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const; + + // Dimensions at which to display the video frame. The picture region + // will be scaled to this size. This is should be the picture region's + // dimensions scaled with respect to its aspect ratio. + const IntSize mDisplay; + + // This frame's image. + RefPtr mImage; + + ColorDepth GetColorDepth() const; + + uint32_t mFrameID; + + VideoData(int64_t aOffset, const media::TimeUnit& aTime, + const media::TimeUnit& aDuration, bool aKeyframe, + const media::TimeUnit& aTimecode, IntSize aDisplay, + uint32_t aFrameID); + + nsCString ToString() const; + + void MarkSentToCompositor() { mSentToCompositor = true; } + bool IsSentToCompositor() { return mSentToCompositor; } + + void UpdateDuration(const media::TimeUnit& aDuration); + void UpdateTimestamp(const media::TimeUnit& aTimestamp); + + // Return true if the adjusted time is valid. Caller should handle error when + // the result is invalid. + bool AdjustForStartTime(const media::TimeUnit& aStartTime); + + void SetNextKeyFrameTime(const media::TimeUnit& aTime) { + mNextKeyFrameTime = aTime; + } + + const media::TimeUnit& NextKeyFrameTime() const { return mNextKeyFrameTime; } + + protected: + ~VideoData(); + + bool mSentToCompositor; + media::TimeUnit mNextKeyFrameTime; +}; + +enum class CryptoScheme : uint8_t { + None, + Cenc, + Cbcs, +}; + +const char* CryptoSchemeToString(const CryptoScheme& aScheme); + +class CryptoTrack { + public: + CryptoTrack() + : mCryptoScheme(CryptoScheme::None), + mIVSize(0), + mCryptByteBlock(0), + mSkipByteBlock(0) {} + CryptoScheme mCryptoScheme; + int32_t mIVSize; + CopyableTArray mKeyId; + uint8_t mCryptByteBlock; + uint8_t mSkipByteBlock; + CopyableTArray mConstantIV; + + bool IsEncrypted() const { return mCryptoScheme != CryptoScheme::None; } +}; + +class CryptoSample : public CryptoTrack { + public: + // The num clear bytes in each subsample. 
The nth element in the array is the + // number of clear bytes at the start of the nth subsample. + // Clear sizes are stored as uint16_t in containers per ISO/IEC + // 23001-7, but we store them as uint32_t for 2 reasons + // - The Widevine CDM accepts clear sizes as uint32_t. + // - When converting samples to Annex B we modify the clear sizes and + // clear sizes near UINT16_MAX can overflow if stored in a uint16_t. + CopyableTArray mPlainSizes; + // The num encrypted bytes in each subsample. The nth element in the array is + // the number of encrypted bytes at the start of the nth subsample. + CopyableTArray mEncryptedSizes; + CopyableTArray mIV; + CopyableTArray> mInitDatas; + nsString mInitDataType; +}; + +// MediaRawData is a MediaData container used to store demuxed, still compressed +// samples. +// Use MediaRawData::CreateWriter() to obtain a MediaRawDataWriter object that +// provides methods to modify and manipulate the data. +// Memory allocations are fallible. Methods return a boolean indicating if +// memory allocations were successful. Return values should always be checked. +// MediaRawData::mData will be nullptr if no memory has been allocated or if +// an error occurred during construction. +// Existing data is only ever modified if new memory allocation has succeeded +// and preserved if not. +// +// The memory referenced by mData will always be 32 bytes aligned and the +// underlying buffer will always have a size such that 32 bytes blocks can be +// used to read the content, regardless of the mSize value. Buffer is zeroed +// on creation. +// +// Typical usage: create new MediaRawData; create the associated +// MediaRawDataWriter, call SetSize() to allocate memory, write to mData, +// up to mSize bytes. + +class MediaRawData; + +class MediaRawDataWriter { + public: + // Pointer to data or null if not-yet allocated + uint8_t* Data(); + // Writeable size of buffer. + size_t Size(); + // Writeable reference to MediaRawData::mCryptoInternal + CryptoSample& mCrypto; + + // Data manipulation methods. mData and mSize may be updated accordingly. + + // Set size of buffer, allocating memory as required. + // If memory is allocated, additional buffer area is filled with 0. + [[nodiscard]] bool SetSize(size_t aSize); + // Add aData at the beginning of buffer. + [[nodiscard]] bool Prepend(const uint8_t* aData, size_t aSize); + [[nodiscard]] bool Append(const uint8_t* aData, size_t aSize); + // Replace current content with aData. + [[nodiscard]] bool Replace(const uint8_t* aData, size_t aSize); + // Clear the memory buffer. Will set target mData and mSize to 0. + void Clear(); + // Remove aSize bytes from the front of the sample. + void PopFront(size_t aSize); + + private: + friend class MediaRawData; + explicit MediaRawDataWriter(MediaRawData* aMediaRawData); + [[nodiscard]] bool EnsureSize(size_t aSize); + MediaRawData* mTarget; +}; + +class MediaRawData final : public MediaData { + public: + MediaRawData(); + MediaRawData(const uint8_t* aData, size_t aSize); + MediaRawData(const uint8_t* aData, size_t aSize, const uint8_t* aAlphaData, + size_t aAlphaSize); + explicit MediaRawData(AlignedByteBuffer&& aData); + MediaRawData(AlignedByteBuffer&& aData, AlignedByteBuffer&& aAlphaData); + + // Pointer to data or null if not-yet allocated + const uint8_t* Data() const { return mBuffer.Data(); } + // Pointer to alpha data or null if not-yet allocated + const uint8_t* AlphaData() const { return mAlphaBuffer.Data(); } + // Size of buffer. 
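+  // This is the number of compressed bytes reachable through Data(); the
+  // alpha buffer, if any, is reported separately by AlphaSize().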
+ size_t Size() const { return mBuffer.Length(); } + size_t AlphaSize() const { return mAlphaBuffer.Length(); } + size_t ComputedSizeOfIncludingThis() const { + return sizeof(*this) + mBuffer.ComputedSizeOfExcludingThis() + + mAlphaBuffer.ComputedSizeOfExcludingThis(); + } + // Access the buffer as a Span. + operator Span() { return Span{Data(), Size()}; } + + const CryptoSample& mCrypto; + RefPtr mExtraData; + + // Used by the Vorbis decoder and Ogg demuxer. + // Indicates that this is the last packet of the stream. + bool mEOS = false; + + RefPtr mTrackInfo; + + // Used to indicate the id of the temporal scalability layer. + Maybe mTemporalLayerId; + + // May contain the original start time and duration of the frames. + // mOriginalPresentationWindow.mStart would always be less or equal to mTime + // and mOriginalPresentationWindow.mEnd equal or greater to mTime + mDuration. + // This is used when the sample should get cropped so that its content will + // actually start on mTime and go for mDuration. If this interval is set, then + // the decoder should crop the content accordingly. + Maybe mOriginalPresentationWindow; + + // If it's true, the `mCrypto` should be copied into the remote data as well. + // Currently this is only used for the media engine DRM playback. + bool mShouldCopyCryptoToRemoteRawData = false; + + // It's only used when the remote decoder reconstructs the media raw data. + CryptoSample& GetWritableCrypto() { return mCryptoInternal; } + + // Return a deep copy or nullptr if out of memory. + already_AddRefed Clone() const; + // Create a MediaRawDataWriter for this MediaRawData. The writer is not + // thread-safe. + UniquePtr CreateWriter(); + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const; + + protected: + ~MediaRawData(); + + private: + friend class MediaRawDataWriter; + friend class ArrayOfRemoteMediaRawData; + AlignedByteBuffer mBuffer; + AlignedByteBuffer mAlphaBuffer; + CryptoSample mCryptoInternal; + MediaRawData(const MediaRawData&); // Not implemented +}; + +// MediaByteBuffer is a ref counted infallible TArray. +class MediaByteBuffer : public nsTArray { + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaByteBuffer); + MediaByteBuffer() = default; + explicit MediaByteBuffer(size_t aCapacity) : nsTArray(aCapacity) {} + + private: + ~MediaByteBuffer() = default; +}; + +// MediaAlignedByteBuffer is a ref counted AlignedByteBuffer whose memory +// allocations are fallible. +class MediaAlignedByteBuffer final : public AlignedByteBuffer { + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaAlignedByteBuffer); + MediaAlignedByteBuffer() = default; + MediaAlignedByteBuffer(const uint8_t* aData, size_t aLength) + : AlignedByteBuffer(aData, aLength) {} + + private: + ~MediaAlignedByteBuffer() = default; +}; + +} // namespace mozilla + +#endif // MediaData_h diff --git a/dom/media/MediaDataDemuxer.h b/dom/media/MediaDataDemuxer.h new file mode 100644 index 0000000000..79ef5f5f0a --- /dev/null +++ b/dom/media/MediaDataDemuxer.h @@ -0,0 +1,213 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#if !defined(MediaDataDemuxer_h) +# define MediaDataDemuxer_h + +# include "DecoderDoctorLogger.h" +# include "mozilla/MozPromise.h" +# include "mozilla/UniquePtr.h" + +# include "MediaData.h" +# include "MediaInfo.h" +# include "MediaResult.h" +# include "TimeUnits.h" +# include "nsISupportsImpl.h" +# include "mozilla/RefPtr.h" +# include "nsTArray.h" + +namespace mozilla { + +class MediaTrackDemuxer; +class TrackMetadataHolder; + +DDLoggedTypeDeclName(MediaDataDemuxer); +DDLoggedTypeName(MediaTrackDemuxer); + +// Allows reading the media data: to retrieve the metadata and demux samples. +// MediaDataDemuxer isn't designed to be thread safe. +// When used by the MediaFormatDecoder, care is taken to ensure that the demuxer +// will never be called from more than one thread at once. +class MediaDataDemuxer : public DecoderDoctorLifeLogger { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataDemuxer) + + typedef MozPromise + InitPromise; + + // Initializes the demuxer. Other methods cannot be called unless + // initialization has completed and succeeded. + // Typically a demuxer will wait to parse the metadata before resolving the + // promise. The promise must not be resolved until sufficient data is + // supplied. For example, an incomplete metadata would cause the promise to be + // rejected should no more data be coming, while the demuxer would wait + // otherwise. + virtual RefPtr Init() = 0; + + // Returns the number of tracks of aType type available. A value of + // 0 indicates that no such type is available. + virtual uint32_t GetNumberTracks(TrackInfo::TrackType aType) const = 0; + + // Returns the MediaTrackDemuxer associated with aTrackNumber aType track. + // aTrackNumber is not to be confused with the Track ID. + // aTrackNumber must be constrained between 0 and GetNumberTracks(aType) - 1 + // The actual Track ID is to be retrieved by calling + // MediaTrackDemuxer::TrackInfo. + virtual already_AddRefed GetTrackDemuxer( + TrackInfo::TrackType aType, uint32_t aTrackNumber) = 0; + + // Returns true if the underlying resource allows seeking. + virtual bool IsSeekable() const = 0; + + // Returns true if the underlying resource can only seek within buffered + // ranges. + virtual bool IsSeekableOnlyInBufferedRanges() const { return false; } + + // Returns the media's crypto information, or nullptr if media isn't + // encrypted. + virtual UniquePtr GetCrypto() { return nullptr; } + + // Notifies the demuxer that the underlying resource has received more data + // since the demuxer was initialized. + // The demuxer can use this mechanism to inform all track demuxers that new + // data is available and to refresh its buffered range. + virtual void NotifyDataArrived() {} + + // Notifies the demuxer that the underlying resource has had data removed + // since the demuxer was initialized. + // The demuxer can use this mechanism to inform all track demuxers to update + // its buffered range. + // This will be called should the demuxer be used with MediaSource. + virtual void NotifyDataRemoved() {} + + // Indicate to MediaFormatReader if it should compute the start time + // of the demuxed data. If true (default) the first sample returned will be + // used as reference time base. 
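+  // A demuxer whose samples already carry absolute presentation times can
+  // return false so that the first sample's time is not treated as an offset.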
+ virtual bool ShouldComputeStartTime() const { return true; } + + protected: + virtual ~MediaDataDemuxer() = default; +}; + +class MediaTrackDemuxer : public DecoderDoctorLifeLogger { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaTrackDemuxer) + + class SamplesHolder { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesHolder) + + void AppendSample(RefPtr& aSample) { + MOZ_DIAGNOSTIC_ASSERT(aSample->HasValidTime()); + mSamples.AppendElement(aSample); + } + + const nsTArray>& GetSamples() const { + return mSamples; + } + + // This method is only used to do the move semantic for mSamples, do not + // append any element to the samples we returns. We should always append new + // sample to mSamples via `AppendSample()`. + nsTArray>& GetMovableSamples() { return mSamples; } + + private: + ~SamplesHolder() = default; + nsTArray> mSamples; + }; + + class SkipFailureHolder { + public: + SkipFailureHolder(const MediaResult& aFailure, uint32_t aSkipped) + : mFailure(aFailure), mSkipped(aSkipped) {} + MediaResult mFailure; + uint32_t mSkipped; + }; + + typedef MozPromise + SeekPromise; + typedef MozPromise, MediaResult, + /* IsExclusive = */ true> + SamplesPromise; + typedef MozPromise + SkipAccessPointPromise; + + // Returns the TrackInfo (a.k.a Track Description) for this track. + // The TrackInfo returned will be: + // TrackInfo::kVideoTrack -> VideoInfo. + // TrackInfo::kAudioTrack -> AudioInfo. + // respectively. + virtual UniquePtr GetInfo() const = 0; + + // Seeks to aTime. Upon success, SeekPromise will be resolved with the + // actual time seeked to. Typically the random access point time + virtual RefPtr Seek(const media::TimeUnit& aTime) = 0; + + // Returns the next aNumSamples sample(s) available. + // If only a lesser amount of samples is available, only those will be + // returned. + // A aNumSamples value of -1 indicates to return all remaining samples. + // A video sample is typically made of a single video frame while an audio + // sample will contains multiple audio frames. + virtual RefPtr GetSamples(int32_t aNumSamples = 1) = 0; + + // Returns true if a call to GetSamples() may block while waiting on the + // underlying resource to return the data. + // This is used by the MediaFormatReader to determine if buffering heuristics + // should be used. + virtual bool GetSamplesMayBlock() const { return true; } + + // Cancel all pending actions (Seek, GetSamples) and reset current state + // All pending promises are to be rejected with CANCEL. + // The next call to GetSamples would return the first sample available in the + // track. + virtual void Reset() = 0; + + // Returns timestamp of next random access point or an error if the demuxer + // can't report this. + virtual nsresult GetNextRandomAccessPoint(media::TimeUnit* aTime) { + return NS_ERROR_NOT_IMPLEMENTED; + } + + // Returns timestamp of previous random access point or an error if the + // demuxer can't report this. + virtual nsresult GetPreviousRandomAccessPoint(media::TimeUnit* aTime) { + return NS_ERROR_NOT_IMPLEMENTED; + } + + // Skip frames until the next Random Access Point located after + // aTimeThreshold. + // The first frame returned by the next call to GetSamples() will be the + // first random access point found after aTimeThreshold. + // Upon success, returns the number of frames skipped. + virtual RefPtr SkipToNextRandomAccessPoint( + const media::TimeUnit& aTimeThreshold) = 0; + + // Gets the resource's offset used for the last Seek() or GetSample(). 
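+  // The offset is a byte position within the underlying media resource.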
+ // A negative value indicates that this functionality isn't supported. + virtual int64_t GetResourceOffset() const { return -1; } + + virtual TrackInfo::TrackType GetType() const { return GetInfo()->GetType(); } + + virtual media::TimeIntervals GetBuffered() = 0; + + // By default, it is assumed that the entire resource can be evicted once + // all samples have been demuxed. + virtual int64_t GetEvictionOffset(const media::TimeUnit& aTime) { + return INT64_MAX; + } + + // If the MediaTrackDemuxer and MediaDataDemuxer hold cross references. + // BreakCycles must be overridden. + virtual void BreakCycles() {} + + protected: + virtual ~MediaTrackDemuxer() = default; +}; + +} // namespace mozilla + +#endif // MediaDataDemuxer_h diff --git a/dom/media/MediaDecoder.cpp b/dom/media/MediaDecoder.cpp new file mode 100644 index 0000000000..c7fdcb6844 --- /dev/null +++ b/dom/media/MediaDecoder.cpp @@ -0,0 +1,1698 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "MediaDecoder.h" + +#include "AudioDeviceInfo.h" +#include "DOMMediaStream.h" +#include "DecoderBenchmark.h" +#include "ImageContainer.h" +#include "MediaDecoderStateMachineBase.h" +#include "MediaFormatReader.h" +#include "MediaResource.h" +#include "MediaShutdownManager.h" +#include "MediaTrackGraph.h" +#include "TelemetryProbesReporter.h" +#include "VideoFrameContainer.h" +#include "VideoUtils.h" +#include "mozilla/AbstractThread.h" +#include "mozilla/dom/DOMTypes.h" +#include "mozilla/FloatingPoint.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Preferences.h" +#include "mozilla/StaticPrefs_media.h" +#include "mozilla/StaticPtr.h" +#include "mozilla/Telemetry.h" +#include "mozilla/Unused.h" +#include "mozilla/glean/GleanMetrics.h" +#include "nsComponentManagerUtils.h" +#include "nsContentUtils.h" +#include "nsError.h" +#include "nsIMemoryReporter.h" +#include "nsPrintfCString.h" +#include "nsServiceManagerUtils.h" +#include "nsTArray.h" +#include "WindowRenderer.h" +#include +#include +#include + +using namespace mozilla::dom; +using namespace mozilla::layers; +using namespace mozilla::media; + +namespace mozilla { + +// avoid redefined macro in unified build +#undef LOG +#undef DUMP + +LazyLogModule gMediaDecoderLog("MediaDecoder"); + +#define LOG(x, ...) \ + DDMOZ_LOG(gMediaDecoderLog, LogLevel::Debug, x, ##__VA_ARGS__) + +#define DUMP(x, ...) printf_stderr(x "\n", ##__VA_ARGS__) + +#define NS_DispatchToMainThread(...) 
CompileError_UseAbstractMainThreadInstead + +static const char* ToPlayStateStr(MediaDecoder::PlayState aState) { + switch (aState) { + case MediaDecoder::PLAY_STATE_LOADING: + return "LOADING"; + case MediaDecoder::PLAY_STATE_PAUSED: + return "PAUSED"; + case MediaDecoder::PLAY_STATE_PLAYING: + return "PLAYING"; + case MediaDecoder::PLAY_STATE_ENDED: + return "ENDED"; + case MediaDecoder::PLAY_STATE_SHUTDOWN: + return "SHUTDOWN"; + default: + MOZ_ASSERT_UNREACHABLE("Invalid playState."); + } + return "UNKNOWN"; +} + +class MediaMemoryTracker : public nsIMemoryReporter { + virtual ~MediaMemoryTracker(); + + NS_DECL_THREADSAFE_ISUPPORTS + NS_DECL_NSIMEMORYREPORTER + + MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf); + + MediaMemoryTracker(); + void InitMemoryReporter(); + + static StaticRefPtr sUniqueInstance; + + static MediaMemoryTracker* UniqueInstance() { + if (!sUniqueInstance) { + sUniqueInstance = new MediaMemoryTracker(); + sUniqueInstance->InitMemoryReporter(); + } + return sUniqueInstance; + } + + using DecodersArray = nsTArray; + static DecodersArray& Decoders() { return UniqueInstance()->mDecoders; } + + DecodersArray mDecoders; + + public: + static void AddMediaDecoder(MediaDecoder* aDecoder) { + Decoders().AppendElement(aDecoder); + } + + static void RemoveMediaDecoder(MediaDecoder* aDecoder) { + DecodersArray& decoders = Decoders(); + decoders.RemoveElement(aDecoder); + if (decoders.IsEmpty()) { + sUniqueInstance = nullptr; + } + } +}; + +StaticRefPtr MediaMemoryTracker::sUniqueInstance; + +LazyLogModule gMediaTimerLog("MediaTimer"); + +constexpr TimeUnit MediaDecoder::DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED; + +void MediaDecoder::InitStatics() { + MOZ_ASSERT(NS_IsMainThread()); + // Eagerly init gMediaDecoderLog to work around bug 1415441. + MOZ_LOG(gMediaDecoderLog, LogLevel::Info, ("MediaDecoder::InitStatics")); + +#if defined(NIGHTLY_BUILD) + // Allow people to force a bit but try to warn them about filing bugs if audio + // decoding does not work on utility + static const bool allowLockPrefs = + PR_GetEnv("MOZ_DONT_LOCK_UTILITY_PLZ_FILE_A_BUG") == nullptr; + if (XRE_IsParentProcess() && allowLockPrefs) { + // Lock Utility process preferences so that people cannot opt-out of + // Utility process + Preferences::Lock("media.utility-process.enabled"); +# if defined(MOZ_FFMPEG) + Preferences::Lock("media.utility-ffmpeg.enabled"); +# endif // defined(MOZ_FFMPEG) +# if defined(MOZ_FFVPX) + Preferences::Lock("media.utility-ffvpx.enabled"); +# endif // defined(MOZ_FFVPX) +# if defined(MOZ_WMF) + Preferences::Lock("media.utility-wmf.enabled"); +# endif // defined(MOZ_WMF) +# if defined(MOZ_APPLEMEDIA) + Preferences::Lock("media.utility-applemedia.enabled"); +# endif // defined(MOZ_APPLEMEDIA) + Preferences::Lock("media.utility-vorbis.enabled"); + Preferences::Lock("media.utility-wav.enabled"); + Preferences::Lock("media.utility-opus.enabled"); + } +#endif // defined(NIGHTLY_BUILD) +} + +NS_IMPL_ISUPPORTS(MediaMemoryTracker, nsIMemoryReporter) + +void MediaDecoder::NotifyOwnerActivityChanged(bool aIsOwnerInvisible, + bool aIsOwnerConnected) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + SetElementVisibility(aIsOwnerInvisible, aIsOwnerConnected); + + NotifyCompositor(); +} + +void MediaDecoder::Pause() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + LOG("Pause"); + if (mPlayState == PLAY_STATE_LOADING || IsEnded()) { + mNextState = PLAY_STATE_PAUSED; + return; + } + ChangeState(PLAY_STATE_PAUSED); +} + +void 
MediaDecoder::SetVolume(double aVolume) { + MOZ_ASSERT(NS_IsMainThread()); + mVolume = aVolume; +} + +RefPtr MediaDecoder::SetSink(AudioDeviceInfo* aSinkDevice) { + MOZ_ASSERT(NS_IsMainThread()); + mSinkDevice = aSinkDevice; + return GetStateMachine()->InvokeSetSink(aSinkDevice); +} + +void MediaDecoder::SetOutputCaptureState(OutputCaptureState aState, + SharedDummyTrack* aDummyTrack) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load()."); + MOZ_ASSERT_IF(aState == OutputCaptureState::Capture, aDummyTrack); + mOutputCaptureState = aState; + if (mOutputDummyTrack.Ref().get() != aDummyTrack) { + mOutputDummyTrack = nsMainThreadPtrHandle( + MakeAndAddRef>( + "MediaDecoder::mOutputDummyTrack", aDummyTrack)); + } +} + +void MediaDecoder::AddOutputTrack(RefPtr aTrack) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load()."); + CopyableTArray> tracks = mOutputTracks; + tracks.AppendElement(std::move(aTrack)); + mOutputTracks = tracks; +} + +void MediaDecoder::RemoveOutputTrack( + const RefPtr& aTrack) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load()."); + CopyableTArray> tracks = mOutputTracks; + if (tracks.RemoveElement(aTrack)) { + mOutputTracks = tracks; + } +} + +void MediaDecoder::SetOutputTracksPrincipal( + const RefPtr& aPrincipal) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load()."); + mOutputPrincipal = MakePrincipalHandle(aPrincipal); +} + +double MediaDecoder::GetDuration() { + MOZ_ASSERT(NS_IsMainThread()); + return ToMicrosecondResolution(mDuration.match(DurationToDouble())); +} + +bool MediaDecoder::IsInfinite() const { + MOZ_ASSERT(NS_IsMainThread()); + return std::isinf(mDuration.match(DurationToDouble())); +} + +#define INIT_MIRROR(name, val) \ + name(mOwner->AbstractMainThread(), val, "MediaDecoder::" #name " (Mirror)") +#define INIT_CANONICAL(name, val) \ + name(mOwner->AbstractMainThread(), val, "MediaDecoder::" #name " (Canonical)") + +MediaDecoder::MediaDecoder(MediaDecoderInit& aInit) + : mWatchManager(this, aInit.mOwner->AbstractMainThread()), + mLogicalPosition(0.0), + mDuration(TimeUnit::Invalid()), + mOwner(aInit.mOwner), + mAbstractMainThread(aInit.mOwner->AbstractMainThread()), + mFrameStats(new FrameStatistics()), + mDecoderBenchmark(new DecoderBenchmark()), + mVideoFrameContainer(aInit.mOwner->GetVideoFrameContainer()), + mMinimizePreroll(aInit.mMinimizePreroll), + mFiredMetadataLoaded(false), + mIsOwnerInvisible(false), + mIsOwnerConnected(false), + mForcedHidden(false), + mHasSuspendTaint(aInit.mHasSuspendTaint), + mShouldResistFingerprinting( + aInit.mOwner->ShouldResistFingerprinting(RFPTarget::AudioSampleRate)), + mPlaybackRate(aInit.mPlaybackRate), + mLogicallySeeking(false, "MediaDecoder::mLogicallySeeking"), + INIT_MIRROR(mBuffered, TimeIntervals()), + INIT_MIRROR(mCurrentPosition, TimeUnit::Zero()), + INIT_MIRROR(mStateMachineDuration, NullableTimeUnit()), + INIT_MIRROR(mIsAudioDataAudible, false), + INIT_CANONICAL(mVolume, aInit.mVolume), + INIT_CANONICAL(mPreservesPitch, aInit.mPreservesPitch), + INIT_CANONICAL(mLooping, aInit.mLooping), + INIT_CANONICAL(mStreamName, aInit.mStreamName), + INIT_CANONICAL(mSinkDevice, nullptr), + INIT_CANONICAL(mSecondaryVideoContainer, nullptr), + INIT_CANONICAL(mOutputCaptureState, OutputCaptureState::None), + INIT_CANONICAL(mOutputDummyTrack, nullptr), + INIT_CANONICAL(mOutputTracks, nsTArray>()), + INIT_CANONICAL(mOutputPrincipal, 
PRINCIPAL_HANDLE_NONE), + INIT_CANONICAL(mPlayState, PLAY_STATE_LOADING), + mSameOriginMedia(false), + mVideoDecodingOberver( + new BackgroundVideoDecodingPermissionObserver(this)), + mIsBackgroundVideoDecodingAllowed(false), + mTelemetryReported(false), + mContainerType(aInit.mContainerType), + mTelemetryProbesReporter( + new TelemetryProbesReporter(aInit.mReporterOwner)) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mAbstractMainThread); + MediaMemoryTracker::AddMediaDecoder(this); + + // + // Initialize watchers. + // + + // mDuration + mWatchManager.Watch(mStateMachineDuration, &MediaDecoder::DurationChanged); + + // readyState + mWatchManager.Watch(mPlayState, &MediaDecoder::UpdateReadyState); + // ReadyState computation depends on MediaDecoder::CanPlayThrough, which + // depends on the download rate. + mWatchManager.Watch(mBuffered, &MediaDecoder::UpdateReadyState); + + // mLogicalPosition + mWatchManager.Watch(mCurrentPosition, &MediaDecoder::UpdateLogicalPosition); + mWatchManager.Watch(mPlayState, &MediaDecoder::UpdateLogicalPosition); + mWatchManager.Watch(mLogicallySeeking, &MediaDecoder::UpdateLogicalPosition); + + mWatchManager.Watch(mIsAudioDataAudible, + &MediaDecoder::NotifyAudibleStateChanged); + + mWatchManager.Watch(mVolume, &MediaDecoder::NotifyVolumeChanged); + + mVideoDecodingOberver->RegisterEvent(); +} + +#undef INIT_MIRROR +#undef INIT_CANONICAL + +void MediaDecoder::Shutdown() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + // Unwatch all watch targets to prevent further notifications. + mWatchManager.Shutdown(); + + DiscardOngoingSeekIfExists(); + + // This changes the decoder state to SHUTDOWN and does other things + // necessary to unblock the state machine thread if it's blocked, so + // the asynchronous shutdown in nsDestroyStateMachine won't deadlock. + if (mDecoderStateMachine) { + ShutdownStateMachine()->Then(mAbstractMainThread, __func__, this, + &MediaDecoder::FinishShutdown, + &MediaDecoder::FinishShutdown); + } else { + // Ensure we always unregister asynchronously in order not to disrupt + // the hashtable iterating in MediaShutdownManager::Shutdown(). + RefPtr self = this; + nsCOMPtr r = NS_NewRunnableFunction( + "MediaDecoder::Shutdown", [self]() { self->ShutdownInternal(); }); + mAbstractMainThread->Dispatch(r.forget()); + } + + ChangeState(PLAY_STATE_SHUTDOWN); + mVideoDecodingOberver->UnregisterEvent(); + mVideoDecodingOberver = nullptr; + mOwner = nullptr; +} + +void MediaDecoder::NotifyXPCOMShutdown() { + MOZ_ASSERT(NS_IsMainThread()); + // NotifyXPCOMShutdown will clear its reference to mDecoder. So we must ensure + // that this MediaDecoder stays alive until completion. 
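+  // Holding a strong reference on the stack keeps |this| alive even if the
+  // owner drops its final reference during the calls below.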
+ RefPtr kungFuDeathGrip = this; + if (auto* owner = GetOwner()) { + owner->NotifyXPCOMShutdown(); + } else if (!IsShutdown()) { + Shutdown(); + } + MOZ_DIAGNOSTIC_ASSERT(IsShutdown()); +} + +MediaDecoder::~MediaDecoder() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(IsShutdown()); + MediaMemoryTracker::RemoveMediaDecoder(this); +} + +void MediaDecoder::OnPlaybackEvent(MediaPlaybackEvent&& aEvent) { + switch (aEvent.mType) { + case MediaPlaybackEvent::PlaybackEnded: + PlaybackEnded(); + break; + case MediaPlaybackEvent::SeekStarted: + SeekingStarted(); + break; + case MediaPlaybackEvent::Invalidate: + Invalidate(); + break; + case MediaPlaybackEvent::EnterVideoSuspend: + GetOwner()->DispatchAsyncEvent(u"mozentervideosuspend"_ns); + mTelemetryProbesReporter->OnDecodeSuspended(); + mIsVideoDecodingSuspended = true; + break; + case MediaPlaybackEvent::ExitVideoSuspend: + GetOwner()->DispatchAsyncEvent(u"mozexitvideosuspend"_ns); + mTelemetryProbesReporter->OnDecodeResumed(); + mIsVideoDecodingSuspended = false; + break; + case MediaPlaybackEvent::StartVideoSuspendTimer: + GetOwner()->DispatchAsyncEvent(u"mozstartvideosuspendtimer"_ns); + break; + case MediaPlaybackEvent::CancelVideoSuspendTimer: + GetOwner()->DispatchAsyncEvent(u"mozcancelvideosuspendtimer"_ns); + break; + case MediaPlaybackEvent::VideoOnlySeekBegin: + GetOwner()->DispatchAsyncEvent(u"mozvideoonlyseekbegin"_ns); + break; + case MediaPlaybackEvent::VideoOnlySeekCompleted: + GetOwner()->DispatchAsyncEvent(u"mozvideoonlyseekcompleted"_ns); + break; + default: + break; + } +} + +bool MediaDecoder::IsVideoDecodingSuspended() const { + return mIsVideoDecodingSuspended; +} + +void MediaDecoder::OnPlaybackErrorEvent(const MediaResult& aError) { + MOZ_ASSERT(NS_IsMainThread()); +#ifndef MOZ_WMF_MEDIA_ENGINE + DecodeError(aError); +#else + if (aError != NS_ERROR_DOM_MEDIA_EXTERNAL_ENGINE_NOT_SUPPORTED_ERR && + aError != NS_ERROR_DOM_MEDIA_CDM_PROXY_NOT_SUPPORTED_ERR) { + DecodeError(aError); + return; + } + + // Already in shutting down decoder, no need to create another state machine. + if (mPlayState == PLAY_STATE_SHUTDOWN) { + return; + } + + // External engine can't play the resource or we intentionally disable it, try + // to use our own state machine again. Here we will create a new state machine + // immediately and asynchrously shutdown the old one because we don't want to + // dispatch any task to the old state machine. Therefore, we will disconnect + // anything related with the old state machine, create a new state machine and + // setup events/mirror/etc, then shutdown the old one and release its + // reference once it finishes shutdown. + RefPtr discardStateMachine = + mDecoderStateMachine; + + // Disconnect mirror and events first. + SetStateMachine(nullptr); + DisconnectEvents(); + + // Recreate a state machine and shutdown the old one. + bool needExternalEngine = false; + if (aError == NS_ERROR_DOM_MEDIA_CDM_PROXY_NOT_SUPPORTED_ERR) { +# ifdef MOZ_WMF_CDM + if (aError.GetCDMProxy()->AsWMFCDMProxy()) { + needExternalEngine = true; + } +# endif + } + LOG("Need to create a new %s state machine", + needExternalEngine ? 
"external engine" : "normal"); + + nsresult rv = CreateAndInitStateMachine( + false /* live stream */, + !needExternalEngine /* disable external engine */); + if (NS_WARN_IF(NS_FAILED(rv))) { + LOG("Failed to create a new state machine!"); + glean::mfcdm::ErrorExtra extraData; + extraData.errorName = Some("FAILED_TO_FALLBACK_TO_STATE_MACHINE"_ns); + nsAutoCString resolution; + if (mInfo) { + if (mInfo->HasAudio()) { + extraData.audioCodec = Some(mInfo->mAudio.mMimeType); + } + if (mInfo->HasVideo()) { + extraData.videoCodec = Some(mInfo->mVideo.mMimeType); + DetermineResolutionForTelemetry(*mInfo, resolution); + extraData.resolution = Some(resolution); + } + } + glean::mfcdm::error.Record(Some(extraData)); + if (MOZ_LOG_TEST(gMediaDecoderLog, LogLevel::Debug)) { + nsPrintfCString logMessage{"MFCDM Error event, error=%s", + extraData.errorName->get()}; + if (mInfo) { + if (mInfo->HasAudio()) { + logMessage.Append( + nsPrintfCString{", audio=%s", mInfo->mAudio.mMimeType.get()}); + } + if (mInfo->HasVideo()) { + logMessage.Append(nsPrintfCString{", video=%s, resolution=%s", + mInfo->mVideo.mMimeType.get(), + resolution.get()}); + } + } + LOG("%s", logMessage.get()); + } + } + + // Some attributes might have been set on the destroyed state machine, and + // won't be reflected on the new MDSM by the state mirroring. We need to + // update them manually later, after MDSM finished reading the + // metadata because the MDSM might not be ready to perform the operations yet. + mPendingStatusUpdateForNewlyCreatedStateMachine = true; + + // If there is ongoing seek performed on the old MDSM, cancel it because we + // will perform seeking later again and don't want the old seeking affecting + // us. + DiscardOngoingSeekIfExists(); + + discardStateMachine->BeginShutdown()->Then( + AbstractThread::MainThread(), __func__, [discardStateMachine] {}); +#endif +} + +void MediaDecoder::OnDecoderDoctorEvent(DecoderDoctorEvent aEvent) { + MOZ_ASSERT(NS_IsMainThread()); + // OnDecoderDoctorEvent is disconnected at shutdown time. + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + Document* doc = GetOwner()->GetDocument(); + if (!doc) { + return; + } + DecoderDoctorDiagnostics diags; + diags.StoreEvent(doc, aEvent, __func__); +} + +static const char* NextFrameStatusToStr( + MediaDecoderOwner::NextFrameStatus aStatus) { + switch (aStatus) { + case MediaDecoderOwner::NEXT_FRAME_AVAILABLE: + return "NEXT_FRAME_AVAILABLE"; + case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE: + return "NEXT_FRAME_UNAVAILABLE"; + case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING: + return "NEXT_FRAME_UNAVAILABLE_BUFFERING"; + case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING: + return "NEXT_FRAME_UNAVAILABLE_SEEKING"; + case MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED: + return "NEXT_FRAME_UNINITIALIZED"; + } + return "UNKNOWN"; +} + +void MediaDecoder::OnNextFrameStatus( + MediaDecoderOwner::NextFrameStatus aStatus) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + if (mNextFrameStatus != aStatus) { + LOG("Changed mNextFrameStatus to %s", NextFrameStatusToStr(aStatus)); + mNextFrameStatus = aStatus; + UpdateReadyState(); + } +} + +void MediaDecoder::OnTrackInfoUpdated(const VideoInfo& aVideoInfo, + const AudioInfo& aAudioInfo) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + // Note that we don't check HasVideo() or HasAudio() here, because + // those are checks for existing validity. 
If we always set the values + // to what we receive, then we can go from not-video to video, for + // example. + mInfo->mVideo = aVideoInfo; + mInfo->mAudio = aAudioInfo; + + Invalidate(); + + EnsureTelemetryReported(); +} + +void MediaDecoder::OnSecondaryVideoContainerInstalled( + const RefPtr& aSecondaryVideoContainer) { + MOZ_ASSERT(NS_IsMainThread()); + GetOwner()->OnSecondaryVideoContainerInstalled(aSecondaryVideoContainer); +} + +void MediaDecoder::OnStoreDecoderBenchmark(const VideoInfo& aInfo) { + MOZ_ASSERT(NS_IsMainThread()); + + int32_t videoFrameRate = aInfo.GetFrameRate().ref(); + + if (mFrameStats && videoFrameRate) { + DecoderBenchmarkInfo benchmarkInfo{ + aInfo.mMimeType, + aInfo.mDisplay.width, + aInfo.mDisplay.height, + videoFrameRate, + BitDepthForColorDepth(aInfo.mColorDepth), + }; + + LOG("Store benchmark: Video width=%d, height=%d, frameRate=%d, content " + "type = %s\n", + benchmarkInfo.mWidth, benchmarkInfo.mHeight, benchmarkInfo.mFrameRate, + benchmarkInfo.mContentType.BeginReading()); + + mDecoderBenchmark->Store(benchmarkInfo, mFrameStats); + } +} + +void MediaDecoder::ShutdownInternal() { + MOZ_ASSERT(NS_IsMainThread()); + mVideoFrameContainer = nullptr; + mSecondaryVideoContainer = nullptr; + MediaShutdownManager::Instance().Unregister(this); +} + +void MediaDecoder::FinishShutdown() { + MOZ_ASSERT(NS_IsMainThread()); + SetStateMachine(nullptr); + ShutdownInternal(); +} + +nsresult MediaDecoder::CreateAndInitStateMachine(bool aIsLiveStream, + bool aDisableExternalEngine) { + MOZ_ASSERT(NS_IsMainThread()); + SetStateMachine(CreateStateMachine(aDisableExternalEngine)); + + NS_ENSURE_TRUE(GetStateMachine(), NS_ERROR_FAILURE); + GetStateMachine()->DispatchIsLiveStream(aIsLiveStream); + + nsresult rv = mDecoderStateMachine->Init(this); + NS_ENSURE_SUCCESS(rv, rv); + + // If some parameters got set before the state machine got created, + // set them now + SetStateMachineParameters(); + + return NS_OK; +} + +void MediaDecoder::SetStateMachineParameters() { + MOZ_ASSERT(NS_IsMainThread()); + if (mPlaybackRate != 1 && mPlaybackRate != 0) { + mDecoderStateMachine->DispatchSetPlaybackRate(mPlaybackRate); + } + mTimedMetadataListener = mDecoderStateMachine->TimedMetadataEvent().Connect( + mAbstractMainThread, this, &MediaDecoder::OnMetadataUpdate); + mMetadataLoadedListener = mDecoderStateMachine->MetadataLoadedEvent().Connect( + mAbstractMainThread, this, &MediaDecoder::MetadataLoaded); + mFirstFrameLoadedListener = + mDecoderStateMachine->FirstFrameLoadedEvent().Connect( + mAbstractMainThread, this, &MediaDecoder::FirstFrameLoaded); + + mOnPlaybackEvent = mDecoderStateMachine->OnPlaybackEvent().Connect( + mAbstractMainThread, this, &MediaDecoder::OnPlaybackEvent); + mOnPlaybackErrorEvent = mDecoderStateMachine->OnPlaybackErrorEvent().Connect( + mAbstractMainThread, this, &MediaDecoder::OnPlaybackErrorEvent); + mOnDecoderDoctorEvent = mDecoderStateMachine->OnDecoderDoctorEvent().Connect( + mAbstractMainThread, this, &MediaDecoder::OnDecoderDoctorEvent); + mOnMediaNotSeekable = mDecoderStateMachine->OnMediaNotSeekable().Connect( + mAbstractMainThread, this, &MediaDecoder::OnMediaNotSeekable); + mOnNextFrameStatus = mDecoderStateMachine->OnNextFrameStatus().Connect( + mAbstractMainThread, this, &MediaDecoder::OnNextFrameStatus); + mOnTrackInfoUpdated = mDecoderStateMachine->OnTrackInfoUpdatedEvent().Connect( + mAbstractMainThread, this, &MediaDecoder::OnTrackInfoUpdated); + mOnSecondaryVideoContainerInstalled = + 
mDecoderStateMachine->OnSecondaryVideoContainerInstalled().Connect( + mAbstractMainThread, this, + &MediaDecoder::OnSecondaryVideoContainerInstalled); + mOnStoreDecoderBenchmark = mReader->OnStoreDecoderBenchmark().Connect( + mAbstractMainThread, this, &MediaDecoder::OnStoreDecoderBenchmark); + + mOnEncrypted = mReader->OnEncrypted().Connect( + mAbstractMainThread, GetOwner(), &MediaDecoderOwner::DispatchEncrypted); + mOnWaitingForKey = mReader->OnWaitingForKey().Connect( + mAbstractMainThread, GetOwner(), &MediaDecoderOwner::NotifyWaitingForKey); + mOnDecodeWarning = mReader->OnDecodeWarning().Connect( + mAbstractMainThread, GetOwner(), &MediaDecoderOwner::DecodeWarning); +} + +void MediaDecoder::DisconnectEvents() { + MOZ_ASSERT(NS_IsMainThread()); + mTimedMetadataListener.Disconnect(); + mMetadataLoadedListener.Disconnect(); + mFirstFrameLoadedListener.Disconnect(); + mOnPlaybackEvent.Disconnect(); + mOnPlaybackErrorEvent.Disconnect(); + mOnDecoderDoctorEvent.Disconnect(); + mOnMediaNotSeekable.Disconnect(); + mOnEncrypted.Disconnect(); + mOnWaitingForKey.Disconnect(); + mOnDecodeWarning.Disconnect(); + mOnNextFrameStatus.Disconnect(); + mOnTrackInfoUpdated.Disconnect(); + mOnSecondaryVideoContainerInstalled.Disconnect(); + mOnStoreDecoderBenchmark.Disconnect(); +} + +RefPtr MediaDecoder::ShutdownStateMachine() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(GetStateMachine()); + DisconnectEvents(); + return mDecoderStateMachine->BeginShutdown(); +} + +void MediaDecoder::Play() { + MOZ_ASSERT(NS_IsMainThread()); + + NS_ASSERTION(mDecoderStateMachine != nullptr, "Should have state machine."); + LOG("Play"); + if (mPlaybackRate == 0) { + return; + } + + if (IsEnded()) { + Seek(0, SeekTarget::PrevSyncPoint); + return; + } + + if (mPlayState == PLAY_STATE_LOADING) { + mNextState = PLAY_STATE_PLAYING; + return; + } + + ChangeState(PLAY_STATE_PLAYING); +} + +void MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + MOZ_ASSERT(aTime >= 0.0, "Cannot seek to a negative value."); + + LOG("Seek"); + auto time = TimeUnit::FromSeconds(aTime); + + mLogicalPosition = aTime; + mLogicallySeeking = true; + SeekTarget target = SeekTarget(time, aSeekType); + CallSeek(target); + + if (mPlayState == PLAY_STATE_ENDED) { + ChangeState(GetOwner()->GetPaused() ? 
PLAY_STATE_PAUSED + : PLAY_STATE_PLAYING); + } +} + +void MediaDecoder::SetDelaySeekMode(bool aShouldDelaySeek) { + MOZ_ASSERT(NS_IsMainThread()); + LOG("SetDelaySeekMode, shouldDelaySeek=%d", aShouldDelaySeek); + if (mShouldDelaySeek == aShouldDelaySeek) { + return; + } + mShouldDelaySeek = aShouldDelaySeek; + if (!mShouldDelaySeek && mDelayedSeekTarget) { + Seek(mDelayedSeekTarget->GetTime().ToSeconds(), + mDelayedSeekTarget->GetType()); + mDelayedSeekTarget.reset(); + } +} + +void MediaDecoder::DiscardOngoingSeekIfExists() { + MOZ_ASSERT(NS_IsMainThread()); + mSeekRequest.DisconnectIfExists(); +} + +void MediaDecoder::CallSeek(const SeekTarget& aTarget) { + MOZ_ASSERT(NS_IsMainThread()); + if (mShouldDelaySeek) { + LOG("Delay seek to %f and store it to delayed seek target", + mDelayedSeekTarget->GetTime().ToSeconds()); + mDelayedSeekTarget = Some(aTarget); + return; + } + DiscardOngoingSeekIfExists(); + mDecoderStateMachine->InvokeSeek(aTarget) + ->Then(mAbstractMainThread, __func__, this, &MediaDecoder::OnSeekResolved, + &MediaDecoder::OnSeekRejected) + ->Track(mSeekRequest); +} + +double MediaDecoder::GetCurrentTime() { + MOZ_ASSERT(NS_IsMainThread()); + return mLogicalPosition; +} + +void MediaDecoder::OnMetadataUpdate(TimedMetadata&& aMetadata) { + MOZ_ASSERT(NS_IsMainThread()); + MetadataLoaded(MakeUnique(*aMetadata.mInfo), + UniquePtr(std::move(aMetadata.mTags)), + MediaDecoderEventVisibility::Observable); + FirstFrameLoaded(std::move(aMetadata.mInfo), + MediaDecoderEventVisibility::Observable); +} + +void MediaDecoder::MetadataLoaded( + UniquePtr aInfo, UniquePtr aTags, + MediaDecoderEventVisibility aEventVisibility) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + LOG("MetadataLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d", + aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(), + aInfo->HasVideo()); + + mMediaSeekable = aInfo->mMediaSeekable; + mMediaSeekableOnlyInBufferedRanges = + aInfo->mMediaSeekableOnlyInBufferedRanges; + mInfo = std::move(aInfo); + + mTelemetryProbesReporter->OnMediaContentChanged( + TelemetryProbesReporter::MediaInfoToMediaContent(*mInfo)); + + // Make sure the element and the frame (if any) are told about + // our new size. + if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) { + mFiredMetadataLoaded = true; + GetOwner()->MetadataLoaded(mInfo.get(), std::move(aTags)); + } + // Invalidate() will end up calling GetOwner()->UpdateMediaSize with the last + // dimensions retrieved from the video frame container. The video frame + // container contains more up to date dimensions than aInfo. + // So we call Invalidate() after calling GetOwner()->MetadataLoaded to ensure + // the media element has the latest dimensions. 
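+  // A minimal sketch of the ordering the comment above relies on (owner
+  // behaviour is summarised, not quoted):
+  //
+  //   GetOwner()->MetadataLoaded(mInfo.get(), std::move(aTags));
+  //   Invalidate();  // -> owner's UpdateMediaSize() with the frame
+  //                  //    container's current dimensions, which may be
+  //                  //    newer than the ones carried in aInfo
+  //
+  // Reversing the two calls could leave the element sized from stale,
+  // pre-metadata dimensions until the next invalidation.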
+ Invalidate(); + +#ifdef MOZ_WMF_MEDIA_ENGINE + if (mPendingStatusUpdateForNewlyCreatedStateMachine) { + mPendingStatusUpdateForNewlyCreatedStateMachine = false; + LOG("Set pending statuses if necessary (mLogicallySeeking=%d, " + "mLogicalPosition=%f, mPlaybackRate=%f)", + mLogicallySeeking.Ref(), mLogicalPosition, mPlaybackRate); + if (mLogicalPosition != 0) { + Seek(mLogicalPosition, SeekTarget::Accurate); + } + if (mPlaybackRate != 0 && mPlaybackRate != 1.0) { + mDecoderStateMachine->DispatchSetPlaybackRate(mPlaybackRate); + } + } +#endif + + EnsureTelemetryReported(); +} + +void MediaDecoder::EnsureTelemetryReported() { + MOZ_ASSERT(NS_IsMainThread()); + + if (mTelemetryReported || !mInfo) { + // Note: sometimes we get multiple MetadataLoaded calls (for example + // for chained ogg). So we ensure we don't report duplicate results for + // these resources. + return; + } + + nsTArray codecs; + if (mInfo->HasAudio() && + !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) { + codecs.AppendElement(mInfo->mAudio.GetAsAudioInfo()->mMimeType); + } + if (mInfo->HasVideo() && + !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) { + codecs.AppendElement(mInfo->mVideo.GetAsVideoInfo()->mMimeType); + } + if (codecs.IsEmpty()) { + codecs.AppendElement(nsPrintfCString( + "resource; %s", ContainerType().OriginalString().Data())); + } + for (const nsCString& codec : codecs) { + LOG("Telemetry MEDIA_CODEC_USED= '%s'", codec.get()); + Telemetry::Accumulate(Telemetry::HistogramID::MEDIA_CODEC_USED, codec); + } + + mTelemetryReported = true; +} + +const char* MediaDecoder::PlayStateStr() { + MOZ_ASSERT(NS_IsMainThread()); + return ToPlayStateStr(mPlayState); +} + +void MediaDecoder::FirstFrameLoaded( + UniquePtr aInfo, MediaDecoderEventVisibility aEventVisibility) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d " + "mPlayState=%s transportSeekable=%d", + aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(), + aInfo->HasVideo(), PlayStateStr(), IsTransportSeekable()); + + mInfo = std::move(aInfo); + mTelemetryProbesReporter->OnMediaContentChanged( + TelemetryProbesReporter::MediaInfoToMediaContent(*mInfo)); + + Invalidate(); + + // The element can run javascript via events + // before reaching here, so only change the + // state if we're still set to the original + // loading state. + if (mPlayState == PLAY_STATE_LOADING) { + ChangeState(mNextState); + } + + // GetOwner()->FirstFrameLoaded() might call us back. Put it at the bottom of + // this function to avoid unexpected shutdown from reentrant calls. 
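+  // Rough shape of the reentrancy hazard described above (the script-side
+  // steps are illustrative):
+  //
+  //   FirstFrameLoaded()
+  //     -> GetOwner()->FirstFrameLoaded()
+  //          -> events fire, script runs
+  //               -> script tears the element down, owner calls Shutdown()
+  //
+  // Keeping the owner callback as the last statement means nothing in this
+  // frame touches decoder state after a potential shutdown.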
+ if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) { + GetOwner()->FirstFrameLoaded(); + } +} + +void MediaDecoder::NetworkError(const MediaResult& aError) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + GetOwner()->NetworkError(aError); +} + +void MediaDecoder::DecodeError(const MediaResult& aError) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + GetOwner()->DecodeError(aError); +} + +void MediaDecoder::UpdateSameOriginStatus(bool aSameOrigin) { + MOZ_ASSERT(NS_IsMainThread()); + mSameOriginMedia = aSameOrigin; +} + +bool MediaDecoder::IsSeeking() const { + MOZ_ASSERT(NS_IsMainThread()); + return mLogicallySeeking; +} + +bool MediaDecoder::OwnerHasError() const { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + return GetOwner()->HasError(); +} + +bool MediaDecoder::IsEnded() const { + MOZ_ASSERT(NS_IsMainThread()); + return mPlayState == PLAY_STATE_ENDED; +} + +bool MediaDecoder::IsShutdown() const { + MOZ_ASSERT(NS_IsMainThread()); + return mPlayState == PLAY_STATE_SHUTDOWN; +} + +void MediaDecoder::PlaybackEnded() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + if (mLogicallySeeking || mPlayState == PLAY_STATE_LOADING || + mPlayState == PLAY_STATE_ENDED) { + LOG("MediaDecoder::PlaybackEnded bailed out, " + "mLogicallySeeking=%d mPlayState=%s", + mLogicallySeeking.Ref(), ToPlayStateStr(mPlayState)); + return; + } + + LOG("MediaDecoder::PlaybackEnded"); + + ChangeState(PLAY_STATE_ENDED); + InvalidateWithFlags(VideoFrameContainer::INVALIDATE_FORCE); + GetOwner()->PlaybackEnded(); +} + +void MediaDecoder::NotifyPrincipalChanged() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + GetOwner()->NotifyDecoderPrincipalChanged(); +} + +void MediaDecoder::OnSeekResolved() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + LOG("MediaDecoder::OnSeekResolved"); + mLogicallySeeking = false; + + // Ensure logical position is updated after seek. + UpdateLogicalPositionInternal(); + mSeekRequest.Complete(); + + GetOwner()->SeekCompleted(); +} + +void MediaDecoder::OnSeekRejected() { + MOZ_ASSERT(NS_IsMainThread()); + LOG("MediaDecoder::OnSeekRejected"); + mSeekRequest.Complete(); + mLogicallySeeking = false; + + GetOwner()->SeekAborted(); +} + +void MediaDecoder::SeekingStarted() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + GetOwner()->SeekStarted(); +} + +void MediaDecoder::ChangeState(PlayState aState) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(!IsShutdown(), "SHUTDOWN is the final state."); + + if (mNextState == aState) { + mNextState = PLAY_STATE_PAUSED; + } + + if (mPlayState != aState) { + DDLOG(DDLogCategory::Property, "play_state", ToPlayStateStr(aState)); + LOG("Play state changes from %s to %s", ToPlayStateStr(mPlayState), + ToPlayStateStr(aState)); + mPlayState = aState; + UpdateTelemetryHelperBasedOnPlayState(aState); + } +} + +TelemetryProbesReporter::Visibility MediaDecoder::OwnerVisibility() const { + return GetOwner()->IsActuallyInvisible() || mForcedHidden + ? 
TelemetryProbesReporter::Visibility::eInvisible + : TelemetryProbesReporter::Visibility::eVisible; +} + +void MediaDecoder::UpdateTelemetryHelperBasedOnPlayState( + PlayState aState) const { + if (aState == PlayState::PLAY_STATE_PLAYING) { + mTelemetryProbesReporter->OnPlay( + OwnerVisibility(), + TelemetryProbesReporter::MediaInfoToMediaContent(*mInfo), + mVolume == 0.f); + } else if (aState == PlayState::PLAY_STATE_PAUSED || + aState == PlayState::PLAY_STATE_ENDED) { + mTelemetryProbesReporter->OnPause(OwnerVisibility()); + } else if (aState == PLAY_STATE_SHUTDOWN) { + mTelemetryProbesReporter->OnShutdown(); + } +} + +MediaDecoder::PositionUpdate MediaDecoder::GetPositionUpdateReason( + double aPrevPos, const TimeUnit& aCurPos) const { + MOZ_ASSERT(NS_IsMainThread()); + // If current position is earlier than previous position and we didn't do + // seek, that means we looped back to the start position. + const bool notSeeking = !mSeekRequest.Exists(); + if (mLooping && notSeeking && aCurPos.ToSeconds() < aPrevPos) { + return PositionUpdate::eSeamlessLoopingSeeking; + } + return aPrevPos != aCurPos.ToSeconds() && notSeeking + ? PositionUpdate::ePeriodicUpdate + : PositionUpdate::eOther; +} + +void MediaDecoder::UpdateLogicalPositionInternal() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + TimeUnit currentPosition = CurrentPosition(); + if (mPlayState == PLAY_STATE_ENDED) { + currentPosition = + std::max(currentPosition, mDuration.match(DurationToTimeUnit())); + } + + const PositionUpdate reason = + GetPositionUpdateReason(mLogicalPosition, currentPosition); + switch (reason) { + case PositionUpdate::ePeriodicUpdate: + SetLogicalPosition(currentPosition); + // This is actually defined in `TimeMarchesOn`, but we do that in decoder. + // https://html.spec.whatwg.org/multipage/media.html#playing-the-media-resource:event-media-timeupdate-7 + // TODO (bug 1688137): should we move it back to `TimeMarchesOn`? + GetOwner()->MaybeQueueTimeupdateEvent(); + break; + case PositionUpdate::eSeamlessLoopingSeeking: + // When seamless seeking occurs, seeking was performed on the demuxer so + // the decoder doesn't know. That means decoder still thinks it's in + // playing. Therefore, we have to manually call those methods to notify + // the owner about seeking. + GetOwner()->SeekStarted(); + SetLogicalPosition(currentPosition); + GetOwner()->SeekCompleted(); + break; + default: + MOZ_ASSERT(reason == PositionUpdate::eOther); + SetLogicalPosition(currentPosition); + break; + } + + // Invalidate the frame so any video data is displayed. + // Do this before the timeupdate event so that if that + // event runs JavaScript that queries the media size, the + // frame has reflowed and the size updated beforehand. + Invalidate(); +} + +void MediaDecoder::SetLogicalPosition(const TimeUnit& aNewPosition) { + MOZ_ASSERT(NS_IsMainThread()); + if (TimeUnit::FromSeconds(mLogicalPosition) == aNewPosition || + mLogicalPosition == aNewPosition.ToSeconds()) { + return; + } + mLogicalPosition = aNewPosition.ToSeconds(); + DDLOG(DDLogCategory::Property, "currentTime", mLogicalPosition); +} + +void MediaDecoder::DurationChanged() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + Variant oldDuration = mDuration; + + // Use the explicit duration if we have one. + // Otherwise use the duration mirrored from MDSM. 
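+  // mDuration is a Variant holding either a script-set double (the explicit
+  // duration) or a TimeUnit mirrored from the state machine. A hedged sketch
+  // of the two read paths used in this function:
+  //
+  //   double d = mDuration.match(DurationToDouble());      // NaN while invalid,
+  //                                                         // +/-inf preserved
+  //   TimeUnit t = mDuration.match(DurationToTimeUnit());   // doubles converted,
+  //                                                         // possibly losing
+  //                                                         // precision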
+ if (mExplicitDuration.isSome()) { + mDuration.emplace(mExplicitDuration.ref()); + } else if (mStateMachineDuration.Ref().isSome()) { + MOZ_ASSERT(mStateMachineDuration.Ref().ref().IsValid()); + mDuration.emplace(mStateMachineDuration.Ref().ref()); + } + + LOG("New duration: %s", + mDuration.match(DurationToTimeUnit()).ToString().get()); + if (oldDuration.is() && oldDuration.as().IsValid()) { + LOG("Old Duration %s", + oldDuration.match(DurationToTimeUnit()).ToString().get()); + } + + if ((oldDuration.is() || oldDuration.as().IsValid())) { + if (mDuration.match(DurationToDouble()) == + oldDuration.match(DurationToDouble())) { + return; + } + } + + LOG("Duration changed to %s", + mDuration.match(DurationToTimeUnit()).ToString().get()); + + // See https://www.w3.org/Bugs/Public/show_bug.cgi?id=28822 for a discussion + // of whether we should fire durationchange on explicit infinity. + if (mFiredMetadataLoaded && + (!std::isinf(mDuration.match(DurationToDouble())) || + mExplicitDuration.isSome())) { + GetOwner()->DispatchAsyncEvent(u"durationchange"_ns); + } + + if (CurrentPosition().ToSeconds() > mDuration.match(DurationToDouble())) { + Seek(mDuration.match(DurationToDouble()), SeekTarget::Accurate); + } +} + +already_AddRefed MediaDecoder::GetCompositor() { + MediaDecoderOwner* owner = GetOwner(); + Document* ownerDoc = owner ? owner->GetDocument() : nullptr; + WindowRenderer* renderer = + ownerDoc ? nsContentUtils::WindowRendererForDocument(ownerDoc) : nullptr; + RefPtr knows = + renderer ? renderer->AsKnowsCompositor() : nullptr; + return knows ? knows->GetForMedia().forget() : nullptr; +} + +void MediaDecoder::NotifyCompositor() { + RefPtr knowsCompositor = GetCompositor(); + if (knowsCompositor) { + nsCOMPtr r = + NewRunnableMethod&&>( + "MediaFormatReader::UpdateCompositor", mReader, + &MediaFormatReader::UpdateCompositor, knowsCompositor.forget()); + Unused << mReader->OwnerThread()->Dispatch(r.forget()); + } +} + +void MediaDecoder::SetElementVisibility(bool aIsOwnerInvisible, + bool aIsOwnerConnected) { + MOZ_ASSERT(NS_IsMainThread()); + mIsOwnerInvisible = aIsOwnerInvisible; + mIsOwnerConnected = aIsOwnerConnected; + mTelemetryProbesReporter->OnVisibilityChanged(OwnerVisibility()); + UpdateVideoDecodeMode(); +} + +void MediaDecoder::SetForcedHidden(bool aForcedHidden) { + MOZ_ASSERT(NS_IsMainThread()); + mForcedHidden = aForcedHidden; + mTelemetryProbesReporter->OnVisibilityChanged(OwnerVisibility()); + UpdateVideoDecodeMode(); +} + +void MediaDecoder::SetSuspendTaint(bool aTainted) { + MOZ_ASSERT(NS_IsMainThread()); + mHasSuspendTaint = aTainted; + UpdateVideoDecodeMode(); +} + +void MediaDecoder::UpdateVideoDecodeMode() { + MOZ_ASSERT(NS_IsMainThread()); + + // The MDSM may yet be set. + if (!mDecoderStateMachine) { + LOG("UpdateVideoDecodeMode(), early return because we don't have MDSM."); + return; + } + + // Seeking is required when leaving suspend mode. + if (!mMediaSeekable) { + LOG("UpdateVideoDecodeMode(), set Normal because the media is not " + "seekable"); + mDecoderStateMachine->SetVideoDecodeMode(VideoDecodeMode::Normal); + return; + } + + // If mHasSuspendTaint is set, never suspend the video decoder. + if (mHasSuspendTaint) { + LOG("UpdateVideoDecodeMode(), set Normal because the element has been " + "tainted."); + mDecoderStateMachine->SetVideoDecodeMode(VideoDecodeMode::Normal); + return; + } + + // If mSecondaryVideoContainer is set, never suspend the video decoder. 
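+  // Condensed view of the early-return ladder in this function; the order is
+  // descriptive of the checks around this comment, not a separate contract:
+  //
+  //   not seekable / suspend taint / secondary container / owner not in tree
+  //                                  -> VideoDecodeMode::Normal
+  //   forced hidden                  -> VideoDecodeMode::Suspend
+  //   background decoding allowed    -> VideoDecodeMode::Normal
+  //   owner invisible                -> VideoDecodeMode::Suspend
+  //   otherwise                      -> VideoDecodeMode::Normal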
+ if (mSecondaryVideoContainer.Ref()) { + LOG("UpdateVideoDecodeMode(), set Normal because the element is cloning " + "itself visually to another video container."); + mDecoderStateMachine->SetVideoDecodeMode(VideoDecodeMode::Normal); + return; + } + + // Don't suspend elements that is not in a connected tree. + if (!mIsOwnerConnected) { + LOG("UpdateVideoDecodeMode(), set Normal because the element is not in " + "tree."); + mDecoderStateMachine->SetVideoDecodeMode(VideoDecodeMode::Normal); + return; + } + + // If mForcedHidden is set, suspend the video decoder anyway. + if (mForcedHidden) { + LOG("UpdateVideoDecodeMode(), set Suspend because the element is forced to " + "be suspended."); + mDecoderStateMachine->SetVideoDecodeMode(VideoDecodeMode::Suspend); + return; + } + + // Resume decoding in the advance, even the element is in the background. + if (mIsBackgroundVideoDecodingAllowed) { + LOG("UpdateVideoDecodeMode(), set Normal because the tab is in background " + "and hovered."); + mDecoderStateMachine->SetVideoDecodeMode(VideoDecodeMode::Normal); + return; + } + + if (mIsOwnerInvisible) { + LOG("UpdateVideoDecodeMode(), set Suspend because of invisible element."); + mDecoderStateMachine->SetVideoDecodeMode(VideoDecodeMode::Suspend); + } else { + LOG("UpdateVideoDecodeMode(), set Normal because of visible element."); + mDecoderStateMachine->SetVideoDecodeMode(VideoDecodeMode::Normal); + } +} + +void MediaDecoder::SetIsBackgroundVideoDecodingAllowed(bool aAllowed) { + mIsBackgroundVideoDecodingAllowed = aAllowed; + UpdateVideoDecodeMode(); +} + +bool MediaDecoder::HasSuspendTaint() const { + MOZ_ASSERT(NS_IsMainThread()); + return mHasSuspendTaint; +} + +void MediaDecoder::SetSecondaryVideoContainer( + const RefPtr& aSecondaryVideoContainer) { + MOZ_ASSERT(NS_IsMainThread()); + if (mSecondaryVideoContainer.Ref() == aSecondaryVideoContainer) { + return; + } + mSecondaryVideoContainer = aSecondaryVideoContainer; + UpdateVideoDecodeMode(); +} + +bool MediaDecoder::IsMediaSeekable() { + MOZ_ASSERT(NS_IsMainThread()); + NS_ENSURE_TRUE(GetStateMachine(), false); + return mMediaSeekable; +} + +namespace { + +// Returns zero, either as a TimeUnit or as a double. +template +constexpr T Zero() { + if constexpr (std::is_same::value) { + return 0.0; + } else if constexpr (std::is_same::value) { + return TimeUnit::Zero(); + } + MOZ_RELEASE_ASSERT(false); +}; + +// Returns Infinity either as a TimeUnit or as a double. +template +constexpr T Infinity() { + if constexpr (std::is_same::value) { + return std::numeric_limits::infinity(); + } else if constexpr (std::is_same::value) { + return TimeUnit::FromInfinity(); + } + MOZ_RELEASE_ASSERT(false); +}; + +}; // namespace + +// This method can be made to return either TimeIntervals, that is a set of +// interval that are delimited with TimeUnit, or TimeRanges, that is a set of +// intervals that are delimited by seconds, as doubles. +// seekable often depends on the duration of a media, in the very common case +// where the seekable range is [0, duration]. When playing a MediaSource, the +// duration of a media element can be set as an arbitrary number, that are +// 64-bits floating point values. +// This allows returning an interval that is [0, duration], with duration being +// a double that cannot be represented as a TimeUnit, either because it has too +// many significant digits, or because it's outside of the int64_t range that +// TimeUnit internally uses. 
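+// A worked example of the computation described above (numbers are
+// illustrative). With duration = 100s and buffered = [0, 5] U [20, 30]:
+//
+//   zeroToDuration   = [0, 100]
+//   positiveBuffered = [0, 5] U [20, 30]
+//
+//   mMediaSeekableOnlyInBufferedRanges -> seekable = positiveBuffered
+//   !IsMediaSeekable()                 -> seekable = empty
+//   !IsTransportSeekable()             -> seekable = positiveBuffered
+//   otherwise                          -> seekable = [0, 100]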
+template +IntervalType MediaDecoder::GetSeekableImpl() { + MOZ_ASSERT(NS_IsMainThread()); + if (std::isnan(GetDuration())) { + // We do not have a duration yet, we can't determine the seekable range. + return IntervalType(); + } + + // Compute [0, duration] -- When dealing with doubles, use ::GetDuration to + // avoid rounding the value differently. When dealing with TimeUnit, it's + // returned directly. + typename IntervalType::InnerType duration; + if constexpr (std::is_same::value) { + duration = GetDuration(); + } else { + duration = mDuration.as(); + } + typename IntervalType::ElemType zeroToDuration = + typename IntervalType::ElemType( + Zero(), + IsInfinite() ? Infinity() + : duration); + auto buffered = IntervalType(GetBuffered()); + // Remove any negative range in the interval -- seeking to a non-positive + // position isn't possible. + auto positiveBuffered = buffered.Intersection(zeroToDuration); + + // We can seek in buffered range if the media is seekable. Also, we can seek + // in unbuffered ranges if the transport level is seekable (local file or the + // server supports range requests, etc.) or in cue-less WebMs + if (mMediaSeekableOnlyInBufferedRanges) { + return IntervalType(positiveBuffered); + } + if (!IsMediaSeekable()) { + return IntervalType(); + } + if (!IsTransportSeekable()) { + return IntervalType(positiveBuffered); + } + + // Common case: seeking is possible at any point of the stream. + return IntervalType(zeroToDuration); +} + +media::TimeIntervals MediaDecoder::GetSeekable() { + return GetSeekableImpl(); +} + +media::TimeRanges MediaDecoder::GetSeekableTimeRanges() { + return GetSeekableImpl(); +} + +void MediaDecoder::SetFragmentEndTime(double aTime) { + MOZ_ASSERT(NS_IsMainThread()); + if (mDecoderStateMachine) { + mDecoderStateMachine->DispatchSetFragmentEndTime( + TimeUnit::FromSeconds(aTime)); + } +} + +void MediaDecoder::SetPlaybackRate(double aPlaybackRate) { + MOZ_ASSERT(NS_IsMainThread()); + + double oldRate = mPlaybackRate; + mPlaybackRate = aPlaybackRate; + if (aPlaybackRate == 0) { + Pause(); + return; + } + + if (oldRate == 0 && !GetOwner()->GetPaused()) { + // PlaybackRate is no longer null. + // Restart the playback if the media was playing. 
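+  // Sketch of the rate-zero handling in this function, assuming the element
+  // itself is not paused:
+  //
+  //   SetPlaybackRate(0.0);  // early return above: Pause(), and the zero rate
+  //                          // is never dispatched to the state machine
+  //   SetPlaybackRate(1.5);  // oldRate == 0 -> Play() below, then the new
+  //                          // rate is dispatched to the state machine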
+ Play(); + } + + if (mDecoderStateMachine) { + mDecoderStateMachine->DispatchSetPlaybackRate(aPlaybackRate); + } +} + +void MediaDecoder::SetPreservesPitch(bool aPreservesPitch) { + MOZ_ASSERT(NS_IsMainThread()); + mPreservesPitch = aPreservesPitch; +} + +void MediaDecoder::SetLooping(bool aLooping) { + MOZ_ASSERT(NS_IsMainThread()); + mLooping = aLooping; +} + +void MediaDecoder::SetStreamName(const nsAutoString& aStreamName) { + MOZ_ASSERT(NS_IsMainThread()); + mStreamName = aStreamName; +} + +void MediaDecoder::ConnectMirrors(MediaDecoderStateMachineBase* aObject) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(aObject); + mStateMachineDuration.Connect(aObject->CanonicalDuration()); + mBuffered.Connect(aObject->CanonicalBuffered()); + mCurrentPosition.Connect(aObject->CanonicalCurrentPosition()); + mIsAudioDataAudible.Connect(aObject->CanonicalIsAudioDataAudible()); +} + +void MediaDecoder::DisconnectMirrors() { + MOZ_ASSERT(NS_IsMainThread()); + mStateMachineDuration.DisconnectIfConnected(); + mBuffered.DisconnectIfConnected(); + mCurrentPosition.DisconnectIfConnected(); + mIsAudioDataAudible.DisconnectIfConnected(); +} + +void MediaDecoder::SetStateMachine( + MediaDecoderStateMachineBase* aStateMachine) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT_IF(aStateMachine, !mDecoderStateMachine); + if (aStateMachine) { + mDecoderStateMachine = aStateMachine; + LOG("set state machine %p", mDecoderStateMachine.get()); + ConnectMirrors(aStateMachine); + UpdateVideoDecodeMode(); + } else if (mDecoderStateMachine) { + LOG("null out state machine %p", mDecoderStateMachine.get()); + mDecoderStateMachine = nullptr; + DisconnectMirrors(); + } +} + +ImageContainer* MediaDecoder::GetImageContainer() { + return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer() + : nullptr; +} + +void MediaDecoder::InvalidateWithFlags(uint32_t aFlags) { + if (mVideoFrameContainer) { + mVideoFrameContainer->InvalidateWithFlags(aFlags); + } +} + +void MediaDecoder::Invalidate() { + if (mVideoFrameContainer) { + mVideoFrameContainer->Invalidate(); + } +} + +void MediaDecoder::Suspend() { + MOZ_ASSERT(NS_IsMainThread()); + GetStateMachine()->InvokeSuspendMediaSink(); +} + +void MediaDecoder::Resume() { + MOZ_ASSERT(NS_IsMainThread()); + GetStateMachine()->InvokeResumeMediaSink(); +} + +// Constructs the time ranges representing what segments of the media +// are buffered and playable. 
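+// Hedged usage sketch: the buffered ranges are mirrored from the state
+// machine, so querying them on the main thread is a plain read that does not
+// block on the decode machinery. Caller-side names are illustrative:
+//
+//   media::TimeIntervals buffered = decoder->GetBuffered();
+//   bool nextFrameBuffered = buffered.Contains(
+//       media::TimeInterval(pos, pos + smallLookaheadWindow));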
+media::TimeIntervals MediaDecoder::GetBuffered() { + MOZ_ASSERT(NS_IsMainThread()); + return mBuffered.Ref(); +} + +size_t MediaDecoder::SizeOfVideoQueue() { + MOZ_ASSERT(NS_IsMainThread()); + if (mDecoderStateMachine) { + return mDecoderStateMachine->SizeOfVideoQueue(); + } + return 0; +} + +size_t MediaDecoder::SizeOfAudioQueue() { + MOZ_ASSERT(NS_IsMainThread()); + if (mDecoderStateMachine) { + return mDecoderStateMachine->SizeOfAudioQueue(); + } + return 0; +} + +void MediaDecoder::NotifyReaderDataArrived() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + + nsresult rv = mReader->OwnerThread()->Dispatch( + NewRunnableMethod("MediaFormatReader::NotifyDataArrived", mReader.get(), + &MediaFormatReader::NotifyDataArrived)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; +} + +// Provide access to the state machine object +MediaDecoderStateMachineBase* MediaDecoder::GetStateMachine() const { + MOZ_ASSERT(NS_IsMainThread()); + return mDecoderStateMachine; +} + +bool MediaDecoder::CanPlayThrough() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + return CanPlayThroughImpl(); +} + +RefPtr MediaDecoder::SetCDMProxy(CDMProxy* aProxy) { + MOZ_ASSERT(NS_IsMainThread()); +#ifdef MOZ_WMF_MEDIA_ENGINE + // Switch to another state machine if the current one doesn't support the + // given CDM proxy. + if (aProxy && !GetStateMachine()->IsCDMProxySupported(aProxy)) { + LOG("CDM proxy not supported! Switch to another state machine."); + OnPlaybackErrorEvent( + MediaResult{NS_ERROR_DOM_MEDIA_CDM_PROXY_NOT_SUPPORTED_ERR, aProxy}); + } +#endif + MOZ_DIAGNOSTIC_ASSERT_IF(aProxy, + GetStateMachine()->IsCDMProxySupported(aProxy)); + return GetStateMachine()->SetCDMProxy(aProxy); +} + +bool MediaDecoder::IsOpusEnabled() { return StaticPrefs::media_opus_enabled(); } + +bool MediaDecoder::IsOggEnabled() { return StaticPrefs::media_ogg_enabled(); } + +bool MediaDecoder::IsWaveEnabled() { return StaticPrefs::media_wave_enabled(); } + +bool MediaDecoder::IsWebMEnabled() { return StaticPrefs::media_webm_enabled(); } + +NS_IMETHODIMP +MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport, + nsISupports* aData, bool aAnonymize) { + // NB: When resourceSizes' ref count goes to 0 the promise will report the + // resources memory and finish the asynchronous memory report. 
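+  // Rough shape of the pattern relied on below: each decoder handed
+  // |resourceSizes| via AddSizeOfResources() keeps it alive and accumulates
+  // into ResourceSizes::mByteSize from its own thread; the destructor run by
+  // the last release is what resolves the promise and completes this report.
+  // There is no explicit "all decoders finished" signal beyond that release.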
+ RefPtr resourceSizes = + new MediaDecoder::ResourceSizes(MediaMemoryTracker::MallocSizeOf); + + nsCOMPtr handleReport = aHandleReport; + nsCOMPtr data = aData; + + resourceSizes->Promise()->Then( + AbstractThread::MainThread(), __func__, + [handleReport, data](size_t size) { + handleReport->Callback( + ""_ns, "explicit/media/resources"_ns, KIND_HEAP, UNITS_BYTES, + static_cast(size), + nsLiteralCString("Memory used by media resources including " + "streaming buffers, caches, etc."), + data); + + nsCOMPtr imgr = + do_GetService("@mozilla.org/memory-reporter-manager;1"); + + if (imgr) { + imgr->EndReport(); + } + }, + [](size_t) { /* unused reject function */ }); + + int64_t video = 0; + int64_t audio = 0; + DecodersArray& decoders = Decoders(); + for (size_t i = 0; i < decoders.Length(); ++i) { + MediaDecoder* decoder = decoders[i]; + video += static_cast(decoder->SizeOfVideoQueue()); + audio += static_cast(decoder->SizeOfAudioQueue()); + decoder->AddSizeOfResources(resourceSizes); + } + + MOZ_COLLECT_REPORT("explicit/media/decoded/video", KIND_HEAP, UNITS_BYTES, + video, "Memory used by decoded video frames."); + + MOZ_COLLECT_REPORT("explicit/media/decoded/audio", KIND_HEAP, UNITS_BYTES, + audio, "Memory used by decoded audio chunks."); + + return NS_OK; +} + +MediaDecoderOwner* MediaDecoder::GetOwner() const { + MOZ_ASSERT(NS_IsMainThread()); + // mOwner is valid until shutdown. + return mOwner; +} + +MediaDecoderOwner::NextFrameStatus MediaDecoder::NextFrameBufferedStatus() { + MOZ_ASSERT(NS_IsMainThread()); + // Next frame hasn't been decoded yet. + // Use the buffered range to consider if we have the next frame available. + auto currentPosition = CurrentPosition(); + media::TimeInterval interval( + currentPosition, currentPosition + DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED); + return GetBuffered().Contains(interval) + ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE + : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE; +} + +void MediaDecoder::GetDebugInfo(dom::MediaDecoderDebugInfo& aInfo) { + MOZ_ASSERT(NS_IsMainThread()); + CopyUTF8toUTF16(nsPrintfCString("%p", this), aInfo.mInstance); + aInfo.mChannels = mInfo ? mInfo->mAudio.mChannels : 0; + aInfo.mRate = mInfo ? mInfo->mAudio.mRate : 0; + aInfo.mHasAudio = mInfo ? mInfo->HasAudio() : false; + aInfo.mHasVideo = mInfo ? mInfo->HasVideo() : false; + CopyUTF8toUTF16(MakeStringSpan(PlayStateStr()), aInfo.mPlayState); + aInfo.mContainerType = + NS_ConvertUTF8toUTF16(ContainerType().Type().AsString()); +} + +RefPtr MediaDecoder::RequestDebugInfo( + MediaDecoderDebugInfo& aInfo) { + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + if (!NS_IsMainThread()) { + // Run the request on the main thread if it's not already. + return InvokeAsync(AbstractThread::MainThread(), __func__, + [this, self = RefPtr{this}, &aInfo]() { + return RequestDebugInfo(aInfo); + }); + } + GetDebugInfo(aInfo); + + return mReader->RequestDebugInfo(aInfo.mReader) + ->Then(AbstractThread::MainThread(), __func__, + [this, self = RefPtr{this}, &aInfo] { + if (!GetStateMachine()) { + return GenericPromise::CreateAndResolve(true, __func__); + } + return GetStateMachine()->RequestDebugInfo(aInfo.mStateMachine); + }); +} + +void MediaDecoder::NotifyAudibleStateChanged() { + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + GetOwner()->SetAudibleState(mIsAudioDataAudible); + mTelemetryProbesReporter->OnAudibleChanged( + mIsAudioDataAudible ? 
TelemetryProbesReporter::AudibleState::eAudible + : TelemetryProbesReporter::AudibleState::eNotAudible); +} + +void MediaDecoder::NotifyVolumeChanged() { + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + mTelemetryProbesReporter->OnMutedChanged(mVolume == 0.f); +} + +double MediaDecoder::GetTotalVideoPlayTimeInSeconds() const { + return mTelemetryProbesReporter->GetTotalVideoPlayTimeInSeconds(); +} + +double MediaDecoder::GetTotalVideoHDRPlayTimeInSeconds() const { + return mTelemetryProbesReporter->GetTotalVideoHDRPlayTimeInSeconds(); +} + +double MediaDecoder::GetVisibleVideoPlayTimeInSeconds() const { + return mTelemetryProbesReporter->GetVisibleVideoPlayTimeInSeconds(); +} + +double MediaDecoder::GetInvisibleVideoPlayTimeInSeconds() const { + return mTelemetryProbesReporter->GetInvisibleVideoPlayTimeInSeconds(); +} + +double MediaDecoder::GetVideoDecodeSuspendedTimeInSeconds() const { + return mTelemetryProbesReporter->GetVideoDecodeSuspendedTimeInSeconds(); +} + +double MediaDecoder::GetTotalAudioPlayTimeInSeconds() const { + return mTelemetryProbesReporter->GetTotalAudioPlayTimeInSeconds(); +} + +double MediaDecoder::GetAudiblePlayTimeInSeconds() const { + return mTelemetryProbesReporter->GetAudiblePlayTimeInSeconds(); +} + +double MediaDecoder::GetInaudiblePlayTimeInSeconds() const { + return mTelemetryProbesReporter->GetInaudiblePlayTimeInSeconds(); +} + +double MediaDecoder::GetMutedPlayTimeInSeconds() const { + return mTelemetryProbesReporter->GetMutedPlayTimeInSeconds(); +} + +MediaMemoryTracker::MediaMemoryTracker() = default; + +void MediaMemoryTracker::InitMemoryReporter() { + RegisterWeakAsyncMemoryReporter(this); +} + +MediaMemoryTracker::~MediaMemoryTracker() { + UnregisterWeakMemoryReporter(this); +} + +} // namespace mozilla + +// avoid redefined macro in unified build +#undef DUMP +#undef LOG +#undef NS_DispatchToMainThread diff --git a/dom/media/MediaDecoder.h b/dom/media/MediaDecoder.h new file mode 100644 index 0000000000..f2f10a67c6 --- /dev/null +++ b/dom/media/MediaDecoder.h @@ -0,0 +1,822 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#if !defined(MediaDecoder_h_) +# define MediaDecoder_h_ + +# include "BackgroundVideoDecodingPermissionObserver.h" +# include "DecoderDoctorDiagnostics.h" +# include "MediaContainerType.h" +# include "MediaDecoderOwner.h" +# include "MediaEventSource.h" +# include "MediaMetadataManager.h" +# include "MediaPromiseDefs.h" +# include "MediaResource.h" +# include "MediaStatistics.h" +# include "SeekTarget.h" +# include "TelemetryProbesReporter.h" +# include "TimeUnits.h" +# include "mozilla/Atomics.h" +# include "mozilla/CDMProxy.h" +# include "mozilla/MozPromise.h" +# include "mozilla/ReentrantMonitor.h" +# include "mozilla/StateMirroring.h" +# include "mozilla/StateWatching.h" +# include "mozilla/dom/MediaDebugInfoBinding.h" +# include "nsCOMPtr.h" +# include "nsIObserver.h" +# include "nsISupports.h" +# include "nsITimer.h" + +class AudioDeviceInfo; +class nsIPrincipal; + +namespace mozilla { + +class AbstractThread; +class DOMMediaStream; +class DecoderBenchmark; +class ProcessedMediaTrack; +class FrameStatistics; +class VideoFrameContainer; +class MediaFormatReader; +class MediaDecoderStateMachineBase; +struct MediaPlaybackEvent; +struct SharedDummyTrack; + +template +struct DurationToType { + double operator()(double aDouble); + double operator()(const media::TimeUnit& aTimeUnit); +}; + +template <> +struct DurationToType { + double operator()(double aDouble) { return aDouble; } + double operator()(const media::TimeUnit& aTimeUnit) { + if (aTimeUnit.IsValid()) { + if (aTimeUnit.IsPosInf()) { + return std::numeric_limits::infinity(); + } + if (aTimeUnit.IsNegInf()) { + return -std::numeric_limits::infinity(); + } + return aTimeUnit.ToSeconds(); + } + return std::numeric_limits::quiet_NaN(); + } +}; + +using DurationToDouble = DurationToType; + +template <> +struct DurationToType { + media::TimeUnit operator()(double aDouble) { + return media::TimeUnit::FromSeconds(aDouble); + } + media::TimeUnit operator()(const media::TimeUnit& aTimeUnit) { + return aTimeUnit; + } +}; + +using DurationToTimeUnit = DurationToType; + +struct MOZ_STACK_CLASS MediaDecoderInit { + MediaDecoderOwner* const mOwner; + TelemetryProbesReporterOwner* const mReporterOwner; + const double mVolume; + const bool mPreservesPitch; + const double mPlaybackRate; + const bool mMinimizePreroll; + const bool mHasSuspendTaint; + const bool mLooping; + const MediaContainerType mContainerType; + const nsAutoString mStreamName; + + MediaDecoderInit(MediaDecoderOwner* aOwner, + TelemetryProbesReporterOwner* aReporterOwner, double aVolume, + bool aPreservesPitch, double aPlaybackRate, + bool aMinimizePreroll, bool aHasSuspendTaint, bool aLooping, + const MediaContainerType& aContainerType) + : mOwner(aOwner), + mReporterOwner(aReporterOwner), + mVolume(aVolume), + mPreservesPitch(aPreservesPitch), + mPlaybackRate(aPlaybackRate), + mMinimizePreroll(aMinimizePreroll), + mHasSuspendTaint(aHasSuspendTaint), + mLooping(aLooping), + mContainerType(aContainerType) {} +}; + +DDLoggedTypeDeclName(MediaDecoder); + +class MediaDecoder : public DecoderDoctorLifeLogger { + public: + typedef MozPromise + SeekPromise; + + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDecoder) + + // Enumeration for the valid play states (see mPlayState) + enum PlayState { + PLAY_STATE_LOADING, + PLAY_STATE_PAUSED, + PLAY_STATE_PLAYING, + PLAY_STATE_ENDED, + PLAY_STATE_SHUTDOWN + }; + + // Must be called exactly once, on the main thread, during startup. 
+ static void InitStatics(); + + explicit MediaDecoder(MediaDecoderInit& aInit); + + // Returns the container content type of the resource. + // Safe to call from any thread. + const MediaContainerType& ContainerType() const { return mContainerType; } + + // Cleanup internal data structures. Must be called on the main + // thread by the owning object before that object disposes of this object. + virtual void Shutdown(); + + // Notified by the shutdown manager that XPCOM shutdown has begun. + // The decoder should notify its owner to drop the reference to the decoder + // to prevent further calls into the decoder. + void NotifyXPCOMShutdown(); + + // Called if the media file encounters a network error. + void NetworkError(const MediaResult& aError); + + // Return the principal of the current URI being played or downloaded. + virtual already_AddRefed GetCurrentPrincipal() = 0; + + // Return true if the loading of this resource required cross-origin + // redirects. + virtual bool HadCrossOriginRedirects() = 0; + + // Return the time position in the video stream being + // played measured in seconds. + virtual double GetCurrentTime(); + + // Seek to the time position in (seconds) from the start of the video. + // If aDoFastSeek is true, we'll seek to the sync point/keyframe preceeding + // the seek target. + void Seek(double aTime, SeekTarget::Type aSeekType); + + // Start playback of a video. 'Load' must have previously been + // called. + virtual void Play(); + + // Notify activity of the decoder owner is changed. + virtual void NotifyOwnerActivityChanged(bool aIsOwnerInvisible, + bool aIsOwnerConnected); + + // Pause video playback. + virtual void Pause(); + // Adjust the speed of the playback, optionally with pitch correction, + void SetVolume(double aVolume); + + void SetPlaybackRate(double aPlaybackRate); + void SetPreservesPitch(bool aPreservesPitch); + void SetLooping(bool aLooping); + void SetStreamName(const nsAutoString& aStreamName); + + // Set the given device as the output device. + RefPtr SetSink(AudioDeviceInfo* aSinkDevice); + + bool GetMinimizePreroll() const { return mMinimizePreroll; } + + // When we enable delay seek mode, media decoder won't actually ask MDSM to do + // seeking. During this period, we would store the latest seeking target and + // perform the seek to that target when we leave the mode. If we have any + // delayed seeks stored `IsSeeking()` will return true. E.g. During delay + // seeking mode, if we get seek target to 5s, 10s, 7s. When we stop delaying + // seeking, we would only seek to 7s. + void SetDelaySeekMode(bool aShouldDelaySeek); + + // All MediaStream-related data is protected by mReentrantMonitor. + // We have at most one DecodedStreamData per MediaDecoder. Its stream + // is used as the input for each ProcessedMediaTrack created by calls to + // captureStream(UntilEnded). Seeking creates a new source stream, as does + // replaying after the input as ended. In the latter case, the new source is + // not connected to streams created by captureStreamUntilEnded. + + enum class OutputCaptureState { Capture, Halt, None }; + // Set the output capture state of this decoder. + // @param aState Capture: Output is captured into output tracks, and + // aDummyTrack must be provided. + // Halt: A capturing media sink is used, but capture is + // halted. + // None: Output is not captured. 
+ // @param aDummyTrack A SharedDummyTrack the capturing media sink can use to + // access a MediaTrackGraph, so it can create tracks even + // when there are no output tracks available. + void SetOutputCaptureState(OutputCaptureState aState, + SharedDummyTrack* aDummyTrack = nullptr); + // Add an output track. All decoder output for the track's media type will be + // sent to the track. + // Note that only one audio track and one video track is supported by + // MediaDecoder at this time. Passing in more of one type, or passing in a + // type that metadata says we are not decoding, is an error. + void AddOutputTrack(RefPtr aTrack); + // Remove an output track added with AddOutputTrack. + void RemoveOutputTrack(const RefPtr& aTrack); + // Update the principal for any output tracks. + void SetOutputTracksPrincipal(const RefPtr& aPrincipal); + + // Return the duration of the video in seconds. + virtual double GetDuration(); + + // Return true if the stream is infinite. + bool IsInfinite() const; + + // Return true if we are currently seeking in the media resource. + // Call on the main thread only. + bool IsSeeking() const; + + // Return true if the decoder has reached the end of playback. + bool IsEnded() const; + + // True if we are playing a MediaSource object. + virtual bool IsMSE() const { return false; } + + // Return true if the MediaDecoderOwner's error attribute is not null. + // Must be called before Shutdown(). + bool OwnerHasError() const; + + // Returns true if this media supports random seeking. False for example with + // chained ogg files. + bool IsMediaSeekable(); + // Returns true if seeking is supported on a transport level (e.g. the server + // supports range requests, we are playing a file, etc.). + virtual bool IsTransportSeekable() = 0; + + // Return the time ranges that can be seeked into, in TimeUnits. + virtual media::TimeIntervals GetSeekable(); + // Return the time ranges that can be seeked into, in seconds, double + // precision. + virtual media::TimeRanges GetSeekableTimeRanges(); + + template + T GetSeekableImpl(); + + // Set the end time of the media resource. When playback reaches + // this point the media pauses. aTime is in seconds. + virtual void SetFragmentEndTime(double aTime); + + // Invalidate the frame. + void Invalidate(); + void InvalidateWithFlags(uint32_t aFlags); + + // Suspend any media downloads that are in progress. Called by the + // media element when it is sent to the bfcache, or when we need + // to throttle the download. Call on the main thread only. This can + // be called multiple times, there's an internal "suspend count". + // When it is called the internal system audio resource are cleaned up. + virtual void Suspend(); + + // Resume any media downloads that have been suspended. Called by the + // media element when it is restored from the bfcache, or when we need + // to stop throttling the download. Call on the main thread only. + // The download will only actually resume once as many Resume calls + // have been made as Suspend calls. + virtual void Resume(); + + // Moves any existing channel loads into or out of background. Background + // loads don't block the load event. This is called when we stop or restart + // delaying the load event. This also determines whether any new loads + // initiated (for example to seek) will be in the background. This calls + // SetLoadInBackground() on mResource. 
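+  // The base implementation below is a deliberate no-op for decoders without
+  // a network channel. A hedged sketch of the kind of override a channel-based
+  // decoder would provide (the real subclass may differ in detail):
+  //
+  //   void ChannelMediaDecoder::SetLoadInBackground(bool aLoadInBackground) {
+  //     if (mResource) {
+  //       mResource->SetLoadInBackground(aLoadInBackground);
+  //     }
+  //   }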
+ virtual void SetLoadInBackground(bool aLoadInBackground) {} + + MediaDecoderStateMachineBase* GetStateMachine() const; + void SetStateMachine(MediaDecoderStateMachineBase* aStateMachine); + + // Constructs the time ranges representing what segments of the media + // are buffered and playable. + virtual media::TimeIntervals GetBuffered(); + + // Returns the size, in bytes, of the heap memory used by the currently + // queued decoded video and audio data. + size_t SizeOfVideoQueue(); + size_t SizeOfAudioQueue(); + + // Helper struct for accumulating resource sizes that need to be measured + // asynchronously. Once all references are dropped the callback will be + // invoked. + struct ResourceSizes { + typedef MozPromise SizeOfPromise; + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ResourceSizes) + explicit ResourceSizes(MallocSizeOf aMallocSizeOf) + : mMallocSizeOf(aMallocSizeOf), mByteSize(0), mCallback() {} + + mozilla::MallocSizeOf mMallocSizeOf; + mozilla::Atomic mByteSize; + + RefPtr Promise() { return mCallback.Ensure(__func__); } + + private: + ~ResourceSizes() { mCallback.ResolveIfExists(mByteSize, __func__); } + + MozPromiseHolder mCallback; + }; + + virtual void AddSizeOfResources(ResourceSizes* aSizes) = 0; + + VideoFrameContainer* GetVideoFrameContainer() { return mVideoFrameContainer; } + + layers::ImageContainer* GetImageContainer(); + + // Returns true if we can play the entire media through without stopping + // to buffer, given the current download and playback rates. + bool CanPlayThrough(); + + // Called from HTMLMediaElement when owner document activity changes + virtual void SetElementVisibility(bool aIsOwnerInvisible, + bool aIsOwnerConnected); + + // Force override the visible state to hidden. + // Called from HTMLMediaElement when testing of video decode suspend from + // mochitests. + void SetForcedHidden(bool aForcedHidden); + + // Mark the decoder as tainted, meaning suspend-video-decoder is disabled. + void SetSuspendTaint(bool aTaint); + + // Returns true if the decoder can't participate in suspend-video-decoder. + bool HasSuspendTaint() const; + + void UpdateVideoDecodeMode(); + + void SetSecondaryVideoContainer( + const RefPtr& aSecondaryVideoContainer); + + void SetIsBackgroundVideoDecodingAllowed(bool aAllowed); + + bool IsVideoDecodingSuspended() const; + + // The MediaDecoderOwner of this decoder wants to resist fingerprinting. + bool ShouldResistFingerprinting() const { + return mShouldResistFingerprinting; + } + + /****** + * The following methods must only be called on the main + * thread. + ******/ + + // Change to a new play state. This updates the mState variable and + // notifies any thread blocking on this object's monitor of the + // change. Call on the main thread only. + virtual void ChangeState(PlayState aState); + + // Called when the video has completed playing. + // Call on the main thread only. + void PlaybackEnded(); + + void OnSeekRejected(); + void OnSeekResolved(); + + // Seeking has started. Inform the element on the main thread. + void SeekingStarted(); + + void UpdateLogicalPositionInternal(); + void UpdateLogicalPosition() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + // Per spec, offical position remains stable during pause and seek. + if (mPlayState == PLAY_STATE_PAUSED || IsSeeking()) { + return; + } + UpdateLogicalPositionInternal(); + } + + // Find the end of the cached data starting at the current decoder + // position. 
+ int64_t GetDownloadPosition(); + + // Notifies the element that decoding has failed. + void DecodeError(const MediaResult& aError); + + // Indicate whether the media is same-origin with the element. + void UpdateSameOriginStatus(bool aSameOrigin); + + MediaDecoderOwner* GetOwner() const; + + AbstractThread* AbstractMainThread() const { return mAbstractMainThread; } + + RefPtr SetCDMProxy(CDMProxy* aProxy); + + void EnsureTelemetryReported(); + + static bool IsOggEnabled(); + static bool IsOpusEnabled(); + static bool IsWaveEnabled(); + static bool IsWebMEnabled(); + + // Return the frame decode/paint related statistics. + FrameStatistics& GetFrameStatistics() { return *mFrameStats; } + + void UpdateReadyState() { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + GetOwner()->UpdateReadyState(); + } + + MediaDecoderOwner::NextFrameStatus NextFrameStatus() const { + return mNextFrameStatus; + } + + virtual MediaDecoderOwner::NextFrameStatus NextFrameBufferedStatus(); + + RefPtr RequestDebugInfo(dom::MediaDecoderDebugInfo& aInfo); + + void GetDebugInfo(dom::MediaDecoderDebugInfo& aInfo); + + protected: + virtual ~MediaDecoder(); + + // Called when the first audio and/or video from the media file has been + // loaded by the state machine. Call on the main thread only. + virtual void FirstFrameLoaded(UniquePtr aInfo, + MediaDecoderEventVisibility aEventVisibility); + + // Return error if fail to init the state machine. + nsresult CreateAndInitStateMachine(bool aIsLiveStream, + bool aDisableExternalEngine = false); + + // Always return a state machine. If the decoder supports using external + // engine, `aDisableExternalEngine` can disable the external engine if needed. + virtual MediaDecoderStateMachineBase* CreateStateMachine( + bool aDisableExternalEngine) MOZ_NONNULL_RETURN = 0; + + void SetStateMachineParameters(); + + // Disconnect any events before shutting down the state machine. + void DisconnectEvents(); + RefPtr ShutdownStateMachine(); + + // Called when MediaDecoder shutdown is finished. Subclasses use this to clean + // up internal structures, and unregister potential shutdown blockers when + // they're done. + virtual void ShutdownInternal(); + + bool IsShutdown() const; + + // Called to notify the decoder that the duration has changed. + virtual void DurationChanged(); + + // State-watching manager. + WatchManager mWatchManager; + + double ExplicitDuration() { return mExplicitDuration.ref(); } + + void SetExplicitDuration(double aValue) { + MOZ_DIAGNOSTIC_ASSERT(!IsShutdown()); + mExplicitDuration = Some(aValue); + + // We Invoke DurationChanged explicitly, rather than using a watcher, so + // that it takes effect immediately, rather than at the end of the current + // task. + DurationChanged(); + } + + virtual void OnPlaybackEvent(MediaPlaybackEvent&& aEvent); + + // Called when the metadata from the media file has been loaded by the + // state machine. Call on the main thread only. + virtual void MetadataLoaded(UniquePtr aInfo, + UniquePtr aTags, + MediaDecoderEventVisibility aEventVisibility); + + void SetLogicalPosition(const media::TimeUnit& aNewPosition); + + /****** + * The following members should be accessed with the decoder lock held. + ******/ + + // The logical playback position of the media resource in units of + // seconds. This corresponds to the "official position" in HTML5. 
Note that + // we need to store this as a double, rather than an int64_t (like + // mCurrentPosition), so that |v.currentTime = foo; v.currentTime == foo| + // returns true without being affected by rounding errors. + double mLogicalPosition; + + // The current playback position of the underlying playback infrastructure. + // This corresponds to the "current position" in HTML5. + // We allow omx subclasses to substitute an alternative current position for + // usage with the audio offload player. + virtual media::TimeUnit CurrentPosition() { return mCurrentPosition.Ref(); } + + already_AddRefed GetCompositor(); + + // Official duration of the media resource as observed by script. + // This can be a TimeUnit representing the exact duration found by demuxing, + // as a TimeUnit. This can also be a duration set explicitly by script, as a + // double. + Variant mDuration; + + /****** + * The following member variables can be accessed from any thread. + ******/ + + RefPtr mReader; + + // Amount of buffered data ahead of current time required to consider that + // the next frame is available. + // An arbitrary value of 250ms is used. + static constexpr auto DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED = + media::TimeUnit::FromMicroseconds(250000); + + private: + // Called when the owner's activity changed. + void NotifyCompositor(); + + void OnPlaybackErrorEvent(const MediaResult& aError); + + void OnDecoderDoctorEvent(DecoderDoctorEvent aEvent); + + void OnMediaNotSeekable() { mMediaSeekable = false; } + + void OnNextFrameStatus(MediaDecoderOwner::NextFrameStatus); + + void OnTrackInfoUpdated(const VideoInfo& aVideoInfo, + const AudioInfo& aAudioInfo); + + void OnSecondaryVideoContainerInstalled( + const RefPtr& aSecondaryVideoContainer); + + void OnStoreDecoderBenchmark(const VideoInfo& aInfo); + + void FinishShutdown(); + + void ConnectMirrors(MediaDecoderStateMachineBase* aObject); + void DisconnectMirrors(); + + virtual bool CanPlayThroughImpl() = 0; + + // The state machine object for handling the decoding. It is safe to + // call methods of this object from other threads. Its internal data + // is synchronised on a monitor. The lifetime of this object is + // after mPlayState is LOADING and before mPlayState is SHUTDOWN. It + // is safe to access it during this period. + // + // Explicitly prievate to force access via accessors. + RefPtr mDecoderStateMachine; + + protected: + void NotifyReaderDataArrived(); + void DiscardOngoingSeekIfExists(); + void CallSeek(const SeekTarget& aTarget); + + // Called by MediaResource when the principal of the resource has + // changed. Called on main thread only. + virtual void NotifyPrincipalChanged(); + + MozPromiseRequestHolder mSeekRequest; + + const char* PlayStateStr(); + + void OnMetadataUpdate(TimedMetadata&& aMetadata); + + // This should only ever be accessed from the main thread. + // It is set in the constructor and cleared in Shutdown when the element goes + // away. The decoder does not add a reference the element. + MediaDecoderOwner* mOwner; + + // The AbstractThread from mOwner. + const RefPtr mAbstractMainThread; + + // Counters related to decode and presentation of frames. + const RefPtr mFrameStats; + + // Store a benchmark of the decoder based on FrameStatistics. + RefPtr mDecoderBenchmark; + + RefPtr mVideoFrameContainer; + + // True if the decoder has been directed to minimize its preroll before + // playback starts. 
After the first time playback starts, we don't attempt + // to minimize preroll, as we assume the user is likely to keep playing, + // or play the media again. + const bool mMinimizePreroll; + + // True if we've already fired metadataloaded. + bool mFiredMetadataLoaded; + + // True if the media is seekable (i.e. supports random access). + bool mMediaSeekable = true; + + // True if the media is only seekable within its buffered ranges + // like WebMs with no cues. + bool mMediaSeekableOnlyInBufferedRanges = false; + + // Stores media info, including info of audio tracks and video tracks, should + // only be accessed from main thread. + UniquePtr mInfo; + + // True if the owner element is actually visible to users. + bool mIsOwnerInvisible; + + // True if the owner element is connected to a document tree. + // https://dom.spec.whatwg.org/#connected + bool mIsOwnerConnected; + + // If true, forces the decoder to be considered hidden. + bool mForcedHidden; + + // True if the decoder has a suspend taint - meaning suspend-video-decoder is + // disabled. + bool mHasSuspendTaint; + + // If true, the decoder should resist fingerprinting. + const bool mShouldResistFingerprinting; + + MediaDecoderOwner::NextFrameStatus mNextFrameStatus = + MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE; + + // A listener to receive metadata updates from MDSM. + MediaEventListener mTimedMetadataListener; + + MediaEventListener mMetadataLoadedListener; + MediaEventListener mFirstFrameLoadedListener; + + MediaEventListener mOnPlaybackEvent; + MediaEventListener mOnPlaybackErrorEvent; + MediaEventListener mOnDecoderDoctorEvent; + MediaEventListener mOnMediaNotSeekable; + MediaEventListener mOnEncrypted; + MediaEventListener mOnWaitingForKey; + MediaEventListener mOnDecodeWarning; + MediaEventListener mOnNextFrameStatus; + MediaEventListener mOnTrackInfoUpdated; + MediaEventListener mOnSecondaryVideoContainerInstalled; + MediaEventListener mOnStoreDecoderBenchmark; + + // True if we have suspended video decoding. + bool mIsVideoDecodingSuspended = false; + + protected: + // PlaybackRate and pitch preservation status we should start at. + double mPlaybackRate; + + // True if the decoder is seeking. + Watchable mLogicallySeeking; + + // Buffered range, mirrored from the reader. + Mirror mBuffered; + + // NB: Don't use mCurrentPosition directly, but rather CurrentPosition(). + Mirror mCurrentPosition; + + // Duration of the media resource according to the state machine. + Mirror mStateMachineDuration; + + // Used to distinguish whether the audio is producing sound. + Mirror mIsAudioDataAudible; + + // Volume of playback. 0.0 = muted. 1.0 = full volume. + Canonical mVolume; + + Canonical mPreservesPitch; + + Canonical mLooping; + + Canonical mStreamName; + + // The device used with SetSink, or nullptr if no explicit device has been + // set. + Canonical> mSinkDevice; + + // Set if the decoder is sending video to a secondary container. While set we + // should not suspend the decoder. + Canonical> mSecondaryVideoContainer; + + // Whether this MediaDecoder's output is captured, halted or not captured. + // When captured, all decoded data must be played out through mOutputTracks. + Canonical mOutputCaptureState; + + // A dummy track used to access the right MediaTrackGraph instance. Needed + // since there's no guarantee that output tracks are present. + Canonical> mOutputDummyTrack; + + // Tracks that, if set, will get data routed through them. 
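+  // The Canonical<...> members in this section, including mOutputTracks just
+  // below, are the main-thread ends of the state-mirroring machinery: this
+  // class owns the canonical value and the state machine observes it through
+  // a connected Mirror. A hedged sketch of the flow (the mirror-side spelling
+  // is illustrative):
+  //
+  //   mLooping = true;             // main thread: canonical write
+  //   ...                          // change propagates asynchronously
+  //   if (mLooping.Ref()) { ... }  // MDSM task queue: read via its Mirror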
+ Canonical>> mOutputTracks; + + // PrincipalHandle to be used when feeding data into mOutputTracks. + Canonical mOutputPrincipal; + + // Media duration set explicitly by JS. At present, this is only ever present + // for MSE. + Maybe mExplicitDuration; + + // Set to one of the valid play states. + // This can only be changed on the main thread while holding the decoder + // monitor. Thus, it can be safely read while holding the decoder monitor + // OR on the main thread. + Canonical mPlayState; + + // This can only be changed on the main thread. + PlayState mNextState = PLAY_STATE_PAUSED; + + // True if the media is same-origin with the element. Data can only be + // passed to MediaStreams when this is true. + bool mSameOriginMedia; + + // We can allow video decoding in background when we match some special + // conditions, eg. when the cursor is hovering over the tab. This observer is + // used to listen the related events. + RefPtr mVideoDecodingOberver; + + // True if we want to resume video decoding even the media element is in the + // background. + bool mIsBackgroundVideoDecodingAllowed; + + // True if we want to delay seeking, and and save the latest seeking target to + // resume to when we stop delaying seeking. + bool mShouldDelaySeek = false; + Maybe mDelayedSeekTarget; + + public: + Canonical& CanonicalVolume() { return mVolume; } + Canonical& CanonicalPreservesPitch() { return mPreservesPitch; } + Canonical& CanonicalLooping() { return mLooping; } + Canonical& CanonicalStreamName() { return mStreamName; } + Canonical>& CanonicalSinkDevice() { + return mSinkDevice; + } + Canonical>& CanonicalSecondaryVideoContainer() { + return mSecondaryVideoContainer; + } + Canonical& CanonicalOutputCaptureState() { + return mOutputCaptureState; + } + Canonical>& + CanonicalOutputDummyTrack() { + return mOutputDummyTrack; + } + Canonical>>& + CanonicalOutputTracks() { + return mOutputTracks; + } + Canonical& CanonicalOutputPrincipal() { + return mOutputPrincipal; + } + Canonical& CanonicalPlayState() { return mPlayState; } + + void UpdateTelemetryHelperBasedOnPlayState(PlayState aState) const; + + TelemetryProbesReporter::Visibility OwnerVisibility() const; + + // Those methods exist to report telemetry related metrics. + double GetTotalVideoPlayTimeInSeconds() const; + double GetTotalVideoHDRPlayTimeInSeconds() const; + double GetVisibleVideoPlayTimeInSeconds() const; + double GetInvisibleVideoPlayTimeInSeconds() const; + double GetVideoDecodeSuspendedTimeInSeconds() const; + double GetTotalAudioPlayTimeInSeconds() const; + double GetAudiblePlayTimeInSeconds() const; + double GetInaudiblePlayTimeInSeconds() const; + double GetMutedPlayTimeInSeconds() const; + + private: + /** + * This enum describes the reason why we need to update the logical position. + * ePeriodicUpdate : the position grows periodically during playback + * eSeamlessLoopingSeeking : the position changes due to demuxer level seek. + * eOther : due to normal seeking or other attributes changes, eg. 
playstate + */ + enum class PositionUpdate { + ePeriodicUpdate, + eSeamlessLoopingSeeking, + eOther, + }; + PositionUpdate GetPositionUpdateReason(double aPrevPos, + const media::TimeUnit& aCurPos) const; + + // Notify owner when the audible state changed + void NotifyAudibleStateChanged(); + + void NotifyVolumeChanged(); + + bool mTelemetryReported; + const MediaContainerType mContainerType; + bool mCanPlayThrough = false; + + UniquePtr mTelemetryProbesReporter; + +# ifdef MOZ_WMF_MEDIA_ENGINE + // True when we need to update the newly created MDSM's status to make it + // consistent with the previous destroyed one. + bool mPendingStatusUpdateForNewlyCreatedStateMachine = false; +# endif +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/MediaDecoderOwner.h b/dom/media/MediaDecoderOwner.h new file mode 100644 index 0000000000..f53a59f193 --- /dev/null +++ b/dom/media/MediaDecoderOwner.h @@ -0,0 +1,200 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#ifndef MediaDecoderOwner_h_ +#define MediaDecoderOwner_h_ + +#include "mozilla/UniquePtr.h" +#include "MediaInfo.h" +#include "MediaSegment.h" +#include "nsSize.h" + +namespace mozilla { + +class AbstractThread; +class GMPCrashHelper; +class VideoFrameContainer; +class MediaInfo; +class MediaResult; +enum class RFPTarget : uint64_t; + +namespace dom { +class Document; +class HTMLMediaElement; +} // namespace dom + +class MediaDecoderOwner { + public: + // Called by the media decoder to indicate that the download is progressing. + virtual void DownloadProgressed() = 0; + + // Dispatch an asynchronous event to the decoder owner + virtual void DispatchAsyncEvent(const nsAString& aName) = 0; + + // Triggers a recomputation of readyState. + virtual void UpdateReadyState() = 0; + + // Called by the decoder object to notify owner might need to dispatch the + // `timeupdate` event due to current time changes. + virtual void MaybeQueueTimeupdateEvent() = 0; + + // Return true if decoding should be paused + virtual bool GetPaused() = 0; + + // Called by the video decoder object, on the main thread, + // when it has read the metadata containing video dimensions, + // etc. + // Must take ownership of MetadataTags aTags argument. + virtual void MetadataLoaded(const MediaInfo* aInfo, + UniquePtr aTags) = 0; + + // Called by the decoder object, on the main thread, + // when it has read the first frame of the video or audio. + virtual void FirstFrameLoaded() = 0; + + // Called by the decoder object, on the main thread, + // when the resource has a network error during loading. + // The decoder owner should call Shutdown() on the decoder and drop the + // reference to the decoder to prevent further calls into the decoder. + virtual void NetworkError(const MediaResult& aError) = 0; + + // Called by the decoder object, on the main thread, when the + // resource has a decode error during metadata loading or decoding. + // The decoder owner should call Shutdown() on the decoder and drop the + // reference to the decoder to prevent further calls into the decoder. 
+ virtual void DecodeError(const MediaResult& aError) = 0; + + // Called by the decoder object, on the main thread, when the + // resource has a decode issue during metadata loading or decoding, but can + // continue decoding. + virtual void DecodeWarning(const MediaResult& aError) = 0; + + // Return true if media element error attribute is not null. + virtual bool HasError() const = 0; + + // Called by the video decoder object, on the main thread, when the + // resource load has been cancelled. + virtual void LoadAborted() = 0; + + // Called by the video decoder object, on the main thread, + // when the video playback has ended. + virtual void PlaybackEnded() = 0; + + // Called by the video decoder object, on the main thread, + // when the resource has started seeking. + virtual void SeekStarted() = 0; + + // Called by the video decoder object, on the main thread, + // when the resource has completed seeking. + virtual void SeekCompleted() = 0; + + // Called by the video decoder object, on the main thread, + // when the resource has aborted seeking. + virtual void SeekAborted() = 0; + + // Called by the media stream, on the main thread, when the download + // has been suspended by the cache or because the element itself + // asked the decoder to suspend the download. + virtual void DownloadSuspended() = 0; + + // Called by the media decoder to indicate whether the media cache has + // suspended the channel. + virtual void NotifySuspendedByCache(bool aSuspendedByCache) = 0; + + // called to notify that the principal of the decoder's media resource has + // changed. + virtual void NotifyDecoderPrincipalChanged() = 0; + + // The status of the next frame which might be available from the decoder + enum NextFrameStatus { + // The next frame of audio/video is available + NEXT_FRAME_AVAILABLE, + // The next frame of audio/video is unavailable because the decoder + // is paused while it buffers up data + NEXT_FRAME_UNAVAILABLE_BUFFERING, + // The next frame of audio/video is unavailable for the decoder is seeking. + NEXT_FRAME_UNAVAILABLE_SEEKING, + // The next frame of audio/video is unavailable for some other reasons + NEXT_FRAME_UNAVAILABLE, + // Sentinel value + NEXT_FRAME_UNINITIALIZED + }; + + // Called by media decoder when the audible state changed + virtual void SetAudibleState(bool aAudible) = 0; + + // Notified by the decoder that XPCOM shutdown has begun. + // The decoder owner should call Shutdown() on the decoder and drop the + // reference to the decoder to prevent further calls into the decoder. + virtual void NotifyXPCOMShutdown() = 0; + + // Dispatches a "encrypted" event to the HTMLMediaElement, with the + // provided init data. Actual dispatch may be delayed until HAVE_METADATA. + // Main thread only. + virtual void DispatchEncrypted(const nsTArray& aInitData, + const nsAString& aInitDataType) = 0; + + // Notified by the decoder that a decryption key is required before emitting + // further output. + virtual void NotifyWaitingForKey() {} + + /* + * Methods that are used only in Gecko go here. We provide defaul + * implementations so they can compile in Servo without modification. + */ + // Return an abstract thread on which to run main thread runnables. + virtual AbstractThread* AbstractMainThread() const { return nullptr; } + + // Get the HTMLMediaElement object if the decoder is being used from an + // HTML media element, and null otherwise. 
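Illustrative aside (not part of the upstream file): NextFrameStatus above is how the decoder summarises frame availability for its owner. The sketch below shows one plausible way an owner could collapse it into a coarse readiness level; it assumes the MediaDecoderOwner declaration above and is not the actual HTMLMediaElement readyState algorithm, which considers more inputs.

// Illustrative only: a coarse readiness level derived from NextFrameStatus.
// The real readyState computation in HTMLMediaElement also looks at buffered
// ranges, the ended flag, suspended downloads, and more.
enum class Readiness { NoFrame, CurrentFrameOnly, CanAdvance };

Readiness CoarseReadiness(MediaDecoderOwner::NextFrameStatus aStatus) {
  switch (aStatus) {
    case MediaDecoderOwner::NEXT_FRAME_AVAILABLE:
      return Readiness::CanAdvance;        // next frame decoded and queued
    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING:
    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING:
    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE:
      return Readiness::CurrentFrameOnly;  // can paint, but can't advance yet
    case MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED:
    default:
      return Readiness::NoFrame;           // nothing decoded yet
  }
}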
+  virtual dom::HTMLMediaElement* GetMediaElement() { return nullptr; }
+
+  // Called by the media decoder and the video frame to get the
+  // ImageContainer containing the video data.
+  virtual VideoFrameContainer* GetVideoFrameContainer() { return nullptr; }
+
+  // Return the decoder owner's owner document.
+  virtual mozilla::dom::Document* GetDocument() const { return nullptr; }
+
+  // Called by the media decoder to create a GMPCrashHelper.
+  virtual already_AddRefed<GMPCrashHelper> CreateGMPCrashHelper() {
+    return nullptr;
+  }
+
+  // Called by the frame container to notify the layout engine that the
+  // size of the image has changed, or the video needs to be repainted
+  // for some other reason.
+  enum class ImageSizeChanged { No, Yes };
+  enum class ForceInvalidate { No, Yes };
+  virtual void Invalidate(ImageSizeChanged aImageSizeChanged,
+                          const Maybe<nsIntSize>& aNewIntrinsicSize,
+                          ForceInvalidate aForceInvalidate) {}
+
+  // Called after the MediaStream we're playing rendered a frame to aContainer
+  // with a different principalHandle than the previous frame.
+  virtual void PrincipalHandleChangedForVideoFrameContainer(
+      VideoFrameContainer* aContainer,
+      const PrincipalHandle& aNewPrincipalHandle) {}
+
+  // Called after the MediaDecoder has installed the given secondary video
+  // container and render potential frames to it.
+  virtual void OnSecondaryVideoContainerInstalled(
+      const RefPtr<VideoFrameContainer>& aSecondaryContainer) {}
+
+  // Return true if the owner is actually invisible to users.
+  virtual bool IsActuallyInvisible() const = 0;
+
+  // Returns true if the owner should resist fingerprinting.
+  virtual bool ShouldResistFingerprinting(RFPTarget aTarget) const = 0;
+
+  /*
+   * Servo only methods go here. Please provide default implementations so they
+   * can build in Gecko without any modification.
+   */
+};
+
+}  // namespace mozilla
+
+#endif
diff --git a/dom/media/MediaDecoderStateMachine.cpp b/dom/media/MediaDecoderStateMachine.cpp
new file mode 100644
index 0000000000..f3cd79047b
--- /dev/null
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -0,0 +1,4870 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ + +#include +#include +#include + +#include "mediasink/AudioSink.h" +#include "mediasink/AudioSinkWrapper.h" +#include "mediasink/DecodedStream.h" +#include "mediasink/VideoSink.h" +#include "mozilla/Logging.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/NotNull.h" +#include "mozilla/Preferences.h" +#include "mozilla/ProfilerLabels.h" +#include "mozilla/ProfilerMarkers.h" +#include "mozilla/ProfilerMarkerTypes.h" +#include "mozilla/SharedThreadPool.h" +#include "mozilla/Sprintf.h" +#include "mozilla/StaticPrefs_media.h" +#include "mozilla/Telemetry.h" +#include "mozilla/TaskQueue.h" + +#include "nsIMemoryReporter.h" +#include "nsPrintfCString.h" +#include "nsTArray.h" +#include "AudioSegment.h" +#include "DOMMediaStream.h" +#include "ImageContainer.h" +#include "MediaDecoder.h" +#include "MediaDecoderStateMachine.h" +#include "MediaShutdownManager.h" +#include "MediaTrackGraph.h" +#include "MediaTimer.h" +#include "PerformanceRecorder.h" +#include "ReaderProxy.h" +#include "TimeUnits.h" +#include "VideoSegment.h" +#include "VideoUtils.h" + +namespace mozilla { + +using namespace mozilla::media; + +#define NS_DispatchToMainThread(...) \ + CompileError_UseAbstractThreadDispatchInstead + +// avoid redefined macro in unified build +#undef FMT +#undef LOG +#undef LOGV +#undef LOGW +#undef LOGE +#undef SFMT +#undef SLOG +#undef SLOGW +#undef SLOGE + +#define FMT(x, ...) "Decoder=%p " x, mDecoderID, ##__VA_ARGS__ +#define LOG(x, ...) \ + DDMOZ_LOG(gMediaDecoderLog, LogLevel::Debug, "Decoder=%p " x, mDecoderID, \ + ##__VA_ARGS__) +#define LOGV(x, ...) \ + DDMOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, "Decoder=%p " x, mDecoderID, \ + ##__VA_ARGS__) +#define LOGW(x, ...) NS_WARNING(nsPrintfCString(FMT(x, ##__VA_ARGS__)).get()) +#define LOGE(x, ...) \ + NS_DebugBreak(NS_DEBUG_WARNING, \ + nsPrintfCString(FMT(x, ##__VA_ARGS__)).get(), nullptr, \ + __FILE__, __LINE__) + +// Used by StateObject and its sub-classes +#define SFMT(x, ...) \ + "Decoder=%p state=%s " x, mMaster->mDecoderID, ToStateStr(GetState()), \ + ##__VA_ARGS__ +#define SLOG(x, ...) \ + DDMOZ_LOGEX(mMaster, gMediaDecoderLog, LogLevel::Debug, "state=%s " x, \ + ToStateStr(GetState()), ##__VA_ARGS__) +#define SLOGW(x, ...) NS_WARNING(nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get()) +#define SLOGE(x, ...) \ + NS_DebugBreak(NS_DEBUG_WARNING, \ + nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get(), nullptr, \ + __FILE__, __LINE__) + +// Certain constants get stored as member variables and then adjusted by various +// scale factors on a per-decoder basis. We want to make sure to avoid using +// these constants directly, so we put them in a namespace. +namespace detail { + +// Resume a suspended video decoder to the current playback position plus this +// time premium for compensating the seeking delay. +static constexpr auto RESUME_VIDEO_PREMIUM = TimeUnit::FromMicroseconds(125000); + +static const int64_t AMPLE_AUDIO_USECS = 2000000; + +// If more than this much decoded audio is queued, we'll hold off +// decoding more audio. +static constexpr auto AMPLE_AUDIO_THRESHOLD = + TimeUnit::FromMicroseconds(AMPLE_AUDIO_USECS); + +} // namespace detail + +// If we have fewer than LOW_VIDEO_FRAMES decoded frames, and +// we're not "prerolling video", we'll skip the video up to the next keyframe +// which is at or after the current playback position. +static const uint32_t LOW_VIDEO_FRAMES = 2; + +// Arbitrary "frame duration" when playing only audio. 
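Illustrative aside (not part of the upstream file): the NS_DispatchToMainThread redefinition above is a compile-time ban, since any use of that name in this file now expands to an identifier that is never declared. The standalone sketch below shows the same trick with made-up names.

// Minimal illustration of the "redefine to an undeclared identifier" trick
// used above for NS_DispatchToMainThread. Names here are made up.
#include <cstdio>

void LegacyLog(const char* aMsg) { std::puts(aMsg); }

// From here on, calling LegacyLog(...) in this file no longer compiles: the
// call expands to CompileError_UseStructuredLoggingInstead, which is not
// declared anywhere.
#define LegacyLog(...) CompileError_UseStructuredLoggingInstead

int main() {
  // LegacyLog("hi");  // uncommenting this line is a build error, as intended
  return 0;
}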
+static const uint32_t AUDIO_DURATION_USECS = 40000; + +namespace detail { + +// If we have less than this much buffered data available, we'll consider +// ourselves to be running low on buffered data. We determine how much +// buffered data we have remaining using the reader's GetBuffered() +// implementation. +static const int64_t LOW_BUFFER_THRESHOLD_USECS = 5000000; + +static constexpr auto LOW_BUFFER_THRESHOLD = + TimeUnit::FromMicroseconds(LOW_BUFFER_THRESHOLD_USECS); + +// LOW_BUFFER_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS, +// otherwise the skip-to-keyframe logic can activate when we're running low on +// data. +static_assert(LOW_BUFFER_THRESHOLD_USECS > AMPLE_AUDIO_USECS, + "LOW_BUFFER_THRESHOLD_USECS is too small"); + +} // namespace detail + +// Amount of excess data to add in to the "should we buffer" calculation. +static constexpr auto EXHAUSTED_DATA_MARGIN = + TimeUnit::FromMicroseconds(100000); + +static const uint32_t MIN_VIDEO_QUEUE_SIZE = 3; +static const uint32_t MAX_VIDEO_QUEUE_SIZE = 10; +#ifdef MOZ_APPLEMEDIA +static const uint32_t HW_VIDEO_QUEUE_SIZE = 10; +#else +static const uint32_t HW_VIDEO_QUEUE_SIZE = 3; +#endif +static const uint32_t VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE = 9999; + +static uint32_t sVideoQueueDefaultSize = MAX_VIDEO_QUEUE_SIZE; +static uint32_t sVideoQueueHWAccelSize = HW_VIDEO_QUEUE_SIZE; +static uint32_t sVideoQueueSendToCompositorSize = + VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE; + +static void InitVideoQueuePrefs() { + MOZ_ASSERT(NS_IsMainThread()); + static bool sPrefInit = false; + if (!sPrefInit) { + sPrefInit = true; + sVideoQueueDefaultSize = Preferences::GetUint( + "media.video-queue.default-size", MAX_VIDEO_QUEUE_SIZE); + sVideoQueueHWAccelSize = Preferences::GetUint( + "media.video-queue.hw-accel-size", HW_VIDEO_QUEUE_SIZE); + sVideoQueueSendToCompositorSize = + Preferences::GetUint("media.video-queue.send-to-compositor-size", + VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE); + } +} + +template +static void DiscardFramesFromTail(MediaQueue& aQueue, + const Function&& aTest) { + while (aQueue.GetSize()) { + if (aTest(aQueue.PeekBack()->mTime.ToMicroseconds())) { + RefPtr releaseMe = aQueue.PopBack(); + continue; + } + break; + } +} + +// Delay, in milliseconds, that tabs needs to be in background before video +// decoding is suspended. +static TimeDuration SuspendBackgroundVideoDelay() { + return TimeDuration::FromMilliseconds( + StaticPrefs::media_suspend_background_video_delay_ms()); +} + +class MediaDecoderStateMachine::StateObject { + public: + virtual ~StateObject() = default; + virtual void Exit() {} // Exit action. + virtual void Step() {} // Perform a 'cycle' of this state object. + virtual State GetState() const = 0; + + // Event handlers for various events. 
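Illustrative aside (not part of the upstream file): DiscardFramesFromTail above pops samples from the back of a MediaQueue while a caller-supplied predicate on their timestamps holds. The sketch below mirrors that shape with a plain std::deque of timestamps instead of MediaQueue and RefPtr samples.

// Illustrative stand-in for DiscardFramesFromTail above, using a plain
// std::deque<int64_t> of frame times instead of MediaQueue/RefPtr samples.
#include <cstdint>
#include <deque>

template <typename Predicate>
void DiscardTailWhile(std::deque<int64_t>& aTimes, const Predicate& aTest) {
  // Pop from the back as long as the newest entry matches the predicate,
  // then stop at the first one that does not (same shape as the real code).
  while (!aTimes.empty() && aTest(aTimes.back())) {
    aTimes.pop_back();
  }
}

// Usage: drop every queued frame whose time is past a loop offset.
//   std::deque<int64_t> times{0, 20000, 40000, 60000};
//   DiscardTailWhile(times, [](int64_t t) { return t > 30000; });
//   -> times == {0, 20000}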
+ virtual void HandleAudioCaptured() {} + virtual void HandleAudioDecoded(AudioData* aAudio) { + Crash("Unexpected event!", __func__); + } + virtual void HandleVideoDecoded(VideoData* aVideo) { + Crash("Unexpected event!", __func__); + } + virtual void HandleAudioWaited(MediaData::Type aType) { + Crash("Unexpected event!", __func__); + } + virtual void HandleVideoWaited(MediaData::Type aType) { + Crash("Unexpected event!", __func__); + } + virtual void HandleWaitingForAudio() { Crash("Unexpected event!", __func__); } + virtual void HandleAudioCanceled() { Crash("Unexpected event!", __func__); } + virtual void HandleEndOfAudio() { Crash("Unexpected event!", __func__); } + virtual void HandleWaitingForVideo() { Crash("Unexpected event!", __func__); } + virtual void HandleVideoCanceled() { Crash("Unexpected event!", __func__); } + virtual void HandleEndOfVideo() { Crash("Unexpected event!", __func__); } + + virtual RefPtr HandleSeek( + const SeekTarget& aTarget); + + virtual RefPtr HandleShutdown(); + + virtual void HandleVideoSuspendTimeout() = 0; + + virtual void HandleResumeVideoDecoding(const TimeUnit& aTarget); + + virtual void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) {} + + virtual void GetDebugInfo( + dom::MediaDecoderStateMachineDecodingStateDebugInfo& aInfo) {} + + virtual void HandleLoopingChanged() {} + + private: + template + auto ReturnTypeHelper(R (S::*)(As...)) -> R; + + void Crash(const char* aReason, const char* aSite) { + char buf[1024]; + SprintfLiteral(buf, "%s state=%s callsite=%s", aReason, + ToStateStr(GetState()), aSite); + MOZ_ReportAssertionFailure(buf, __FILE__, __LINE__); + MOZ_CRASH(); + } + + protected: + enum class EventVisibility : int8_t { Observable, Suppressed }; + + using Master = MediaDecoderStateMachine; + explicit StateObject(Master* aPtr) : mMaster(aPtr) {} + TaskQueue* OwnerThread() const { return mMaster->mTaskQueue; } + ReaderProxy* Reader() const { return mMaster->mReader; } + const MediaInfo& Info() const { return mMaster->Info(); } + MediaQueue& AudioQueue() const { return mMaster->mAudioQueue; } + MediaQueue& VideoQueue() const { return mMaster->mVideoQueue; } + + template + auto CallEnterMemberFunction(S* aS, std::tuple& aTuple, + std::index_sequence) + -> decltype(ReturnTypeHelper(&S::Enter)) { + AUTO_PROFILER_LABEL("StateObject::CallEnterMemberFunction", MEDIA_PLAYBACK); + return aS->Enter(std::move(std::get(aTuple))...); + } + + // Note this function will delete the current state object. + // Don't access members to avoid UAF after this call. + template + auto SetState(Ts&&... aArgs) -> decltype(ReturnTypeHelper(&S::Enter)) { + // |aArgs| must be passed by reference to avoid passing MOZ_NON_PARAM class + // SeekJob by value. See bug 1287006 and bug 1338374. But we still *must* + // copy the parameters, because |Exit()| can modify them. See bug 1312321. + // So we 1) pass the parameters by reference, but then 2) immediately copy + // them into a Tuple to be safe against modification, and finally 3) move + // the elements of the Tuple into the final function call. + auto copiedArgs = std::make_tuple(std::forward(aArgs)...); + + // Copy mMaster which will reset to null. + auto* master = mMaster; + + auto* s = new S(master); + + // It's possible to seek again during seeking, otherwise the new state + // should always be different from the original one. 
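Illustrative aside (not part of the upstream file): SetState(), whose body starts above and continues just below, copies its arguments before calling Exit() and hands the outgoing state object to a queued no-op task, so the old object is destroyed only after the current stack has unwound. The sketch below captures just that deferred-deletion idea with hypothetical ToyState/ToyMachine types; shared_ptr appears only because std::function requires a copyable callable.

// Toy sketch of the deferred-deletion idea used by SetState() above.
#include <deque>
#include <functional>
#include <memory>

struct ToyState {
  virtual ~ToyState() = default;
  virtual void Exit() {}
};

struct ToyMachine {
  std::deque<std::function<void()>> mTaskQueue;  // stands in for OwnerThread()
  std::unique_ptr<ToyState> mStateObj;

  void SetState(std::unique_ptr<ToyState> aNext) {
    if (mStateObj) {
      mStateObj->Exit();
    }
    // Keep the old state alive inside a queued no-op task; it is destroyed
    // only when that task is later run and discarded, i.e. after the caller,
    // which may still be executing code of the old state, has returned.
    mTaskQueue.push_back(
        [doomed = std::shared_ptr<ToyState>(std::move(mStateObj))] {});
    mStateObj = std::move(aNext);
  }
};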
+ MOZ_ASSERT(GetState() != s->GetState() || + GetState() == DECODER_STATE_SEEKING_ACCURATE || + GetState() == DECODER_STATE_SEEKING_FROMDORMANT || + GetState() == DECODER_STATE_SEEKING_NEXTFRAMESEEKING || + GetState() == DECODER_STATE_SEEKING_VIDEOONLY); + + SLOG("change state to: %s", ToStateStr(s->GetState())); + PROFILER_MARKER_TEXT("MDSM::StateChange", MEDIA_PLAYBACK, {}, + nsPrintfCString("%s", ToStateStr(s->GetState()))); + + Exit(); + + // Delete the old state asynchronously to avoid UAF if the caller tries to + // access its members after SetState() returns. + master->OwnerThread()->DispatchDirectTask( + NS_NewRunnableFunction("MDSM::StateObject::DeleteOldState", + [toDelete = std::move(master->mStateObj)]() {})); + // Also reset mMaster to catch potentail UAF. + mMaster = nullptr; + + master->mStateObj.reset(s); + return CallEnterMemberFunction(s, copiedArgs, + std::index_sequence_for{}); + } + + RefPtr SetSeekingState( + SeekJob&& aSeekJob, EventVisibility aVisibility); + + void SetDecodingState(); + + // Take a raw pointer in order not to change the life cycle of MDSM. + // It is guaranteed to be valid by MDSM. + Master* mMaster; +}; + +/** + * Purpose: decode metadata like duration and dimensions of the media resource. + * + * Transition to other states when decoding metadata is done: + * SHUTDOWN if failing to decode metadata. + * DECODING_FIRSTFRAME otherwise. + */ +class MediaDecoderStateMachine::DecodeMetadataState + : public MediaDecoderStateMachine::StateObject { + public: + explicit DecodeMetadataState(Master* aPtr) : StateObject(aPtr) {} + + void Enter() { + MOZ_ASSERT(!mMaster->mVideoDecodeSuspended); + MOZ_ASSERT(!mMetadataRequest.Exists()); + SLOG("Dispatching AsyncReadMetadata"); + + // We disconnect mMetadataRequest in Exit() so it is fine to capture + // a raw pointer here. + Reader() + ->ReadMetadata() + ->Then( + OwnerThread(), __func__, + [this](MetadataHolder&& aMetadata) { + OnMetadataRead(std::move(aMetadata)); + }, + [this](const MediaResult& aError) { OnMetadataNotRead(aError); }) + ->Track(mMetadataRequest); + } + + void Exit() override { mMetadataRequest.DisconnectIfExists(); } + + State GetState() const override { return DECODER_STATE_DECODING_METADATA; } + + RefPtr HandleSeek( + const SeekTarget& aTarget) override { + MOZ_DIAGNOSTIC_ASSERT(false, "Can't seek while decoding metadata."); + return MediaDecoder::SeekPromise::CreateAndReject(true, __func__); + } + + void HandleVideoSuspendTimeout() override { + // Do nothing since no decoders are created yet. + } + + void HandleResumeVideoDecoding(const TimeUnit&) override { + // We never suspend video decoding in this state. + MOZ_ASSERT(false, "Shouldn't have suspended video decoding."); + } + + private: + void OnMetadataRead(MetadataHolder&& aMetadata); + + void OnMetadataNotRead(const MediaResult& aError) { + AUTO_PROFILER_LABEL("DecodeMetadataState::OnMetadataNotRead", + MEDIA_PLAYBACK); + + mMetadataRequest.Complete(); + SLOGE("Decode metadata failed, shutting down decoder"); + mMaster->DecodeError(aError); + } + + MozPromiseRequestHolder mMetadataRequest; +}; + +/** + * Purpose: release decoder resources to save memory and hardware resources. + * + * Transition to: + * SEEKING if any seek request or play state changes to PLAYING. 
+ */ +class MediaDecoderStateMachine::DormantState + : public MediaDecoderStateMachine::StateObject { + public: + explicit DormantState(Master* aPtr) : StateObject(aPtr) {} + + void Enter() { + if (mMaster->IsPlaying()) { + mMaster->StopPlayback(); + } + + // Calculate the position to seek to when exiting dormant. + auto t = mMaster->mMediaSink->IsStarted() ? mMaster->GetClock() + : mMaster->GetMediaTime(); + mMaster->AdjustByLooping(t); + mPendingSeek.mTarget.emplace(t, SeekTarget::Accurate); + // SeekJob asserts |mTarget.IsValid() == !mPromise.IsEmpty()| so we + // need to create the promise even it is not used at all. + // The promise may be used when coming out of DormantState into + // SeekingState. + RefPtr x = + mPendingSeek.mPromise.Ensure(__func__); + + // Reset the decoding state to ensure that any queued video frames are + // released and don't consume video memory. + mMaster->ResetDecode(); + + // No need to call StopMediaSink() here. + // We will do it during seeking when exiting dormant. + + // Ignore WAIT_FOR_DATA since we won't decode in dormant. + mMaster->mAudioWaitRequest.DisconnectIfExists(); + mMaster->mVideoWaitRequest.DisconnectIfExists(); + + MaybeReleaseResources(); + } + + void Exit() override { + // mPendingSeek is either moved when exiting dormant or + // should be rejected here before transition to SHUTDOWN. + mPendingSeek.RejectIfExists(__func__); + } + + State GetState() const override { return DECODER_STATE_DORMANT; } + + RefPtr HandleSeek( + const SeekTarget& aTarget) override; + + void HandleVideoSuspendTimeout() override { + // Do nothing since we've released decoders in Enter(). + } + + void HandleResumeVideoDecoding(const TimeUnit&) override { + // Do nothing since we won't resume decoding until exiting dormant. + } + + void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override; + + void HandleAudioDecoded(AudioData*) override { MaybeReleaseResources(); } + void HandleVideoDecoded(VideoData*) override { MaybeReleaseResources(); } + void HandleWaitingForAudio() override { MaybeReleaseResources(); } + void HandleWaitingForVideo() override { MaybeReleaseResources(); } + void HandleAudioCanceled() override { MaybeReleaseResources(); } + void HandleVideoCanceled() override { MaybeReleaseResources(); } + void HandleEndOfAudio() override { MaybeReleaseResources(); } + void HandleEndOfVideo() override { MaybeReleaseResources(); } + + private: + void MaybeReleaseResources() { + if (!mMaster->mAudioDataRequest.Exists() && + !mMaster->mVideoDataRequest.Exists()) { + // Release decoders only when they are idle. Otherwise it might cause + // decode error later when resetting decoders during seeking. + mMaster->mReader->ReleaseResources(); + } + } + + SeekJob mPendingSeek; +}; + +/** + * Purpose: decode the 1st audio and video frames to fire the 'loadeddata' + * event. + * + * Transition to: + * SHUTDOWN if any decode error. + * SEEKING if any seek request. + * DECODING/LOOPING_DECODING when the 'loadeddata' event is fired. + */ +class MediaDecoderStateMachine::DecodingFirstFrameState + : public MediaDecoderStateMachine::StateObject { + public: + explicit DecodingFirstFrameState(Master* aPtr) : StateObject(aPtr) {} + + void Enter(); + + void Exit() override { + // mPendingSeek is either moved in MaybeFinishDecodeFirstFrame() + // or should be rejected here before transition to SHUTDOWN. 
+ mPendingSeek.RejectIfExists(__func__); + } + + State GetState() const override { return DECODER_STATE_DECODING_FIRSTFRAME; } + + void HandleAudioDecoded(AudioData* aAudio) override { + mMaster->PushAudio(aAudio); + MaybeFinishDecodeFirstFrame(); + } + + void HandleVideoDecoded(VideoData* aVideo) override { + mMaster->PushVideo(aVideo); + MaybeFinishDecodeFirstFrame(); + } + + void HandleWaitingForAudio() override { + mMaster->WaitForData(MediaData::Type::AUDIO_DATA); + } + + void HandleAudioCanceled() override { mMaster->RequestAudioData(); } + + void HandleEndOfAudio() override { + AudioQueue().Finish(); + MaybeFinishDecodeFirstFrame(); + } + + void HandleWaitingForVideo() override { + mMaster->WaitForData(MediaData::Type::VIDEO_DATA); + } + + void HandleVideoCanceled() override { + mMaster->RequestVideoData(media::TimeUnit()); + } + + void HandleEndOfVideo() override { + VideoQueue().Finish(); + MaybeFinishDecodeFirstFrame(); + } + + void HandleAudioWaited(MediaData::Type aType) override { + mMaster->RequestAudioData(); + } + + void HandleVideoWaited(MediaData::Type aType) override { + mMaster->RequestVideoData(media::TimeUnit()); + } + + void HandleVideoSuspendTimeout() override { + // Do nothing for we need to decode the 1st video frame to get the + // dimensions. + } + + void HandleResumeVideoDecoding(const TimeUnit&) override { + // We never suspend video decoding in this state. + MOZ_ASSERT(false, "Shouldn't have suspended video decoding."); + } + + RefPtr HandleSeek( + const SeekTarget& aTarget) override { + if (mMaster->mIsMSE) { + return StateObject::HandleSeek(aTarget); + } + // Delay seek request until decoding first frames for non-MSE media. + SLOG("Not Enough Data to seek at this stage, queuing seek"); + mPendingSeek.RejectIfExists(__func__); + mPendingSeek.mTarget.emplace(aTarget); + return mPendingSeek.mPromise.Ensure(__func__); + } + + private: + // Notify FirstFrameLoaded if having decoded first frames and + // transition to SEEKING if there is any pending seek, or DECODING otherwise. + void MaybeFinishDecodeFirstFrame(); + + SeekJob mPendingSeek; +}; + +/** + * Purpose: decode audio/video data for playback. + * + * Transition to: + * DORMANT if playback is paused for a while. + * SEEKING if any seek request. + * SHUTDOWN if any decode error. + * BUFFERING if playback can't continue due to lack of decoded data. + * COMPLETED when having decoded all audio/video data. + * LOOPING_DECODING when media start seamless looping + */ +class MediaDecoderStateMachine::DecodingState + : public MediaDecoderStateMachine::StateObject { + public: + explicit DecodingState(Master* aPtr) + : StateObject(aPtr), mDormantTimer(OwnerThread()) {} + + void Enter(); + + void Exit() override { + if (!mDecodeStartTime.IsNull()) { + TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime; + SLOG("Exiting DECODING, decoded for %.3lfs", decodeDuration.ToSeconds()); + } + mDormantTimer.Reset(); + mOnAudioPopped.DisconnectIfExists(); + mOnVideoPopped.DisconnectIfExists(); + } + + void Step() override; + + State GetState() const override { return DECODER_STATE_DECODING; } + + void HandleAudioDecoded(AudioData* aAudio) override { + mMaster->PushAudio(aAudio); + DispatchDecodeTasksIfNeeded(); + MaybeStopPrerolling(); + } + + void HandleVideoDecoded(VideoData* aVideo) override { + // We only do this check when we're not looping, which can be known by + // checking the queue's offset. 
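Illustrative aside (not part of the upstream file): MaybeFinishDecodeFirstFrame(), declared above, fires FirstFrameLoaded once every enabled track has either produced its first sample or finished. The sketch below condenses that completion check into a hypothetical predicate; the real code inspects the state machine's audio/video queues rather than these booleans.

// Hypothetical condensation of the "are the first frames ready?" predicate
// behind MaybeFinishDecodeFirstFrame().
struct FirstFrameProgress {
  bool mHasAudioTrack = false;
  bool mHasVideoTrack = false;
  bool mGotFirstAudio = false;       // a sample reached the audio queue
  bool mGotFirstVideo = false;       // a sample reached the video queue
  bool mAudioQueueFinished = false;  // EOS reached without further samples
  bool mVideoQueueFinished = false;

  bool DoneDecodingFirstFrames() const {
    const bool audioReady =
        !mHasAudioTrack || mGotFirstAudio || mAudioQueueFinished;
    const bool videoReady =
        !mHasVideoTrack || mGotFirstVideo || mVideoQueueFinished;
    return audioReady && videoReady;
  }
};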
+ const auto currentTime = mMaster->GetMediaTime(); + if (aVideo->GetEndTime() < currentTime && + VideoQueue().GetOffset() == media::TimeUnit::Zero()) { + if (!mVideoFirstLateTime) { + mVideoFirstLateTime = Some(TimeStamp::Now()); + } + PROFILER_MARKER("Video falling behind", MEDIA_PLAYBACK, {}, + VideoFallingBehindMarker, aVideo->mTime.ToMicroseconds(), + currentTime.ToMicroseconds()); + SLOG("video %" PRId64 " starts being late (current=%" PRId64 ")", + aVideo->mTime.ToMicroseconds(), currentTime.ToMicroseconds()); + } else { + mVideoFirstLateTime.reset(); + } + mMaster->PushVideo(aVideo); + DispatchDecodeTasksIfNeeded(); + MaybeStopPrerolling(); + } + + void HandleAudioCanceled() override { mMaster->RequestAudioData(); } + + void HandleVideoCanceled() override { + mMaster->RequestVideoData(mMaster->GetMediaTime(), + ShouldRequestNextKeyFrame()); + } + + void HandleEndOfAudio() override; + void HandleEndOfVideo() override; + + void HandleWaitingForAudio() override { + mMaster->WaitForData(MediaData::Type::AUDIO_DATA); + MaybeStopPrerolling(); + } + + void HandleWaitingForVideo() override { + mMaster->WaitForData(MediaData::Type::VIDEO_DATA); + MaybeStopPrerolling(); + } + + void HandleAudioWaited(MediaData::Type aType) override { + mMaster->RequestAudioData(); + } + + void HandleVideoWaited(MediaData::Type aType) override { + mMaster->RequestVideoData(mMaster->GetMediaTime(), + ShouldRequestNextKeyFrame()); + } + + void HandleAudioCaptured() override { + MaybeStopPrerolling(); + // MediaSink is changed. Schedule Step() to check if we can start playback. + mMaster->ScheduleStateMachine(); + } + + void HandleVideoSuspendTimeout() override { + // No video, so nothing to suspend. + if (!mMaster->HasVideo()) { + return; + } + + PROFILER_MARKER_UNTYPED("MDSM::EnterVideoSuspend", MEDIA_PLAYBACK); + mMaster->mVideoDecodeSuspended = true; + mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::EnterVideoSuspend); + Reader()->SetVideoBlankDecode(true); + } + + void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override { + if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) { + // Schedule Step() to check if we can start playback. + mMaster->ScheduleStateMachine(); + // Try to dispatch decoding tasks for mMinimizePreroll might be reset. + DispatchDecodeTasksIfNeeded(); + } + + if (aPlayState == MediaDecoder::PLAY_STATE_PAUSED) { + StartDormantTimer(); + mVideoFirstLateTime.reset(); + } else { + mDormantTimer.Reset(); + } + } + + void GetDebugInfo( + dom::MediaDecoderStateMachineDecodingStateDebugInfo& aInfo) override { + aInfo.mIsPrerolling = mIsPrerolling; + } + + void HandleLoopingChanged() override { SetDecodingState(); } + + protected: + virtual void EnsureAudioDecodeTaskQueued(); + virtual void EnsureVideoDecodeTaskQueued(); + + virtual bool ShouldStopPrerolling() const { + return mIsPrerolling && + (DonePrerollingAudio() || + IsWaitingData(MediaData::Type::AUDIO_DATA)) && + (DonePrerollingVideo() || + IsWaitingData(MediaData::Type::VIDEO_DATA)); + } + + virtual bool IsWaitingData(MediaData::Type aType) const { + if (aType == MediaData::Type::AUDIO_DATA) { + return mMaster->IsWaitingAudioData(); + } + MOZ_ASSERT(aType == MediaData::Type::VIDEO_DATA); + return mMaster->IsWaitingVideoData(); + } + + void MaybeStopPrerolling() { + if (ShouldStopPrerolling()) { + mIsPrerolling = false; + // Check if we can start playback. 
+ mMaster->ScheduleStateMachine(); + } + } + + bool ShouldRequestNextKeyFrame() const { + if (!mVideoFirstLateTime) { + return false; + } + const double elapsedTimeMs = + (TimeStamp::Now() - *mVideoFirstLateTime).ToMilliseconds(); + const bool rv = elapsedTimeMs >= + StaticPrefs::media_decoder_skip_when_video_too_slow_ms(); + if (rv) { + PROFILER_MARKER_UNTYPED("Skipping to next keyframe", MEDIA_PLAYBACK); + SLOG( + "video has been late behind media time for %f ms, should skip to " + "next key frame", + elapsedTimeMs); + } + return rv; + } + + virtual bool IsBufferingAllowed() const { return true; } + + private: + void DispatchDecodeTasksIfNeeded(); + void MaybeStartBuffering(); + + // At the start of decoding we want to "preroll" the decode until we've + // got a few frames decoded before we consider whether decode is falling + // behind. Otherwise our "we're falling behind" logic will trigger + // unnecessarily if we start playing as soon as the first sample is + // decoded. These two fields store how many video frames and audio + // samples we must consume before are considered to be finished prerolling. + TimeUnit AudioPrerollThreshold() const { + return (mMaster->mAmpleAudioThreshold / 2) + .MultDouble(mMaster->mPlaybackRate); + } + + uint32_t VideoPrerollFrames() const { + return std::min( + static_cast( + mMaster->GetAmpleVideoFrames() / 2. * mMaster->mPlaybackRate + 1), + sVideoQueueDefaultSize); + } + + bool DonePrerollingAudio() const { + return !mMaster->IsAudioDecoding() || + mMaster->GetDecodedAudioDuration() >= AudioPrerollThreshold(); + } + + bool DonePrerollingVideo() const { + return !mMaster->IsVideoDecoding() || + static_cast(mMaster->VideoQueue().GetSize()) >= + VideoPrerollFrames(); + } + + void StartDormantTimer() { + if (!mMaster->mMediaSeekable) { + // Don't enter dormant if the media is not seekable because we need to + // seek when exiting dormant. + return; + } + + auto timeout = StaticPrefs::media_dormant_on_pause_timeout_ms(); + if (timeout < 0) { + // Disabled when timeout is negative. + return; + } + + if (timeout == 0) { + // Enter dormant immediately without scheduling a timer. + SetState(); + return; + } + + if (mMaster->mMinimizePreroll) { + SetState(); + return; + } + + TimeStamp target = + TimeStamp::Now() + TimeDuration::FromMilliseconds(timeout); + + mDormantTimer.Ensure( + target, + [this]() { + AUTO_PROFILER_LABEL("DecodingState::StartDormantTimer:SetDormant", + MEDIA_PLAYBACK); + mDormantTimer.CompleteRequest(); + SetState(); + }, + [this]() { mDormantTimer.CompleteRequest(); }); + } + + // Time at which we started decoding. + TimeStamp mDecodeStartTime; + + // When we start decoding (either for the first time, or after a pause) + // we may be low on decoded data. We don't want our "low data" logic to + // kick in and decide that we're low on decoded data because the download + // can't keep up with the decode, and cause us to pause playback. So we + // have a "preroll" stage, where we ignore the results of our "low data" + // logic during the first few frames of our decode. This occurs during + // playback. + bool mIsPrerolling = true; + + // Fired when playback is paused for a while to enter dormant. + DelayedScheduler mDormantTimer; + + MediaEventListener mOnAudioPopped; + MediaEventListener mOnVideoPopped; + + // If video has been later than the media time, this will records when the + // video started being late. It will be reset once video catches up with the + // media time. 
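Illustrative aside (not part of the upstream file): a worked example of the preroll thresholds computed above, plugging in this file's defaults (AMPLE_AUDIO_USECS = 2,000,000 and a default video queue size of 10) at a 1.5x playback rate. The numbers only demonstrate the arithmetic.

// Worked example of AudioPrerollThreshold() and VideoPrerollFrames() above,
// using this file's default constants.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const double playbackRate = 1.5;            // example rate
  const int64_t ampleAudioUsecs = 2'000'000;  // detail::AMPLE_AUDIO_USECS
  const uint32_t ampleVideoFrames = 10;       // default MAX_VIDEO_QUEUE_SIZE

  // Audio: preroll until half of the "ample" duration, scaled by rate.
  const int64_t audioPrerollUsecs =
      static_cast<int64_t>(ampleAudioUsecs / 2 * playbackRate);  // 1'500'000

  // Video: half the ample frame count, scaled by rate, plus one frame, but
  // never more than the default queue size.
  const uint32_t videoPrerollFrames = std::min(
      static_cast<uint32_t>(ampleVideoFrames / 2. * playbackRate + 1),
      ampleVideoFrames);                                         // 8

  std::printf("audio preroll: %lld us, video preroll: %u frames\n",
              static_cast<long long>(audioPrerollUsecs), videoPrerollFrames);
}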
+ Maybe mVideoFirstLateTime; +}; + +/** + * Purpose: decode audio data for playback when media is in seamless + * looping, we will adjust media time to make samples time monotonically + * increasing. All its methods runs on its owner thread (MDSM thread). + * + * Transition to: + * DORMANT if playback is paused for a while. + * SEEKING if any seek request. + * SHUTDOWN if any decode error. + * BUFFERING if playback can't continue due to lack of decoded data. + * COMPLETED when the media resource is closed and no data is available + * anymore. + * DECODING when media stops seamless looping. + */ +class MediaDecoderStateMachine::LoopingDecodingState + : public MediaDecoderStateMachine::DecodingState { + public: + explicit LoopingDecodingState(Master* aPtr) + : DecodingState(aPtr), + mIsReachingAudioEOS(!mMaster->IsAudioDecoding()), + mIsReachingVideoEOS(!mMaster->IsVideoDecoding()), + mAudioEndedBeforeEnteringStateWithoutDuration(false), + mVideoEndedBeforeEnteringStateWithoutDuration(false) { + MOZ_ASSERT(mMaster->mLooping); + SLOG( + "LoopingDecodingState ctor, mIsReachingAudioEOS=%d, " + "mIsReachingVideoEOS=%d", + mIsReachingAudioEOS, mIsReachingVideoEOS); + // If the track has reached EOS and we already have its last data, then we + // can know its duration. But if playback starts from EOS (due to seeking), + // the decoded end time would be zero because none of data gets decoded yet. + if (mIsReachingAudioEOS) { + if (mMaster->HasLastDecodedData(MediaData::Type::AUDIO_DATA) && + !mMaster->mAudioTrackDecodedDuration) { + mMaster->mAudioTrackDecodedDuration.emplace( + mMaster->mDecodedAudioEndTime); + SLOG("determine mAudioTrackDecodedDuration"); + } else { + mAudioEndedBeforeEnteringStateWithoutDuration = true; + SLOG("still don't know mAudioTrackDecodedDuration"); + } + } + + if (mIsReachingVideoEOS) { + if (mMaster->HasLastDecodedData(MediaData::Type::VIDEO_DATA) && + !mMaster->mVideoTrackDecodedDuration) { + mMaster->mVideoTrackDecodedDuration.emplace( + mMaster->mDecodedVideoEndTime); + SLOG("determine mVideoTrackDecodedDuration"); + } else { + mVideoEndedBeforeEnteringStateWithoutDuration = true; + SLOG("still don't know mVideoTrackDecodedDuration"); + } + } + + // We might be able to determine the duration already, let's check. + if (mIsReachingAudioEOS || mIsReachingVideoEOS) { + Unused << DetermineOriginalDecodedDurationIfNeeded(); + } + + // If we've looped at least once before, then we need to update queue offset + // correctly to make the media data time and the clock time consistent. + // Otherwise, it would cause a/v desync. 
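Illustrative aside (not part of the upstream file): the queue-offset update described in the comment above, and performed in the constructor code that follows, is what keeps timestamps monotonically increasing across loop iterations. The sketch below shows the arithmetic with a hypothetical queue and integer microseconds instead of media::TimeUnit.

// Toy sketch of the seamless-looping timestamp adjustment: each time the
// demuxer loops back to 0, the queue offset grows by the original decoded
// duration, so presented times keep increasing monotonically.
#include <cstdint>
#include <cstdio>

struct ToyLoopingQueue {
  int64_t mOffsetUs = 0;  // AudioQueue()/VideoQueue() offset in the real code

  void StartNextLoop(int64_t aOriginalDecodedDurationUs) {
    mOffsetUs += aOriginalDecodedDurationUs;
  }
  // A sample decoded at aRawTimeUs (relative to the start of the file) is
  // presented at raw time + offset.
  int64_t AdjustedTimeUs(int64_t aRawTimeUs) const {
    return aRawTimeUs + mOffsetUs;
  }
};

int main() {
  ToyLoopingQueue q;
  const int64_t durationUs = 5'000'000;  // e.g. a 5 second resource
  std::printf("%lld\n", (long long)q.AdjustedTimeUs(1'000'000));  // 1000000
  q.StartNextLoop(durationUs);           // first loop completed
  std::printf("%lld\n", (long long)q.AdjustedTimeUs(1'000'000));  // 6000000
}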
+ if (mMaster->mOriginalDecodedDuration != media::TimeUnit::Zero()) { + if (mIsReachingAudioEOS && mMaster->HasAudio()) { + AudioQueue().SetOffset(AudioQueue().GetOffset() + + mMaster->mOriginalDecodedDuration); + } + if (mIsReachingVideoEOS && mMaster->HasVideo()) { + VideoQueue().SetOffset(VideoQueue().GetOffset() + + mMaster->mOriginalDecodedDuration); + } + } + } + + void Enter() { + if (mMaster->HasAudio() && mIsReachingAudioEOS) { + SLOG("audio has ended, request the data again."); + RequestDataFromStartPosition(TrackInfo::TrackType::kAudioTrack); + } + if (mMaster->HasVideo() && mIsReachingVideoEOS) { + SLOG("video has ended, request the data again."); + RequestDataFromStartPosition(TrackInfo::TrackType::kVideoTrack); + } + DecodingState::Enter(); + } + + void Exit() override { + MOZ_DIAGNOSTIC_ASSERT(mMaster->OnTaskQueue()); + SLOG("Leaving looping state, offset [a=%" PRId64 ",v=%" PRId64 + "], endtime [a=%" PRId64 ",v=%" PRId64 "], track duration [a=%" PRId64 + ",v=%" PRId64 "], waiting=%s", + AudioQueue().GetOffset().ToMicroseconds(), + VideoQueue().GetOffset().ToMicroseconds(), + mMaster->mDecodedAudioEndTime.ToMicroseconds(), + mMaster->mDecodedVideoEndTime.ToMicroseconds(), + mMaster->mAudioTrackDecodedDuration + ? mMaster->mAudioTrackDecodedDuration->ToMicroseconds() + : 0, + mMaster->mVideoTrackDecodedDuration + ? mMaster->mVideoTrackDecodedDuration->ToMicroseconds() + : 0, + mDataWaitingTimestampAdjustment + ? MediaData::TypeToStr(mDataWaitingTimestampAdjustment->mType) + : "none"); + if (ShouldDiscardLoopedData(MediaData::Type::AUDIO_DATA)) { + DiscardLoopedData(MediaData::Type::AUDIO_DATA); + } + if (ShouldDiscardLoopedData(MediaData::Type::VIDEO_DATA)) { + DiscardLoopedData(MediaData::Type::VIDEO_DATA); + } + + if (mMaster->HasAudio() && HasDecodedLastAudioFrame()) { + SLOG("Mark audio queue as finished"); + mMaster->mAudioDataRequest.DisconnectIfExists(); + mMaster->mAudioWaitRequest.DisconnectIfExists(); + AudioQueue().Finish(); + } + if (mMaster->HasVideo() && HasDecodedLastVideoFrame()) { + SLOG("Mark video queue as finished"); + mMaster->mVideoDataRequest.DisconnectIfExists(); + mMaster->mVideoWaitRequest.DisconnectIfExists(); + VideoQueue().Finish(); + } + + // Clear waiting data should be done after marking queue as finished. + mDataWaitingTimestampAdjustment = nullptr; + + mAudioDataRequest.DisconnectIfExists(); + mVideoDataRequest.DisconnectIfExists(); + mAudioSeekRequest.DisconnectIfExists(); + mVideoSeekRequest.DisconnectIfExists(); + DecodingState::Exit(); + } + + ~LoopingDecodingState() { + MOZ_DIAGNOSTIC_ASSERT(!mAudioDataRequest.Exists()); + MOZ_DIAGNOSTIC_ASSERT(!mVideoDataRequest.Exists()); + MOZ_DIAGNOSTIC_ASSERT(!mAudioSeekRequest.Exists()); + MOZ_DIAGNOSTIC_ASSERT(!mVideoSeekRequest.Exists()); + } + + State GetState() const override { return DECODER_STATE_LOOPING_DECODING; } + + void HandleAudioDecoded(AudioData* aAudio) override { + // TODO : check if we need to update mOriginalDecodedDuration + + // After pushing data to the queue, timestamp might be adjusted. + DecodingState::HandleAudioDecoded(aAudio); + mMaster->mDecodedAudioEndTime = + std::max(aAudio->GetEndTime(), mMaster->mDecodedAudioEndTime); + SLOG("audio sample after time-adjustment [%" PRId64 ",%" PRId64 "]", + aAudio->mTime.ToMicroseconds(), aAudio->GetEndTime().ToMicroseconds()); + } + + void HandleVideoDecoded(VideoData* aVideo) override { + // TODO : check if we need to update mOriginalDecodedDuration + + // Here sample still keeps its original timestamp. 
+ + // This indicates there is a shorter audio track, and it's the first time in + // the looping (audio ends but video is playing) so that we haven't been + // able to determine the decoded duration. Therefore, we fill the gap + // between two tracks before video ends. Afterward, this adjustment will be + // done in `HandleEndOfAudio()`. + if (mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero() && + mMaster->mAudioTrackDecodedDuration && + aVideo->GetEndTime() > *mMaster->mAudioTrackDecodedDuration) { + media::TimeUnit gap; + // First time we fill gap between the video frame to the last audio. + if (auto prevVideo = VideoQueue().PeekBack(); + prevVideo && + prevVideo->GetEndTime() < *mMaster->mAudioTrackDecodedDuration) { + gap = + aVideo->GetEndTime().ToBase(*mMaster->mAudioTrackDecodedDuration) - + *mMaster->mAudioTrackDecodedDuration; + } + // Then fill the gap for all following videos. + else { + gap = aVideo->mDuration.ToBase(*mMaster->mAudioTrackDecodedDuration); + } + SLOG("Longer video %" PRId64 "%s (audio-durtaion=%" PRId64 + "%s), insert silence to fill the gap %" PRId64 "%s", + aVideo->GetEndTime().ToMicroseconds(), + aVideo->GetEndTime().ToString().get(), + mMaster->mAudioTrackDecodedDuration->ToMicroseconds(), + mMaster->mAudioTrackDecodedDuration->ToString().get(), + gap.ToMicroseconds(), gap.ToString().get()); + PushFakeAudioDataIfNeeded(gap); + } + + // After pushing data to the queue, timestamp might be adjusted. + DecodingState::HandleVideoDecoded(aVideo); + mMaster->mDecodedVideoEndTime = + std::max(aVideo->GetEndTime(), mMaster->mDecodedVideoEndTime); + SLOG("video sample after time-adjustment [%" PRId64 ",%" PRId64 "]", + aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds()); + } + + void HandleEndOfAudio() override { + mIsReachingAudioEOS = true; + if (!mMaster->mAudioTrackDecodedDuration && + mMaster->HasLastDecodedData(MediaData::Type::AUDIO_DATA)) { + mMaster->mAudioTrackDecodedDuration.emplace( + mMaster->mDecodedAudioEndTime); + } + if (DetermineOriginalDecodedDurationIfNeeded()) { + AudioQueue().SetOffset(AudioQueue().GetOffset() + + mMaster->mOriginalDecodedDuration); + } + + // This indicates that the audio track is shorter than the video track, so + // we need to add some silence to fill the gap. 
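Illustrative aside (not part of the upstream file): PushFakeAudioDataIfNeeded(), called above and defined further down in this state, converts the audio/video gap into frames at the audio rate and pushes zero-filled packets of a typical size. The sketch below shows that conversion with a hypothetical helper; it ignores Gecko's AlignedAudioBuffer/AudioData types and the timestamp bookkeeping done by the real code.

// Hypothetical helper: split a silence gap into zero-filled audio packets.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<std::vector<float>> MakeSilencePackets(
    int64_t aGapUs, uint32_t aRate, uint32_t aChannels,
    int64_t aPacketFrames = 1024) {
  std::vector<std::vector<float>> packets;
  int64_t framesLeft = aGapUs * aRate / 1'000'000;  // gap duration -> frames
  while (framesLeft > 0) {
    const int64_t frames = std::min(framesLeft, aPacketFrames);
    framesLeft -= frames;
    // Interleaved zero samples: frames * channels floats of silence.
    packets.emplace_back(static_cast<size_t>(frames * aChannels), 0.0f);
  }
  return packets;
}

int main() {
  // 100 ms of silence at 48 kHz stereo -> 4800 frames -> four packets of
  // 1024 frames plus one of 704.
  auto packets = MakeSilencePackets(100'000, 48000, 2);
  std::printf("%zu packets\n", packets.size());  // prints "5 packets"
}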
+ if (mMaster->mAudioTrackDecodedDuration && + mMaster->mOriginalDecodedDuration > + *mMaster->mAudioTrackDecodedDuration) { + MOZ_ASSERT(mMaster->HasVideo()); + MOZ_ASSERT(mMaster->mVideoTrackDecodedDuration); + MOZ_ASSERT(mMaster->mOriginalDecodedDuration == + *mMaster->mVideoTrackDecodedDuration); + auto gap = mMaster->mOriginalDecodedDuration.ToBase( + *mMaster->mAudioTrackDecodedDuration) - + *mMaster->mAudioTrackDecodedDuration; + SLOG( + "Audio track is shorter than the original decoded duration " + "(a=%" PRId64 "%s, t=%" PRId64 + "%s), insert silence to fill the gap %" PRId64 "%s", + mMaster->mAudioTrackDecodedDuration->ToMicroseconds(), + mMaster->mAudioTrackDecodedDuration->ToString().get(), + mMaster->mOriginalDecodedDuration.ToMicroseconds(), + mMaster->mOriginalDecodedDuration.ToString().get(), + gap.ToMicroseconds(), gap.ToString().get()); + PushFakeAudioDataIfNeeded(gap); + } + + SLOG( + "received audio EOS when seamless looping, starts seeking, " + "audioLoopingOffset=[%" PRId64 "], mAudioTrackDecodedDuration=[%" PRId64 + "]", + AudioQueue().GetOffset().ToMicroseconds(), + mMaster->mAudioTrackDecodedDuration->ToMicroseconds()); + if (!IsRequestingDataFromStartPosition(MediaData::Type::AUDIO_DATA)) { + RequestDataFromStartPosition(TrackInfo::TrackType::kAudioTrack); + } + ProcessSamplesWaitingAdjustmentIfAny(); + } + + void HandleEndOfVideo() override { + mIsReachingVideoEOS = true; + if (!mMaster->mVideoTrackDecodedDuration && + mMaster->HasLastDecodedData(MediaData::Type::VIDEO_DATA)) { + mMaster->mVideoTrackDecodedDuration.emplace( + mMaster->mDecodedVideoEndTime); + } + if (DetermineOriginalDecodedDurationIfNeeded()) { + VideoQueue().SetOffset(VideoQueue().GetOffset() + + mMaster->mOriginalDecodedDuration); + } + + SLOG( + "received video EOS when seamless looping, starts seeking, " + "videoLoopingOffset=[%" PRId64 "], mVideoTrackDecodedDuration=[%" PRId64 + "]", + VideoQueue().GetOffset().ToMicroseconds(), + mMaster->mVideoTrackDecodedDuration->ToMicroseconds()); + if (!IsRequestingDataFromStartPosition(MediaData::Type::VIDEO_DATA)) { + RequestDataFromStartPosition(TrackInfo::TrackType::kVideoTrack); + } + ProcessSamplesWaitingAdjustmentIfAny(); + } + + private: + void RequestDataFromStartPosition(TrackInfo::TrackType aType) { + MOZ_DIAGNOSTIC_ASSERT(aType == TrackInfo::TrackType::kAudioTrack || + aType == TrackInfo::TrackType::kVideoTrack); + + const bool isAudio = aType == TrackInfo::TrackType::kAudioTrack; + MOZ_ASSERT_IF(isAudio, mMaster->HasAudio()); + MOZ_ASSERT_IF(!isAudio, mMaster->HasVideo()); + + if (IsReaderSeeking()) { + MOZ_ASSERT(!mPendingSeekingType); + mPendingSeekingType = Some(aType); + SLOG("Delay %s seeking until the reader finishes current seeking", + isAudio ? "audio" : "video"); + return; + } + + auto& seekRequest = isAudio ? mAudioSeekRequest : mVideoSeekRequest; + Reader()->ResetDecode(aType); + Reader() + ->Seek(SeekTarget(media::TimeUnit::Zero(), SeekTarget::Type::Accurate, + isAudio ? SeekTarget::Track::AudioOnly + : SeekTarget::Track::VideoOnly)) + ->Then( + OwnerThread(), __func__, + [this, isAudio, master = RefPtr{mMaster}]() mutable -> void { + AUTO_PROFILER_LABEL( + nsPrintfCString( + "LoopingDecodingState::RequestDataFromStartPosition(%s)::" + "SeekResolved", + isAudio ? 
"audio" : "video") + .get(), + MEDIA_PLAYBACK); + if (auto& state = master->mStateObj; + state && + state->GetState() != DECODER_STATE_LOOPING_DECODING) { + MOZ_RELEASE_ASSERT(false, "This shouldn't happen!"); + return; + } + if (isAudio) { + mAudioSeekRequest.Complete(); + } else { + mVideoSeekRequest.Complete(); + } + SLOG( + "seeking completed, start to request first %s sample " + "(queued=%zu, decoder-queued=%zu)", + isAudio ? "audio" : "video", + isAudio ? AudioQueue().GetSize() : VideoQueue().GetSize(), + isAudio ? Reader()->SizeOfAudioQueueInFrames() + : Reader()->SizeOfVideoQueueInFrames()); + if (isAudio) { + RequestAudioDataFromReaderAfterEOS(); + } else { + RequestVideoDataFromReaderAfterEOS(); + } + if (mPendingSeekingType) { + auto seekingType = *mPendingSeekingType; + mPendingSeekingType.reset(); + SLOG("Perform pending %s seeking", TrackTypeToStr(seekingType)); + RequestDataFromStartPosition(seekingType); + } + }, + [this, isAudio, master = RefPtr{mMaster}]( + const SeekRejectValue& aReject) mutable -> void { + AUTO_PROFILER_LABEL( + nsPrintfCString("LoopingDecodingState::" + "RequestDataFromStartPosition(%s)::" + "SeekRejected", + isAudio ? "audio" : "video") + .get(), + MEDIA_PLAYBACK); + if (auto& state = master->mStateObj; + state && + state->GetState() != DECODER_STATE_LOOPING_DECODING) { + MOZ_RELEASE_ASSERT(false, "This shouldn't happen!"); + return; + } + if (isAudio) { + mAudioSeekRequest.Complete(); + } else { + mVideoSeekRequest.Complete(); + } + HandleError(aReject.mError, isAudio); + }) + ->Track(seekRequest); + } + + void RequestAudioDataFromReaderAfterEOS() { + MOZ_ASSERT(mMaster->HasAudio()); + Reader() + ->RequestAudioData() + ->Then( + OwnerThread(), __func__, + [this, master = RefPtr{mMaster}](const RefPtr& aAudio) { + AUTO_PROFILER_LABEL( + "LoopingDecodingState::" + "RequestAudioDataFromReader::" + "RequestDataResolved", + MEDIA_PLAYBACK); + if (auto& state = master->mStateObj; + state && + state->GetState() != DECODER_STATE_LOOPING_DECODING) { + MOZ_RELEASE_ASSERT(false, "This shouldn't happen!"); + return; + } + mIsReachingAudioEOS = false; + mAudioDataRequest.Complete(); + SLOG( + "got audio decoded sample " + "[%" PRId64 ",%" PRId64 "]", + aAudio->mTime.ToMicroseconds(), + aAudio->GetEndTime().ToMicroseconds()); + if (ShouldPutDataOnWaiting(MediaData::Type::AUDIO_DATA)) { + SLOG( + "decoded audio sample needs to wait for timestamp " + "adjustment after EOS"); + PutDataOnWaiting(aAudio); + return; + } + HandleAudioDecoded(aAudio); + ProcessSamplesWaitingAdjustmentIfAny(); + }, + [this, master = RefPtr{mMaster}](const MediaResult& aError) { + AUTO_PROFILER_LABEL( + "LoopingDecodingState::" + "RequestAudioDataFromReader::" + "RequestDataRejected", + MEDIA_PLAYBACK); + if (auto& state = master->mStateObj; + state && + state->GetState() != DECODER_STATE_LOOPING_DECODING) { + MOZ_RELEASE_ASSERT(false, "This shouldn't happen!"); + return; + } + mAudioDataRequest.Complete(); + HandleError(aError, true /* isAudio */); + }) + ->Track(mAudioDataRequest); + } + + void RequestVideoDataFromReaderAfterEOS() { + MOZ_ASSERT(mMaster->HasVideo()); + Reader() + ->RequestVideoData(media::TimeUnit(), + false /* aRequestNextVideoKeyFrame */) + ->Then( + OwnerThread(), __func__, + [this, master = RefPtr{mMaster}](const RefPtr& aVideo) { + AUTO_PROFILER_LABEL( + "LoopingDecodingState::" + "RequestVideoDataFromReaderAfterEOS()::" + "RequestDataResolved", + MEDIA_PLAYBACK); + if (auto& state = master->mStateObj; + state && + state->GetState() != DECODER_STATE_LOOPING_DECODING) 
{ + MOZ_RELEASE_ASSERT(false, "This shouldn't happen!"); + return; + } + mIsReachingVideoEOS = false; + mVideoDataRequest.Complete(); + SLOG( + "got video decoded sample " + "[%" PRId64 ",%" PRId64 "]", + aVideo->mTime.ToMicroseconds(), + aVideo->GetEndTime().ToMicroseconds()); + if (ShouldPutDataOnWaiting(MediaData::Type::VIDEO_DATA)) { + SLOG( + "decoded video sample needs to wait for timestamp " + "adjustment after EOS"); + PutDataOnWaiting(aVideo); + return; + } + mMaster->mBypassingSkipToNextKeyFrameCheck = true; + HandleVideoDecoded(aVideo); + ProcessSamplesWaitingAdjustmentIfAny(); + }, + [this, master = RefPtr{mMaster}](const MediaResult& aError) { + AUTO_PROFILER_LABEL( + "LoopingDecodingState::" + "RequestVideoDataFromReaderAfterEOS()::" + "RequestDataRejected", + MEDIA_PLAYBACK); + if (auto& state = master->mStateObj; + state && + state->GetState() != DECODER_STATE_LOOPING_DECODING) { + MOZ_RELEASE_ASSERT(false, "This shouldn't happen!"); + return; + } + mVideoDataRequest.Complete(); + HandleError(aError, false /* isAudio */); + }) + ->Track(mVideoDataRequest); + } + + void HandleError(const MediaResult& aError, bool aIsAudio); + + bool ShouldRequestData(MediaData::Type aType) const { + MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA || + aType == MediaData::Type::VIDEO_DATA); + + if (aType == MediaData::Type::AUDIO_DATA && + (mAudioSeekRequest.Exists() || mAudioDataRequest.Exists() || + IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA) || + mMaster->IsWaitingAudioData())) { + return false; + } + if (aType == MediaData::Type::VIDEO_DATA && + (mVideoSeekRequest.Exists() || mVideoDataRequest.Exists() || + IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA) || + mMaster->IsWaitingVideoData())) { + return false; + } + return true; + } + + void HandleAudioCanceled() override { + if (ShouldRequestData(MediaData::Type::AUDIO_DATA)) { + mMaster->RequestAudioData(); + } + } + + void HandleAudioWaited(MediaData::Type aType) override { + if (ShouldRequestData(MediaData::Type::AUDIO_DATA)) { + mMaster->RequestAudioData(); + } + } + + void HandleVideoCanceled() override { + if (ShouldRequestData(MediaData::Type::VIDEO_DATA)) { + mMaster->RequestVideoData(mMaster->GetMediaTime(), + ShouldRequestNextKeyFrame()); + }; + } + + void HandleVideoWaited(MediaData::Type aType) override { + if (ShouldRequestData(MediaData::Type::VIDEO_DATA)) { + mMaster->RequestVideoData(mMaster->GetMediaTime(), + ShouldRequestNextKeyFrame()); + }; + } + + void EnsureAudioDecodeTaskQueued() override { + if (!ShouldRequestData(MediaData::Type::AUDIO_DATA)) { + return; + } + DecodingState::EnsureAudioDecodeTaskQueued(); + } + + void EnsureVideoDecodeTaskQueued() override { + if (!ShouldRequestData(MediaData::Type::VIDEO_DATA)) { + return; + } + DecodingState::EnsureVideoDecodeTaskQueued(); + } + + bool DetermineOriginalDecodedDurationIfNeeded() { + // Duration would only need to be set once, unless we get more data which is + // larger than the duration. That can happen on MSE (reopen stream). 
+ if (mMaster->mOriginalDecodedDuration != media::TimeUnit::Zero()) { + return true; + } + + // Single track situations + if (mMaster->HasAudio() && !mMaster->HasVideo() && + mMaster->mAudioTrackDecodedDuration) { + mMaster->mOriginalDecodedDuration = *mMaster->mAudioTrackDecodedDuration; + SLOG("audio only, duration=%" PRId64, + mMaster->mOriginalDecodedDuration.ToMicroseconds()); + return true; + } + if (mMaster->HasVideo() && !mMaster->HasAudio() && + mMaster->mVideoTrackDecodedDuration) { + mMaster->mOriginalDecodedDuration = *mMaster->mVideoTrackDecodedDuration; + SLOG("video only, duration=%" PRId64, + mMaster->mOriginalDecodedDuration.ToMicroseconds()); + return true; + } + // Two tracks situation + if (mMaster->HasAudio() && mMaster->HasVideo()) { + // Both tracks have ended so that we can check which track is longer. + if (mMaster->mAudioTrackDecodedDuration && + mMaster->mVideoTrackDecodedDuration) { + mMaster->mOriginalDecodedDuration = + std::max(*mMaster->mVideoTrackDecodedDuration, + *mMaster->mAudioTrackDecodedDuration); + SLOG("Both tracks ended, original duration=%" PRId64 " (a=%" PRId64 + ", v=%" PRId64 ")", + mMaster->mOriginalDecodedDuration.ToMicroseconds(), + mMaster->mAudioTrackDecodedDuration->ToMicroseconds(), + mMaster->mVideoTrackDecodedDuration->ToMicroseconds()); + return true; + } + // When entering the state, video has ended but audio hasn't, which means + // audio is longer. + if (mMaster->mAudioTrackDecodedDuration && + mVideoEndedBeforeEnteringStateWithoutDuration) { + mMaster->mOriginalDecodedDuration = + *mMaster->mAudioTrackDecodedDuration; + mVideoEndedBeforeEnteringStateWithoutDuration = false; + SLOG("audio is longer, duration=%" PRId64, + mMaster->mOriginalDecodedDuration.ToMicroseconds()); + return true; + } + // When entering the state, audio has ended but video hasn't, which means + // video is longer. + if (mMaster->mVideoTrackDecodedDuration && + mAudioEndedBeforeEnteringStateWithoutDuration) { + mMaster->mOriginalDecodedDuration = + *mMaster->mVideoTrackDecodedDuration; + mAudioEndedBeforeEnteringStateWithoutDuration = false; + SLOG("video is longer, duration=%" PRId64, + mMaster->mOriginalDecodedDuration.ToMicroseconds()); + return true; + } + SLOG("Still waiting for another track ends..."); + MOZ_ASSERT(!mMaster->mAudioTrackDecodedDuration || + !mMaster->mVideoTrackDecodedDuration); + } + SLOG("can't determine the original decoded duration yet"); + MOZ_ASSERT(mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero()); + return false; + } + + void ProcessSamplesWaitingAdjustmentIfAny() { + if (!mDataWaitingTimestampAdjustment) { + return; + } + + RefPtr data = mDataWaitingTimestampAdjustment; + mDataWaitingTimestampAdjustment = nullptr; + const bool isAudio = data->mType == MediaData::Type::AUDIO_DATA; + SLOG("process %s sample waiting for timestamp adjustment", + isAudio ? "audio" : "video"); + if (isAudio) { + // Waiting sample is for next round of looping, so the queue offset + // shouldn't be zero. This happens when the track has reached EOS before + // entering the state (and looping never happens before). Same for below + // video case. 
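Illustrative aside (not part of the upstream file): DetermineOriginalDecodedDurationIfNeeded() above boils down to using the only track's decoded duration when there is a single track, and the longer of the two once both are known. The sketch below condenses that decision and deliberately omits the "track ended before entering this state" bookkeeping that the real code also handles.

// Hypothetical condensation of the looping-period decision.
#include <algorithm>
#include <cstdint>
#include <optional>

std::optional<int64_t> LoopingPeriodUs(std::optional<int64_t> aAudioDurUs,
                                       std::optional<int64_t> aVideoDurUs) {
  if (aAudioDurUs && aVideoDurUs) {
    return std::max(*aAudioDurUs, *aVideoDurUs);  // both tracks have ended
  }
  if (aAudioDurUs) {
    return aAudioDurUs;  // audio-only, or only audio's duration is known
  }
  if (aVideoDurUs) {
    return aVideoDurUs;  // video-only, or only video's duration is known
  }
  return std::nullopt;   // neither track has reached EOS yet, keep waiting
}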
+ if (AudioQueue().GetOffset() == media::TimeUnit::Zero()) { + AudioQueue().SetOffset(mMaster->mOriginalDecodedDuration); + } + HandleAudioDecoded(data->As()); + } else { + MOZ_DIAGNOSTIC_ASSERT(data->mType == MediaData::Type::VIDEO_DATA); + if (VideoQueue().GetOffset() == media::TimeUnit::Zero()) { + VideoQueue().SetOffset(mMaster->mOriginalDecodedDuration); + } + HandleVideoDecoded(data->As()); + } + } + + bool IsDataWaitingForTimestampAdjustment(MediaData::Type aType) const { + return mDataWaitingTimestampAdjustment && + mDataWaitingTimestampAdjustment->mType == aType; + } + + bool ShouldPutDataOnWaiting(MediaData::Type aType) const { + // If another track is already waiting, this track shouldn't be waiting. + // This case only happens when both tracks reached EOS before entering the + // looping decoding state, so we don't know the decoded duration yet (used + // to adjust timestamp) But this is fine, because both tracks will start + // from 0 so we don't need to adjust them now. + if (mDataWaitingTimestampAdjustment && + !IsDataWaitingForTimestampAdjustment(aType)) { + return false; + } + + // Only have one track, no need to wait. + if ((aType == MediaData::Type::AUDIO_DATA && !mMaster->HasVideo()) || + (aType == MediaData::Type::VIDEO_DATA && !mMaster->HasAudio())) { + return false; + } + + // We don't know the duration yet, so we can't calculate the looping offset. + return mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero(); + } + + void PutDataOnWaiting(MediaData* aData) { + MOZ_ASSERT(!mDataWaitingTimestampAdjustment); + mDataWaitingTimestampAdjustment = aData; + SLOG("put %s [%" PRId64 ",%" PRId64 "] on waiting", + MediaData::TypeToStr(aData->mType), aData->mTime.ToMicroseconds(), + aData->GetEndTime().ToMicroseconds()); + MaybeStopPrerolling(); + } + + bool ShouldDiscardLoopedData(MediaData::Type aType) const { + if (!mMaster->mMediaSink->IsStarted()) { + return false; + } + + MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA || + aType == MediaData::Type::VIDEO_DATA); + const bool isAudio = aType == MediaData::Type::AUDIO_DATA; + if (isAudio && !mMaster->HasAudio()) { + return false; + } + if (!isAudio && !mMaster->HasVideo()) { + return false; + } + + /** + * If media cancels looping, we should check whether there is media data + * whose time is later than EOS. If so, we should discard them because we + * won't have a chance to play them. + * + * playback last decoded + * position EOS data time + * ----|---------------|------------|---------> (Increasing timeline) + * mCurrent looping mMaster's + * ClockTime offset mDecodedXXXEndTime + * + */ + const auto offset = + isAudio ? AudioQueue().GetOffset() : VideoQueue().GetOffset(); + const auto endTime = + isAudio ? mMaster->mDecodedAudioEndTime : mMaster->mDecodedVideoEndTime; + const auto clockTime = mMaster->GetClock(); + return (offset != media::TimeUnit::Zero() && clockTime < offset && + offset < endTime); + } + + void DiscardLoopedData(MediaData::Type aType) { + MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA || + aType == MediaData::Type::VIDEO_DATA); + const bool isAudio = aType == MediaData::Type::AUDIO_DATA; + const auto offset = + isAudio ? AudioQueue().GetOffset() : VideoQueue().GetOffset(); + if (offset == media::TimeUnit::Zero()) { + return; + } + + SLOG("Discard %s frames after the time=%" PRId64, + isAudio ? 
"audio" : "video", offset.ToMicroseconds()); + if (isAudio) { + DiscardFramesFromTail(AudioQueue(), [&](int64_t aSampleTime) { + return aSampleTime > offset.ToMicroseconds(); + }); + } else { + DiscardFramesFromTail(VideoQueue(), [&](int64_t aSampleTime) { + return aSampleTime > offset.ToMicroseconds(); + }); + } + } + + void PushFakeAudioDataIfNeeded(const media::TimeUnit& aDuration) { + MOZ_ASSERT(Info().HasAudio()); + + const auto& audioInfo = Info().mAudio; + CheckedInt64 frames = aDuration.ToTicksAtRate(audioInfo.mRate); + if (!frames.isValid() || !audioInfo.mChannels || !audioInfo.mRate) { + NS_WARNING("Can't create fake audio, invalid frames/channel/rate?"); + return; + } + + if (!frames.value()) { + NS_WARNING(nsPrintfCString("Duration (%s) too short, no frame needed", + aDuration.ToString().get()) + .get()); + return; + } + + // If we can get the last sample, use its frame. Otherwise, use common 1024. + int64_t typicalPacketFrameCount = 1024; + if (RefPtr audio = AudioQueue().PeekBack()) { + typicalPacketFrameCount = audio->Frames(); + } + + media::TimeUnit totalDuration = TimeUnit::Zero(audioInfo.mRate); + // Generate fake audio in a smaller size of audio chunk. + while (frames.value()) { + int64_t packetFrameCount = + std::min(frames.value(), typicalPacketFrameCount); + frames -= packetFrameCount; + AlignedAudioBuffer samples(packetFrameCount * audioInfo.mChannels); + if (!samples) { + NS_WARNING("Can't create audio buffer, OOM?"); + return; + } + // `mDecodedAudioEndTime` is adjusted time, and we want unadjusted time + // otherwise the time would be adjusted twice when pushing sample into the + // media queue. + media::TimeUnit startTime = mMaster->mDecodedAudioEndTime; + if (AudioQueue().GetOffset() != media::TimeUnit::Zero()) { + startTime -= AudioQueue().GetOffset(); + } + RefPtr data(new AudioData(0, startTime, std::move(samples), + audioInfo.mChannels, + audioInfo.mRate)); + SLOG("Created fake audio data (duration=%s, frame-left=%" PRId64 ")", + data->mDuration.ToString().get(), frames.value()); + totalDuration += data->mDuration; + HandleAudioDecoded(data); + } + SLOG("Pushed fake silence audio data in total duration=%" PRId64 "%s", + totalDuration.ToMicroseconds(), totalDuration.ToString().get()); + } + + bool HasDecodedLastAudioFrame() const { + // when we're going to leave looping state and have got EOS before, we + // should mark audio queue as ended because we have got all data we need. + return mAudioDataRequest.Exists() || mAudioSeekRequest.Exists() || + ShouldDiscardLoopedData(MediaData::Type::AUDIO_DATA) || + IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA) || + mIsReachingAudioEOS; + } + + bool HasDecodedLastVideoFrame() const { + // when we're going to leave looping state and have got EOS before, we + // should mark video queue as ended because we have got all data we need. + return mVideoDataRequest.Exists() || mVideoSeekRequest.Exists() || + ShouldDiscardLoopedData(MediaData::Type::VIDEO_DATA) || + IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA) || + mIsReachingVideoEOS; + } + + bool ShouldStopPrerolling() const override { + // These checks is used to handle the media queue aren't opened correctly + // because they've been close before entering the looping state. Therefore, + // we need to preroll data in order to let new data to reopen the queue + // automatically. Otherwise, playback can't start successfully. 
+    bool isWaitingForNewData = false;
+    if (mMaster->HasAudio()) {
+      isWaitingForNewData |= (mIsReachingAudioEOS && AudioQueue().IsFinished());
+    }
+    if (mMaster->HasVideo()) {
+      isWaitingForNewData |= (mIsReachingVideoEOS && VideoQueue().IsFinished());
+    }
+    return !isWaitingForNewData && DecodingState::ShouldStopPrerolling();
+  }
+
+  bool IsReaderSeeking() const {
+    return mAudioSeekRequest.Exists() || mVideoSeekRequest.Exists();
+  }
+
+  bool IsWaitingData(MediaData::Type aType) const override {
+    if (aType == MediaData::Type::AUDIO_DATA) {
+      return mMaster->IsWaitingAudioData() ||
+             IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA);
+    }
+    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::VIDEO_DATA);
+    return mMaster->IsWaitingVideoData() ||
+           IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA);
+  }
+
+  bool IsRequestingDataFromStartPosition(MediaData::Type aType) const {
+    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
+                          aType == MediaData::Type::VIDEO_DATA);
+    if (aType == MediaData::Type::AUDIO_DATA) {
+      return mAudioSeekRequest.Exists() || mAudioDataRequest.Exists();
+    }
+    return mVideoSeekRequest.Exists() || mVideoDataRequest.Exists();
+  }
+
+  bool IsBufferingAllowed() const override {
+    return !mIsReachingAudioEOS && !mIsReachingVideoEOS;
+  }
+
+  bool mIsReachingAudioEOS;
+  bool mIsReachingVideoEOS;
+
+  /**
+   * If the two tracks have different lengths, then when one track ends first
+   * we can't adjust new data from that track while the longer track hasn't
+   * ended yet. The adjusted timestamp needs to be based on the longer track's
+   * last timestamp, because otherwise it would cause a deviation and
+   * eventually an a/v desync. Those samples need to be stored, and we will
+   * adjust their timestamps later.
+   *
+   * The following graph explains the situation in detail.
+   * o : decoded data with timestamp adjusted or no adjustment (not looping yet)
+   * x : decoded data without timestamp adjustment.
+   * - : stop decoding and nothing happens
+   * EOS : the track reaches the end. We now know the offset of the track.
+   *
+   * Timeline ----------------------------------->
+   * Track1 : o EOS x - - o
+   * Track2 : o o o EOS o o
+   *
+   * Before reaching track2's EOS, we can't adjust samples from track1 because
+   * track2 might have a longer duration than track1. The sample x would be
+   * stored in `mDataWaitingTimestampAdjustment` and we would also stop
+   * decoding for track1.
+   *
+   * After reaching track2's EOS, we know the other track's offset, and the
+   * larger one is used for `mOriginalDecodedDuration`. Once that duration has
+   * been determined, we no longer need to put samples on waiting because we
+   * already know how to adjust their timestamps.
+   */
+  RefPtr<MediaData> mDataWaitingTimestampAdjustment;
+
+  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mAudioSeekRequest;
+  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mVideoSeekRequest;
+  MozPromiseRequestHolder<AudioDataPromise> mAudioDataRequest;
+  MozPromiseRequestHolder<VideoDataPromise> mVideoDataRequest;
+
+  // The media format reader only allows seeking one track at a time; if we're
+  // already seeking, delay the new seek until the current one finishes.
+  Maybe<MediaData::Type> mPendingSeekingType;
+
+  // These are used to track a special case where playback starts from the EOS
+  // position via seeking. So even though EOS has been reached, none of the
+  // data has been decoded yet. They will be reset when
+  // `mOriginalDecodedDuration` is determined.
+ bool mAudioEndedBeforeEnteringStateWithoutDuration; + bool mVideoEndedBeforeEnteringStateWithoutDuration; +}; + +/** + * Purpose: seek to a particular new playback position. + * + * Transition to: + * SEEKING if any new seek request. + * SHUTDOWN if seek failed. + * COMPLETED if the new playback position is the end of the media resource. + * NextFrameSeekingState if completing a NextFrameSeekingFromDormantState. + * DECODING/LOOPING_DECODING otherwise. + */ +class MediaDecoderStateMachine::SeekingState + : public MediaDecoderStateMachine::StateObject { + public: + explicit SeekingState(Master* aPtr) + : StateObject(aPtr), mVisibility(static_cast(0)) {} + + RefPtr Enter(SeekJob&& aSeekJob, + EventVisibility aVisibility) { + mSeekJob = std::move(aSeekJob); + mVisibility = aVisibility; + + // Suppressed visibility comes from two cases: (1) leaving dormant state, + // and (2) resuming suspended video decoder. We want both cases to be + // transparent to the user. So we only notify the change when the seek + // request is from the user. + if (mVisibility == EventVisibility::Observable) { + // Don't stop playback for a video-only seek since we want to keep playing + // audio and we don't need to stop playback while leaving dormant for the + // playback should has been stopped. + mMaster->StopPlayback(); + mMaster->UpdatePlaybackPositionInternal(mSeekJob.mTarget->GetTime()); + mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::SeekStarted); + mMaster->mOnNextFrameStatus.Notify( + MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING); + } + + RefPtr p = mSeekJob.mPromise.Ensure(__func__); + + DoSeek(); + + return p; + } + + virtual void Exit() override = 0; + + State GetState() const override = 0; + + void HandleAudioDecoded(AudioData* aAudio) override = 0; + void HandleVideoDecoded(VideoData* aVideo) override = 0; + void HandleAudioWaited(MediaData::Type aType) override = 0; + void HandleVideoWaited(MediaData::Type aType) override = 0; + + void HandleVideoSuspendTimeout() override { + // Do nothing since we want a valid video frame to show when seek is done. + } + + void HandleResumeVideoDecoding(const TimeUnit&) override { + // Do nothing. We will resume video decoding in the decoding state. + } + + // We specially handle next frame seeks by ignoring them if we're already + // seeking. + RefPtr HandleSeek( + const SeekTarget& aTarget) override { + if (aTarget.IsNextFrame()) { + // We ignore next frame seeks if we already have a seek pending + SLOG("Already SEEKING, ignoring seekToNextFrame"); + MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished"); + return MediaDecoder::SeekPromise::CreateAndReject( + /* aRejectValue = */ true, __func__); + } + + return StateObject::HandleSeek(aTarget); + } + + protected: + SeekJob mSeekJob; + EventVisibility mVisibility; + + virtual void DoSeek() = 0; + // Transition to the next state (defined by the subclass) when seek is + // completed. 
+ virtual void GoToNextState() { SetDecodingState(); } + void SeekCompleted(); + virtual TimeUnit CalculateNewCurrentTime() const = 0; +}; + +class MediaDecoderStateMachine::AccurateSeekingState + : public MediaDecoderStateMachine::SeekingState { + public: + explicit AccurateSeekingState(Master* aPtr) : SeekingState(aPtr) {} + + State GetState() const override { return DECODER_STATE_SEEKING_ACCURATE; } + + RefPtr Enter(SeekJob&& aSeekJob, + EventVisibility aVisibility) { + MOZ_ASSERT(aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast()); + mCurrentTimeBeforeSeek = mMaster->GetMediaTime(); + return SeekingState::Enter(std::move(aSeekJob), aVisibility); + } + + void Exit() override { + // Disconnect MediaDecoder. + mSeekJob.RejectIfExists(__func__); + + // Disconnect ReaderProxy. + mSeekRequest.DisconnectIfExists(); + + mWaitRequest.DisconnectIfExists(); + } + + void HandleAudioDecoded(AudioData* aAudio) override { + MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, + "Seek shouldn't be finished"); + MOZ_ASSERT(aAudio); + + AdjustFastSeekIfNeeded(aAudio); + + if (mSeekJob.mTarget->IsFast()) { + // Non-precise seek; we can stop the seek at the first sample. + mMaster->PushAudio(aAudio); + mDoneAudioSeeking = true; + } else { + nsresult rv = DropAudioUpToSeekTarget(aAudio); + if (NS_FAILED(rv)) { + mMaster->DecodeError(rv); + return; + } + } + + if (!mDoneAudioSeeking) { + RequestAudioData(); + return; + } + MaybeFinishSeek(); + } + + void HandleVideoDecoded(VideoData* aVideo) override { + MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, + "Seek shouldn't be finished"); + MOZ_ASSERT(aVideo); + + AdjustFastSeekIfNeeded(aVideo); + + if (mSeekJob.mTarget->IsFast()) { + // Non-precise seek. We can stop the seek at the first sample. + mMaster->PushVideo(aVideo); + mDoneVideoSeeking = true; + } else { + nsresult rv = DropVideoUpToSeekTarget(aVideo); + if (NS_FAILED(rv)) { + mMaster->DecodeError(rv); + return; + } + } + + if (!mDoneVideoSeeking) { + RequestVideoData(); + return; + } + MaybeFinishSeek(); + } + + void HandleWaitingForAudio() override { + MOZ_ASSERT(!mDoneAudioSeeking); + mMaster->WaitForData(MediaData::Type::AUDIO_DATA); + } + + void HandleAudioCanceled() override { + MOZ_ASSERT(!mDoneAudioSeeking); + RequestAudioData(); + } + + void HandleEndOfAudio() override { + HandleEndOfAudioInternal(); + MaybeFinishSeek(); + } + + void HandleWaitingForVideo() override { + MOZ_ASSERT(!mDoneVideoSeeking); + mMaster->WaitForData(MediaData::Type::VIDEO_DATA); + } + + void HandleVideoCanceled() override { + MOZ_ASSERT(!mDoneVideoSeeking); + RequestVideoData(); + } + + void HandleEndOfVideo() override { + HandleEndOfVideoInternal(); + MaybeFinishSeek(); + } + + void HandleAudioWaited(MediaData::Type aType) override { + MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, + "Seek shouldn't be finished"); + + RequestAudioData(); + } + + void HandleVideoWaited(MediaData::Type aType) override { + MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, + "Seek shouldn't be finished"); + + RequestVideoData(); + } + + void DoSeek() override { + mDoneAudioSeeking = !Info().HasAudio(); + mDoneVideoSeeking = !Info().HasVideo(); + + // Resetting decode should be called after stopping media sink, which can + // ensure that we have an empty media queue before seeking the demuxer. 
+ mMaster->StopMediaSink(); + mMaster->ResetDecode(); + + DemuxerSeek(); + } + + TimeUnit CalculateNewCurrentTime() const override { + const auto seekTime = mSeekJob.mTarget->GetTime(); + + // For the accurate seek, we always set the newCurrentTime = seekTime so + // that the updated HTMLMediaElement.currentTime will always be the seek + // target; we rely on the MediaSink to handles the gap between the + // newCurrentTime and the real decoded samples' start time. + if (mSeekJob.mTarget->IsAccurate()) { + return seekTime; + } + + // For the fast seek, we update the newCurrentTime with the decoded audio + // and video samples, set it to be the one which is closet to the seekTime. + if (mSeekJob.mTarget->IsFast()) { + RefPtr audio = AudioQueue().PeekFront(); + RefPtr video = VideoQueue().PeekFront(); + + // A situation that both audio and video approaches the end. + if (!audio && !video) { + return seekTime; + } + + const int64_t audioStart = + audio ? audio->mTime.ToMicroseconds() : INT64_MAX; + const int64_t videoStart = + video ? video->mTime.ToMicroseconds() : INT64_MAX; + const int64_t audioGap = std::abs(audioStart - seekTime.ToMicroseconds()); + const int64_t videoGap = std::abs(videoStart - seekTime.ToMicroseconds()); + return TimeUnit::FromMicroseconds(audioGap <= videoGap ? audioStart + : videoStart); + } + + MOZ_ASSERT(false, "AccurateSeekTask doesn't handle other seek types."); + return TimeUnit::Zero(); + } + + protected: + void DemuxerSeek() { + // Request the demuxer to perform seek. + Reader() + ->Seek(mSeekJob.mTarget.ref()) + ->Then( + OwnerThread(), __func__, + [this](const media::TimeUnit& aUnit) { OnSeekResolved(aUnit); }, + [this](const SeekRejectValue& aReject) { OnSeekRejected(aReject); }) + ->Track(mSeekRequest); + } + + void OnSeekResolved(media::TimeUnit) { + AUTO_PROFILER_LABEL("AccurateSeekingState::OnSeekResolved", MEDIA_PLAYBACK); + mSeekRequest.Complete(); + + // We must decode the first samples of active streams, so we can determine + // the new stream time. So dispatch tasks to do that. + if (!mDoneVideoSeeking) { + RequestVideoData(); + } + if (!mDoneAudioSeeking) { + RequestAudioData(); + } + } + + void OnSeekRejected(const SeekRejectValue& aReject) { + AUTO_PROFILER_LABEL("AccurateSeekingState::OnSeekRejected", MEDIA_PLAYBACK); + mSeekRequest.Complete(); + + if (aReject.mError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) { + SLOG("OnSeekRejected reason=WAITING_FOR_DATA type=%s", + MediaData::TypeToStr(aReject.mType)); + MOZ_ASSERT_IF(aReject.mType == MediaData::Type::AUDIO_DATA, + !mMaster->IsRequestingAudioData()); + MOZ_ASSERT_IF(aReject.mType == MediaData::Type::VIDEO_DATA, + !mMaster->IsRequestingVideoData()); + MOZ_ASSERT_IF(aReject.mType == MediaData::Type::AUDIO_DATA, + !mMaster->IsWaitingAudioData()); + MOZ_ASSERT_IF(aReject.mType == MediaData::Type::VIDEO_DATA, + !mMaster->IsWaitingVideoData()); + + // Fire 'waiting' to notify the player that we are waiting for data. 
+ mMaster->mOnNextFrameStatus.Notify( + MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING); + + Reader() + ->WaitForData(aReject.mType) + ->Then( + OwnerThread(), __func__, + [this](MediaData::Type aType) { + AUTO_PROFILER_LABEL( + "AccurateSeekingState::OnSeekRejected:WaitDataResolved", + MEDIA_PLAYBACK); + SLOG("OnSeekRejected wait promise resolved"); + mWaitRequest.Complete(); + DemuxerSeek(); + }, + [this](const WaitForDataRejectValue& aRejection) { + AUTO_PROFILER_LABEL( + "AccurateSeekingState::OnSeekRejected:WaitDataRejected", + MEDIA_PLAYBACK); + SLOG("OnSeekRejected wait promise rejected"); + mWaitRequest.Complete(); + mMaster->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA); + }) + ->Track(mWaitRequest); + return; + } + + if (aReject.mError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) { + if (!mDoneAudioSeeking) { + HandleEndOfAudioInternal(); + } + if (!mDoneVideoSeeking) { + HandleEndOfVideoInternal(); + } + MaybeFinishSeek(); + return; + } + + MOZ_ASSERT(NS_FAILED(aReject.mError), + "Cancels should also disconnect mSeekRequest"); + mMaster->DecodeError(aReject.mError); + } + + void RequestAudioData() { + MOZ_ASSERT(!mDoneAudioSeeking); + mMaster->RequestAudioData(); + } + + virtual void RequestVideoData() { + MOZ_ASSERT(!mDoneVideoSeeking); + mMaster->RequestVideoData(media::TimeUnit()); + } + + void AdjustFastSeekIfNeeded(MediaData* aSample) { + if (mSeekJob.mTarget->IsFast() && + mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek && + aSample->mTime < mCurrentTimeBeforeSeek) { + // We are doing a fastSeek, but we ended up *before* the previous + // playback position. This is surprising UX, so switch to an accurate + // seek and decode to the seek target. This is not conformant to the + // spec, fastSeek should always be fast, but until we get the time to + // change all Readers to seek to the keyframe after the currentTime + // in this case, we'll just decode forward. Bug 1026330. + mSeekJob.mTarget->SetType(SeekTarget::Accurate); + } + } + + nsresult DropAudioUpToSeekTarget(AudioData* aAudio) { + MOZ_ASSERT(aAudio && mSeekJob.mTarget->IsAccurate()); + + if (mSeekJob.mTarget->GetTime() >= aAudio->GetEndTime()) { + // Our seek target lies after the frames in this AudioData. Don't + // push it onto the audio queue, and keep decoding forwards. + return NS_OK; + } + + if (aAudio->mTime > mSeekJob.mTarget->GetTime()) { + // The seek target doesn't lie in the audio block just after the last + // audio frames we've seen which were before the seek target. This + // could have been the first audio data we've seen after seek, i.e. the + // seek terminated after the seek target in the audio stream. Just + // abort the audio decode-to-target, the state machine will play + // silence to cover the gap. Typically this happens in poorly muxed + // files. 
+ SLOGW("Audio not synced after seek, maybe a poorly muxed file?"); + mMaster->PushAudio(aAudio); + mDoneAudioSeeking = true; + return NS_OK; + } + + bool ok = aAudio->SetTrimWindow( + {mSeekJob.mTarget->GetTime().ToBase(aAudio->mTime), + aAudio->GetEndTime()}); + if (!ok) { + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; + } + + MOZ_ASSERT(AudioQueue().GetSize() == 0, + "Should be the 1st sample after seeking"); + mMaster->PushAudio(aAudio); + mDoneAudioSeeking = true; + + return NS_OK; + } + + nsresult DropVideoUpToSeekTarget(VideoData* aVideo) { + MOZ_ASSERT(aVideo); + SLOG("DropVideoUpToSeekTarget() frame [%" PRId64 ", %" PRId64 "]", + aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds()); + const auto target = GetSeekTarget(); + + // If the frame end time is less than the seek target, we won't want + // to display this frame after the seek, so discard it. + if (target >= aVideo->GetEndTime()) { + SLOG("DropVideoUpToSeekTarget() pop video frame [%" PRId64 ", %" PRId64 + "] target=%" PRId64, + aVideo->mTime.ToMicroseconds(), + aVideo->GetEndTime().ToMicroseconds(), target.ToMicroseconds()); + PROFILER_MARKER_UNTYPED("MDSM::DropVideoUpToSeekTarget", MEDIA_PLAYBACK); + mFirstVideoFrameAfterSeek = aVideo; + } else { + if (target >= aVideo->mTime && aVideo->GetEndTime() >= target) { + // The seek target lies inside this frame's time slice. Adjust the + // frame's start time to match the seek target. + aVideo->UpdateTimestamp(target); + } + mFirstVideoFrameAfterSeek = nullptr; + + SLOG("DropVideoUpToSeekTarget() found video frame [%" PRId64 ", %" PRId64 + "] containing target=%" PRId64, + aVideo->mTime.ToMicroseconds(), + aVideo->GetEndTime().ToMicroseconds(), target.ToMicroseconds()); + + MOZ_ASSERT(VideoQueue().GetSize() == 0, + "Should be the 1st sample after seeking"); + mMaster->PushVideo(aVideo); + mDoneVideoSeeking = true; + } + + return NS_OK; + } + + void HandleEndOfAudioInternal() { + MOZ_ASSERT(!mDoneAudioSeeking); + AudioQueue().Finish(); + mDoneAudioSeeking = true; + } + + void HandleEndOfVideoInternal() { + MOZ_ASSERT(!mDoneVideoSeeking); + if (mFirstVideoFrameAfterSeek) { + // Hit the end of stream. Move mFirstVideoFrameAfterSeek into + // mSeekedVideoData so we have something to display after seeking. + mMaster->PushVideo(mFirstVideoFrameAfterSeek); + } + VideoQueue().Finish(); + mDoneVideoSeeking = true; + } + + void MaybeFinishSeek() { + if (mDoneAudioSeeking && mDoneVideoSeeking) { + SeekCompleted(); + } + } + + /* + * Track the current seek promise made by the reader. + */ + MozPromiseRequestHolder mSeekRequest; + + /* + * Internal state. + */ + media::TimeUnit mCurrentTimeBeforeSeek; + bool mDoneAudioSeeking = false; + bool mDoneVideoSeeking = false; + MozPromiseRequestHolder mWaitRequest; + + // This temporarily stores the first frame we decode after we seek. + // This is so that if we hit end of stream while we're decoding to reach + // the seek target, we will still have a frame that we can display as the + // last frame in the media. + RefPtr mFirstVideoFrameAfterSeek; + + private: + virtual media::TimeUnit GetSeekTarget() const { + return mSeekJob.mTarget->GetTime(); + } +}; + +/* + * Remove samples from the queue until aCompare() returns false. + * aCompare A function object with the signature bool(int64_t) which returns + * true for samples that should be removed. 
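+ *
+ * A hypothetical usage sketch (cutoffUs is an illustrative local variable,
+ * not part of this file): drop every frame at or before a 5-second cut-off,
+ * with times expressed in microseconds to match mTime.ToMicroseconds():
+ *
+ *   const int64_t cutoffUs = 5 * USECS_PER_S;
+ *   DiscardFrames(VideoQueue(), [cutoffUs](int64_t aSampleTime) {
+ *     return aSampleTime <= cutoffUs;
+ *   });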
+ */ +template +static void DiscardFrames(MediaQueue& aQueue, const Function& aCompare) { + while (aQueue.GetSize() > 0) { + if (aCompare(aQueue.PeekFront()->mTime.ToMicroseconds())) { + RefPtr releaseMe = aQueue.PopFront(); + continue; + } + break; + } +} + +class MediaDecoderStateMachine::NextFrameSeekingState + : public MediaDecoderStateMachine::SeekingState { + public: + explicit NextFrameSeekingState(Master* aPtr) : SeekingState(aPtr) {} + + State GetState() const override { + return DECODER_STATE_SEEKING_NEXTFRAMESEEKING; + } + + RefPtr Enter(SeekJob&& aSeekJob, + EventVisibility aVisibility) { + MOZ_ASSERT(aSeekJob.mTarget->IsNextFrame()); + mCurrentTime = mMaster->GetMediaTime(); + mDuration = mMaster->Duration(); + return SeekingState::Enter(std::move(aSeekJob), aVisibility); + } + + void Exit() override { + // Disconnect my async seek operation. + if (mAsyncSeekTask) { + mAsyncSeekTask->Cancel(); + } + + // Disconnect MediaDecoder. + mSeekJob.RejectIfExists(__func__); + } + + void HandleAudioDecoded(AudioData* aAudio) override { + mMaster->PushAudio(aAudio); + } + + void HandleVideoDecoded(VideoData* aVideo) override { + MOZ_ASSERT(aVideo); + MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished"); + MOZ_ASSERT(NeedMoreVideo()); + + if (aVideo->mTime > mCurrentTime) { + mMaster->PushVideo(aVideo); + FinishSeek(); + } else { + RequestVideoData(); + } + } + + void HandleWaitingForAudio() override { + MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished"); + // We don't care about audio decode errors in this state which will be + // handled by other states after seeking. + } + + void HandleAudioCanceled() override { + MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished"); + // We don't care about audio decode errors in this state which will be + // handled by other states after seeking. + } + + void HandleEndOfAudio() override { + MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished"); + // We don't care about audio decode errors in this state which will be + // handled by other states after seeking. + } + + void HandleWaitingForVideo() override { + MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished"); + MOZ_ASSERT(NeedMoreVideo()); + mMaster->WaitForData(MediaData::Type::VIDEO_DATA); + } + + void HandleVideoCanceled() override { + MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished"); + MOZ_ASSERT(NeedMoreVideo()); + RequestVideoData(); + } + + void HandleEndOfVideo() override { + MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished"); + MOZ_ASSERT(NeedMoreVideo()); + VideoQueue().Finish(); + FinishSeek(); + } + + void HandleAudioWaited(MediaData::Type aType) override { + // We don't care about audio in this state. + } + + void HandleVideoWaited(MediaData::Type aType) override { + MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished"); + MOZ_ASSERT(NeedMoreVideo()); + RequestVideoData(); + } + + TimeUnit CalculateNewCurrentTime() const override { + // The HTMLMediaElement.currentTime should be updated to the seek target + // which has been updated to the next frame's time. 
+ return mSeekJob.mTarget->GetTime(); + } + + void DoSeek() override { + mMaster->StopMediaSink(); + + auto currentTime = mCurrentTime; + DiscardFrames(VideoQueue(), [currentTime](int64_t aSampleTime) { + return aSampleTime <= currentTime.ToMicroseconds(); + }); + + // If there is a pending video request, finish the seeking if we don't need + // more data, or wait for HandleVideoDecoded() to finish seeking. + if (mMaster->IsRequestingVideoData()) { + if (!NeedMoreVideo()) { + FinishSeek(); + } + return; + } + + // Otherwise, we need to do the seek operation asynchronously for a special + // case (bug504613.ogv) which has no data at all, the 1st seekToNextFrame() + // operation reaches the end of the media. If we did the seek operation + // synchronously, we immediately resolve the SeekPromise in mSeekJob and + // then switch to the CompletedState which dispatches an "ended" event. + // However, the ThenValue of the SeekPromise has not yet been set, so the + // promise resolving is postponed and then the JS developer receives the + // "ended" event before the seek promise is resolved. + // An asynchronous seek operation helps to solve this issue since while the + // seek is actually performed, the ThenValue of SeekPromise has already + // been set so that it won't be postponed. + RefPtr r = mAsyncSeekTask = new AysncNextFrameSeekTask(this); + nsresult rv = OwnerThread()->Dispatch(r.forget()); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; + } + + private: + void DoSeekInternal() { + // We don't need to discard frames to the mCurrentTime here because we have + // done it at DoSeek() and any video data received in between either + // finishes the seek operation or be discarded, see HandleVideoDecoded(). + + if (!NeedMoreVideo()) { + FinishSeek(); + } else if (!mMaster->IsRequestingVideoData() && + !mMaster->IsWaitingVideoData()) { + RequestVideoData(); + } + } + + class AysncNextFrameSeekTask : public Runnable { + public: + explicit AysncNextFrameSeekTask(NextFrameSeekingState* aStateObject) + : Runnable( + "MediaDecoderStateMachine::NextFrameSeekingState::" + "AysncNextFrameSeekTask"), + mStateObj(aStateObject) {} + + void Cancel() { mStateObj = nullptr; } + + NS_IMETHOD Run() override { + if (mStateObj) { + AUTO_PROFILER_LABEL("AysncNextFrameSeekTask::Run", MEDIA_PLAYBACK); + mStateObj->DoSeekInternal(); + } + return NS_OK; + } + + private: + NextFrameSeekingState* mStateObj; + }; + + void RequestVideoData() { mMaster->RequestVideoData(media::TimeUnit()); } + + bool NeedMoreVideo() const { + // Need to request video when we have none and video queue is not finished. + return VideoQueue().GetSize() == 0 && !VideoQueue().IsFinished(); + } + + // Update the seek target's time before resolving this seek task, the updated + // time will be used in the MDSM::SeekCompleted() to update the MDSM's + // position. + void UpdateSeekTargetTime() { + RefPtr data = VideoQueue().PeekFront(); + if (data) { + mSeekJob.mTarget->SetTime(data->mTime); + } else { + MOZ_ASSERT(VideoQueue().AtEndOfStream()); + mSeekJob.mTarget->SetTime(mDuration); + } + } + + void FinishSeek() { + MOZ_ASSERT(!NeedMoreVideo()); + UpdateSeekTargetTime(); + auto time = mSeekJob.mTarget->GetTime().ToMicroseconds(); + DiscardFrames(AudioQueue(), + [time](int64_t aSampleTime) { return aSampleTime < time; }); + SeekCompleted(); + } + + /* + * Internal state. 
+ */ + TimeUnit mCurrentTime; + TimeUnit mDuration; + RefPtr mAsyncSeekTask; +}; + +class MediaDecoderStateMachine::NextFrameSeekingFromDormantState + : public MediaDecoderStateMachine::AccurateSeekingState { + public: + explicit NextFrameSeekingFromDormantState(Master* aPtr) + : AccurateSeekingState(aPtr) {} + + State GetState() const override { return DECODER_STATE_SEEKING_FROMDORMANT; } + + RefPtr Enter(SeekJob&& aCurrentSeekJob, + SeekJob&& aFutureSeekJob) { + mFutureSeekJob = std::move(aFutureSeekJob); + + AccurateSeekingState::Enter(std::move(aCurrentSeekJob), + EventVisibility::Suppressed); + + // Once seekToNextFrame() is called, we assume the user is likely to keep + // calling seekToNextFrame() repeatedly, and so, we should prevent the MDSM + // from getting into Dormant state. + mMaster->mMinimizePreroll = false; + + return mFutureSeekJob.mPromise.Ensure(__func__); + } + + void Exit() override { + mFutureSeekJob.RejectIfExists(__func__); + AccurateSeekingState::Exit(); + } + + private: + SeekJob mFutureSeekJob; + + // We don't want to transition to DecodingState once this seek completes, + // instead, we transition to NextFrameSeekingState. + void GoToNextState() override { + SetState(std::move(mFutureSeekJob), + EventVisibility::Observable); + } +}; + +class MediaDecoderStateMachine::VideoOnlySeekingState + : public MediaDecoderStateMachine::AccurateSeekingState { + public: + explicit VideoOnlySeekingState(Master* aPtr) : AccurateSeekingState(aPtr) {} + + State GetState() const override { return DECODER_STATE_SEEKING_VIDEOONLY; } + + RefPtr Enter(SeekJob&& aSeekJob, + EventVisibility aVisibility) { + MOZ_ASSERT(aSeekJob.mTarget->IsVideoOnly()); + MOZ_ASSERT(aVisibility == EventVisibility::Suppressed); + + RefPtr p = + AccurateSeekingState::Enter(std::move(aSeekJob), aVisibility); + + // Dispatch a mozvideoonlyseekbegin event to indicate UI for corresponding + // changes. + mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::VideoOnlySeekBegin); + + return p; + } + + void Exit() override { + // We are completing or discarding this video-only seek operation now, + // dispatch an event so that the UI can change in response to the end + // of video-only seek. + mMaster->mOnPlaybackEvent.Notify( + MediaPlaybackEvent::VideoOnlySeekCompleted); + + AccurateSeekingState::Exit(); + } + + void HandleAudioDecoded(AudioData* aAudio) override { + MOZ_ASSERT(mDoneAudioSeeking && !mDoneVideoSeeking, + "Seek shouldn't be finished"); + MOZ_ASSERT(aAudio); + + // Video-only seek doesn't reset audio decoder. There might be pending audio + // requests when AccurateSeekTask::Seek() begins. We will just store the + // data without checking |mDiscontinuity| or calling + // DropAudioUpToSeekTarget(). + mMaster->PushAudio(aAudio); + } + + void HandleWaitingForAudio() override {} + + void HandleAudioCanceled() override {} + + void HandleEndOfAudio() override {} + + void HandleAudioWaited(MediaData::Type aType) override { + MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, + "Seek shouldn't be finished"); + + // Ignore pending requests from video-only seek. + } + + void DoSeek() override { + // TODO: keep decoding audio. + mDoneAudioSeeking = true; + mDoneVideoSeeking = !Info().HasVideo(); + + const auto offset = VideoQueue().GetOffset(); + mMaster->ResetDecode(TrackInfo::kVideoTrack); + + // Entering video-only state and we've looped at least once before, so we + // need to set offset in order to let new video frames catch up with the + // clock time. 
+ if (offset != media::TimeUnit::Zero()) { + VideoQueue().SetOffset(offset); + } + + DemuxerSeek(); + } + + protected: + // Allow skip-to-next-key-frame to kick in if we fall behind the current + // playback position so decoding has a better chance to catch up. + void RequestVideoData() override { + MOZ_ASSERT(!mDoneVideoSeeking); + + auto clock = mMaster->mMediaSink->IsStarted() ? mMaster->GetClock() + : mMaster->GetMediaTime(); + mMaster->AdjustByLooping(clock); + const auto& nextKeyFrameTime = GetNextKeyFrameTime(); + + auto threshold = clock; + + if (nextKeyFrameTime.IsValid() && + clock >= (nextKeyFrameTime - sSkipToNextKeyFrameThreshold)) { + threshold = nextKeyFrameTime; + } + + mMaster->RequestVideoData(threshold); + } + + private: + // Trigger skip to next key frame if the current playback position is very + // close the next key frame's time. + static constexpr TimeUnit sSkipToNextKeyFrameThreshold = + TimeUnit::FromMicroseconds(5000); + + // If the media is playing, drop video until catch up playback position. + media::TimeUnit GetSeekTarget() const override { + auto target = mMaster->mMediaSink->IsStarted() + ? mMaster->GetClock() + : mSeekJob.mTarget->GetTime(); + mMaster->AdjustByLooping(target); + return target; + } + + media::TimeUnit GetNextKeyFrameTime() const { + // We only call this method in RequestVideoData() and we only request video + // data if we haven't done video seeking. + MOZ_DIAGNOSTIC_ASSERT(!mDoneVideoSeeking); + MOZ_DIAGNOSTIC_ASSERT(mMaster->VideoQueue().GetSize() == 0); + + if (mFirstVideoFrameAfterSeek) { + return mFirstVideoFrameAfterSeek->NextKeyFrameTime(); + } + + return TimeUnit::Invalid(); + } +}; + +constexpr TimeUnit MediaDecoderStateMachine::VideoOnlySeekingState:: + sSkipToNextKeyFrameThreshold; + +RefPtr +MediaDecoderStateMachine::DormantState::HandleSeek(const SeekTarget& aTarget) { + if (aTarget.IsNextFrame()) { + // NextFrameSeekingState doesn't reset the decoder unlike + // AccurateSeekingState. So we first must come out of dormant by seeking to + // mPendingSeek and continue later with the NextFrameSeek + SLOG("Changed state to SEEKING (to %" PRId64 ")", + aTarget.GetTime().ToMicroseconds()); + SeekJob seekJob; + seekJob.mTarget = Some(aTarget); + return StateObject::SetState( + std::move(mPendingSeek), std::move(seekJob)); + } + + return StateObject::HandleSeek(aTarget); +} + +/** + * Purpose: stop playback until enough data is decoded to continue playback. + * + * Transition to: + * SEEKING if any seek request. + * SHUTDOWN if any decode error. + * COMPLETED when having decoded all audio/video data. + * DECODING/LOOPING_DECODING when having decoded enough data to continue + * playback. + */ +class MediaDecoderStateMachine::BufferingState + : public MediaDecoderStateMachine::StateObject { + public: + explicit BufferingState(Master* aPtr) : StateObject(aPtr) {} + + void Enter() { + if (mMaster->IsPlaying()) { + mMaster->StopPlayback(); + } + + mBufferingStart = TimeStamp::Now(); + mMaster->ScheduleStateMachineIn(TimeUnit::FromMicroseconds(USECS_PER_S)); + mMaster->mOnNextFrameStatus.Notify( + MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING); + } + + void Step() override; + + State GetState() const override { return DECODER_STATE_BUFFERING; } + + void HandleAudioDecoded(AudioData* aAudio) override { + mMaster->PushAudio(aAudio); + if (!mMaster->HaveEnoughDecodedAudio()) { + mMaster->RequestAudioData(); + } + // This might be the sample we need to exit buffering. + // Schedule Step() to check it. 
+ mMaster->ScheduleStateMachine(); + } + + void HandleVideoDecoded(VideoData* aVideo) override { + mMaster->PushVideo(aVideo); + if (!mMaster->HaveEnoughDecodedVideo()) { + mMaster->RequestVideoData(media::TimeUnit()); + } + // This might be the sample we need to exit buffering. + // Schedule Step() to check it. + mMaster->ScheduleStateMachine(); + } + + void HandleAudioCanceled() override { mMaster->RequestAudioData(); } + + void HandleVideoCanceled() override { + mMaster->RequestVideoData(media::TimeUnit()); + } + + void HandleWaitingForAudio() override { + mMaster->WaitForData(MediaData::Type::AUDIO_DATA); + } + + void HandleWaitingForVideo() override { + mMaster->WaitForData(MediaData::Type::VIDEO_DATA); + } + + void HandleAudioWaited(MediaData::Type aType) override { + mMaster->RequestAudioData(); + } + + void HandleVideoWaited(MediaData::Type aType) override { + mMaster->RequestVideoData(media::TimeUnit()); + } + + void HandleEndOfAudio() override; + void HandleEndOfVideo() override; + + void HandleVideoSuspendTimeout() override { + // No video, so nothing to suspend. + if (!mMaster->HasVideo()) { + return; + } + + mMaster->mVideoDecodeSuspended = true; + mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::EnterVideoSuspend); + Reader()->SetVideoBlankDecode(true); + } + + private: + TimeStamp mBufferingStart; + + // The maximum number of second we spend buffering when we are short on + // unbuffered data. + const uint32_t mBufferingWait = 15; +}; + +/** + * Purpose: play all the decoded data and fire the 'ended' event. + * + * Transition to: + * SEEKING if any seek request. + * LOOPING_DECODING if MDSM enable looping. + */ +class MediaDecoderStateMachine::CompletedState + : public MediaDecoderStateMachine::StateObject { + public: + explicit CompletedState(Master* aPtr) : StateObject(aPtr) {} + + void Enter() { + // On Android, the life cycle of graphic buffer is equal to Android's codec, + // we couldn't release it if we still need to render the frame. +#ifndef MOZ_WIDGET_ANDROID + if (!mMaster->mLooping) { + // We've decoded all samples. + // We don't need decoders anymore if not looping. + Reader()->ReleaseResources(); + } +#endif + bool hasNextFrame = (!mMaster->HasAudio() || !mMaster->mAudioCompleted) && + (!mMaster->HasVideo() || !mMaster->mVideoCompleted); + + mMaster->mOnNextFrameStatus.Notify( + hasNextFrame ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE + : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE); + + Step(); + } + + void Exit() override { mSentPlaybackEndedEvent = false; } + + void Step() override { + if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING && + mMaster->IsPlaying()) { + mMaster->StopPlayback(); + } + + // Play the remaining media. We want to run AdvanceFrame() at least + // once to ensure the current playback position is advanced to the + // end of the media, and so that we update the readyState. + if ((mMaster->HasVideo() && !mMaster->mVideoCompleted) || + (mMaster->HasAudio() && !mMaster->mAudioCompleted)) { + // Start playback if necessary to play the remaining media. + mMaster->MaybeStartPlayback(); + mMaster->UpdatePlaybackPositionPeriodically(); + MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(), + "Must have timer scheduled"); + return; + } + + // StopPlayback in order to reset the IsPlaying() state so audio + // is restarted correctly. 
+ mMaster->StopPlayback(); + + if (!mSentPlaybackEndedEvent) { + auto clockTime = + std::max(mMaster->AudioEndTime(), mMaster->VideoEndTime()); + // Correct the time over the end once looping was turned on. + mMaster->AdjustByLooping(clockTime); + if (mMaster->mDuration.Ref()->IsInfinite()) { + // We have a finite duration when playback reaches the end. + mMaster->mDuration = Some(clockTime); + DDLOGEX(mMaster, DDLogCategory::Property, "duration_us", + mMaster->mDuration.Ref()->ToMicroseconds()); + } + mMaster->UpdatePlaybackPosition(clockTime); + + // Ensure readyState is updated before firing the 'ended' event. + mMaster->mOnNextFrameStatus.Notify( + MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE); + + mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::PlaybackEnded); + + mSentPlaybackEndedEvent = true; + + // MediaSink::GetEndTime() must be called before stopping playback. + mMaster->StopMediaSink(); + } + } + + State GetState() const override { return DECODER_STATE_COMPLETED; } + + void HandleLoopingChanged() override { + if (mMaster->mLooping) { + SetDecodingState(); + } + } + + void HandleAudioCaptured() override { + // MediaSink is changed. Schedule Step() to check if we can start playback. + mMaster->ScheduleStateMachine(); + } + + void HandleVideoSuspendTimeout() override { + // Do nothing since no decoding is going on. + } + + void HandleResumeVideoDecoding(const TimeUnit&) override { + // Resume the video decoder and seek to the last video frame. + // This triggers a video-only seek which won't update the playback position. + auto target = mMaster->mDecodedVideoEndTime; + mMaster->AdjustByLooping(target); + StateObject::HandleResumeVideoDecoding(target); + } + + void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override { + if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) { + // Schedule Step() to check if we can start playback. + mMaster->ScheduleStateMachine(); + } + } + + private: + bool mSentPlaybackEndedEvent = false; +}; + +/** + * Purpose: release all resources allocated by MDSM. + * + * Transition to: + * None since this is the final state. + * + * Transition from: + * Any states other than SHUTDOWN. 
+ */ +class MediaDecoderStateMachine::ShutdownState + : public MediaDecoderStateMachine::StateObject { + public: + explicit ShutdownState(Master* aPtr) : StateObject(aPtr) {} + + RefPtr Enter(); + + void Exit() override { + MOZ_DIAGNOSTIC_ASSERT(false, "Shouldn't escape the SHUTDOWN state."); + } + + State GetState() const override { return DECODER_STATE_SHUTDOWN; } + + RefPtr HandleSeek( + const SeekTarget& aTarget) override { + MOZ_DIAGNOSTIC_ASSERT(false, "Can't seek in shutdown state."); + return MediaDecoder::SeekPromise::CreateAndReject(true, __func__); + } + + RefPtr HandleShutdown() override { + MOZ_DIAGNOSTIC_ASSERT(false, "Already shutting down."); + return nullptr; + } + + void HandleVideoSuspendTimeout() override { + MOZ_DIAGNOSTIC_ASSERT(false, "Already shutting down."); + } + + void HandleResumeVideoDecoding(const TimeUnit&) override { + MOZ_DIAGNOSTIC_ASSERT(false, "Already shutting down."); + } +}; + +RefPtr +MediaDecoderStateMachine::StateObject::HandleSeek(const SeekTarget& aTarget) { + SLOG("Changed state to SEEKING (to %" PRId64 ")", + aTarget.GetTime().ToMicroseconds()); + SeekJob seekJob; + seekJob.mTarget = Some(aTarget); + return SetSeekingState(std::move(seekJob), EventVisibility::Observable); +} + +RefPtr +MediaDecoderStateMachine::StateObject::HandleShutdown() { + return SetState(); +} + +static void ReportRecoveryTelemetry(const TimeStamp& aRecoveryStart, + const MediaInfo& aMediaInfo, + bool aIsHardwareAccelerated) { + MOZ_ASSERT(NS_IsMainThread()); + if (!aMediaInfo.HasVideo()) { + return; + } + + // Keyed by audio+video or video alone, hardware acceleration, + // and by a resolution range. + nsCString key(aMediaInfo.HasAudio() ? "AV" : "V"); + key.AppendASCII(aIsHardwareAccelerated ? "(hw)," : ","); + static const struct { + int32_t mH; + const char* mRes; + } sResolutions[] = {{240, "0-240"}, + {480, "241-480"}, + {720, "481-720"}, + {1080, "721-1080"}, + {2160, "1081-2160"}}; + const char* resolution = "2161+"; + int32_t height = aMediaInfo.mVideo.mImage.height; + for (const auto& res : sResolutions) { + if (height <= res.mH) { + resolution = res.mRes; + break; + } + } + key.AppendASCII(resolution); + + TimeDuration duration = TimeStamp::Now() - aRecoveryStart; + double duration_ms = duration.ToMilliseconds(); + Telemetry::Accumulate(Telemetry::VIDEO_SUSPEND_RECOVERY_TIME_MS, key, + static_cast(lround(duration_ms))); + Telemetry::Accumulate(Telemetry::VIDEO_SUSPEND_RECOVERY_TIME_MS, "All"_ns, + static_cast(lround(duration_ms))); +} + +void MediaDecoderStateMachine::StateObject::HandleResumeVideoDecoding( + const TimeUnit& aTarget) { + MOZ_ASSERT(mMaster->mVideoDecodeSuspended); + + mMaster->mVideoDecodeSuspended = false; + mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::ExitVideoSuspend); + Reader()->SetVideoBlankDecode(false); + + // Start counting recovery time from right now. + TimeStamp start = TimeStamp::Now(); + + // Local reference to mInfo, so that it will be copied in the lambda below. + const auto& info = Info(); + bool hw = Reader()->VideoIsHardwareAccelerated(); + + // Start video-only seek to the current time. + SeekJob seekJob; + + // We use fastseek to optimize the resuming time. + // FastSeek is only used for video-only media since we don't need to worry + // about A/V sync. + // Don't use fastSeek if we want to seek to the end because it might seek to a + // keyframe before the last frame (if the last frame itself is not a keyframe) + // and we always want to present the final frame to the user when seeking to + // the end. 
+  const auto type = mMaster->HasAudio() || aTarget == mMaster->Duration()
+                        ? SeekTarget::Type::Accurate
+                        : SeekTarget::Type::PrevSyncPoint;
+
+  seekJob.mTarget.emplace(aTarget, type, SeekTarget::Track::VideoOnly);
+  SLOG("video-only seek target=%" PRId64 ", current time=%" PRId64,
+       aTarget.ToMicroseconds(), mMaster->GetMediaTime().ToMicroseconds());
+
+  // Hold mMaster->mAbstractMainThread here because this->mMaster will be
+  // invalid after the current state object is deleted in SetState();
+  RefPtr<AbstractThread> mainThread = mMaster->mAbstractMainThread;
+
+  SetSeekingState(std::move(seekJob), EventVisibility::Suppressed)
+      ->Then(
+          mainThread, __func__,
+          [start, info, hw]() { ReportRecoveryTelemetry(start, info, hw); },
+          []() {});
+}
+
+RefPtr<MediaDecoder::SeekPromise>
+MediaDecoderStateMachine::StateObject::SetSeekingState(
+    SeekJob&& aSeekJob, EventVisibility aVisibility) {
+  if (aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast()) {
+    if (aSeekJob.mTarget->IsVideoOnly()) {
+      return SetState<VideoOnlySeekingState>(std::move(aSeekJob), aVisibility);
+    }
+    return SetState<AccurateSeekingState>(std::move(aSeekJob), aVisibility);
+  }
+
+  if (aSeekJob.mTarget->IsNextFrame()) {
+    return SetState<NextFrameSeekingState>(std::move(aSeekJob), aVisibility);
+  }
+
+  MOZ_ASSERT_UNREACHABLE("Unknown SeekTarget::Type.");
+  return nullptr;
+}
+
+void MediaDecoderStateMachine::StateObject::SetDecodingState() {
+  if (mMaster->IsInSeamlessLooping()) {
+    SetState<LoopingDecodingState>();
+    return;
+  }
+  SetState<DecodingState>();
+}
+
+void MediaDecoderStateMachine::DecodeMetadataState::OnMetadataRead(
+    MetadataHolder&& aMetadata) {
+  mMetadataRequest.Complete();
+
+  AUTO_PROFILER_LABEL("DecodeMetadataState::OnMetadataRead", MEDIA_PLAYBACK);
+  mMaster->mInfo.emplace(*aMetadata.mInfo);
+  mMaster->mMediaSeekable = Info().mMediaSeekable;
+  mMaster->mMediaSeekableOnlyInBufferedRanges =
+      Info().mMediaSeekableOnlyInBufferedRanges;
+
+  if (Info().mMetadataDuration.isSome()) {
+    mMaster->mDuration = Info().mMetadataDuration;
+  } else if (Info().mUnadjustedMetadataEndTime.isSome()) {
+    const TimeUnit unadjusted = Info().mUnadjustedMetadataEndTime.ref();
+    const TimeUnit adjustment = Info().mStartTime;
+    mMaster->mInfo->mMetadataDuration.emplace(unadjusted - adjustment);
+    mMaster->mDuration = Info().mMetadataDuration;
+  }
+
+  // If we don't know the duration by this point, we assume infinity, per spec.
+  if (mMaster->mDuration.Ref().isNothing()) {
+    mMaster->mDuration = Some(TimeUnit::FromInfinity());
+  }
+
+  DDLOGEX(mMaster, DDLogCategory::Property, "duration_us",
+          mMaster->mDuration.Ref()->ToMicroseconds());
+
+  if (mMaster->HasVideo()) {
+    SLOG("Video decode HWAccel=%d videoQueueSize=%d",
+         Reader()->VideoIsHardwareAccelerated(),
+         mMaster->GetAmpleVideoFrames());
+  }
+
+  MOZ_ASSERT(mMaster->mDuration.Ref().isSome());
+
+  mMaster->mMetadataLoadedEvent.Notify(std::move(aMetadata.mInfo),
+                                       std::move(aMetadata.mTags),
+                                       MediaDecoderEventVisibility::Observable);
+
+  // Check whether the media satisfies the requirement of seamless looping.
+  // TODO: once video seamless looping is stable enough, remove this so the
+  // condition is always true.
+  mMaster->mSeamlessLoopingAllowed = StaticPrefs::media_seamless_looping();
+  if (mMaster->HasVideo()) {
+    mMaster->mSeamlessLoopingAllowed =
+        StaticPrefs::media_seamless_looping_video();
+  }
+
+  SetState<DecodingFirstFrameState>();
+}
+
+void MediaDecoderStateMachine::DormantState::HandlePlayStateChanged(
+    MediaDecoder::PlayState aPlayState) {
+  if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
+    // Exit dormant when the user wants to play.
+    MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);
+    SetSeekingState(std::move(mPendingSeek), EventVisibility::Suppressed);
+  }
+}
+
+void MediaDecoderStateMachine::DecodingFirstFrameState::Enter() {
+  // Transition to DECODING if we've already decoded the first frames.
+  if (mMaster->mSentFirstFrameLoadedEvent) {
+    SetDecodingState();
+    return;
+  }
+
+  MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);
+
+  // Dispatch tasks to decode the first frames.
+  if (mMaster->HasAudio()) {
+    mMaster->RequestAudioData();
+  }
+  if (mMaster->HasVideo()) {
+    mMaster->RequestVideoData(media::TimeUnit());
+  }
+}
+
+void MediaDecoderStateMachine::DecodingFirstFrameState::
+    MaybeFinishDecodeFirstFrame() {
+  MOZ_ASSERT(!mMaster->mSentFirstFrameLoadedEvent);
+
+  if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0) ||
+      (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
+    return;
+  }
+
+  mMaster->FinishDecodeFirstFrame();
+  if (mPendingSeek.Exists()) {
+    SetSeekingState(std::move(mPendingSeek), EventVisibility::Observable);
+  } else {
+    SetDecodingState();
+  }
+}
+
+void MediaDecoderStateMachine::DecodingState::Enter() {
+  MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);
+
+  if (mMaster->mVideoDecodeSuspended &&
+      mMaster->mVideoDecodeMode == VideoDecodeMode::Normal) {
+    StateObject::HandleResumeVideoDecoding(mMaster->GetMediaTime());
+    return;
+  }
+
+  if (mMaster->mVideoDecodeMode == VideoDecodeMode::Suspend &&
+      !mMaster->mVideoDecodeSuspendTimer.IsScheduled() &&
+      !mMaster->mVideoDecodeSuspended) {
+    // If the VideoDecodeMode is Suspend and the timer is not scheduled, it
+    // means the timer has timed out and we should suspend video decoding now
+    // if necessary.
+    HandleVideoSuspendTimeout();
+  }
+
+  // If we're in the normal decoding mode and decoding has finished, then we
+  // should go to the `completed` state because we don't need to decode
+  // anything later. However, if we're in seamless looping mode, we will
+  // restart decoding ASAP so we can stay in the `decoding` state.
+  if (!mMaster->IsVideoDecoding() && !mMaster->IsAudioDecoding() &&
+      !mMaster->IsInSeamlessLooping()) {
+    SetState<CompletedState>();
+    return;
+  }
+
+  mOnAudioPopped =
+      AudioQueue().PopFrontEvent().Connect(OwnerThread(), [this]() {
+        AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnAudioPopped",
+                            MEDIA_PLAYBACK);
+        if (mMaster->IsAudioDecoding() && !mMaster->HaveEnoughDecodedAudio()) {
+          EnsureAudioDecodeTaskQueued();
+        }
+      });
+  mOnVideoPopped =
+      VideoQueue().PopFrontEvent().Connect(OwnerThread(), [this]() {
+        AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnVideoPopped",
+                            MEDIA_PLAYBACK);
+        if (mMaster->IsVideoDecoding() && !mMaster->HaveEnoughDecodedVideo()) {
+          EnsureVideoDecodeTaskQueued();
+        }
+      });
+
+  mMaster->mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_AVAILABLE);
+
+  mDecodeStartTime = TimeStamp::Now();
+
+  MaybeStopPrerolling();
+
+  // Ensure that we've got tasks enqueued to decode data if we need to.
+  DispatchDecodeTasksIfNeeded();
+
+  mMaster->ScheduleStateMachine();
+
+  // Will enter dormant when playback is paused for a while.
+  if (mMaster->mPlayState == MediaDecoder::PLAY_STATE_PAUSED) {
+    StartDormantTimer();
+  }
+}
+
+void MediaDecoderStateMachine::DecodingState::Step() {
+  if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
+      mMaster->IsPlaying()) {
+    // We're playing, but the element/decoder is in paused state. Stop
+    // playing!
+    mMaster->StopPlayback();
+  }
+
+  // Start playback if necessary so that the clock can be properly queried.
+  if (!mIsPrerolling) {
+    mMaster->MaybeStartPlayback();
+  }
+
+  mMaster->UpdatePlaybackPositionPeriodically();
+  MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(),
+             "Must have timer scheduled");
+  if (IsBufferingAllowed()) {
+    MaybeStartBuffering();
+  }
+}
+
+void MediaDecoderStateMachine::DecodingState::HandleEndOfAudio() {
+  AudioQueue().Finish();
+  if (!mMaster->IsVideoDecoding()) {
+    SetState<CompletedState>();
+  } else {
+    MaybeStopPrerolling();
+  }
+}
+
+void MediaDecoderStateMachine::DecodingState::HandleEndOfVideo() {
+  VideoQueue().Finish();
+  if (!mMaster->IsAudioDecoding()) {
+    SetState<CompletedState>();
+  } else {
+    MaybeStopPrerolling();
+  }
+}
+
+void MediaDecoderStateMachine::DecodingState::DispatchDecodeTasksIfNeeded() {
+  if (mMaster->IsAudioDecoding() && !mMaster->mMinimizePreroll &&
+      !mMaster->HaveEnoughDecodedAudio()) {
+    EnsureAudioDecodeTaskQueued();
+  }
+
+  if (mMaster->IsVideoDecoding() && !mMaster->mMinimizePreroll &&
+      !mMaster->HaveEnoughDecodedVideo()) {
+    EnsureVideoDecodeTaskQueued();
+  }
+}
+
+void MediaDecoderStateMachine::DecodingState::EnsureAudioDecodeTaskQueued() {
+  if (!mMaster->IsAudioDecoding() || mMaster->IsRequestingAudioData() ||
+      mMaster->IsWaitingAudioData()) {
+    return;
+  }
+  mMaster->RequestAudioData();
+}
+
+void MediaDecoderStateMachine::DecodingState::EnsureVideoDecodeTaskQueued() {
+  if (!mMaster->IsVideoDecoding() || mMaster->IsRequestingVideoData() ||
+      mMaster->IsWaitingVideoData()) {
+    return;
+  }
+  mMaster->RequestVideoData(mMaster->GetMediaTime(),
+                            ShouldRequestNextKeyFrame());
+}
+
+void MediaDecoderStateMachine::DecodingState::MaybeStartBuffering() {
+  // Buffering makes sense only after decoding the first frames.
+  MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);
+
+  // Don't enter buffering when MediaDecoder is not playing.
+  if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
+    return;
+  }
+
+  // Don't enter buffering while prerolling so that the decoder has a chance to
+  // enqueue some decoded data before we give up and start buffering.
+  if (!mMaster->IsPlaying()) {
+    return;
+  }
+
+  // Note we could have a wait promise pending when playing non-MSE EME.
+  if (mMaster->OutOfDecodedAudio() && mMaster->IsWaitingAudioData()) {
+    PROFILER_MARKER_TEXT("MDSM::StartBuffering", MEDIA_PLAYBACK, {},
+                         "OutOfDecodedAudio");
+    SLOG("Enter buffering due to out of decoded audio");
+    SetState<BufferingState>();
+    return;
+  }
+  if (mMaster->OutOfDecodedVideo() && mMaster->IsWaitingVideoData()) {
+    PROFILER_MARKER_TEXT("MDSM::StartBuffering", MEDIA_PLAYBACK, {},
+                         "OutOfDecodedVideo");
+    SLOG("Enter buffering due to out of decoded video");
+    SetState<BufferingState>();
+    return;
+  }
+
+  if (Reader()->UseBufferingHeuristics() && mMaster->HasLowDecodedData() &&
+      mMaster->HasLowBufferedData() && !mMaster->mCanPlayThrough) {
+    PROFILER_MARKER_TEXT("MDSM::StartBuffering", MEDIA_PLAYBACK, {},
+                         "BufferingHeuristics");
+    SLOG("Enter buffering due to buffering heuristics");
+    SetState<BufferingState>();
+  }
+}
+
+void MediaDecoderStateMachine::LoopingDecodingState::HandleError(
+    const MediaResult& aError, bool aIsAudio) {
+  SLOG("%s looping failed, aError=%s", aIsAudio ? "audio" : "video",
+       aError.ErrorName().get());
+  switch (aError.Code()) {
+    case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
+      if (aIsAudio) {
+        HandleWaitingForAudio();
+      } else {
+        HandleWaitingForVideo();
+      }
+      [[fallthrough]];
+    case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
+      // This could happen after either the resource has been closed, or the
+      // data hasn't been appended in MSE; either way we won't be able to get
+      // any more samples and need to fall back to normal looping.
+      if (mIsReachingAudioEOS && mIsReachingVideoEOS) {
+        SetState<CompletedState>();
+      }
+      break;
+    default:
+      mMaster->DecodeError(aError);
+      break;
+  }
+}
+
+void MediaDecoderStateMachine::SeekingState::SeekCompleted() {
+  const auto newCurrentTime = CalculateNewCurrentTime();
+
+  if ((newCurrentTime == mMaster->Duration() ||
+       newCurrentTime.EqualsAtLowestResolution(
+           mMaster->Duration().ToBase(USECS_PER_S))) &&
+      !mMaster->mIsLiveStream) {
+    SLOG("Seek completed, seeked to end: %s", newCurrentTime.ToString().get());
+    // We will transition to COMPLETED immediately. Note we don't do
+    // this when playing a live stream, since the end of media will advance
+    // once we download more data!
+    AudioQueue().Finish();
+    VideoQueue().Finish();
+
+    // We won't start MediaSink when paused. m{Audio,Video}Completed will
+    // remain false and 'playbackEnded' won't be notified. Therefore we
+    // need to set these flags explicitly when seeking to the end.
+    mMaster->mAudioCompleted = true;
+    mMaster->mVideoCompleted = true;
+
+    // There might still be a pending audio request when doing video-only or
+    // next-frame seek. Discard it so we won't break the invariants of the
+    // COMPLETED state by adding audio samples to a finished queue.
+    mMaster->mAudioDataRequest.DisconnectIfExists();
+  }
+
+  // We want to resolve the seek request prior to finishing the first frame
+  // to ensure that the seeked event is fired prior to 'loadeddata'.
+  // Note: SeekJob.Resolve() resets SeekJob.mTarget. Don't use mSeekJob anymore
+  // hereafter.
+  mSeekJob.Resolve(__func__);
+
+  // Notify FirstFrameLoaded now if we haven't yet, since we've decoded some
+  // data for readyState to transition to HAVE_CURRENT_DATA and fire
+  // 'loadeddata'.
+  if (!mMaster->mSentFirstFrameLoadedEvent) {
+    mMaster->FinishDecodeFirstFrame();
+  }
+
+  // Ensure timestamps are up to date.
+  // Suppressed visibility comes from two cases: (1) leaving dormant state,
+  // and (2) resuming suspended video decoder. We want both cases to be
+  // transparent to the user. So we only notify the change when the seek
+  // request is from the user.
+  if (mVisibility == EventVisibility::Observable) {
+    // Don't update playback position for video-only seek.
+    // Otherwise we might have |newCurrentTime > mMediaSink->GetPosition()|
+    // and fail the assertion in GetClock() since we didn't stop MediaSink.
+    mMaster->UpdatePlaybackPositionInternal(newCurrentTime);
+  }
+
+  // Try to decode another frame to detect if we're at the end...
+ SLOG("Seek completed, mCurrentPosition=%" PRId64, + mMaster->mCurrentPosition.Ref().ToMicroseconds()); + + if (mMaster->VideoQueue().PeekFront()) { + mMaster->mMediaSink->Redraw(Info().mVideo); + mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::Invalidate); + } + + GoToNextState(); +} + +void MediaDecoderStateMachine::BufferingState::Step() { + TimeStamp now = TimeStamp::Now(); + MOZ_ASSERT(!mBufferingStart.IsNull(), "Must know buffering start time."); + + if (Reader()->UseBufferingHeuristics()) { + if (mMaster->IsWaitingAudioData() || mMaster->IsWaitingVideoData()) { + // Can't exit buffering when we are still waiting for data. + // Note we don't schedule next loop for we will do that when the wait + // promise is resolved. + return; + } + // With buffering heuristics, we exit buffering state when we: + // 1. can play through or + // 2. time out (specified by mBufferingWait) or + // 3. have enough buffered data. + TimeDuration elapsed = now - mBufferingStart; + TimeDuration timeout = + TimeDuration::FromSeconds(mBufferingWait * mMaster->mPlaybackRate); + bool stopBuffering = + mMaster->mCanPlayThrough || elapsed >= timeout || + !mMaster->HasLowBufferedData(TimeUnit::FromSeconds(mBufferingWait)); + if (!stopBuffering) { + SLOG("Buffering: wait %ds, timeout in %.3lfs", mBufferingWait, + mBufferingWait - elapsed.ToSeconds()); + mMaster->ScheduleStateMachineIn(TimeUnit::FromMicroseconds(USECS_PER_S)); + return; + } + } else if (mMaster->OutOfDecodedAudio() || mMaster->OutOfDecodedVideo()) { + MOZ_ASSERT(!mMaster->OutOfDecodedAudio() || + mMaster->IsRequestingAudioData() || + mMaster->IsWaitingAudioData()); + MOZ_ASSERT(!mMaster->OutOfDecodedVideo() || + mMaster->IsRequestingVideoData() || + mMaster->IsWaitingVideoData()); + SLOG( + "In buffering mode, waiting to be notified: outOfAudio: %d, " + "mAudioStatus: %s, outOfVideo: %d, mVideoStatus: %s", + mMaster->OutOfDecodedAudio(), mMaster->AudioRequestStatus(), + mMaster->OutOfDecodedVideo(), mMaster->VideoRequestStatus()); + return; + } + + SLOG("Buffered for %.3lfs", (now - mBufferingStart).ToSeconds()); + SetDecodingState(); +} + +void MediaDecoderStateMachine::BufferingState::HandleEndOfAudio() { + AudioQueue().Finish(); + if (!mMaster->IsVideoDecoding()) { + SetState(); + } else { + // Check if we can exit buffering. + mMaster->ScheduleStateMachine(); + } +} + +void MediaDecoderStateMachine::BufferingState::HandleEndOfVideo() { + VideoQueue().Finish(); + if (!mMaster->IsAudioDecoding()) { + SetState(); + } else { + // Check if we can exit buffering. + mMaster->ScheduleStateMachine(); + } +} + +RefPtr MediaDecoderStateMachine::ShutdownState::Enter() { + auto* master = mMaster; + + master->mDelayedScheduler.Reset(); + + // Shutdown happens while decode timer is active, we need to disconnect and + // dispose of the timer. + master->CancelSuspendTimer(); + + if (master->IsPlaying()) { + master->StopPlayback(); + } + + master->mAudioDataRequest.DisconnectIfExists(); + master->mVideoDataRequest.DisconnectIfExists(); + master->mAudioWaitRequest.DisconnectIfExists(); + master->mVideoWaitRequest.DisconnectIfExists(); + + // Resetting decode should be called after stopping media sink, which can + // ensure that we have an empty media queue before seeking the demuxer. + master->StopMediaSink(); + master->ResetDecode(); + master->mMediaSink->Shutdown(); + + // Prevent dangling pointers by disconnecting the listeners. 
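// The Disconnect() calls that follow rely on the listener-handle pattern used
// throughout this file: Connect() hands back a handle, and once that handle is
// disconnected the event source can no longer reach the callback (or anything
// it captured). A rough standalone sketch of the pattern with std types rather
// than Gecko's MediaEventSource; all names here are illustrative:

#include <cstdio>
#include <functional>
#include <memory>
#include <vector>

class EventSource {
 public:
  class Listener {
   public:
    explicit Listener(std::shared_ptr<std::function<void(int)>> aCallback)
        : mCallback(std::move(aCallback)) {}
    // After Disconnect() the source only holds an expired weak_ptr, so the
    // callback and its captures can no longer be invoked.
    void Disconnect() { mCallback.reset(); }

   private:
    std::shared_ptr<std::function<void(int)>> mCallback;
  };

  Listener Connect(std::function<void(int)> aCallback) {
    auto cb = std::make_shared<std::function<void(int)>>(std::move(aCallback));
    mCallbacks.push_back(cb);
    return Listener(cb);
  }

  void Notify(int aValue) {
    for (auto& weak : mCallbacks) {
      if (auto cb = weak.lock()) {
        (*cb)(aValue);
      }
    }
  }

 private:
  std::vector<std::weak_ptr<std::function<void(int)>>> mCallbacks;
};

int main() {
  EventSource source;
  EventSource::Listener listener =
      source.Connect([](int v) { std::printf("got %d\n", v); });
  source.Notify(1);       // delivered
  listener.Disconnect();  // comparable to the Disconnect() calls below
  source.Notify(2);       // dropped; no dangling callback is invoked
  return 0;
}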
+ master->mAudioQueueListener.Disconnect(); + master->mVideoQueueListener.Disconnect(); + master->mMetadataManager.Disconnect(); + master->mOnMediaNotSeekable.Disconnect(); + master->mAudibleListener.DisconnectIfExists(); + + // Disconnect canonicals and mirrors before shutting down our task queue. + master->mStreamName.DisconnectIfConnected(); + master->mSinkDevice.DisconnectIfConnected(); + master->mOutputCaptureState.DisconnectIfConnected(); + master->mOutputDummyTrack.DisconnectIfConnected(); + master->mOutputTracks.DisconnectIfConnected(); + master->mOutputPrincipal.DisconnectIfConnected(); + + master->mDuration.DisconnectAll(); + master->mCurrentPosition.DisconnectAll(); + master->mIsAudioDataAudible.DisconnectAll(); + + // Shut down the watch manager to stop further notifications. + master->mWatchManager.Shutdown(); + + return Reader()->Shutdown()->Then(OwnerThread(), __func__, master, + &MediaDecoderStateMachine::FinishShutdown, + &MediaDecoderStateMachine::FinishShutdown); +} + +#define INIT_WATCHABLE(name, val) name(val, "MediaDecoderStateMachine::" #name) +#define INIT_MIRROR(name, val) \ + name(mTaskQueue, val, "MediaDecoderStateMachine::" #name " (Mirror)") +#define INIT_CANONICAL(name, val) \ + name(mTaskQueue, val, "MediaDecoderStateMachine::" #name " (Canonical)") + +MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder, + MediaFormatReader* aReader) + : MediaDecoderStateMachineBase(aDecoder, aReader), + mWatchManager(this, mTaskQueue), + mDispatchedStateMachine(false), + mDelayedScheduler(mTaskQueue, true /*aFuzzy*/), + mCurrentFrameID(0), + mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD), + mVideoDecodeSuspended(false), + mVideoDecodeSuspendTimer(mTaskQueue), + mVideoDecodeMode(VideoDecodeMode::Normal), + mIsMSE(aDecoder->IsMSE()), + mShouldResistFingerprinting(aDecoder->ShouldResistFingerprinting()), + mSeamlessLoopingAllowed(false), + INIT_MIRROR(mStreamName, nsAutoString()), + INIT_MIRROR(mSinkDevice, nullptr), + INIT_MIRROR(mOutputCaptureState, MediaDecoder::OutputCaptureState::None), + INIT_MIRROR(mOutputDummyTrack, nullptr), + INIT_MIRROR(mOutputTracks, nsTArray>()), + INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE), + INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE) { + MOZ_COUNT_CTOR(MediaDecoderStateMachine); + NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); + + InitVideoQueuePrefs(); + + DDLINKCHILD("reader", aReader); +} + +#undef INIT_WATCHABLE +#undef INIT_MIRROR +#undef INIT_CANONICAL + +MediaDecoderStateMachine::~MediaDecoderStateMachine() { + MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread."); + MOZ_COUNT_DTOR(MediaDecoderStateMachine); +} + +void MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder) { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::InitializationTask", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + MediaDecoderStateMachineBase::InitializationTask(aDecoder); + + // Initialize watchers. 
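// The Watch() registrations that follow hook value changes up to member
// callbacks such as StreamNameChanged() and UpdateOutputCaptured(). A small
// standalone sketch of that watch pattern: the real WatchManager defers the
// callback to the task queue, while this sketch calls it synchronously, and
// every name is illustrative:

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

template <typename T>
class Watchable {
 public:
  explicit Watchable(T aInitial) : mValue(std::move(aInitial)) {}

  void Watch(std::function<void()> aCallback) {
    mWatchers.push_back(std::move(aCallback));
  }

  void Set(T aValue) {
    if (aValue == mValue) {
      return;  // only notify on actual changes
    }
    mValue = std::move(aValue);
    for (auto& watcher : mWatchers) {
      watcher();
    }
  }

  const T& Ref() const { return mValue; }

 private:
  T mValue;
  std::vector<std::function<void()>> mWatchers;
};

int main() {
  Watchable<std::string> streamName("");
  streamName.Watch([&] {
    std::printf("StreamNameChanged -> %s\n", streamName.Ref().c_str());
  });
  streamName.Set("camera-1");  // watcher fires
  streamName.Set("camera-1");  // unchanged, watcher stays quiet
  return 0;
}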
+ mWatchManager.Watch(mStreamName, + &MediaDecoderStateMachine::StreamNameChanged); + mWatchManager.Watch(mOutputCaptureState, + &MediaDecoderStateMachine::UpdateOutputCaptured); + mWatchManager.Watch(mOutputDummyTrack, + &MediaDecoderStateMachine::UpdateOutputCaptured); + mWatchManager.Watch(mOutputTracks, + &MediaDecoderStateMachine::UpdateOutputCaptured); + mWatchManager.Watch(mOutputPrincipal, + &MediaDecoderStateMachine::OutputPrincipalChanged); + + mMediaSink = CreateMediaSink(); + + MOZ_ASSERT(!mStateObj); + auto* s = new DecodeMetadataState(this); + mStateObj.reset(s); + s->Enter(); +} + +void MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible) { + mIsAudioDataAudible = aAudible; +} + +MediaSink* MediaDecoderStateMachine::CreateAudioSink() { + if (mOutputCaptureState != MediaDecoder::OutputCaptureState::None) { + DecodedStream* stream = new DecodedStream( + this, + mOutputCaptureState == MediaDecoder::OutputCaptureState::Capture + ? mOutputDummyTrack.Ref() + : nullptr, + mOutputTracks, mVolume, mPlaybackRate, mPreservesPitch, mAudioQueue, + mVideoQueue, mSinkDevice.Ref()); + mAudibleListener.DisconnectIfExists(); + mAudibleListener = stream->AudibleEvent().Connect( + OwnerThread(), this, &MediaDecoderStateMachine::AudioAudibleChanged); + return stream; + } + + auto audioSinkCreator = [s = RefPtr(this), this]() { + MOZ_ASSERT(OnTaskQueue()); + UniquePtr audioSink{new AudioSink( + mTaskQueue, mAudioQueue, Info().mAudio, mShouldResistFingerprinting)}; + mAudibleListener.DisconnectIfExists(); + mAudibleListener = audioSink->AudibleEvent().Connect( + mTaskQueue, this, &MediaDecoderStateMachine::AudioAudibleChanged); + return audioSink; + }; + return new AudioSinkWrapper( + mTaskQueue, mAudioQueue, std::move(audioSinkCreator), mVolume, + mPlaybackRate, mPreservesPitch, mSinkDevice.Ref()); +} + +already_AddRefed MediaDecoderStateMachine::CreateMediaSink() { + MOZ_ASSERT(OnTaskQueue()); + RefPtr audioSink = CreateAudioSink(); + RefPtr mediaSink = + new VideoSink(mTaskQueue, audioSink, mVideoQueue, mVideoFrameContainer, + *mFrameStats, sVideoQueueSendToCompositorSize); + if (mSecondaryVideoContainer.Ref()) { + mediaSink->SetSecondaryVideoContainer(mSecondaryVideoContainer.Ref()); + } + return mediaSink.forget(); +} + +TimeUnit MediaDecoderStateMachine::GetDecodedAudioDuration() const { + MOZ_ASSERT(OnTaskQueue()); + if (mMediaSink->IsStarted()) { + return mMediaSink->UnplayedDuration(TrackInfo::kAudioTrack) + + TimeUnit::FromMicroseconds(AudioQueue().Duration()); + } + // MediaSink not started. All audio samples are in the queue. + return TimeUnit::FromMicroseconds(AudioQueue().Duration()); +} + +bool MediaDecoderStateMachine::HaveEnoughDecodedAudio() const { + MOZ_ASSERT(OnTaskQueue()); + auto ampleAudio = mAmpleAudioThreshold.MultDouble(mPlaybackRate); + return AudioQueue().GetSize() > 0 && GetDecodedAudioDuration() >= ampleAudio; +} + +bool MediaDecoderStateMachine::HaveEnoughDecodedVideo() const { + MOZ_ASSERT(OnTaskQueue()); + return static_cast(VideoQueue().GetSize()) >= + GetAmpleVideoFrames() * mPlaybackRate + 1 && + IsVideoDataEnoughComparedWithAudio(); +} + +bool MediaDecoderStateMachine::IsVideoDataEnoughComparedWithAudio() const { + // HW decoding is usually fast enough and we don't need to worry about its + // speed. + // TODO : we can consider whether we need to enable this on other HW decoding + // except VAAPI. When enabling VAAPI on Linux, ffmpeg is not able to store too + // many frames because it has a limitation of amount of stored video frames. 
+ // See bug1716638 and 1718309. + if (mReader->VideoIsHardwareAccelerated()) { + return true; + } + // In extreme situations (e.g. 4k+ video without hardware acceleration), the + // video decoding will be much slower than audio. So for 4K+ video, we want to + // consider audio decoding speed as well in order to reduce frame drops. This + // check tries to keep the decoded video buffered as much as audio. + if (HasAudio() && Info().mVideo.mImage.width >= 3840 && + Info().mVideo.mImage.height >= 2160) { + return VideoQueue().Duration() >= AudioQueue().Duration(); + } + // For non-4k video, the video decoding is usually really fast so we won't + // need to consider audio decoding speed to store extra frames. + return true; +} + +void MediaDecoderStateMachine::PushAudio(AudioData* aSample) { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(aSample); + AudioQueue().Push(aSample); + PROFILER_MARKER("MDSM::PushAudio", MEDIA_PLAYBACK, {}, MediaSampleMarker, + aSample->mTime.ToMicroseconds(), + aSample->GetEndTime().ToMicroseconds(), + AudioQueue().GetSize()); +} + +void MediaDecoderStateMachine::PushVideo(VideoData* aSample) { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(aSample); + aSample->mFrameID = ++mCurrentFrameID; + VideoQueue().Push(aSample); + PROFILER_MARKER("MDSM::PushVideo", MEDIA_PLAYBACK, {}, MediaSampleMarker, + aSample->mTime.ToMicroseconds(), + aSample->GetEndTime().ToMicroseconds(), + VideoQueue().GetSize()); +} + +void MediaDecoderStateMachine::OnAudioPopped(const RefPtr& aSample) { + MOZ_ASSERT(OnTaskQueue()); + mPlaybackOffset = std::max(mPlaybackOffset, aSample->mOffset); +} + +void MediaDecoderStateMachine::OnVideoPopped(const RefPtr& aSample) { + MOZ_ASSERT(OnTaskQueue()); + mPlaybackOffset = std::max(mPlaybackOffset, aSample->mOffset); +} + +bool MediaDecoderStateMachine::IsAudioDecoding() { + MOZ_ASSERT(OnTaskQueue()); + return HasAudio() && !AudioQueue().IsFinished(); +} + +bool MediaDecoderStateMachine::IsVideoDecoding() { + MOZ_ASSERT(OnTaskQueue()); + return HasVideo() && !VideoQueue().IsFinished(); +} + +bool MediaDecoderStateMachine::IsPlaying() const { + MOZ_ASSERT(OnTaskQueue()); + return mMediaSink->IsPlaying(); +} + +void MediaDecoderStateMachine::SetMediaNotSeekable() { mMediaSeekable = false; } + +nsresult MediaDecoderStateMachine::Init(MediaDecoder* aDecoder) { + MOZ_ASSERT(NS_IsMainThread()); + + nsresult rv = MediaDecoderStateMachineBase::Init(aDecoder); + if (NS_WARN_IF(NS_FAILED(rv))) { + return rv; + } + + // Connect mirrors. 
+ aDecoder->CanonicalStreamName().ConnectMirror(&mStreamName); + aDecoder->CanonicalSinkDevice().ConnectMirror(&mSinkDevice); + aDecoder->CanonicalOutputCaptureState().ConnectMirror(&mOutputCaptureState); + aDecoder->CanonicalOutputDummyTrack().ConnectMirror(&mOutputDummyTrack); + aDecoder->CanonicalOutputTracks().ConnectMirror(&mOutputTracks); + aDecoder->CanonicalOutputPrincipal().ConnectMirror(&mOutputPrincipal); + + mAudioQueueListener = AudioQueue().PopFrontEvent().Connect( + mTaskQueue, this, &MediaDecoderStateMachine::OnAudioPopped); + mVideoQueueListener = VideoQueue().PopFrontEvent().Connect( + mTaskQueue, this, &MediaDecoderStateMachine::OnVideoPopped); + mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect( + OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable); + + return NS_OK; +} + +void MediaDecoderStateMachine::StopPlayback() { + MOZ_ASSERT(OnTaskQueue()); + LOG("StopPlayback()"); + + if (IsPlaying()) { + mOnPlaybackEvent.Notify(MediaPlaybackEvent{ + MediaPlaybackEvent::PlaybackStopped, mPlaybackOffset}); + mMediaSink->SetPlaying(false); + MOZ_ASSERT(!IsPlaying()); + } +} + +void MediaDecoderStateMachine::MaybeStartPlayback() { + MOZ_ASSERT(OnTaskQueue()); + // Should try to start playback only after decoding first frames. + if (!mSentFirstFrameLoadedEvent) { + LOG("MaybeStartPlayback: Not starting playback before loading first frame"); + return; + } + + if (IsPlaying()) { + // Logging this case is really spammy - don't do it. + return; + } + + if (mIsMediaSinkSuspended) { + LOG("MaybeStartPlayback: Not starting playback when sink is suspended"); + return; + } + + if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) { + LOG("MaybeStartPlayback: Not starting playback [mPlayState=%d]", + mPlayState.Ref()); + return; + } + + LOG("MaybeStartPlayback() starting playback"); + StartMediaSink(); + + if (!IsPlaying()) { + mMediaSink->SetPlaying(true); + MOZ_ASSERT(IsPlaying()); + } + + mOnPlaybackEvent.Notify( + MediaPlaybackEvent{MediaPlaybackEvent::PlaybackStarted, mPlaybackOffset}); +} + +void MediaDecoderStateMachine::UpdatePlaybackPositionInternal( + const TimeUnit& aTime) { + MOZ_ASSERT(OnTaskQueue()); + LOGV("UpdatePlaybackPositionInternal(%" PRId64 ")", aTime.ToMicroseconds()); + + // Ensure the position has a precision that matches other TimeUnit such as + // buffering ranges and duration. 
+ mCurrentPosition = aTime.ToBase(1000000); + NS_ASSERTION(mCurrentPosition.Ref() >= TimeUnit::Zero(), + "CurrentTime should be positive!"); + if (mDuration.Ref().ref() < mCurrentPosition.Ref()) { + mDuration = Some(mCurrentPosition.Ref()); + DDLOG(DDLogCategory::Property, "duration_us", + mDuration.Ref()->ToMicroseconds()); + } +} + +void MediaDecoderStateMachine::UpdatePlaybackPosition(const TimeUnit& aTime) { + MOZ_ASSERT(OnTaskQueue()); + UpdatePlaybackPositionInternal(aTime); + + bool fragmentEnded = + mFragmentEndTime.IsValid() && GetMediaTime() >= mFragmentEndTime; + mMetadataManager.DispatchMetadataIfNeeded(aTime); + + if (fragmentEnded) { + StopPlayback(); + } +} + +/* static */ const char* MediaDecoderStateMachine::ToStateStr(State aState) { + switch (aState) { + case DECODER_STATE_DECODING_METADATA: + return "DECODING_METADATA"; + case DECODER_STATE_DORMANT: + return "DORMANT"; + case DECODER_STATE_DECODING_FIRSTFRAME: + return "DECODING_FIRSTFRAME"; + case DECODER_STATE_DECODING: + return "DECODING"; + case DECODER_STATE_SEEKING_ACCURATE: + return "SEEKING_ACCURATE"; + case DECODER_STATE_SEEKING_FROMDORMANT: + return "SEEKING_FROMDORMANT"; + case DECODER_STATE_SEEKING_NEXTFRAMESEEKING: + return "DECODER_STATE_SEEKING_NEXTFRAMESEEKING"; + case DECODER_STATE_SEEKING_VIDEOONLY: + return "SEEKING_VIDEOONLY"; + case DECODER_STATE_BUFFERING: + return "BUFFERING"; + case DECODER_STATE_COMPLETED: + return "COMPLETED"; + case DECODER_STATE_SHUTDOWN: + return "SHUTDOWN"; + case DECODER_STATE_LOOPING_DECODING: + return "LOOPING_DECODING"; + default: + MOZ_ASSERT_UNREACHABLE("Invalid state."); + } + return "UNKNOWN"; +} + +const char* MediaDecoderStateMachine::ToStateStr() { + MOZ_ASSERT(OnTaskQueue()); + return ToStateStr(mStateObj->GetState()); +} + +void MediaDecoderStateMachine::VolumeChanged() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::VolumeChanged", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + mMediaSink->SetVolume(mVolume); +} + +RefPtr MediaDecoderStateMachine::Shutdown() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::Shutdown", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + return mStateObj->HandleShutdown(); +} + +void MediaDecoderStateMachine::PlayStateChanged() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::PlayStateChanged", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) { + CancelSuspendTimer(); + } else if (mMinimizePreroll) { + // Once we start playing, we don't want to minimize our prerolling, as we + // assume the user is likely to want to keep playing in future. This needs + // to happen before we invoke StartDecoding(). + mMinimizePreroll = false; + } + + mStateObj->HandlePlayStateChanged(mPlayState); +} + +void MediaDecoderStateMachine::SetVideoDecodeMode(VideoDecodeMode aMode) { + MOZ_ASSERT(NS_IsMainThread()); + nsCOMPtr r = NewRunnableMethod( + "MediaDecoderStateMachine::SetVideoDecodeModeInternal", this, + &MediaDecoderStateMachine::SetVideoDecodeModeInternal, aMode); + OwnerThread()->DispatchStateChange(r.forget()); +} + +void MediaDecoderStateMachine::SetVideoDecodeModeInternal( + VideoDecodeMode aMode) { + MOZ_ASSERT(OnTaskQueue()); + + LOG("SetVideoDecodeModeInternal(), VideoDecodeMode=(%s->%s), " + "mVideoDecodeSuspended=%c", + mVideoDecodeMode == VideoDecodeMode::Normal ? "Normal" : "Suspend", + aMode == VideoDecodeMode::Normal ? "Normal" : "Suspend", + mVideoDecodeSuspended ? 'T' : 'F'); + + // Should not suspend decoding if we don't turn on the pref. 
+ if (!StaticPrefs::media_suspend_background_video_enabled() && + aMode == VideoDecodeMode::Suspend) { + LOG("SetVideoDecodeModeInternal(), early return because preference off and " + "set to Suspend"); + return; + } + + if (aMode == mVideoDecodeMode) { + LOG("SetVideoDecodeModeInternal(), early return because the mode does not " + "change"); + return; + } + + // Set new video decode mode. + mVideoDecodeMode = aMode; + + // Start timer to trigger suspended video decoding. + if (mVideoDecodeMode == VideoDecodeMode::Suspend) { + TimeStamp target = TimeStamp::Now() + SuspendBackgroundVideoDelay(); + + RefPtr self = this; + mVideoDecodeSuspendTimer.Ensure( + target, [=]() { self->OnSuspendTimerResolved(); }, + []() { MOZ_DIAGNOSTIC_ASSERT(false); }); + mOnPlaybackEvent.Notify(MediaPlaybackEvent::StartVideoSuspendTimer); + return; + } + + // Resuming from suspended decoding + + // If suspend timer exists, destroy it. + CancelSuspendTimer(); + + if (mVideoDecodeSuspended) { + auto target = mMediaSink->IsStarted() ? GetClock() : GetMediaTime(); + AdjustByLooping(target); + mStateObj->HandleResumeVideoDecoding(target + detail::RESUME_VIDEO_PREMIUM); + } +} + +void MediaDecoderStateMachine::BufferedRangeUpdated() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::BufferedRangeUpdated", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + // While playing an unseekable stream of unknown duration, mDuration + // is updated as we play. But if data is being downloaded + // faster than played, mDuration won't reflect the end of playable data + // since we haven't played the frame at the end of buffered data. So update + // mDuration here as new data is downloaded to prevent such a lag. + if (mBuffered.Ref().IsInvalid()) { + return; + } + + bool exists; + media::TimeUnit end{mBuffered.Ref().GetEnd(&exists)}; + if (!exists) { + return; + } + + // Use estimated duration from buffer ranges when mDuration is unknown or + // the estimated duration is larger. 
+ if (mDuration.Ref().isNothing() || mDuration.Ref()->IsInfinite() || + end > mDuration.Ref().ref()) { + mDuration = Some(end); + DDLOG(DDLogCategory::Property, "duration_us", + mDuration.Ref()->ToMicroseconds()); + } +} + +RefPtr MediaDecoderStateMachine::Seek( + const SeekTarget& aTarget) { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::Seek", MEDIA_PLAYBACK); + PROFILER_MARKER_UNTYPED("MDSM::Seek", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + // We need to be able to seek in some way + if (!mMediaSeekable && !mMediaSeekableOnlyInBufferedRanges) { + LOGW("Seek() should not be called on a non-seekable media"); + return MediaDecoder::SeekPromise::CreateAndReject(/* aRejectValue = */ true, + __func__); + } + + if (aTarget.IsNextFrame() && !HasVideo()) { + LOGW("Ignore a NextFrameSeekTask on a media file without video track."); + return MediaDecoder::SeekPromise::CreateAndReject(/* aRejectValue = */ true, + __func__); + } + + MOZ_ASSERT(mDuration.Ref().isSome(), "We should have got duration already"); + + return mStateObj->HandleSeek(aTarget); +} + +void MediaDecoderStateMachine::StopMediaSink() { + MOZ_ASSERT(OnTaskQueue()); + if (mMediaSink->IsStarted()) { + LOG("Stop MediaSink"); + mMediaSink->Stop(); + mMediaSinkAudioEndedPromise.DisconnectIfExists(); + mMediaSinkVideoEndedPromise.DisconnectIfExists(); + } +} + +void MediaDecoderStateMachine::RequestAudioData() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::RequestAudioData", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(IsAudioDecoding()); + MOZ_ASSERT(!IsRequestingAudioData()); + MOZ_ASSERT(!IsWaitingAudioData()); + LOGV("Queueing audio task - queued=%zu, decoder-queued=%zu", + AudioQueue().GetSize(), mReader->SizeOfAudioQueueInFrames()); + + PerformanceRecorder perfRecorder(MediaStage::RequestData); + RefPtr self = this; + mReader->RequestAudioData() + ->Then( + OwnerThread(), __func__, + [this, self, perfRecorder(std::move(perfRecorder))]( + const RefPtr& aAudio) mutable { + perfRecorder.Record(); + AUTO_PROFILER_LABEL( + "MediaDecoderStateMachine::RequestAudioData:Resolved", + MEDIA_PLAYBACK); + MOZ_ASSERT(aAudio); + mAudioDataRequest.Complete(); + // audio->GetEndTime() is not always mono-increasing in chained + // ogg. 
+ mDecodedAudioEndTime = + std::max(aAudio->GetEndTime(), mDecodedAudioEndTime); + LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]", + aAudio->mTime.ToMicroseconds(), + aAudio->GetEndTime().ToMicroseconds()); + mStateObj->HandleAudioDecoded(aAudio); + }, + [this, self](const MediaResult& aError) { + AUTO_PROFILER_LABEL( + "MediaDecoderStateMachine::RequestAudioData:Rejected", + MEDIA_PLAYBACK); + LOGV("OnAudioNotDecoded ErrorName=%s Message=%s", + aError.ErrorName().get(), aError.Message().get()); + mAudioDataRequest.Complete(); + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: + mStateObj->HandleWaitingForAudio(); + break; + case NS_ERROR_DOM_MEDIA_CANCELED: + mStateObj->HandleAudioCanceled(); + break; + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + mStateObj->HandleEndOfAudio(); + break; + default: + DecodeError(aError); + } + }) + ->Track(mAudioDataRequest); +} + +void MediaDecoderStateMachine::RequestVideoData( + const media::TimeUnit& aCurrentTime, bool aRequestNextKeyFrame) { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::RequestVideoData", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(IsVideoDecoding()); + MOZ_ASSERT(!IsRequestingVideoData()); + MOZ_ASSERT(!IsWaitingVideoData()); + LOGV( + "Queueing video task - queued=%zu, decoder-queued=%zo" + ", stime=%" PRId64 ", by-pass-skip=%d", + VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(), + aCurrentTime.ToMicroseconds(), mBypassingSkipToNextKeyFrameCheck); + + PerformanceRecorder perfRecorder(MediaStage::RequestData, + Info().mVideo.mImage.height); + RefPtr self = this; + mReader + ->RequestVideoData( + mBypassingSkipToNextKeyFrameCheck ? media::TimeUnit() : aCurrentTime, + mBypassingSkipToNextKeyFrameCheck ? false : aRequestNextKeyFrame) + ->Then( + OwnerThread(), __func__, + [this, self, perfRecorder(std::move(perfRecorder))]( + const RefPtr& aVideo) mutable { + perfRecorder.Record(); + AUTO_PROFILER_LABEL( + "MediaDecoderStateMachine::RequestVideoData:Resolved", + MEDIA_PLAYBACK); + MOZ_ASSERT(aVideo); + mVideoDataRequest.Complete(); + // Handle abnormal or negative timestamps. 
+ mDecodedVideoEndTime = + std::max(mDecodedVideoEndTime, aVideo->GetEndTime()); + LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]", + aVideo->mTime.ToMicroseconds(), + aVideo->GetEndTime().ToMicroseconds()); + mStateObj->HandleVideoDecoded(aVideo); + }, + [this, self](const MediaResult& aError) { + AUTO_PROFILER_LABEL( + "MediaDecoderStateMachine::RequestVideoData:Rejected", + MEDIA_PLAYBACK); + LOGV("OnVideoNotDecoded ErrorName=%s Message=%s", + aError.ErrorName().get(), aError.Message().get()); + mVideoDataRequest.Complete(); + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: + mStateObj->HandleWaitingForVideo(); + break; + case NS_ERROR_DOM_MEDIA_CANCELED: + mStateObj->HandleVideoCanceled(); + break; + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + mStateObj->HandleEndOfVideo(); + break; + default: + DecodeError(aError); + } + }) + ->Track(mVideoDataRequest); +} + +void MediaDecoderStateMachine::WaitForData(MediaData::Type aType) { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(aType == MediaData::Type::AUDIO_DATA || + aType == MediaData::Type::VIDEO_DATA); + RefPtr self = this; + if (aType == MediaData::Type::AUDIO_DATA) { + mReader->WaitForData(MediaData::Type::AUDIO_DATA) + ->Then( + OwnerThread(), __func__, + [self](MediaData::Type aType) { + AUTO_PROFILER_LABEL( + "MediaDecoderStateMachine::WaitForData:AudioResolved", + MEDIA_PLAYBACK); + self->mAudioWaitRequest.Complete(); + MOZ_ASSERT(aType == MediaData::Type::AUDIO_DATA); + self->mStateObj->HandleAudioWaited(aType); + }, + [self](const WaitForDataRejectValue& aRejection) { + AUTO_PROFILER_LABEL( + "MediaDecoderStateMachine::WaitForData:AudioRejected", + MEDIA_PLAYBACK); + self->mAudioWaitRequest.Complete(); + self->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA); + }) + ->Track(mAudioWaitRequest); + } else { + mReader->WaitForData(MediaData::Type::VIDEO_DATA) + ->Then( + OwnerThread(), __func__, + [self](MediaData::Type aType) { + AUTO_PROFILER_LABEL( + "MediaDecoderStateMachine::WaitForData:VideoResolved", + MEDIA_PLAYBACK); + self->mVideoWaitRequest.Complete(); + MOZ_ASSERT(aType == MediaData::Type::VIDEO_DATA); + self->mStateObj->HandleVideoWaited(aType); + }, + [self](const WaitForDataRejectValue& aRejection) { + AUTO_PROFILER_LABEL( + "MediaDecoderStateMachine::WaitForData:VideoRejected", + MEDIA_PLAYBACK); + self->mVideoWaitRequest.Complete(); + self->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA); + }) + ->Track(mVideoWaitRequest); + } +} + +nsresult MediaDecoderStateMachine::StartMediaSink() { + MOZ_ASSERT(OnTaskQueue()); + + if (mMediaSink->IsStarted()) { + return NS_OK; + } + + mAudioCompleted = false; + const auto startTime = GetMediaTime(); + LOG("StartMediaSink, mediaTime=%" PRId64, startTime.ToMicroseconds()); + nsresult rv = mMediaSink->Start(startTime, Info()); + StreamNameChanged(); + + auto videoPromise = mMediaSink->OnEnded(TrackInfo::kVideoTrack); + auto audioPromise = mMediaSink->OnEnded(TrackInfo::kAudioTrack); + + if (audioPromise) { + audioPromise + ->Then(OwnerThread(), __func__, this, + &MediaDecoderStateMachine::OnMediaSinkAudioComplete, + &MediaDecoderStateMachine::OnMediaSinkAudioError) + ->Track(mMediaSinkAudioEndedPromise); + } + if (videoPromise) { + videoPromise + ->Then(OwnerThread(), __func__, this, + &MediaDecoderStateMachine::OnMediaSinkVideoComplete, + &MediaDecoderStateMachine::OnMediaSinkVideoError) + ->Track(mMediaSinkVideoEndedPromise); + } + // Remember the initial offset when playback starts. 
This will be used + // to calculate the rate at which bytes are consumed as playback moves on. + RefPtr sample = mAudioQueue.PeekFront(); + mPlaybackOffset = sample ? sample->mOffset : 0; + sample = mVideoQueue.PeekFront(); + if (sample && sample->mOffset > mPlaybackOffset) { + mPlaybackOffset = sample->mOffset; + } + return rv; +} + +bool MediaDecoderStateMachine::HasLowDecodedAudio() { + MOZ_ASSERT(OnTaskQueue()); + return IsAudioDecoding() && + GetDecodedAudioDuration() < + EXHAUSTED_DATA_MARGIN.MultDouble(mPlaybackRate); +} + +bool MediaDecoderStateMachine::HasLowDecodedVideo() { + MOZ_ASSERT(OnTaskQueue()); + return IsVideoDecoding() && + VideoQueue().GetSize() < + static_cast(floorl(LOW_VIDEO_FRAMES * mPlaybackRate)); +} + +bool MediaDecoderStateMachine::HasLowDecodedData() { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(mReader->UseBufferingHeuristics()); + return HasLowDecodedAudio() || HasLowDecodedVideo(); +} + +bool MediaDecoderStateMachine::OutOfDecodedAudio() { + MOZ_ASSERT(OnTaskQueue()); + return IsAudioDecoding() && !AudioQueue().IsFinished() && + AudioQueue().GetSize() == 0 && + !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack); +} + +bool MediaDecoderStateMachine::HasLowBufferedData() { + MOZ_ASSERT(OnTaskQueue()); + return HasLowBufferedData(detail::LOW_BUFFER_THRESHOLD); +} + +bool MediaDecoderStateMachine::HasLowBufferedData(const TimeUnit& aThreshold) { + MOZ_ASSERT(OnTaskQueue()); + + // If we don't have a duration, mBuffered is probably not going to have + // a useful buffered range. Return false here so that we don't get stuck in + // buffering mode for live streams. + if (Duration().IsInfinite()) { + return false; + } + + if (mBuffered.Ref().IsInvalid()) { + return false; + } + + // We are never low in decoded data when we don't have audio/video or have + // decoded all audio/video samples. + TimeUnit endOfDecodedVideo = (HasVideo() && !VideoQueue().IsFinished()) + ? mDecodedVideoEndTime + : TimeUnit::FromNegativeInfinity(); + TimeUnit endOfDecodedAudio = (HasAudio() && !AudioQueue().IsFinished()) + ? mDecodedAudioEndTime + : TimeUnit::FromNegativeInfinity(); + + auto endOfDecodedData = std::max(endOfDecodedVideo, endOfDecodedAudio); + if (Duration() < endOfDecodedData) { + // Our duration is not up to date. No point buffering. + return false; + } + + if (endOfDecodedData.IsInfinite()) { + // Have decoded all samples. No point buffering. + return false; + } + + auto start = endOfDecodedData; + auto end = std::min(GetMediaTime() + aThreshold, Duration()); + if (start >= end) { + // Duration of decoded samples is greater than our threshold. + return false; + } + media::TimeInterval interval(start, end); + return !mBuffered.Ref().Contains(interval); +} + +void MediaDecoderStateMachine::EnqueueFirstFrameLoadedEvent() { + MOZ_ASSERT(OnTaskQueue()); + // Track value of mSentFirstFrameLoadedEvent from before updating it + bool firstFrameBeenLoaded = mSentFirstFrameLoadedEvent; + mSentFirstFrameLoadedEvent = true; + MediaDecoderEventVisibility visibility = + firstFrameBeenLoaded ? 
MediaDecoderEventVisibility::Suppressed + : MediaDecoderEventVisibility::Observable; + mFirstFrameLoadedEvent.Notify(UniquePtr(new MediaInfo(Info())), + visibility); +} + +void MediaDecoderStateMachine::FinishDecodeFirstFrame() { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(!mSentFirstFrameLoadedEvent); + LOG("FinishDecodeFirstFrame"); + + mMediaSink->Redraw(Info().mVideo); + + LOG("Media duration %" PRId64 ", mediaSeekable=%d", + Duration().ToMicroseconds(), mMediaSeekable); + + // Get potentially updated metadata + mReader->ReadUpdatedMetadata(mInfo.ptr()); + + EnqueueFirstFrameLoadedEvent(); +} + +RefPtr MediaDecoderStateMachine::FinishShutdown() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::FinishShutdown", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + LOG("Shutting down state machine task queue"); + return OwnerThread()->BeginShutdown(); +} + +void MediaDecoderStateMachine::RunStateMachine() { + MOZ_ASSERT(OnTaskQueue()); + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::RunStateMachine", + MEDIA_PLAYBACK); + mDelayedScheduler.Reset(); // Must happen on state machine task queue. + mDispatchedStateMachine = false; + mStateObj->Step(); +} + +void MediaDecoderStateMachine::ResetDecode(const TrackSet& aTracks) { + MOZ_ASSERT(OnTaskQueue()); + LOG("MediaDecoderStateMachine::Reset"); + + // Assert that aTracks specifies to reset the video track because we + // don't currently support resetting just the audio track. + MOZ_ASSERT(aTracks.contains(TrackInfo::kVideoTrack)); + + if (aTracks.contains(TrackInfo::kVideoTrack)) { + mDecodedVideoEndTime = TimeUnit::Zero(); + mVideoCompleted = false; + VideoQueue().Reset(); + mVideoDataRequest.DisconnectIfExists(); + mVideoWaitRequest.DisconnectIfExists(); + } + + if (aTracks.contains(TrackInfo::kAudioTrack)) { + mDecodedAudioEndTime = TimeUnit::Zero(); + mAudioCompleted = false; + AudioQueue().Reset(); + mAudioDataRequest.DisconnectIfExists(); + mAudioWaitRequest.DisconnectIfExists(); + } + + mReader->ResetDecode(aTracks); +} + +media::TimeUnit MediaDecoderStateMachine::GetClock( + TimeStamp* aTimeStamp) const { + MOZ_ASSERT(OnTaskQueue()); + auto clockTime = mMediaSink->GetPosition(aTimeStamp); + // This fails on Windows some times, see 1765563 +#if defined(XP_WIN) + NS_ASSERTION(GetMediaTime() <= clockTime, "Clock should go forwards."); +#else + MOZ_ASSERT(GetMediaTime() <= clockTime, "Clock should go forwards."); +#endif + return clockTime; +} + +void MediaDecoderStateMachine::UpdatePlaybackPositionPeriodically() { + MOZ_ASSERT(OnTaskQueue()); + + if (!IsPlaying()) { + return; + } + + // Cap the current time to the larger of the audio and video end time. + // This ensures that if we're running off the system clock, we don't + // advance the clock to after the media end time. + if (VideoEndTime() > TimeUnit::Zero() || AudioEndTime() > TimeUnit::Zero()) { + auto clockTime = GetClock(); + // Once looping was turned on, the time is probably larger than the duration + // of the media track, so the time over the end should be corrected. + AdjustByLooping(clockTime); + bool loopback = clockTime < GetMediaTime() && mLooping; + if (loopback && mBypassingSkipToNextKeyFrameCheck) { + LOG("media has looped back, no longer bypassing skip-to-next-key-frame"); + mBypassingSkipToNextKeyFrameCheck = false; + } + + // Skip frames up to the frame at the playback position, and figure out + // the time remaining until it's time to display the next frame and drop + // the current frame. 
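// A plain-integer version of the clamp performed by the code that follows:
// the published position is the sink clock capped at the furthest end time
// actually rendered, and it only moves backwards when playback has looped.
// All values are in microseconds and all names are illustrative:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int64_t ComputeNewPositionUs(int64_t aClockUs, int64_t aAudioEndUs,
                             int64_t aVideoEndUs, int64_t aCurrentUs,
                             bool aLoopedBack) {
  const int64_t maxEndUs = std::max(aAudioEndUs, aVideoEndUs);
  const int64_t clamped = std::min(aClockUs, maxEndUs);
  if (aLoopedBack || clamped > aCurrentUs) {
    return clamped;
  }
  return aCurrentUs;  // keep the old position rather than step backwards
}

int main() {
  // Clock has run slightly past the media rendered so far: cap at 1s.
  std::printf("%lld\n",
              static_cast<long long>(ComputeNewPositionUs(
                  1'050'000, 1'000'000, 980'000, 900'000, false)));
  // Looping: the clock wrapped to a smaller value and we follow it.
  std::printf("%lld\n",
              static_cast<long long>(ComputeNewPositionUs(
                  40'000, 2'000'000, 2'000'000, 1'990'000, true)));
  return 0;
}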
+ NS_ASSERTION(clockTime >= TimeUnit::Zero(), + "Should have positive clock time."); + + // These will be non -1 if we've displayed a video frame, or played an audio + // frame. + auto maxEndTime = std::max(VideoEndTime(), AudioEndTime()); + auto t = std::min(clockTime, maxEndTime); + // FIXME: Bug 1091422 - chained ogg files hit this assertion. + // MOZ_ASSERT(t >= GetMediaTime()); + if (loopback || t > GetMediaTime()) { + UpdatePlaybackPosition(t); + } + } + // Note we have to update playback position before releasing the monitor. + // Otherwise, MediaDecoder::AddOutputTrack could kick in when we are outside + // the monitor and get a staled value from GetCurrentTimeUs() which hits the + // assertion in GetClock(). + + int64_t delay = std::max( + 1, static_cast(AUDIO_DURATION_USECS / mPlaybackRate)); + ScheduleStateMachineIn(TimeUnit::FromMicroseconds(delay)); + + // Notify the listener as we progress in the playback offset. Note it would + // be too intensive to send notifications for each popped audio/video sample. + // It is good enough to send 'PlaybackProgressed' events every 40us (defined + // by AUDIO_DURATION_USECS), and we ensure 'PlaybackProgressed' events are + // always sent after 'PlaybackStarted' and before 'PlaybackStopped'. + mOnPlaybackEvent.Notify(MediaPlaybackEvent{ + MediaPlaybackEvent::PlaybackProgressed, mPlaybackOffset}); +} + +void MediaDecoderStateMachine::ScheduleStateMachine() { + MOZ_ASSERT(OnTaskQueue()); + if (mDispatchedStateMachine) { + return; + } + mDispatchedStateMachine = true; + + nsresult rv = OwnerThread()->Dispatch( + NewRunnableMethod("MediaDecoderStateMachine::RunStateMachine", this, + &MediaDecoderStateMachine::RunStateMachine)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; +} + +void MediaDecoderStateMachine::ScheduleStateMachineIn(const TimeUnit& aTime) { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::ScheduleStateMachineIn", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); // mDelayedScheduler.Ensure() may Disconnect() + // the promise, which must happen on the state + // machine task queue. + MOZ_ASSERT(aTime > TimeUnit::Zero()); + if (mDispatchedStateMachine) { + return; + } + + TimeStamp target = TimeStamp::Now() + aTime.ToTimeDuration(); + + // It is OK to capture 'this' without causing UAF because the callback + // always happens before shutdown. + RefPtr self = this; + mDelayedScheduler.Ensure( + target, + [self]() { + self->mDelayedScheduler.CompleteRequest(); + self->RunStateMachine(); + }, + []() { MOZ_DIAGNOSTIC_ASSERT(false); }); +} + +bool MediaDecoderStateMachine::IsStateMachineScheduled() const { + MOZ_ASSERT(OnTaskQueue()); + return mDispatchedStateMachine || mDelayedScheduler.IsScheduled(); +} + +void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate) { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(aPlaybackRate != 0, "Should be handled by MediaDecoder::Pause()"); + + mPlaybackRate = aPlaybackRate; + mMediaSink->SetPlaybackRate(mPlaybackRate); + + // Schedule next cycle to check if we can stop prerolling. 
+ ScheduleStateMachine(); +} + +void MediaDecoderStateMachine::PreservesPitchChanged() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::PreservesPitchChanged", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + mMediaSink->SetPreservesPitch(mPreservesPitch); +} + +void MediaDecoderStateMachine::LoopingChanged() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::LoopingChanged", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + LOGV("LoopingChanged, looping=%d", mLooping.Ref()); + PROFILER_MARKER_TEXT("MDSM::LoopingChanged", MEDIA_PLAYBACK, {}, + mLooping ? "true"_ns : "false"_ns); + if (mSeamlessLoopingAllowed) { + mStateObj->HandleLoopingChanged(); + } +} + +void MediaDecoderStateMachine::StreamNameChanged() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::StreamNameChanged", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + mMediaSink->SetStreamName(mStreamName); +} + +void MediaDecoderStateMachine::UpdateOutputCaptured() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::UpdateOutputCaptured", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT_IF( + mOutputCaptureState == MediaDecoder::OutputCaptureState::Capture, + mOutputDummyTrack.Ref()); + + // Reset these flags so they are consistent with the status of the sink. + // TODO: Move these flags into MediaSink to improve cohesion so we don't need + // to reset these flags when switching MediaSinks. + mAudioCompleted = false; + mVideoCompleted = false; + + // Don't create a new media sink if we're still suspending media sink. + if (!mIsMediaSinkSuspended) { + const bool wasPlaying = IsPlaying(); + // Stop and shut down the existing sink. + StopMediaSink(); + mMediaSink->Shutdown(); + + // Create a new sink according to whether output is captured. + mMediaSink = CreateMediaSink(); + if (wasPlaying) { + DebugOnly rv = StartMediaSink(); + MOZ_ASSERT(NS_SUCCEEDED(rv)); + } + } + + // Don't buffer as much when audio is captured because we don't need to worry + // about high latency audio devices. + mAmpleAudioThreshold = + mOutputCaptureState != MediaDecoder::OutputCaptureState::None + ? detail::AMPLE_AUDIO_THRESHOLD / 2 + : detail::AMPLE_AUDIO_THRESHOLD; + + mStateObj->HandleAudioCaptured(); +} + +void MediaDecoderStateMachine::OutputPrincipalChanged() { + MOZ_ASSERT(OnTaskQueue()); + mCanonicalOutputPrincipal = mOutputPrincipal; +} + +RefPtr MediaDecoderStateMachine::InvokeSetSink( + const RefPtr& aSink) { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(aSink); + + return InvokeAsync(OwnerThread(), this, __func__, + &MediaDecoderStateMachine::SetSink, aSink); +} + +RefPtr MediaDecoderStateMachine::SetSink( + RefPtr aDevice) { + MOZ_ASSERT(OnTaskQueue()); + if (mIsMediaSinkSuspended) { + // Don't create a new media sink when suspended. 
+ return GenericPromise::CreateAndResolve(true, __func__); + } + + return mMediaSink->SetAudioDevice(std::move(aDevice)); +} + +void MediaDecoderStateMachine::InvokeSuspendMediaSink() { + MOZ_ASSERT(NS_IsMainThread()); + + nsresult rv = OwnerThread()->Dispatch( + NewRunnableMethod("MediaDecoderStateMachine::SuspendMediaSink", this, + &MediaDecoderStateMachine::SuspendMediaSink)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; +} + +void MediaDecoderStateMachine::SuspendMediaSink() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::SuspendMediaSink", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + if (mIsMediaSinkSuspended) { + return; + } + LOG("SuspendMediaSink"); + mIsMediaSinkSuspended = true; + StopMediaSink(); + mMediaSink->Shutdown(); +} + +void MediaDecoderStateMachine::InvokeResumeMediaSink() { + MOZ_ASSERT(NS_IsMainThread()); + + nsresult rv = OwnerThread()->Dispatch( + NewRunnableMethod("MediaDecoderStateMachine::ResumeMediaSink", this, + &MediaDecoderStateMachine::ResumeMediaSink)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; +} + +void MediaDecoderStateMachine::ResumeMediaSink() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::ResumeMediaSink", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + if (!mIsMediaSinkSuspended) { + return; + } + LOG("ResumeMediaSink"); + mIsMediaSinkSuspended = false; + if (!mMediaSink->IsStarted()) { + mMediaSink = CreateMediaSink(); + MaybeStartPlayback(); + } +} + +void MediaDecoderStateMachine::UpdateSecondaryVideoContainer() { + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::UpdateSecondaryVideoContainer", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + MOZ_DIAGNOSTIC_ASSERT(mMediaSink); + mMediaSink->SetSecondaryVideoContainer(mSecondaryVideoContainer.Ref()); + mOnSecondaryVideoContainerInstalled.Notify(mSecondaryVideoContainer.Ref()); +} + +TimeUnit MediaDecoderStateMachine::AudioEndTime() const { + MOZ_ASSERT(OnTaskQueue()); + if (mMediaSink->IsStarted()) { + return mMediaSink->GetEndTime(TrackInfo::kAudioTrack); + } + return GetMediaTime(); +} + +TimeUnit MediaDecoderStateMachine::VideoEndTime() const { + MOZ_ASSERT(OnTaskQueue()); + if (mMediaSink->IsStarted()) { + return mMediaSink->GetEndTime(TrackInfo::kVideoTrack); + } + return GetMediaTime(); +} + +void MediaDecoderStateMachine::OnMediaSinkVideoComplete() { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(HasVideo()); + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkVideoComplete", + MEDIA_PLAYBACK); + LOG("[%s]", __func__); + + mMediaSinkVideoEndedPromise.Complete(); + mVideoCompleted = true; + ScheduleStateMachine(); +} + +void MediaDecoderStateMachine::OnMediaSinkVideoError() { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(HasVideo()); + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkVideoError", + MEDIA_PLAYBACK); + LOGE("[%s]", __func__); + + mMediaSinkVideoEndedPromise.Complete(); + mVideoCompleted = true; + if (HasAudio()) { + return; + } + DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__)); +} + +void MediaDecoderStateMachine::OnMediaSinkAudioComplete() { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(HasAudio()); + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkAudioComplete", + MEDIA_PLAYBACK); + LOG("[%s]", __func__); + + mMediaSinkAudioEndedPromise.Complete(); + mAudioCompleted = true; + // To notify PlaybackEnded as soon as possible. + ScheduleStateMachine(); + + // Report OK to Decoder Doctor (to know if issue may have been resolved). 
+ mOnDecoderDoctorEvent.Notify( + DecoderDoctorEvent{DecoderDoctorEvent::eAudioSinkStartup, NS_OK}); +} + +void MediaDecoderStateMachine::OnMediaSinkAudioError(nsresult aResult) { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(HasAudio()); + AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkAudioError", + MEDIA_PLAYBACK); + LOGE("[%s]", __func__); + + mMediaSinkAudioEndedPromise.Complete(); + mAudioCompleted = true; + + // Result should never be NS_OK in this *error* handler. Report to Dec-Doc. + MOZ_ASSERT(NS_FAILED(aResult)); + mOnDecoderDoctorEvent.Notify( + DecoderDoctorEvent{DecoderDoctorEvent::eAudioSinkStartup, aResult}); + + // Make the best effort to continue playback when there is video. + if (HasVideo()) { + return; + } + + // Otherwise notify media decoder/element about this error for it makes + // no sense to play an audio-only file without sound output. + DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__)); +} + +uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const { + MOZ_ASSERT(OnTaskQueue()); + return mReader->VideoIsHardwareAccelerated() + ? std::max(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE) + : std::max(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE); +} + +void MediaDecoderStateMachine::GetDebugInfo( + dom::MediaDecoderStateMachineDebugInfo& aInfo) { + MOZ_ASSERT(OnTaskQueue()); + aInfo.mDuration = + mDuration.Ref() ? mDuration.Ref().ref().ToMicroseconds() : -1; + aInfo.mMediaTime = GetMediaTime().ToMicroseconds(); + aInfo.mClock = mMediaSink->IsStarted() ? GetClock().ToMicroseconds() : -1; + aInfo.mPlayState = int32_t(mPlayState.Ref()); + aInfo.mSentFirstFrameLoadedEvent = mSentFirstFrameLoadedEvent; + aInfo.mIsPlaying = IsPlaying(); + CopyUTF8toUTF16(MakeStringSpan(AudioRequestStatus()), + aInfo.mAudioRequestStatus); + CopyUTF8toUTF16(MakeStringSpan(VideoRequestStatus()), + aInfo.mVideoRequestStatus); + aInfo.mDecodedAudioEndTime = mDecodedAudioEndTime.ToMicroseconds(); + aInfo.mDecodedVideoEndTime = mDecodedVideoEndTime.ToMicroseconds(); + aInfo.mAudioCompleted = mAudioCompleted; + aInfo.mVideoCompleted = mVideoCompleted; + mStateObj->GetDebugInfo(aInfo.mStateObj); + mMediaSink->GetDebugInfo(aInfo.mMediaSink); +} + +RefPtr MediaDecoderStateMachine::RequestDebugInfo( + dom::MediaDecoderStateMachineDebugInfo& aInfo) { + RefPtr p = new GenericPromise::Private(__func__); + RefPtr self = this; + nsresult rv = OwnerThread()->Dispatch( + NS_NewRunnableFunction("MediaDecoderStateMachine::RequestDebugInfo", + [self, p, &aInfo]() { + self->GetDebugInfo(aInfo); + p->Resolve(true, __func__); + }), + AbstractThread::TailDispatch); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; + return p; +} + +class VideoQueueMemoryFunctor : public nsDequeFunctor { + public: + VideoQueueMemoryFunctor() : mSize(0) {} + + MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf); + + virtual void operator()(VideoData* aObject) override { + mSize += aObject->SizeOfIncludingThis(MallocSizeOf); + } + + size_t mSize; +}; + +class AudioQueueMemoryFunctor : public nsDequeFunctor { + public: + AudioQueueMemoryFunctor() : mSize(0) {} + + MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf); + + virtual void operator()(AudioData* aObject) override { + mSize += aObject->SizeOfIncludingThis(MallocSizeOf); + } + + size_t mSize; +}; + +size_t MediaDecoderStateMachine::SizeOfVideoQueue() const { + VideoQueueMemoryFunctor functor; + mVideoQueue.LockedForEach(functor); + return functor.mSize; +} + +size_t MediaDecoderStateMachine::SizeOfAudioQueue() const { + AudioQueueMemoryFunctor functor; + 
mAudioQueue.LockedForEach(functor); + return functor.mSize; +} + +const char* MediaDecoderStateMachine::AudioRequestStatus() const { + MOZ_ASSERT(OnTaskQueue()); + if (IsRequestingAudioData()) { + MOZ_DIAGNOSTIC_ASSERT(!IsWaitingAudioData()); + return "pending"; + } + + if (IsWaitingAudioData()) { + return "waiting"; + } + return "idle"; +} + +const char* MediaDecoderStateMachine::VideoRequestStatus() const { + MOZ_ASSERT(OnTaskQueue()); + if (IsRequestingVideoData()) { + MOZ_DIAGNOSTIC_ASSERT(!IsWaitingVideoData()); + return "pending"; + } + + if (IsWaitingVideoData()) { + return "waiting"; + } + return "idle"; +} + +void MediaDecoderStateMachine::OnSuspendTimerResolved() { + LOG("OnSuspendTimerResolved"); + mVideoDecodeSuspendTimer.CompleteRequest(); + mStateObj->HandleVideoSuspendTimeout(); +} + +void MediaDecoderStateMachine::CancelSuspendTimer() { + LOG("CancelSuspendTimer: State: %s, Timer.IsScheduled: %c", + ToStateStr(mStateObj->GetState()), + mVideoDecodeSuspendTimer.IsScheduled() ? 'T' : 'F'); + MOZ_ASSERT(OnTaskQueue()); + if (mVideoDecodeSuspendTimer.IsScheduled()) { + mOnPlaybackEvent.Notify(MediaPlaybackEvent::CancelVideoSuspendTimer); + } + mVideoDecodeSuspendTimer.Reset(); +} + +void MediaDecoderStateMachine::AdjustByLooping(media::TimeUnit& aTime) const { + MOZ_ASSERT(OnTaskQueue()); + + // No need to adjust time. + if (mOriginalDecodedDuration == media::TimeUnit::Zero()) { + return; + } + + // There are situations where we need to perform subtraction instead of modulo + // to accurately adjust the clock. When we are not in a state of seamless + // looping, it is usually necessary to normalize the clock time within the + // range of [0, duration]. However, if the current clock time is greater than + // the duration (i.e., duration+1) and not in looping, we should not adjust it + // to 1 as we are not looping back to the starting position. Instead, we + // should leave the clock time unchanged and trim it later to match the + // maximum duration time. + if (mStateObj->GetState() != DECODER_STATE_LOOPING_DECODING) { + // Use the smaller offset rather than the larger one, as the larger offset + // indicates the next round of looping. For example, if the duration is X + // and the playback is currently in the third round of looping, both + // queues will have an offset of 3X. However, if the audio decoding is + // faster and the fourth round of data has already been added to the audio + // queue, the audio offset will become 4X. Since playback is still in the + // third round, we should use the smaller offset of 3X to adjust the time. + TimeUnit offset = TimeUnit::FromInfinity(); + if (HasAudio()) { + offset = std::min(AudioQueue().GetOffset(), offset); + } + if (HasVideo()) { + offset = std::min(VideoQueue().GetOffset(), offset); + } + if (aTime > offset) { + aTime -= offset; + return; + } + } + + // When seamless looping happens at least once, it doesn't matter if we're + // looping or not. 
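// Putting the two branches of AdjustByLooping() together (the offset
// subtraction above and the wrap-around applied right below) with plain
// microsecond integers; a standalone sketch with illustrative names:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int64_t AdjustByLoopingUs(int64_t aClockUs, int64_t aDurationUs,
                          int64_t aAudioOffsetUs, int64_t aVideoOffsetUs,
                          bool aInLoopingState) {
  if (aDurationUs == 0) {
    return aClockUs;  // looping never happened; nothing to adjust
  }
  if (!aInLoopingState) {
    // Use the smaller of the two queue offsets: the round of looping the
    // playback is actually in.
    const int64_t offset = std::min(aAudioOffsetUs, aVideoOffsetUs);
    if (aClockUs > offset) {
      return aClockUs - offset;
    }
  }
  return aClockUs % aDurationUs;
}

int main() {
  // Third round of looping a 10s resource: both queues carry a 30s offset,
  // audio may already be decoding the fourth round (40s offset). Both calls
  // normalize the 32s clock to 2s into the resource.
  std::printf("%lld\n", static_cast<long long>(AdjustByLoopingUs(
                            32'000'000, 10'000'000, 40'000'000, 30'000'000,
                            /* aInLoopingState = */ false)));
  std::printf("%lld\n", static_cast<long long>(AdjustByLoopingUs(
                            32'000'000, 10'000'000, 40'000'000, 30'000'000,
                            /* aInLoopingState = */ true)));
  return 0;
}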
+ aTime = aTime % mOriginalDecodedDuration; +} + +bool MediaDecoderStateMachine::IsInSeamlessLooping() const { + return mLooping && mSeamlessLoopingAllowed; +} + +bool MediaDecoderStateMachine::HasLastDecodedData(MediaData::Type aType) { + MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA || + aType == MediaData::Type::VIDEO_DATA); + if (aType == MediaData::Type::AUDIO_DATA) { + return mDecodedAudioEndTime != TimeUnit::Zero(); + } + return mDecodedVideoEndTime != TimeUnit::Zero(); +} + +bool MediaDecoderStateMachine::IsCDMProxySupported(CDMProxy* aProxy) { +#ifdef MOZ_WMF_CDM + MOZ_ASSERT(aProxy); + // This proxy only works with the external state machine. + return !aProxy->AsWMFCDMProxy(); +#else + return true; +#endif +} + +} // namespace mozilla + +// avoid redefined macro in unified build +#undef LOG +#undef LOGV +#undef LOGW +#undef LOGE +#undef SLOGW +#undef SLOGE +#undef NS_DispatchToMainThread diff --git a/dom/media/MediaDecoderStateMachine.h b/dom/media/MediaDecoderStateMachine.h new file mode 100644 index 0000000000..bcedf1790a --- /dev/null +++ b/dom/media/MediaDecoderStateMachine.h @@ -0,0 +1,570 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#if !defined(MediaDecoderStateMachine_h__) +# define MediaDecoderStateMachine_h__ + +# include "AudioDeviceInfo.h" +# include "ImageContainer.h" +# include "MediaDecoder.h" +# include "MediaDecoderOwner.h" +# include "MediaDecoderStateMachineBase.h" +# include "MediaFormatReader.h" +# include "MediaQueue.h" +# include "MediaSink.h" +# include "MediaStatistics.h" +# include "MediaTimer.h" +# include "SeekJob.h" +# include "mozilla/Attributes.h" +# include "mozilla/ReentrantMonitor.h" +# include "mozilla/StateMirroring.h" +# include "nsThreadUtils.h" + +namespace mozilla { + +class AbstractThread; +class AudioSegment; +class DecodedStream; +class DOMMediaStream; +class ReaderProxy; +class TaskQueue; + +extern LazyLogModule gMediaDecoderLog; + +DDLoggedTypeDeclName(MediaDecoderStateMachine); + +/* + +Each media element for a media file has one thread called the "audio thread". + +The audio thread writes the decoded audio data to the audio +hardware. This is done in a separate thread to ensure that the +audio hardware gets a constant stream of data without +interruption due to decoding or display. At some point +AudioStream will be refactored to have a callback interface +where it asks for data and this thread will no longer be +needed. + +The element/state machine also has a TaskQueue which runs in a +SharedThreadPool that is shared with all other elements/decoders. The state +machine dispatches tasks to this to call into the MediaDecoderReader to +request decoded audio or video data. The Reader will callback with decoded +sampled when it has them available, and the state machine places the decoded +samples into its queues for the consuming threads to pull from. + +The MediaDecoderReader can choose to decode asynchronously, or synchronously +and return requested samples synchronously inside it's Request*Data() +functions via callback. Asynchronous decoding is preferred, and should be +used for any new readers. + +Synchronisation of state between the thread is done via a monitor owned +by MediaDecoder. 
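A rough sketch of that asynchronous request/callback flow, using a plain
std::deque of std::function tasks in place of the real TaskQueue and
MozPromise machinery (every name below is illustrative only):

  #include <cstdint>
  #include <cstdio>
  #include <deque>
  #include <functional>

  using Task = std::function<void()>;

  struct SimpleTaskQueue {
    std::deque<Task> mTasks;
    void Dispatch(Task aTask) { mTasks.push_back(std::move(aTask)); }
    void DrainOne() {
      if (!mTasks.empty()) {
        Task t = std::move(mTasks.front());
        mTasks.pop_front();
        t();
      }
    }
  };

  struct SketchReader {
    SimpleTaskQueue* mQueue;
    // The caller never blocks: decoding happens later on the task queue and
    // the decoded sample is handed back through the callback.
    void RequestAudioData(std::function<void(int64_t)> aOnDecoded) {
      mQueue->Dispatch([aOnDecoded] { aOnDecoded(40000); });  // 40ms sample
    }
  };

  int main() {
    SimpleTaskQueue queue;
    SketchReader reader{&queue};
    reader.RequestAudioData([](int64_t aTimeUs) {
      std::printf("decoded sample at %lld us\n",
                  static_cast<long long>(aTimeUs));
    });
    queue.DrainOne();  // runs the decode task and delivers the callback
    return 0;
  }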
+ +The lifetime of the audio thread is controlled by the state machine when +it runs on the shared state machine thread. When playback needs to occur +the audio thread is created and an event dispatched to run it. The audio +thread exits when audio playback is completed or no longer required. + +A/V synchronisation is handled by the state machine. It examines the audio +playback time and compares this to the next frame in the queue of video +frames. If it is time to play the video frame it is then displayed, otherwise +it schedules the state machine to run again at the time of the next frame. + +Frame skipping is done in the following ways: + + 1) The state machine will skip all frames in the video queue whose + display time is less than the current audio time. This ensures + the correct frame for the current time is always displayed. + + 2) The decode tasks will stop decoding interframes and read to the + next keyframe if it determines that decoding the remaining + interframes will cause playback issues. It detects this by: + a) If the amount of audio data in the audio queue drops + below a threshold whereby audio may start to skip. + b) If the video queue drops below a threshold where it + will be decoding video data that won't be displayed due + to the decode thread dropping the frame immediately. + TODO: In future we should only do this when the Reader is decoding + synchronously. + +When hardware accelerated graphics is not available, YCbCr conversion +is done on the decode task queue when video frames are decoded. + +The decode task queue pushes decoded audio and videos frames into two +separate queues - one for audio and one for video. These are kept +separate to make it easy to constantly feed audio data to the audio +hardware while allowing frame skipping of video data. These queues are +threadsafe, and neither the decode, audio, or state machine should +be able to monopolize them, and cause starvation of the other threads. + +Both queues are bounded by a maximum size. When this size is reached +the decode tasks will no longer request video or audio depending on the +queue that has reached the threshold. If both queues are full, no more +decode tasks will be dispatched to the decode task queue, so other +decoders will have an opportunity to run. + +During playback the audio thread will be idle (via a Wait() on the +monitor) if the audio queue is empty. Otherwise it constantly pops +audio data off the queue and plays it with a blocking write to the audio +hardware (via AudioStream). 
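In rough pseudo-form, the bounded-queue gating described above amounts to a
check of the following shape (simplified: the real predicates,
HaveEnoughDecodedAudio() and HaveEnoughDecodedVideo(), also factor in the
playback rate and several edge cases):

  if (GetDecodedAudioDuration() < mAmpleAudioThreshold) {
    RequestAudioData();  // the audio queue is running low, ask for more
  }
  // ...otherwise stop requesting until OnAudioPopped() reports consumption.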
+ +*/ +class MediaDecoderStateMachine + : public MediaDecoderStateMachineBase, + public DecoderDoctorLifeLogger { + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDecoderStateMachine, override) + + using TrackSet = MediaFormatReader::TrackSet; + + public: + using FrameID = mozilla::layers::ImageContainer::FrameID; + MediaDecoderStateMachine(MediaDecoder* aDecoder, MediaFormatReader* aReader); + + nsresult Init(MediaDecoder* aDecoder) override; + + // Enumeration for the valid decoding states + enum State { + DECODER_STATE_DECODING_METADATA, + DECODER_STATE_DORMANT, + DECODER_STATE_DECODING_FIRSTFRAME, + DECODER_STATE_DECODING, + DECODER_STATE_LOOPING_DECODING, + DECODER_STATE_SEEKING_ACCURATE, + DECODER_STATE_SEEKING_FROMDORMANT, + DECODER_STATE_SEEKING_NEXTFRAMESEEKING, + DECODER_STATE_SEEKING_VIDEOONLY, + DECODER_STATE_BUFFERING, + DECODER_STATE_COMPLETED, + DECODER_STATE_SHUTDOWN + }; + + RefPtr RequestDebugInfo( + dom::MediaDecoderStateMachineDebugInfo& aInfo) override; + + size_t SizeOfVideoQueue() const override; + + size_t SizeOfAudioQueue() const override; + + // Sets the video decode mode. Used by the suspend-video-decoder feature. + void SetVideoDecodeMode(VideoDecodeMode aMode) override; + + RefPtr InvokeSetSink( + const RefPtr& aSink) override; + + void InvokeSuspendMediaSink() override; + void InvokeResumeMediaSink() override; + + bool IsCDMProxySupported(CDMProxy* aProxy) override; + + private: + class StateObject; + class DecodeMetadataState; + class DormantState; + class DecodingFirstFrameState; + class DecodingState; + class LoopingDecodingState; + class SeekingState; + class AccurateSeekingState; + class NextFrameSeekingState; + class NextFrameSeekingFromDormantState; + class VideoOnlySeekingState; + class BufferingState; + class CompletedState; + class ShutdownState; + + static const char* ToStateStr(State aState); + const char* ToStateStr(); + + void GetDebugInfo(dom::MediaDecoderStateMachineDebugInfo& aInfo); + + // Initialization that needs to happen on the task queue. This is the first + // task that gets run on the task queue, and is dispatched from the MDSM + // constructor immediately after the task queue is created. + void InitializationTask(MediaDecoder* aDecoder) override; + + RefPtr Seek(const SeekTarget& aTarget) override; + + RefPtr Shutdown() override; + + RefPtr FinishShutdown(); + + // Update the playback position. This can result in a timeupdate event + // and an invalidate of the frame being dispatched asynchronously if + // there is no such event currently queued. + // Only called on the decoder thread. Must be called with + // the decode monitor held. + void UpdatePlaybackPosition(const media::TimeUnit& aTime); + + // Schedules the shared state machine thread to run the state machine. + void ScheduleStateMachine(); + + // Invokes ScheduleStateMachine to run in |aTime|, + // unless it's already scheduled to run earlier, in which case the + // request is discarded. + void ScheduleStateMachineIn(const media::TimeUnit& aTime); + + bool HaveEnoughDecodedAudio() const; + bool HaveEnoughDecodedVideo() const; + + // The check is used to store more video frames than usual when playing 4K+ + // video. + bool IsVideoDataEnoughComparedWithAudio() const; + + // Returns true if we're currently playing. The decoder monitor must + // be held. + bool IsPlaying() const; + + // Sets mMediaSeekable to false. + void SetMediaNotSeekable(); + + // Resets all states related to decoding and aborts all pending requests + // to the decoders. 
+ void ResetDecode(const TrackSet& aTracks = TrackSet(TrackInfo::kAudioTrack, + TrackInfo::kVideoTrack)); + + void SetVideoDecodeModeInternal(VideoDecodeMode aMode); + + RefPtr SetSink(RefPtr aDevice); + + // Shutdown MediaSink on suspend to clean up resources. + void SuspendMediaSink(); + // Create a new MediaSink, it must have been stopped first. + void ResumeMediaSink(); + + protected: + virtual ~MediaDecoderStateMachine(); + + void BufferedRangeUpdated() override; + void VolumeChanged() override; + void PreservesPitchChanged() override; + void PlayStateChanged() override; + void LoopingChanged() override; + void UpdateSecondaryVideoContainer() override; + + void ReaderSuspendedChanged(); + + // Inserts a sample into the Audio/Video queue. + // aSample must not be null. + void PushAudio(AudioData* aSample); + void PushVideo(VideoData* aSample); + + void OnAudioPopped(const RefPtr& aSample); + void OnVideoPopped(const RefPtr& aSample); + + void AudioAudibleChanged(bool aAudible); + + void SetPlaybackRate(double aPlaybackRate) override; + void SetIsLiveStream(bool aIsLiveStream) override { + mIsLiveStream = aIsLiveStream; + } + void SetCanPlayThrough(bool aCanPlayThrough) override { + mCanPlayThrough = aCanPlayThrough; + } + void SetFragmentEndTime(const media::TimeUnit& aEndTime) override { + // A negative number means we don't have a fragment end time at all. + mFragmentEndTime = aEndTime >= media::TimeUnit::Zero() + ? aEndTime + : media::TimeUnit::Invalid(); + } + + void StreamNameChanged(); + void UpdateOutputCaptured(); + void OutputPrincipalChanged(); + + MediaQueue& AudioQueue() { return mAudioQueue; } + MediaQueue& VideoQueue() { return mVideoQueue; } + + const MediaQueue& AudioQueue() const { return mAudioQueue; } + const MediaQueue& VideoQueue() const { return mVideoQueue; } + + // True if we are low in decoded audio/video data. + // May not be invoked when mReader->UseBufferingHeuristics() is false. + bool HasLowDecodedData(); + + bool HasLowDecodedAudio(); + + bool HasLowDecodedVideo(); + + bool OutOfDecodedAudio(); + + bool OutOfDecodedVideo() { + MOZ_ASSERT(OnTaskQueue()); + return IsVideoDecoding() && VideoQueue().GetSize() <= 1; + } + + // Returns true if we're running low on buffered data. + bool HasLowBufferedData(); + + // Returns true if we have less than aThreshold of buffered data available. + bool HasLowBufferedData(const media::TimeUnit& aThreshold); + + // Return the current time, either the audio clock if available (if the media + // has audio, and the playback is possible), or a clock for the video. + // Called on the state machine thread. + // If aTimeStamp is non-null, set *aTimeStamp to the TimeStamp corresponding + // to the returned stream time. + media::TimeUnit GetClock(TimeStamp* aTimeStamp = nullptr) const; + + // Update only the state machine's current playback position (and duration, + // if unknown). Does not update the playback position on the decoder or + // media element -- use UpdatePlaybackPosition for that. Called on the state + // machine thread, caller must hold the decoder lock. + void UpdatePlaybackPositionInternal(const media::TimeUnit& aTime); + + // Update playback position and trigger next update by default time period. + // Called on the state machine thread. + void UpdatePlaybackPositionPeriodically(); + + MediaSink* CreateAudioSink(); + + // Always create mediasink which contains an AudioSink or DecodedStream + // inside. + already_AddRefed CreateMediaSink(); + + // Stops the media sink and shut it down. 
+ // The decoder monitor must be held with exactly one lock count. + // Called on the state machine thread. + void StopMediaSink(); + + // Create and start the media sink. + // The decoder monitor must be held with exactly one lock count. + // Called on the state machine thread. + // If start fails an NS_ERROR_FAILURE is returned. + nsresult StartMediaSink(); + + // Notification method invoked when mIsVisible changes. + void VisibilityChanged(); + + // Sets internal state which causes playback of media to pause. + // The decoder monitor must be held. + void StopPlayback(); + + // If the conditions are right, sets internal state which causes playback + // of media to begin or resume. + // Must be called with the decode monitor held. + void MaybeStartPlayback(); + + void EnqueueFirstFrameLoadedEvent(); + + // Start a task to decode audio. + void RequestAudioData(); + + // Start a task to decode video. + // @param aRequestNextVideoKeyFrame + // If aRequestNextKeyFrame is true, will request data for the next keyframe + // after aCurrentTime. + void RequestVideoData(const media::TimeUnit& aCurrentTime, + bool aRequestNextKeyFrame = false); + + void WaitForData(MediaData::Type aType); + + // Returns the "current playback position" in HTML5, which is in the range + // [0,duration]. The first frame of the media resource corresponds to 0 + // regardless of any codec-specific internal time code. + media::TimeUnit GetMediaTime() const { + MOZ_ASSERT(OnTaskQueue()); + return mCurrentPosition; + } + + // Returns an upper bound on the number of microseconds of audio that is + // decoded and playable. This is the sum of the number of usecs of audio which + // is decoded and in the reader's audio queue, and the usecs of unplayed audio + // which has been pushed to the audio hardware for playback. Note that after + // calling this, the audio hardware may play some of the audio pushed to + // hardware, so this can only be used as a upper bound. The decoder monitor + // must be held when calling this. Called on the decode thread. + media::TimeUnit GetDecodedAudioDuration() const; + + void FinishDecodeFirstFrame(); + + // Performs one "cycle" of the state machine. + void RunStateMachine(); + + bool IsStateMachineScheduled() const; + + // These return true if the respective stream's decode has not yet reached + // the end of stream. + bool IsAudioDecoding(); + bool IsVideoDecoding(); + + private: + // Resolved by the MediaSink to signal that all audio/video outstanding + // work is complete and identify which part(a/v) of the sink is shutting down. + void OnMediaSinkAudioComplete(); + void OnMediaSinkVideoComplete(); + + // Rejected by the MediaSink to signal errors for audio/video. + void OnMediaSinkAudioError(nsresult aResult); + void OnMediaSinkVideoError(); + + // State-watching manager. + WatchManager mWatchManager; + + // True if we've dispatched a task to run the state machine but the task has + // yet to run. + bool mDispatchedStateMachine; + + // Used to dispatch another round schedule with specific target time. + DelayedScheduler mDelayedScheduler; + + // Queue of audio frames. This queue is threadsafe, and is accessed from + // the audio, decoder, state machine, and main threads. + MediaQueue mAudioQueue; + // Queue of video frames. This queue is threadsafe, and is accessed from + // the decoder, state machine, and main threads. 
+ MediaQueue mVideoQueue; + + UniquePtr mStateObj; + + media::TimeUnit Duration() const { + MOZ_ASSERT(OnTaskQueue()); + return mDuration.Ref().ref(); + } + + // FrameID which increments every time a frame is pushed to our queue. + FrameID mCurrentFrameID; + + // Media Fragment end time. + media::TimeUnit mFragmentEndTime = media::TimeUnit::Invalid(); + + // The media sink resource. Used on the state machine thread. + RefPtr mMediaSink; + + // The end time of the last audio frame that's been pushed onto the media sink + // in microseconds. This will approximately be the end time + // of the audio stream, unless another frame is pushed to the hardware. + media::TimeUnit AudioEndTime() const; + + // The end time of the last rendered video frame that's been sent to + // compositor. + media::TimeUnit VideoEndTime() const; + + // The end time of the last decoded audio frame. This signifies the end of + // decoded audio data. Used to check if we are low in decoded data. + media::TimeUnit mDecodedAudioEndTime; + + // The end time of the last decoded video frame. Used to check if we are low + // on decoded video data. + media::TimeUnit mDecodedVideoEndTime; + + // If we've got more than this number of decoded video frames waiting in + // the video queue, we will not decode any more video frames until some have + // been consumed by the play state machine thread. + // Must hold monitor. + uint32_t GetAmpleVideoFrames() const; + + // Our "ample" audio threshold. Once we've this much audio decoded, we + // pause decoding. + media::TimeUnit mAmpleAudioThreshold; + + const char* AudioRequestStatus() const; + const char* VideoRequestStatus() const; + + void OnSuspendTimerResolved(); + void CancelSuspendTimer(); + + bool IsInSeamlessLooping() const; + + bool mCanPlayThrough = false; + + bool mIsLiveStream = false; + + // True if all audio frames are already rendered. + bool mAudioCompleted = false; + + // True if all video frames are already rendered. + bool mVideoCompleted = false; + + // True if video decoding is suspended. + bool mVideoDecodeSuspended; + + // Track enabling video decode suspension via timer + DelayedScheduler mVideoDecodeSuspendTimer; + + // Track the current video decode mode. + VideoDecodeMode mVideoDecodeMode; + + // Track the complete & error for audio/video separately + MozPromiseRequestHolder mMediaSinkAudioEndedPromise; + MozPromiseRequestHolder mMediaSinkVideoEndedPromise; + + MediaEventListener mAudioQueueListener; + MediaEventListener mVideoQueueListener; + MediaEventListener mAudibleListener; + MediaEventListener mOnMediaNotSeekable; + + const bool mIsMSE; + + const bool mShouldResistFingerprinting; + + bool mSeamlessLoopingAllowed; + + // If media was in looping and had reached to the end before, then we need + // to adjust sample time from clock time to media time. + void AdjustByLooping(media::TimeUnit& aTime) const; + + // These are used for seamless looping. When looping has been enable at least + // once, `mOriginalDecodedDuration` would be set to the larger duration + // between two tracks. + media::TimeUnit mOriginalDecodedDuration; + Maybe mAudioTrackDecodedDuration; + Maybe mVideoTrackDecodedDuration; + + bool HasLastDecodedData(MediaData::Type aType); + + // Current playback position in the stream in bytes. + int64_t mPlaybackOffset = 0; + + // For seamless looping video, we don't want to trigger skip-to-next-keyframe + // after reaching video EOS. Because we've reset the demuxer to 0, and are + // going to request data from start. 
If playback hasn't looped back, the media + // time would still be too large, which makes the reader think the playback is + // way behind and performs unnecessary skipping. Eg. Media is 10s long, + // reaching EOS at 8s, requesting data at 9s. Assume media's keyframe interval + // is 3s, which means keyframes will appear on 0s, 3s, 6s and 9s. If we use + // current time as a threshold, the reader sees the next key frame is 3s but + // the threashold is 9s, which usually happens when the decoding is too slow. + // But that is not the case for us, we should by pass thskip-to-next-keyframe + // logic until the media loops back. + bool mBypassingSkipToNextKeyFrameCheck = false; + + private: + // Audio stream name + Mirror mStreamName; + + // The device used with SetSink, or nullptr if no explicit device has been + // set. + Mirror> mSinkDevice; + + // Whether all output should be captured into mOutputTracks, halted, or not + // captured. + Mirror mOutputCaptureState; + + // A dummy track used to access the right MediaTrackGraph instance. Needed + // since there's no guarantee that output tracks are present. + Mirror> mOutputDummyTrack; + + // Tracks to capture data into. + Mirror>> mOutputTracks; + + // PrincipalHandle to feed with data captured into mOutputTracks. + Mirror mOutputPrincipal; + + Canonical mCanonicalOutputPrincipal; + + // Track when MediaSink is supsended. When that happens some actions are + // restricted like starting the sink or changing sink id. The flag is valid + // after Initialization. TaskQueue thread only. + bool mIsMediaSinkSuspended = false; + + public: + AbstractCanonical* CanonicalOutputPrincipal() { + return &mCanonicalOutputPrincipal; + } +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/MediaDecoderStateMachineBase.cpp b/dom/media/MediaDecoderStateMachineBase.cpp new file mode 100644 index 0000000000..e60937c31f --- /dev/null +++ b/dom/media/MediaDecoderStateMachineBase.cpp @@ -0,0 +1,186 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "MediaDecoderStateMachineBase.h" + +#include "MediaDecoder.h" +#include "mozilla/ProfilerMarkers.h" +#include "mozilla/TaskQueue.h" +#include "nsThreadUtils.h" + +namespace mozilla { + +#define INIT_MIRROR(name, val) \ + name(mTaskQueue, val, "MediaDecoderStateMachineBase::" #name " (Mirror)") +#define INIT_CANONICAL(name, val) \ + name(mTaskQueue, val, "MediaDecoderStateMachineBase::" #name " (Canonical)") +#define FMT(x, ...) "Decoder=%p " x, mDecoderID, ##__VA_ARGS__ +#define LOG(x, ...) \ + DDMOZ_LOG(gMediaDecoderLog, LogLevel::Debug, "Decoder=%p " x, mDecoderID, \ + ##__VA_ARGS__) +#define LOGV(x, ...) \ + DDMOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, "Decoder=%p " x, mDecoderID, \ + ##__VA_ARGS__) +#define LOGW(x, ...) NS_WARNING(nsPrintfCString(FMT(x, ##__VA_ARGS__)).get()) +#define LOGE(x, ...) 
\ + NS_DebugBreak(NS_DEBUG_WARNING, \ + nsPrintfCString(FMT(x, ##__VA_ARGS__)).get(), nullptr, \ + __FILE__, __LINE__) + +MediaDecoderStateMachineBase::MediaDecoderStateMachineBase( + MediaDecoder* aDecoder, MediaFormatReader* aReader) + : mDecoderID(aDecoder), + mAbstractMainThread(aDecoder->AbstractMainThread()), + mFrameStats(&aDecoder->GetFrameStatistics()), + mVideoFrameContainer(aDecoder->GetVideoFrameContainer()), + mTaskQueue(TaskQueue::Create(GetMediaThreadPool(MediaThreadType::MDSM), + "MDSM::mTaskQueue", + /* aSupportsTailDispatch = */ true)), + mReader(new ReaderProxy(mTaskQueue, aReader)), + mPlaybackRate(1.0), + INIT_MIRROR(mBuffered, media::TimeIntervals()), + INIT_MIRROR(mPlayState, MediaDecoder::PLAY_STATE_LOADING), + INIT_MIRROR(mVolume, 1.0), + INIT_MIRROR(mPreservesPitch, true), + INIT_MIRROR(mLooping, false), + INIT_MIRROR(mSecondaryVideoContainer, nullptr), + INIT_CANONICAL(mDuration, media::NullableTimeUnit()), + INIT_CANONICAL(mCurrentPosition, media::TimeUnit::Zero()), + INIT_CANONICAL(mIsAudioDataAudible, false), + mMinimizePreroll(aDecoder->GetMinimizePreroll()), + mWatchManager(this, mTaskQueue) {} + +MediaEventSource& MediaDecoderStateMachineBase::OnMediaNotSeekable() + const { + return mReader->OnMediaNotSeekable(); +} + +AbstractCanonical* +MediaDecoderStateMachineBase::CanonicalBuffered() const { + return mReader->CanonicalBuffered(); +} + +void MediaDecoderStateMachineBase::DispatchSetFragmentEndTime( + const media::TimeUnit& aEndTime) { + OwnerThread()->DispatchStateChange(NewRunnableMethod( + "MediaDecoderStateMachineBase::SetFragmentEndTime", this, + &MediaDecoderStateMachineBase::SetFragmentEndTime, aEndTime)); +} + +void MediaDecoderStateMachineBase::DispatchCanPlayThrough( + bool aCanPlayThrough) { + OwnerThread()->DispatchStateChange(NewRunnableMethod( + "MediaDecoderStateMachineBase::SetCanPlayThrough", this, + &MediaDecoderStateMachineBase::SetCanPlayThrough, aCanPlayThrough)); +} + +void MediaDecoderStateMachineBase::DispatchIsLiveStream(bool aIsLiveStream) { + OwnerThread()->DispatchStateChange(NewRunnableMethod( + "MediaDecoderStateMachineBase::SetIsLiveStream", this, + &MediaDecoderStateMachineBase::SetIsLiveStream, aIsLiveStream)); +} + +void MediaDecoderStateMachineBase::DispatchSetPlaybackRate( + double aPlaybackRate) { + OwnerThread()->DispatchStateChange(NewRunnableMethod( + "MediaDecoderStateMachineBase::SetPlaybackRate", this, + &MediaDecoderStateMachineBase::SetPlaybackRate, aPlaybackRate)); +} + +nsresult MediaDecoderStateMachineBase::Init(MediaDecoder* aDecoder) { + MOZ_ASSERT(NS_IsMainThread()); + + // Dispatch initialization that needs to happen on that task queue. + nsCOMPtr r = NewRunnableMethod>( + "MediaDecoderStateMachineBase::InitializationTask", this, + &MediaDecoderStateMachineBase::InitializationTask, aDecoder); + mTaskQueue->DispatchStateChange(r.forget()); + + // Connect mirrors. + aDecoder->CanonicalPlayState().ConnectMirror(&mPlayState); + aDecoder->CanonicalVolume().ConnectMirror(&mVolume); + aDecoder->CanonicalPreservesPitch().ConnectMirror(&mPreservesPitch); + aDecoder->CanonicalLooping().ConnectMirror(&mLooping); + aDecoder->CanonicalSecondaryVideoContainer().ConnectMirror( + &mSecondaryVideoContainer); + + nsresult rv = mReader->Init(); + NS_ENSURE_SUCCESS(rv, rv); + + mMetadataManager.Connect(mReader->TimedMetadataEvent(), OwnerThread()); + + return NS_OK; +} + +void MediaDecoderStateMachineBase::InitializationTask(MediaDecoder* aDecoder) { + MOZ_ASSERT(OnTaskQueue()); + + // Connect mirrors. 
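  // Roughly speaking, connecting a Mirror subscribes it to its Canonical
  // counterpart, which lives on another thread: changes to the canonical
  // value are dispatched to this task queue, after which the mirrored value
  // can be read here without additional locking.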
+ mBuffered.Connect(mReader->CanonicalBuffered()); + mReader->SetCanonicalDuration(mDuration); + + // Initialize watchers. + mWatchManager.Watch(mBuffered, + &MediaDecoderStateMachineBase::BufferedRangeUpdated); + mWatchManager.Watch(mVolume, &MediaDecoderStateMachineBase::VolumeChanged); + mWatchManager.Watch(mPreservesPitch, + &MediaDecoderStateMachineBase::PreservesPitchChanged); + mWatchManager.Watch(mPlayState, + &MediaDecoderStateMachineBase::PlayStateChanged); + mWatchManager.Watch(mLooping, &MediaDecoderStateMachineBase::LoopingChanged); + mWatchManager.Watch( + mSecondaryVideoContainer, + &MediaDecoderStateMachineBase::UpdateSecondaryVideoContainer); +} + +RefPtr MediaDecoderStateMachineBase::BeginShutdown() { + MOZ_ASSERT(NS_IsMainThread()); + return InvokeAsync( + OwnerThread(), __func__, + [self = RefPtr(this), this]() { + mWatchManager.Shutdown(); + mBuffered.DisconnectIfConnected(); + mPlayState.DisconnectIfConnected(); + mVolume.DisconnectIfConnected(); + mPreservesPitch.DisconnectIfConnected(); + mLooping.DisconnectIfConnected(); + mSecondaryVideoContainer.DisconnectIfConnected(); + return Shutdown(); + }); +} + +RefPtr MediaDecoderStateMachineBase::InvokeSeek( + const SeekTarget& aTarget) { + return InvokeAsync(OwnerThread(), __func__, + [self = RefPtr(this), + target = aTarget]() { return self->Seek(target); }); +} + +bool MediaDecoderStateMachineBase::OnTaskQueue() const { + return OwnerThread()->IsCurrentThreadIn(); +} + +void MediaDecoderStateMachineBase::DecodeError(const MediaResult& aError) { + MOZ_ASSERT(OnTaskQueue()); + LOGE("Decode error: %s", aError.Description().get()); + PROFILER_MARKER_TEXT("MDSMBase::DecodeError", MEDIA_PLAYBACK, {}, + aError.Description()); + // Notify the decode error and MediaDecoder will shut down MDSM. + mOnPlaybackErrorEvent.Notify(aError); +} + +RefPtr MediaDecoderStateMachineBase::SetCDMProxy( + CDMProxy* aProxy) { + return mReader->SetCDMProxy(aProxy); +} + +#undef INIT_MIRROR +#undef INIT_CANONICAL +#undef FMT +#undef LOG +#undef LOGV +#undef LOGW +#undef LOGE + +} // namespace mozilla diff --git a/dom/media/MediaDecoderStateMachineBase.h b/dom/media/MediaDecoderStateMachineBase.h new file mode 100644 index 0000000000..b4950746e8 --- /dev/null +++ b/dom/media/MediaDecoderStateMachineBase.h @@ -0,0 +1,308 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef DOM_MEDIA_MEDIADECODERSTATEMACHINEBASE_H_ +#define DOM_MEDIA_MEDIADECODERSTATEMACHINEBASE_H_ + +#include "DecoderDoctorDiagnostics.h" +#include "MediaDecoder.h" +#include "MediaDecoderOwner.h" +#include "MediaEventSource.h" +#include "MediaInfo.h" +#include "MediaMetadataManager.h" +#include "MediaPromiseDefs.h" +#include "ReaderProxy.h" +#include "VideoFrameContainer.h" +#include "mozilla/dom/MediaDebugInfoBinding.h" +#include "mozilla/Variant.h" +#include "nsISupportsImpl.h" + +class AudioDeviceInfo; + +namespace mozilla { + +class AbstractThread; +class CDMProxy; +class FrameStatistics; +class MediaFormatReader; +class TaskQueue; + +struct MediaPlaybackEvent { + enum EventType { + PlaybackStarted, + PlaybackStopped, + PlaybackProgressed, + PlaybackEnded, + SeekStarted, + Invalidate, + EnterVideoSuspend, + ExitVideoSuspend, + StartVideoSuspendTimer, + CancelVideoSuspendTimer, + VideoOnlySeekBegin, + VideoOnlySeekCompleted, + } mType; + + using DataType = Variant; + DataType mData; + + MOZ_IMPLICIT MediaPlaybackEvent(EventType aType) + : mType(aType), mData(Nothing{}) {} + + template + MediaPlaybackEvent(EventType aType, T&& aArg) + : mType(aType), mData(std::forward(aArg)) {} +}; + +enum class VideoDecodeMode : uint8_t { Normal, Suspend }; + +/** + * The state machine class. This manages the decoding and seeking in the + * MediaDecoderReader on the decode task queue, and A/V sync on the shared + * state machine thread, and controls the audio "push" thread. + * + * All internal state is synchronised via the decoder monitor. State changes + * are propagated by scheduling the state machine to run another cycle on the + * shared state machine thread. + */ +class MediaDecoderStateMachineBase { + public: + NS_INLINE_DECL_PURE_VIRTUAL_REFCOUNTING + + using FirstFrameEventSourceExc = + MediaEventSourceExc, MediaDecoderEventVisibility>; + using MetadataEventSourceExc = + MediaEventSourceExc, UniquePtr, + MediaDecoderEventVisibility>; + using NextFrameStatus = MediaDecoderOwner::NextFrameStatus; + + MediaDecoderStateMachineBase(MediaDecoder* aDecoder, + MediaFormatReader* aReader); + + virtual nsresult Init(MediaDecoder* aDecoder); + + RefPtr BeginShutdown(); + + // Seeks to the decoder to aTarget asynchronously. + RefPtr InvokeSeek(const SeekTarget& aTarget); + + virtual size_t SizeOfVideoQueue() const = 0; + virtual size_t SizeOfAudioQueue() const = 0; + + // Sets the video decode mode. Used by the suspend-video-decoder feature. + virtual void SetVideoDecodeMode(VideoDecodeMode aMode) = 0; + + // Set new sink device. ExternalEngineStateMachine will reject the returned + // promise with NS_ERROR_ABORT to indicate that the action is not supported. + // MediaDecoderStateMachine will resolve the promise when the previous + // device is no longer in use and an attempt to open the new device + // completes (successfully or not) or is deemed unnecessary because the + // device is not required for output at this time. MediaDecoderStateMachine + // will always consider the switch in underlying output device successful + // and continue attempting to open the new device even if opening initially + // fails. + virtual RefPtr InvokeSetSink( + const RefPtr& aSink) = 0; + virtual void InvokeSuspendMediaSink() = 0; + virtual void InvokeResumeMediaSink() = 0; + + virtual RefPtr RequestDebugInfo( + dom::MediaDecoderStateMachineDebugInfo& aInfo) = 0; + + // Returns the state machine task queue. 
+ TaskQueue* OwnerThread() const { return mTaskQueue; } + + MetadataEventSourceExc& MetadataLoadedEvent() { return mMetadataLoadedEvent; } + + FirstFrameEventSourceExc& FirstFrameLoadedEvent() { + return mFirstFrameLoadedEvent; + } + + MediaEventSourceExc>& + OnSecondaryVideoContainerInstalled() { + return mOnSecondaryVideoContainerInstalled; + } + + TimedMetadataEventSource& TimedMetadataEvent() { + return mMetadataManager.TimedMetadataEvent(); + } + + MediaEventSource& OnPlaybackEvent() { + return mOnPlaybackEvent; + } + MediaEventSource& OnPlaybackErrorEvent() { + return mOnPlaybackErrorEvent; + } + + MediaEventSource& OnDecoderDoctorEvent() { + return mOnDecoderDoctorEvent; + } + + MediaEventSource& OnNextFrameStatus() { + return mOnNextFrameStatus; + } + + MediaEventProducer& OnTrackInfoUpdatedEvent() { + return mReader->OnTrackInfoUpdatedEvent(); + } + + MediaEventSource& OnMediaNotSeekable() const; + + AbstractCanonical* CanonicalDuration() { + return &mDuration; + } + AbstractCanonical* CanonicalCurrentPosition() { + return &mCurrentPosition; + } + AbstractCanonical* CanonicalIsAudioDataAudible() { + return &mIsAudioDataAudible; + } + AbstractCanonical* CanonicalBuffered() const; + + void DispatchSetFragmentEndTime(const media::TimeUnit& aEndTime); + void DispatchCanPlayThrough(bool aCanPlayThrough); + void DispatchIsLiveStream(bool aIsLiveStream); + void DispatchSetPlaybackRate(double aPlaybackRate); + + virtual RefPtr SetCDMProxy(CDMProxy* aProxy); + + virtual bool IsCDMProxySupported(CDMProxy* aProxy) = 0; + + protected: + virtual ~MediaDecoderStateMachineBase() = default; + + bool HasAudio() const { return mInfo.ref().HasAudio(); } + bool HasVideo() const { return mInfo.ref().HasVideo(); } + const MediaInfo& Info() const { return mInfo.ref(); } + + virtual void SetPlaybackRate(double aPlaybackRate) = 0; + virtual void SetIsLiveStream(bool aIsLiveStream) = 0; + virtual void SetCanPlayThrough(bool aCanPlayThrough) = 0; + virtual void SetFragmentEndTime(const media::TimeUnit& aFragmentEndTime) = 0; + + virtual void BufferedRangeUpdated() = 0; + virtual void VolumeChanged() = 0; + virtual void PreservesPitchChanged() = 0; + virtual void PlayStateChanged() = 0; + virtual void LoopingChanged() = 0; + virtual void UpdateSecondaryVideoContainer() = 0; + + // Init tasks which should be done on the task queue. + virtual void InitializationTask(MediaDecoder* aDecoder); + + virtual RefPtr Shutdown() = 0; + + virtual RefPtr Seek(const SeekTarget& aTarget) = 0; + + void DecodeError(const MediaResult& aError); + + // Functions used by assertions to ensure we're calling things + // on the appropriate threads. + bool OnTaskQueue() const; + + bool IsRequestingAudioData() const { return mAudioDataRequest.Exists(); } + bool IsRequestingVideoData() const { return mVideoDataRequest.Exists(); } + bool IsWaitingAudioData() const { return mAudioWaitRequest.Exists(); } + bool IsWaitingVideoData() const { return mVideoWaitRequest.Exists(); } + + void* const mDecoderID; + const RefPtr mAbstractMainThread; + const RefPtr mFrameStats; + const RefPtr mVideoFrameContainer; + const RefPtr mTaskQueue; + const RefPtr mReader; + mozilla::MediaMetadataManager mMetadataManager; + + // Playback rate. 1.0 : normal speed, 0.5 : two times slower. 
+ double mPlaybackRate; + + // Event producers + MediaEventProducerExc, UniquePtr, + MediaDecoderEventVisibility> + mMetadataLoadedEvent; + MediaEventProducerExc, MediaDecoderEventVisibility> + mFirstFrameLoadedEvent; + MediaEventProducerExc> + mOnSecondaryVideoContainerInstalled; + MediaEventProducer mOnPlaybackEvent; + MediaEventProducer mOnPlaybackErrorEvent; + MediaEventProducer mOnDecoderDoctorEvent; + MediaEventProducer mOnNextFrameStatus; + + // The buffered range. Mirrored from the decoder thread. + Mirror mBuffered; + + // The current play state, mirrored from the main thread. + Mirror mPlayState; + + // Volume of playback. 0.0 = muted. 1.0 = full volume. + Mirror mVolume; + + // Pitch preservation for the playback rate. + Mirror mPreservesPitch; + + // Whether to seek back to the start of the media resource + // upon reaching the end. + Mirror mLooping; + + // Set if the decoder is sending video to a secondary container. While set we + // should not suspend the decoder. + Mirror> mSecondaryVideoContainer; + + // Duration of the media. This is guaranteed to be non-null after we finish + // decoding the first frame. + Canonical mDuration; + + // The time of the current frame, corresponding to the "current + // playback position" in HTML5. This is referenced from 0, which is the + // initial playback position. + Canonical mCurrentPosition; + + // Used to distinguish whether the audio is producing sound. + Canonical mIsAudioDataAudible; + + // Stores presentation info required for playback. + Maybe mInfo; + + // True if the media is seekable (i.e. supports random access). + bool mMediaSeekable = true; + + // True if the media is seekable only in buffered ranges. + bool mMediaSeekableOnlyInBufferedRanges = false; + + // True if we've decoded first frames (thus having the start time) and + // notified the FirstFrameLoaded event. Note we can't initiate seek until the + // start time is known which happens when the first frames are decoded or we + // are playing an MSE stream (the start time is always assumed 0). + bool mSentFirstFrameLoadedEvent = false; + + // True if we should not decode/preroll unnecessary samples, unless we're + // played. "Prerolling" in this context refers to when we decode and + // buffer decoded samples in advance of when they're needed for playback. + // This flag is set for preload=metadata media, and means we won't + // decode more than the first video frame and first block of audio samples + // for that media when we startup, or after a seek. When Play() is called, + // we reset this flag, as we assume the user is playing the media, so + // prerolling is appropriate then. This flag is used to reduce the overhead + // of prerolling samples for media elements that may not play, both + // memory and CPU overhead. + bool mMinimizePreroll; + + // Only one of a given pair of ({Audio,Video}DataPromise, WaitForDataPromise) + // should exist at any given moment. 
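  // MediaDecoderStateMachine::AudioRequestStatus() and VideoRequestStatus()
  // rely on this invariant: they report "pending" while the data request
  // exists, "waiting" while the wait request exists, and "idle" when neither
  // does, and they diagnostic-assert that both are never live at once.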
+ using AudioDataPromise = MediaFormatReader::AudioDataPromise; + using VideoDataPromise = MediaFormatReader::VideoDataPromise; + using WaitForDataPromise = MediaFormatReader::WaitForDataPromise; + MozPromiseRequestHolder mAudioDataRequest; + MozPromiseRequestHolder mVideoDataRequest; + MozPromiseRequestHolder mAudioWaitRequest; + MozPromiseRequestHolder mVideoWaitRequest; + + private: + WatchManager mWatchManager; +}; + +} // namespace mozilla + +#endif // DOM_MEDIA_MEDIADECODERSTATEMACHINEBASE_H_ diff --git a/dom/media/MediaDeviceInfo.cpp b/dom/media/MediaDeviceInfo.cpp new file mode 100644 index 0000000000..4183da2865 --- /dev/null +++ b/dom/media/MediaDeviceInfo.cpp @@ -0,0 +1,42 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/dom/MediaDeviceInfo.h" +#include "mozilla/dom/MediaStreamBinding.h" +#include "mozilla/MediaManager.h" +#include "nsIScriptGlobalObject.h" + +namespace mozilla::dom { + +MediaDeviceInfo::MediaDeviceInfo(const nsAString& aDeviceId, + MediaDeviceKind aKind, const nsAString& aLabel, + const nsAString& aGroupId) + : mKind(aKind), mDeviceId(aDeviceId), mLabel(aLabel), mGroupId(aGroupId) {} + +NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_0(MediaDeviceInfo) +NS_IMPL_CYCLE_COLLECTING_ADDREF(MediaDeviceInfo) +NS_IMPL_CYCLE_COLLECTING_RELEASE(MediaDeviceInfo) +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(MediaDeviceInfo) + NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY + NS_INTERFACE_MAP_ENTRY(nsISupports) +NS_INTERFACE_MAP_END + +JSObject* MediaDeviceInfo::WrapObject(JSContext* aCx, + JS::Handle aGivenProto) { + return MediaDeviceInfo_Binding::Wrap(aCx, this, aGivenProto); +} + +nsISupports* MediaDeviceInfo::GetParentObject() { return nullptr; } + +void MediaDeviceInfo::GetDeviceId(nsString& retval) { retval = mDeviceId; } + +MediaDeviceKind MediaDeviceInfo::Kind() { return mKind; } + +void MediaDeviceInfo::GetGroupId(nsString& retval) { retval = mGroupId; } + +void MediaDeviceInfo::GetLabel(nsString& retval) { retval = mLabel; } + +MediaDeviceKind Kind(); + +} // namespace mozilla::dom diff --git a/dom/media/MediaDeviceInfo.h b/dom/media/MediaDeviceInfo.h new file mode 100644 index 0000000000..e68ffba9dc --- /dev/null +++ b/dom/media/MediaDeviceInfo.h @@ -0,0 +1,59 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef mozilla_dom_MediaDeviceInfo_h +#define mozilla_dom_MediaDeviceInfo_h + +#include "js/RootingAPI.h" +#include "mozilla/Assertions.h" +#include "mozilla/dom/MediaDeviceInfoBinding.h" +#include "nsCycleCollectionParticipant.h" +#include "nsID.h" +#include "nsISupports.h" +#include "nsStringFwd.h" +#include "nsWrapperCache.h" + +namespace mozilla::dom { + +#define MOZILLA_DOM_MEDIADEVICEINFO_IMPLEMENTATION_IID \ + { \ + 0x25091870, 0x84d6, 0x4acf, { \ + 0xaf, 0x97, 0x6e, 0xd5, 0x5b, 0xe0, 0x47, 0xb2 \ + } \ + } + +class MediaDeviceInfo final : public nsISupports, public nsWrapperCache { + public: + explicit MediaDeviceInfo(const nsAString& aDeviceId, MediaDeviceKind aKind, + const nsAString& aLabel, const nsAString& aGroupId); + + NS_DECL_CYCLE_COLLECTING_ISUPPORTS + NS_DECL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(MediaDeviceInfo) + NS_DECLARE_STATIC_IID_ACCESSOR(MOZILLA_DOM_MEDIADEVICEINFO_IMPLEMENTATION_IID) + + JSObject* WrapObject(JSContext* cx, + JS::Handle aGivenProto) override; + + nsISupports* GetParentObject(); + + void GetDeviceId(nsString& retval); + MediaDeviceKind Kind(); + void GetLabel(nsString& retval); + void GetGroupId(nsString& retval); + + private: + MediaDeviceKind mKind; + nsString mDeviceId; + nsString mLabel; + nsString mGroupId; + + virtual ~MediaDeviceInfo() = default; +}; + +NS_DEFINE_STATIC_IID_ACCESSOR(MediaDeviceInfo, + MOZILLA_DOM_MEDIADEVICEINFO_IMPLEMENTATION_IID) + +} // namespace mozilla::dom + +#endif // mozilla_dom_MediaDeviceInfo_h diff --git a/dom/media/MediaDevices.cpp b/dom/media/MediaDevices.cpp new file mode 100644 index 0000000000..cfbc148337 --- /dev/null +++ b/dom/media/MediaDevices.cpp @@ -0,0 +1,798 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "mozilla/dom/MediaDevices.h" + +#include "AudioDeviceInfo.h" +#include "MediaEngine.h" +#include "MediaEngineFake.h" +#include "mozilla/dom/BrowsingContext.h" +#include "mozilla/dom/Document.h" +#include "mozilla/dom/FeaturePolicyUtils.h" +#include "mozilla/dom/MediaStreamBinding.h" +#include "mozilla/dom/MediaDeviceInfo.h" +#include "mozilla/dom/MediaDevicesBinding.h" +#include "mozilla/dom/NavigatorBinding.h" +#include "mozilla/dom/Promise.h" +#include "mozilla/dom/WindowContext.h" +#include "mozilla/intl/Localization.h" +#include "mozilla/MediaManager.h" +#include "mozilla/StaticPrefs_media.h" +#include "MediaTrackConstraints.h" +#include "nsContentUtils.h" +#include "nsINamed.h" +#include "nsIScriptGlobalObject.h" +#include "nsPIDOMWindow.h" +#include "nsGlobalWindowInner.h" +#include "nsQueryObject.h" + +namespace mozilla::dom { + +using ConstDeviceSetPromise = MediaManager::ConstDeviceSetPromise; +using LocalDeviceSetPromise = MediaManager::LocalDeviceSetPromise; +using LocalMediaDeviceSetRefCnt = MediaManager::LocalMediaDeviceSetRefCnt; +using MediaDeviceSetRefCnt = MediaManager::MediaDeviceSetRefCnt; +using mozilla::intl::Localization; + +MediaDevices::MediaDevices(nsPIDOMWindowInner* aWindow) + : DOMEventTargetHelper(aWindow), mDefaultOutputLabel(VoidString()) {} + +MediaDevices::~MediaDevices() { + MOZ_ASSERT(NS_IsMainThread()); + mDeviceChangeListener.DisconnectIfExists(); +} + +already_AddRefed MediaDevices::GetUserMedia( + const MediaStreamConstraints& aConstraints, CallerType aCallerType, + ErrorResult& aRv) { + MOZ_ASSERT(NS_IsMainThread()); + // Get the relevant global for the promise from the wrapper cache because + // DOMEventTargetHelper::GetOwner() returns null if the document is unloaded. + // We know the wrapper exists because it is being used for |this| from JS. + // See https://github.com/heycam/webidl/issues/932 for why the relevant + // global is used instead of the current global. + nsCOMPtr global = xpc::NativeGlobal(GetWrapper()); + // global is a window because MediaDevices is exposed only to Window. + nsCOMPtr owner = do_QueryInterface(global); + if (Document* doc = owner->GetExtantDoc()) { + if (!owner->IsSecureContext()) { + doc->SetUseCounter(eUseCounter_custom_GetUserMediaInsec); + } + Document* topDoc = doc->GetTopLevelContentDocumentIfSameProcess(); + IgnoredErrorResult ignored; + if (topDoc && !topDoc->HasFocus(ignored)) { + doc->SetUseCounter(eUseCounter_custom_GetUserMediaUnfocused); + } + } + RefPtr p = Promise::Create(global, aRv); + if (NS_WARN_IF(aRv.Failed())) { + return nullptr; + } + /* If requestedMediaTypes is the empty set, return a promise rejected with a + * TypeError. */ + if (!MediaManager::IsOn(aConstraints.mVideo) && + !MediaManager::IsOn(aConstraints.mAudio)) { + p->MaybeRejectWithTypeError("audio and/or video is required"); + return p.forget(); + } + /* If the relevant settings object's responsible document is NOT fully + * active, return a promise rejected with a DOMException object whose name + * attribute has the value "InvalidStateError". 
*/ + if (!owner->IsFullyActive()) { + p->MaybeRejectWithInvalidStateError("The document is not fully active."); + return p.forget(); + } + const OwningBooleanOrMediaTrackConstraints& video = aConstraints.mVideo; + if (aCallerType != CallerType::System && video.IsMediaTrackConstraints()) { + const Optional& mediaSource = + video.GetAsMediaTrackConstraints().mMediaSource; + if (mediaSource.WasPassed() && + !mediaSource.Value().EqualsLiteral("camera")) { + WindowContext* wc = owner->GetWindowContext(); + if (!wc || !wc->HasValidTransientUserGestureActivation()) { + p->MaybeRejectWithInvalidStateError( + "Display capture requires transient activation " + "from a user gesture."); + return p.forget(); + } + } + } + RefPtr self(this); + GetUserMedia(owner, aConstraints, aCallerType) + ->Then( + GetCurrentSerialEventTarget(), __func__, + [this, self, p](RefPtr&& aStream) { + if (!GetWindowIfCurrent()) { + return; // Leave Promise pending after navigation by design. + } + p->MaybeResolve(std::move(aStream)); + }, + [this, self, p](const RefPtr& error) { + nsPIDOMWindowInner* window = GetWindowIfCurrent(); + if (!window) { + return; // Leave Promise pending after navigation by design. + } + error->Reject(p); + }); + return p.forget(); +} + +RefPtr MediaDevices::GetUserMedia( + nsPIDOMWindowInner* aWindow, const MediaStreamConstraints& aConstraints, + CallerType aCallerType) { + MOZ_ASSERT(NS_IsMainThread()); + bool haveFake = aConstraints.mFake.WasPassed() && aConstraints.mFake.Value(); + const OwningBooleanOrMediaTrackConstraints& video = aConstraints.mVideo; + const OwningBooleanOrMediaTrackConstraints& audio = aConstraints.mAudio; + bool isMicrophone = + !haveFake && + (audio.IsBoolean() + ? audio.GetAsBoolean() + : !audio.GetAsMediaTrackConstraints().mMediaSource.WasPassed()); + bool isCamera = + !haveFake && + (video.IsBoolean() + ? 
video.GetAsBoolean() + : !video.GetAsMediaTrackConstraints().mMediaSource.WasPassed()); + + RefPtr self(this); + return MediaManager::Get() + ->GetUserMedia(aWindow, aConstraints, aCallerType) + ->Then( + GetCurrentSerialEventTarget(), __func__, + [this, self, isMicrophone, + isCamera](RefPtr&& aStream) { + if (isMicrophone) { + mCanExposeMicrophoneInfo = true; + } + if (isCamera) { + mCanExposeCameraInfo = true; + } + return StreamPromise::CreateAndResolve(std::move(aStream), + __func__); + }, + [](RefPtr&& aError) { + return StreamPromise::CreateAndReject(std::move(aError), __func__); + }); +} + +already_AddRefed MediaDevices::EnumerateDevices(ErrorResult& aRv) { + MOZ_ASSERT(NS_IsMainThread()); + nsCOMPtr global = xpc::NativeGlobal(GetWrapper()); + nsCOMPtr owner = do_QueryInterface(global); + if (Document* doc = owner->GetExtantDoc()) { + if (!owner->IsSecureContext()) { + doc->SetUseCounter(eUseCounter_custom_EnumerateDevicesInsec); + } + Document* topDoc = doc->GetTopLevelContentDocumentIfSameProcess(); + IgnoredErrorResult ignored; + if (topDoc && !topDoc->HasFocus(ignored)) { + doc->SetUseCounter(eUseCounter_custom_EnumerateDevicesUnfocused); + } + } + RefPtr p = Promise::Create(global, aRv); + if (NS_WARN_IF(aRv.Failed())) { + return nullptr; + } + mPendingEnumerateDevicesPromises.AppendElement(p); + MaybeResumeDeviceExposure(); + return p.forget(); +} + +void MediaDevices::MaybeResumeDeviceExposure() { + if (mPendingEnumerateDevicesPromises.IsEmpty() && + !mHaveUnprocessedDeviceListChange) { + return; + } + nsPIDOMWindowInner* window = GetOwner(); + if (!window || !window->IsFullyActive()) { + return; + } + if (!StaticPrefs::media_devices_unfocused_enabled()) { + // Device list changes are not exposed to unfocused contexts because the + // timing information would allow fingerprinting for content to identify + // concurrent browsing, even when pages are in different containers. 
+ BrowsingContext* bc = window->GetBrowsingContext(); + if (!bc->IsActive() || // background tab or browser window fully obscured + !bc->GetIsActiveBrowserWindow()) { // browser window without focus + return; + } + } + MediaManager::Get()->GetPhysicalDevices()->Then( + GetCurrentSerialEventTarget(), __func__, + [self = RefPtr(this), this, + haveDeviceListChange = mHaveUnprocessedDeviceListChange, + enumerateDevicesPromises = std::move(mPendingEnumerateDevicesPromises)]( + RefPtr aAllDevices) mutable { + RefPtr exposedDevices = + FilterExposedDevices(*aAllDevices); + if (haveDeviceListChange) { + if (ShouldQueueDeviceChange(*exposedDevices)) { + NS_DispatchToCurrentThread(NS_NewRunnableFunction( + "devicechange", [self = RefPtr(this), this] { + DispatchTrustedEvent(u"devicechange"_ns); + })); + } + mLastPhysicalDevices = std::move(aAllDevices); + } + if (!enumerateDevicesPromises.IsEmpty()) { + ResumeEnumerateDevices(std::move(enumerateDevicesPromises), + std::move(exposedDevices)); + } + }, + [](RefPtr&&) { + MOZ_ASSERT_UNREACHABLE("GetPhysicalDevices does not reject"); + }); + mHaveUnprocessedDeviceListChange = false; +} + +RefPtr MediaDevices::FilterExposedDevices( + const MediaDeviceSet& aDevices) const { + nsPIDOMWindowInner* window = GetOwner(); + RefPtr exposed = new MediaDeviceSetRefCnt(); + if (!window) { + return exposed; // Promises will be left pending + } + Document* doc = window->GetExtantDoc(); + if (!doc) { + return exposed; + } + // Only expose devices which are allowed to use: + // https://w3c.github.io/mediacapture-main/#dom-mediadevices-enumeratedevices + bool dropMics = !FeaturePolicyUtils::IsFeatureAllowed(doc, u"microphone"_ns); + bool dropCams = !FeaturePolicyUtils::IsFeatureAllowed(doc, u"camera"_ns); + bool dropSpeakers = + !Preferences::GetBool("media.setsinkid.enabled") || + !FeaturePolicyUtils::IsFeatureAllowed(doc, u"speaker-selection"_ns); + + if (doc->ShouldResistFingerprinting(RFPTarget::MediaDevices)) { + RefPtr fakeEngine = new MediaEngineFake(); + fakeEngine->EnumerateDevices(MediaSourceEnum::Microphone, + MediaSinkEnum::Other, exposed); + fakeEngine->EnumerateDevices(MediaSourceEnum::Camera, MediaSinkEnum::Other, + exposed); + dropMics = dropCams = true; + // Speakers are not handled specially with resistFingerprinting because + // they are exposed only when explicitly and individually allowed by the + // user. + } + bool legacy = StaticPrefs::media_devices_enumerate_legacy_enabled(); + bool outputIsDefault = true; // First output is the default. + bool haveDefaultOutput = false; + nsTHashSet exposedMicrophoneGroupIds; + for (const auto& device : aDevices) { + switch (device->mKind) { + case MediaDeviceKind::Audioinput: + if (dropMics) { + continue; + } + if (mCanExposeMicrophoneInfo) { + exposedMicrophoneGroupIds.Insert(device->mRawGroupID); + } + if (!mCanExposeMicrophoneInfo && !legacy) { + dropMics = true; + } + break; + case MediaDeviceKind::Videoinput: + if (dropCams) { + continue; + } + if (!mCanExposeCameraInfo && !legacy) { + dropCams = true; + } + break; + case MediaDeviceKind::Audiooutput: + if (dropSpeakers || + (!mExplicitlyGrantedAudioOutputRawIds.Contains(device->mRawID) && + // Assumes aDevices order has microphones before speakers. + !exposedMicrophoneGroupIds.Contains(device->mRawGroupID))) { + outputIsDefault = false; + continue; + } + if (!haveDefaultOutput && !outputIsDefault) { + // Insert a virtual default device so that the first enumerated + // device is the default output. 
+ if (mDefaultOutputLabel.IsVoid()) { + mDefaultOutputLabel.SetIsVoid(false); + AutoTArray resourceIds{"dom/media.ftl"_ns}; + RefPtr l10n = Localization::Create(resourceIds, /*sync*/ true); + nsAutoCString translation; + IgnoredErrorResult rv; + l10n->FormatValueSync("default-audio-output-device-label"_ns, {}, + translation, rv); + if (!rv.Failed()) { + AppendUTF8toUTF16(translation, mDefaultOutputLabel); + } + } + RefPtr info = new AudioDeviceInfo( + nullptr, mDefaultOutputLabel, u""_ns, u""_ns, + CUBEB_DEVICE_TYPE_OUTPUT, CUBEB_DEVICE_STATE_ENABLED, + CUBEB_DEVICE_PREF_ALL, CUBEB_DEVICE_FMT_ALL, + CUBEB_DEVICE_FMT_S16NE, 2, 44100, 44100, 44100, 128, 128); + exposed->AppendElement( + new MediaDevice(new MediaEngineFake(), info, u""_ns)); + } + haveDefaultOutput = true; + break; + case MediaDeviceKind::EndGuard_: + continue; + // Avoid `default:` so that `-Wswitch` catches missing + // enumerators at compile time. + } + exposed->AppendElement(device); + } + return exposed; +} + +bool MediaDevices::CanExposeInfo(MediaDeviceKind aKind) const { + switch (aKind) { + case MediaDeviceKind::Audioinput: + return mCanExposeMicrophoneInfo; + case MediaDeviceKind::Videoinput: + return mCanExposeCameraInfo; + case MediaDeviceKind::Audiooutput: + // Assumes caller has used FilterExposedDevices() + return true; + case MediaDeviceKind::EndGuard_: + break; + // Avoid `default:` so that `-Wswitch` catches missing enumerators at + // compile time. + } + MOZ_ASSERT_UNREACHABLE("unexpected MediaDeviceKind"); + return false; +} + +bool MediaDevices::ShouldQueueDeviceChange( + const MediaDeviceSet& aExposedDevices) const { + if (!mLastPhysicalDevices) { // SetupDeviceChangeListener not complete + return false; + } + RefPtr lastExposedDevices = + FilterExposedDevices(*mLastPhysicalDevices); + auto exposed = aExposedDevices.begin(); + auto exposedEnd = aExposedDevices.end(); + auto last = lastExposedDevices->begin(); + auto lastEnd = lastExposedDevices->end(); + // Lists from FilterExposedDevices may have multiple devices of the same + // kind even when only a single anonymous device of that kind should be + // exposed by enumerateDevices() (but multiple devices are currently exposed + // - bug 1528042). "devicechange" events are not queued when the number + // of such devices changes but remains non-zero. + while (exposed < exposedEnd && last < lastEnd) { + // First determine whether there is at least one device of the same kind + // in both `aExposedDevices` and `lastExposedDevices`. + // A change between zero and non-zero numbers of microphone or camera + // devices triggers a devicechange event even if that kind of device is + // not yet exposed. + MediaDeviceKind kind = (*exposed)->mKind; + if (kind != (*last)->mKind) { + return true; + } + // `exposed` and `last` have matching kind. + if (CanExposeInfo(kind)) { + // Queue "devicechange" if there has been any change in devices of this + // exposed kind. ID and kind uniquely identify a device. + if ((*exposed)->mRawID != (*last)->mRawID) { + return true; + } + ++exposed; + ++last; + continue; + } + // `aExposedDevices` and `lastExposedDevices` both have non-zero numbers + // of devices of this unexposed kind. + // Skip remaining devices of this kind because all devices of this kind + // should be exposed as a single anonymous device. + do { + ++exposed; + } while (exposed != exposedEnd && (*exposed)->mKind == kind); + do { + ++last; + } while (last != lastEnd && (*last)->mKind == kind); + } + // Queue "devicechange" if the number of exposed devices differs. 
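  // (That is, the loop above exhausted one list while the other still has
  // entries, so the two device counts cannot match.)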
+ return exposed < exposedEnd || last < lastEnd; +} + +void MediaDevices::ResumeEnumerateDevices( + nsTArray>&& aPromises, + RefPtr aExposedDevices) const { + nsCOMPtr window = GetOwner(); + if (!window) { + return; // Leave Promise pending after navigation by design. + } + MediaManager::Get() + ->AnonymizeDevices(window, std::move(aExposedDevices)) + ->Then(GetCurrentSerialEventTarget(), __func__, + [self = RefPtr(this), this, promises = std::move(aPromises)]( + const LocalDeviceSetPromise::ResolveOrRejectValue& + aLocalDevices) { + nsPIDOMWindowInner* window = GetWindowIfCurrent(); + if (!window) { + return; // Leave Promises pending after navigation by design. + } + for (const RefPtr& promise : promises) { + if (aLocalDevices.IsReject()) { + aLocalDevices.RejectValue()->Reject(promise); + } else { + ResolveEnumerateDevicesPromise( + promise, *aLocalDevices.ResolveValue()); + } + } + }); +} + +void MediaDevices::ResolveEnumerateDevicesPromise( + Promise* aPromise, const LocalMediaDeviceSet& aDevices) const { + nsCOMPtr window = GetOwner(); + auto windowId = window->WindowID(); + nsTArray> infos; + bool legacy = StaticPrefs::media_devices_enumerate_legacy_enabled(); + bool capturePermitted = + legacy && + MediaManager::Get()->IsActivelyCapturingOrHasAPermission(windowId); + + for (const RefPtr& device : aDevices) { + bool exposeInfo = CanExposeInfo(device->Kind()) || legacy; + bool exposeLabel = legacy ? capturePermitted : exposeInfo; + infos.AppendElement(MakeRefPtr( + exposeInfo ? device->mID : u""_ns, device->Kind(), + exposeLabel ? device->mName : u""_ns, + exposeInfo ? device->mGroupID : u""_ns)); + } + aPromise->MaybeResolve(std::move(infos)); +} + +already_AddRefed MediaDevices::GetDisplayMedia( + const DisplayMediaStreamConstraints& aConstraints, CallerType aCallerType, + ErrorResult& aRv) { + nsCOMPtr global = xpc::NativeGlobal(GetWrapper()); + RefPtr p = Promise::Create(global, aRv); + if (NS_WARN_IF(aRv.Failed())) { + return nullptr; + } + nsCOMPtr owner = do_QueryInterface(global); + /* If the relevant global object of this does not have transient activation, + * return a promise rejected with a DOMException object whose name attribute + * has the value InvalidStateError. */ + WindowContext* wc = owner->GetWindowContext(); + if (!wc || !wc->HasValidTransientUserGestureActivation()) { + p->MaybeRejectWithInvalidStateError( + "getDisplayMedia requires transient activation from a user gesture."); + return p.forget(); + } + /* If constraints.video is false, return a promise rejected with a newly + * created TypeError. */ + if (!MediaManager::IsOn(aConstraints.mVideo)) { + p->MaybeRejectWithTypeError("video is required"); + return p.forget(); + } + MediaStreamConstraints c; + auto& vc = c.mVideo.SetAsMediaTrackConstraints(); + + if (aConstraints.mVideo.IsMediaTrackConstraints()) { + vc = aConstraints.mVideo.GetAsMediaTrackConstraints(); + /* If CS contains a member named advanced, return a promise rejected with + * a newly created TypeError. */ + if (vc.mAdvanced.WasPassed()) { + p->MaybeRejectWithTypeError("advanced not allowed"); + return p.forget(); + } + auto getCLR = [](const auto& aCon) -> const ConstrainLongRange& { + static ConstrainLongRange empty; + return (aCon.WasPassed() && !aCon.Value().IsLong()) + ? aCon.Value().GetAsConstrainLongRange() + : empty; + }; + auto getCDR = [](auto&& aCon) -> const ConstrainDoubleRange& { + static ConstrainDoubleRange empty; + return (aCon.WasPassed() && !aCon.Value().IsDouble()) + ? 
aCon.Value().GetAsConstrainDoubleRange() + : empty; + }; + const auto& w = getCLR(vc.mWidth); + const auto& h = getCLR(vc.mHeight); + const auto& f = getCDR(vc.mFrameRate); + /* If CS contains a member whose name specifies a constrainable property + * applicable to display surfaces, and whose value in turn is a dictionary + * containing a member named either min or exact, return a promise + * rejected with a newly created TypeError. */ + if (w.mMin.WasPassed() || h.mMin.WasPassed() || f.mMin.WasPassed()) { + p->MaybeRejectWithTypeError("min not allowed"); + return p.forget(); + } + if (w.mExact.WasPassed() || h.mExact.WasPassed() || f.mExact.WasPassed()) { + p->MaybeRejectWithTypeError("exact not allowed"); + return p.forget(); + } + /* If CS contains a member whose name, failedConstraint specifies a + * constrainable property, constraint, applicable to display surfaces, and + * whose value in turn is a dictionary containing a member named max, and + * that member's value in turn is less than the constrainable property's + * floor value, then let failedConstraint be the name of the constraint, + * let message be either undefined or an informative human-readable + * message, and return a promise rejected with a new OverconstrainedError + * created by calling OverconstrainedError(failedConstraint, message). */ + // We fail early without incurring a prompt, on known-to-fail constraint + // values that don't reveal anything about the user's system. + const char* badConstraint = nullptr; + if (w.mMax.WasPassed() && w.mMax.Value() < 1) { + badConstraint = "width"; + } + if (h.mMax.WasPassed() && h.mMax.Value() < 1) { + badConstraint = "height"; + } + if (f.mMax.WasPassed() && f.mMax.Value() < 1) { + badConstraint = "frameRate"; + } + if (badConstraint) { + p->MaybeReject(MakeRefPtr( + owner, *MakeRefPtr( + MediaMgrError::Name::OverconstrainedError, "", + NS_ConvertASCIItoUTF16(badConstraint)))); + return p.forget(); + } + } + /* If the relevant settings object's responsible document is NOT fully + * active, return a promise rejected with a DOMException object whose name + * attribute has the value "InvalidStateError". */ + if (!owner->IsFullyActive()) { + p->MaybeRejectWithInvalidStateError("The document is not fully active."); + return p.forget(); + } + // We ask for "screen" sharing. + // + // If this is a privileged call or permission is disabled, this gives us full + // screen sharing by default, which is useful for internal testing. + // + // If this is a non-priviliged call, GetUserMedia() will change it to "window" + // for us. + vc.mMediaSource.Reset(); + vc.mMediaSource.Construct().AssignASCII( + dom::MediaSourceEnumValues::GetString(MediaSourceEnum::Screen)); + + RefPtr self(this); + MediaManager::Get() + ->GetUserMedia(owner, c, aCallerType) + ->Then( + GetCurrentSerialEventTarget(), __func__, + [this, self, p](RefPtr&& aStream) { + if (!GetWindowIfCurrent()) { + return; // leave promise pending after navigation. + } + p->MaybeResolve(std::move(aStream)); + }, + [this, self, p](RefPtr&& error) { + nsPIDOMWindowInner* window = GetWindowIfCurrent(); + if (!window) { + return; // leave promise pending after navigation. 
+ } + error->Reject(p); + }); + return p.forget(); +} + +already_AddRefed MediaDevices::SelectAudioOutput( + const AudioOutputOptions& aOptions, CallerType aCallerType, + ErrorResult& aRv) { + nsCOMPtr global = xpc::NativeGlobal(GetWrapper()); + RefPtr p = Promise::Create(global, aRv); + if (NS_WARN_IF(aRv.Failed())) { + return nullptr; + } + /* (This includes the expected user activation update of + * https://github.com/w3c/mediacapture-output/issues/107) + * If the relevant global object of this does not have transient activation, + * return a promise rejected with a DOMException object whose name attribute + * has the value InvalidStateError. */ + nsCOMPtr owner = do_QueryInterface(global); + WindowContext* wc = owner->GetWindowContext(); + if (!wc || !wc->HasValidTransientUserGestureActivation()) { + p->MaybeRejectWithInvalidStateError( + "selectAudioOutput requires transient user activation."); + return p.forget(); + } + RefPtr self(this); + MediaManager::Get() + ->SelectAudioOutput(owner, aOptions, aCallerType) + ->Then( + GetCurrentSerialEventTarget(), __func__, + [this, self, p](RefPtr aDevice) { + nsPIDOMWindowInner* window = GetWindowIfCurrent(); + if (!window) { + return; // Leave Promise pending after navigation by design. + } + MOZ_ASSERT(aDevice->Kind() == dom::MediaDeviceKind::Audiooutput); + mExplicitlyGrantedAudioOutputRawIds.Insert(aDevice->RawID()); + p->MaybeResolve( + MakeRefPtr(aDevice->mID, aDevice->Kind(), + aDevice->mName, aDevice->mGroupID)); + }, + [this, self, p](const RefPtr& error) { + nsPIDOMWindowInner* window = GetWindowIfCurrent(); + if (!window) { + return; // Leave Promise pending after navigation by design. + } + error->Reject(p); + }); + return p.forget(); +} + +static RefPtr CopyWithNullDeviceId( + AudioDeviceInfo* aDeviceInfo) { + MOZ_ASSERT(aDeviceInfo->Preferred()); + + nsString vendor; + aDeviceInfo->GetVendor(vendor); + uint16_t type; + aDeviceInfo->GetType(&type); + uint16_t state; + aDeviceInfo->GetState(&state); + uint16_t pref; + aDeviceInfo->GetPreferred(&pref); + uint16_t supportedFormat; + aDeviceInfo->GetSupportedFormat(&supportedFormat); + uint16_t defaultFormat; + aDeviceInfo->GetDefaultFormat(&defaultFormat); + uint32_t maxChannels; + aDeviceInfo->GetMaxChannels(&maxChannels); + uint32_t defaultRate; + aDeviceInfo->GetDefaultRate(&defaultRate); + uint32_t maxRate; + aDeviceInfo->GetMaxRate(&maxRate); + uint32_t minRate; + aDeviceInfo->GetMinRate(&minRate); + uint32_t maxLatency; + aDeviceInfo->GetMaxLatency(&maxLatency); + uint32_t minLatency; + aDeviceInfo->GetMinLatency(&minLatency); + + return MakeRefPtr( + nullptr, aDeviceInfo->Name(), aDeviceInfo->GroupID(), vendor, type, state, + pref, supportedFormat, defaultFormat, maxChannels, defaultRate, maxRate, + minRate, maxLatency, minLatency); +} + +RefPtr MediaDevices::GetSinkDevice( + const nsString& aDeviceId) { + MOZ_ASSERT(NS_IsMainThread()); + return MediaManager::Get() + ->GetPhysicalDevices() + ->Then( + GetCurrentSerialEventTarget(), __func__, + [self = RefPtr(this), this, + aDeviceId](RefPtr aRawDevices) { + nsCOMPtr window = GetOwner(); + if (!window) { + return LocalDeviceSetPromise::CreateAndReject( + new MediaMgrError(MediaMgrError::Name::AbortError), __func__); + } + // Don't filter if matching the preferred device, because that may + // not be exposed. + RefPtr devices = aDeviceId.IsEmpty() + ? 
std::move(aRawDevices) + : FilterExposedDevices(*aRawDevices); + return MediaManager::Get()->AnonymizeDevices(window, + std::move(devices)); + }, + [](RefPtr&& reason) { + MOZ_ASSERT_UNREACHABLE("GetPhysicalDevices does not reject"); + return RefPtr(); + }) + ->Then( + GetCurrentSerialEventTarget(), __func__, + [aDeviceId](RefPtr aDevices) { + RefPtr outputInfo; + // Check for a matching device. + for (const RefPtr& device : *aDevices) { + if (device->Kind() != dom::MediaDeviceKind::Audiooutput) { + continue; + } + if (aDeviceId.IsEmpty()) { + MOZ_ASSERT(device->GetAudioDeviceInfo()->Preferred(), + "First Audiooutput should be preferred"); + return SinkInfoPromise::CreateAndResolve( + CopyWithNullDeviceId(device->GetAudioDeviceInfo()), + __func__); + } else if (aDeviceId.Equals(device->mID)) { + return SinkInfoPromise::CreateAndResolve( + device->GetAudioDeviceInfo(), __func__); + } + } + /* If sinkId is not the empty string and does not match any audio + * output device identified by the result that would be provided + * by enumerateDevices(), reject p with a new DOMException whose + * name is NotFoundError and abort these substeps. */ + return SinkInfoPromise::CreateAndReject(NS_ERROR_NOT_AVAILABLE, + __func__); + }, + // aRejectMethod = + [](RefPtr&& aError) { + return SinkInfoPromise::CreateAndReject(NS_ERROR_NOT_AVAILABLE, + __func__); + }); +} + +NS_IMPL_ISUPPORTS_CYCLE_COLLECTION_INHERITED_0(MediaDevices, + DOMEventTargetHelper) +NS_IMPL_CYCLE_COLLECTION_INHERITED(MediaDevices, DOMEventTargetHelper, + mPendingEnumerateDevicesPromises) + +void MediaDevices::OnDeviceChange() { + MOZ_ASSERT(NS_IsMainThread()); + if (NS_FAILED(CheckCurrentGlobalCorrectness())) { + // This is a ghost window, don't do anything. + return; + } + + // Do not fire event to content script when + // privacy.resistFingerprinting is true. 
+ + if (nsContentUtils::ShouldResistFingerprinting( + "Guarding the more expensive RFP check with a simple one", + RFPTarget::MediaDevices)) { + nsCOMPtr window = GetOwner(); + auto* wrapper = GetWrapper(); + if (!window && wrapper) { + nsCOMPtr global = xpc::NativeGlobal(wrapper); + window = do_QueryInterface(global); + } + if (!window) { + return; + } + + if (nsGlobalWindowInner::Cast(window)->ShouldResistFingerprinting( + RFPTarget::MediaDevices)) { + return; + } + } + + mHaveUnprocessedDeviceListChange = true; + MaybeResumeDeviceExposure(); +} + +mozilla::dom::EventHandlerNonNull* MediaDevices::GetOndevicechange() { + return GetEventHandler(nsGkAtoms::ondevicechange); +} + +void MediaDevices::SetupDeviceChangeListener() { + if (mIsDeviceChangeListenerSetUp) { + return; + } + + nsPIDOMWindowInner* window = GetOwner(); + if (!window) { + return; + } + + mDeviceChangeListener = MediaManager::Get()->DeviceListChangeEvent().Connect( + GetMainThreadSerialEventTarget(), this, &MediaDevices::OnDeviceChange); + mIsDeviceChangeListenerSetUp = true; + + MediaManager::Get()->GetPhysicalDevices()->Then( + GetCurrentSerialEventTarget(), __func__, + [self = RefPtr(this), this](RefPtr aDevices) { + mLastPhysicalDevices = std::move(aDevices); + }, + [](RefPtr&& reason) { + MOZ_ASSERT_UNREACHABLE("GetPhysicalDevices does not reject"); + }); +} + +void MediaDevices::SetOndevicechange( + mozilla::dom::EventHandlerNonNull* aCallback) { + SetEventHandler(nsGkAtoms::ondevicechange, aCallback); +} + +void MediaDevices::EventListenerAdded(nsAtom* aType) { + DOMEventTargetHelper::EventListenerAdded(aType); + SetupDeviceChangeListener(); +} + +JSObject* MediaDevices::WrapObject(JSContext* aCx, + JS::Handle aGivenProto) { + return MediaDevices_Binding::Wrap(aCx, this, aGivenProto); +} + +} // namespace mozilla::dom diff --git a/dom/media/MediaDevices.h b/dom/media/MediaDevices.h new file mode 100644 index 0000000000..5390a583b3 --- /dev/null +++ b/dom/media/MediaDevices.h @@ -0,0 +1,140 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef mozilla_dom_MediaDevices_h +#define mozilla_dom_MediaDevices_h + +#include "MediaEventSource.h" +#include "js/RootingAPI.h" +#include "mozilla/AlreadyAddRefed.h" +#include "mozilla/DOMEventTargetHelper.h" +#include "mozilla/UseCounter.h" +#include "mozilla/dom/BindingDeclarations.h" +#include "mozilla/dom/MediaDeviceInfoBinding.h" +#include "nsCOMPtr.h" +#include "nsID.h" +#include "nsISupports.h" +#include "nsTHashSet.h" + +class AudioDeviceInfo; + +namespace mozilla { + +class LocalMediaDevice; +class MediaDevice; +class MediaMgrError; +class DOMMediaStream; +template +class MozPromise; + +namespace media { +template +class Refcountable; +} // namespace media + +namespace dom { + +class Promise; +struct MediaStreamConstraints; +struct DisplayMediaStreamConstraints; +struct MediaTrackSupportedConstraints; +struct AudioOutputOptions; + +class MediaDevices final : public DOMEventTargetHelper { + public: + using StreamPromise = + MozPromise, RefPtr, true>; + using SinkInfoPromise = MozPromise, nsresult, true>; + + explicit MediaDevices(nsPIDOMWindowInner* aWindow); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaDevices, DOMEventTargetHelper) + + JSObject* WrapObject(JSContext* cx, + JS::Handle aGivenProto) override; + + // No code needed, as MediaTrackSupportedConstraints members default to true. + void GetSupportedConstraints(MediaTrackSupportedConstraints& aResult){}; + + already_AddRefed GetUserMedia( + const MediaStreamConstraints& aConstraints, CallerType aCallerType, + ErrorResult& aRv); + + RefPtr GetUserMedia(nsPIDOMWindowInner* aWindow, + const MediaStreamConstraints& aConstraints, + CallerType aCallerType); + + already_AddRefed EnumerateDevices(ErrorResult& aRv); + + already_AddRefed GetDisplayMedia( + const DisplayMediaStreamConstraints& aConstraints, CallerType aCallerType, + ErrorResult& aRv); + + already_AddRefed SelectAudioOutput( + const AudioOutputOptions& aOptions, CallerType aCallerType, + ErrorResult& aRv); + + // Get the sink that corresponds to the given device id. + // The returned promise will be resolved with the device + // information if the aDeviceId matches a device that would be exposed by + // enumerateDevices(). + // The promise will be rejected with NS_ERROR_NOT_AVAILABLE if aDeviceId + // does not match any exposed device. + RefPtr GetSinkDevice(const nsString& aDeviceId); + + // Called when MediaManager encountered a change in its device lists. 
+ void OnDeviceChange(); + + void SetupDeviceChangeListener(); + + mozilla::dom::EventHandlerNonNull* GetOndevicechange(); + void SetOndevicechange(mozilla::dom::EventHandlerNonNull* aCallback); + + void EventListenerAdded(nsAtom* aType) override; + using DOMEventTargetHelper::EventListenerAdded; + + void BackgroundStateChanged() { MaybeResumeDeviceExposure(); } + void WindowResumed() { MaybeResumeDeviceExposure(); } + void BrowserWindowBecameActive() { MaybeResumeDeviceExposure(); } + + private: + using MediaDeviceSet = nsTArray>; + using MediaDeviceSetRefCnt = media::Refcountable; + using LocalMediaDeviceSet = nsTArray>; + + virtual ~MediaDevices(); + void MaybeResumeDeviceExposure(); + void ResumeEnumerateDevices( + nsTArray>&& aPromises, + RefPtr aExposedDevices) const; + RefPtr FilterExposedDevices( + const MediaDeviceSet& aDevices) const; + bool CanExposeInfo(MediaDeviceKind aKind) const; + bool ShouldQueueDeviceChange(const MediaDeviceSet& aExposedDevices) const; + void ResolveEnumerateDevicesPromise( + Promise* aPromise, const LocalMediaDeviceSet& aDevices) const; + + nsTHashSet mExplicitlyGrantedAudioOutputRawIds; + nsTArray> mPendingEnumerateDevicesPromises; + // Set only once, if and when required. + mutable nsString mDefaultOutputLabel; + + // Connect/Disconnect on main thread only + MediaEventListener mDeviceChangeListener; + // Ordered set of the system physical devices when devicechange event + // decisions were last performed. + RefPtr mLastPhysicalDevices; + bool mIsDeviceChangeListenerSetUp = false; + bool mHaveUnprocessedDeviceListChange = false; + bool mCanExposeMicrophoneInfo = false; + bool mCanExposeCameraInfo = false; + + void RecordAccessTelemetry(const UseCounter counter) const; +}; + +} // namespace dom +} // namespace mozilla + +#endif // mozilla_dom_MediaDevices_h diff --git a/dom/media/MediaEventSource.h b/dom/media/MediaEventSource.h new file mode 100644 index 0000000000..cb0a851ffe --- /dev/null +++ b/dom/media/MediaEventSource.h @@ -0,0 +1,594 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MediaEventSource_h_ +#define MediaEventSource_h_ + +#include +#include + +#include "mozilla/AbstractThread.h" +#include "mozilla/Atomics.h" +#include "mozilla/DataMutex.h" +#include "mozilla/Mutex.h" + +#include "mozilla/Unused.h" + +#include "nsISupportsImpl.h" +#include "nsTArray.h" +#include "nsThreadUtils.h" + +namespace mozilla { + +/** + * A thread-safe tool to communicate "revocation" across threads. It is used to + * disconnect a listener from the event source to prevent future notifications + * from coming. Revoke() can be called on any thread. However, it is recommended + * to be called on the target thread to avoid race condition. + * + * RevocableToken is not exposed to the client code directly. + * Use MediaEventListener below to do the job. + */ +class RevocableToken { + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RevocableToken); + + public: + RevocableToken() = default; + + virtual void Revoke() = 0; + virtual bool IsRevoked() const = 0; + + protected: + // Virtual destructor is required since we might delete a Listener object + // through its base type pointer. 
+ virtual ~RevocableToken() = default; +}; + +enum class ListenerPolicy : int8_t { + // Allow at most one listener. Move will be used when possible + // to pass the event data to save copy. + Exclusive, + // Allow multiple listeners. Event data will always be copied when passed + // to the listeners. + NonExclusive +}; + +namespace detail { + +/** + * Define how an event type is passed internally in MediaEventSource and to the + * listeners. Specialized for the void type to pass a dummy bool instead. + */ +template +struct EventTypeTraits { + typedef T ArgType; +}; + +template <> +struct EventTypeTraits { + typedef bool ArgType; +}; + +/** + * Test if a method function or lambda accepts one or more arguments. + */ +template +class TakeArgsHelper { + template + static std::false_type test(void (C::*)(), int); + template + static std::false_type test(void (C::*)() const, int); + template + static std::false_type test(void (C::*)() volatile, int); + template + static std::false_type test(void (C::*)() const volatile, int); + template + static std::false_type test(F&&, decltype(std::declval()(), 0)); + static std::true_type test(...); + + public: + typedef decltype(test(std::declval(), 0)) type; +}; + +template +struct TakeArgs : public TakeArgsHelper::type {}; + +template +struct EventTarget; + +template <> +struct EventTarget { + static void Dispatch(nsIEventTarget* aTarget, + already_AddRefed aTask) { + aTarget->Dispatch(std::move(aTask), NS_DISPATCH_NORMAL); + } + static bool IsOnTargetThread(nsIEventTarget* aTarget) { + bool rv; + aTarget->IsOnCurrentThread(&rv); + return rv; + } +}; + +template <> +struct EventTarget { + static void Dispatch(AbstractThread* aTarget, + already_AddRefed aTask) { + aTarget->Dispatch(std::move(aTask)); + } + static bool IsOnTargetThread(AbstractThread* aTarget) { + bool rv; + aTarget->IsOnCurrentThread(&rv); + return rv; + } +}; + +/** + * Encapsulate a raw pointer to be captured by a lambda without causing + * static-analysis errors. + */ +template +class RawPtr { + public: + explicit RawPtr(T* aPtr) : mPtr(aPtr) {} + T* get() const { return mPtr; } + + private: + T* const mPtr; +}; + +template +class Listener : public RevocableToken { + public: + template + void Dispatch(Ts&&... aEvents) { + if (CanTakeArgs()) { + DispatchTask(NewRunnableMethod&&...>( + "detail::Listener::ApplyWithArgs", this, &Listener::ApplyWithArgs, + std::forward(aEvents)...)); + } else { + DispatchTask(NewRunnableMethod("detail::Listener::ApplyWithNoArgs", this, + &Listener::ApplyWithNoArgs)); + } + } + + private: + virtual void DispatchTask(already_AddRefed aTask) = 0; + + // True if the underlying listener function takes non-zero arguments. + virtual bool CanTakeArgs() const = 0; + // Pass the event data to the underlying listener function. Should be called + // only when CanTakeArgs() returns true. + virtual void ApplyWithArgs(As&&... aEvents) = 0; + // Invoke the underlying listener function. Should be called only when + // CanTakeArgs() returns false. + virtual void ApplyWithNoArgs() = 0; +}; + +/** + * Store the registered target thread and function so it knows where and to + * whom to send the event data. + */ +template +class ListenerImpl : public Listener { + // Strip CV and reference from Function. 
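+  // (For example, std::decay_t<const F&> and std::decay_t<F&&> are both F.)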
+ using FunctionStorage = std::decay_t; + using SelfType = ListenerImpl; + + public: + ListenerImpl(Target* aTarget, Function&& aFunction) + : mData(MakeRefPtr(aTarget, std::forward(aFunction)), + "MediaEvent ListenerImpl::mData") { + MOZ_DIAGNOSTIC_ASSERT(aTarget); + } + + protected: + virtual ~ListenerImpl() { + MOZ_ASSERT(IsRevoked(), "Must disconnect the listener."); + } + + private: + void DispatchTask(already_AddRefed aTask) override { + RefPtr data; + { + auto d = mData.Lock(); + data = *d; + } + if (NS_WARN_IF(!data)) { + // already_AddRefed doesn't allow releasing the ref, so transfer it first. + RefPtr temp(aTask); + return; + } + EventTarget::Dispatch(data->mTarget, std::move(aTask)); + } + + bool CanTakeArgs() const override { return TakeArgs::value; } + + // |F| takes one or more arguments. + template + std::enable_if_t::value, void> ApplyWithArgsImpl( + Target* aTarget, const F& aFunc, As&&... aEvents) { + MOZ_DIAGNOSTIC_ASSERT(EventTarget::IsOnTargetThread(aTarget)); + aFunc(std::move(aEvents)...); + } + + // |F| takes no arguments. + template + std::enable_if_t::value, void> ApplyWithArgsImpl( + Target* aTarget, const F& aFunc, As&&... aEvents) { + MOZ_CRASH("Call ApplyWithNoArgs instead."); + } + + void ApplyWithArgs(As&&... aEvents) override { + MOZ_RELEASE_ASSERT(TakeArgs::value); + // Don't call the listener if it is disconnected. + RefPtr data; + { + auto d = mData.Lock(); + data = *d; + } + if (!data) { + return; + } + MOZ_DIAGNOSTIC_ASSERT(EventTarget::IsOnTargetThread(data->mTarget)); + ApplyWithArgsImpl(data->mTarget, data->mFunction, std::move(aEvents)...); + } + + // |F| takes one or more arguments. + template + std::enable_if_t::value, void> ApplyWithNoArgsImpl( + Target* aTarget, const F& aFunc) { + MOZ_CRASH("Call ApplyWithArgs instead."); + } + + // |F| takes no arguments. + template + std::enable_if_t::value, void> ApplyWithNoArgsImpl( + Target* aTarget, const F& aFunc) { + MOZ_DIAGNOSTIC_ASSERT(EventTarget::IsOnTargetThread(aTarget)); + aFunc(); + } + + void ApplyWithNoArgs() override { + MOZ_RELEASE_ASSERT(!TakeArgs::value); + // Don't call the listener if it is disconnected. + RefPtr data; + { + auto d = mData.Lock(); + data = *d; + } + if (!data) { + return; + } + MOZ_DIAGNOSTIC_ASSERT(EventTarget::IsOnTargetThread(data->mTarget)); + ApplyWithNoArgsImpl(data->mTarget, data->mFunction); + } + + void Revoke() override { + { + auto data = mData.Lock(); + *data = nullptr; + } + } + + bool IsRevoked() const override { + auto data = mData.Lock(); + return !*data; + } + + struct RefCountedMediaEventListenerData { + // Keep ref-counting here since Data holds a template member, leading to + // instances of varying size, which the memory leak logging system dislikes. + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefCountedMediaEventListenerData) + protected: + virtual ~RefCountedMediaEventListenerData() = default; + }; + struct Data : public RefCountedMediaEventListenerData { + Data(RefPtr aTarget, Function&& aFunction) + : mTarget(std::move(aTarget)), + mFunction(std::forward(aFunction)) {} + const RefPtr mTarget; + FunctionStorage mFunction; + }; + + // Storage for target and function. Also used to track revocation. + mutable DataMutex> mData; +}; + +/** + * Return true if any type is a reference type. 
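+ * For example, IsAnyReference<int, char&>::value is true, while
+ * IsAnyReference<int, char>::value is false.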
+ */ +template +struct IsAnyReference { + static const bool value = + std::is_reference_v || IsAnyReference::value; +}; + +template +struct IsAnyReference { + static const bool value = std::is_reference_v; +}; + +} // namespace detail + +template +class MediaEventSourceImpl; + +/** + * Not thread-safe since this is not meant to be shared and therefore only + * move constructor is provided. Used to hold the result of + * MediaEventSource::Connect() and call Disconnect() to disconnect the + * listener from an event source. + */ +class MediaEventListener { + template + friend class MediaEventSourceImpl; + + public: + MediaEventListener() = default; + + MediaEventListener(MediaEventListener&& aOther) + : mToken(std::move(aOther.mToken)) {} + + MediaEventListener& operator=(MediaEventListener&& aOther) { + MOZ_ASSERT(!mToken, "Must disconnect the listener."); + mToken = std::move(aOther.mToken); + return *this; + } + + ~MediaEventListener() { + MOZ_ASSERT(!mToken, "Must disconnect the listener."); + } + + void Disconnect() { + mToken->Revoke(); + mToken = nullptr; + } + + void DisconnectIfExists() { + if (mToken) { + Disconnect(); + } + } + + private: + // Avoid exposing RevocableToken directly to the client code so that + // listeners can be disconnected in a controlled manner. + explicit MediaEventListener(RevocableToken* aToken) : mToken(aToken) {} + RefPtr mToken; +}; + +/** + * A generic and thread-safe class to implement the observer pattern. + */ +template +class MediaEventSourceImpl { + static_assert(!detail::IsAnyReference::value, + "Ref-type not supported!"); + + template + using ArgType = typename detail::EventTypeTraits::ArgType; + + typedef detail::Listener...> Listener; + + template + using ListenerImpl = detail::ListenerImpl...>; + + template + using TakeArgs = detail::TakeArgs; + + void PruneListeners() { + mListeners.RemoveElementsBy( + [](const auto& listener) { return listener->IsRevoked(); }); + } + + template + MediaEventListener ConnectInternal(Target* aTarget, Function&& aFunction) { + MutexAutoLock lock(mMutex); + PruneListeners(); + MOZ_ASSERT(Lp == ListenerPolicy::NonExclusive || mListeners.IsEmpty()); + auto l = mListeners.AppendElement(); + *l = new ListenerImpl(aTarget, + std::forward(aFunction)); + return MediaEventListener(*l); + } + + // |Method| takes one or more arguments. + template + std::enable_if_t::value, MediaEventListener> ConnectInternal( + Target* aTarget, This* aThis, Method aMethod) { + detail::RawPtr thiz(aThis); + return ConnectInternal(aTarget, [=](ArgType&&... aEvents) { + (thiz.get()->*aMethod)(std::move(aEvents)...); + }); + } + + // |Method| takes no arguments. Don't bother passing the event data. + template + std::enable_if_t::value, MediaEventListener> + ConnectInternal(Target* aTarget, This* aThis, Method aMethod) { + detail::RawPtr thiz(aThis); + return ConnectInternal(aTarget, [=]() { (thiz.get()->*aMethod)(); }); + } + + public: + /** + * Register a function to receive notifications from the event source. + * + * @param aTarget The target thread on which the function will run. + * @param aFunction A function to be called on the target thread. The function + * parameter must be convertible from |EventType|. + * @return An object used to disconnect from the event source. 
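+   *
+   * A usage sketch (the event type, thread and variable names are
+   * illustrative, not part of this interface):
+   *
+   *   MediaEventListener listener = source.Connect(
+   *       thread, [](const SomeEvent& aEvent) {
+   *         // Runs asynchronously on |thread|.
+   *       });
+   *   // ...
+   *   listener.DisconnectIfExists();  // required before |listener| dies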
+ */ + template + MediaEventListener Connect(AbstractThread* aTarget, Function&& aFunction) { + return ConnectInternal(aTarget, std::forward(aFunction)); + } + + template + MediaEventListener Connect(nsIEventTarget* aTarget, Function&& aFunction) { + return ConnectInternal(aTarget, std::forward(aFunction)); + } + + /** + * As above. + * + * Note we deliberately keep a weak reference to |aThis| in order not to + * change its lifetime. This is because notifications are dispatched + * asynchronously and removing a listener doesn't always break the reference + * cycle for the pending event could still hold a reference to |aThis|. + * + * The caller must call MediaEventListener::Disconnect() to avoid dangling + * pointers. + */ + template + MediaEventListener Connect(AbstractThread* aTarget, This* aThis, + Method aMethod) { + return ConnectInternal(aTarget, aThis, aMethod); + } + + template + MediaEventListener Connect(nsIEventTarget* aTarget, This* aThis, + Method aMethod) { + return ConnectInternal(aTarget, aThis, aMethod); + } + + protected: + MediaEventSourceImpl() : mMutex("MediaEventSourceImpl::mMutex") {} + + template + void NotifyInternal(Ts&&... aEvents) { + MutexAutoLock lock(mMutex); + int32_t last = static_cast(mListeners.Length()) - 1; + for (int32_t i = last; i >= 0; --i) { + auto&& l = mListeners[i]; + // Remove disconnected listeners. + // It is not optimal but is simple and works well. + if (l->IsRevoked()) { + mListeners.RemoveElementAt(i); + continue; + } + l->Dispatch(std::forward(aEvents)...); + } + } + + private: + Mutex mMutex MOZ_UNANNOTATED; + nsTArray> mListeners; +}; + +template +using MediaEventSource = + MediaEventSourceImpl; + +template +using MediaEventSourceExc = + MediaEventSourceImpl; + +/** + * A class to separate the interface of event subject (MediaEventSource) + * and event publisher. Mostly used as a member variable to publish events + * to the listeners. + */ +template +class MediaEventProducer : public MediaEventSource { + public: + template + void Notify(Ts&&... aEvents) { + // Pass lvalues to prevent move in NonExclusive mode. + this->NotifyInternal(aEvents...); + } +}; + +/** + * Specialization for void type. A dummy bool is passed to NotifyInternal + * since there is no way to pass a void value. + */ +template <> +class MediaEventProducer : public MediaEventSource { + public: + void Notify() { this->NotifyInternal(true /* dummy */); } +}; + +/** + * A producer allowing at most one listener. + */ +template +class MediaEventProducerExc : public MediaEventSourceExc { + public: + template + void Notify(Ts&&... aEvents) { + this->NotifyInternal(std::forward(aEvents)...); + } +}; + +/** + * A class that facilitates forwarding MediaEvents from multiple sources of the + * same type into a single source. + * + * Lifetimes are convenient. A forwarded source is disconnected either by + * the source itself going away, or the forwarder being destroyed. + * + * Not threadsafe. The caller is responsible for calling Forward() in a + * threadsafe manner. 
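+ *
+ * A usage sketch (names are illustrative; the producers are assumed to be
+ * MediaEventProducer<int>):
+ *
+ *   MediaEventForwarder<int> forwarder(eventTarget);
+ *   forwarder.Forward(producerA);
+ *   forwarder.Forward(producerB);
+ *   // Listeners connected to |forwarder| now receive events from both.
+ *   forwarder.DisconnectAll();  // must run before |forwarder| is destroyed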
+ */ +template +class MediaEventForwarder : public MediaEventSource { + public: + template + using ArgType = typename detail::EventTypeTraits::ArgType; + + explicit MediaEventForwarder(nsCOMPtr aEventTarget) + : mEventTarget(std::move(aEventTarget)) {} + + MediaEventForwarder(MediaEventForwarder&& aOther) + : mEventTarget(aOther.mEventTarget), + mListeners(std::move(aOther.mListeners)) {} + + ~MediaEventForwarder() { MOZ_ASSERT(mListeners.IsEmpty()); } + + MediaEventForwarder& operator=(MediaEventForwarder&& aOther) { + MOZ_RELEASE_ASSERT(mEventTarget == aOther.mEventTarget); + MOZ_ASSERT(mListeners.IsEmpty()); + mListeners = std::move(aOther.mListeners); + } + + void Forward(MediaEventSource& aSource) { + // Forwarding a rawptr `this` here is fine, since DisconnectAll disconnect + // all mListeners synchronously and prevents this handler from running. + mListeners.AppendElement( + aSource.Connect(mEventTarget, [this](ArgType&&... aEvents) { + this->NotifyInternal(std::forward...>(aEvents)...); + })); + } + + template + void ForwardIf(MediaEventSource& aSource, Function&& aFunction) { + // Forwarding a rawptr `this` here is fine, since DisconnectAll disconnect + // all mListeners synchronously and prevents this handler from running. + mListeners.AppendElement(aSource.Connect( + mEventTarget, [this, func = aFunction](ArgType&&... aEvents) { + if (!func()) { + return; + } + this->NotifyInternal(std::forward...>(aEvents)...); + })); + } + + void DisconnectAll() { + for (auto& l : mListeners) { + l.Disconnect(); + } + mListeners.Clear(); + } + + private: + const nsCOMPtr mEventTarget; + nsTArray mListeners; +}; + +} // namespace mozilla + +#endif // MediaEventSource_h_ diff --git a/dom/media/MediaFormatReader.cpp b/dom/media/MediaFormatReader.cpp new file mode 100644 index 0000000000..9553e67b00 --- /dev/null +++ b/dom/media/MediaFormatReader.cpp @@ -0,0 +1,3454 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "MediaFormatReader.h" + +#include +#include +#include + +#include "AllocationPolicy.h" +#ifdef MOZ_AV1 +# include "AOMDecoder.h" +#endif +#include "DecoderBenchmark.h" +#include "MediaData.h" +#include "MediaDataDecoderProxy.h" +#include "MediaInfo.h" +#include "MP4Decoder.h" +#include "PDMFactory.h" +#include "PerformanceRecorder.h" +#include "VideoFrameContainer.h" +#include "VideoUtils.h" +#include "VPXDecoder.h" +#include "mozilla/AbstractThread.h" +#include "mozilla/CDMProxy.h" +#include "mozilla/ClearOnShutdown.h" +#include "mozilla/NotNull.h" +#include "mozilla/Preferences.h" +#include "mozilla/ProfilerLabels.h" +#include "mozilla/ProfilerMarkers.h" +#include "mozilla/SharedThreadPool.h" +#include "mozilla/StaticPrefs_media.h" +#include "mozilla/TaskQueue.h" +#include "mozilla/Unused.h" +#include "nsContentUtils.h" +#include "nsLiteralString.h" +#include "nsPrintfCString.h" +#include "nsTHashSet.h" + +using namespace mozilla::media; + +static mozilla::LazyLogModule sFormatDecoderLog("MediaFormatReader"); +mozilla::LazyLogModule gMediaDemuxerLog("MediaDemuxer"); + +#define LOG(arg, ...) \ + DDMOZ_LOG(sFormatDecoderLog, mozilla::LogLevel::Debug, "::%s: " arg, \ + __func__, ##__VA_ARGS__) +#define LOGV(arg, ...) 
\
+  DDMOZ_LOG(sFormatDecoderLog, mozilla::LogLevel::Verbose, "::%s: " arg, \
+            __func__, ##__VA_ARGS__)
+
+#define NS_DispatchToMainThread(...) CompileError_UseAbstractMainThreadInstead
+
+namespace mozilla {
+
+using MediaDataDecoderID = void*;
+
+/**
+ * This class tracks shutdown promises to ensure all decoders are shut down
+ * completely before MFR continues the rest of the shutdown procedure.
+ */
+class MediaFormatReader::ShutdownPromisePool {
+ public:
+  ShutdownPromisePool()
+      : mOnShutdownComplete(new ShutdownPromise::Private(__func__)) {}
+
+  // Return a promise which will be resolved when all the tracked promises
+  // are resolved. Note no more promises should be added for tracking once
+  // this function is called.
+  RefPtr<ShutdownPromise> Shutdown();
+
+  // Track a shutdown promise.
+  void Track(const RefPtr<ShutdownPromise>& aPromise);
+
+  // Shut down a decoder and track its shutdown promise.
+  void ShutdownDecoder(already_AddRefed<MediaDataDecoder> aDecoder) {
+    Track(RefPtr<MediaDataDecoder>(aDecoder)->Shutdown());
+  }
+
+ private:
+  bool mShutdown = false;
+  const RefPtr<ShutdownPromise::Private> mOnShutdownComplete;
+  nsTHashSet<RefPtr<ShutdownPromise>> mPromises;
+};
+
+RefPtr<ShutdownPromise> MediaFormatReader::ShutdownPromisePool::Shutdown() {
+  MOZ_DIAGNOSTIC_ASSERT(!mShutdown);
+  mShutdown = true;
+  if (mPromises.Count() == 0) {
+    mOnShutdownComplete->Resolve(true, __func__);
+  }
+  return mOnShutdownComplete;
+}
+
+void MediaFormatReader::ShutdownPromisePool::Track(
+    const RefPtr<ShutdownPromise>& aPromise) {
+  MOZ_DIAGNOSTIC_ASSERT(!mShutdown);
+  MOZ_DIAGNOSTIC_ASSERT(!mPromises.Contains(aPromise));
+  mPromises.Insert(aPromise);
+  aPromise->Then(AbstractThread::GetCurrent(), __func__, [aPromise, this]() {
+    MOZ_DIAGNOSTIC_ASSERT(mPromises.Contains(aPromise));
+    mPromises.Remove(aPromise);
+    if (mShutdown && mPromises.Count() == 0) {
+      mOnShutdownComplete->Resolve(true, __func__);
+    }
+  });
+}
+
+void MediaFormatReader::DecoderData::ShutdownDecoder() {
+  MOZ_ASSERT(mOwner->OnTaskQueue());
+
+  MutexAutoLock lock(mMutex);
+
+  if (!mDecoder) {
+    // No decoder to shut down.
+    return;
+  }
+
+  if (mFlushing) {
+    // Flush is in action. Shutdown will be initiated after flush completes.
+    MOZ_DIAGNOSTIC_ASSERT(mShutdownPromise);
+    mOwner->mShutdownPromisePool->Track(mShutdownPromise->Ensure(__func__));
+    // The order of decoder creation and shutdown is handled by LocalAllocPolicy
+    // and ShutdownPromisePool. MFR can now reset these members to a fresh state
+    // and be ready to create new decoders again without explicitly waiting for
+    // flush/shutdown to complete.
+    mShutdownPromise = nullptr;
+    mFlushing = false;
+  } else {
+    // No flush is in action. We can shut down the decoder now.
+    mOwner->mShutdownPromisePool->Track(mDecoder->Shutdown());
+  }
+
+  // mShutdownPromisePool will handle the order of decoder shutdown so
+  // we can forget mDecoder and be ready to create a new one.
+  mDecoder = nullptr;
+  mDescription = "shutdown"_ns;
+  mHasReportedVideoHardwareSupportTelemtry = false;
+  mOwner->ScheduleUpdate(mType == MediaData::Type::AUDIO_DATA
+                             ? TrackType::kAudioTrack
+                             : TrackType::kVideoTrack);
+}
+
+void MediaFormatReader::DecoderData::Flush() {
+  AUTO_PROFILER_LABEL("MediaFormatReader::Flush", MEDIA_PLAYBACK);
+  MOZ_ASSERT(mOwner->OnTaskQueue());
+
+  if (mFlushing || mFlushed) {
+    // Flush still pending or already flushed, nothing more to do.
+ return; + } + mDecodeRequest.DisconnectIfExists(); + mDrainRequest.DisconnectIfExists(); + mDrainState = DrainState::None; + CancelWaitingForKey(); + mOutput.Clear(); + mNumSamplesInput = 0; + mNumSamplesOutput = 0; + mSizeOfQueue = 0; + if (mDecoder) { + TrackType type = mType == MediaData::Type::AUDIO_DATA + ? TrackType::kAudioTrack + : TrackType::kVideoTrack; + mFlushing = true; + MOZ_DIAGNOSTIC_ASSERT(!mShutdownPromise); + mShutdownPromise = new SharedShutdownPromiseHolder(); + RefPtr p = mShutdownPromise; + RefPtr d = mDecoder; + DDLOGEX2("MediaFormatReader::DecoderData", this, DDLogCategory::Log, + "flushing", DDNoValue{}); + mDecoder->Flush()->Then( + mOwner->OwnerThread(), __func__, + [type, this, p, d]() { + AUTO_PROFILER_LABEL("MediaFormatReader::Flush:Resolved", + MEDIA_PLAYBACK); + DDLOGEX2("MediaFormatReader::DecoderData", this, DDLogCategory::Log, + "flushed", DDNoValue{}); + if (!p->IsEmpty()) { + // Shutdown happened before flush completes. + // Let's continue to shut down the decoder. Note + // we don't access |this| because this decoder + // is no longer managed by MFR::DecoderData. + d->Shutdown()->ChainTo(p->Steal(), __func__); + return; + } + mFlushing = false; + mShutdownPromise = nullptr; + mOwner->ScheduleUpdate(type); + }, + [type, this, p, d](const MediaResult& aError) { + AUTO_PROFILER_LABEL("MediaFormatReader::Flush:Rejected", + MEDIA_PLAYBACK); + DDLOGEX2("MediaFormatReader::DecoderData", this, DDLogCategory::Log, + "flush_error", aError); + if (!p->IsEmpty()) { + d->Shutdown()->ChainTo(p->Steal(), __func__); + return; + } + mFlushing = false; + mShutdownPromise = nullptr; + mOwner->NotifyError(type, aError); + }); + } + mFlushed = true; +} + +class MediaFormatReader::DecoderFactory { + using InitPromise = MediaDataDecoder::InitPromise; + using TokenPromise = AllocPolicy::Promise; + using Token = AllocPolicy::Token; + using CreateDecoderPromise = PlatformDecoderModule::CreateDecoderPromise; + + public: + explicit DecoderFactory(MediaFormatReader* aOwner) + : mAudio(aOwner->mAudio, TrackInfo::kAudioTrack, aOwner->OwnerThread()), + mVideo(aOwner->mVideo, TrackInfo::kVideoTrack, aOwner->OwnerThread()), + mOwner(WrapNotNull(aOwner)) { + DecoderDoctorLogger::LogConstruction("MediaFormatReader::DecoderFactory", + this); + DecoderDoctorLogger::LinkParentAndChild( + aOwner, "decoder factory", "MediaFormatReader::DecoderFactory", this); + } + + ~DecoderFactory() { + DecoderDoctorLogger::LogDestruction("MediaFormatReader::DecoderFactory", + this); + } + + void CreateDecoder(TrackType aTrack); + + // Shutdown any decoder pending initialization and reset mAudio/mVideo to its + // pristine state so CreateDecoder() is ready to be called again immediately. + void ShutdownDecoder(TrackType aTrack) { + MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack || + aTrack == TrackInfo::kVideoTrack); + auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo; + data.mPolicy->Cancel(); + data.mTokenRequest.DisconnectIfExists(); + if (data.mLiveToken) { + // We haven't completed creation of the decoder, and it hasn't been + // initialised yet. + data.mLiveToken = nullptr; + // The decoder will be shutdown as soon as it's available and tracked by + // the ShutdownPromisePool. 
+ mOwner->mShutdownPromisePool->Track(data.mCreateDecoderPromise->Then( + mOwner->mTaskQueue, __func__, + [](CreateDecoderPromise::ResolveOrRejectValue&& aResult) { + if (aResult.IsReject()) { + return ShutdownPromise::CreateAndResolve(true, __func__); + } + return aResult.ResolveValue()->Shutdown(); + })); + // Free the token to leave room for a new decoder. + data.mToken = nullptr; + } + data.mInitRequest.DisconnectIfExists(); + if (data.mDecoder) { + mOwner->mShutdownPromisePool->ShutdownDecoder(data.mDecoder.forget()); + } + data.mStage = Stage::None; + MOZ_ASSERT(!data.mToken); + } + + private: + enum class Stage : int8_t { None, WaitForToken, CreateDecoder, WaitForInit }; + + struct Data { + Data(DecoderData& aOwnerData, TrackType aTrack, TaskQueue* aThread) + : mOwnerData(aOwnerData), + mTrack(aTrack), + mPolicy(new SingleAllocPolicy(aTrack, aThread)) {} + DecoderData& mOwnerData; + const TrackType mTrack; + RefPtr mPolicy; + Stage mStage = Stage::None; + RefPtr mToken; + RefPtr mDecoder; + MozPromiseRequestHolder mTokenRequest; + struct DecoderCancelled : public SupportsWeakPtr { + NS_INLINE_DECL_REFCOUNTING_ONEVENTTARGET(DecoderCancelled) + private: + ~DecoderCancelled() = default; + }; + // Set when decoder is about to be created. If cleared before the decoder + // creation promise is resolved; it indicates that Shutdown() was called and + // further processing such as initialization should stop. + RefPtr mLiveToken; + RefPtr mCreateDecoderPromise; + MozPromiseRequestHolder mInitRequest; + } mAudio, mVideo; + + void RunStage(Data& aData); + void DoCreateDecoder(Data& aData); + void DoInitDecoder(Data& aData); + + // guaranteed to be valid by the owner. + const NotNull mOwner; +}; + +void MediaFormatReader::DecoderFactory::CreateDecoder(TrackType aTrack) { + MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack || + aTrack == TrackInfo::kVideoTrack); + Data& data = aTrack == TrackInfo::kAudioTrack ? 
mAudio : mVideo; + MOZ_DIAGNOSTIC_ASSERT_IF(mOwner->GetDecoderData(data.mTrack).IsEncrypted(), + mOwner->mCDMProxy); + RunStage(data); +} + +void MediaFormatReader::DecoderFactory::RunStage(Data& aData) { + switch (aData.mStage) { + case Stage::None: { + MOZ_DIAGNOSTIC_ASSERT(!aData.mToken); + aData.mPolicy->Alloc() + ->Then( + mOwner->OwnerThread(), __func__, + [this, &aData](RefPtr aToken) { + aData.mTokenRequest.Complete(); + aData.mToken = std::move(aToken); + aData.mStage = Stage::CreateDecoder; + RunStage(aData); + }, + [&aData]() { + aData.mTokenRequest.Complete(); + aData.mStage = Stage::None; + }) + ->Track(aData.mTokenRequest); + aData.mStage = Stage::WaitForToken; + break; + } + + case Stage::WaitForToken: { + MOZ_DIAGNOSTIC_ASSERT(!aData.mToken); + MOZ_DIAGNOSTIC_ASSERT(aData.mTokenRequest.Exists()); + break; + } + + case Stage::CreateDecoder: { + MOZ_DIAGNOSTIC_ASSERT(aData.mToken); + MOZ_DIAGNOSTIC_ASSERT(!aData.mDecoder); + MOZ_DIAGNOSTIC_ASSERT(!aData.mInitRequest.Exists()); + + DoCreateDecoder(aData); + aData.mStage = Stage::WaitForInit; + break; + } + + case Stage::WaitForInit: { + MOZ_DIAGNOSTIC_ASSERT((aData.mDecoder && aData.mInitRequest.Exists()) || + aData.mLiveToken); + break; + } + } +} + +void MediaFormatReader::DecoderFactory::DoCreateDecoder(Data& aData) { + AUTO_PROFILER_LABEL("DecoderFactory::DoCreateDecoder", MEDIA_PLAYBACK); + auto& ownerData = aData.mOwnerData; + auto& decoder = mOwner->GetDecoderData(aData.mTrack); + + RefPtr platform = new PDMFactory(); + if (decoder.IsEncrypted()) { + MOZ_DIAGNOSTIC_ASSERT(mOwner->mCDMProxy); + platform->SetCDMProxy(mOwner->mCDMProxy); + } + + RefPtr p; + MediaFormatReader* owner = mOwner; + auto onWaitingForKeyEvent = + [owner = ThreadSafeWeakPtr(owner)]() { + RefPtr mfr(owner); + MOZ_DIAGNOSTIC_ASSERT(mfr, "The MediaFormatReader didn't wait for us"); + return mfr ? &mfr->OnTrackWaitingForKeyProducer() : nullptr; + }; + + switch (aData.mTrack) { + case TrackInfo::kAudioTrack: { + p = platform->CreateDecoder( + {*ownerData.GetCurrentInfo()->GetAsAudioInfo(), mOwner->mCrashHelper, + CreateDecoderParams::UseNullDecoder(ownerData.mIsNullDecode), + TrackInfo::kAudioTrack, std::move(onWaitingForKeyEvent), + mOwner->mMediaEngineId, mOwner->mTrackingId}); + break; + } + + case TrackType::kVideoTrack: { + // Decoders use the layers backend to decide if they can use hardware + // decoding, so specify LAYERS_NONE if we want to forcibly disable it. + using Option = CreateDecoderParams::Option; + using OptionSet = CreateDecoderParams::OptionSet; + + p = platform->CreateDecoder( + {*ownerData.GetCurrentInfo()->GetAsVideoInfo(), + mOwner->mKnowsCompositor, mOwner->GetImageContainer(), + mOwner->mCrashHelper, + CreateDecoderParams::UseNullDecoder(ownerData.mIsNullDecode), + TrackType::kVideoTrack, std::move(onWaitingForKeyEvent), + CreateDecoderParams::VideoFrameRate(ownerData.mMeanRate.Mean()), + OptionSet(ownerData.mHardwareDecodingDisabled + ? 
Option::HardwareDecoderNotAllowed + : Option::Default), + mOwner->mMediaEngineId, mOwner->mTrackingId}); + break; + } + + default: + p = PlatformDecoderModule::CreateDecoderPromise::CreateAndReject( + NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); + } + + aData.mLiveToken = MakeRefPtr(); + + aData.mCreateDecoderPromise = p->Then( + mOwner->OwnerThread(), __func__, + [this, &aData, &ownerData, live = WeakPtr{aData.mLiveToken}, + owner = ThreadSafeWeakPtr(owner)]( + RefPtr&& aDecoder) { + if (!live) { + return CreateDecoderPromise::CreateAndResolve(std::move(aDecoder), + __func__); + } + aData.mLiveToken = nullptr; + aData.mDecoder = new MediaDataDecoderProxy( + aDecoder.forget(), do_AddRef(ownerData.mTaskQueue.get())); + aData.mDecoder = new AllocationWrapper(aData.mDecoder.forget(), + aData.mToken.forget()); + DecoderDoctorLogger::LinkParentAndChild( + aData.mDecoder.get(), "decoder", + "MediaFormatReader::DecoderFactory", this); + + DoInitDecoder(aData); + + return CreateDecoderPromise::CreateAndResolve(aData.mDecoder, __func__); + }, + [this, &aData, + live = WeakPtr{aData.mLiveToken}](const MediaResult& aError) { + NS_WARNING("Error constructing decoders"); + if (!live) { + return CreateDecoderPromise::CreateAndReject(aError, __func__); + } + aData.mLiveToken = nullptr; + aData.mToken = nullptr; + aData.mStage = Stage::None; + aData.mOwnerData.mDescription = aError.Description(); + DDLOGEX2("MediaFormatReader::DecoderFactory", this, DDLogCategory::Log, + "create_decoder_error", aError); + mOwner->NotifyError(aData.mTrack, aError); + + return CreateDecoderPromise::CreateAndReject(aError, __func__); + }); +} + +void MediaFormatReader::DecoderFactory::DoInitDecoder(Data& aData) { + AUTO_PROFILER_LABEL("DecoderFactory::DoInitDecoder", MEDIA_PLAYBACK); + auto& ownerData = aData.mOwnerData; + + DDLOGEX2("MediaFormatReader::DecoderFactory", this, DDLogCategory::Log, + "initialize_decoder", DDNoValue{}); + aData.mDecoder->Init() + ->Then( + mOwner->OwnerThread(), __func__, + [this, &aData, &ownerData](TrackType aTrack) { + AUTO_PROFILER_LABEL("DecoderFactory::DoInitDecoder:Resolved", + MEDIA_PLAYBACK); + aData.mInitRequest.Complete(); + aData.mStage = Stage::None; + MutexAutoLock lock(ownerData.mMutex); + ownerData.mDecoder = std::move(aData.mDecoder); + ownerData.mDescription = ownerData.mDecoder->GetDescriptionName(); + DDLOGEX2("MediaFormatReader::DecoderFactory", this, + DDLogCategory::Log, "decoder_initialized", DDNoValue{}); + DecoderDoctorLogger::LinkParentAndChild( + "MediaFormatReader::DecoderData", &ownerData, "decoder", + ownerData.mDecoder.get()); + mOwner->SetVideoDecodeThreshold(); + mOwner->ScheduleUpdate(aTrack); + if (aTrack == TrackInfo::kVideoTrack) { + DecoderBenchmark::CheckVersion( + ownerData.GetCurrentInfo()->mMimeType); + } + if (aTrack == TrackInfo::kAudioTrack) { + ownerData.mProcessName = ownerData.mDecoder->GetProcessName(); + ownerData.mCodecName = ownerData.mDecoder->GetCodecName(); + } + }, + [this, &aData, &ownerData](const MediaResult& aError) { + AUTO_PROFILER_LABEL("DecoderFactory::DoInitDecoder:Rejected", + MEDIA_PLAYBACK); + aData.mInitRequest.Complete(); + MOZ_RELEASE_ASSERT(!ownerData.mDecoder, + "Can't have a decoder already set"); + aData.mStage = Stage::None; + mOwner->mShutdownPromisePool->ShutdownDecoder( + aData.mDecoder.forget()); + DDLOGEX2("MediaFormatReader::DecoderFactory", this, + DDLogCategory::Log, "initialize_decoder_error", aError); + mOwner->NotifyError(aData.mTrack, aError); + }) + ->Track(aData.mInitRequest); +} + +// DemuxerProxy ensures 
that the original main demuxer is only ever accessed
+// via its own dedicated task queue.
+// This ensures that the reader's task queue will never be blocked while a
+// demuxer is itself blocked attempting to access the MediaCache or the
+// MediaResource.
+class MediaFormatReader::DemuxerProxy {
+  using TrackType = TrackInfo::TrackType;
+  class Wrapper;
+
+ public:
+  explicit DemuxerProxy(MediaDataDemuxer* aDemuxer)
+      : mTaskQueue(TaskQueue::Create(
+            GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
+            "DemuxerProxy::mTaskQueue")),
+        mData(new Data(aDemuxer)) {
+    MOZ_COUNT_CTOR(DemuxerProxy);
+  }
+
+  MOZ_COUNTED_DTOR(DemuxerProxy)
+
+  RefPtr<ShutdownPromise> Shutdown() {
+    RefPtr<Data> data = std::move(mData);
+    return InvokeAsync(mTaskQueue, __func__, [data]() {
+      // We need to clear our reference to the demuxer now, so that in the
+      // event the init promise wasn't resolved, such as what can happen with
+      // the mediasource demuxer that is waiting on more data, it will force
+      // the init promise to be rejected.
+      data->mDemuxer = nullptr;
+      data->mAudioDemuxer = nullptr;
+      data->mVideoDemuxer = nullptr;
+      return ShutdownPromise::CreateAndResolve(true, __func__);
+    });
+  }
+
+  RefPtr<MediaDataDemuxer::InitPromise> Init();
+
+  Wrapper* GetTrackDemuxer(TrackType aTrack, uint32_t aTrackNumber) {
+    MOZ_RELEASE_ASSERT(mData && mData->mInitDone);
+
+    switch (aTrack) {
+      case TrackInfo::kAudioTrack:
+        return mData->mAudioDemuxer;
+      case TrackInfo::kVideoTrack:
+        return mData->mVideoDemuxer;
+      default:
+        return nullptr;
+    }
+  }
+
+  uint32_t GetNumberTracks(TrackType aTrack) const {
+    MOZ_RELEASE_ASSERT(mData && mData->mInitDone);
+
+    switch (aTrack) {
+      case TrackInfo::kAudioTrack:
+        return mData->mNumAudioTrack;
+      case TrackInfo::kVideoTrack:
+        return mData->mNumVideoTrack;
+      default:
+        return 0;
+    }
+  }
+
+  bool IsSeekable() const {
+    MOZ_RELEASE_ASSERT(mData && mData->mInitDone);
+
+    return mData->mSeekable;
+  }
+
+  bool IsSeekableOnlyInBufferedRanges() const {
+    MOZ_RELEASE_ASSERT(mData && mData->mInitDone);
+
+    return mData->mSeekableOnlyInBufferedRange;
+  }
+
+  UniquePtr<EncryptionInfo> GetCrypto() const {
+    MOZ_RELEASE_ASSERT(mData && mData->mInitDone);
+
+    if (!mData->mCrypto) {
+      return nullptr;
+    }
+    auto crypto = MakeUnique<EncryptionInfo>();
+    *crypto = *mData->mCrypto;
+    return crypto;
+  }
+
+  RefPtr<NotifyDataArrivedPromise> NotifyDataArrived();
+
+  bool ShouldComputeStartTime() const {
+    MOZ_RELEASE_ASSERT(mData && mData->mInitDone);
+
+    return mData->mShouldComputeStartTime;
+  }
+
+ private:
+  const RefPtr<TaskQueue> mTaskQueue;
+  struct Data {
+    NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Data)
+
+    explicit Data(MediaDataDemuxer* aDemuxer)
+        : mInitDone(false), mDemuxer(aDemuxer) {}
+
+    Atomic<bool> mInitDone;
+    // Only ever accessed over mTaskQueue once.
+    RefPtr<MediaDataDemuxer> mDemuxer;
+    // Only accessed once InitPromise has been resolved and immutable after.
+    // So we can safely access them without the use of the mutex.
+ uint32_t mNumAudioTrack = 0; + RefPtr mAudioDemuxer; + uint32_t mNumVideoTrack = 0; + RefPtr mVideoDemuxer; + bool mSeekable = false; + bool mSeekableOnlyInBufferedRange = false; + bool mShouldComputeStartTime = true; + UniquePtr mCrypto; + + private: + ~Data() = default; + }; + RefPtr mData; +}; + +class MediaFormatReader::DemuxerProxy::Wrapper : public MediaTrackDemuxer { + public: + Wrapper(MediaTrackDemuxer* aTrackDemuxer, TaskQueue* aTaskQueue) + : mMutex("TrackDemuxer Mutex"), + mTaskQueue(aTaskQueue), + mGetSamplesMayBlock(aTrackDemuxer->GetSamplesMayBlock()), + mInfo(aTrackDemuxer->GetInfo()), + mTrackDemuxer(aTrackDemuxer) { + DecoderDoctorLogger::LogConstructionAndBase( + "MediaFormatReader::DemuxerProxy::Wrapper", this, + static_cast(this)); + DecoderDoctorLogger::LinkParentAndChild( + "MediaFormatReader::DemuxerProxy::Wrapper", this, "track demuxer", + aTrackDemuxer); + } + + UniquePtr GetInfo() const override { + if (!mInfo) { + return nullptr; + } + return mInfo->Clone(); + } + + RefPtr Seek(const TimeUnit& aTime) override { + RefPtr self = this; + return InvokeAsync( + mTaskQueue, __func__, + [self, aTime]() { return self->mTrackDemuxer->Seek(aTime); }) + ->Then( + mTaskQueue, __func__, + [self](const TimeUnit& aTime) { + self->UpdateRandomAccessPoint(); + return SeekPromise::CreateAndResolve(aTime, __func__); + }, + [self](const MediaResult& aError) { + self->UpdateRandomAccessPoint(); + return SeekPromise::CreateAndReject(aError, __func__); + }); + } + + RefPtr GetSamples(int32_t aNumSamples) override { + RefPtr self = this; + return InvokeAsync(mTaskQueue, __func__, + [self, aNumSamples]() { + return self->mTrackDemuxer->GetSamples(aNumSamples); + }) + ->Then( + mTaskQueue, __func__, + [self](RefPtr aSamples) { + self->UpdateRandomAccessPoint(); + return SamplesPromise::CreateAndResolve(aSamples.forget(), + __func__); + }, + [self](const MediaResult& aError) { + self->UpdateRandomAccessPoint(); + return SamplesPromise::CreateAndReject(aError, __func__); + }); + } + + bool GetSamplesMayBlock() const override { return mGetSamplesMayBlock; } + + void Reset() override { + RefPtr self = this; + nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction( + "MediaFormatReader::DemuxerProxy::Wrapper::Reset", + [self]() { self->mTrackDemuxer->Reset(); })); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; + } + + nsresult GetNextRandomAccessPoint(TimeUnit* aTime) override { + MutexAutoLock lock(mMutex); + if (NS_SUCCEEDED(mNextRandomAccessPointResult)) { + *aTime = mNextRandomAccessPoint; + } + return mNextRandomAccessPointResult; + } + + RefPtr SkipToNextRandomAccessPoint( + const TimeUnit& aTimeThreshold) override { + RefPtr self = this; + return InvokeAsync( + mTaskQueue, __func__, + [self, aTimeThreshold]() { + return self->mTrackDemuxer->SkipToNextRandomAccessPoint( + aTimeThreshold); + }) + ->Then( + mTaskQueue, __func__, + [self](uint32_t aVal) { + self->UpdateRandomAccessPoint(); + return SkipAccessPointPromise::CreateAndResolve(aVal, __func__); + }, + [self](const SkipFailureHolder& aError) { + self->UpdateRandomAccessPoint(); + return SkipAccessPointPromise::CreateAndReject(aError, __func__); + }); + } + + TimeIntervals GetBuffered() override { + MutexAutoLock lock(mMutex); + return mBuffered; + } + + void BreakCycles() override {} + + private: + Mutex mMutex MOZ_UNANNOTATED; + const RefPtr mTaskQueue; + const bool mGetSamplesMayBlock; + const UniquePtr mInfo; + // mTrackDemuxer is only ever accessed on demuxer's task queue. 
+ RefPtr mTrackDemuxer; + // All following members are protected by mMutex + nsresult mNextRandomAccessPointResult = NS_OK; + TimeUnit mNextRandomAccessPoint; + TimeIntervals mBuffered; + friend class DemuxerProxy; + + ~Wrapper() { + RefPtr trackDemuxer = std::move(mTrackDemuxer); + nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction( + "MediaFormatReader::DemuxerProxy::Wrapper::~Wrapper", + [trackDemuxer]() { trackDemuxer->BreakCycles(); })); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; + DecoderDoctorLogger::LogDestruction( + "MediaFormatReader::DemuxerProxy::Wrapper", this); + } + + void UpdateRandomAccessPoint() { + MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); + if (!mTrackDemuxer) { + // Detached. + return; + } + MutexAutoLock lock(mMutex); + mNextRandomAccessPointResult = + mTrackDemuxer->GetNextRandomAccessPoint(&mNextRandomAccessPoint); + } + + void UpdateBuffered() { + MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); + if (!mTrackDemuxer) { + // Detached. + return; + } + MutexAutoLock lock(mMutex); + mBuffered = mTrackDemuxer->GetBuffered(); + } +}; + +RefPtr MediaFormatReader::DemuxerProxy::Init() { + AUTO_PROFILER_LABEL("DemuxerProxy::Init", MEDIA_PLAYBACK); + using InitPromise = MediaDataDemuxer::InitPromise; + + RefPtr data = mData; + RefPtr taskQueue = mTaskQueue; + return InvokeAsync(mTaskQueue, __func__, + [data, taskQueue]() { + if (!data->mDemuxer) { + return InitPromise::CreateAndReject( + NS_ERROR_DOM_MEDIA_CANCELED, __func__); + } + return data->mDemuxer->Init(); + }) + ->Then( + taskQueue, __func__, + [data, taskQueue]() { + AUTO_PROFILER_LABEL("DemuxerProxy::Init:Resolved", MEDIA_PLAYBACK); + if (!data->mDemuxer) { // Was shutdown. + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, + __func__); + } + data->mNumAudioTrack = + data->mDemuxer->GetNumberTracks(TrackInfo::kAudioTrack); + if (data->mNumAudioTrack) { + RefPtr d = + data->mDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0); + if (d) { + RefPtr wrapper = + new DemuxerProxy::Wrapper(d, taskQueue); + wrapper->UpdateBuffered(); + data->mAudioDemuxer = wrapper; + DecoderDoctorLogger::LinkParentAndChild( + data->mDemuxer.get(), "decoder factory wrapper", + "MediaFormatReader::DecoderFactory::Wrapper", + wrapper.get()); + } + } + data->mNumVideoTrack = + data->mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack); + if (data->mNumVideoTrack) { + RefPtr d = + data->mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0); + if (d) { + RefPtr wrapper = + new DemuxerProxy::Wrapper(d, taskQueue); + wrapper->UpdateBuffered(); + data->mVideoDemuxer = wrapper; + DecoderDoctorLogger::LinkParentAndChild( + data->mDemuxer.get(), "decoder factory wrapper", + "MediaFormatReader::DecoderFactory::Wrapper", + wrapper.get()); + } + } + data->mCrypto = data->mDemuxer->GetCrypto(); + data->mSeekable = data->mDemuxer->IsSeekable(); + data->mSeekableOnlyInBufferedRange = + data->mDemuxer->IsSeekableOnlyInBufferedRanges(); + data->mShouldComputeStartTime = + data->mDemuxer->ShouldComputeStartTime(); + data->mInitDone = true; + return InitPromise::CreateAndResolve(NS_OK, __func__); + }, + [](const MediaResult& aError) { + return InitPromise::CreateAndReject(aError, __func__); + }); +} + +RefPtr +MediaFormatReader::DemuxerProxy::NotifyDataArrived() { + RefPtr data = mData; + return InvokeAsync(mTaskQueue, __func__, [data]() { + if (!data->mDemuxer) { + // Was shutdown. 
+ return NotifyDataArrivedPromise::CreateAndReject( + NS_ERROR_DOM_MEDIA_CANCELED, __func__); + } + data->mDemuxer->NotifyDataArrived(); + if (data->mAudioDemuxer) { + data->mAudioDemuxer->UpdateBuffered(); + } + if (data->mVideoDemuxer) { + data->mVideoDemuxer->UpdateBuffered(); + } + return NotifyDataArrivedPromise::CreateAndResolve(true, __func__); + }); +} + +MediaFormatReader::MediaFormatReader(MediaFormatReaderInit& aInit, + MediaDataDemuxer* aDemuxer) + : mTaskQueue( + TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR), + "MediaFormatReader::mTaskQueue", + /* aSupportsTailDispatch = */ true)), + mAudio(this, MediaData::Type::AUDIO_DATA, + StaticPrefs::media_audio_max_decode_error()), + mVideo(this, MediaData::Type::VIDEO_DATA, + StaticPrefs::media_video_max_decode_error()), + mWorkingInfoChanged(false, "MediaFormatReader::mWorkingInfoChanged"), + mWatchManager(this, OwnerThread()), + mIsWatchingWorkingInfo(false), + mDemuxer(new DemuxerProxy(aDemuxer)), + mDemuxerInitDone(false), + mPendingNotifyDataArrived(false), + mLastReportedNumDecodedFrames(0), + mPreviousDecodedKeyframeTime_us(sNoPreviousDecodedKeyframe), + mKnowsCompositor(aInit.mKnowsCompositor), + mInitDone(false), + mTrackDemuxersMayBlock(false), + mSeekScheduled(false), + mVideoFrameContainer(aInit.mVideoFrameContainer), + mCrashHelper(aInit.mCrashHelper), + mDecoderFactory(new DecoderFactory(this)), + mShutdownPromisePool(new ShutdownPromisePool()), + mBuffered(mTaskQueue, TimeIntervals(), + "MediaFormatReader::mBuffered (Canonical)"), + mFrameStats(aInit.mFrameStats), + mMediaDecoderOwnerID(aInit.mMediaDecoderOwnerID), + mTrackingId(std::move(aInit.mTrackingId)) { + MOZ_ASSERT(aDemuxer); + MOZ_COUNT_CTOR(MediaFormatReader); + DDLINKCHILD("audio decoder data", "MediaFormatReader::DecoderDataWithPromise", + &mAudio); + DDLINKCHILD("video decoder data", "MediaFormatReader::DecoderDataWithPromise", + &mVideo); + DDLINKCHILD("demuxer", aDemuxer); + mOnTrackWaitingForKeyListener = OnTrackWaitingForKey().Connect( + mTaskQueue, this, &MediaFormatReader::NotifyWaitingForKey); +} + +MediaFormatReader::~MediaFormatReader() { + MOZ_COUNT_DTOR(MediaFormatReader); + MOZ_ASSERT(mShutdown); +} + +RefPtr MediaFormatReader::Shutdown() { + MOZ_ASSERT(OnTaskQueue()); + LOG(""); + + mDemuxerInitRequest.DisconnectIfExists(); + mNotifyDataArrivedPromise.DisconnectIfExists(); + mMetadataPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); + mSeekPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); + mSkipRequest.DisconnectIfExists(); + mSetCDMPromise.RejectIfExists( + MediaResult(NS_ERROR_DOM_INVALID_STATE_ERR, + "MediaFormatReader is shutting down"), + __func__); + + if (mIsWatchingWorkingInfo) { + mWatchManager.Unwatch(mWorkingInfoChanged, + &MediaFormatReader::NotifyTrackInfoUpdated); + } + mWatchManager.Shutdown(); + + if (mAudio.HasPromise()) { + mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); + } + if (mVideo.HasPromise()) { + mVideo.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); + } + + if (HasAudio()) { + mAudio.ResetDemuxer(); + mAudio.mTrackDemuxer->BreakCycles(); + { + MutexAutoLock lock(mAudio.mMutex); + mAudio.mTrackDemuxer = nullptr; + } + mAudio.ResetState(); + ShutdownDecoder(TrackInfo::kAudioTrack); + } + + if (HasVideo()) { + mVideo.ResetDemuxer(); + mVideo.mTrackDemuxer->BreakCycles(); + { + MutexAutoLock lock(mVideo.mMutex); + mVideo.mTrackDemuxer = nullptr; + } + mVideo.ResetState(); + ShutdownDecoder(TrackInfo::kVideoTrack); + } + + 
mShutdownPromisePool->Track(mDemuxer->Shutdown()); + mDemuxer = nullptr; + + mOnTrackWaitingForKeyListener.Disconnect(); + + mShutdown = true; + return mShutdownPromisePool->Shutdown()->Then( + OwnerThread(), __func__, this, &MediaFormatReader::TearDownDecoders, + &MediaFormatReader::TearDownDecoders); +} + +void MediaFormatReader::ShutdownDecoder(TrackType aTrack) { + LOGV("%s", TrackTypeToStr(aTrack)); + + // Shut down the pending decoder if any. + mDecoderFactory->ShutdownDecoder(aTrack); + + auto& decoder = GetDecoderData(aTrack); + // Flush the decoder if necessary. + decoder.Flush(); + + // Shut down the decoder if any. + decoder.ShutdownDecoder(); +} + +void MediaFormatReader::NotifyDecoderBenchmarkStore() { + MOZ_ASSERT(OnTaskQueue()); + if (!StaticPrefs::media_mediacapabilities_from_database()) { + return; + } + auto& decoder = GetDecoderData(TrackInfo::kVideoTrack); + if (decoder.GetCurrentInfo() && decoder.GetCurrentInfo()->GetAsVideoInfo()) { + VideoInfo info = *(decoder.GetCurrentInfo()->GetAsVideoInfo()); + info.SetFrameRate(static_cast(ceil(decoder.mMeanRate.Mean()))); + mOnStoreDecoderBenchmark.Notify(std::move(info)); + } +} + +void MediaFormatReader::NotifyTrackInfoUpdated() { + MOZ_ASSERT(OnTaskQueue()); + if (mWorkingInfoChanged) { + mWorkingInfoChanged = false; + + VideoInfo videoInfo; + AudioInfo audioInfo; + { + MutexAutoLock lock(mVideo.mMutex); + if (HasVideo()) { + videoInfo = *mVideo.GetWorkingInfo()->GetAsVideoInfo(); + } + } + { + MutexAutoLock lock(mAudio.mMutex); + if (HasAudio()) { + audioInfo = *mAudio.GetWorkingInfo()->GetAsAudioInfo(); + } + } + + mTrackInfoUpdatedEvent.Notify(videoInfo, audioInfo); + } +} + +RefPtr MediaFormatReader::TearDownDecoders() { + if (mAudio.mTaskQueue) { + mAudio.mTaskQueue->BeginShutdown(); + mAudio.mTaskQueue->AwaitShutdownAndIdle(); + mAudio.mTaskQueue = nullptr; + } + if (mVideo.mTaskQueue) { + mVideo.mTaskQueue->BeginShutdown(); + mVideo.mTaskQueue->AwaitShutdownAndIdle(); + mVideo.mTaskQueue = nullptr; + } + + mDecoderFactory = nullptr; + mVideoFrameContainer = nullptr; + + ReleaseResources(); + mBuffered.DisconnectAll(); + return mTaskQueue->BeginShutdown(); +} + +nsresult MediaFormatReader::Init() { + MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread."); + + mAudio.mTaskQueue = + TaskQueue::Create(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER), + "MFR::mAudio::mTaskQueue"); + + mVideo.mTaskQueue = + TaskQueue::Create(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER), + "MFR::mVideo::mTaskQueue"); + + return NS_OK; +} + +bool MediaFormatReader::ResolveSetCDMPromiseIfDone(TrackType aTrack) { + // When a CDM proxy is set, MFR would shutdown the existing MediaDataDecoder + // and would create new one for specific track in the next Update. 
+  MOZ_ASSERT(OnTaskQueue());
+
+  if (mSetCDMPromise.IsEmpty()) {
+    return true;
+  }
+
+  MOZ_ASSERT(mCDMProxy);
+  if (mSetCDMForTracks.contains(aTrack)) {
+    mSetCDMForTracks -= aTrack;
+  }
+
+  if (mSetCDMForTracks.isEmpty()) {
+    LOGV("%s : Done ", __func__);
+    mSetCDMPromise.Resolve(/* aResolveValue = */ true, __func__);
+    if (HasAudio()) {
+      ScheduleUpdate(TrackInfo::kAudioTrack);
+    }
+    if (HasVideo()) {
+      ScheduleUpdate(TrackInfo::kVideoTrack);
+    }
+    return true;
+  }
+  LOGV("%s : %s track is ready.", __func__, TrackTypeToStr(aTrack));
+  return false;
+}
+
+void MediaFormatReader::PrepareToSetCDMForTrack(TrackType aTrack) {
+  MOZ_ASSERT(OnTaskQueue());
+  LOGV("%s : %s", __func__, TrackTypeToStr(aTrack));
+
+  mSetCDMForTracks += aTrack;
+  if (mCDMProxy) {
+    // An old CDM proxy exists; detach it by shutting down the existing
+    // MediaDataDecoder.
+    ShutdownDecoder(aTrack);
+  }
+  ScheduleUpdate(aTrack);
+}
+
+bool MediaFormatReader::IsDecoderWaitingForCDM(TrackType aTrack) {
+  MOZ_ASSERT(OnTaskQueue());
+  return GetDecoderData(aTrack).IsEncrypted() &&
+         mSetCDMForTracks.contains(aTrack) && !mCDMProxy;
+}
+
+RefPtr<SetCDMPromise> MediaFormatReader::SetCDMProxy(CDMProxy* aProxy) {
+  MOZ_ASSERT(OnTaskQueue());
+  LOGV("SetCDMProxy (%p)", aProxy);
+
+  if (mShutdown) {
+    return SetCDMPromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_INVALID_STATE_ERR,
+                    "MediaFormatReader is shutting down"),
+        __func__);
+  }
+
+  mSetCDMPromise.RejectIfExists(
+      MediaResult(NS_ERROR_DOM_INVALID_STATE_ERR,
+                  "Another new CDM proxy is being set."),
+      __func__);
+
+  // Shut down all decoders, as switching the CDM proxy means it is
+  // inappropriate for the existing decoders to continue decoding via the old
+  // CDM proxy.
+  if (HasAudio()) {
+    PrepareToSetCDMForTrack(TrackInfo::kAudioTrack);
+  }
+  if (HasVideo()) {
+    PrepareToSetCDMForTrack(TrackInfo::kVideoTrack);
+  }
+
+  mCDMProxy = aProxy;
+
+  if (!mInitDone || mSetCDMForTracks.isEmpty() || !mCDMProxy) {
+    // The promise can be resolved directly when:
+    // 1) MFR is not initialized yet, or
+    // 2) the demuxer is initialized without active audio and video, or
+    // 3) a null CDM proxy is set.
+    mSetCDMForTracks.clear();
+    return SetCDMPromise::CreateAndResolve(/* aResolveValue = */ true,
+                                           __func__);
+  }
+
+  RefPtr<SetCDMPromise> p = mSetCDMPromise.Ensure(__func__);
+  return p;
+}
+
+bool MediaFormatReader::IsWaitingOnCDMResource() {
+  MOZ_ASSERT(OnTaskQueue());
+  return IsEncrypted() && !mCDMProxy;
+}
+
+RefPtr<MediaFormatReader::MetadataPromise>
+MediaFormatReader::AsyncReadMetadata() {
+  AUTO_PROFILER_LABEL("MediaFormatReader::AsyncReadMetadata", MEDIA_PLAYBACK);
+  MOZ_ASSERT(OnTaskQueue());
+
+  MOZ_DIAGNOSTIC_ASSERT(mMetadataPromise.IsEmpty());
+
+  if (mInitDone) {
+    // We are returning from dormant.
+ MetadataHolder metadata; + metadata.mInfo = MakeUnique(mInfo); + return MetadataPromise::CreateAndResolve(std::move(metadata), __func__); + } + + RefPtr p = mMetadataPromise.Ensure(__func__); + + mDemuxer->Init() + ->Then(OwnerThread(), __func__, this, + &MediaFormatReader::OnDemuxerInitDone, + &MediaFormatReader::OnDemuxerInitFailed) + ->Track(mDemuxerInitRequest); + return p; +} + +void MediaFormatReader::OnDemuxerInitDone(const MediaResult& aResult) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnDemuxerInitDone", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + mDemuxerInitRequest.Complete(); + + if (NS_FAILED(aResult) && StaticPrefs::media_playback_warnings_as_errors()) { + mMetadataPromise.Reject(aResult, __func__); + return; + } + + mDemuxerInitDone = true; + + UniquePtr tags(MakeUnique()); + + RefPtr platform; + if (!IsWaitingOnCDMResource()) { + platform = new PDMFactory(); + } + + // To decode, we need valid video and a place to put it. + bool videoActive = !!mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack) && + GetImageContainer(); + + if (videoActive) { + // We currently only handle the first video track. + MutexAutoLock lock(mVideo.mMutex); + mVideo.mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0); + if (!mVideo.mTrackDemuxer) { + mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); + return; + } + + UniquePtr videoInfo = mVideo.mTrackDemuxer->GetInfo(); + videoActive = videoInfo && videoInfo->IsValid(); + if (videoActive) { + if (platform && + platform->SupportsMimeType(videoInfo->mMimeType).isEmpty()) { + // We have no decoder for this track. Error. + mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); + return; + } + mInfo.mVideo = *videoInfo->GetAsVideoInfo(); + mVideo.mWorkingInfo = MakeUnique(mInfo.mVideo); + for (const MetadataTag& tag : videoInfo->mTags) { + tags->InsertOrUpdate(tag.mKey, tag.mValue); + } + mWorkingInfoChanged = true; + mVideo.mOriginalInfo = std::move(videoInfo); + mTrackDemuxersMayBlock |= mVideo.mTrackDemuxer->GetSamplesMayBlock(); + } else { + mVideo.mTrackDemuxer->BreakCycles(); + mVideo.mTrackDemuxer = nullptr; + } + } + + bool audioActive = !!mDemuxer->GetNumberTracks(TrackInfo::kAudioTrack); + if (audioActive) { + MutexAutoLock lock(mAudio.mMutex); + mAudio.mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0); + if (!mAudio.mTrackDemuxer) { + mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); + return; + } + + UniquePtr audioInfo = mAudio.mTrackDemuxer->GetInfo(); + // We actively ignore audio tracks that we know we can't play. + audioActive = audioInfo && audioInfo->IsValid() && + (!platform || + !platform->SupportsMimeType(audioInfo->mMimeType).isEmpty()); + + if (audioActive) { + mInfo.mAudio = *audioInfo->GetAsAudioInfo(); + mAudio.mWorkingInfo = MakeUnique(mInfo.mAudio); + for (const MetadataTag& tag : audioInfo->mTags) { + tags->InsertOrUpdate(tag.mKey, tag.mValue); + } + mWorkingInfoChanged = true; + mAudio.mOriginalInfo = std::move(audioInfo); + mTrackDemuxersMayBlock |= mAudio.mTrackDemuxer->GetSamplesMayBlock(); + } else { + mAudio.mTrackDemuxer->BreakCycles(); + mAudio.mTrackDemuxer = nullptr; + } + } + + UniquePtr crypto = mDemuxer->GetCrypto(); + if (crypto && crypto->IsEncrypted()) { + // Try and dispatch 'encrypted'. Won't go if ready state still HAVE_NOTHING. 
+    for (uint32_t i = 0; i < crypto->mInitDatas.Length(); i++) {
+      mOnEncrypted.Notify(crypto->mInitDatas[i].mInitData,
+                          crypto->mInitDatas[i].mType);
+    }
+    mInfo.mCrypto = *crypto;
+  }
+
+  auto videoDuration = HasVideo() ? mInfo.mVideo.mDuration : TimeUnit::Zero();
+  auto audioDuration = HasAudio() ? mInfo.mAudio.mDuration : TimeUnit::Zero();
+
+  // If the duration is 0 for both audio and video, leave mMetadataDuration as
+  // Nothing(); the duration will then be derived from the buffered ranges.
+  if (videoDuration.IsPositive() || audioDuration.IsPositive()) {
+    auto duration = std::max(videoDuration, audioDuration);
+    mInfo.mMetadataDuration = Some(duration);
+  }
+
+  mInfo.mMediaSeekable = mDemuxer->IsSeekable();
+  mInfo.mMediaSeekableOnlyInBufferedRanges =
+      mDemuxer->IsSeekableOnlyInBufferedRanges();
+
+  if (!videoActive && !audioActive) {
+    mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
+    return;
+  }
+
+  mTags = std::move(tags);
+  mInitDone = true;
+
+  // Try to get the start time.
+  // For the MSE case, the start time of each track is assumed to be 0.
+  // For others, we must demux the first sample to know the start time for each
+  // track.
+  if (!mDemuxer->ShouldComputeStartTime()) {
+    mAudio.mFirstDemuxedSampleTime.emplace(TimeUnit::Zero());
+    mVideo.mFirstDemuxedSampleTime.emplace(TimeUnit::Zero());
+  } else {
+    if (HasAudio()) {
+      RequestDemuxSamples(TrackInfo::kAudioTrack);
+    }
+
+    if (HasVideo()) {
+      RequestDemuxSamples(TrackInfo::kVideoTrack);
+    }
+  }
+
+  if (aResult != NS_OK) {
+    mOnDecodeWarning.Notify(aResult);
+  }
+
+  MaybeResolveMetadataPromise();
+}
+
+void MediaFormatReader::MaybeResolveMetadataPromise() {
+  MOZ_ASSERT(OnTaskQueue());
+
+  if ((HasAudio() && mAudio.mFirstDemuxedSampleTime.isNothing()) ||
+      (HasVideo() && mVideo.mFirstDemuxedSampleTime.isNothing())) {
+    return;
+  }
+
+  TimeUnit startTime =
+      std::min(mAudio.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()),
+               mVideo.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()));
+
+  if (!startTime.IsInfinite()) {
+    mInfo.mStartTime = startTime;  // mInfo.mStartTime is initialized to 0.
+  }
+
+  MetadataHolder metadata;
+  metadata.mInfo = MakeUnique<MediaInfo>(mInfo);
+  metadata.mTags = mTags->Count() ? std::move(mTags) : nullptr;
+
+  // We now have all the information required to calculate the initial buffered
+  // range.
+  mHasStartTime = true;
+  UpdateBuffered();
+
+  mWatchManager.Watch(mWorkingInfoChanged,
+                      &MediaFormatReader::NotifyTrackInfoUpdated);
+  mIsWatchingWorkingInfo = true;
+
+  mMetadataPromise.Resolve(std::move(metadata), __func__);
+}
+
+bool MediaFormatReader::IsEncrypted() const {
+  return (HasAudio() && mAudio.GetCurrentInfo()->mCrypto.IsEncrypted()) ||
+         (HasVideo() && mVideo.GetCurrentInfo()->mCrypto.IsEncrypted());
+}
+
+void MediaFormatReader::OnDemuxerInitFailed(const MediaResult& aError) {
+  mDemuxerInitRequest.Complete();
+  mMetadataPromise.Reject(aError, __func__);
+}
+
+void MediaFormatReader::ReadUpdatedMetadata(MediaInfo* aInfo) {
+  // Called on the MDSM's TaskQueue.
+ { + MutexAutoLock lock(mVideo.mMutex); + if (HasVideo()) { + aInfo->mVideo = *mVideo.GetWorkingInfo()->GetAsVideoInfo(); + } + } + { + MutexAutoLock lock(mAudio.mMutex); + if (HasAudio()) { + aInfo->mAudio = *mAudio.GetWorkingInfo()->GetAsAudioInfo(); + Maybe audioProcessPerCodecName = GetAudioProcessPerCodec(); + if (audioProcessPerCodecName.isSome()) { + Telemetry::ScalarAdd( + Telemetry::ScalarID::MEDIA_AUDIO_PROCESS_PER_CODEC_NAME, + NS_ConvertUTF8toUTF16(*audioProcessPerCodecName), 1); + } + } + } +} + +MediaFormatReader::DecoderData& MediaFormatReader::GetDecoderData( + TrackType aTrack) { + MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack || + aTrack == TrackInfo::kVideoTrack); + if (aTrack == TrackInfo::kAudioTrack) { + return mAudio; + } + return mVideo; +} + +Maybe MediaFormatReader::ShouldSkip(TimeUnit aTimeThreshold, + bool aRequestNextVideoKeyFrame) { + MOZ_ASSERT(OnTaskQueue()); + MOZ_ASSERT(HasVideo()); + + if (!StaticPrefs::media_decoder_skip_to_next_key_frame_enabled()) { + return Nothing(); + } + + // Ensure we have no pending seek going as skip-to-keyframe could return out + // of date information. + if (mVideo.HasInternalSeekPending()) { + return Nothing(); + } + + TimeUnit nextKeyframe; + nsresult rv = mVideo.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe); + if (NS_FAILED(rv)) { + // Only OggTrackDemuxer with video type gets into here. + // We don't support skip-to-next-frame for this case. + return Nothing(); + } + + const bool isNextKeyframeValid = + nextKeyframe.ToMicroseconds() >= 0 && !nextKeyframe.IsInfinite(); + // If we request the next keyframe, only return times greater than + // aTimeThreshold. Otherwise, data will be already behind the threshold and + // will be eventually discarded somewhere in the media pipeline. + if (aRequestNextVideoKeyFrame && isNextKeyframeValid && + nextKeyframe > aTimeThreshold) { + return Some(nextKeyframe); + } + + const bool isNextVideoBehindTheThreshold = + (isNextKeyframeValid && nextKeyframe <= aTimeThreshold) || + GetInternalSeekTargetEndTime() < aTimeThreshold; + return isNextVideoBehindTheThreshold ? Some(aTimeThreshold) : Nothing(); +} + +RefPtr MediaFormatReader::RequestVideoData( + const TimeUnit& aTimeThreshold, bool aRequestNextVideoKeyFrame) { + MOZ_ASSERT(OnTaskQueue()); + MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise(), "No duplicate sample requests"); + // Requesting video can be done independently from audio, even during audio + // seeking. But it shouldn't happen if we're doing video seek. + if (!IsAudioOnlySeeking()) { + MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(), + "No sample requests allowed while seeking"); + MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists() || + mVideo.mTimeThreshold.isSome()); + MOZ_DIAGNOSTIC_ASSERT(!IsSeeking(), "called mid-seek"); + } + LOGV("RequestVideoData(%" PRId64 "), requestNextKeyFrame=%d", + aTimeThreshold.ToMicroseconds(), aRequestNextVideoKeyFrame); + + if (!HasVideo()) { + LOG("called with no video track"); + return VideoDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__); + } + + if (IsSeeking()) { + LOG("called mid-seek. 
Rejecting."); + return VideoDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, + __func__); + } + + if (mShutdown) { + NS_WARNING("RequestVideoData on shutdown MediaFormatReader!"); + return VideoDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, + __func__); + } + + if (Maybe target = + ShouldSkip(aTimeThreshold, aRequestNextVideoKeyFrame)) { + PROFILER_MARKER_UNTYPED("RequestVideoData SkipVideoDemuxToNextKeyFrame", + MEDIA_PLAYBACK); + RefPtr p = mVideo.EnsurePromise(__func__); + SkipVideoDemuxToNextKeyFrame(*target); + return p; + } + + RefPtr p = mVideo.EnsurePromise(__func__); + ScheduleUpdate(TrackInfo::kVideoTrack); + + return p; +} + +void MediaFormatReader::OnDemuxFailed(TrackType aTrack, + const MediaResult& aError) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnDemuxFailed", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + LOG("Failed to demux %s, failure:%s", + aTrack == TrackType::kVideoTrack ? "video" : "audio", + aError.ErrorName().get()); + auto& decoder = GetDecoderData(aTrack); + decoder.mDemuxRequest.Complete(); + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + DDLOG(DDLogCategory::Log, + aTrack == TrackType::kVideoTrack ? "video_demux_interruption" + : "audio_demux_interruption", + aError); + if (!decoder.mWaitingForData) { + decoder.RequestDrain(); + } + NotifyEndOfStream(aTrack); + break; + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: + DDLOG(DDLogCategory::Log, + aTrack == TrackType::kVideoTrack ? "video_demux_interruption" + : "audio_demux_interruption", + aError); + if (!decoder.mWaitingForData) { + decoder.RequestDrain(); + } + NotifyWaitingForData(aTrack); + break; + case NS_ERROR_DOM_MEDIA_CANCELED: + DDLOG(DDLogCategory::Log, + aTrack == TrackType::kVideoTrack ? "video_demux_interruption" + : "audio_demux_interruption", + aError); + if (decoder.HasPromise()) { + decoder.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); + } + break; + default: + DDLOG(DDLogCategory::Log, + aTrack == TrackType::kVideoTrack ? 
"video_demux_error" + : "audio_demux_error", + aError); + NotifyError(aTrack, aError); + break; + } +} + +void MediaFormatReader::DoDemuxVideo() { + AUTO_PROFILER_LABEL("MediaFormatReader::DoDemuxVideo", MEDIA_PLAYBACK); + using SamplesPromise = MediaTrackDemuxer::SamplesPromise; + + DDLOG(DDLogCategory::Log, "video_demuxing", DDNoValue{}); + PerformanceRecorder perfRecorder( + MediaStage::RequestDemux, + mVideo.GetCurrentInfo()->GetAsVideoInfo()->mImage.height); + auto p = mVideo.mTrackDemuxer->GetSamples(1); + + RefPtr self = this; + if (mVideo.mFirstDemuxedSampleTime.isNothing()) { + p = p->Then( + OwnerThread(), __func__, + [self](RefPtr aSamples) { + AUTO_PROFILER_LABEL("MediaFormatReader::DoDemuxVideo:Resolved", + MEDIA_PLAYBACK); + DDLOGEX(self.get(), DDLogCategory::Log, "video_first_demuxed", + DDNoValue{}); + self->OnFirstDemuxCompleted(TrackInfo::kVideoTrack, aSamples); + return SamplesPromise::CreateAndResolve(aSamples.forget(), __func__); + }, + [self](const MediaResult& aError) { + AUTO_PROFILER_LABEL("MediaFormatReader::DoDemuxVideo:Rejected", + MEDIA_PLAYBACK); + DDLOGEX(self.get(), DDLogCategory::Log, "video_first_demuxing_error", + aError); + self->OnFirstDemuxFailed(TrackInfo::kVideoTrack, aError); + return SamplesPromise::CreateAndReject(aError, __func__); + }); + } + + p->Then( + OwnerThread(), __func__, + [self, perfRecorder(std::move(perfRecorder))]( + const RefPtr& aSamples) mutable { + perfRecorder.Record(); + self->OnVideoDemuxCompleted(aSamples); + }, + [self](const MediaResult& aError) { self->OnVideoDemuxFailed(aError); }) + ->Track(mVideo.mDemuxRequest); +} + +void MediaFormatReader::OnVideoDemuxCompleted( + const RefPtr& aSamples) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnVideoDemuxCompleted", + MEDIA_PLAYBACK); + LOGV("%zu video samples demuxed (sid:%d)", aSamples->GetSamples().Length(), + aSamples->GetSamples()[0]->mTrackInfo + ? aSamples->GetSamples()[0]->mTrackInfo->GetID() + : 0); + DDLOG(DDLogCategory::Log, "video_demuxed_samples", + uint64_t(aSamples->GetSamples().Length())); + mVideo.mDemuxRequest.Complete(); + mVideo.mQueuedSamples.AppendElements(aSamples->GetSamples()); + ScheduleUpdate(TrackInfo::kVideoTrack); +} + +RefPtr +MediaFormatReader::RequestAudioData() { + MOZ_ASSERT(OnTaskQueue()); + MOZ_DIAGNOSTIC_ASSERT(!mAudio.HasPromise(), "No duplicate sample requests"); + // Requesting audio can be done independently from video, even during video + // seeking. But it shouldn't happen if we're doing audio seek. + if (!IsVideoOnlySeeking()) { + MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(), + "No sample requests allowed while seeking"); + MOZ_DIAGNOSTIC_ASSERT(!mAudio.mSeekRequest.Exists() || + mAudio.mTimeThreshold.isSome()); + MOZ_DIAGNOSTIC_ASSERT(!IsSeeking(), "called mid-seek"); + } + LOGV(""); + + if (!HasAudio()) { + LOG("called with no audio track"); + return AudioDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__); + } + + if (IsSeeking()) { + LOG("called mid-seek. 
Rejecting."); + return AudioDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, + __func__); + } + + if (mShutdown) { + NS_WARNING("RequestAudioData on shutdown MediaFormatReader!"); + return AudioDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, + __func__); + } + + RefPtr p = mAudio.EnsurePromise(__func__); + ScheduleUpdate(TrackInfo::kAudioTrack); + + return p; +} + +void MediaFormatReader::DoDemuxAudio() { + AUTO_PROFILER_LABEL("MediaFormatReader::DoDemuxAudio", MEDIA_PLAYBACK); + using SamplesPromise = MediaTrackDemuxer::SamplesPromise; + + DDLOG(DDLogCategory::Log, "audio_demuxing", DDNoValue{}); + PerformanceRecorder perfRecorder(MediaStage::RequestDemux); + auto p = mAudio.mTrackDemuxer->GetSamples(1); + + RefPtr self = this; + if (mAudio.mFirstDemuxedSampleTime.isNothing()) { + p = p->Then( + OwnerThread(), __func__, + [self](RefPtr aSamples) { + AUTO_PROFILER_LABEL("MediaFormatReader::DoDemuxAudio:Resolved", + MEDIA_PLAYBACK); + DDLOGEX(self.get(), DDLogCategory::Log, "audio_first_demuxed", + DDNoValue{}); + self->OnFirstDemuxCompleted(TrackInfo::kAudioTrack, aSamples); + return SamplesPromise::CreateAndResolve(aSamples.forget(), __func__); + }, + [self](const MediaResult& aError) { + AUTO_PROFILER_LABEL("MediaFormatReader::DoDemuxAudio:Rejected", + MEDIA_PLAYBACK); + DDLOGEX(self.get(), DDLogCategory::Log, "audio_first_demuxing_error", + aError); + self->OnFirstDemuxFailed(TrackInfo::kAudioTrack, aError); + return SamplesPromise::CreateAndReject(aError, __func__); + }); + } + + p->Then( + OwnerThread(), __func__, + [self, perfRecorder(std::move(perfRecorder))]( + const RefPtr& aSamples) mutable { + perfRecorder.Record(); + self->OnAudioDemuxCompleted(aSamples); + }, + [self](const MediaResult& aError) { self->OnAudioDemuxFailed(aError); }) + ->Track(mAudio.mDemuxRequest); +} + +void MediaFormatReader::OnAudioDemuxCompleted( + const RefPtr& aSamples) { + LOGV("%zu audio samples demuxed (sid:%d)", aSamples->GetSamples().Length(), + aSamples->GetSamples()[0]->mTrackInfo + ? aSamples->GetSamples()[0]->mTrackInfo->GetID() + : 0); + DDLOG(DDLogCategory::Log, "audio_demuxed_samples", + uint64_t(aSamples->GetSamples().Length())); + mAudio.mDemuxRequest.Complete(); + mAudio.mQueuedSamples.AppendElements(aSamples->GetSamples()); + ScheduleUpdate(TrackInfo::kAudioTrack); +} + +void MediaFormatReader::NotifyNewOutput( + TrackType aTrack, MediaDataDecoder::DecodedData&& aResults) { + AUTO_PROFILER_LABEL("MediaFormatReader::NotifyNewOutput", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + auto& decoder = GetDecoderData(aTrack); + if (aResults.IsEmpty()) { + DDLOG(DDLogCategory::Log, + aTrack == TrackInfo::kAudioTrack ? "decoded_audio" : "decoded_video", + "no output samples"); + } else { + for (auto&& sample : aResults) { + if (DecoderDoctorLogger::IsDDLoggingEnabled()) { + switch (sample->mType) { + case MediaData::Type::AUDIO_DATA: + DDLOGPR(DDLogCategory::Log, + aTrack == TrackInfo::kAudioTrack ? 
"decoded_audio" + : "decoded_got_audio!?", + "{\"type\":\"AudioData\", \"offset\":%" PRIi64 + ", \"time_us\":%" PRIi64 ", \"timecode_us\":%" PRIi64 + ", \"duration_us\":%" PRIi64 ", \"frames\":%" PRIu32 + ", \"channels\":%" PRIu32 ", \"rate\":%" PRIu32 + ", \"bytes\":%zu}", + sample->mOffset, sample->mTime.ToMicroseconds(), + sample->mTimecode.ToMicroseconds(), + sample->mDuration.ToMicroseconds(), + sample->As()->Frames(), + sample->As()->mChannels, + sample->As()->mRate, + sample->As()->Data().Length()); + break; + case MediaData::Type::VIDEO_DATA: + DDLOGPR(DDLogCategory::Log, + aTrack == TrackInfo::kVideoTrack ? "decoded_video" + : "decoded_got_video!?", + "{\"type\":\"VideoData\", \"offset\":%" PRIi64 + ", \"time_us\":%" PRIi64 ", \"timecode_us\":%" PRIi64 + ", \"duration_us\":%" PRIi64 + ", \"kf\":%s, \"size\":[%" PRIi32 ",%" PRIi32 "]}", + sample->mOffset, sample->mTime.ToMicroseconds(), + sample->mTimecode.ToMicroseconds(), + sample->mDuration.ToMicroseconds(), + sample->mKeyframe ? "true" : "false", + sample->As()->mDisplay.width, + sample->As()->mDisplay.height); + break; + case MediaData::Type::RAW_DATA: + DDLOGPR(DDLogCategory::Log, + aTrack == TrackInfo::kAudioTrack ? "decoded_audio" + : aTrack == TrackInfo::kVideoTrack ? "decoded_video" + : "decoded_?", + "{\"type\":\"RawData\", \"offset\":%" PRIi64 + " \"time_us\":%" PRIi64 ", \"timecode_us\":%" PRIi64 + ", \"duration_us\":%" PRIi64 ", \"kf\":%s}", + sample->mOffset, sample->mTime.ToMicroseconds(), + sample->mTimecode.ToMicroseconds(), + sample->mDuration.ToMicroseconds(), + sample->mKeyframe ? "true" : "false"); + break; + case MediaData::Type::NULL_DATA: + DDLOGPR(DDLogCategory::Log, + aTrack == TrackInfo::kAudioTrack ? "decoded_audio" + : aTrack == TrackInfo::kVideoTrack ? "decoded_video" + : "decoded_?", + "{\"type\":\"NullData\", \"offset\":%" PRIi64 + " \"time_us\":%" PRIi64 ", \"timecode_us\":%" PRIi64 + ", \"duration_us\":%" PRIi64 ", \"kf\":%s}", + sample->mOffset, sample->mTime.ToMicroseconds(), + sample->mTimecode.ToMicroseconds(), + sample->mDuration.ToMicroseconds(), + sample->mKeyframe ? "true" : "false"); + break; + } + } + LOGV("Received new %s sample time:%" PRId64 " duration:%" PRId64, + TrackTypeToStr(aTrack), sample->mTime.ToMicroseconds(), + sample->mDuration.ToMicroseconds()); + decoder.mOutput.AppendElement(sample); + decoder.mNumSamplesOutput++; + decoder.mNumOfConsecutiveDecodingError = 0; + decoder.mNumOfConsecutiveRDDOrGPUCrashes = 0; + if (aTrack == TrackInfo::kAudioTrack) { + decoder.mNumOfConsecutiveUtilityCrashes = 0; + } + } + } + LOG("Done processing new %s samples", TrackTypeToStr(aTrack)); + + if (!aResults.IsEmpty()) { + // We have decoded our first frame, we can now starts to skip future errors. + decoder.mFirstFrameTime.reset(); + } + ScheduleUpdate(aTrack); +} + +void MediaFormatReader::NotifyError(TrackType aTrack, + const MediaResult& aError) { + MOZ_ASSERT(OnTaskQueue()); + NS_WARNING(aError.Description().get()); + LOGV("%s Decoding error", TrackTypeToStr(aTrack)); + auto& decoder = GetDecoderData(aTrack); + decoder.mError = decoder.HasFatalError() ? 
decoder.mError : Some(aError);
+
+  ScheduleUpdate(aTrack);
+}
+
+void MediaFormatReader::NotifyWaitingForData(TrackType aTrack) {
+  MOZ_ASSERT(OnTaskQueue());
+  auto& decoder = GetDecoderData(aTrack);
+  decoder.mWaitingForData = true;
+  if (decoder.mTimeThreshold) {
+    decoder.mTimeThreshold.ref().mWaiting = true;
+  }
+  ScheduleUpdate(aTrack);
+}
+
+void MediaFormatReader::NotifyWaitingForKey(TrackType aTrack) {
+  MOZ_ASSERT(OnTaskQueue());
+  auto& decoder = GetDecoderData(aTrack);
+  mOnWaitingForKey.Notify();
+  if (!decoder.mDecodeRequest.Exists()) {
+    LOGV("WaitingForKey received while no pending decode. Ignoring");
+    return;
+  }
+  decoder.mWaitingForKey = true;
+  ScheduleUpdate(aTrack);
+}
+
+void MediaFormatReader::NotifyEndOfStream(TrackType aTrack) {
+  MOZ_ASSERT(OnTaskQueue());
+  auto& decoder = GetDecoderData(aTrack);
+  decoder.mDemuxEOS = true;
+  ScheduleUpdate(aTrack);
+}
+
+bool MediaFormatReader::NeedInput(DecoderData& aDecoder) {
+  // The decoder will not be fed a new raw sample until the current decoding
+  // request has completed.
+  return (aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome()) &&
+         !aDecoder.HasPendingDrain() && !aDecoder.HasFatalError() &&
+         !aDecoder.mDemuxRequest.Exists() && !aDecoder.mOutput.Length() &&
+         !aDecoder.HasInternalSeekPending() &&
+         !aDecoder.mDecodeRequest.Exists();
+}
+
+void MediaFormatReader::ScheduleUpdate(TrackType aTrack) {
+  MOZ_ASSERT(OnTaskQueue());
+  if (mShutdown) {
+    return;
+  }
+  auto& decoder = GetDecoderData(aTrack);
+  MOZ_RELEASE_ASSERT(decoder.GetCurrentInfo(),
+                     "Can only schedule update when track exists");
+
+  if (decoder.mUpdateScheduled) {
+    return;
+  }
+  LOGV("SchedulingUpdate(%s)", TrackTypeToStr(aTrack));
+  decoder.mUpdateScheduled = true;
+  RefPtr<nsIRunnable> task(NewRunnableMethod<TrackType>(
+      "MediaFormatReader::Update", this, &MediaFormatReader::Update, aTrack));
+  nsresult rv = OwnerThread()->Dispatch(task.forget());
+  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+  Unused << rv;
+}
+
+bool MediaFormatReader::UpdateReceivedNewData(TrackType aTrack) {
+  MOZ_ASSERT(OnTaskQueue());
+  auto& decoder = GetDecoderData(aTrack);
+
+  if (!decoder.mReceivedNewData) {
+    return false;
+  }
+
+  // We do not want to clear mWaitingForData while there are pending
+  // demuxing or seeking operations that could affect the value of this flag.
+  // This is to ensure that we will retry once they complete, as we may now
+  // have new data that could allow those operations to complete successfully
+  // if tried again.
+  if (decoder.mSeekRequest.Exists()) {
+    // Nothing more to do until this operation completes.
+    return true;
+  }
+
+  if (aTrack == TrackType::kVideoTrack && mSkipRequest.Exists()) {
+    LOGV("Skipping in progress, nothing more to do");
+    return true;
+  }
+
+  if (decoder.mDemuxRequest.Exists()) {
+    // We may have pending operations to process, so we want to continue
+    // after UpdateReceivedNewData returns.
+    return false;
+  }
+
+  if (decoder.HasPendingDrain()) {
+    // We do not want to clear mWaitingForData or mDemuxEOS while
+    // a drain is in progress in order to properly complete the operation.
+ return false; + } + + decoder.mReceivedNewData = false; + if (decoder.mTimeThreshold) { + decoder.mTimeThreshold.ref().mWaiting = false; + } + decoder.mWaitingForData = false; + + if (decoder.HasFatalError()) { + return false; + } + + if (!mSeekPromise.IsEmpty() && + (!IsVideoOnlySeeking() || aTrack == TrackInfo::kVideoTrack)) { + MOZ_ASSERT(!decoder.HasPromise()); + MOZ_DIAGNOSTIC_ASSERT( + (IsVideoOnlySeeking() || !mAudio.mTimeThreshold) && + !mVideo.mTimeThreshold, + "InternalSeek must have been aborted when Seek was first called"); + MOZ_DIAGNOSTIC_ASSERT( + (IsVideoOnlySeeking() || !mAudio.HasWaitingPromise()) && + !mVideo.HasWaitingPromise(), + "Waiting promises must have been rejected when Seek was first called"); + if (mVideo.mSeekRequest.Exists() || + (!IsVideoOnlySeeking() && mAudio.mSeekRequest.Exists())) { + // Already waiting for a seek to complete. Nothing more to do. + return true; + } + LOG("Attempting Seek"); + ScheduleSeek(); + return true; + } + if (decoder.HasInternalSeekPending() || decoder.HasWaitingPromise()) { + if (decoder.HasInternalSeekPending()) { + LOG("Attempting Internal Seek"); + InternalSeek(aTrack, decoder.mTimeThreshold.ref()); + } + if (decoder.HasWaitingPromise() && !decoder.IsWaitingForKey() && + !decoder.IsWaitingForData()) { + MOZ_ASSERT(!decoder.HasPromise()); + LOG("We have new data. Resolving WaitingPromise"); + decoder.mWaitingPromise.Resolve(decoder.mType, __func__); + } + return true; + } + return false; +} + +void MediaFormatReader::RequestDemuxSamples(TrackType aTrack) { + MOZ_ASSERT(OnTaskQueue()); + auto& decoder = GetDecoderData(aTrack); + MOZ_ASSERT(!decoder.mDemuxRequest.Exists()); + + if (!decoder.mQueuedSamples.IsEmpty()) { + // No need to demux new samples. + return; + } + + if (decoder.mDemuxEOS) { + // Nothing left to demux. + // We do not want to attempt to demux while in waiting for data mode + // as it would retrigger an unnecessary drain. + return; + } + + LOGV("Requesting extra demux %s", TrackTypeToStr(aTrack)); + if (aTrack == TrackInfo::kVideoTrack) { + DoDemuxVideo(); + } else { + DoDemuxAudio(); + } +} + +void MediaFormatReader::DecodeDemuxedSamples(TrackType aTrack, + MediaRawData* aSample) { + MOZ_ASSERT(OnTaskQueue()); + auto& decoder = GetDecoderData(aTrack); + RefPtr self = this; + decoder.mFlushed = false; + DDLOGPR(DDLogCategory::Log, + aTrack == TrackInfo::kAudioTrack ? "decode_audio" + : aTrack == TrackInfo::kVideoTrack ? "decode_video" + : "decode_?", + "{\"type\":\"MediaRawData\", \"offset\":%" PRIi64 + ", \"bytes\":%zu, \"time_us\":%" PRIi64 ", \"timecode_us\":%" PRIi64 + ", \"duration_us\":%" PRIi64 ",%s%s}", + aSample->mOffset, aSample->Size(), aSample->mTime.ToMicroseconds(), + aSample->mTimecode.ToMicroseconds(), + aSample->mDuration.ToMicroseconds(), aSample->mKeyframe ? " kf" : "", + aSample->mEOS ? " eos" : ""); + + const int32_t height = + aTrack == TrackInfo::kVideoTrack + ? decoder.GetCurrentInfo()->GetAsVideoInfo()->mImage.height + : 0; + MediaInfoFlag flag = MediaInfoFlag::None; + flag |= + aSample->mKeyframe ? MediaInfoFlag::KeyFrame : MediaInfoFlag::NonKeyFrame; + if (aTrack == TrackInfo::kVideoTrack) { + flag |= VideoIsHardwareAccelerated() ? 
MediaInfoFlag::HardwareDecoding + : MediaInfoFlag::SoftwareDecoding; + const nsCString& mimeType = decoder.GetCurrentInfo()->mMimeType; + if (MP4Decoder::IsH264(mimeType)) { + flag |= MediaInfoFlag::VIDEO_H264; + } else if (VPXDecoder::IsVPX(mimeType, VPXDecoder::VP8)) { + flag |= MediaInfoFlag::VIDEO_VP8; + } else if (VPXDecoder::IsVPX(mimeType, VPXDecoder::VP9)) { + flag |= MediaInfoFlag::VIDEO_VP9; + } +#ifdef MOZ_AV1 + else if (AOMDecoder::IsAV1(mimeType)) { + flag |= MediaInfoFlag::VIDEO_AV1; + } +#endif + } + PerformanceRecorder perfRecorder(MediaStage::RequestDecode, + height, flag); + if (mMediaEngineId && aSample->mCrypto.IsEncrypted()) { + aSample->mShouldCopyCryptoToRemoteRawData = true; + } + decoder.mDecoder->Decode(aSample) + ->Then( + mTaskQueue, __func__, + [self, aTrack, &decoder, perfRecorder(std::move(perfRecorder))]( + MediaDataDecoder::DecodedData&& aResults) mutable { + perfRecorder.Record(); + decoder.mDecodeRequest.Complete(); + self->NotifyNewOutput(aTrack, std::move(aResults)); + }, + [self, aTrack, &decoder](const MediaResult& aError) { + decoder.mDecodeRequest.Complete(); + self->NotifyError(aTrack, aError); + }) + ->Track(decoder.mDecodeRequest); +} + +void MediaFormatReader::HandleDemuxedSamples( + TrackType aTrack, FrameStatistics::AutoNotifyDecoded& aA) { + MOZ_ASSERT(OnTaskQueue()); + + auto& decoder = GetDecoderData(aTrack); + + if (decoder.mFlushing) { + LOGV("Decoder operation in progress, let it complete."); + return; + } + + if (decoder.mQueuedSamples.IsEmpty()) { + return; + } + + RefPtr sample = decoder.mQueuedSamples[0]; + const RefPtr info = sample->mTrackInfo; + + if (info && decoder.mLastStreamSourceID != info->GetID()) { + nsTArray> samples; + if (decoder.mDecoder) { + bool recyclable = + StaticPrefs::media_decoder_recycle_enabled() && + decoder.mDecoder->SupportDecoderRecycling() && + (*info)->mCrypto.mCryptoScheme == + decoder.GetCurrentInfo()->mCrypto.mCryptoScheme && + (*info)->mMimeType == decoder.GetCurrentInfo()->mMimeType; + if (!recyclable && decoder.mTimeThreshold.isNothing() && + (decoder.mNextStreamSourceID.isNothing() || + decoder.mNextStreamSourceID.ref() != info->GetID())) { + LOG("%s stream id has changed from:%d to:%d, draining decoder.", + TrackTypeToStr(aTrack), decoder.mLastStreamSourceID, info->GetID()); + decoder.RequestDrain(); + decoder.mNextStreamSourceID = Some(info->GetID()); + ScheduleUpdate(aTrack); + return; + } + + // If flushing is required, it will clear our array of queued samples. + // So we may need to make a copy. + samples = decoder.mQueuedSamples.Clone(); + if (!recyclable) { + LOG("Decoder does not support recycling, recreate decoder."); + ShutdownDecoder(aTrack); + // We're going to be using a new decoder following the change of content + // We can attempt to use hardware decoding again. + decoder.mHardwareDecodingDisabled = false; + } else if (decoder.HasWaitingPromise()) { + decoder.Flush(); + } + } + + nsPrintfCString markerString( + "%s stream id changed from:%" PRIu32 " to:%" PRIu32, + TrackTypeToStr(aTrack), decoder.mLastStreamSourceID, info->GetID()); + PROFILER_MARKER_TEXT("StreamID Change", MEDIA_PLAYBACK, {}, markerString); + LOG("%s", markerString.get()); + + if (aTrack == TrackInfo::kVideoTrack) { + // We are about to create a new decoder thus the benchmark, + // up to this point, is stored. 
+ NotifyDecoderBenchmarkStore(); + } + decoder.mNextStreamSourceID.reset(); + decoder.mLastStreamSourceID = info->GetID(); + decoder.mInfo = info; + { + MutexAutoLock lock(decoder.mMutex); + if (aTrack == TrackInfo::kAudioTrack) { + decoder.mWorkingInfo = MakeUnique(*info->GetAsAudioInfo()); + } else if (aTrack == TrackInfo::kVideoTrack) { + decoder.mWorkingInfo = MakeUnique(*info->GetAsVideoInfo()); + } + mWorkingInfoChanged = true; + } + + decoder.mMeanRate.Reset(); + + if (sample->mKeyframe) { + if (samples.Length()) { + decoder.mQueuedSamples = std::move(samples); + } + } else { + auto time = TimeInterval(sample->mTime, sample->GetEndTime()); + InternalSeekTarget seekTarget = + decoder.mTimeThreshold.refOr(InternalSeekTarget(time, false)); + LOG("Stream change occurred on a non-keyframe. Seeking to:%" PRId64, + sample->mTime.ToMicroseconds()); + InternalSeek(aTrack, seekTarget); + return; + } + } + + // Calculate the average frame rate. The first frame will be accounted + // for twice. + decoder.mMeanRate.Update(sample->mDuration); + + if (!decoder.mDecoder) { + // In Clear Lead situation, the `mInfo` could change from unencrypted to + // encrypted so we need to ensure the CDM proxy is ready before creating a + // decoder. + if (decoder.IsEncrypted() && + (IsWaitingOnCDMResource() || !ResolveSetCDMPromiseIfDone(aTrack))) { + return; + } + mDecoderFactory->CreateDecoder(aTrack); + return; + } + + LOGV("Input:%" PRId64 " (dts:%" PRId64 " kf:%d)", + sample->mTime.ToMicroseconds(), sample->mTimecode.ToMicroseconds(), + sample->mKeyframe); + decoder.mNumSamplesInput++; + decoder.mSizeOfQueue++; + if (aTrack == TrackInfo::kVideoTrack) { + aA.mStats.mParsedFrames++; + } + + DecodeDemuxedSamples(aTrack, sample); + + decoder.mQueuedSamples.RemoveElementAt(0); +} + +media::TimeUnit MediaFormatReader::GetInternalSeekTargetEndTime() const { + MOZ_ASSERT(OnTaskQueue()); + return mVideo.mTimeThreshold ? 
mVideo.mTimeThreshold->EndTime() + : TimeUnit::FromInfinity(); +} + +void MediaFormatReader::InternalSeek(TrackType aTrack, + const InternalSeekTarget& aTarget) { + MOZ_ASSERT(OnTaskQueue()); + LOG("%s internal seek to %f", TrackTypeToStr(aTrack), + aTarget.Time().ToSeconds()); + + auto& decoder = GetDecoderData(aTrack); + decoder.Flush(); + decoder.ResetDemuxer(); + decoder.mTimeThreshold = Some(aTarget); + DDLOG(DDLogCategory::Log, "seeking", DDNoValue{}); + RefPtr self = this; + decoder.mTrackDemuxer->Seek(decoder.mTimeThreshold.ref().Time()) + ->Then( + OwnerThread(), __func__, + [self, aTrack](TimeUnit aTime) { + DDLOGEX(self.get(), DDLogCategory::Log, "seeked", DDNoValue{}); + auto& decoder = self->GetDecoderData(aTrack); + decoder.mSeekRequest.Complete(); + MOZ_ASSERT(decoder.mTimeThreshold, + "Seek promise must be disconnected when " + "timethreshold is reset"); + decoder.mTimeThreshold.ref().mHasSeeked = true; + self->SetVideoDecodeThreshold(); + self->ScheduleUpdate(aTrack); + }, + [self, aTrack](const MediaResult& aError) { + auto& decoder = self->GetDecoderData(aTrack); + decoder.mSeekRequest.Complete(); + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: + DDLOGEX(self.get(), DDLogCategory::Log, "seeking_interrupted", + aError); + self->NotifyWaitingForData(aTrack); + break; + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + DDLOGEX(self.get(), DDLogCategory::Log, "seeking_interrupted", + aError); + decoder.mTimeThreshold.reset(); + self->NotifyEndOfStream(aTrack); + break; + case NS_ERROR_DOM_MEDIA_CANCELED: + DDLOGEX(self.get(), DDLogCategory::Log, "seeking_interrupted", + aError); + decoder.mTimeThreshold.reset(); + break; + default: + DDLOGEX(self.get(), DDLogCategory::Log, "seeking_error", + aError); + decoder.mTimeThreshold.reset(); + self->NotifyError(aTrack, aError); + break; + } + }) + ->Track(decoder.mSeekRequest); +} + +void MediaFormatReader::DrainDecoder(TrackType aTrack) { + AUTO_PROFILER_LABEL("MediaFormatReader::DrainDecoder", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + auto& decoder = GetDecoderData(aTrack); + if (decoder.mDrainState == DrainState::Draining) { + return; + } + if (!decoder.mDecoder || + (decoder.mDrainState != DrainState::PartialDrainPending && + decoder.mNumSamplesInput == decoder.mNumSamplesOutput)) { + // No frames to drain. + LOGV("Draining %s with nothing to drain", TrackTypeToStr(aTrack)); + decoder.mDrainState = DrainState::DrainAborted; + ScheduleUpdate(aTrack); + return; + } + + decoder.mDrainState = DrainState::Draining; + + DDLOG(DDLogCategory::Log, "draining", DDNoValue{}); + RefPtr self = this; + decoder.mDecoder->Drain() + ->Then( + mTaskQueue, __func__, + [self, aTrack, &decoder](MediaDataDecoder::DecodedData&& aResults) { + decoder.mDrainRequest.Complete(); + DDLOGEX(self.get(), DDLogCategory::Log, "drained", DDNoValue{}); + if (aResults.IsEmpty()) { + decoder.mDrainState = DrainState::DrainCompleted; + } else { + self->NotifyNewOutput(aTrack, std::move(aResults)); + // Let's see if we have any more data available to drain. 
+ decoder.mDrainState = DrainState::PartialDrainPending; + } + self->ScheduleUpdate(aTrack); + }, + [self, aTrack, &decoder](const MediaResult& aError) { + decoder.mDrainRequest.Complete(); + DDLOGEX(self.get(), DDLogCategory::Log, "draining_error", aError); + self->NotifyError(aTrack, aError); + }) + ->Track(decoder.mDrainRequest); + LOG("Requesting %s decoder to drain", TrackTypeToStr(aTrack)); +} + +void MediaFormatReader::Update(TrackType aTrack) { + AUTO_PROFILER_LABEL("MediaFormatReader::Update", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + if (mShutdown) { + return; + } + + LOGV("Processing update for %s", TrackTypeToStr(aTrack)); + + bool needOutput = false; + auto& decoder = GetDecoderData(aTrack); + decoder.mUpdateScheduled = false; + + if (!mInitDone) { + return; + } + + if (aTrack == TrackType::kVideoTrack && mSkipRequest.Exists()) { + LOGV("Skipping in progress, nothing more to do"); + return; + } + + if (UpdateReceivedNewData(aTrack)) { + LOGV("Nothing more to do"); + return; + } + + if (decoder.mSeekRequest.Exists()) { + LOGV("Seeking hasn't completed, nothing more to do"); + return; + } + + MOZ_DIAGNOSTIC_ASSERT( + !decoder.HasInternalSeekPending() || + (!decoder.mOutput.Length() && !decoder.mQueuedSamples.Length()), + "No frames can be demuxed or decoded while an internal seek is pending"); + + // Record number of frames decoded and parsed. Automatically update the + // stats counters using the AutoNotifyDecoded stack-based class. + FrameStatistics::AutoNotifyDecoded a(mFrameStats); + + // Drop any frames found prior our internal seek target. + while (decoder.mTimeThreshold && decoder.mOutput.Length()) { + RefPtr& output = decoder.mOutput[0]; + InternalSeekTarget target = decoder.mTimeThreshold.ref(); + auto time = output->mTime; + if (time >= target.Time()) { + // We have reached our internal seek target. + decoder.mTimeThreshold.reset(); + // We might have dropped some keyframes. + mPreviousDecodedKeyframeTime_us = sNoPreviousDecodedKeyframe; + } + if (time < target.Time() || (target.mDropTarget && target.Contains(time))) { + LOGV("Internal Seeking: Dropping %s frame time:%f wanted:%f (kf:%d)", + TrackTypeToStr(aTrack), output->mTime.ToSeconds(), + target.Time().ToSeconds(), output->mKeyframe); + decoder.mOutput.RemoveElementAt(0); + decoder.mSizeOfQueue -= 1; + } + } + + while (decoder.mOutput.Length() && + decoder.mOutput[0]->mType == MediaData::Type::NULL_DATA) { + LOGV("Dropping null data. Time: %" PRId64, + decoder.mOutput[0]->mTime.ToMicroseconds()); + decoder.mOutput.RemoveElementAt(0); + decoder.mSizeOfQueue -= 1; + } + + if (decoder.HasPromise()) { + needOutput = true; + if (decoder.mOutput.Length()) { + RefPtr output = decoder.mOutput[0]; + decoder.mOutput.RemoveElementAt(0); + decoder.mSizeOfQueue -= 1; + decoder.mLastDecodedSampleTime = + Some(TimeInterval(output->mTime, output->GetEndTime())); + decoder.mNumSamplesOutputTotal++; + ReturnOutput(output, aTrack); + // We have a decoded sample ready to be returned. + if (aTrack == TrackType::kVideoTrack) { + uint64_t delta = + decoder.mNumSamplesOutputTotal - mLastReportedNumDecodedFrames; + a.mStats.mDecodedFrames = static_cast(delta); + mLastReportedNumDecodedFrames = decoder.mNumSamplesOutputTotal; + if (output->mKeyframe) { + if (mPreviousDecodedKeyframeTime_us < + output->mTime.ToMicroseconds()) { + // There is a previous keyframe -> Record inter-keyframe stats. 
+ uint64_t segment_us = output->mTime.ToMicroseconds() - + mPreviousDecodedKeyframeTime_us; + a.mStats.mInterKeyframeSum_us += segment_us; + a.mStats.mInterKeyframeCount += 1; + if (a.mStats.mInterKeyFrameMax_us < segment_us) { + a.mStats.mInterKeyFrameMax_us = segment_us; + } + } + mPreviousDecodedKeyframeTime_us = output->mTime.ToMicroseconds(); + } + bool wasHardwareAccelerated = mVideo.mIsHardwareAccelerated; + nsCString error; + mVideo.mIsHardwareAccelerated = + mVideo.mDecoder && mVideo.mDecoder->IsHardwareAccelerated(error); + VideoData* videoData = output->As(); + if (!mVideo.mHasReportedVideoHardwareSupportTelemtry || + wasHardwareAccelerated != mVideo.mIsHardwareAccelerated) { + mVideo.mHasReportedVideoHardwareSupportTelemtry = true; + Telemetry::ScalarSet( + Telemetry::ScalarID::MEDIA_VIDEO_HARDWARE_DECODING_SUPPORT, + NS_ConvertUTF8toUTF16(mVideo.GetCurrentInfo()->mMimeType), + !!mVideo.mIsHardwareAccelerated); + static constexpr gfx::IntSize HD_VIDEO_SIZE{1280, 720}; + if (videoData->mDisplay.width >= HD_VIDEO_SIZE.Width() && + videoData->mDisplay.height >= HD_VIDEO_SIZE.Height()) { + Telemetry::ScalarSet( + Telemetry::ScalarID::MEDIA_VIDEO_HD_HARDWARE_DECODING_SUPPORT, + NS_ConvertUTF8toUTF16(mVideo.GetCurrentInfo()->mMimeType), + !!mVideo.mIsHardwareAccelerated); + } + } +#ifdef XP_WIN + // D3D11_YCBCR_IMAGE images are GPU based, we try to limit the amount + // of GPU RAM used. + mVideo.mIsHardwareAccelerated = + mVideo.mIsHardwareAccelerated || + (videoData->mImage && + videoData->mImage->GetFormat() == ImageFormat::D3D11_YCBCR_IMAGE); +#endif + } + } else if (decoder.HasFatalError()) { + nsCString mimeType = decoder.GetCurrentInfo()->mMimeType; + if (!mimeType.IsEmpty()) { + Telemetry::ScalarAdd( + Telemetry::ScalarID::MEDIA_DECODE_ERROR_PER_MIME_TYPE, + NS_ConvertUTF8toUTF16(mimeType), 1 /* error count */); + } + LOG("Rejecting %s promise for %s : DECODE_ERROR", TrackTypeToStr(aTrack), + mimeType.get()); + decoder.RejectPromise(decoder.mError.ref(), __func__); + return; + } else if (decoder.HasCompletedDrain()) { + if (decoder.mDemuxEOS) { + LOG("Rejecting %s promise: EOS", TrackTypeToStr(aTrack)); + if (aTrack == TrackInfo::kVideoTrack) { + // End of video, store the benchmark of the decoder. + NotifyDecoderBenchmarkStore(); + } + decoder.RejectPromise(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); + } else if (decoder.mWaitingForData) { + if (decoder.mDrainState == DrainState::DrainCompleted && + decoder.mLastDecodedSampleTime && !decoder.mNextStreamSourceID) { + // We have completed draining the decoder following WaitingForData. + // Set up the internal seek machinery to be able to resume from the + // last sample decoded. + LOG("Seeking to last sample time: %" PRId64, + decoder.mLastDecodedSampleTime.ref().mStart.ToMicroseconds()); + InternalSeek(aTrack, InternalSeekTarget( + decoder.mLastDecodedSampleTime.ref(), true)); + } + if (!decoder.mReceivedNewData) { + LOG("Rejecting %s promise: WAITING_FOR_DATA", TrackTypeToStr(aTrack)); + decoder.RejectPromise(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__); + } + } + + decoder.mDrainState = DrainState::None; + + // Now that draining has completed, we check if we have received + // new data again as the result may now be different from the earlier + // run. 
+      if (UpdateReceivedNewData(aTrack) || decoder.mSeekRequest.Exists()) {
+        LOGV("Nothing more to do");
+        return;
+      }
+    } else if (decoder.mDemuxEOS && !decoder.HasPendingDrain() &&
+               decoder.mQueuedSamples.IsEmpty()) {
+      // It is possible to transition from WAITING_FOR_DATA directly to EOS
+      // state during the internal seek, in which case no draining would occur.
+      // There are no more samples left to decode and we are already in the
+      // EOS state, so we can immediately reject the data promise.
+      LOG("Rejecting %s promise: EOS", TrackTypeToStr(aTrack));
+      decoder.RejectPromise(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
+    } else if (decoder.mWaitingForKey) {
+      LOG("Rejecting %s promise: WAITING_FOR_DATA due to waiting for key",
+          TrackTypeToStr(aTrack));
+      decoder.RejectPromise(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__);
+    } else if (IsDecoderWaitingForCDM(aTrack)) {
+      // Rejecting the promise could lead the MDSM to enter the buffering
+      // state; once a qualified new CDM proxy (with the same key system and
+      // sessions created by the same InitData) is set, decoding can resume.
+      LOG("Rejecting %s promise: WAITING_FOR_DATA due to waiting for CDM",
+          TrackTypeToStr(aTrack));
+      decoder.RejectPromise(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__);
+    }
+  }
+
+  if (decoder.mDrainState == DrainState::DrainRequested ||
+      decoder.mDrainState == DrainState::PartialDrainPending) {
+    if (decoder.mOutput.IsEmpty()) {
+      DrainDecoder(aTrack);
+    }
+    return;
+  }
+
+  if (decoder.mError && !decoder.HasFatalError()) {
+    MOZ_RELEASE_ASSERT(!decoder.HasInternalSeekPending(),
+                       "No error can occur while an internal seek is pending");
+
+    nsCString error;
+    bool firstFrameDecodingFailedWithHardware =
+        decoder.mFirstFrameTime &&
+        decoder.mError.ref() == NS_ERROR_DOM_MEDIA_DECODE_ERR &&
+        decoder.mDecoder && decoder.mDecoder->IsHardwareAccelerated(error) &&
+        !decoder.mHardwareDecodingDisabled;
+    bool needsNewDecoder =
+        decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER ||
+        firstFrameDecodingFailedWithHardware;
+    // Limit the number of process restarts after a crash.
+    if ((decoder.mError.ref() ==
+             NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_RDD_OR_GPU_ERR &&
+         decoder.mNumOfConsecutiveRDDOrGPUCrashes++ <
+             decoder.mMaxConsecutiveRDDOrGPUCrashes) ||
+        (decoder.mError.ref() ==
+             NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_UTILITY_ERR &&
+         decoder.mNumOfConsecutiveUtilityCrashes++ <
+             decoder.mMaxConsecutiveUtilityCrashes)) {
+      needsNewDecoder = true;
+    }
+    // An MF CDM crash needs to be handled differently: shut down the current
+    // decoder and report the error to the state machine so that it can
+    // determine whether playback can keep going.
+    if (decoder.mError.ref() ==
+        NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_MF_CDM_ERR) {
+      LOG("Error: notify MF CDM crash and shutdown %s decoder",
+          TrackTypeToStr(aTrack));
+      ShutdownDecoder(aTrack);
+      decoder.RejectPromise(decoder.mError.ref(), __func__);
+      decoder.mError.reset();
+      return;
+    }
+#ifdef XP_LINUX
+    // We failed to decode on Linux with the HW decoder;
+    // give it another try without the HW decoder.
+    if (decoder.mError.ref() == NS_ERROR_DOM_MEDIA_DECODE_ERR &&
+        decoder.mDecoder->IsHardwareAccelerated(error)) {
+      LOG("Error: %s decode error, disable HW acceleration",
+          TrackTypeToStr(aTrack));
+      needsNewDecoder = true;
+      decoder.mHardwareDecodingDisabled = true;
+    }
+    // The RDD process crashed on Linux; give it another try without the HW
+    // decoder.
+ if (decoder.mError.ref() == + NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_RDD_OR_GPU_ERR) { + LOG("Error: %s remote decoder crashed, disable HW acceleration", + TrackTypeToStr(aTrack)); + decoder.mHardwareDecodingDisabled = true; + } +#endif + // We don't want to expose crash error so switch to + // NS_ERROR_DOM_MEDIA_DECODE_ERR. + if (decoder.mError.ref() == + NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_RDD_OR_GPU_ERR || + decoder.mError.ref() == + NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_UTILITY_ERR) { + decoder.mError = Some(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, + RESULT_DETAIL("Unable to decode"))); + } + if (!needsNewDecoder && ++decoder.mNumOfConsecutiveDecodingError > + decoder.mMaxConsecutiveDecodingError) { + DDLOG(DDLogCategory::Log, "too_many_decode_errors", decoder.mError.ref()); + NotifyError(aTrack, decoder.mError.ref()); + return; + } + + if (firstFrameDecodingFailedWithHardware) { + decoder.mHardwareDecodingDisabled = true; + } + decoder.mError.reset(); + + LOG("%s decoded error count %d RDD crashes count %d", + TrackTypeToStr(aTrack), decoder.mNumOfConsecutiveDecodingError, + decoder.mNumOfConsecutiveRDDOrGPUCrashes); + + if (needsNewDecoder) { + LOG("Error: %s needs a new decoder", TrackTypeToStr(aTrack)); + ShutdownDecoder(aTrack); + } + if (decoder.mFirstFrameTime) { + TimeInterval seekInterval = TimeInterval(decoder.mFirstFrameTime.ref(), + decoder.mFirstFrameTime.ref()); + InternalSeek(aTrack, InternalSeekTarget(seekInterval, false)); + return; + } + + TimeUnit nextKeyframe; + if (aTrack == TrackType::kVideoTrack && + NS_SUCCEEDED( + decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe)) && + !nextKeyframe.IsInfinite()) { + SkipVideoDemuxToNextKeyFrame( + decoder.mLastDecodedSampleTime.refOr(TimeInterval()).Length()); + } else if (aTrack == TrackType::kAudioTrack) { + decoder.Flush(); + } else { + DDLOG(DDLogCategory::Log, "no_keyframe", NS_ERROR_DOM_MEDIA_FATAL_ERR); + // We can't recover from this error. + NotifyError(aTrack, NS_ERROR_DOM_MEDIA_FATAL_ERR); + } + return; + } + + bool needInput = NeedInput(decoder); + + LOGV("Update(%s) ni=%d no=%d in:%" PRIu64 " out:%" PRIu64 + " qs=%u decoding:%d flushing:%d desc:%s pending:%u waiting:%d eos:%d " + "ds:%d sid:%u waitcdm:%d", + TrackTypeToStr(aTrack), needInput, needOutput, decoder.mNumSamplesInput, + decoder.mNumSamplesOutput, uint32_t(size_t(decoder.mSizeOfQueue)), + decoder.mDecodeRequest.Exists(), decoder.mFlushing, + decoder.mDescription.get(), uint32_t(decoder.mOutput.Length()), + decoder.mWaitingForData, decoder.mDemuxEOS, int32_t(decoder.mDrainState), + decoder.mLastStreamSourceID, IsDecoderWaitingForCDM(aTrack)); + + if (IsWaitingOnCDMResource() || !ResolveSetCDMPromiseIfDone(aTrack)) { + // If the content is encrypted, MFR won't start to create decoder until + // CDMProxy is set. + return; + } + + if ((decoder.IsWaitingForData() && + (!decoder.mTimeThreshold || decoder.mTimeThreshold.ref().mWaiting)) || + (decoder.IsWaitingForKey())) { + // Nothing more we can do at present. + LOGV("Still waiting for data or key. data(%d)/key(%d)", + decoder.mWaitingForData, decoder.mWaitingForKey); + return; + } + + if (decoder.CancelWaitingForKey()) { + LOGV("No longer waiting for key. Resolving waiting promise"); + return; + } + + if (!needInput) { + LOGV("No need for additional input (pending:%u)", + uint32_t(decoder.mOutput.Length())); + return; + } + + // Demux samples if we don't have some. 
+  RequestDemuxSamples(aTrack);
+
+  HandleDemuxedSamples(aTrack, a);
+}
+
+void MediaFormatReader::ReturnOutput(MediaData* aData, TrackType aTrack) {
+  AUTO_PROFILER_LABEL("MediaFormatReader::ReturnOutput", MEDIA_PLAYBACK);
+  MOZ_ASSERT(GetDecoderData(aTrack).HasPromise());
+  MOZ_DIAGNOSTIC_ASSERT(aData->mType != MediaData::Type::NULL_DATA);
+  LOG("Resolved data promise for %s [%" PRId64 ", %" PRId64 "]",
+      TrackTypeToStr(aTrack), aData->mTime.ToMicroseconds(),
+      aData->GetEndTime().ToMicroseconds());
+
+  if (aTrack == TrackInfo::kAudioTrack) {
+    AudioData* audioData = aData->As<AudioData>();
+
+    if (audioData->mChannels != mInfo.mAudio.mChannels ||
+        audioData->mRate != mInfo.mAudio.mRate) {
+      LOG("change of audio format (rate:%d->%d). "
+          "This is an unsupported configuration",
+          mInfo.mAudio.mRate, audioData->mRate);
+      mInfo.mAudio.mRate = audioData->mRate;
+      mInfo.mAudio.mChannels = audioData->mChannels;
+      MutexAutoLock lock(mAudio.mMutex);
+      mAudio.mWorkingInfo->GetAsAudioInfo()->mRate = audioData->mRate;
+      mAudio.mWorkingInfo->GetAsAudioInfo()->mChannels = audioData->mChannels;
+      mWorkingInfoChanged = true;
+    }
+    mAudio.ResolvePromise(audioData, __func__);
+  } else if (aTrack == TrackInfo::kVideoTrack) {
+    VideoData* videoData = aData->As<VideoData>();
+
+    if (videoData->mDisplay != mInfo.mVideo.mDisplay) {
+      LOG("change of video display size (%dx%d->%dx%d)",
+          mInfo.mVideo.mDisplay.width, mInfo.mVideo.mDisplay.height,
+          videoData->mDisplay.width, videoData->mDisplay.height);
+      mInfo.mVideo.mDisplay = videoData->mDisplay;
+      MutexAutoLock lock(mVideo.mMutex);
+      mVideo.mWorkingInfo->GetAsVideoInfo()->mDisplay = videoData->mDisplay;
+      mWorkingInfoChanged = true;
+    }
+
+    mozilla::gfx::ColorDepth colorDepth = videoData->GetColorDepth();
+    if (colorDepth != mInfo.mVideo.mColorDepth) {
+      LOG("change of video color depth (enum %u -> enum %u)",
+          (unsigned)mInfo.mVideo.mColorDepth, (unsigned)colorDepth);
+      mInfo.mVideo.mColorDepth = colorDepth;
+      MutexAutoLock lock(mVideo.mMutex);
+      mVideo.mWorkingInfo->GetAsVideoInfo()->mColorDepth = colorDepth;
+      mWorkingInfoChanged = true;
+    }
+
+    TimeUnit nextKeyframe;
+    if (!mVideo.HasInternalSeekPending() &&
+        NS_SUCCEEDED(
+            mVideo.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
+      videoData->SetNextKeyFrameTime(nextKeyframe);
+    }
+
+    mVideo.ResolvePromise(videoData, __func__);
+  }
+}
+
+size_t MediaFormatReader::SizeOfVideoQueueInFrames() {
+  return SizeOfQueue(TrackInfo::kVideoTrack);
+}
+
+size_t MediaFormatReader::SizeOfAudioQueueInFrames() {
+  return SizeOfQueue(TrackInfo::kAudioTrack);
+}
+
+size_t MediaFormatReader::SizeOfQueue(TrackType aTrack) {
+  auto& decoder = GetDecoderData(aTrack);
+  return decoder.mSizeOfQueue;
+}
+
+RefPtr<MediaFormatReader::WaitForDataPromise> MediaFormatReader::WaitForData(
+    MediaData::Type aType) {
+  MOZ_ASSERT(OnTaskQueue());
+  TrackType trackType = aType == MediaData::Type::VIDEO_DATA
+                            ? TrackType::kVideoTrack
+                            : TrackType::kAudioTrack;
+  auto& decoder = GetDecoderData(trackType);
+  if (!decoder.IsWaitingForData() && !decoder.IsWaitingForKey()) {
+    // We aren't waiting for anything.
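+    // (Roughly: resolve immediately when nothing is pending; otherwise the
+    // caller is parked on mWaitingPromise below and resolved once a later
+    // Update() pass sees that the data or the decryption key has arrived.)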
+ return WaitForDataPromise::CreateAndResolve(decoder.mType, __func__); + } + RefPtr p = decoder.mWaitingPromise.Ensure(__func__); + ScheduleUpdate(trackType); + return p; +} + +nsresult MediaFormatReader::ResetDecode(const TrackSet& aTracks) { + AUTO_PROFILER_LABEL("MediaFormatReader::ResetDecode", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + LOGV(""); + + mSeekPromise.RejectIfExists(NS_OK, __func__); + mSkipRequest.DisconnectIfExists(); + + // Do the same for any data wait promises. + if (aTracks.contains(TrackInfo::kAudioTrack)) { + mAudio.mWaitingPromise.RejectIfExists( + WaitForDataRejectValue(MediaData::Type::AUDIO_DATA, + WaitForDataRejectValue::CANCELED), + __func__); + } + + if (aTracks.contains(TrackInfo::kVideoTrack)) { + mVideo.mWaitingPromise.RejectIfExists( + WaitForDataRejectValue(MediaData::Type::VIDEO_DATA, + WaitForDataRejectValue::CANCELED), + __func__); + } + + // Reset miscellaneous seeking state. + mPendingSeekTime.reset(); + + if (HasVideo() && aTracks.contains(TrackInfo::kVideoTrack)) { + mVideo.ResetDemuxer(); + mVideo.mFirstFrameTime = Some(media::TimeUnit::Zero()); + Reset(TrackInfo::kVideoTrack); + if (mVideo.HasPromise()) { + mVideo.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); + } + } + + if (HasAudio() && aTracks.contains(TrackInfo::kAudioTrack)) { + mAudio.ResetDemuxer(); + mVideo.mFirstFrameTime = Some(media::TimeUnit::Zero()); + Reset(TrackInfo::kAudioTrack); + if (mAudio.HasPromise()) { + mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); + } + } + + return NS_OK; +} + +void MediaFormatReader::Reset(TrackType aTrack) { + MOZ_ASSERT(OnTaskQueue()); + LOG("Reset(%s) BEGIN", TrackTypeToStr(aTrack)); + + auto& decoder = GetDecoderData(aTrack); + + decoder.ResetState(); + decoder.Flush(); + + LOG("Reset(%s) END", TrackTypeToStr(aTrack)); +} + +void MediaFormatReader::DropDecodedSamples(TrackType aTrack) { + MOZ_ASSERT(OnTaskQueue()); + auto& decoder = GetDecoderData(aTrack); + size_t lengthDecodedQueue = decoder.mOutput.Length(); + if (lengthDecodedQueue && decoder.mTimeThreshold.isSome()) { + auto time = decoder.mOutput.LastElement()->mTime; + if (time >= decoder.mTimeThreshold.ref().Time()) { + // We would have reached our internal seek target. + decoder.mTimeThreshold.reset(); + } + } + decoder.mOutput.Clear(); + decoder.mSizeOfQueue -= lengthDecodedQueue; + if (aTrack == TrackInfo::kVideoTrack && mFrameStats) { + mFrameStats->Accumulate({0, 0, 0, lengthDecodedQueue, 0, 0}); + } +} + +void MediaFormatReader::SkipVideoDemuxToNextKeyFrame(TimeUnit aTimeThreshold) { + AUTO_PROFILER_LABEL("MediaFormatReader::SkipVideoDemuxToNextKeyFrame", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + LOG("Skipping up to %" PRId64, aTimeThreshold.ToMicroseconds()); + + // We've reached SkipVideoDemuxToNextKeyFrame when our decoding is late. + // As such we can drop all already decoded samples and discard all pending + // samples. + DropDecodedSamples(TrackInfo::kVideoTrack); + + mVideo.mTrackDemuxer->SkipToNextRandomAccessPoint(aTimeThreshold) + ->Then(OwnerThread(), __func__, this, + &MediaFormatReader::OnVideoSkipCompleted, + &MediaFormatReader::OnVideoSkipFailed) + ->Track(mSkipRequest); +} + +void MediaFormatReader::VideoSkipReset(uint32_t aSkipped) { + PROFILER_MARKER_UNTYPED("SkippedVideoDecode", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + // Some frames may have been output by the decoder since we initiated the + // videoskip process and we know they would be late. 
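+  // (So VideoSkipReset() discards everything already decoded or demuxed for
+  // video, records those frames as dropped in mFrameStats, and resets the
+  // track so decoding resumes from the keyframe the demuxer skipped to.)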
+ DropDecodedSamples(TrackInfo::kVideoTrack); + // Report the pending frames as dropped. + if (mFrameStats) { + uint32_t droppedDecoderCount = SizeOfVideoQueueInFrames(); + mFrameStats->Accumulate({0, 0, 0, droppedDecoderCount, 0, 0}); + } + + // Cancel any pending demux request and pending demuxed samples. + mVideo.mDemuxRequest.DisconnectIfExists(); + Reset(TrackType::kVideoTrack); + + if (mFrameStats) { + mFrameStats->Accumulate({aSkipped, 0, 0, aSkipped, 0, 0}); + } + + mVideo.mNumSamplesSkippedTotal += aSkipped; +} + +void MediaFormatReader::OnVideoSkipCompleted(uint32_t aSkipped) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnVideoSkipCompleted", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + LOG("Skipping succeeded, skipped %u frames", aSkipped); + mSkipRequest.Complete(); + + DDLOG(DDLogCategory::Log, "video_skipped", DDNoValue()); + + VideoSkipReset(aSkipped); + + ScheduleUpdate(TrackInfo::kVideoTrack); +} + +void MediaFormatReader::OnVideoSkipFailed( + MediaTrackDemuxer::SkipFailureHolder aFailure) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnVideoSkipFailed", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + LOG("Skipping failed, skipped %u frames", aFailure.mSkipped); + mSkipRequest.Complete(); + + switch (aFailure.mFailure.Code()) { + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: + DDLOG(DDLogCategory::Log, "video_skipping_interruption", + aFailure.mFailure); + // Some frames may have been output by the decoder since we initiated the + // videoskip process and we know they would be late. + DropDecodedSamples(TrackInfo::kVideoTrack); + // We can't complete the skip operation, will just service a video frame + // normally. + ScheduleUpdate(TrackInfo::kVideoTrack); + break; + case NS_ERROR_DOM_MEDIA_CANCELED: + DDLOG(DDLogCategory::Log, "video_skipping_interruption", + aFailure.mFailure); + if (mVideo.HasPromise()) { + mVideo.RejectPromise(aFailure.mFailure, __func__); + } + break; + default: + DDLOG(DDLogCategory::Log, "video_skipping_error", aFailure.mFailure); + NotifyError(TrackType::kVideoTrack, aFailure.mFailure); + break; + } +} + +RefPtr MediaFormatReader::Seek( + const SeekTarget& aTarget) { + AUTO_PROFILER_LABEL("MediaFormatReader::Seek", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + LOG("aTarget=(%" PRId64 "), track=%s", aTarget.GetTime().ToMicroseconds(), + SeekTarget::TrackToStr(aTarget.GetTrack())); + + MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty()); + MOZ_DIAGNOSTIC_ASSERT(mPendingSeekTime.isNothing()); + // Should reset data request, and no pending internal seek. 
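+  // (The per-track assertions below are scoped to the tracks the seek target
+  // actually covers: a video-only or audio-only seek may legitimately leave
+  // the other track's data promise and internal-seek threshold untouched.)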
+ if (aTarget.IsAllTracks()) { + MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise()); + MOZ_DIAGNOSTIC_ASSERT(!mAudio.HasPromise()); + MOZ_DIAGNOSTIC_ASSERT(mVideo.mTimeThreshold.isNothing()); + MOZ_DIAGNOSTIC_ASSERT(mAudio.mTimeThreshold.isNothing()); + } else if (aTarget.IsVideoOnly()) { + MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise()); + MOZ_DIAGNOSTIC_ASSERT(mVideo.mTimeThreshold.isNothing()); + } else if (aTarget.IsAudioOnly()) { + MOZ_DIAGNOSTIC_ASSERT(!mAudio.HasPromise()); + MOZ_DIAGNOSTIC_ASSERT(mAudio.mTimeThreshold.isNothing()); + } + + if (!mInfo.mMediaSeekable && !mInfo.mMediaSeekableOnlyInBufferedRanges) { + LOG("Seek() END (Unseekable)"); + return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__); + } + + if (mShutdown) { + return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__); + } + + SetSeekTarget(aTarget); + + RefPtr p = mSeekPromise.Ensure(__func__); + + ScheduleSeek(); + + return p; +} + +void MediaFormatReader::SetSeekTarget(const SeekTarget& aTarget) { + MOZ_ASSERT(OnTaskQueue()); + + mOriginalSeekTarget = aTarget; + mFallbackSeekTime = mPendingSeekTime = Some(aTarget.GetTime()); +} + +void MediaFormatReader::ScheduleSeek() { + if (mSeekScheduled) { + return; + } + mSeekScheduled = true; + nsresult rv = OwnerThread()->Dispatch(NewRunnableMethod( + "MediaFormatReader::AttemptSeek", this, &MediaFormatReader::AttemptSeek)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; +} + +void MediaFormatReader::AttemptSeek() { + AUTO_PROFILER_LABEL("MediaFormatReader::AttemptSeek", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + mSeekScheduled = false; + + if (mPendingSeekTime.isNothing()) { + LOGV("AttemptSeek, no pending seek time?"); + return; + } + + // Only reset the demuxers targeted by this SeekTarget, to avoid A/V sync + // issues. + const bool isSeekingAudio = HasAudio() && !mOriginalSeekTarget.IsVideoOnly(); + const bool isSeekingVideo = HasVideo() && !mOriginalSeekTarget.IsAudioOnly(); + LOG("AttemptSeek, seekingAudio=%d, seekingVideo=%d", isSeekingAudio, + isSeekingVideo); + if (isSeekingVideo) { + mVideo.ResetDemuxer(); + mVideo.ResetState(); + } + if (isSeekingAudio) { + mAudio.ResetDemuxer(); + mAudio.ResetState(); + } + + // If seeking both tracks, seek the video track, and then the audio track when + // the video track seek has completed. Otherwise, only seek a specific track. + if (isSeekingVideo) { + DoVideoSeek(); + } else if (isSeekingAudio) { + DoAudioSeek(); + } else { + MOZ_CRASH(); + } +} + +void MediaFormatReader::OnSeekFailed(TrackType aTrack, + const MediaResult& aError) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnSeekFailed", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + LOGV("%s failure:%s", TrackTypeToStr(aTrack), aError.ErrorName().get()); + if (aTrack == TrackType::kVideoTrack) { + mVideo.mSeekRequest.Complete(); + } else { + mAudio.mSeekRequest.Complete(); + } + + if (aError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) { + if (HasVideo() && aTrack == TrackType::kAudioTrack && + mFallbackSeekTime.isSome() && + mPendingSeekTime.ref() != mFallbackSeekTime.ref()) { + // We have failed to seek audio where video seeked to earlier. + // Attempt to seek instead to the closest point that we know we have in + // order to limit A/V sync discrepency. + + // Ensure we have the most up to date buffered ranges. + UpdateReceivedNewData(TrackType::kAudioTrack); + Maybe nextSeekTime; + // Find closest buffered time found after video seeked time. 
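+      // (That is: take the start of the first audio buffered range at or
+      // after the time video seeked to; if none exists, or it lies past the
+      // original fallback time, seek audio to the fallback time instead and
+      // accept that A/V sync may drift.)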
+ for (const auto& timeRange : mAudio.mTimeRanges) { + if (timeRange.mStart >= mPendingSeekTime.ref()) { + nextSeekTime.emplace(timeRange.mStart); + break; + } + } + if (nextSeekTime.isNothing() || + nextSeekTime.ref() > mFallbackSeekTime.ref()) { + nextSeekTime = Some(mFallbackSeekTime.ref()); + LOG("Unable to seek audio to video seek time. A/V sync may be broken"); + } else { + mFallbackSeekTime.reset(); + } + mPendingSeekTime = nextSeekTime; + DoAudioSeek(); + return; + } + NotifyWaitingForData(aTrack); + } + MOZ_ASSERT(!mVideo.mSeekRequest.Exists() && !mAudio.mSeekRequest.Exists()); + mPendingSeekTime.reset(); + + auto type = aTrack == TrackType::kAudioTrack ? MediaData::Type::AUDIO_DATA + : MediaData::Type::VIDEO_DATA; + mSeekPromise.RejectIfExists(SeekRejectValue(type, aError), __func__); +} + +void MediaFormatReader::DoVideoSeek() { + AUTO_PROFILER_LABEL("MediaFormatReader::DoVideoSeek", MEDIA_PLAYBACK); + MOZ_ASSERT(mPendingSeekTime.isSome()); + LOGV("Seeking video to %" PRId64, mPendingSeekTime.ref().ToMicroseconds()); + MOZ_DIAGNOSTIC_ASSERT(!IsAudioOnlySeeking()); + MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists()); + auto seekTime = mPendingSeekTime.ref(); + mVideo.mTrackDemuxer->Seek(seekTime) + ->Then(OwnerThread(), __func__, this, + &MediaFormatReader::OnVideoSeekCompleted, + &MediaFormatReader::OnVideoSeekFailed) + ->Track(mVideo.mSeekRequest); +} + +void MediaFormatReader::OnVideoSeekCompleted(TimeUnit aTime) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnVideoSeekCompleted", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + LOGV("Video seeked to %" PRId64, aTime.ToMicroseconds()); + mVideo.mSeekRequest.Complete(); + + mVideo.mFirstFrameTime = Some(aTime); + mPreviousDecodedKeyframeTime_us = sNoPreviousDecodedKeyframe; + + SetVideoDecodeThreshold(); + + if (HasAudio() && !mOriginalSeekTarget.IsVideoOnly()) { + MOZ_ASSERT(mPendingSeekTime.isSome()); + if (mOriginalSeekTarget.IsFast()) { + // We are performing a fast seek. We need to seek audio to where the + // video seeked to, to ensure proper A/V sync once playback resume. + mPendingSeekTime = Some(aTime); + } + DoAudioSeek(); + } else { + mPendingSeekTime.reset(); + mSeekPromise.ResolveIfExists(aTime, __func__); + } +} + +void MediaFormatReader::OnVideoSeekFailed(const MediaResult& aError) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnVideoSeekFailed", MEDIA_PLAYBACK); + mPreviousDecodedKeyframeTime_us = sNoPreviousDecodedKeyframe; + OnSeekFailed(TrackType::kVideoTrack, aError); +} + +void MediaFormatReader::SetVideoDecodeThreshold() { + MOZ_ASSERT(OnTaskQueue()); + + if (!HasVideo() || !mVideo.mDecoder) { + return; + } + + if (!mVideo.mTimeThreshold && !IsSeeking()) { + return; + } + + TimeUnit threshold; + if (mVideo.mTimeThreshold) { + // For internalSeek. + threshold = mVideo.mTimeThreshold.ref().Time(); + } else if (IsSeeking()) { + // If IsSeeking() is true, then video seek must have completed already. + TimeUnit keyframe; + if (NS_FAILED(mVideo.mTrackDemuxer->GetNextRandomAccessPoint(&keyframe))) { + return; + } + + // If the key frame is invalid/infinite, it means the target position is + // closing to end of stream. We don't want to skip any frame at this point. + threshold = keyframe.IsValid() && !keyframe.IsInfinite() + ? 
mOriginalSeekTarget.GetTime() + : TimeUnit::Invalid(); + } else { + return; + } + + if (threshold.IsValid()) { + LOG("Set seek threshold to %" PRId64, threshold.ToMicroseconds()); + } else { + LOG("Resetting seek threshold"); + } + mVideo.mDecoder->SetSeekThreshold(threshold); +} + +void MediaFormatReader::DoAudioSeek() { + AUTO_PROFILER_LABEL("MediaFormatReader::DoAudioSeek", MEDIA_PLAYBACK); + MOZ_ASSERT(mPendingSeekTime.isSome()); + LOGV("Seeking audio to %" PRId64, mPendingSeekTime.ref().ToMicroseconds()); + MOZ_DIAGNOSTIC_ASSERT(!IsVideoOnlySeeking()); + MOZ_DIAGNOSTIC_ASSERT(!mAudio.mSeekRequest.Exists()); + auto seekTime = mPendingSeekTime.ref(); + mAudio.mTrackDemuxer->Seek(seekTime) + ->Then(OwnerThread(), __func__, this, + &MediaFormatReader::OnAudioSeekCompleted, + &MediaFormatReader::OnAudioSeekFailed) + ->Track(mAudio.mSeekRequest); +} + +void MediaFormatReader::OnAudioSeekCompleted(TimeUnit aTime) { + MOZ_ASSERT(OnTaskQueue()); + AUTO_PROFILER_LABEL("MediaFormatReader::OnAudioSeekCompleted", + MEDIA_PLAYBACK); + LOGV("Audio seeked to %" PRId64, aTime.ToMicroseconds()); + mAudio.mSeekRequest.Complete(); + mAudio.mFirstFrameTime = Some(aTime); + mPendingSeekTime.reset(); + mSeekPromise.ResolveIfExists(aTime, __func__); +} + +void MediaFormatReader::OnAudioSeekFailed(const MediaResult& aError) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnAudioSeekFailed", MEDIA_PLAYBACK); + OnSeekFailed(TrackType::kAudioTrack, aError); +} + +void MediaFormatReader::ReleaseResources() { + LOGV(""); + if (mShutdown) { + return; + } + ShutdownDecoder(TrackInfo::kAudioTrack); + ShutdownDecoder(TrackInfo::kVideoTrack); +} + +bool MediaFormatReader::VideoIsHardwareAccelerated() const { + return mVideo.mIsHardwareAccelerated; +} + +void MediaFormatReader::NotifyTrackDemuxers() { + MOZ_ASSERT(OnTaskQueue()); + + LOGV(""); + + if (!mInitDone) { + return; + } + + if (HasVideo()) { + mVideo.mReceivedNewData = true; + ScheduleUpdate(TrackType::kVideoTrack); + } + if (HasAudio()) { + mAudio.mReceivedNewData = true; + ScheduleUpdate(TrackType::kAudioTrack); + } +} + +void MediaFormatReader::NotifyDataArrived() { + AUTO_PROFILER_LABEL("MediaFormatReader::NotifyDataArrived", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + if (mShutdown || !mDemuxer || !mDemuxerInitDone) { + return; + } + + if (mNotifyDataArrivedPromise.Exists()) { + // Already one in progress. Set the dirty flag so we can process it later. 
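+    // (Any number of notifications arriving while the demuxer is still
+    // handling the previous one are coalesced into a single follow-up
+    // NotifyDataArrived() call, issued from the resolve handler below.)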
+ mPendingNotifyDataArrived = true; + return; + } + + RefPtr self = this; + mDemuxer->NotifyDataArrived() + ->Then( + OwnerThread(), __func__, + [self]() { + AUTO_PROFILER_LABEL("MediaFormatReader::NotifyDataArrived:Resolved", + MEDIA_PLAYBACK); + self->mNotifyDataArrivedPromise.Complete(); + self->UpdateBuffered(); + self->NotifyTrackDemuxers(); + if (self->mPendingNotifyDataArrived) { + self->mPendingNotifyDataArrived = false; + self->NotifyDataArrived(); + } + }, + [self]() { self->mNotifyDataArrivedPromise.Complete(); }) + ->Track(mNotifyDataArrivedPromise); +} + +void MediaFormatReader::UpdateMediaEngineId(uint64_t aMediaEngineId) { + LOG("Update external media engine Id %" PRIu64, aMediaEngineId); + mMediaEngineId = Some(aMediaEngineId); +} + +void MediaFormatReader::UpdateBuffered() { + AUTO_PROFILER_LABEL("MediaFormatReader::UpdateBuffered", MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + if (mShutdown) { + return; + } + + if (!mInitDone || !mHasStartTime) { + mBuffered = TimeIntervals(); + return; + } + + if (HasVideo()) { + mVideo.mTimeRanges = mVideo.mTrackDemuxer->GetBuffered(); + bool hasLastEnd; + auto lastEnd = mVideo.mTimeRanges.GetEnd(&hasLastEnd); + if (hasLastEnd) { + if (mVideo.mLastTimeRangesEnd && + mVideo.mLastTimeRangesEnd.ref() < lastEnd) { + // New data was added after our previous end, we can clear the EOS flag. + mVideo.mDemuxEOS = false; + ScheduleUpdate(TrackInfo::kVideoTrack); + } + mVideo.mLastTimeRangesEnd = Some(lastEnd); + } + } + if (HasAudio()) { + mAudio.mTimeRanges = mAudio.mTrackDemuxer->GetBuffered(); + bool hasLastEnd; + auto lastEnd = mAudio.mTimeRanges.GetEnd(&hasLastEnd); + if (hasLastEnd) { + if (mAudio.mLastTimeRangesEnd && + mAudio.mLastTimeRangesEnd.ref() < lastEnd) { + // New data was added after our previous end, we can clear the EOS flag. + mAudio.mDemuxEOS = false; + ScheduleUpdate(TrackInfo::kAudioTrack); + } + mAudio.mLastTimeRangesEnd = Some(lastEnd); + } + } + + media::TimeIntervals intervals; + if (HasAudio() && HasVideo()) { + intervals = media::Intersection(mVideo.mTimeRanges, mAudio.mTimeRanges); + } else if (HasAudio()) { + intervals = mAudio.mTimeRanges; + } else if (HasVideo()) { + intervals = mVideo.mTimeRanges; + } + + if (intervals.IsEmpty() || intervals.GetStart() == TimeUnit::Zero()) { + // IntervalSet already starts at 0 or is empty, nothing to shift. + mBuffered = intervals; + } else { + mBuffered = intervals.Shift(TimeUnit::Zero() - mInfo.mStartTime); + } +} + +layers::ImageContainer* MediaFormatReader::GetImageContainer() { + return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer() + : nullptr; +} + +RefPtr MediaFormatReader::RequestDebugInfo( + dom::MediaFormatReaderDebugInfo& aInfo) { + if (!OnTaskQueue()) { + // Run the request on the task queue if it's not already. 
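+    // (InvokeAsync re-dispatches this same call onto mTaskQueue and chains
+    // the promises, so off-thread callers still receive a promise that
+    // resolves once aInfo has been filled in on the task queue.)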
+ return InvokeAsync(mTaskQueue, __func__, + [this, self = RefPtr{this}, &aInfo] { + return RequestDebugInfo(aInfo); + }); + } + GetDebugInfo(aInfo); + return GenericPromise::CreateAndResolve(true, __func__); +} + +Maybe MediaFormatReader::GetAudioProcessPerCodec() { + if (mAudio.mDescription == "uninitialized"_ns) { + return Nothing(); + } + + MOZ_ASSERT(mAudio.mProcessName.Length() > 0, + "Should have had a process name"); + MOZ_ASSERT(mAudio.mCodecName.Length() > 0, "Should have had a codec name"); + + nsCString processName = mAudio.mProcessName; + nsCString audioProcessPerCodecName(processName + ","_ns + mAudio.mCodecName); + if (processName != "utility"_ns) { + if (!StaticPrefs::media_rdd_process_enabled()) { + audioProcessPerCodecName += ",rdd-disabled"_ns; + } + if (!StaticPrefs::media_utility_process_enabled()) { + audioProcessPerCodecName += ",utility-disabled"_ns; + } + if (StaticPrefs::media_allow_audio_non_utility()) { + audioProcessPerCodecName += ",allow-non-utility"_ns; + } + } + return Some(audioProcessPerCodecName); +} + +void MediaFormatReader::GetDebugInfo(dom::MediaFormatReaderDebugInfo& aInfo) { + MOZ_ASSERT(OnTaskQueue(), + "Don't call this off the task queue, it's going to touch a lot of " + "data members"); + nsCString result; + nsAutoCString audioDecoderName("unavailable"); + nsAutoCString videoDecoderName = audioDecoderName; + nsAutoCString audioType("none"); + nsAutoCString videoType("none"); + + AudioInfo audioInfo; + if (HasAudio()) { + audioInfo = *mAudio.GetWorkingInfo()->GetAsAudioInfo(); + audioDecoderName = mAudio.mDecoder ? mAudio.mDecoder->GetDescriptionName() + : mAudio.mDescription; + audioType = audioInfo.mMimeType; + aInfo.mAudioState.mNeedInput = NeedInput(mAudio); + aInfo.mAudioState.mHasPromise = mAudio.HasPromise(); + aInfo.mAudioState.mWaitingPromise = !mAudio.mWaitingPromise.IsEmpty(); + aInfo.mAudioState.mHasDemuxRequest = mAudio.mDemuxRequest.Exists(); + aInfo.mAudioState.mDemuxQueueSize = + AssertedCast(mAudio.mQueuedSamples.Length()); + aInfo.mAudioState.mHasDecoder = mAudio.mDecodeRequest.Exists(); + aInfo.mAudioState.mTimeTreshold = + mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().Time().ToSeconds() + : -1.0; + aInfo.mAudioState.mTimeTresholdHasSeeked = + mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().mHasSeeked : false; + aInfo.mAudioState.mNumSamplesInput = + AssertedCast(mAudio.mNumSamplesInput); + aInfo.mAudioState.mNumSamplesOutput = + AssertedCast(mAudio.mNumSamplesOutput); + aInfo.mAudioState.mQueueSize = + AssertedCast(size_t(mAudio.mSizeOfQueue)); + aInfo.mAudioState.mPending = AssertedCast(mAudio.mOutput.Length()); + aInfo.mAudioState.mWaitingForData = mAudio.mWaitingForData; + aInfo.mAudioState.mDemuxEOS = mAudio.mDemuxEOS; + aInfo.mAudioState.mDrainState = int32_t(mAudio.mDrainState); + aInfo.mAudioState.mWaitingForKey = mAudio.mWaitingForKey; + aInfo.mAudioState.mLastStreamSourceID = + AssertedCast(mAudio.mLastStreamSourceID); + } + + CopyUTF8toUTF16(audioDecoderName, aInfo.mAudioDecoderName); + CopyUTF8toUTF16(audioType, aInfo.mAudioType); + aInfo.mAudioChannels = AssertedCast(audioInfo.mChannels); + aInfo.mAudioRate = audioInfo.mRate; + aInfo.mAudioFramesDecoded = + AssertedCast(mAudio.mNumSamplesOutputTotal); + + VideoInfo videoInfo; + if (HasVideo()) { + videoInfo = *mVideo.GetWorkingInfo()->GetAsVideoInfo(); + videoDecoderName = mVideo.mDecoder ? 
mVideo.mDecoder->GetDescriptionName() + : mVideo.mDescription; + videoType = videoInfo.mMimeType; + aInfo.mVideoState.mNeedInput = NeedInput(mVideo); + aInfo.mVideoState.mHasPromise = mVideo.HasPromise(); + aInfo.mVideoState.mWaitingPromise = !mVideo.mWaitingPromise.IsEmpty(); + aInfo.mVideoState.mHasDemuxRequest = mVideo.mDemuxRequest.Exists(); + aInfo.mVideoState.mDemuxQueueSize = + AssertedCast(mVideo.mQueuedSamples.Length()); + aInfo.mVideoState.mHasDecoder = mVideo.mDecodeRequest.Exists(); + aInfo.mVideoState.mTimeTreshold = + mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().Time().ToSeconds() + : -1.0; + aInfo.mVideoState.mTimeTresholdHasSeeked = + mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().mHasSeeked : false; + aInfo.mVideoState.mNumSamplesInput = + AssertedCast(mVideo.mNumSamplesInput); + aInfo.mVideoState.mNumSamplesOutput = + AssertedCast(mVideo.mNumSamplesOutput); + aInfo.mVideoState.mQueueSize = + AssertedCast(size_t(mVideo.mSizeOfQueue)); + aInfo.mVideoState.mPending = AssertedCast(mVideo.mOutput.Length()); + aInfo.mVideoState.mWaitingForData = mVideo.mWaitingForData; + aInfo.mVideoState.mDemuxEOS = mVideo.mDemuxEOS; + aInfo.mVideoState.mDrainState = int32_t(mVideo.mDrainState); + aInfo.mVideoState.mWaitingForKey = mVideo.mWaitingForKey; + aInfo.mVideoState.mLastStreamSourceID = + AssertedCast(mVideo.mLastStreamSourceID); + } + + CopyUTF8toUTF16(videoDecoderName, aInfo.mVideoDecoderName); + CopyUTF8toUTF16(videoType, aInfo.mVideoType); + aInfo.mVideoWidth = + videoInfo.mDisplay.width < 0 ? 0 : videoInfo.mDisplay.width; + aInfo.mVideoHeight = + videoInfo.mDisplay.height < 0 ? 0 : videoInfo.mDisplay.height; + aInfo.mVideoRate = mVideo.mMeanRate.Mean(); + aInfo.mVideoHardwareAccelerated = VideoIsHardwareAccelerated(); + aInfo.mVideoNumSamplesOutputTotal = + AssertedCast(mVideo.mNumSamplesOutputTotal); + aInfo.mVideoNumSamplesSkippedTotal = + AssertedCast(mVideo.mNumSamplesSkippedTotal); + + // Looking at dropped frames + FrameStatisticsData stats = mFrameStats->GetFrameStatisticsData(); + aInfo.mFrameStats.mDroppedDecodedFrames = + AssertedCast(stats.mDroppedDecodedFrames); + aInfo.mFrameStats.mDroppedSinkFrames = + AssertedCast(stats.mDroppedSinkFrames); + aInfo.mFrameStats.mDroppedCompositorFrames = + AssertedCast(stats.mDroppedCompositorFrames); +} + +void MediaFormatReader::SetVideoNullDecode(bool aIsNullDecode) { + MOZ_ASSERT(OnTaskQueue()); + return SetNullDecode(TrackType::kVideoTrack, aIsNullDecode); +} + +void MediaFormatReader::UpdateCompositor( + already_AddRefed aCompositor) { + MOZ_ASSERT(OnTaskQueue()); + mKnowsCompositor = aCompositor; +} + +void MediaFormatReader::SetNullDecode(TrackType aTrack, bool aIsNullDecode) { + MOZ_ASSERT(OnTaskQueue()); + + auto& decoder = GetDecoderData(aTrack); + if (decoder.mIsNullDecode == aIsNullDecode) { + return; + } + + LOG("%s, decoder.mIsNullDecode = %d => aIsNullDecode = %d", + TrackTypeToStr(aTrack), decoder.mIsNullDecode, aIsNullDecode); + + decoder.mIsNullDecode = aIsNullDecode; + ShutdownDecoder(aTrack); +} + +void MediaFormatReader::OnFirstDemuxCompleted( + TrackInfo::TrackType aType, + const RefPtr& aSamples) { + AUTO_PROFILER_LABEL("MediaFormatReader::OnFirstDemuxCompleted", + MEDIA_PLAYBACK); + MOZ_ASSERT(OnTaskQueue()); + + if (mShutdown) { + return; + } + + auto& decoder = GetDecoderData(aType); + MOZ_ASSERT(decoder.mFirstDemuxedSampleTime.isNothing()); + decoder.mFirstDemuxedSampleTime.emplace(aSamples->GetSamples()[0]->mTime); + MaybeResolveMetadataPromise(); +} + +void 
MediaFormatReader::OnFirstDemuxFailed(TrackInfo::TrackType aType,
+                                           const MediaResult& aError) {
+  MOZ_ASSERT(OnTaskQueue());
+
+  if (mShutdown) {
+    return;
+  }
+
+  auto& decoder = GetDecoderData(aType);
+  MOZ_ASSERT(decoder.mFirstDemuxedSampleTime.isNothing());
+  decoder.mFirstDemuxedSampleTime.emplace(TimeUnit::FromInfinity());
+  MaybeResolveMetadataPromise();
+}
+
+}  // namespace mozilla
+
+#undef NS_DispatchToMainThread
+#undef LOGV
+#undef LOG
diff --git a/dom/media/MediaFormatReader.h b/dom/media/MediaFormatReader.h
new file mode 100644
index 0000000000..fcc3f20036
--- /dev/null
+++ b/dom/media/MediaFormatReader.h
@@ -0,0 +1,892 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaFormatReader_h_)
+#  define MediaFormatReader_h_
+
+#  include "FrameStatistics.h"
+#  include "MediaDataDemuxer.h"
+#  include "MediaEventSource.h"
+#  include "MediaMetadataManager.h"
+#  include "MediaPromiseDefs.h"
+#  include "PlatformDecoderModule.h"
+#  include "SeekTarget.h"
+#  include "mozilla/Atomics.h"
+#  include "mozilla/Maybe.h"
+#  include "mozilla/MozPromise.h"
+#  include "mozilla/Mutex.h"
+#  include "mozilla/StateMirroring.h"
+#  include "mozilla/StaticPrefs_media.h"
+#  include "mozilla/TaskQueue.h"
+#  include "mozilla/ThreadSafeWeakPtr.h"
+#  include "mozilla/dom/MediaDebugInfoBinding.h"
+
+namespace mozilla {
+
+class CDMProxy;
+class GMPCrashHelper;
+class MediaResource;
+class VideoFrameContainer;
+
+struct WaitForDataRejectValue {
+  enum Reason { SHUTDOWN, CANCELED };
+
+  WaitForDataRejectValue(MediaData::Type aType, Reason aReason)
+      : mType(aType), mReason(aReason) {}
+  MediaData::Type mType;
+  Reason mReason;
+};
+
+struct SeekRejectValue {
+  MOZ_IMPLICIT SeekRejectValue(const MediaResult& aError)
+      : mType(MediaData::Type::NULL_DATA), mError(aError) {}
+  MOZ_IMPLICIT SeekRejectValue(nsresult aResult)
+      : mType(MediaData::Type::NULL_DATA), mError(aResult) {}
+  SeekRejectValue(MediaData::Type aType, const MediaResult& aError)
+      : mType(aType), mError(aError) {}
+  MediaData::Type mType;
+  MediaResult mError;
+};
+
+struct MetadataHolder {
+  UniquePtr<MediaInfo> mInfo;
+  UniquePtr<MetadataTags> mTags;
+};
+
+using MediaDecoderOwnerID = void*;
+
+struct MOZ_STACK_CLASS MediaFormatReaderInit {
+  MediaResource* mResource = nullptr;
+  VideoFrameContainer* mVideoFrameContainer = nullptr;
+  FrameStatistics* mFrameStats = nullptr;
+  already_AddRefed<layers::KnowsCompositor> mKnowsCompositor;
+  already_AddRefed<GMPCrashHelper> mCrashHelper;
+  // Used in bug 1393399 for temporary telemetry.
+  MediaDecoderOwnerID mMediaDecoderOwnerID = nullptr;
+  Maybe<TrackingId> mTrackingId;
+};
+
+DDLoggedTypeDeclName(MediaFormatReader);
+
+class MediaFormatReader final
+    : public SupportsThreadSafeWeakPtr<MediaFormatReader>,
+      public DecoderDoctorLifeLogger<MediaFormatReader> {
+  static const bool IsExclusive = true;
+  using TrackType = TrackInfo::TrackType;
+  using NotifyDataArrivedPromise = MozPromise<bool, MediaResult, IsExclusive>;
+
+ public:
+  MOZ_DECLARE_REFCOUNTED_TYPENAME(MediaFormatReader)
+
+  using TrackSet = EnumSet<TrackInfo::TrackType>;
+  using MetadataPromise = MozPromise<MetadataHolder, MediaResult, IsExclusive>;
+
+  template <typename Type>
+  using DataPromise = MozPromise<RefPtr<Type>, MediaResult, IsExclusive>;
+  using AudioDataPromise = DataPromise<AudioData>;
+  using VideoDataPromise = DataPromise<VideoData>;
+
+  using SeekPromise = MozPromise<media::TimeUnit, SeekRejectValue, IsExclusive>;
+
+  // Note that, conceptually, WaitForData makes sense in a non-exclusive sense.
+  // But in the current architecture it's only ever used exclusively (by MDSM),
+  // so we mark it that way to verify our assumptions. If you have a use-case
+  // for multiple WaitForData consumers, feel free to flip the exclusivity here.
+  using WaitForDataPromise =
+      MozPromise<MediaData::Type, WaitForDataRejectValue, IsExclusive>;
+
+  MediaFormatReader(MediaFormatReaderInit& aInit, MediaDataDemuxer* aDemuxer);
+  virtual ~MediaFormatReader();
+
+  // Initializes the reader, returns NS_OK on success, or NS_ERROR_FAILURE
+  // on failure.
+  nsresult Init();
+
+  size_t SizeOfVideoQueueInFrames();
+  size_t SizeOfAudioQueueInFrames();
+
+  // Requests one video sample from the reader.
+  RefPtr<VideoDataPromise> RequestVideoData(
+      const media::TimeUnit& aTimeThreshold,
+      bool aRequestNextVideoKeyFrame = false);
+
+  // Requests one audio sample from the reader.
+  //
+  // The decode should be performed asynchronously, and the promise should
+  // be resolved when it is complete.
+  RefPtr<AudioDataPromise> RequestAudioData();
+
+  // The default implementation of AsyncReadMetadata is implemented in terms of
+  // synchronous ReadMetadata() calls. Implementations may also
+  // override AsyncReadMetadata to create a more proper async implementation.
+  RefPtr<MetadataPromise> AsyncReadMetadata();
+
+  // Fills aInfo with the latest cached data required to present the media,
+  // ReadUpdatedMetadata will always be called once ReadMetadata has succeeded.
+  void ReadUpdatedMetadata(MediaInfo* aInfo);
+
+  RefPtr<SeekPromise> Seek(const SeekTarget& aTarget);
+
+  // Called once new data has been cached by the MediaResource.
+  // mBuffered should be recalculated and updated accordingly.
+  void NotifyDataArrived();
+
+  // Update ID for the external playback engine. Currently it's only used on
+  // Windows when the media engine playback is enabled.
+  void UpdateMediaEngineId(uint64_t aMediaEngineId);
+
+ protected:
+  // Recomputes mBuffered.
+  void UpdateBuffered();
+
+ public:
+  // Called by MDSM in dormant state to release resources allocated by this
+  // reader. The reader can resume decoding by calling Seek() to a specific
+  // position.
+  void ReleaseResources();
+
+  bool OnTaskQueue() const { return OwnerThread()->IsCurrentThreadIn(); }
+
+  // Resets all state related to decoding, emptying all buffers etc.
+  // Cancels all pending Request*Data() request callbacks, rejects any
+  // outstanding seek promises, and flushes the decode pipeline. The
+  // decoder must not call any of the callbacks for outstanding
+  // Request*Data() calls after this is called. Calls to Request*Data()
+  // made after this should be processed as usual.
+  //
+  // Normally this call precedes a Seek() call, or shutdown.
+  //
+  // aTracks is a set of TrackInfo::TrackType enums specifying which
+  // queues need to be reset, defaulting to both audio and video tracks.
+  nsresult ResetDecode(const TrackSet& aTracks);
+
+  // Destroys the decoding state. The reader cannot be made usable again.
+  // This is different from ReleaseResources() as it is irreversible,
+  // whereas ReleaseResources() is reversible. Must be called on the decode
+  // thread.
+  RefPtr<ShutdownPromise> Shutdown();
+
+  // Returns true if this decoder reader uses hardware accelerated video
+  // decoding.
+  bool VideoIsHardwareAccelerated() const;
+
+  // By default, the state machine polls the reader once per second when it's
+  // in buffering mode. Some readers support a promise-based mechanism by which
+  // they notify the state machine when the data arrives.
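+  //
+  // As a rough sketch (illustration only; the real wiring lives in
+  // MediaDecoderStateMachine), a consumer whose Request*Data() call was
+  // rejected with NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA could park itself on
+  // WaitForData() instead of polling:
+  //
+  //   reader->WaitForData(MediaData::Type::VIDEO_DATA)
+  //       ->Then(ownerThread, __func__,
+  //              [](MediaData::Type) { /* re-issue RequestVideoData() */ },
+  //              [](const WaitForDataRejectValue&) { /* shutdown/canceled */ });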
+  bool IsWaitForDataSupported() const { return true; }
+
+  RefPtr<WaitForDataPromise> WaitForData(MediaData::Type aType);
+
+  // The MediaDecoderStateMachine uses various heuristics that assume that
+  // raw media data is arriving sequentially from a network channel. This
+  // makes sense in the