summaryrefslogtreecommitdiffstats
path: root/dom/media/gtest
diff options
context:
space:
mode:
Diffstat (limited to 'dom/media/gtest')
-rw-r--r--dom/media/gtest/AudioGenerator.h64
-rw-r--r--dom/media/gtest/AudioVerifier.h155
-rw-r--r--dom/media/gtest/Cargo.toml8
-rw-r--r--dom/media/gtest/GMPTestMonitor.h40
-rw-r--r--dom/media/gtest/MockCubeb.cpp739
-rw-r--r--dom/media/gtest/MockCubeb.h595
-rw-r--r--dom/media/gtest/MockMediaResource.cpp91
-rw-r--r--dom/media/gtest/MockMediaResource.h56
-rw-r--r--dom/media/gtest/TestAudioBuffer.cpp46
-rw-r--r--dom/media/gtest/TestAudioBuffers.cpp68
-rw-r--r--dom/media/gtest/TestAudioCallbackDriver.cpp477
-rw-r--r--dom/media/gtest/TestAudioCompactor.cpp131
-rw-r--r--dom/media/gtest/TestAudioDecoderInputTrack.cpp454
-rw-r--r--dom/media/gtest/TestAudioDeviceEnumerator.cpp272
-rw-r--r--dom/media/gtest/TestAudioInputProcessing.cpp395
-rw-r--r--dom/media/gtest/TestAudioInputSource.cpp269
-rw-r--r--dom/media/gtest/TestAudioMixer.cpp177
-rw-r--r--dom/media/gtest/TestAudioPacketizer.cpp163
-rw-r--r--dom/media/gtest/TestAudioRingBuffer.cpp1287
-rw-r--r--dom/media/gtest/TestAudioSegment.cpp470
-rw-r--r--dom/media/gtest/TestAudioSinkWrapper.cpp148
-rw-r--r--dom/media/gtest/TestAudioTrackEncoder.cpp298
-rw-r--r--dom/media/gtest/TestAudioTrackGraph.cpp2726
-rw-r--r--dom/media/gtest/TestBenchmarkStorage.cpp92
-rw-r--r--dom/media/gtest/TestBitWriter.cpp117
-rw-r--r--dom/media/gtest/TestBlankVideoDataCreator.cpp30
-rw-r--r--dom/media/gtest/TestBufferReader.cpp53
-rw-r--r--dom/media/gtest/TestCDMStorage.cpp1342
-rw-r--r--dom/media/gtest/TestCubebInputStream.cpp187
-rw-r--r--dom/media/gtest/TestDataMutex.cpp46
-rw-r--r--dom/media/gtest/TestDecoderBenchmark.cpp66
-rw-r--r--dom/media/gtest/TestDeviceInputTrack.cpp558
-rw-r--r--dom/media/gtest/TestDriftCompensation.cpp86
-rw-r--r--dom/media/gtest/TestGMPCrossOrigin.cpp212
-rw-r--r--dom/media/gtest/TestGMPRemoveAndDelete.cpp472
-rw-r--r--dom/media/gtest/TestGMPUtils.cpp84
-rw-r--r--dom/media/gtest/TestGroupId.cpp302
-rw-r--r--dom/media/gtest/TestIntervalSet.cpp819
-rw-r--r--dom/media/gtest/TestKeyValueStorage.cpp109
-rw-r--r--dom/media/gtest/TestMP3Demuxer.cpp579
-rw-r--r--dom/media/gtest/TestMP4Demuxer.cpp613
-rw-r--r--dom/media/gtest/TestMediaCodecsSupport.cpp239
-rw-r--r--dom/media/gtest/TestMediaDataDecoder.cpp97
-rw-r--r--dom/media/gtest/TestMediaDataEncoder.cpp775
-rw-r--r--dom/media/gtest/TestMediaEventSource.cpp490
-rw-r--r--dom/media/gtest/TestMediaMIMETypes.cpp284
-rw-r--r--dom/media/gtest/TestMediaQueue.cpp288
-rw-r--r--dom/media/gtest/TestMediaSpan.cpp110
-rw-r--r--dom/media/gtest/TestMediaUtils.cpp240
-rw-r--r--dom/media/gtest/TestMuxer.cpp212
-rw-r--r--dom/media/gtest/TestOggWriter.cpp62
-rw-r--r--dom/media/gtest/TestOpusParser.cpp24
-rw-r--r--dom/media/gtest/TestPacer.cpp189
-rw-r--r--dom/media/gtest/TestRTCStatsTimestampMaker.cpp115
-rw-r--r--dom/media/gtest/TestRust.cpp10
-rw-r--r--dom/media/gtest/TestTimeUnit.cpp295
-rw-r--r--dom/media/gtest/TestVPXDecoding.cpp96
-rw-r--r--dom/media/gtest/TestVideoFrameConverter.cpp508
-rw-r--r--dom/media/gtest/TestVideoSegment.cpp44
-rw-r--r--dom/media/gtest/TestVideoTrackEncoder.cpp1467
-rw-r--r--dom/media/gtest/TestVideoUtils.cpp128
-rw-r--r--dom/media/gtest/TestWebMBuffered.cpp234
-rw-r--r--dom/media/gtest/TestWebMWriter.cpp388
-rw-r--r--dom/media/gtest/YUVBufferGenerator.cpp144
-rw-r--r--dom/media/gtest/YUVBufferGenerator.h32
-rw-r--r--dom/media/gtest/dash_dashinit.mp4bin0 -> 80388 bytes
-rw-r--r--dom/media/gtest/hello.rs6
-rw-r--r--dom/media/gtest/id3v2header.mp3bin0 -> 191302 bytes
-rw-r--r--dom/media/gtest/moz.build148
-rw-r--r--dom/media/gtest/mp4_demuxer/TestInterval.cpp88
-rw-r--r--dom/media/gtest/mp4_demuxer/TestMP4.cpp133
-rw-r--r--dom/media/gtest/mp4_demuxer/TestParser.cpp1022
-rw-r--r--dom/media/gtest/mp4_demuxer/moz.build66
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1156505.mp4bin0 -> 296 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1181213.mp4bin0 -> 2834 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1181215.mp4bin0 -> 3086 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1181223.mp4bin0 -> 2834 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1181719.mp4bin0 -> 3095 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1185230.mp4bin0 -> 3250 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1187067.mp4bin0 -> 2835 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1200326.mp4bin0 -> 1694 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1204580.mp4bin0 -> 5833 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1216748.mp4bin0 -> 296 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1296473.mp4bin0 -> 5995 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1296532.mp4bin0 -> 152132 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1301065-harder.mp4bin0 -> 632 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1301065-i64max.mp4bin0 -> 632 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1301065-i64min.mp4bin0 -> 632 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1301065-max-ez.mp4bin0 -> 632 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1301065-max-ok.mp4bin0 -> 632 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1301065-overfl.mp4bin0 -> 632 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1301065-u32max.mp4bin0 -> 632 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1301065-u64max.mp4bin0 -> 632 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1301065.mp4bin0 -> 632 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1329061.movbin0 -> 93681 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1351094.mp4bin0 -> 80388 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1388991.mp4bin0 -> 288821 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1389299.mp4bin0 -> 152132 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1389527.mp4bin0 -> 92225 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1395244.mp4bin0 -> 13651 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1410565.mp4bin0 -> 955656 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1513651-2-sample-description-entries.mp4bin0 -> 1100 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1519617-cenc-init-with-track_id-0.mp4bin0 -> 767 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1519617-track2-trafs-removed.mp4bin0 -> 282228 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1519617-video-has-track_id-0.mp4bin0 -> 282024 bytes
-rw-r--r--dom/media/gtest/mp4_demuxer/test_case_1714125-2-sample-description-entires-with-identical-crypto.mp4bin0 -> 1119 bytes
-rw-r--r--dom/media/gtest/negative_duration.mp4bin0 -> 684 bytes
-rw-r--r--dom/media/gtest/noise.mp3bin0 -> 965257 bytes
-rw-r--r--dom/media/gtest/noise_vbr.mp3bin0 -> 583679 bytes
-rw-r--r--dom/media/gtest/short-zero-in-moov.mp4bin0 -> 13655 bytes
-rw-r--r--dom/media/gtest/short-zero-inband.movbin0 -> 93641 bytes
-rw-r--r--dom/media/gtest/small-shot-false-positive.mp3bin0 -> 6845 bytes
-rw-r--r--dom/media/gtest/small-shot-partial-xing.mp3bin0 -> 6825 bytes
-rw-r--r--dom/media/gtest/small-shot.mp3bin0 -> 6825 bytes
-rw-r--r--dom/media/gtest/test.webmbin0 -> 1980 bytes
-rw-r--r--dom/media/gtest/test_InvalidElementId.webmbin0 -> 1122 bytes
-rw-r--r--dom/media/gtest/test_InvalidElementSize.webmbin0 -> 1122 bytes
-rw-r--r--dom/media/gtest/test_InvalidLargeEBMLMaxIdLength.webmbin0 -> 1122 bytes
-rw-r--r--dom/media/gtest/test_InvalidLargeElementId.webmbin0 -> 1129 bytes
-rw-r--r--dom/media/gtest/test_InvalidSmallEBMLMaxIdLength.webmbin0 -> 1122 bytes
-rw-r--r--dom/media/gtest/test_ValidLargeEBMLMaxIdLength.webmbin0 -> 1128 bytes
-rw-r--r--dom/media/gtest/test_ValidSmallEBMLMaxSizeLength.webmbin0 -> 1116 bytes
-rw-r--r--dom/media/gtest/test_case_1224361.vp8.ivfbin0 -> 1497 bytes
-rw-r--r--dom/media/gtest/test_case_1224363.vp8.ivfbin0 -> 1388 bytes
-rw-r--r--dom/media/gtest/test_case_1224369.vp8.ivfbin0 -> 204 bytes
-rw-r--r--dom/media/gtest/test_vbri.mp3bin0 -> 16519 bytes
126 files changed, 22830 insertions, 0 deletions
diff --git a/dom/media/gtest/AudioGenerator.h b/dom/media/gtest/AudioGenerator.h
new file mode 100644
index 0000000000..da7a31b9dc
--- /dev/null
+++ b/dom/media/gtest/AudioGenerator.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_GTEST_AUDIO_GENERATOR_H_
+#define DOM_MEDIA_GTEST_AUDIO_GENERATOR_H_
+
+#include "AudioSegment.h"
+#include "prtime.h"
+#include "SineWaveGenerator.h"
+
+namespace mozilla {
+
+template <typename Sample>
+class AudioGenerator {
+ public:
+ AudioGenerator(uint32_t aChannels, uint32_t aSampleRate,
+ uint32_t aFrequency = 1000)
+ : mSampleRate(aSampleRate),
+ mFrequency(aFrequency),
+ mChannelCount(aChannels),
+ mGenerator(aSampleRate, aFrequency) {}
+
+ void Generate(mozilla::AudioSegment& aSegment, uint32_t aFrameCount) {
+ CheckedInt<size_t> bufferSize(sizeof(Sample));
+ bufferSize *= aFrameCount;
+ RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
+ Sample* dest = static_cast<Sample*>(buffer->Data());
+ mGenerator.generate(dest, aFrameCount);
+ AutoTArray<const Sample*, 1> channels;
+ for (uint32_t i = 0; i < mChannelCount; ++i) {
+ channels.AppendElement(dest);
+ }
+ aSegment.AppendFrames(buffer.forget(), channels, aFrameCount,
+ PRINCIPAL_HANDLE_NONE);
+ }
+
+ void GenerateInterleaved(Sample* aSamples, uint32_t aFrameCount) {
+ mGenerator.generate(aSamples, aFrameCount, mChannelCount);
+ }
+
+ void SetChannelsCount(uint32_t aChannelCount) {
+ mChannelCount = aChannelCount;
+ }
+
+ uint32_t ChannelCount() const { return mChannelCount; }
+
+ static float Amplitude() {
+ return mozilla::SineWaveGenerator<Sample>::Amplitude();
+ }
+
+ const uint32_t mSampleRate;
+ const uint32_t mFrequency;
+
+ private:
+ uint32_t mChannelCount;
+ mozilla::SineWaveGenerator<Sample> mGenerator;
+};
+
+} // namespace mozilla
+
+#endif // DOM_MEDIA_GTEST_AUDIO_GENERATOR_H_
diff --git a/dom/media/gtest/AudioVerifier.h b/dom/media/gtest/AudioVerifier.h
new file mode 100644
index 0000000000..ba67f6e489
--- /dev/null
+++ b/dom/media/gtest/AudioVerifier.h
@@ -0,0 +1,155 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_GTEST_AUDIOVERIFIER_H_
+#define DOM_MEDIA_GTEST_AUDIOVERIFIER_H_
+
+#include "AudioGenerator.h"
+
+namespace mozilla {
+
+template <typename Sample>
+class AudioVerifier {
+ public:
+ explicit AudioVerifier(uint32_t aRate, uint32_t aFrequency)
+ : mRate(aRate), mFrequency(aFrequency) {}
+
+ // Only the mono channel is taken into account.
+ void AppendData(const AudioSegment& segment) {
+ for (AudioSegment::ConstChunkIterator iter(segment); !iter.IsEnded();
+ iter.Next()) {
+ const AudioChunk& c = *iter;
+ if (c.IsNull()) {
+ for (int i = 0; i < c.GetDuration(); ++i) {
+ CheckSample(0);
+ }
+ } else {
+ const Sample* buffer = c.ChannelData<Sample>()[0];
+ for (int i = 0; i < c.GetDuration(); ++i) {
+ CheckSample(buffer[i]);
+ }
+ }
+ }
+ }
+
+ void AppendDataInterleaved(const Sample* aBuffer, uint32_t aFrames,
+ uint32_t aChannels) {
+ for (uint32_t i = 0; i < aFrames * aChannels; i += aChannels) {
+ CheckSample(aBuffer[i]);
+ }
+ }
+
+ float EstimatedFreq() const {
+ if (mTotalFramesSoFar == PreSilenceSamples()) {
+ return 0;
+ }
+ if (mSumPeriodInSamples == 0) {
+ return 0;
+ }
+ if (mZeroCrossCount <= 1) {
+ return 0;
+ }
+ return mRate /
+ (static_cast<float>(mSumPeriodInSamples) / (mZeroCrossCount - 1));
+ }
+
+ // Returns the maximum difference in value between two adjacent samples along
+ // the sine curve.
+ Sample MaxMagnitudeDifference() {
+ return static_cast<Sample>(AudioGenerator<Sample>::Amplitude() *
+ sin(2 * M_PI * mFrequency / mRate));
+ }
+
+ bool PreSilenceEnded() const {
+ return mTotalFramesSoFar > mPreSilenceSamples;
+ }
+ uint64_t PreSilenceSamples() const { return mPreSilenceSamples; }
+ uint32_t CountDiscontinuities() const { return mDiscontinuitiesCount; }
+
+ private:
+ void CheckSample(Sample aCurrentSample) {
+ ++mTotalFramesSoFar;
+ // Avoid pre-silence
+ if (!CountPreSilence(aCurrentSample)) {
+ CountZeroCrossing(aCurrentSample);
+ CountDiscontinuities(aCurrentSample);
+ }
+
+ mPrevious = aCurrentSample;
+ }
+
+ bool CountPreSilence(Sample aCurrentSample) {
+ if (IsZero(aCurrentSample) && mPreSilenceSamples == mTotalFramesSoFar - 1) {
+ ++mPreSilenceSamples;
+ return true;
+ }
+ if (IsZero(mPrevious) && aCurrentSample > 0 &&
+ aCurrentSample < 2 * MaxMagnitudeDifference() &&
+ mPreSilenceSamples == mTotalFramesSoFar - 1) {
+ // Previous zero considered the first sample of the waveform.
+ --mPreSilenceSamples;
+ }
+ return false;
+ }
+
+ // Positive to negative direction
+ void CountZeroCrossing(Sample aCurrentSample) {
+ if (mPrevious > 0 && aCurrentSample <= 0) {
+ if (mZeroCrossCount++) {
+ MOZ_ASSERT(mZeroCrossCount > 1);
+ mSumPeriodInSamples += mTotalFramesSoFar - mLastZeroCrossPosition;
+ }
+ mLastZeroCrossPosition = mTotalFramesSoFar;
+ }
+ }
+
+ void CountDiscontinuities(Sample aCurrentSample) {
+ const bool discontinuity = fabs(fabs(aCurrentSample) - fabs(mPrevious)) >
+ 3 * MaxMagnitudeDifference();
+
+ if (mCurrentDiscontinuityFrameCount > 0) {
+ if (++mCurrentDiscontinuityFrameCount == 5) {
+ // Allow a grace-period of 5 samples for any given discontinuity.
+ // For instance the speex resampler can smooth out a sudden drop to 0
+ // over several samples.
+ mCurrentDiscontinuityFrameCount = 0;
+ }
+ return;
+ }
+
+ MOZ_ASSERT(mCurrentDiscontinuityFrameCount == 0);
+ if (!discontinuity) {
+ return;
+ }
+
+ // Encountered a new discontinuity.
+ ++mCurrentDiscontinuityFrameCount;
+ ++mDiscontinuitiesCount;
+ }
+
+ bool IsZero(float aValue) { return fabs(aValue) < 1e-8; }
+ bool IsZero(short aValue) { return aValue == 0; }
+
+ private:
+ const uint32_t mRate;
+ const uint32_t mFrequency;
+
+ uint32_t mZeroCrossCount = 0;
+ uint64_t mLastZeroCrossPosition = 0;
+ uint64_t mSumPeriodInSamples = 0;
+
+ uint64_t mTotalFramesSoFar = 0;
+ uint64_t mPreSilenceSamples = 0;
+
+ uint32_t mCurrentDiscontinuityFrameCount = 0;
+ uint32_t mDiscontinuitiesCount = 0;
+ // This is needed to connect the previous buffers.
+ Sample mPrevious = {};
+};
+
+} // namespace mozilla
+
+#endif // DOM_MEDIA_GTEST_AUDIOVERIFIER_H_
diff --git a/dom/media/gtest/Cargo.toml b/dom/media/gtest/Cargo.toml
new file mode 100644
index 0000000000..a9318c24f6
--- /dev/null
+++ b/dom/media/gtest/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "mp4parse-gtest"
+version = "0.1.0"
+authors = ["The Mozilla Project Developers"]
+license = "MPL-2.0"
+
+[lib]
+path = "hello.rs"
diff --git a/dom/media/gtest/GMPTestMonitor.h b/dom/media/gtest/GMPTestMonitor.h
new file mode 100644
index 0000000000..27477b6a42
--- /dev/null
+++ b/dom/media/gtest/GMPTestMonitor.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef __GMPTestMonitor_h__
+#define __GMPTestMonitor_h__
+
+#include "nsThreadUtils.h"
+#include "mozilla/SchedulerGroup.h"
+#include "mozilla/SpinEventLoopUntil.h"
+
+class GMPTestMonitor {
+ public:
+ GMPTestMonitor() : mFinished(false) {}
+
+ void AwaitFinished() {
+ MOZ_ASSERT(NS_IsMainThread());
+ mozilla::SpinEventLoopUntil("GMPTestMonitor::AwaitFinished"_ns,
+ [&]() { return mFinished; });
+ mFinished = false;
+ }
+
+ private:
+ void MarkFinished() {
+ MOZ_ASSERT(NS_IsMainThread());
+ mFinished = true;
+ }
+
+ public:
+ void SetFinished() {
+ mozilla::SchedulerGroup::Dispatch(mozilla::NewNonOwningRunnableMethod(
+ "GMPTestMonitor::MarkFinished", this, &GMPTestMonitor::MarkFinished));
+ }
+
+ private:
+ bool mFinished;
+};
+
+#endif // __GMPTestMonitor_h__
diff --git a/dom/media/gtest/MockCubeb.cpp b/dom/media/gtest/MockCubeb.cpp
new file mode 100644
index 0000000000..ae3a676ac8
--- /dev/null
+++ b/dom/media/gtest/MockCubeb.cpp
@@ -0,0 +1,739 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MockCubeb.h"
+
+#include "gtest/gtest.h"
+
+namespace mozilla {
+
+using KeepProcessing = MockCubebStream::KeepProcessing;
+
+void PrintDevice(cubeb_device_info aInfo) {
+ printf(
+ "id: %zu\n"
+ "device_id: %s\n"
+ "friendly_name: %s\n"
+ "group_id: %s\n"
+ "vendor_name: %s\n"
+ "type: %d\n"
+ "state: %d\n"
+ "preferred: %d\n"
+ "format: %d\n"
+ "default_format: %d\n"
+ "max_channels: %d\n"
+ "default_rate: %d\n"
+ "max_rate: %d\n"
+ "min_rate: %d\n"
+ "latency_lo: %d\n"
+ "latency_hi: %d\n",
+ reinterpret_cast<uintptr_t>(aInfo.devid), aInfo.device_id,
+ aInfo.friendly_name, aInfo.group_id, aInfo.vendor_name, aInfo.type,
+ aInfo.state, aInfo.preferred, aInfo.format, aInfo.default_format,
+ aInfo.max_channels, aInfo.default_rate, aInfo.max_rate, aInfo.min_rate,
+ aInfo.latency_lo, aInfo.latency_hi);
+}
+
+void PrintDevice(AudioDeviceInfo* aInfo) {
+ cubeb_devid id;
+ nsString name;
+ nsString groupid;
+ nsString vendor;
+ uint16_t type;
+ uint16_t state;
+ uint16_t preferred;
+ uint16_t supportedFormat;
+ uint16_t defaultFormat;
+ uint32_t maxChannels;
+ uint32_t defaultRate;
+ uint32_t maxRate;
+ uint32_t minRate;
+ uint32_t maxLatency;
+ uint32_t minLatency;
+
+ id = aInfo->DeviceID();
+ aInfo->GetName(name);
+ aInfo->GetGroupId(groupid);
+ aInfo->GetVendor(vendor);
+ aInfo->GetType(&type);
+ aInfo->GetState(&state);
+ aInfo->GetPreferred(&preferred);
+ aInfo->GetSupportedFormat(&supportedFormat);
+ aInfo->GetDefaultFormat(&defaultFormat);
+ aInfo->GetMaxChannels(&maxChannels);
+ aInfo->GetDefaultRate(&defaultRate);
+ aInfo->GetMaxRate(&maxRate);
+ aInfo->GetMinRate(&minRate);
+ aInfo->GetMinLatency(&minLatency);
+ aInfo->GetMaxLatency(&maxLatency);
+
+ printf(
+ "device id: %zu\n"
+ "friendly_name: %s\n"
+ "group_id: %s\n"
+ "vendor_name: %s\n"
+ "type: %d\n"
+ "state: %d\n"
+ "preferred: %d\n"
+ "format: %d\n"
+ "default_format: %d\n"
+ "max_channels: %d\n"
+ "default_rate: %d\n"
+ "max_rate: %d\n"
+ "min_rate: %d\n"
+ "latency_lo: %d\n"
+ "latency_hi: %d\n",
+ reinterpret_cast<uintptr_t>(id), NS_LossyConvertUTF16toASCII(name).get(),
+ NS_LossyConvertUTF16toASCII(groupid).get(),
+ NS_LossyConvertUTF16toASCII(vendor).get(), type, state, preferred,
+ supportedFormat, defaultFormat, maxChannels, defaultRate, maxRate,
+ minRate, minLatency, maxLatency);
+}
+
+cubeb_device_info DeviceTemplate(cubeb_devid aId, cubeb_device_type aType,
+ const char* name) {
+ // A fake input device
+ cubeb_device_info device;
+ device.devid = aId;
+ device.device_id = "nice name";
+ device.friendly_name = name;
+ device.group_id = "the physical device";
+ device.vendor_name = "mozilla";
+ device.type = aType;
+ device.state = CUBEB_DEVICE_STATE_ENABLED;
+ device.preferred = CUBEB_DEVICE_PREF_NONE;
+ device.format = CUBEB_DEVICE_FMT_F32NE;
+ device.default_format = CUBEB_DEVICE_FMT_F32NE;
+ device.max_channels = 2;
+ device.default_rate = 44100;
+ device.max_rate = 44100;
+ device.min_rate = 16000;
+ device.latency_lo = 256;
+ device.latency_hi = 1024;
+
+ return device;
+}
+
+cubeb_device_info DeviceTemplate(cubeb_devid aId, cubeb_device_type aType) {
+ return DeviceTemplate(aId, aType, "nice name");
+}
+
+void AddDevices(MockCubeb* mock, uint32_t device_count,
+ cubeb_device_type deviceType) {
+ mock->ClearDevices(deviceType);
+ // Add a few input devices (almost all the same but it does not really
+ // matter as long as they have distinct IDs and only one is the default
+ // devices)
+ for (uintptr_t i = 0; i < device_count; i++) {
+ cubeb_device_info device =
+ DeviceTemplate(reinterpret_cast<void*>(i + 1), deviceType);
+ // Make it so that the last device is the default input device.
+ if (i == device_count - 1) {
+ device.preferred = CUBEB_DEVICE_PREF_ALL;
+ }
+ mock->AddDevice(device);
+ }
+}
+
+void cubeb_mock_destroy(cubeb* context) {
+ MockCubeb::AsMock(context)->Destroy();
+}
+
+MockCubebStream::MockCubebStream(
+ cubeb* aContext, char const* aStreamName, cubeb_devid aInputDevice,
+ cubeb_stream_params* aInputStreamParams, cubeb_devid aOutputDevice,
+ cubeb_stream_params* aOutputStreamParams, cubeb_data_callback aDataCallback,
+ cubeb_state_callback aStateCallback, void* aUserPtr,
+ SmartMockCubebStream* aSelf, RunningMode aRunningMode, bool aFrozenStart)
+ : context(aContext),
+ mUserPtr(aUserPtr),
+ mRunningMode(aRunningMode),
+ mHasInput(aInputStreamParams),
+ mHasOutput(aOutputStreamParams),
+ mSelf(aSelf),
+ mFrozenStartMonitor("MockCubebStream::mFrozenStartMonitor"),
+ mFrozenStart(aFrozenStart),
+ mDataCallback(aDataCallback),
+ mStateCallback(aStateCallback),
+ mName(aStreamName),
+ mInputDeviceID(aInputDevice),
+ mOutputDeviceID(aOutputDevice),
+ mAudioGenerator(aInputStreamParams ? aInputStreamParams->channels
+ : MAX_INPUT_CHANNELS,
+ aInputStreamParams ? aInputStreamParams->rate
+ : aOutputStreamParams->rate,
+ 100 /* aFrequency */),
+ mAudioVerifier(aInputStreamParams ? aInputStreamParams->rate
+ : aOutputStreamParams->rate,
+ 100 /* aFrequency */) {
+ MOZ_ASSERT(mAudioGenerator.ChannelCount() <= MAX_INPUT_CHANNELS,
+ "mInputBuffer has no enough space to hold generated data");
+ MOZ_ASSERT_IF(mFrozenStart, mRunningMode == RunningMode::Automatic);
+ if (aInputStreamParams) {
+ mInputParams = *aInputStreamParams;
+ }
+ if (aOutputStreamParams) {
+ mOutputParams = *aOutputStreamParams;
+ MOZ_ASSERT(SampleRate() == mOutputParams.rate);
+ }
+}
+
+MockCubebStream::~MockCubebStream() = default;
+
+int MockCubebStream::Start() {
+ NotifyState(CUBEB_STATE_STARTED);
+ mStreamStop = false;
+ if (mFrozenStart) {
+ // We need to grab mFrozenStartMonitor before returning to avoid races in
+ // the calling code -- it controls when to mFrozenStartMonitor.Notify().
+ // TempData helps facilitate this by holding what's needed to block the
+ // calling thread until the background thread has grabbed the lock.
+ struct TempData {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TempData)
+ static_assert(HasThreadSafeRefCnt::value,
+ "Silence a -Wunused-local-typedef warning");
+ Monitor mMonitor{"MockCubebStream::Start::TempData::mMonitor"};
+ bool mFinished = false;
+
+ private:
+ ~TempData() = default;
+ };
+ auto temp = MakeRefPtr<TempData>();
+ MonitorAutoLock lock(temp->mMonitor);
+ NS_DispatchBackgroundTask(NS_NewRunnableFunction(
+ "MockCubebStream::WaitForThawBeforeStart",
+ [temp, this, self = RefPtr<SmartMockCubebStream>(mSelf)]() mutable {
+ MonitorAutoLock lock(mFrozenStartMonitor);
+ {
+ // Unblock MockCubebStream::Start now that we have locked the frozen
+ // start monitor.
+ MonitorAutoLock tempLock(temp->mMonitor);
+ temp->mFinished = true;
+ temp->mMonitor.Notify();
+ temp = nullptr;
+ }
+ while (mFrozenStart) {
+ mFrozenStartMonitor.Wait();
+ }
+ if (!mStreamStop) {
+ MockCubeb::AsMock(context)->StartStream(mSelf);
+ }
+ }));
+ while (!temp->mFinished) {
+ temp->mMonitor.Wait();
+ }
+ return CUBEB_OK;
+ }
+ MockCubeb::AsMock(context)->StartStream(this);
+ return CUBEB_OK;
+}
+
+int MockCubebStream::Stop() {
+ mOutputVerificationEvent.Notify(std::make_tuple(
+ mAudioVerifier.PreSilenceSamples(), mAudioVerifier.EstimatedFreq(),
+ mAudioVerifier.CountDiscontinuities()));
+ MockCubeb::AsMock(context)->StopStream(this);
+ mStreamStop = true;
+ NotifyState(CUBEB_STATE_STOPPED);
+ return CUBEB_OK;
+}
+
+uint64_t MockCubebStream::Position() { return mPosition; }
+
+void MockCubebStream::Destroy() {
+ // Stop() even if cubeb_stream_stop() has already been called, as with
+ // audioipc. https://bugzilla.mozilla.org/show_bug.cgi?id=1801190#c1
+ // This provides an extra STOPPED state callback as with audioipc.
+ // It also ensures that this stream is removed from MockCubeb::mLiveStreams.
+ Stop();
+ mDestroyed = true;
+ MockCubeb::AsMock(context)->StreamDestroy(this);
+}
+
+int MockCubebStream::SetName(char const* aName) {
+ mName = aName;
+ mNameSetEvent.Notify(mName);
+ return CUBEB_OK;
+}
+
+int MockCubebStream::RegisterDeviceChangedCallback(
+ cubeb_device_changed_callback aDeviceChangedCallback) {
+ if (mDeviceChangedCallback && aDeviceChangedCallback) {
+ return CUBEB_ERROR_INVALID_PARAMETER;
+ }
+ mDeviceChangedCallback = aDeviceChangedCallback;
+ return CUBEB_OK;
+}
+
+cubeb_stream* MockCubebStream::AsCubebStream() {
+ MOZ_ASSERT(!mDestroyed);
+ return reinterpret_cast<cubeb_stream*>(this);
+}
+
+MockCubebStream* MockCubebStream::AsMock(cubeb_stream* aStream) {
+ auto* mockStream = reinterpret_cast<MockCubebStream*>(aStream);
+ MOZ_ASSERT(!mockStream->mDestroyed);
+ return mockStream;
+}
+
+cubeb_devid MockCubebStream::GetInputDeviceID() const { return mInputDeviceID; }
+
+cubeb_devid MockCubebStream::GetOutputDeviceID() const {
+ return mOutputDeviceID;
+}
+
+uint32_t MockCubebStream::InputChannels() const {
+ return mAudioGenerator.ChannelCount();
+}
+
+uint32_t MockCubebStream::OutputChannels() const {
+ return mOutputParams.channels;
+}
+
+uint32_t MockCubebStream::SampleRate() const {
+ return mAudioGenerator.mSampleRate;
+}
+
+uint32_t MockCubebStream::InputFrequency() const {
+ return mAudioGenerator.mFrequency;
+}
+
+nsTArray<AudioDataValue>&& MockCubebStream::TakeRecordedOutput() {
+ return std::move(mRecordedOutput);
+}
+
+nsTArray<AudioDataValue>&& MockCubebStream::TakeRecordedInput() {
+ return std::move(mRecordedInput);
+}
+
+void MockCubebStream::SetDriftFactor(float aDriftFactor) {
+ MOZ_ASSERT(mRunningMode == MockCubeb::RunningMode::Automatic);
+ mDriftFactor = aDriftFactor;
+}
+
+void MockCubebStream::ForceError() { mForceErrorState = true; }
+
+void MockCubebStream::ForceDeviceChanged() {
+ MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ mForceDeviceChanged = true;
+};
+
+void MockCubebStream::NotifyDeviceChangedNow() {
+ MOZ_ASSERT(mRunningMode == RunningMode::Manual);
+ NotifyDeviceChanged();
+}
+
+void MockCubebStream::Thaw() {
+ MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ MonitorAutoLock l(mFrozenStartMonitor);
+ mFrozenStart = false;
+ mFrozenStartMonitor.Notify();
+}
+
+void MockCubebStream::SetOutputRecordingEnabled(bool aEnabled) {
+ mOutputRecordingEnabled = aEnabled;
+}
+
+void MockCubebStream::SetInputRecordingEnabled(bool aEnabled) {
+ mInputRecordingEnabled = aEnabled;
+}
+
+MediaEventSource<nsCString>& MockCubebStream::NameSetEvent() {
+ return mNameSetEvent;
+}
+
+MediaEventSource<cubeb_state>& MockCubebStream::StateEvent() {
+ return mStateEvent;
+}
+
+MediaEventSource<uint32_t>& MockCubebStream::FramesProcessedEvent() {
+ return mFramesProcessedEvent;
+}
+
+MediaEventSource<uint32_t>& MockCubebStream::FramesVerifiedEvent() {
+ return mFramesVerifiedEvent;
+}
+
+MediaEventSource<std::tuple<uint64_t, float, uint32_t>>&
+MockCubebStream::OutputVerificationEvent() {
+ return mOutputVerificationEvent;
+}
+
+MediaEventSource<void>& MockCubebStream::ErrorForcedEvent() {
+ return mErrorForcedEvent;
+}
+
+MediaEventSource<void>& MockCubebStream::DeviceChangeForcedEvent() {
+ return mDeviceChangedForcedEvent;
+}
+
+KeepProcessing MockCubebStream::ManualDataCallback(long aNrFrames) {
+ MOZ_ASSERT(mRunningMode == RunningMode::Manual);
+ MOZ_ASSERT(aNrFrames <= kMaxNrFrames);
+ return Process(aNrFrames);
+}
+
+KeepProcessing MockCubebStream::Process(long aNrFrames) {
+ if (mInputParams.rate) {
+ mAudioGenerator.GenerateInterleaved(mInputBuffer, aNrFrames);
+ }
+ cubeb_stream* stream = AsCubebStream();
+ const long outframes =
+ mDataCallback(stream, mUserPtr, mHasInput ? mInputBuffer : nullptr,
+ mHasOutput ? mOutputBuffer : nullptr, aNrFrames);
+
+ if (mInputRecordingEnabled && mHasInput) {
+ mRecordedInput.AppendElements(mInputBuffer, outframes * InputChannels());
+ }
+ if (mOutputRecordingEnabled && mHasOutput) {
+ mRecordedOutput.AppendElements(mOutputBuffer, outframes * OutputChannels());
+ }
+ mAudioVerifier.AppendDataInterleaved(mOutputBuffer, outframes,
+ MAX_OUTPUT_CHANNELS);
+ mPosition += outframes;
+
+ mFramesProcessedEvent.Notify(outframes);
+ if (mAudioVerifier.PreSilenceEnded()) {
+ mFramesVerifiedEvent.Notify(outframes);
+ }
+
+ if (outframes < aNrFrames) {
+ NotifyState(CUBEB_STATE_DRAINED);
+ return KeepProcessing::No;
+ }
+ if (mForceErrorState) {
+ mForceErrorState = false;
+ NotifyState(CUBEB_STATE_ERROR);
+ mErrorForcedEvent.Notify();
+ return KeepProcessing::No;
+ }
+ if (mForceDeviceChanged) {
+ mForceDeviceChanged = false;
+ // The device-changed callback is not necessary to be run in the
+ // audio-callback thread. It's up to the platform APIs. We don't have any
+ // control over them. Fire the device-changed callback in another thread to
+ // simulate this.
+ NS_DispatchBackgroundTask(NS_NewRunnableFunction(
+ __func__, [this, self = RefPtr(mSelf)] { NotifyDeviceChanged(); }));
+ }
+ return KeepProcessing::Yes;
+}
+
+KeepProcessing MockCubebStream::Process10Ms() {
+ MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ uint32_t rate = SampleRate();
+ const long nrFrames =
+ static_cast<long>(static_cast<float>(rate * 10) * mDriftFactor) /
+ PR_MSEC_PER_SEC;
+ return Process(nrFrames);
+}
+
+void MockCubebStream::NotifyState(cubeb_state aState) {
+ mStateCallback(AsCubebStream(), mUserPtr, aState);
+ mStateEvent.Notify(aState);
+}
+
+void MockCubebStream::NotifyDeviceChanged() {
+ mDeviceChangedCallback(this->mUserPtr);
+ mDeviceChangedForcedEvent.Notify();
+}
+
+MockCubeb::MockCubeb() : MockCubeb(MockCubeb::RunningMode::Automatic) {}
+
+MockCubeb::MockCubeb(RunningMode aRunningMode)
+ : ops(&mock_ops), mRunningMode(aRunningMode) {}
+
+MockCubeb::~MockCubeb() { MOZ_ASSERT(!mFakeAudioThread); };
+
+void MockCubeb::Destroy() {
+ MOZ_ASSERT(mHasCubebContext);
+ {
+ auto streams = mLiveStreams.Lock();
+ MOZ_ASSERT(streams->IsEmpty());
+ }
+ mDestroyed = true;
+ Release();
+}
+
+cubeb* MockCubeb::AsCubebContext() {
+ MOZ_ASSERT(!mDestroyed);
+ if (mHasCubebContext.compareExchange(false, true)) {
+ AddRef();
+ }
+ return reinterpret_cast<cubeb*>(this);
+}
+
+MockCubeb* MockCubeb::AsMock(cubeb* aContext) {
+ auto* mockCubeb = reinterpret_cast<MockCubeb*>(aContext);
+ MOZ_ASSERT(!mockCubeb->mDestroyed);
+ return mockCubeb;
+}
+
+int MockCubeb::EnumerateDevices(cubeb_device_type aType,
+ cubeb_device_collection* aCollection) {
+#ifdef ANDROID
+ EXPECT_TRUE(false) << "This is not to be called on Android.";
+#endif
+ size_t count = 0;
+ if (aType & CUBEB_DEVICE_TYPE_INPUT) {
+ count += mInputDevices.Length();
+ }
+ if (aType & CUBEB_DEVICE_TYPE_OUTPUT) {
+ count += mOutputDevices.Length();
+ }
+ aCollection->device = new cubeb_device_info[count];
+ aCollection->count = count;
+
+ uint32_t collection_index = 0;
+ if (aType & CUBEB_DEVICE_TYPE_INPUT) {
+ for (auto& device : mInputDevices) {
+ aCollection->device[collection_index] = device;
+ collection_index++;
+ }
+ }
+ if (aType & CUBEB_DEVICE_TYPE_OUTPUT) {
+ for (auto& device : mOutputDevices) {
+ aCollection->device[collection_index] = device;
+ collection_index++;
+ }
+ }
+
+ return CUBEB_OK;
+}
+
+int MockCubeb::DestroyDeviceCollection(cubeb_device_collection* aCollection) {
+ delete[] aCollection->device;
+ aCollection->count = 0;
+ return CUBEB_OK;
+}
+
+// Record per-type device-collection-changed callbacks.  Returns CUBEB_ERROR
+// when configured as a backend without this capability (see
+// SetSupportDeviceChangeCallback), so Gecko's fallback path can be tested.
+int MockCubeb::RegisterDeviceCollectionChangeCallback(
+ cubeb_device_type aDevType,
+ cubeb_device_collection_changed_callback aCallback, void* aUserPtr) {
+ if (!mSupportsDeviceCollectionChangedCallback) {
+ return CUBEB_ERROR;
+ }
+
+ if (aDevType & CUBEB_DEVICE_TYPE_INPUT) {
+ mInputDeviceCollectionChangeCallback = aCallback;
+ mInputDeviceCollectionChangeUserPtr = aUserPtr;
+ }
+ if (aDevType & CUBEB_DEVICE_TYPE_OUTPUT) {
+ mOutputDeviceCollectionChangeCallback = aCallback;
+ mOutputDeviceCollectionChangeUserPtr = aUserPtr;
+ }
+
+ return CUBEB_OK;
+}
+
+// Add a device to the matching list (the type must be exactly INPUT or
+// OUTPUT) and fire the registered collection-changed callback, if any.
+void MockCubeb::AddDevice(cubeb_device_info aDevice) {
+ if (aDevice.type == CUBEB_DEVICE_TYPE_INPUT) {
+ mInputDevices.AppendElement(aDevice);
+ } else if (aDevice.type == CUBEB_DEVICE_TYPE_OUTPUT) {
+ mOutputDevices.AppendElement(aDevice);
+ } else {
+ MOZ_CRASH("bad device type when adding a device in mock cubeb backend");
+ }
+
+ bool isInput = aDevice.type & CUBEB_DEVICE_TYPE_INPUT;
+ if (isInput && mInputDeviceCollectionChangeCallback) {
+ mInputDeviceCollectionChangeCallback(AsCubebContext(),
+ mInputDeviceCollectionChangeUserPtr);
+ }
+ if (!isInput && mOutputDeviceCollectionChangeCallback) {
+ mOutputDeviceCollectionChangeCallback(AsCubebContext(),
+ mOutputDeviceCollectionChangeUserPtr);
+ }
+}
+
+// Remove the device with id aId from both lists.  Fires the relevant
+// collection-changed callbacks, then, if the removed device was a preferred
+// (default) one, promotes the last remaining device of that direction to
+// default.  Returns true if any device was removed.
+bool MockCubeb::RemoveDevice(cubeb_devid aId) {
+ bool foundInput = false;
+ bool foundOutput = false;
+ mInputDevices.RemoveElementsBy(
+ [aId, &foundInput](cubeb_device_info& aDeviceInfo) {
+ bool foundThisTime = aDeviceInfo.devid == aId;
+ foundInput |= foundThisTime;
+ return foundThisTime;
+ });
+ mOutputDevices.RemoveElementsBy(
+ [aId, &foundOutput](cubeb_device_info& aDeviceInfo) {
+ bool foundThisTime = aDeviceInfo.devid == aId;
+ foundOutput |= foundThisTime;
+ return foundThisTime;
+ });
+
+ if (foundInput && mInputDeviceCollectionChangeCallback) {
+ mInputDeviceCollectionChangeCallback(AsCubebContext(),
+ mInputDeviceCollectionChangeUserPtr);
+ }
+ if (foundOutput && mOutputDeviceCollectionChangeCallback) {
+ mOutputDeviceCollectionChangeCallback(AsCubebContext(),
+ mOutputDeviceCollectionChangeUserPtr);
+ }
+ // If the device removed was a default device, set another device as the
+ // default, if there are still devices available.
+ bool foundDefault = false;
+ for (uint32_t i = 0; i < mInputDevices.Length(); i++) {
+ foundDefault |= mInputDevices[i].preferred != CUBEB_DEVICE_PREF_NONE;
+ }
+
+ if (!foundDefault) {
+ if (!mInputDevices.IsEmpty()) {
+ mInputDevices[mInputDevices.Length() - 1].preferred =
+ CUBEB_DEVICE_PREF_ALL;
+ }
+ }
+
+ // Same re-election for the output side.
+ foundDefault = false;
+ for (uint32_t i = 0; i < mOutputDevices.Length(); i++) {
+ foundDefault |= mOutputDevices[i].preferred != CUBEB_DEVICE_PREF_NONE;
+ }
+
+ if (!foundDefault) {
+ if (!mOutputDevices.IsEmpty()) {
+ mOutputDevices[mOutputDevices.Length() - 1].preferred =
+ CUBEB_DEVICE_PREF_ALL;
+ }
+ }
+
+ return foundInput | foundOutput;
+}
+
+// Remove all devices of the given type(s), without invoking the collection
+// changed callbacks.  Meant for cleaning up in between tests.  Previously the
+// aType argument was ignored and both lists were always cleared, which
+// contradicted the contract documented in MockCubeb.h.
+void MockCubeb::ClearDevices(cubeb_device_type aType) {
+ if (aType & CUBEB_DEVICE_TYPE_INPUT) {
+ mInputDevices.Clear();
+ }
+ if (aType & CUBEB_DEVICE_TYPE_OUTPUT) {
+ mOutputDevices.Clear();
+ }
+}
+
+// Toggle whether RegisterDeviceCollectionChangeCallback() succeeds, to
+// simulate backends without collection-changed notifications.
+void MockCubeb::SetSupportDeviceChangeCallback(bool aSupports) {
+ mSupportsDeviceCollectionChangedCallback = aSupports;
+}
+
+// Make the next StreamInit() fail with CUBEB_ERROR_DEVICE_UNAVAILABLE.
+void MockCubeb::ForceStreamInitError() { mStreamInitErrorState = true; }
+
+// When enabled, newly created streams block in Start() until Thaw()ed.
+// Only meaningful with the automatic (fake audio thread) running mode.
+void MockCubeb::SetStreamStartFreezeEnabled(bool aEnabled) {
+ MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ mStreamStartFreezeEnabled = aEnabled;
+}
+
+// Spin up the fake audio thread even with no live streams; the returned
+// promise resolves (with an auto-unforcer) once the thread is running.
+auto MockCubeb::ForceAudioThread() -> RefPtr<ForcedAudioThreadPromise> {
+ MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ RefPtr<ForcedAudioThreadPromise> p =
+ mForcedAudioThreadPromise.Ensure(__func__);
+ mForcedAudioThread = true;
+ StartStream(nullptr);
+ return p;
+}
+
+// Allow the forced fake audio thread to exit once no streams remain.
+void MockCubeb::UnforceAudioThread() {
+ MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ mForcedAudioThread = false;
+}
+
+// Backend implementation of cubeb_stream_init().  Creates a
+// SmartMockCubebStream (kept alive until StreamDestroy) and notifies
+// StreamInitEvent listeners.  Fails once if ForceStreamInitError() was
+// called beforehand.
+int MockCubeb::StreamInit(cubeb* aContext, cubeb_stream** aStream,
+ char const* aStreamName, cubeb_devid aInputDevice,
+ cubeb_stream_params* aInputStreamParams,
+ cubeb_devid aOutputDevice,
+ cubeb_stream_params* aOutputStreamParams,
+ cubeb_data_callback aDataCallback,
+ cubeb_state_callback aStateCallback, void* aUserPtr) {
+ if (mStreamInitErrorState.compareExchange(true, false)) {
+ mStreamInitEvent.Notify(nullptr);
+ return CUBEB_ERROR_DEVICE_UNAVAILABLE;
+ }
+
+ auto mockStream = MakeRefPtr<SmartMockCubebStream>(
+ aContext, aStreamName, aInputDevice, aInputStreamParams, aOutputDevice,
+ aOutputStreamParams, aDataCallback, aStateCallback, aUserPtr,
+ mRunningMode, mStreamStartFreezeEnabled);
+ *aStream = mockStream->AsCubebStream();
+ mStreamInitEvent.Notify(mockStream);
+ // AddRef the stream to keep it alive. StreamDestroy releases it.
+ Unused << mockStream.forget().take();
+ return CUBEB_OK;
+}
+
+// Backend implementation of cubeb_stream_destroy().  Drops the reference
+// taken in StreamInit() and notifies StreamDestroyEvent listeners.
+void MockCubeb::StreamDestroy(MockCubebStream* aStream) {
+ RefPtr<SmartMockCubebStream> mockStream = dont_AddRef(aStream->mSelf);
+ mStreamDestroyEvent.Notify(mockStream);
+}
+
+// Make the fake audio thread iterate without sleeping (see ThreadFunction),
+// so tests that wait on many callbacks finish quickly.
+void MockCubeb::GoFaster() {
+ MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ mFastMode = true;
+}
+
+// Restore the ~10ms pacing of the fake audio thread.
+void MockCubeb::DontGoFaster() {
+ MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ mFastMode = false;
+}
+
+// Event notified (with the stream, or nullptr on forced failure) on each
+// StreamInit() call.
+MediaEventSource<RefPtr<SmartMockCubebStream>>& MockCubeb::StreamInitEvent() {
+ return mStreamInitEvent;
+}
+
+// Event notified with each stream passed to StreamDestroy().
+MediaEventSource<RefPtr<SmartMockCubebStream>>&
+MockCubeb::StreamDestroyEvent() {
+ return mStreamDestroyEvent;
+}
+
+// Add aStream to the set driven by the fake audio thread, creating the
+// thread on demand (RunningMode::Automatic only).  A null aStream is the
+// ForceAudioThread() path: it spins up the thread with no stream attached.
+void MockCubeb::StartStream(MockCubebStream* aStream) {
+ auto streams = mLiveStreams.Lock();
+ MOZ_ASSERT_IF(!aStream, mForcedAudioThread);
+ // Forcing an audio thread must happen before starting streams
+ MOZ_ASSERT_IF(!aStream, streams->IsEmpty());
+ if (aStream) {
+ MOZ_ASSERT(!streams->Contains(aStream->mSelf));
+ streams->AppendElement(aStream->mSelf);
+ }
+ if (!mFakeAudioThread && mRunningMode == RunningMode::Automatic) {
+ AddRef(); // released when the thread exits
+ mFakeAudioThread = WrapUnique(new std::thread(ThreadFunction_s, this));
+ }
+}
+
+// Remove aStream from the fake audio thread's set.  No-op if the stream is
+// not currently live (e.g. already stopped).
+void MockCubeb::StopStream(MockCubebStream* aStream) {
+ {
+ auto streams = mLiveStreams.Lock();
+ if (!streams->Contains(aStream->mSelf)) {
+ return;
+ }
+ streams->RemoveElement(aStream->mSelf);
+ }
+}
+
+// Body of the fake audio thread.  Every iteration it drives all live
+// streams forward by 10ms of audio, drops streams that are done, and exits
+// when no streams remain (unless the thread is forced).  Paced at ~10ms of
+// wall-clock per iteration, or no sleep in fast mode.
+void MockCubeb::ThreadFunction() {
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
+ if (mForcedAudioThread) {
+ mForcedAudioThreadPromise.Resolve(MakeRefPtr<AudioThreadAutoUnforcer>(this),
+ __func__);
+ }
+ while (true) {
+ {
+ auto streams = mLiveStreams.Lock();
+ for (auto& stream : *streams) {
+ auto keepProcessing = stream->Process10Ms();
+ if (keepProcessing == KeepProcessing::No) {
+ stream = nullptr;
+ }
+ }
+ streams->RemoveElementsBy([](const auto& stream) { return !stream; });
+ MOZ_ASSERT(mFakeAudioThread);
+ if (streams->IsEmpty() && !mForcedAudioThread) {
+ // Hand the std::thread to the main thread so it can be join()ed there;
+ // the thread cannot join itself.
+ // This leaks the std::thread if Gecko's main thread has already been
+ // shut down.
+ NS_DispatchToMainThread(NS_NewRunnableFunction(
+ __func__, [audioThread = std::move(mFakeAudioThread)] {
+ audioThread->join();
+ }));
+ break;
+ }
+ }
+ std::this_thread::sleep_for(
+ std::chrono::microseconds(mFastMode ? 0 : 10 * PR_USEC_PER_MSEC));
+ }
+ Release(); // balances the AddRef in StartStream()
+}
+
+} // namespace mozilla
diff --git a/dom/media/gtest/MockCubeb.h b/dom/media/gtest/MockCubeb.h
new file mode 100644
index 0000000000..ed6342a779
--- /dev/null
+++ b/dom/media/gtest/MockCubeb.h
@@ -0,0 +1,595 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef MOCKCUBEB_H_
+#define MOCKCUBEB_H_
+
+#include "AudioDeviceInfo.h"
+#include "AudioGenerator.h"
+#include "AudioVerifier.h"
+#include "MediaEventSource.h"
+#include "mozilla/DataMutex.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/ThreadSafeWeakPtr.h"
+#include "nsTArray.h"
+
+#include <thread>
+#include <atomic>
+#include <chrono>
+
+namespace mozilla {
+// Channel-count limits used to size the mock streams' static buffers.
+const uint32_t MAX_OUTPUT_CHANNELS = 2;
+const uint32_t MAX_INPUT_CHANNELS = 2;
+
+// Mirror of cubeb's internal backend vtable.  Must stay layout-identical to
+// the struct in cubeb-internal.h so a MockCubeb can be passed off as a real
+// cubeb context.
+struct cubeb_ops {
+ int (*init)(cubeb** context, char const* context_name);
+ char const* (*get_backend_id)(cubeb* context);
+ int (*get_max_channel_count)(cubeb* context, uint32_t* max_channels);
+ int (*get_min_latency)(cubeb* context, cubeb_stream_params params,
+ uint32_t* latency_ms);
+ int (*get_preferred_sample_rate)(cubeb* context, uint32_t* rate);
+ int (*get_supported_input_processing_params)(
+ cubeb* context, cubeb_input_processing_params* params);
+ int (*enumerate_devices)(cubeb* context, cubeb_device_type type,
+ cubeb_device_collection* collection);
+ int (*device_collection_destroy)(cubeb* context,
+ cubeb_device_collection* collection);
+ void (*destroy)(cubeb* context);
+ int (*stream_init)(cubeb* context, cubeb_stream** stream,
+ char const* stream_name, cubeb_devid input_device,
+ cubeb_stream_params* input_stream_params,
+ cubeb_devid output_device,
+ cubeb_stream_params* output_stream_params,
+ unsigned int latency, cubeb_data_callback data_callback,
+ cubeb_state_callback state_callback, void* user_ptr);
+ void (*stream_destroy)(cubeb_stream* stream);
+ int (*stream_start)(cubeb_stream* stream);
+ int (*stream_stop)(cubeb_stream* stream);
+ int (*stream_get_position)(cubeb_stream* stream, uint64_t* position);
+ int (*stream_get_latency)(cubeb_stream* stream, uint32_t* latency);
+ int (*stream_get_input_latency)(cubeb_stream* stream, uint32_t* latency);
+ int (*stream_set_volume)(cubeb_stream* stream, float volumes);
+ int (*stream_set_name)(cubeb_stream* stream, char const* stream_name);
+ int (*stream_get_current_device)(cubeb_stream* stream,
+ cubeb_device** const device);
+ int (*stream_set_input_mute)(cubeb_stream* stream, int mute);
+ int (*stream_set_input_processing_params)(
+ cubeb_stream* stream, cubeb_input_processing_params params);
+ int (*stream_device_destroy)(cubeb_stream* stream, cubeb_device* device);
+ int (*stream_register_device_changed_callback)(
+ cubeb_stream* stream,
+ cubeb_device_changed_callback device_changed_callback);
+ int (*register_device_collection_changed)(
+ cubeb* context, cubeb_device_type devtype,
+ cubeb_device_collection_changed_callback callback, void* user_ptr);
+};
+
+// Keep those and the struct definition in sync with cubeb.h and
+// cubeb-internal.h
+// Forward declarations for the entries of the mock_ops vtable below; the
+// definitions live at the bottom of this header.
+void cubeb_mock_destroy(cubeb* context);
+static int cubeb_mock_enumerate_devices(cubeb* context, cubeb_device_type type,
+ cubeb_device_collection* out);
+
+static int cubeb_mock_device_collection_destroy(
+ cubeb* context, cubeb_device_collection* collection);
+
+static int cubeb_mock_register_device_collection_changed(
+ cubeb* context, cubeb_device_type devtype,
+ cubeb_device_collection_changed_callback callback, void* user_ptr);
+
+static int cubeb_mock_stream_init(
+ cubeb* context, cubeb_stream** stream, char const* stream_name,
+ cubeb_devid input_device, cubeb_stream_params* input_stream_params,
+ cubeb_devid output_device, cubeb_stream_params* output_stream_params,
+ unsigned int latency, cubeb_data_callback data_callback,
+ cubeb_state_callback state_callback, void* user_ptr);
+
+static int cubeb_mock_stream_start(cubeb_stream* stream);
+
+static int cubeb_mock_stream_stop(cubeb_stream* stream);
+
+static int cubeb_mock_stream_get_position(cubeb_stream* stream,
+ uint64_t* position);
+
+static void cubeb_mock_stream_destroy(cubeb_stream* stream);
+
+static char const* cubeb_mock_get_backend_id(cubeb* context);
+
+static int cubeb_mock_stream_set_volume(cubeb_stream* stream, float volume);
+
+static int cubeb_mock_stream_set_name(cubeb_stream* stream,
+ char const* stream_name);
+
+static int cubeb_mock_stream_register_device_changed_callback(
+ cubeb_stream* stream,
+ cubeb_device_changed_callback device_changed_callback);
+
+static int cubeb_mock_get_min_latency(cubeb* context,
+ cubeb_stream_params params,
+ uint32_t* latency_ms);
+
+static int cubeb_mock_get_preferred_sample_rate(cubeb* context, uint32_t* rate);
+
+static int cubeb_mock_get_max_channel_count(cubeb* context,
+ uint32_t* max_channels);
+
+// The mock backend's vtable.  Entries left NULL are features this mock does
+// not implement (latency queries, input mute/processing, current device).
+cubeb_ops const mock_ops = {
+ /*.init =*/NULL,
+ /*.get_backend_id =*/cubeb_mock_get_backend_id,
+ /*.get_max_channel_count =*/cubeb_mock_get_max_channel_count,
+ /*.get_min_latency =*/cubeb_mock_get_min_latency,
+ /*.get_preferred_sample_rate =*/cubeb_mock_get_preferred_sample_rate,
+ /*.get_supported_input_processing_params =*/
+ NULL,
+ /*.enumerate_devices =*/cubeb_mock_enumerate_devices,
+ /*.device_collection_destroy =*/cubeb_mock_device_collection_destroy,
+ /*.destroy =*/cubeb_mock_destroy,
+ /*.stream_init =*/cubeb_mock_stream_init,
+ /*.stream_destroy =*/cubeb_mock_stream_destroy,
+ /*.stream_start =*/cubeb_mock_stream_start,
+ /*.stream_stop =*/cubeb_mock_stream_stop,
+ /*.stream_get_position =*/cubeb_mock_stream_get_position,
+ /*.stream_get_latency =*/NULL,
+ /*.stream_get_input_latency =*/NULL,
+ /*.stream_set_volume =*/cubeb_mock_stream_set_volume,
+ /*.stream_set_name =*/cubeb_mock_stream_set_name,
+ /*.stream_get_current_device =*/NULL,
+
+ /*.stream_set_input_mute =*/NULL,
+ /*.stream_set_input_processing_params =*/
+ NULL,
+ /*.stream_device_destroy =*/NULL,
+ /*.stream_register_device_changed_callback =*/
+ cubeb_mock_stream_register_device_changed_callback,
+ /*.register_device_collection_changed =*/
+ cubeb_mock_register_device_collection_changed};
+
+class SmartMockCubebStream;
+
+// Represents the fake cubeb_stream. The context instance is needed to
+// provide access on cubeb_ops struct.
+class MockCubebStream {
+ friend class MockCubeb;
+
+ // These members need to have the exact same memory layout as a real
+ // cubeb_stream, so that AsMock() returns a pointer to this that can be used
+ // as a cubeb_stream.
+ cubeb* context;
+ void* mUserPtr;
+
+ public:
+ enum class KeepProcessing { No, Yes };
+ enum class RunningMode { Automatic, Manual };
+
+ MockCubebStream(cubeb* aContext, char const* aStreamName,
+ cubeb_devid aInputDevice,
+ cubeb_stream_params* aInputStreamParams,
+ cubeb_devid aOutputDevice,
+ cubeb_stream_params* aOutputStreamParams,
+ cubeb_data_callback aDataCallback,
+ cubeb_state_callback aStateCallback, void* aUserPtr,
+ SmartMockCubebStream* aSelf, RunningMode aRunningMode,
+ bool aFrozenStart);
+
+ ~MockCubebStream();
+
+ // cubeb_stream API surface, dispatched to by the cubeb_mock_stream_*
+ // wrappers at the bottom of this header.
+ int Start();
+ int Stop();
+ uint64_t Position();
+ void Destroy();
+ int SetName(char const* aName);
+ int RegisterDeviceChangedCallback(
+ cubeb_device_changed_callback aDeviceChangedCallback);
+
+ cubeb_stream* AsCubebStream();
+ static MockCubebStream* AsMock(cubeb_stream* aStream);
+
+ char const* StreamName() const { return mName.get(); }
+ cubeb_devid GetInputDeviceID() const;
+ cubeb_devid GetOutputDeviceID() const;
+
+ uint32_t InputChannels() const;
+ uint32_t OutputChannels() const;
+ uint32_t SampleRate() const;
+ uint32_t InputFrequency() const;
+
+ // Test-control API.
+ void SetDriftFactor(float aDriftFactor);
+ void ForceError();
+ void ForceDeviceChanged();
+ void Thaw();
+
+ // For RunningMode::Manual, drive this MockCubebStream forward.
+ KeepProcessing ManualDataCallback(long aNrFrames);
+
+ // For RunningMode::Manual, notify the client of a DeviceChanged event
+ // synchronously.
+ void NotifyDeviceChangedNow();
+
+ // Enable output recording for this stream. This is best called before
+ // the thread is running, but is safe to call whenever.
+ void SetOutputRecordingEnabled(bool aEnabled);
+ // Enable input recording for this stream. This is best called before
+ // the thread is running, but is safe to call whenever.
+ void SetInputRecordingEnabled(bool aEnabled);
+ // Get the recorded output from this stream. This doesn't copy, and therefore
+ // only works once.
+ nsTArray<AudioDataValue>&& TakeRecordedOutput();
+ // Get the recorded input from this stream. This doesn't copy, and therefore
+ // only works once.
+ nsTArray<AudioDataValue>&& TakeRecordedInput();
+
+ MediaEventSource<nsCString>& NameSetEvent();
+ MediaEventSource<cubeb_state>& StateEvent();
+ MediaEventSource<uint32_t>& FramesProcessedEvent();
+ // Notified when frames are processed after first non-silent output
+ MediaEventSource<uint32_t>& FramesVerifiedEvent();
+ // Notified when the stream is Stop()ed
+ MediaEventSource<std::tuple<uint64_t, float, uint32_t>>&
+ OutputVerificationEvent();
+ MediaEventSource<void>& ErrorForcedEvent();
+ MediaEventSource<void>& DeviceChangeForcedEvent();
+
+ private:
+ KeepProcessing Process(long aNrFrames);
+ KeepProcessing Process10Ms();
+
+ public:
+ const RunningMode mRunningMode;
+ const bool mHasInput;
+ const bool mHasOutput;
+ // Back-pointer to the refcounted wrapper that owns this stream.
+ SmartMockCubebStream* const mSelf;
+
+ private:
+ void NotifyState(cubeb_state aState);
+ void NotifyDeviceChanged();
+
+ static constexpr long kMaxNrFrames = 1920;
+ // Monitor used to block start until mFrozenStart is false.
+ Monitor mFrozenStartMonitor MOZ_UNANNOTATED;
+ // Whether this stream should wait for an explicit start request before
+ // starting. Protected by mFrozenStartMonitor.
+ bool mFrozenStart;
+ // Used to abort a frozen start when the stream is stopped while a
+ // cubeb_stream_start() call is blocked waiting on the monitor.
+ std::atomic_bool mStreamStop{true};
+ // Whether or not the output-side of this stream (what is written from the
+ // callback output buffer) is recorded in an internal buffer. The data is then
+ // available via `TakeRecordedOutput`.
+ std::atomic_bool mOutputRecordingEnabled{false};
+ // Whether or not the input-side of this stream (what is written from the
+ // callback input buffer) is recorded in an internal buffer. The data is then
+ // available via `TakeRecordedInput`.
+ std::atomic_bool mInputRecordingEnabled{false};
+ // The audio buffer used on data callback.
+ AudioDataValue mOutputBuffer[MAX_OUTPUT_CHANNELS * kMaxNrFrames] = {};
+ AudioDataValue mInputBuffer[MAX_INPUT_CHANNELS * kMaxNrFrames] = {};
+ // The audio callback
+ cubeb_data_callback mDataCallback = nullptr;
+ // The stream state callback
+ cubeb_state_callback mStateCallback = nullptr;
+ // The device changed callback
+ cubeb_device_changed_callback mDeviceChangedCallback = nullptr;
+ // A name for this stream
+ nsCString mName;
+ // The stream params
+ cubeb_stream_params mOutputParams = {};
+ cubeb_stream_params mInputParams = {};
+ /* Device IDs */
+ cubeb_devid mInputDeviceID;
+ cubeb_devid mOutputDeviceID;
+
+ std::atomic<float> mDriftFactor{1.0};
+ std::atomic_bool mFastMode{false};
+ std::atomic_bool mForceErrorState{false};
+ std::atomic_bool mForceDeviceChanged{false};
+ std::atomic_bool mDestroyed{false};
+ std::atomic<uint64_t> mPosition{0};
+ // Generates the synthetic input signal and verifies the output signal.
+ AudioGenerator<AudioDataValue> mAudioGenerator;
+ AudioVerifier<AudioDataValue> mAudioVerifier;
+
+ MediaEventProducer<nsCString> mNameSetEvent;
+ MediaEventProducer<cubeb_state> mStateEvent;
+ MediaEventProducer<uint32_t> mFramesProcessedEvent;
+ MediaEventProducer<uint32_t> mFramesVerifiedEvent;
+ MediaEventProducer<std::tuple<uint64_t, float, uint32_t>>
+ mOutputVerificationEvent;
+ MediaEventProducer<void> mErrorForcedEvent;
+ MediaEventProducer<void> mDeviceChangedForcedEvent;
+ // The recorded data, copied from the output_buffer of the callback.
+ // Interleaved.
+ nsTArray<AudioDataValue> mRecordedOutput;
+ // The recorded data, copied from the input buffer of the callback.
+ // Interleaved.
+ nsTArray<AudioDataValue> mRecordedInput;
+};
+
+// Refcounted wrapper around MockCubebStream that supports thread-safe weak
+// pointers, so MockCubeb and tests can hold RefPtrs to streams whose
+// lifetime is driven by the cubeb API (StreamInit/StreamDestroy).
+class SmartMockCubebStream
+ : public MockCubebStream,
+ public SupportsThreadSafeWeakPtr<SmartMockCubebStream> {
+ public:
+ MOZ_DECLARE_REFCOUNTED_TYPENAME(SmartMockCubebStream)
+ SmartMockCubebStream(cubeb* aContext, char const* aStreamName,
+ cubeb_devid aInputDevice,
+ cubeb_stream_params* aInputStreamParams,
+ cubeb_devid aOutputDevice,
+ cubeb_stream_params* aOutputStreamParams,
+ cubeb_data_callback aDataCallback,
+ cubeb_state_callback aStateCallback, void* aUserPtr,
+ RunningMode aRunningMode, bool aFrozenStart)
+ : MockCubebStream(aContext, aStreamName, aInputDevice, aInputStreamParams,
+ aOutputDevice, aOutputStreamParams, aDataCallback,
+ aStateCallback, aUserPtr, this, aRunningMode,
+ aFrozenStart) {}
+};
+
+// This class has two facets: it is both a fake cubeb backend that is intended
+// to be used for testing, and passed to Gecko code that expects a normal
+// backend, but is also controllable by the test code to decide what the backend
+// should do, depending on what is being tested.
+class MockCubeb {
+ // This needs to have the exact same memory layout as a real cubeb backend.
+ // It's very important for the `ops` member to be the very first member of
+ // the class, and for MockCubeb to not have any virtual members (to avoid
+ // having a vtable), so that AsMock() returns a pointer to this that can be
+ // used as a cubeb backend.
+ const cubeb_ops* ops;
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MockCubeb);
+
+ public:
+ using RunningMode = MockCubebStream::RunningMode;
+
+ MockCubeb();
+ explicit MockCubeb(RunningMode aRunningMode);
+ // Cubeb backend implementation
+ // This allows passing this class as a cubeb* instance.
+ // cubeb_destroy(context) should eventually be called on the return value
+ // iff this method is called.
+ cubeb* AsCubebContext();
+ static MockCubeb* AsMock(cubeb* aContext);
+ void Destroy();
+ // Fill in the collection parameter with all devices of aType.
+ int EnumerateDevices(cubeb_device_type aType,
+ cubeb_device_collection* aCollection);
+ // Clear the collection parameter and deallocate its related memory space.
+ int DestroyDeviceCollection(cubeb_device_collection* aCollection);
+
+ // For a given device type, add a callback, called with a user pointer, when
+ // the device collection for this backend changes (i.e. a device has been
+ // removed or added).
+ int RegisterDeviceCollectionChangeCallback(
+ cubeb_device_type aDevType,
+ cubeb_device_collection_changed_callback aCallback, void* aUserPtr);
+
+ // Control API
+
+ // Add an input or output device to this backend. This calls the device
+ // collection invalidation callback if needed.
+ void AddDevice(cubeb_device_info aDevice);
+ // Remove a specific input or output device to this backend, returns true if
+ // a device was removed. This calls the device collection invalidation
+ // callback if needed.
+ bool RemoveDevice(cubeb_devid aId);
+ // Remove all input or output devices from this backend, without calling the
+ // callback. This is meant to clean up in between tests.
+ void ClearDevices(cubeb_device_type aType);
+
+ // This allows simulating a backend that does not support setting a device
+ // collection invalidation callback, to be able to test the fallback path.
+ void SetSupportDeviceChangeCallback(bool aSupports);
+
+ // This causes the next stream init with this context to return failure.
+ void ForceStreamInitError();
+
+ // Makes MockCubebStreams starting after this point wait for AllowStart().
+ // Callers must ensure they get a hold of the stream through StreamInitEvent
+ // to be able to start them.
+ void SetStreamStartFreezeEnabled(bool aEnabled);
+
+ // Helper class that automatically unforces a forced audio thread on release.
+ class AudioThreadAutoUnforcer {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioThreadAutoUnforcer)
+
+ public:
+ explicit AudioThreadAutoUnforcer(MockCubeb* aContext)
+ : mContext(aContext) {}
+
+ protected:
+ virtual ~AudioThreadAutoUnforcer() { mContext->UnforceAudioThread(); }
+ MockCubeb* mContext;
+ };
+
+ // Creates the audio thread if one is not available. The audio thread remains
+ // forced until UnforceAudioThread is called. The returned promise is resolved
+ // when the audio thread is running. With this, a test can ensure starting
+ // audio streams is deterministically fast across platforms for more accurate
+ // results.
+ using ForcedAudioThreadPromise =
+ MozPromise<RefPtr<AudioThreadAutoUnforcer>, nsresult, false>;
+ RefPtr<ForcedAudioThreadPromise> ForceAudioThread();
+
+ // Allows a forced audio thread to stop.
+ void UnforceAudioThread();
+
+ int StreamInit(cubeb* aContext, cubeb_stream** aStream,
+ char const* aStreamName, cubeb_devid aInputDevice,
+ cubeb_stream_params* aInputStreamParams,
+ cubeb_devid aOutputDevice,
+ cubeb_stream_params* aOutputStreamParams,
+ cubeb_data_callback aDataCallback,
+ cubeb_state_callback aStateCallback, void* aUserPtr);
+
+ void StreamDestroy(MockCubebStream* aStream);
+
+ // Speed up or restore the pacing of the fake audio thread.
+ void GoFaster();
+ void DontGoFaster();
+
+ MediaEventSource<RefPtr<SmartMockCubebStream>>& StreamInitEvent();
+ MediaEventSource<RefPtr<SmartMockCubebStream>>& StreamDestroyEvent();
+
+ // MockCubeb specific API
+ void StartStream(MockCubebStream* aStream);
+ void StopStream(MockCubebStream* aStream);
+
+ // Simulates the audio thread. The thread is created at Start and destroyed
+ // at Stop. At next StreamStart a new thread is created.
+ static void ThreadFunction_s(MockCubeb* aContext) {
+ aContext->ThreadFunction();
+ }
+
+ void ThreadFunction();
+
+ private:
+ ~MockCubeb();
+ // The callback to call when the device list has been changed.
+ cubeb_device_collection_changed_callback
+ mInputDeviceCollectionChangeCallback = nullptr;
+ cubeb_device_collection_changed_callback
+ mOutputDeviceCollectionChangeCallback = nullptr;
+ // The pointer to pass in the callback.
+ void* mInputDeviceCollectionChangeUserPtr = nullptr;
+ void* mOutputDeviceCollectionChangeUserPtr = nullptr;
+ // NOTE(review): mUserPtr appears unused in this class — confirm before
+ // removing.
+ void* mUserPtr = nullptr;
+ // Whether or not this backend supports device collection change
+ // notification via a system callback. If not, Gecko is expected to re-query
+ // the list every time.
+ bool mSupportsDeviceCollectionChangedCallback = true;
+ const RunningMode mRunningMode;
+ // When true, the next StreamInit() fails once with
+ // CUBEB_ERROR_DEVICE_UNAVAILABLE.
+ Atomic<bool> mStreamInitErrorState;
+ // Whether new MockCubebStreams should be frozen on start.
+ Atomic<bool> mStreamStartFreezeEnabled{false};
+ // Whether the audio thread is forced, i.e., whether it remains active even
+ // with no live streams.
+ Atomic<bool> mForcedAudioThread{false};
+ Atomic<bool> mHasCubebContext{false};
+ Atomic<bool> mDestroyed{false};
+ MozPromiseHolder<ForcedAudioThreadPromise> mForcedAudioThreadPromise;
+ // Our input and output devices.
+ nsTArray<cubeb_device_info> mInputDevices;
+ nsTArray<cubeb_device_info> mOutputDevices;
+
+ // The streams that are currently running.
+ DataMutex<nsTArray<RefPtr<SmartMockCubebStream>>> mLiveStreams{
+ "MockCubeb::mLiveStreams"};
+ // Thread that simulates the audio thread, shared across MockCubebStreams to
+ // avoid unintended drift. This is set together with mLiveStreams, under the
+ // mLiveStreams DataMutex.
+ UniquePtr<std::thread> mFakeAudioThread;
+ // Whether to run the fake audio thread in fast mode, not caring about wall
+ // clock time. false is default and means data is processed every 10ms. When
+ // true we sleep(0) between iterations instead of 10ms.
+ std::atomic<bool> mFastMode{false};
+
+ MediaEventProducer<RefPtr<SmartMockCubebStream>> mStreamInitEvent;
+ MediaEventProducer<RefPtr<SmartMockCubebStream>> mStreamDestroyEvent;
+};
+
+// --- mock_ops entries: thin wrappers dispatching to MockCubeb ---
+
+int cubeb_mock_enumerate_devices(cubeb* context, cubeb_device_type type,
+ cubeb_device_collection* out) {
+ return MockCubeb::AsMock(context)->EnumerateDevices(type, out);
+}
+
+int cubeb_mock_device_collection_destroy(cubeb* context,
+ cubeb_device_collection* collection) {
+ return MockCubeb::AsMock(context)->DestroyDeviceCollection(collection);
+}
+
+int cubeb_mock_register_device_collection_changed(
+ cubeb* context, cubeb_device_type devtype,
+ cubeb_device_collection_changed_callback callback, void* user_ptr) {
+ return MockCubeb::AsMock(context)->RegisterDeviceCollectionChangeCallback(
+ devtype, callback, user_ptr);
+}
+
+// Note: the `latency` argument is accepted but not forwarded to the mock.
+int cubeb_mock_stream_init(
+ cubeb* context, cubeb_stream** stream, char const* stream_name,
+ cubeb_devid input_device, cubeb_stream_params* input_stream_params,
+ cubeb_devid output_device, cubeb_stream_params* output_stream_params,
+ unsigned int latency, cubeb_data_callback data_callback,
+ cubeb_state_callback state_callback, void* user_ptr) {
+ return MockCubeb::AsMock(context)->StreamInit(
+ context, stream, stream_name, input_device, input_stream_params,
+ output_device, output_stream_params, data_callback, state_callback,
+ user_ptr);
+}
+
+// --- mock_ops stream entries: dispatch to the MockCubebStream ---
+
+int cubeb_mock_stream_start(cubeb_stream* stream) {
+ return MockCubebStream::AsMock(stream)->Start();
+}
+
+int cubeb_mock_stream_stop(cubeb_stream* stream) {
+ return MockCubebStream::AsMock(stream)->Stop();
+}
+
+int cubeb_mock_stream_get_position(cubeb_stream* stream, uint64_t* position) {
+ *position = MockCubebStream::AsMock(stream)->Position();
+ return CUBEB_OK;
+}
+
+void cubeb_mock_stream_destroy(cubeb_stream* stream) {
+ MockCubebStream::AsMock(stream)->Destroy();
+}
+
+// Pretend to be the platform's usual backend so backend-specific code paths
+// in Gecko behave as they would in production.
+static char const* cubeb_mock_get_backend_id(cubeb* context) {
+#if defined(XP_MACOSX)
+ return "audiounit";
+#elif defined(XP_WIN)
+ return "wasapi";
+#elif defined(ANDROID)
+ return "opensl";
+#elif defined(__OpenBSD__)
+ return "sndio";
+#else
+ return "pulse";
+#endif
+}
+
+// Volume changes are accepted but have no effect in the mock.
+static int cubeb_mock_stream_set_volume(cubeb_stream* stream, float volume) {
+ return CUBEB_OK;
+}
+
+// Forward the rename to the mock stream.  The unreachable duplicate
+// `return CUBEB_OK;` that followed the first return has been removed.
+static int cubeb_mock_stream_set_name(cubeb_stream* stream,
+ char const* stream_name) {
+ return MockCubebStream::AsMock(stream)->SetName(stream_name);
+}
+
+int cubeb_mock_stream_register_device_changed_callback(
+ cubeb_stream* stream,
+ cubeb_device_changed_callback device_changed_callback) {
+ return MockCubebStream::AsMock(stream)->RegisterDeviceChangedCallback(
+ device_changed_callback);
+}
+
+// Fixed capability answers for the mock backend: 10ms minimum latency,
+// 44.1kHz preferred rate, MAX_OUTPUT_CHANNELS channels.
+int cubeb_mock_get_min_latency(cubeb* context, cubeb_stream_params params,
+ uint32_t* latency_ms) {
+ *latency_ms = 10;
+ return CUBEB_OK;
+}
+
+int cubeb_mock_get_preferred_sample_rate(cubeb* context, uint32_t* rate) {
+ *rate = 44100;
+ return CUBEB_OK;
+}
+
+int cubeb_mock_get_max_channel_count(cubeb* context, uint32_t* max_channels) {
+ *max_channels = MAX_OUTPUT_CHANNELS;
+ return CUBEB_OK;
+}
+
+// Test helpers for building and dumping device descriptions; implemented
+// elsewhere in the gtest support code.
+void PrintDevice(cubeb_device_info aInfo);
+
+void PrintDevice(AudioDeviceInfo* aInfo);
+
+cubeb_device_info DeviceTemplate(cubeb_devid aId, cubeb_device_type aType,
+ const char* name);
+
+cubeb_device_info DeviceTemplate(cubeb_devid aId, cubeb_device_type aType);
+
+void AddDevices(MockCubeb* mock, uint32_t device_count,
+ cubeb_device_type deviceType);
+
+} // namespace mozilla
+
+#endif // MOCKCUBEB_H_
diff --git a/dom/media/gtest/MockMediaResource.cpp b/dom/media/gtest/MockMediaResource.cpp
new file mode 100644
index 0000000000..8811af7c0b
--- /dev/null
+++ b/dom/media/gtest/MockMediaResource.cpp
@@ -0,0 +1,91 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MockMediaResource.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+namespace mozilla {
+
+// A MediaResource backed by a plain file on disk; the file is opened lazily
+// via Open(), not in the constructor.
+MockMediaResource::MockMediaResource(const char* aFileName)
+ : mFileHandle(nullptr), mFileName(aFileName) {}
+
+// Open the backing file read-only in binary mode.  Returns NS_ERROR_FAILURE
+// (after logging) if the file cannot be opened.
+nsresult MockMediaResource::Open() {
+ mFileHandle = fopen(mFileName, "rb");
+ if (mFileHandle == nullptr) {
+ printf_stderr("Can't open %s\n", mFileName);
+ return NS_ERROR_FAILURE;
+ }
+ return NS_OK;
+}
+
+MockMediaResource::~MockMediaResource() {
+ if (mFileHandle != nullptr) {
+ fclose(mFileHandle);
+ }
+}
+
+// Read up to aCount bytes at aOffset into aBuffer, reporting the number of
+// bytes actually read in *aBytes.  Asserts (and fails) on re-entrant calls.
+// NOTE(review): fseek takes a long, so offsets beyond 2GiB would truncate on
+// platforms with 32-bit long (e.g. Windows) — acceptable for test fixtures,
+// but confirm if large files are ever used.
+nsresult MockMediaResource::ReadAt(int64_t aOffset, char* aBuffer,
+ uint32_t aCount, uint32_t* aBytes) {
+ if (mFileHandle == nullptr) {
+ return NS_ERROR_FAILURE;
+ }
+
+ // Make it fail if we're re-entrant
+ if (mEntry++) {
+ MOZ_ASSERT(false);
+ return NS_ERROR_FAILURE;
+ }
+
+ fseek(mFileHandle, aOffset, SEEK_SET);
+ *aBytes = fread(aBuffer, 1, aCount, mFileHandle);
+
+ mEntry--;
+
+ return ferror(mFileHandle) ? NS_ERROR_FAILURE : NS_OK;
+}
+
+// File length in bytes, or -1 if the file is not open.  Seeks to the end as
+// a side effect.
+int64_t MockMediaResource::GetLength() {
+ if (mFileHandle == nullptr) {
+ return -1;
+ }
+ fseek(mFileHandle, 0, SEEK_END);
+ return ftell(mFileHandle);
+}
+
+// Test-control helpers for faking which byte ranges appear cached.
+void MockMediaResource::MockClearBufferedRanges() { mRanges.Clear(); }
+
+void MockMediaResource::MockAddBufferedRange(int64_t aStart, int64_t aEnd) {
+ mRanges += MediaByteRange(aStart, aEnd);
+}
+
+// Returns the start of the first mocked range for offset 0, or the start of
+// the range following the one that begins exactly at aOffset; -1 when none.
+int64_t MockMediaResource::GetNextCachedData(int64_t aOffset) {
+ if (!aOffset) {
+ return mRanges.Length() ? mRanges[0].mStart : -1;
+ }
+ for (size_t i = 0; i < mRanges.Length(); i++) {
+ if (aOffset == mRanges[i].mStart) {
+ ++i;
+ return i < mRanges.Length() ? mRanges[i].mStart : -1;
+ }
+ }
+ return -1;
+}
+
+// End of the mocked range starting exactly at aOffset; aOffset itself (an
+// empty cached region) when no range starts there.
+int64_t MockMediaResource::GetCachedDataEnd(int64_t aOffset) {
+ for (size_t i = 0; i < mRanges.Length(); i++) {
+ if (aOffset == mRanges[i].mStart) {
+ return mRanges[i].mEnd;
+ }
+ }
+ return aOffset;
+}
+
+// Report the mocked buffered ranges set via MockAddBufferedRange().
+nsresult MockMediaResource::GetCachedRanges(MediaByteRangeSet& aRanges) {
+ aRanges = mRanges;
+ return NS_OK;
+}
+
+} // namespace mozilla
diff --git a/dom/media/gtest/MockMediaResource.h b/dom/media/gtest/MockMediaResource.h
new file mode 100644
index 0000000000..9ec2a884a0
--- /dev/null
+++ b/dom/media/gtest/MockMediaResource.h
@@ -0,0 +1,56 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MOCK_MEDIA_RESOURCE_H_
+#define MOCK_MEDIA_RESOURCE_H_
+
+#include "MediaResource.h"
+#include "nsTArray.h"
+#include "mozilla/Atomics.h"
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(MockMediaResource, MediaResource);
+
+// A MediaResource backed by a plain file on disk, for use in gtests.
+// "Cached" ranges are not tracked from reads: tests declare them explicitly
+// with MockAddBufferedRange()/MockClearBufferedRanges().
+class MockMediaResource : public MediaResource,
+ public DecoderDoctorLifeLogger<MockMediaResource> {
+ public:
+ explicit MockMediaResource(const char* aFileName);
+ nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount,
+ uint32_t* aBytes) override;
+ // Data stored in file, caching recommended.
+ bool ShouldCacheReads() override { return true; }
+ // Pinning is a no-op for a plain file.
+ void Pin() override {}
+ void Unpin() override {}
+ int64_t GetLength() override;
+ int64_t GetNextCachedData(int64_t aOffset) override;
+ int64_t GetCachedDataEnd(int64_t aOffset) override;
+ bool IsDataCachedToEndOfResource(int64_t aOffset) override { return false; }
+ // Reads from the "cache" by delegating to ReadAt(); a short read is an
+ // error here, unlike in ReadAt() itself.
+ nsresult ReadFromCache(char* aBuffer, int64_t aOffset,
+ uint32_t aCount) override {
+ uint32_t bytesRead = 0;
+ nsresult rv = ReadAt(aOffset, aBuffer, aCount, &bytesRead);
+ NS_ENSURE_SUCCESS(rv, rv);
+ return bytesRead == aCount ? NS_OK : NS_ERROR_FAILURE;
+ }
+
+ // Opens the backing file; must succeed before any read method is used.
+ nsresult Open();
+ nsresult GetCachedRanges(MediaByteRangeSet& aRanges) override;
+
+ // Test-only controls for the mock cached-range set.
+ void MockClearBufferedRanges();
+ void MockAddBufferedRange(int64_t aStart, int64_t aEnd);
+
+ protected:
+ virtual ~MockMediaResource();
+
+ private:
+ FILE* mFileHandle; // Owned; closed in the destructor.
+ const char* mFileName; // Not owned; must outlive this object.
+ MediaByteRangeSet mRanges; // Mock cached ranges.
+ Atomic<int> mEntry; // Re-entrancy guard for ReadAt().
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/gtest/TestAudioBuffer.cpp b/dom/media/gtest/TestAudioBuffer.cpp
new file mode 100644
index 0000000000..63b96eac2b
--- /dev/null
+++ b/dom/media/gtest/TestAudioBuffer.cpp
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaData.h"
+#include "gtest/gtest.h"
+
+using mozilla::AlignedFloatBuffer;
+using mozilla::AudioDataValue;
+using mozilla::FloatToAudioSample;
+using mozilla::InflatableShortBuffer;
+
+// Fills aBuf (int16) and aFloatBuf (float) with the same 440 Hz sine wave
+// sampled at 44.1 kHz. Assumes aFloatBuf holds at least aBuf.Length()
+// samples -- NOTE(review): confirm callers always size them equally.
+void FillSine(InflatableShortBuffer& aBuf, AlignedFloatBuffer& aFloatBuf) {
+ // Write a constant-pitch sine wave in both the integer and float buffers.
+ float phase = 0;
+ float phaseIncrement = 2 * M_PI * 440. / 44100.f;
+ for (uint32_t i = 0; i < aBuf.Length(); i++) {
+ aBuf.get()[i] = FloatToAudioSample<int16_t>(sin(phase));
+ aFloatBuf.get()[i] = sin(phase);
+ phase += phaseIncrement;
+ // Wrap the phase into [0, 2*pi) to preserve float precision.
+ if (phase >= 2 * M_PI) {
+ phase -= 2 * M_PI;
+ }
+ }
+}
+
+// Checks that inflating an int16 buffer back to float reproduces the
+// original sine data to within one quantization step, across many sizes.
+TEST(InflatableAudioBuffer, Test)
+{
+ for (uint32_t i = 1; i < 10000; i++) {
+ InflatableShortBuffer buf(i);
+ AlignedFloatBuffer bufFloat(i);
+ FillSine(buf, bufFloat);
+ AlignedFloatBuffer inflated = buf.Inflate();
+ for (uint32_t j = 0; j < buf.Length(); j++) {
+ // Accept a very small difference because floats are floored in the
+ // conversion to integer.
+ if (std::abs(bufFloat.get()[j] - inflated.get()[j]) * 32767. > 1.0) {
+ fprintf(stderr, "%f != %f (size: %u, index: %u)\n", bufFloat.get()[j],
+ inflated.get()[j], i, j);
+ ASSERT_TRUE(false);
+ }
+ }
+ }
+
+}  // TEST(InflatableAudioBuffer, Test)
diff --git a/dom/media/gtest/TestAudioBuffers.cpp b/dom/media/gtest/TestAudioBuffers.cpp
new file mode 100644
index 0000000000..b9f85a8194
--- /dev/null
+++ b/dom/media/gtest/TestAudioBuffers.cpp
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdint.h>
+#include "AudioBufferUtils.h"
+#include "gtest/gtest.h"
+#include <vector>
+
+const uint32_t FRAMES = 256;
+
+// Exercises SpillBuffer filling/emptying into an AudioCallbackBufferWrapper
+// for a given channel count: partial fills, handover of the spilled frames,
+// direct writes, and the capacity limits of both buffers.
+void test_for_number_of_channels(const uint32_t channels) {
+ const uint32_t samples = channels * FRAMES;
+
+ mozilla::AudioCallbackBufferWrapper<float> mBuffer(channels);
+ mozilla::SpillBuffer<float, 128> b(channels);
+ std::vector<float> fromCallback(samples, 0.0);
+ std::vector<float> other(samples, 1.0);
+ mozilla::AudioChunk chunk;
+ chunk.mBufferFormat = mozilla::AUDIO_FORMAT_FLOAT32;
+ chunk.mChannelData.SetLength(channels);
+ // NOTE(review): channel pointers are spaced `channels` apart rather than
+ // FRAMES; harmless here because `other` is filled uniformly, but verify
+ // this spacing is intended.
+ for (uint32_t i = 0; i < channels; ++i) {
+ chunk.mChannelData[i] = other.data() + i * channels;
+ }
+
+ // Set the buffer in the wrapper from the callback
+ mBuffer.SetBuffer(fromCallback.data(), FRAMES);
+
+ // Fill the SpillBuffer with data.
+ chunk.mDuration = 15;
+ ASSERT_TRUE(b.Fill(chunk) == 15);
+ chunk.mDuration = 17;
+ ASSERT_TRUE(b.Fill(chunk) == 17);
+ for (uint32_t i = 0; i < 32 * channels; i++) {
+ other[i] = 0.0;
+ }
+
+ // Empty it in the AudioCallbackBufferWrapper
+ ASSERT_TRUE(b.Empty(mBuffer) == 32);
+
+ // Check Available() returns something reasonable
+ ASSERT_TRUE(mBuffer.Available() == FRAMES - 32);
+
+ // Fill the buffer with the rest of the data
+ mBuffer.WriteFrames(other.data() + 32 * channels, FRAMES - 32);
+
+ // Check the buffer is now full
+ ASSERT_TRUE(mBuffer.Available() == 0);
+
+ for (uint32_t i = 0; i < samples; i++) {
+ ASSERT_TRUE(fromCallback[i] == 1.0)
+ << "Difference at " << i << " (" << fromCallback[i] << " != " << 1.0
+ << ")\n";
+ }
+
+ // The SpillBuffer caps at 128 frames; a full buffer accepts no more data
+ // and there is nowhere left in mBuffer to empty into.
+ chunk.mDuration = FRAMES;
+ ASSERT_TRUE(b.Fill(chunk) == 128);
+ ASSERT_TRUE(b.Fill(chunk) == 0);
+ ASSERT_TRUE(b.Empty(mBuffer) == 0);
+}
+
+// Runs the buffer tests for mono through 8-channel configurations.
+TEST(AudioBuffers, Test)
+{
+ for (uint32_t ch = 1; ch <= 8; ++ch) {
+ test_for_number_of_channels(ch);
+ }
+}
diff --git a/dom/media/gtest/TestAudioCallbackDriver.cpp b/dom/media/gtest/TestAudioCallbackDriver.cpp
new file mode 100644
index 0000000000..050395fa44
--- /dev/null
+++ b/dom/media/gtest/TestAudioCallbackDriver.cpp
@@ -0,0 +1,477 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <tuple>
+
+#include "CubebUtils.h"
+#include "GraphDriver.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest.h"
+
+#include "MediaTrackGraphImpl.h"
+#include "mozilla/gtest/WaitFor.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/UniquePtr.h"
+#include "nsTArray.h"
+
+#include "MockCubeb.h"
+
+using namespace mozilla;
+using IterationResult = GraphInterface::IterationResult;
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::NiceMock;
+
+// A GraphInterface implementation for driving GraphDriver tests without a
+// real MediaTrackGraph. Iteration counts, graph time, driver switches and
+// input-data notifications are observable/controllable from the test thread.
+class MockGraphInterface : public GraphInterface {
+ NS_DECL_THREADSAFE_ISUPPORTS
+ explicit MockGraphInterface(TrackRate aSampleRate)
+ : mSampleRate(aSampleRate) {}
+ MOCK_METHOD0(NotifyInputStopped, void());
+ MOCK_METHOD5(NotifyInputData, void(const AudioDataValue*, size_t, TrackRate,
+ uint32_t, uint32_t));
+ MOCK_METHOD0(DeviceChanged, void());
+#ifdef DEBUG
+ MOCK_CONST_METHOD1(InDriverIteration, bool(const GraphDriver*));
+#endif
+ /* OneIteration cannot be mocked because IterationResult is non-memmovable and
+ * cannot be passed as a parameter, which GMock does internally. */
+ IterationResult OneIteration(GraphTime aStateComputedTime, GraphTime,
+ MixerCallbackReceiver* aMixerReceiver) {
+ GraphDriver* driver = mCurrentDriver;
+ if (aMixerReceiver) {
+ // Mix silence covering the newly computed interval for the receiver.
+ mMixer.StartMixing();
+ mMixer.Mix(nullptr, driver->AsAudioCallbackDriver()->OutputChannelCount(),
+ aStateComputedTime - mStateComputedTime, mSampleRate);
+ aMixerReceiver->MixerCallback(mMixer.MixedChunk(), mSampleRate);
+ }
+ if (aStateComputedTime != mStateComputedTime) {
+ // Only count iterations that actually advanced the graph clock.
+ mFramesIteratedEvent.Notify(aStateComputedTime - mStateComputedTime);
+ ++mIterationCount;
+ }
+ mStateComputedTime = aStateComputedTime;
+ if (!mKeepProcessing) {
+ return IterationResult::CreateStop(
+ NS_NewRunnableFunction(__func__, [] {}));
+ }
+ // A driver switch was requested via SwitchTo(). Note: the structured
+ // binding below shadows the `driver` local above.
+ if (auto guard = mNextDriver.Lock(); guard->isSome()) {
+ auto tup = guard->extract();
+ const auto& [driver, switchedRunnable] = tup;
+ return IterationResult::CreateSwitchDriver(driver, switchedRunnable);
+ }
+ if (mEnsureNextIteration) {
+ driver->EnsureNextIteration();
+ }
+ return IterationResult::CreateStillProcessing();
+ }
+ // When true, every iteration requests a follow-up iteration.
+ void SetEnsureNextIteration(bool aEnsure) { mEnsureNextIteration = aEnsure; }
+
+ size_t IterationCount() const { return mIterationCount; }
+
+ GraphTime StateComputedTime() const { return mStateComputedTime; }
+ void SetCurrentDriver(GraphDriver* aDriver) { mCurrentDriver = aDriver; }
+
+ // Makes the next iteration return CreateStop().
+ void StopIterating() { mKeepProcessing = false; }
+
+ // Requests a switch to aDriver on the next iteration; aSwitchedRunnable
+ // runs once the switch has taken effect.
+ void SwitchTo(RefPtr<GraphDriver> aDriver,
+ RefPtr<Runnable> aSwitchedRunnable = NS_NewRunnableFunction(
+ "DefaultNoopSwitchedRunnable", [] {})) {
+ auto guard = mNextDriver.Lock();
+ MOZ_ASSERT(guard->isNothing());
+ *guard =
+ Some(std::make_tuple(std::move(aDriver), std::move(aSwitchedRunnable)));
+ }
+ const TrackRate mSampleRate;
+
+ MediaEventSource<uint32_t>& FramesIteratedEvent() {
+ return mFramesIteratedEvent;
+ }
+
+ protected:
+ Atomic<size_t> mIterationCount{0};
+ Atomic<GraphTime> mStateComputedTime{0};
+ Atomic<GraphDriver*> mCurrentDriver{nullptr};
+ Atomic<bool> mEnsureNextIteration{false};
+ Atomic<bool> mKeepProcessing{true};
+ DataMutex<Maybe<std::tuple<RefPtr<GraphDriver>, RefPtr<Runnable>>>>
+ mNextDriver{"MockGraphInterface::mNextDriver"};
+ // NOTE(review): appears unused; SwitchTo() stores its runnable in
+ // mNextDriver instead.
+ RefPtr<Runnable> mNextDriverSwitchedRunnable;
+ MediaEventProducer<uint32_t> mFramesIteratedEvent;
+ AudioMixer mMixer;
+ virtual ~MockGraphInterface() = default;
+};
+
+NS_IMPL_ISUPPORTS0(MockGraphInterface)
+
+// Verifies that an AudioCallbackDriver can be started and shut down cleanly,
+// with ThreadRunning()/IsStarted() reporting the expected state throughout.
+TEST(TestAudioCallbackDriver, StartStop)
+MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
+ const TrackRate rate = 44100;
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ RefPtr<AudioCallbackDriver> driver;
+ auto graph = MakeRefPtr<NiceMock<MockGraphInterface>>(rate);
+ EXPECT_CALL(*graph, NotifyInputStopped).Times(0);
+
+ driver = MakeRefPtr<AudioCallbackDriver>(graph, nullptr, rate, 2, 0, nullptr,
+ nullptr, AudioInputType::Unknown);
+ EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
+
+ graph->SetCurrentDriver(driver);
+ driver->Start();
+ // Allow some time to "play" audio.
+ std::this_thread::sleep_for(std::chrono::milliseconds(200));
+ EXPECT_TRUE(driver->ThreadRunning()) << "Verify thread is running";
+ EXPECT_TRUE(driver->IsStarted()) << "Verify thread is started";
+
+ // This will block until all events have been executed.
+ MOZ_KnownLive(driver)->Shutdown();
+ EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
+}
+
+// Verifies driver behavior when the cubeb stream is slow to start: the
+// fallback (system clock) driver iterates the graph until real audio
+// callbacks arrive, and once they do, input/processed frame counts and the
+// graph clock stay consistent with each other.
+void TestSlowStart(const TrackRate aRate) MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
+ std::cerr << "TestSlowStart with rate " << aRate << std::endl;
+
+ MockCubeb* cubeb = new MockCubeb();
+ // Freeze stream start so the fallback driver has to carry iterations.
+ cubeb->SetStreamStartFreezeEnabled(true);
+ auto unforcer = WaitFor(cubeb->ForceAudioThread()).unwrap();
+ Unused << unforcer;
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ RefPtr<AudioCallbackDriver> driver;
+ auto graph = MakeRefPtr<NiceMock<MockGraphInterface>>(aRate);
+ EXPECT_CALL(*graph, NotifyInputStopped).Times(0);
+
+ nsIThread* mainThread = NS_GetCurrentThread();
+ Maybe<int64_t> audioStart;
+ Maybe<uint32_t> alreadyBuffered;
+ int64_t inputFrameCount = 0;
+ int64_t processedFrameCount = -1;
+ ON_CALL(*graph, NotifyInputData)
+ .WillByDefault([&](const AudioDataValue*, size_t aFrames, TrackRate,
+ uint32_t, uint32_t aAlreadyBuffered) {
+ if (!audioStart) {
+ audioStart = Some(graph->StateComputedTime());
+ alreadyBuffered = Some(aAlreadyBuffered);
+ mainThread->Dispatch(NS_NewRunnableFunction(__func__, [&] {
+ // Start processedFrameCount now, ignoring frames processed while
+ // waiting for the fallback driver to stop.
+ processedFrameCount = 0;
+ }));
+ }
+ EXPECT_NEAR(inputFrameCount,
+ static_cast<int64_t>(graph->StateComputedTime() -
+ *audioStart + *alreadyBuffered),
+ WEBAUDIO_BLOCK_SIZE)
+ << "Input should be behind state time, due to the delayed start. "
+ "stateComputedTime="
+ << graph->StateComputedTime() << ", audioStartTime=" << *audioStart
+ << ", alreadyBuffered=" << *alreadyBuffered;
+ inputFrameCount += aFrames;
+ });
+
+ driver = MakeRefPtr<AudioCallbackDriver>(graph, nullptr, aRate, 2, 2, nullptr,
+ (void*)1, AudioInputType::Voice);
+ EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
+
+ graph->SetCurrentDriver(driver);
+ graph->SetEnsureNextIteration(true);
+
+ driver->Start();
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ cubeb->SetStreamStartFreezeEnabled(false);
+
+ // Let the fallback driver iterate a few times and sanity-check its cadence.
+ const size_t fallbackIterations = 3;
+ WaitUntil(graph->FramesIteratedEvent(), [&](uint32_t aFrames) {
+ const GraphTime tenMillis = aRate / 100;
+ // An iteration is always rounded upwards to the next full block.
+ const GraphTime tenMillisIteration =
+ MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(tenMillis);
+ // The iteration may be smaller because up to an extra block may have been
+ // processed and buffered.
+ const GraphTime tenMillisMinIteration =
+ tenMillisIteration - WEBAUDIO_BLOCK_SIZE;
+ // An iteration must be at least one audio block.
+ const GraphTime minIteration =
+ std::max<GraphTime>(WEBAUDIO_BLOCK_SIZE, tenMillisMinIteration);
+ EXPECT_GE(aFrames, minIteration)
+ << "Fallback driver iteration >= 10ms, modulo an audio block";
+ EXPECT_LT(aFrames, static_cast<size_t>(aRate))
+ << "Fallback driver iteration <1s (sanity)";
+ return graph->IterationCount() >= fallbackIterations;
+ });
+
+ MediaEventListener processedListener =
+ stream->FramesProcessedEvent().Connect(mainThread, [&](uint32_t aFrames) {
+ if (processedFrameCount >= 0) {
+ processedFrameCount += aFrames;
+ }
+ });
+ // Unfreeze the stream so real audio callbacks start flowing.
+ stream->Thaw();
+
+ SpinEventLoopUntil(
+ "processed at least 100ms of audio data from stream callback"_ns,
+ [&] { return processedFrameCount >= aRate / 10; });
+
+ // This will block until all events have been queued.
+ MOZ_KnownLive(driver)->Shutdown();
+ // Process processListener events.
+ NS_ProcessPendingEvents(mainThread);
+ processedListener.Disconnect();
+
+ EXPECT_EQ(inputFrameCount, processedFrameCount);
+ EXPECT_NEAR(graph->StateComputedTime() - *audioStart,
+ inputFrameCount + *alreadyBuffered, WEBAUDIO_BLOCK_SIZE)
+ << "Graph progresses while audio driver runs. stateComputedTime="
+ << graph->StateComputedTime() << ", inputFrameCount=" << inputFrameCount;
+}
+
+// Runs the slow-start scenario at rates where 10ms is below, near, and above
+// one WebAudio block (128 samples).
+TEST(TestAudioCallbackDriver, SlowStart)
+MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
+ TestSlowStart(1000); // 10ms = 10 <<< 128 samples
+ TestSlowStart(8000); // 10ms = 80 < 128 samples
+ TestSlowStart(44100); // 10ms = 441 > 128 samples
+}
+
+#ifdef DEBUG
+// RAII helper: sets an atomic to a new value on construction and restores
+// the previous value on destruction, asserting that nobody else changed it
+// in between.
+template <typename T>
+class MOZ_STACK_CLASS AutoSetter {
+ std::atomic<T>& mVal;
+ T mNew;
+ T mOld;
+
+ public:
+ explicit AutoSetter(std::atomic<T>& aVal, T aNew)
+ : mVal(aVal), mNew(aNew), mOld(mVal.exchange(aNew)) {}
+ ~AutoSetter() {
+ DebugOnly<T> oldNew = mVal.exchange(mOld);
+ MOZ_ASSERT(oldNew == mNew);
+ }
+};
+#endif
+
+// Simulates a device change that takes a long time (one second): the driver
+// must fall back to the system-clock driver for the duration, keep the graph
+// clock in step with wall-clock time, ignore stray data callbacks, and hand
+// back to the audio driver once the device change completes.
+TEST(TestAudioCallbackDriver, SlowDeviceChange)
+MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ constexpr TrackRate rate = 48000;
+ MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ auto graph = MakeRefPtr<MockGraphInterface>(rate);
+ auto driver = MakeRefPtr<AudioCallbackDriver>(
+ graph, nullptr, rate, 2, 1, nullptr, (void*)1, AudioInputType::Voice);
+ EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
+
+#ifdef DEBUG
+ std::atomic<std::thread::id> threadInDriverIteration((std::thread::id()));
+ EXPECT_CALL(*graph, InDriverIteration(driver.get())).WillRepeatedly([&] {
+ return std::this_thread::get_id() == threadInDriverIteration;
+ });
+#endif
+ // Data callbacks issued during the device change must be dropped, so a
+ // callback with this many frames must never reach NotifyInputData.
+ constexpr size_t ignoredFrameCount = 1337;
+ EXPECT_CALL(*graph, NotifyInputData(_, 0, rate, 1, _)).Times(AnyNumber());
+ EXPECT_CALL(*graph, NotifyInputData(_, ignoredFrameCount, _, _, _)).Times(0);
+ EXPECT_CALL(*graph, DeviceChanged);
+
+ graph->SetCurrentDriver(driver);
+ graph->SetEnsureNextIteration(true);
+ // This starts the fallback driver.
+ driver->Start();
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+
+ // Wait for the audio driver to have started the stream before running data
+ // callbacks. driver->Start() does a dispatch to the cubeb operation thread
+ // and starts the stream there.
+ nsCOMPtr<nsIEventTarget> cubebOpThread = CUBEB_TASK_THREAD;
+ MOZ_ALWAYS_SUCCEEDS(SyncRunnable::DispatchToThread(
+ cubebOpThread, NS_NewRunnableFunction(__func__, [] {})));
+
+ // This makes the fallback driver stop on its next callback.
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ {
+#ifdef DEBUG
+ AutoSetter as(threadInDriverIteration, std::this_thread::get_id());
+#endif
+ while (driver->OnFallback()) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+ }
+
+ const TimeStamp wallClockStart = TimeStamp::Now();
+ const GraphTime graphClockStart = graph->StateComputedTime();
+ const size_t iterationCountStart = graph->IterationCount();
+
+ // Flag that the stream should force a devicechange event.
+ stream->NotifyDeviceChangedNow();
+
+ // The audio driver should now have switched on the fallback driver again.
+ {
+#ifdef DEBUG
+ AutoSetter as(threadInDriverIteration, std::this_thread::get_id());
+#endif
+ EXPECT_TRUE(driver->OnFallback());
+ }
+
+ // Make sure that the audio driver can handle (and ignore) data callbacks for
+ // a little while after the devicechange callback. Cubeb does not provide
+ // ordering guarantees here.
+ auto start = TimeStamp::Now();
+ while (start + TimeDuration::FromMilliseconds(5) > TimeStamp::Now()) {
+ EXPECT_EQ(stream->ManualDataCallback(ignoredFrameCount),
+ MockCubebStream::KeepProcessing::Yes);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Let the fallback driver start and spin for one second.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // Tell the fallback driver to hand over to the audio driver which has
+ // finished changing devices.
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+
+ // Wait for the fallback to stop.
+ {
+#ifdef DEBUG
+ AutoSetter as(threadInDriverIteration, std::this_thread::get_id());
+#endif
+ while (driver->OnFallback()) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+ }
+
+ TimeStamp wallClockEnd = TimeStamp::Now();
+ GraphTime graphClockEnd = graph->StateComputedTime();
+ size_t iterationCountEnd = graph->IterationCount();
+
+ auto wallClockDuration =
+ media::TimeUnit::FromTimeDuration(wallClockEnd - wallClockStart);
+ auto graphClockDuration =
+ media::TimeUnit(CheckedInt64(graphClockEnd) - graphClockStart, rate);
+
+ // Check that the time while we switched devices was accounted for by the
+ // fallback driver.
+ EXPECT_NEAR(
+ wallClockDuration.ToSeconds(), graphClockDuration.ToSeconds(),
+#ifdef XP_MACOSX
+ // SystemClockDriver on macOS in CI is underrunning, i.e. the driver
+ // thread when waiting for the next iteration waits too long. Therefore
+ // the graph clock is unable to keep up with wall clock.
+ wallClockDuration.ToSeconds() * 0.8
+#else
+ 0.1
+#endif
+ );
+ // Check that each fallback driver was of reasonable cadence. It's a thread
+ // that tries to run a task every 10ms. Check that the average callback
+ // interval i falls in 8ms ≤ i ≤ 40ms.
+ auto fallbackCadence =
+ graphClockDuration /
+ static_cast<int64_t>(iterationCountEnd - iterationCountStart);
+ EXPECT_LE(8, fallbackCadence.ToMilliseconds());
+ EXPECT_LE(fallbackCadence.ToMilliseconds(), 40.0);
+
+ // This will block until all events have been queued.
+ MOZ_KnownLive(driver)->Shutdown();
+ // Drain the event queue.
+ NS_ProcessPendingEvents(nullptr);
+}
+
+// Verifies that a DeviceChanged callback firing after the graph has switched
+// to a new driver (but before the old audio driver has fully stopped) is
+// handled without crashing or misreporting state.
+TEST(TestAudioCallbackDriver, DeviceChangeAfterStop)
+MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ constexpr TrackRate rate = 48000;
+ MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ auto graph = MakeRefPtr<MockGraphInterface>(rate);
+ auto driver = MakeRefPtr<AudioCallbackDriver>(
+ graph, nullptr, rate, 2, 1, nullptr, (void*)1, AudioInputType::Voice);
+ EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
+
+ // The driver the graph will switch to mid-test.
+ auto newDriver = MakeRefPtr<AudioCallbackDriver>(
+ graph, nullptr, rate, 2, 1, nullptr, (void*)1, AudioInputType::Voice);
+ EXPECT_FALSE(newDriver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(newDriver->IsStarted()) << "Verify thread is not started";
+
+#ifdef DEBUG
+ std::atomic<std::thread::id> threadInDriverIteration(
+ (std::this_thread::get_id()));
+ EXPECT_CALL(*graph, InDriverIteration(_)).WillRepeatedly([&] {
+ return std::this_thread::get_id() == threadInDriverIteration;
+ });
+#endif
+ EXPECT_CALL(*graph, NotifyInputData(_, 0, rate, 1, _)).Times(AnyNumber());
+ EXPECT_CALL(*graph, DeviceChanged);
+
+ graph->SetCurrentDriver(driver);
+ graph->SetEnsureNextIteration(true);
+ // This starts the fallback driver.
+ driver->Start();
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+
+ // Wait for the audio driver to have started or the DeviceChanged event will
+ // be ignored. driver->Start() does a dispatch to the cubeb operation thread
+ // and starts the stream there.
+ nsCOMPtr<nsIEventTarget> cubebOpThread = CUBEB_TASK_THREAD;
+ MOZ_ALWAYS_SUCCEEDS(SyncRunnable::DispatchToThread(
+ cubebOpThread, NS_NewRunnableFunction(__func__, [] {})));
+
+#ifdef DEBUG
+ AutoSetter as(threadInDriverIteration, std::this_thread::get_id());
+#endif
+
+ // This marks the audio driver as running.
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+
+ // If a fallback driver callback happens between the audio callback above, and
+ // the SwitchTo below, the audio driver will perform the switch instead of the
+ // fallback since the fallback will have stopped. This test may therefore
+ // intermittently take different code paths.
+
+ // Stop the fallback driver by switching audio driver in the graph.
+ {
+ Monitor mon(__func__);
+ MonitorAutoLock lock(mon);
+ bool switched = false;
+ graph->SwitchTo(newDriver, NS_NewRunnableFunction(__func__, [&] {
+ MonitorAutoLock lock(mon);
+ switched = true;
+ lock.Notify();
+ }));
+ // Block until the switched-runnable has fired.
+ while (!switched) {
+ lock.Wait();
+ }
+ }
+
+ {
+#ifdef DEBUG
+ AutoSetter as(threadInDriverIteration, std::thread::id());
+#endif
+ // After stopping the fallback driver, but before newDriver has stopped the
+ // old audio driver, fire a DeviceChanged event to ensure it is handled
+ // properly.
+ AudioCallbackDriver::DeviceChangedCallback_s(driver);
+ }
+
+ graph->StopIterating();
+ newDriver->EnsureNextIteration();
+ while (newDriver->OnFallback()) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // This will block until all events have been queued.
+ MOZ_KnownLive(driver)->Shutdown();
+ MOZ_KnownLive(newDriver)->Shutdown();
+ // Drain the event queue.
+ NS_ProcessPendingEvents(nullptr);
+}
diff --git a/dom/media/gtest/TestAudioCompactor.cpp b/dom/media/gtest/TestAudioCompactor.cpp
new file mode 100644
index 0000000000..8c37a98ddf
--- /dev/null
+++ b/dom/media/gtest/TestAudioCompactor.cpp
@@ -0,0 +1,131 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "gtest/gtest.h"
+#include "AudioCompactor.h"
+#include "nsDeque.h"
+#include "nsIMemoryReporter.h"
+
+using mozilla::AudioCompactor;
+using mozilla::AudioData;
+using mozilla::AudioDataValue;
+using mozilla::MediaQueue;
+
+// Deque functor that accumulates the heap size of each AudioData it visits;
+// used with MediaQueue::LockedForEach to measure total queue allocation.
+class MemoryFunctor : public nsDequeFunctor<AudioData> {
+ public:
+ MemoryFunctor() : mSize(0) {}
+ MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);
+
+ void operator()(AudioData* aObject) override {
+ mSize += aObject->SizeOfIncludingThis(MallocSizeOf);
+ }
+
+ size_t mSize; // Running total in bytes.
+};
+
+// Copy functor handed to AudioCompactor::Push(): counts invocations and
+// total frames "copied" (capped at mFrames) without writing to the
+// destination buffer.
+class TestCopy {
+ public:
+ TestCopy(uint32_t aFrames, uint32_t aChannels, uint32_t& aCallCount,
+ uint32_t& aFrameCount)
+ : mFrames(aFrames),
+ mChannels(aChannels),
+ mCallCount(aCallCount),
+ mFrameCount(aFrameCount) {}
+
+ uint32_t operator()(AudioDataValue* aBuffer, uint32_t aSamples) {
+ mCallCount += 1;
+ // Never report more frames than remain to be produced.
+ uint32_t frames = std::min(mFrames - mFrameCount, aSamples / mChannels);
+ mFrameCount += frames;
+ return frames;
+ }
+
+ private:
+ const uint32_t mFrames; // Total frames to produce.
+ const uint32_t mChannels;
+ uint32_t& mCallCount; // Shared with the test body.
+ uint32_t& mFrameCount; // Shared with the test body.
+};
+
+// Pushes aBytes worth of audio through an AudioCompactor and verifies the
+// copy functor ran, all frames were copied, and the allocation slop stays
+// within AudioCompactor::MAX_SLOP_DIVISOR.
+static void TestAudioCompactor(size_t aBytes) {
+ MediaQueue<AudioData> queue;
+ AudioCompactor compactor(queue);
+
+ uint64_t offset = 0;
+ uint64_t time = 0;
+ uint32_t sampleRate = 44000;
+ uint32_t channels = 2;
+ uint32_t frames = aBytes / (channels * sizeof(AudioDataValue));
+ size_t maxSlop = aBytes / AudioCompactor::MAX_SLOP_DIVISOR;
+
+ uint32_t callCount = 0;
+ uint32_t frameCount = 0;
+
+ compactor.Push(offset, time, sampleRate, frames, channels,
+ TestCopy(frames, channels, callCount, frameCount));
+
+ EXPECT_GT(callCount, 0U) << "copy functor never called";
+ EXPECT_EQ(frames, frameCount) << "incorrect number of frames copied";
+
+ // Measure the queue's total allocation, subtract the object headers, and
+ // check the wasted space against the allowed slop.
+ MemoryFunctor memoryFunc;
+ queue.LockedForEach(memoryFunc);
+ size_t allocSize = memoryFunc.mSize - (callCount * sizeof(AudioData));
+ size_t slop = allocSize - aBytes;
+ EXPECT_LE(slop, maxSlop) << "allowed too much allocation slop";
+}
+
+// Exercise the compactor at buffer sizes around typical allocation
+// boundaries (power-of-two and non-power-of-two).
+TEST(Media, AudioCompactor_4000)
+{ TestAudioCompactor(4000); }
+
+TEST(Media, AudioCompactor_4096)
+{ TestAudioCompactor(4096); }
+
+TEST(Media, AudioCompactor_5000)
+{ TestAudioCompactor(5000); }
+
+TEST(Media, AudioCompactor_5256)
+{ TestAudioCompactor(5256); }
+
+// Verifies AudioCompactor::NativeCopy copies exactly the source bytes into
+// the destination across multiple chunked copy calls, and nothing more.
+TEST(Media, AudioCompactor_NativeCopy)
+{
+ const uint32_t channels = 2;
+ const size_t srcBytes = 32;
+ const uint32_t srcSamples = srcBytes / sizeof(AudioDataValue);
+ const uint32_t srcFrames = srcSamples / channels;
+ uint8_t src[srcBytes];
+
+ for (uint32_t i = 0; i < srcBytes; ++i) {
+ src[i] = i;
+ }
+
+ AudioCompactor::NativeCopy copy(src, srcBytes, channels);
+
+ // Destination is twice the source size so over-copying is detectable.
+ const uint32_t dstSamples = srcSamples * 2;
+ AudioDataValue dst[dstSamples];
+
+ const AudioDataValue notCopied = 0xffff;
+ for (uint32_t i = 0; i < dstSamples; ++i) {
+ dst[i] = notCopied;
+ }
+
+ const uint32_t copyCount = 8;
+ uint32_t copiedFrames = 0;
+ uint32_t nextSample = 0;
+ for (uint32_t i = 0; i < copyCount; ++i) {
+ uint32_t copySamples = dstSamples / copyCount;
+ copiedFrames += copy(dst + nextSample, copySamples);
+ nextSample += copySamples;
+ }
+
+ EXPECT_EQ(srcFrames, copiedFrames) << "copy exact number of source frames";
+
+ // Verify that only the correct bytes were copied.
+ for (uint32_t i = 0; i < dstSamples; ++i) {
+ if (i < srcSamples) {
+ EXPECT_NE(notCopied, dst[i]) << "should have copied over these bytes";
+ } else {
+ EXPECT_EQ(notCopied, dst[i]) << "should not have copied over these bytes";
+ }
+ }
+}
diff --git a/dom/media/gtest/TestAudioDecoderInputTrack.cpp b/dom/media/gtest/TestAudioDecoderInputTrack.cpp
new file mode 100644
index 0000000000..7ab55ea9cb
--- /dev/null
+++ b/dom/media/gtest/TestAudioDecoderInputTrack.cpp
@@ -0,0 +1,454 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <utility>
+
+#include "AudioDecoderInputTrack.h"
+#include "gmock/gmock.h"
+#include "GraphDriver.h"
+#include "gtest/gtest.h"
+#include "MediaInfo.h"
+#include "MediaTrackGraphImpl.h"
+#include "mozilla/gtest/WaitFor.h"
+#include "nsThreadUtils.h"
+#include "VideoUtils.h"
+
+using namespace mozilla;
+using namespace mozilla::media;
+using testing::AssertionResult;
+using testing::NiceMock;
+using testing::Return;
+using ControlMessageInterface = MediaTrack::ControlMessageInterface;
+
+constexpr uint32_t kNoFlags = 0;
+constexpr TrackRate kRate = 44100;
+constexpr uint32_t kChannels = 2;
+
+class MockTestGraph : public MediaTrackGraphImpl {
+ public:
+ explicit MockTestGraph(TrackRate aRate)
+ : MediaTrackGraphImpl(0, aRate, nullptr, NS_GetCurrentThread()) {
+ ON_CALL(*this, OnGraphThread).WillByDefault(Return(true));
+ }
+
+ void Init(uint32_t aChannels) {
+ MediaTrackGraphImpl::Init(OFFLINE_THREAD_DRIVER, DIRECT_DRIVER, aChannels);
+ // We have to call `Destroy()` manually in order to break the reference.
+    // The reason we don't assign a null driver is that we would add a track
+ // to the graph, then it would trigger graph's `EnsureNextIteration()` that
+ // requires a non-null driver.
+ SetCurrentDriver(new NiceMock<MockDriver>());
+ }
+
+ MOCK_CONST_METHOD0(OnGraphThread, bool());
+ MOCK_METHOD1(AppendMessage, void(UniquePtr<ControlMessageInterface>));
+
+ protected:
+ ~MockTestGraph() = default;
+
+ class MockDriver : public GraphDriver {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MockDriver, override);
+
+ MockDriver() : GraphDriver(nullptr, nullptr, 0) {
+ ON_CALL(*this, OnThread).WillByDefault(Return(true));
+ ON_CALL(*this, ThreadRunning).WillByDefault(Return(true));
+ }
+
+ MOCK_METHOD0(Start, void());
+ MOCK_METHOD0(Shutdown, void());
+ MOCK_METHOD0(IterationDuration, uint32_t());
+ MOCK_METHOD0(EnsureNextIteration, void());
+ MOCK_CONST_METHOD0(OnThread, bool());
+ MOCK_CONST_METHOD0(ThreadRunning, bool());
+
+ protected:
+ ~MockDriver() = default;
+ };
+
+ bool mEnableFakeAppend = false;
+};
+
+AudioData* CreateAudioDataFromInfo(uint32_t aFrames, const AudioInfo& aInfo) {
+ AlignedAudioBuffer samples(aFrames * aInfo.mChannels);
+ return new AudioData(0, TimeUnit::Zero(), std::move(samples), aInfo.mChannels,
+ aInfo.mRate);
+}
+
+AudioDecoderInputTrack* CreateTrack(MediaTrackGraph* aGraph,
+ nsISerialEventTarget* aThread,
+ const AudioInfo& aInfo,
+ float aPlaybackRate = 1.0,
+ float aVolume = 1.0,
+ bool aPreservesPitch = true) {
+ return AudioDecoderInputTrack::Create(aGraph, aThread, aInfo, aPlaybackRate,
+ aVolume, aPreservesPitch);
+}
+
+class TestAudioDecoderInputTrack : public testing::Test {
+ protected:
+ void SetUp() override {
+ mGraph = MakeRefPtr<NiceMock<MockTestGraph>>(kRate);
+ mGraph->Init(kChannels);
+
+ mInfo.mRate = kRate;
+ mInfo.mChannels = kChannels;
+ mTrack = CreateTrack(mGraph, NS_GetCurrentThread(), mInfo);
+ EXPECT_FALSE(mTrack->Ended());
+ }
+
+ void TearDown() override {
+    // This simulates the normal usage where `Close()` is always called
+ // before the `Destroy()`.
+ mTrack->Close();
+ mTrack->Destroy();
+ // Remove the reference of the track from the mock graph, and then release
+ // the self-reference of mock graph.
+ mGraph->RemoveTrackGraphThread(mTrack);
+ mGraph->Destroy();
+ }
+
+ AudioData* CreateAudioData(uint32_t aFrames) {
+ return CreateAudioDataFromInfo(aFrames, mInfo);
+ }
+
+ AudioSegment* GetTrackSegment() { return mTrack->GetData<AudioSegment>(); }
+
+ AssertionResult ExpectSegmentNonSilence(const char* aStartExpr,
+ const char* aEndExpr,
+ TrackTime aStart, TrackTime aEnd) {
+ AudioSegment checkedRange;
+ checkedRange.AppendSlice(*mTrack->GetData(), aStart, aEnd);
+ if (!checkedRange.IsNull()) {
+ return testing::AssertionSuccess();
+ }
+ return testing::AssertionFailure()
+ << "segment [" << aStart << ":" << aEnd << "] should be non-silence";
+ }
+
+ AssertionResult ExpectSegmentSilence(const char* aStartExpr,
+ const char* aEndExpr, TrackTime aStart,
+ TrackTime aEnd) {
+ AudioSegment checkedRange;
+ checkedRange.AppendSlice(*mTrack->GetData(), aStart, aEnd);
+ if (checkedRange.IsNull()) {
+ return testing::AssertionSuccess();
+ }
+ return testing::AssertionFailure()
+ << "segment [" << aStart << ":" << aEnd << "] should be silence";
+ }
+
+ RefPtr<MockTestGraph> mGraph;
+ RefPtr<AudioDecoderInputTrack> mTrack;
+ AudioInfo mInfo;
+};
+
+TEST_F(TestAudioDecoderInputTrack, BasicAppendData) {
+ // Start from [0:10] and each time we move the time by 10ms.
+ // Expected: outputDuration=10, outputFrames=0, outputSilence=10
+ TrackTime start = 0;
+ TrackTime end = 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_EQ(mTrack->GetEnd(), end);
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start, end);
+
+ // Expected: outputDuration=20, outputFrames=5, outputSilence=15
+ RefPtr<AudioData> audio1 = CreateAudioData(5);
+ mTrack->AppendData(audio1, nullptr);
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_EQ(mTrack->GetEnd(), end);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, start + audio1->Frames());
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start + audio1->Frames(), end);
+
+ // Expected: outputDuration=30, outputFrames=15, outputSilence=15
+ RefPtr<AudioData> audio2 = CreateAudioData(10);
+ mTrack->AppendData(audio2, nullptr);
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+ EXPECT_EQ(mTrack->GetEnd(), end);
+
+ // Expected : sent all data, track should be ended in the next iteration and
+  // fill silence in this iteration.
+ mTrack->NotifyEndOfStream();
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, ProcessedMediaTrack::ALLOW_END);
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start, end);
+ EXPECT_EQ(mTrack->GetEnd(), end);
+ EXPECT_FALSE(mTrack->Ended());
+
+ // Expected : track ended
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, ProcessedMediaTrack::ALLOW_END);
+ EXPECT_EQ(mTrack->WrittenFrames(), audio1->Frames() + audio2->Frames());
+}
+
+TEST_F(TestAudioDecoderInputTrack, ClearFuture) {
+ // Start from [0:10] and each time we move the time by 10ms.
+ // Expected: appended=30, expected duration=10
+ RefPtr<AudioData> audio1 = CreateAudioData(30);
+ mTrack->AppendData(audio1, nullptr);
+ TrackTime start = 0;
+ TrackTime end = 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+
+ // In next iteration [10:20], we would consume the remaining data that was
+ // appended in the previous iteration.
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+
+ // Clear future data which is the remaining 10 frames so the track would
+ // only output silence.
+ mTrack->ClearFutureData();
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start, end);
+
+ // Test appending data again, to see if we can append data correctly after
+ // calling `ClearFutureData()`.
+ RefPtr<AudioData> audio2 = CreateAudioData(10);
+ mTrack->AppendData(audio2, nullptr);
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+
+  // Run another iteration that should only contain silence because the data
+  // we appended is only enough for one iteration.
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start, end);
+
+  // Clearing future data would also remove the EOS.
+ mTrack->NotifyEndOfStream();
+ mTrack->ClearFutureData();
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, ProcessedMediaTrack::ALLOW_END);
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start, end);
+ EXPECT_FALSE(mTrack->Ended());
+
+ // As EOS has been removed, in next iteration the track would still be
+ // running.
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, ProcessedMediaTrack::ALLOW_END);
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start, end);
+ EXPECT_FALSE(mTrack->Ended());
+ EXPECT_EQ(mTrack->WrittenFrames(),
+ (audio1->Frames() - 10 /* got clear */) + audio2->Frames());
+}
+
+TEST_F(TestAudioDecoderInputTrack, InputRateChange) {
+ // Start from [0:10] and each time we move the time by 10ms.
+ // Expected: appended=10, expected duration=10
+ RefPtr<AudioData> audio1 = CreateAudioData(10);
+ mTrack->AppendData(audio1, nullptr);
+ TrackTime start = 0;
+ TrackTime end = 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+
+ // Change input sample rate to the half, input data should be resampled and
+ // its duration would become longer.
+ // Expected: appended=10 + 5,
+ // expected duration=10 + 5*2 (resampled)
+ mInfo.mRate = kRate / 2;
+ RefPtr<AudioData> audioHalfSampleRate = CreateAudioData(5);
+ mTrack->AppendData(audioHalfSampleRate, nullptr);
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+
+ // Change input sample rate to the double, input data should be resampled and
+ // its duration would become shorter.
+ // Expected: appended=10 + 10 + 10,
+ // expected duration=10 + 10 + 10/2(resampled) + 5(silence)
+ mInfo.mRate = kRate * 2;
+ RefPtr<AudioData> audioDoubleSampleRate = CreateAudioData(10);
+ TrackTime expectedDuration = audioDoubleSampleRate->Frames() / 2;
+ mTrack->AppendData(audioDoubleSampleRate, nullptr);
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, start + expectedDuration);
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start + expectedDuration, end);
+ EXPECT_EQ(mTrack->WrittenFrames(), audio1->Frames() +
+ audioHalfSampleRate->Frames() * 2 +
+ audioDoubleSampleRate->Frames() / 2);
+}
+
+TEST_F(TestAudioDecoderInputTrack, ChannelChange) {
+ // Start from [0:10] and each time we move the time by 10ms.
+  // Track was initialized in stereo.
+ EXPECT_EQ(mTrack->NumberOfChannels(), uint32_t(2));
+
+ // But first audio data is mono, so the `NumberOfChannels()` changes to
+  // reflect the maximum channel count in the audio segment.
+ mInfo.mChannels = 1;
+ RefPtr<AudioData> audioMono = CreateAudioData(10);
+ mTrack->AppendData(audioMono, nullptr);
+ TrackTime start = 0;
+ TrackTime end = 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+ EXPECT_EQ(mTrack->NumberOfChannels(), audioMono->mChannels);
+
+ // Then append audio data with 5 channels.
+ mInfo.mChannels = 5;
+ RefPtr<AudioData> audioWithFiveChannels = CreateAudioData(10);
+ mTrack->AppendData(audioWithFiveChannels, nullptr);
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+ EXPECT_EQ(mTrack->NumberOfChannels(), audioWithFiveChannels->mChannels);
+ EXPECT_EQ(mTrack->WrittenFrames(),
+ audioMono->Frames() + audioWithFiveChannels->Frames());
+}
+
+TEST_F(TestAudioDecoderInputTrack, VolumeChange) {
+  // In order to run the volume change directly without using a real graph:
+ // one for setting the track's volume, another for the track destruction.
+ EXPECT_CALL(*mGraph, AppendMessage)
+ .Times(2)
+ .WillOnce(
+ [](UniquePtr<ControlMessageInterface> aMessage) { aMessage->Run(); })
+ .WillOnce([](UniquePtr<ControlMessageInterface> aMessage) {});
+
+ // The default volume is 1.0.
+ float expectedVolume = 1.0;
+ RefPtr<AudioData> audio = CreateAudioData(20);
+ TrackTime start = 0;
+ TrackTime end = 10;
+ mTrack->AppendData(audio, nullptr);
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+ EXPECT_TRUE(GetTrackSegment()->GetLastChunk()->mVolume == expectedVolume);
+
+ // After setting volume on the track, the data in the output chunk should be
+ // changed as well.
+ expectedVolume = 0.1;
+ mTrack->SetVolume(expectedVolume);
+ SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+ "TEST_F(TestAudioDecoderInputTrack, VolumeChange)"_ns,
+ [&] { return mTrack->Volume() == expectedVolume; });
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+ EXPECT_TRUE(GetTrackSegment()->GetLastChunk()->mVolume == expectedVolume);
+}
+
+TEST_F(TestAudioDecoderInputTrack, BatchedData) {
+ uint32_t appendedFrames = 0;
+ RefPtr<AudioData> audio = CreateAudioData(10);
+ for (size_t idx = 0; idx < 50; idx++) {
+ mTrack->AppendData(audio, nullptr);
+ appendedFrames += audio->Frames();
+ }
+
+ // First we need to call `ProcessInput` at least once to drain the track's
+ // SPSC queue, otherwise we're not able to push the batched data later.
+ TrackTime start = 0;
+ TrackTime end = 10;
+ uint32_t expectedFrames = end - start;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+
+ // The batched data would be pushed to the graph thread in around 10ms after
+ // the track first time started to batch data, which we can't control here.
+ // Therefore, we need to wait until the batched data gets cleared.
+ SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+ "TEST_F(TestAudioDecoderInputTrack, BatchedData)"_ns,
+ [&] { return !mTrack->HasBatchedData(); });
+
+  // Check that we received all the remaining data previously appended.
+ start = end;
+ end = start + (appendedFrames - expectedFrames);
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, end);
+
+ // Check that we received no more data than previously appended.
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start, end);
+ EXPECT_EQ(mTrack->WrittenFrames(), appendedFrames);
+}
+
+TEST_F(TestAudioDecoderInputTrack, OutputAndEndEvent) {
+ // Append an audio and EOS, the output event should notify the amount of
+ // frames that is equal to the amount of audio we appended.
+ RefPtr<AudioData> audio = CreateAudioData(10);
+ MozPromiseHolder<GenericPromise> holder;
+ RefPtr<GenericPromise> p = holder.Ensure(__func__);
+ MediaEventListener outputListener =
+ mTrack->OnOutput().Connect(NS_GetCurrentThread(), [&](TrackTime aFrame) {
+ EXPECT_EQ(aFrame, audio->Frames());
+ holder.Resolve(true, __func__);
+ });
+ mTrack->AppendData(audio, nullptr);
+ mTrack->NotifyEndOfStream();
+ TrackTime start = 0;
+ TrackTime end = 10;
+ mTrack->ProcessInput(start, end, ProcessedMediaTrack::ALLOW_END);
+ Unused << WaitFor(p);
+
+ // Track should end in this iteration, so the end event should be notified.
+ p = holder.Ensure(__func__);
+ MediaEventListener endListener = mTrack->OnEnd().Connect(
+ NS_GetCurrentThread(), [&]() { holder.Resolve(true, __func__); });
+ start = end;
+ end += 10;
+ mTrack->ProcessInput(start, end, ProcessedMediaTrack::ALLOW_END);
+ Unused << WaitFor(p);
+ outputListener.Disconnect();
+ endListener.Disconnect();
+}
+
+TEST_F(TestAudioDecoderInputTrack, PlaybackRateChange) {
+  // In order to run the playback change directly without using a real graph:
+ // one for setting the track's playback, another for the track destruction.
+ EXPECT_CALL(*mGraph, AppendMessage)
+ .Times(2)
+ .WillOnce(
+ [](UniquePtr<ControlMessageInterface> aMessage) { aMessage->Run(); })
+ .WillOnce([](UniquePtr<ControlMessageInterface> aMessage) {});
+
+ // Changing the playback rate.
+ float expectedPlaybackRate = 2.0;
+ mTrack->SetPlaybackRate(expectedPlaybackRate);
+ SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+ "TEST_F(TestAudioDecoderInputTrack, PlaybackRateChange)"_ns,
+ [&] { return mTrack->PlaybackRate() == expectedPlaybackRate; });
+
+  // The time stretcher in the track would usually need a certain amount of data
+  // before it outputs the time-stretched result. As we're in testing, we would
+  // only append data once, so signal an EOS after appending data, in order to
+  // ask the track to flush all samples from the time stretcher.
+ RefPtr<AudioData> audio = CreateAudioData(100);
+ mTrack->AppendData(audio, nullptr);
+ mTrack->NotifyEndOfStream();
+
+ // Playback rate is 2x, so we should only get 1/2x sample frames, another 1/2
+ // should be silence.
+ TrackTime start = 0;
+ TrackTime end = audio->Frames();
+ mTrack->ProcessInput(start, end, kNoFlags);
+ EXPECT_PRED_FORMAT2(ExpectSegmentNonSilence, start, audio->Frames() / 2);
+ EXPECT_PRED_FORMAT2(ExpectSegmentSilence, start + audio->Frames() / 2, end);
+}
diff --git a/dom/media/gtest/TestAudioDeviceEnumerator.cpp b/dom/media/gtest/TestAudioDeviceEnumerator.cpp
new file mode 100644
index 0000000000..e3c10f961e
--- /dev/null
+++ b/dom/media/gtest/TestAudioDeviceEnumerator.cpp
@@ -0,0 +1,272 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#define ENABLE_SET_CUBEB_BACKEND 1
+#include "CubebDeviceEnumerator.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/media/MediaUtils.h"
+#include "nsTArray.h"
+
+#include "MockCubeb.h"
+
+using namespace mozilla;
+using AudioDeviceSet = CubebDeviceEnumerator::AudioDeviceSet;
+
+const bool DEBUG_PRINTS = false;
+
+enum DeviceOperation { ADD, REMOVE };
+
+void TestEnumeration(MockCubeb* aMock, uint32_t aExpectedDeviceCount,
+ DeviceOperation aOperation, cubeb_device_type aType) {
+ RefPtr<CubebDeviceEnumerator> enumerator =
+ CubebDeviceEnumerator::GetInstance();
+
+ RefPtr<const AudioDeviceSet> devices;
+
+ if (aType == CUBEB_DEVICE_TYPE_INPUT) {
+ devices = enumerator->EnumerateAudioInputDevices();
+ }
+
+ if (aType == CUBEB_DEVICE_TYPE_OUTPUT) {
+ devices = enumerator->EnumerateAudioOutputDevices();
+ }
+
+ EXPECT_EQ(devices->Length(), aExpectedDeviceCount)
+ << "Device count is correct when enumerating";
+
+ if (DEBUG_PRINTS) {
+ for (const auto& deviceInfo : *devices) {
+ printf("=== Before removal\n");
+ PrintDevice(deviceInfo);
+ }
+ }
+
+ if (aOperation == DeviceOperation::REMOVE) {
+ aMock->RemoveDevice(reinterpret_cast<cubeb_devid>(1));
+ } else {
+ aMock->AddDevice(DeviceTemplate(reinterpret_cast<cubeb_devid>(123), aType));
+ }
+
+ if (aType == CUBEB_DEVICE_TYPE_INPUT) {
+ devices = enumerator->EnumerateAudioInputDevices();
+ }
+
+ if (aType == CUBEB_DEVICE_TYPE_OUTPUT) {
+ devices = enumerator->EnumerateAudioOutputDevices();
+ }
+
+ uint32_t newExpectedDeviceCount = aOperation == DeviceOperation::REMOVE
+ ? aExpectedDeviceCount - 1
+ : aExpectedDeviceCount + 1;
+
+ EXPECT_EQ(devices->Length(), newExpectedDeviceCount)
+ << "Device count is correct when enumerating after operation";
+
+ if (DEBUG_PRINTS) {
+ for (const auto& deviceInfo : *devices) {
+ printf("=== After removal\n");
+ PrintDevice(deviceInfo);
+ }
+ }
+}
+
+#ifndef ANDROID
+TEST(CubebDeviceEnumerator, EnumerateSimple)
+{
+ // It looks like we're leaking this object, but in fact it will be owned by
+ // CubebUtils: `cubeb_destroy()` is called when `ForceSetCubebContext()` is
+ // called again or when layout statics are shutdown, and we cast back to a
+ // MockCubeb* and call the dtor.
+ MockCubeb* mock = new MockCubeb();
+ mozilla::CubebUtils::ForceSetCubebContext(mock->AsCubebContext());
+
+ // We want to test whether CubebDeviceEnumerator works with and without a
+ // backend that can notify of a device collection change via callback.
+ // Additionally, we're testing that both adding and removing a device
+ // invalidates the list correctly.
+ bool supportsDeviceChangeCallback[2] = {true, false};
+ DeviceOperation operations[2] = {DeviceOperation::ADD,
+ DeviceOperation::REMOVE};
+
+ for (bool supports : supportsDeviceChangeCallback) {
+ // Shutdown for `supports` to take effect
+ CubebDeviceEnumerator::Shutdown();
+ mock->SetSupportDeviceChangeCallback(supports);
+ for (DeviceOperation op : operations) {
+ uint32_t device_count = 4;
+
+ cubeb_device_type deviceType = CUBEB_DEVICE_TYPE_INPUT;
+ AddDevices(mock, device_count, deviceType);
+ TestEnumeration(mock, device_count, op, deviceType);
+
+ deviceType = CUBEB_DEVICE_TYPE_OUTPUT;
+ AddDevices(mock, device_count, deviceType);
+ TestEnumeration(mock, device_count, op, deviceType);
+ }
+ }
+ // Shutdown to clean up the last `supports` effect
+ CubebDeviceEnumerator::Shutdown();
+}
+
+TEST(CubebDeviceEnumerator, ZeroChannelDevices)
+{
+ MockCubeb* mock = new MockCubeb();
+ mozilla::CubebUtils::ForceSetCubebContext(mock->AsCubebContext());
+
+ // Create devices with different channel count, including 0-channel
+
+ cubeb_device_info dev1 = DeviceTemplate(reinterpret_cast<cubeb_devid>(1),
+ CUBEB_DEVICE_TYPE_INPUT, "dev 1");
+ dev1.max_channels = 1;
+ mock->AddDevice(dev1);
+
+ cubeb_device_info dev2 = DeviceTemplate(reinterpret_cast<cubeb_devid>(2),
+ CUBEB_DEVICE_TYPE_INPUT, "dev 2");
+ dev2.max_channels = 0;
+ mock->AddDevice(dev2);
+
+ cubeb_device_info dev3 = DeviceTemplate(reinterpret_cast<cubeb_devid>(3),
+ CUBEB_DEVICE_TYPE_OUTPUT, "dev 3");
+ dev3.max_channels = 2;
+ mock->AddDevice(dev3);
+
+ cubeb_device_info dev4 = DeviceTemplate(reinterpret_cast<cubeb_devid>(4),
+ CUBEB_DEVICE_TYPE_OUTPUT, "dev 4");
+ dev4.max_channels = 0;
+ mock->AddDevice(dev4);
+
+ // Make sure the devices are added to cubeb.
+
+ cubeb_device_collection inputCollection = {nullptr, 0};
+ mock->EnumerateDevices(CUBEB_DEVICE_TYPE_INPUT, &inputCollection);
+ EXPECT_EQ(inputCollection.count, 2U);
+ EXPECT_EQ(inputCollection.device[0].devid, dev1.devid);
+ EXPECT_EQ(inputCollection.device[1].devid, dev2.devid);
+ mock->DestroyDeviceCollection(&inputCollection);
+ EXPECT_EQ(inputCollection.count, 0U);
+
+ cubeb_device_collection outputCollection = {nullptr, 0};
+ mock->EnumerateDevices(CUBEB_DEVICE_TYPE_OUTPUT, &outputCollection);
+ EXPECT_EQ(outputCollection.count, 2U);
+ EXPECT_EQ(outputCollection.device[0].devid, dev3.devid);
+ EXPECT_EQ(outputCollection.device[1].devid, dev4.devid);
+ mock->DestroyDeviceCollection(&outputCollection);
+ EXPECT_EQ(outputCollection.count, 0U);
+
+ // Enumerate the devices. The result should exclude the 0-channel devices.
+
+ RefPtr<CubebDeviceEnumerator> enumerator =
+ CubebDeviceEnumerator::GetInstance();
+
+ RefPtr<const AudioDeviceSet> inputDevices =
+ enumerator->EnumerateAudioInputDevices();
+ EXPECT_EQ(inputDevices->Length(), 1U);
+ EXPECT_EQ(inputDevices->ElementAt(0)->DeviceID(), dev1.devid);
+ EXPECT_EQ(inputDevices->ElementAt(0)->MaxChannels(), dev1.max_channels);
+
+ RefPtr<const AudioDeviceSet> outputDevices =
+ enumerator->EnumerateAudioOutputDevices();
+ EXPECT_EQ(outputDevices->Length(), 1U);
+ EXPECT_EQ(outputDevices->ElementAt(0)->DeviceID(), dev3.devid);
+ EXPECT_EQ(outputDevices->ElementAt(0)->MaxChannels(), dev3.max_channels);
+}
+
+#else // building for Android, which has no device enumeration support
+TEST(CubebDeviceEnumerator, EnumerateAndroid)
+{
+ MockCubeb* mock = new MockCubeb();
+ mozilla::CubebUtils::ForceSetCubebContext(mock->AsCubebContext());
+
+ RefPtr<CubebDeviceEnumerator> enumerator =
+ CubebDeviceEnumerator::GetInstance();
+
+ RefPtr<const AudioDeviceSet> inputDevices =
+ enumerator->EnumerateAudioInputDevices();
+ EXPECT_EQ(inputDevices->Length(), 1u)
+ << "Android always exposes a single input device.";
+ EXPECT_EQ((*inputDevices)[0]->MaxChannels(), 1u) << "With a single channel.";
+ EXPECT_EQ((*inputDevices)[0]->DeviceID(), nullptr)
+ << "It's always the default input device.";
+ EXPECT_TRUE((*inputDevices)[0]->Preferred())
+ << "it's always the prefered input device.";
+
+ RefPtr<const AudioDeviceSet> outputDevices =
+ enumerator->EnumerateAudioOutputDevices();
+ EXPECT_EQ(outputDevices->Length(), 1u)
+ << "Android always exposes a single output device.";
+ EXPECT_EQ((*outputDevices)[0]->MaxChannels(), 2u) << "With stereo channels.";
+ EXPECT_EQ((*outputDevices)[0]->DeviceID(), nullptr)
+ << "It's always the default output device.";
+ EXPECT_TRUE((*outputDevices)[0]->Preferred())
+ << "it's always the prefered output device.";
+}
+#endif
+
+TEST(CubebDeviceEnumerator, ForceNullCubebContext)
+{
+ mozilla::CubebUtils::ForceSetCubebContext(nullptr);
+ RefPtr<CubebDeviceEnumerator> enumerator =
+ CubebDeviceEnumerator::GetInstance();
+
+ RefPtr inputDevices = enumerator->EnumerateAudioInputDevices();
+ EXPECT_EQ(inputDevices->Length(), 0u)
+ << "Enumeration must fail, input device list must be empty.";
+
+ RefPtr outputDevices = enumerator->EnumerateAudioOutputDevices();
+ EXPECT_EQ(outputDevices->Length(), 0u)
+ << "Enumeration must fail, output device list must be empty.";
+
+ // Shutdown to clean up the null context effect
+ CubebDeviceEnumerator::Shutdown();
+}
+
+TEST(CubebDeviceEnumerator, DeviceInfoFromName)
+{
+ MockCubeb* mock = new MockCubeb();
+ mozilla::CubebUtils::ForceSetCubebContext(mock->AsCubebContext());
+
+ cubeb_device_type deviceTypes[2] = {CUBEB_DEVICE_TYPE_INPUT,
+ CUBEB_DEVICE_TYPE_OUTPUT};
+
+ bool supportsDeviceChangeCallback[2] = {true, false};
+ for (bool supports : supportsDeviceChangeCallback) {
+ // Shutdown for `supports` to take effect
+ CubebDeviceEnumerator::Shutdown();
+ mock->SetSupportDeviceChangeCallback(supports);
+ for (cubeb_device_type& deviceType : deviceTypes) {
+ cubeb_devid id_1 = reinterpret_cast<cubeb_devid>(1);
+ mock->AddDevice(DeviceTemplate(id_1, deviceType, "device name 1"));
+ cubeb_devid id_2 = reinterpret_cast<cubeb_devid>(2);
+ nsCString device_name = "device name 2"_ns;
+ mock->AddDevice(DeviceTemplate(id_2, deviceType, device_name.get()));
+ cubeb_devid id_3 = reinterpret_cast<cubeb_devid>(3);
+ mock->AddDevice(DeviceTemplate(id_3, deviceType, "device name 3"));
+
+ RefPtr<CubebDeviceEnumerator> enumerator =
+ CubebDeviceEnumerator::GetInstance();
+
+ EnumeratorSide side = (deviceType == CUBEB_DEVICE_TYPE_INPUT)
+ ? EnumeratorSide::INPUT
+ : EnumeratorSide::OUTPUT;
+ RefPtr<AudioDeviceInfo> devInfo = enumerator->DeviceInfoFromName(
+ NS_ConvertUTF8toUTF16(device_name), side);
+ EXPECT_TRUE(devInfo) << "the device exist";
+ EXPECT_EQ(devInfo->Name(), NS_ConvertUTF8toUTF16(device_name))
+ << "verify the device";
+
+ mock->RemoveDevice(id_2);
+
+ devInfo = enumerator->DeviceInfoFromName(
+ NS_ConvertUTF8toUTF16(device_name), side);
+ EXPECT_FALSE(devInfo) << "the device does not exist any more";
+ }
+ }
+  // Shutdown to clean up the last `supports` effect
+ CubebDeviceEnumerator::Shutdown();
+}
+#undef ENABLE_SET_CUBEB_BACKEND
diff --git a/dom/media/gtest/TestAudioInputProcessing.cpp b/dom/media/gtest/TestAudioInputProcessing.cpp
new file mode 100644
index 0000000000..82c1831e84
--- /dev/null
+++ b/dom/media/gtest/TestAudioInputProcessing.cpp
@@ -0,0 +1,395 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include "AudioGenerator.h"
+#include "MediaEngineWebRTCAudio.h"
+#include "MediaTrackGraphImpl.h"
+#include "PrincipalHandle.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/NullPrincipal.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Unused.h"
+#include "nsContentUtils.h"
+#include "nsTArray.h"
+
+using namespace mozilla;
+using testing::NiceMock;
+using testing::Return;
+
+class MockGraph : public MediaTrackGraphImpl {
+ public:
+ explicit MockGraph(TrackRate aRate)
+ : MediaTrackGraphImpl(0, aRate, nullptr, AbstractThread::MainThread()) {
+ ON_CALL(*this, OnGraphThread).WillByDefault(Return(true));
+ }
+
+ void Init(uint32_t aChannels) {
+ MediaTrackGraphImpl::Init(OFFLINE_THREAD_DRIVER, DIRECT_DRIVER, aChannels);
+ // Remove this graph's driver since it holds a ref. If no AppendMessage
+ // takes place, the driver never starts. This will also make sure no-one
+ // tries to use it. We are still kept alive by the self-ref. Destroy() must
+ // be called to break that cycle.
+ SetCurrentDriver(nullptr);
+ }
+
+ MOCK_CONST_METHOD0(OnGraphThread, bool());
+
+ protected:
+ ~MockGraph() = default;
+};
+
+// AudioInputProcessing will put extra frames as pre-buffering data to avoid
+// glitches in non-pass-through mode. The main goal of the test is to check how
+// many frames are left in the AudioInputProcessing's mSegment in various
+// situations after input data has been processed.
+TEST(TestAudioInputProcessing, Buffering)
+{
+ const TrackRate rate = 8000; // So packet size is 80
+ const uint32_t channels = 1;
+ auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
+ graph->Init(channels);
+
+ auto aip = MakeRefPtr<AudioInputProcessing>(channels);
+
+ const size_t frames = 72;
+
+ AudioGenerator<AudioDataValue> generator(channels, rate);
+ GraphTime processedTime;
+ GraphTime nextTime;
+ AudioSegment output;
+
+ // Toggle pass-through mode without starting
+ {
+ EXPECT_EQ(aip->PassThrough(graph), false);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
+
+ aip->SetPassThrough(graph, true);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
+
+ aip->SetPassThrough(graph, false);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
+
+ aip->SetPassThrough(graph, true);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
+ }
+
+ {
+ // Need (nextTime - processedTime) = 128 - 0 = 128 frames this round.
+ // aip has not started and set to processing mode yet, so output will be
+ // filled with silence data directly.
+ processedTime = 0;
+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(frames);
+
+ AudioSegment input;
+ generator.Generate(input, nextTime - processedTime);
+
+ aip->Process(graph, processedTime, nextTime, &input, &output);
+ EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
+ EXPECT_EQ(output.GetDuration(), nextTime);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
+ }
+
+ // Set aip to processing/non-pass-through mode
+ aip->SetPassThrough(graph, false);
+ {
+ // Need (nextTime - processedTime) = 256 - 128 = 128 frames this round.
+ // aip has not started yet, so output will be filled with silence data
+ // directly.
+ processedTime = nextTime;
+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(2 * frames);
+
+ AudioSegment input;
+ generator.Generate(input, nextTime - processedTime);
+
+ aip->Process(graph, processedTime, nextTime, &input, &output);
+ EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
+ EXPECT_EQ(output.GetDuration(), nextTime);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
+ }
+
+ // aip has been started and set to processing mode so it will insert 80 frames
+ // into aip's internal buffer as pre-buffering.
+ aip->Start(graph);
+ {
+ // Need (nextTime - processedTime) = 256 - 256 = 0 frames this round.
+ // The Process() aip will take 0 frames from input, packetize and process
+ // these frames into 0 80-frame packets (0 frames left in packetizer), insert
+ // packets into aip's internal buffer, then move 0 frames from the internal
+ // buffer to output, leaving 80 + 0 - 0 = 80 frames in aip's internal
+ // buffer.
+ processedTime = nextTime;
+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(3 * frames);
+
+ AudioSegment input;
+ generator.Generate(input, nextTime - processedTime);
+
+ aip->Process(graph, processedTime, nextTime, &input, &output);
+ EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
+ EXPECT_EQ(output.GetDuration(), nextTime);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 80);
+ }
+
+ {
+ // Need (nextTime - processedTime) = 384 - 256 = 128 frames this round.
+ // The Process() aip will take 128 frames from input, packetize and process
+ // these frames into floor(128/80) = 1 80-frame packet (48 frames left in
+ // packetizer), insert the packet into aip's internal buffer, then move 128
+ // frames from the internal buffer to output, leaving 80 + 80 - 128 = 32
+ // frames in aip's internal buffer.
+ processedTime = nextTime;
+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(4 * frames);
+
+ AudioSegment input;
+ generator.Generate(input, nextTime - processedTime);
+
+ aip->Process(graph, processedTime, nextTime, &input, &output);
+ EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
+ EXPECT_EQ(output.GetDuration(), nextTime);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 32);
+ }
+
+ {
+ // Need (nextTime - processedTime) = 384 - 384 = 0 frames this round.
+ processedTime = nextTime;
+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(5 * frames);
+
+ AudioSegment input;
+ generator.Generate(input, nextTime - processedTime);
+
+ aip->Process(graph, processedTime, nextTime, &input, &output);
+ EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
+ EXPECT_EQ(output.GetDuration(), nextTime);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 32);
+ }
+
+ {
+ // Need (nextTime - processedTime) = 512 - 384 = 128 frames this round.
+ // The Process() aip will take 128 frames from input, packetize and process
+ // these frames into floor((128+48)/80) = 2 80-frame packets (16 frames left
+ // in the packetizer), insert packets into aip's internal buffer, then move
+ // 128 frames from the internal buffer to output, leaving 32 + 2*80 - 128 =
+ // 64 frames in aip's internal buffer.
+ processedTime = nextTime;
+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(6 * frames);
+
+ AudioSegment input;
+ generator.Generate(input, nextTime - processedTime);
+
+ aip->Process(graph, processedTime, nextTime, &input, &output);
+ EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
+ EXPECT_EQ(output.GetDuration(), nextTime);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 64);
+ }
+
+ aip->SetPassThrough(graph, true);
+ {
+ // Need (nextTime - processedTime) = 512 - 512 = 0 frames this round.
+ // No buffering in pass-through mode
+ processedTime = nextTime;
+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(7 * frames);
+
+ AudioSegment input;
+ generator.Generate(input, nextTime - processedTime);
+
+ aip->Process(graph, processedTime, nextTime, &input, &output);
+ EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
+ EXPECT_EQ(output.GetDuration(), processedTime);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
+ }
+
+ aip->Stop(graph);
+ graph->Destroy();
+}
+
+TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
+{
+ const TrackRate rate = 48000; // so # of output frames from packetizer is 480
+ const uint32_t channels = 2;
+ auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
+ graph->Init(channels);
+
+ auto aip = MakeRefPtr<AudioInputProcessing>(channels);
+ AudioGenerator<AudioDataValue> generator(channels, rate);
+
+ RefPtr<nsIPrincipal> dummy_principal =
+ NullPrincipal::CreateWithoutOriginAttributes();
+ const PrincipalHandle principal1 = MakePrincipalHandle(dummy_principal.get());
+ const PrincipalHandle principal2 =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+
+ // Total 4800 frames. It's easier to test with frames of multiples of 480.
+ nsTArray<std::pair<TrackTime, PrincipalHandle>> framesWithPrincipal = {
+ {100, principal1},
+ {200, PRINCIPAL_HANDLE_NONE},
+ {300, principal2},
+ {400, principal1},
+ {440, PRINCIPAL_HANDLE_NONE},
+ // 3 packet-size above.
+ {480, principal1},
+ {480, principal2},
+ {480, PRINCIPAL_HANDLE_NONE},
+ // 3 packet-size above.
+ {500, principal2},
+ {490, principal1},
+ {600, principal1},
+ {330, principal1}
+ // 4 packet-size above.
+ };
+
+ // Generate 4800 frames of data with different principals.
+ AudioSegment input;
+ {
+ for (const auto& [duration, principal] : framesWithPrincipal) {
+ AudioSegment data;
+ generator.Generate(data, duration);
+ for (AudioSegment::ChunkIterator it(data); !it.IsEnded(); it.Next()) {
+ it->mPrincipalHandle = principal;
+ }
+
+ input.AppendFrom(&data);
+ }
+ }
+
+ auto verifyPrincipals = [&](const AudioSegment& data) {
+ TrackTime start = 0;
+ for (const auto& [duration, principal] : framesWithPrincipal) {
+ const TrackTime end = start + duration;
+
+ AudioSegment slice;
+ slice.AppendSlice(data, start, end);
+ start = end;
+
+ for (AudioSegment::ChunkIterator it(slice); !it.IsEnded(); it.Next()) {
+ EXPECT_EQ(it->mPrincipalHandle, principal);
+ }
+ }
+ };
+
+ // Check the principals in audio-processing mode.
+ EXPECT_EQ(aip->PassThrough(graph), false);
+ aip->Start(graph);
+ {
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 480);
+ AudioSegment output;
+ {
+ // Trim the prebuffering silence.
+
+ AudioSegment data;
+ aip->Process(graph, 0, 4800, &input, &data);
+ EXPECT_EQ(input.GetDuration(), 4800);
+ EXPECT_EQ(data.GetDuration(), 4800);
+
+ AudioSegment dummy;
+ dummy.AppendNullData(480);
+ aip->Process(graph, 0, 480, &dummy, &data);
+ EXPECT_EQ(dummy.GetDuration(), 480);
+ EXPECT_EQ(data.GetDuration(), 480 + 4800);
+
+ // Ignore the pre-buffering data
+ output.AppendSlice(data, 480, 480 + 4800);
+ }
+
+ verifyPrincipals(output);
+ }
+
+ // Check the principals in pass-through mode.
+ aip->SetPassThrough(graph, true);
+ {
+ AudioSegment output;
+ aip->Process(graph, 0, 4800, &input, &output);
+ EXPECT_EQ(input.GetDuration(), 4800);
+ EXPECT_EQ(output.GetDuration(), 4800);
+
+ verifyPrincipals(output);
+ }
+
+ aip->Stop(graph);
+ graph->Destroy();
+}
+
+TEST(TestAudioInputProcessing, Downmixing)
+{
+ const TrackRate rate = 44100;
+ const uint32_t channels = 4;
+ auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
+ graph->Init(channels);
+
+ auto aip = MakeRefPtr<AudioInputProcessing>(channels);
+
+ const size_t frames = 44100;
+
+ AudioGenerator<AudioDataValue> generator(channels, rate);
+ GraphTime processedTime;
+ GraphTime nextTime;
+
+ aip->SetPassThrough(graph, false);
+ aip->Start(graph);
+
+ processedTime = 0;
+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(frames);
+
+ {
+ AudioSegment input;
+ AudioSegment output;
+ generator.Generate(input, nextTime - processedTime);
+
+ // Intentionally reduce the amplitude of the generated sine wave so there's
+ // no chance the max amplitude reaches 1.0, but not enough so that 4
+ // channels summed together won't clip.
+ input.ApplyVolume(0.9);
+
+ // Process is going to see that it has 4 channels of input, and is going to
+ // downmix to mono, scaling the input by 1/4 in the process.
+ // We can't compare the input and output signals because the sine is going to
+ // be mangled by the processing passes.
+ aip->Process(graph, processedTime, nextTime, &input, &output);
+ EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
+ EXPECT_EQ(output.GetDuration(), nextTime);
+ EXPECT_EQ(output.MaxChannelCount(), 1u);
+
+ // Verify that it doesn't clip: the input signal has likely been mangled by
+ // the various processing passes, but at least it shouldn't clip. We know we
+ // always have floating point audio here, regardless of the sample-type used
+ // by Gecko.
+ for (AudioSegment::ChunkIterator iterOutput(output); !iterOutput.IsEnded();
+ iterOutput.Next()) {
+ const float* const output = iterOutput->ChannelData<float>()[0];
+ for (uint32_t i = 0; i < iterOutput->GetDuration(); i++) {
+ // Very conservative here, it's likely that the AGC lowers the volume a
+ // lot.
+ EXPECT_LE(std::abs(output[i]), 0.95);
+ }
+ }
+ }
+
+ // Now, repeat the test, checking we get the unmodified 4 channels.
+ aip->SetPassThrough(graph, true);
+
+ AudioSegment input, output;
+ processedTime = nextTime;
+ nextTime += MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(frames);
+ generator.Generate(input, nextTime - processedTime);
+
+ aip->Process(graph, processedTime, nextTime, &input, &output);
+ EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
+ EXPECT_EQ(output.GetDuration(), nextTime - processedTime);
+ // This time, no downmix: 4 channels of input, 4 channels of output
+ EXPECT_EQ(output.MaxChannelCount(), 4u);
+
+ nsTArray<AudioDataValue> inputLinearized, outputLinearized;
+ input.WriteToInterleavedBuffer(inputLinearized, input.MaxChannelCount());
+ output.WriteToInterleavedBuffer(outputLinearized, output.MaxChannelCount());
+
+ // The data should be passed through, and exactly equal.
+ for (uint32_t i = 0; i < frames * channels; i++) {
+ EXPECT_EQ(inputLinearized[i], outputLinearized[i]);
+ }
+
+ aip->Stop(graph);
+ graph->Destroy();
+}
diff --git a/dom/media/gtest/TestAudioInputSource.cpp b/dom/media/gtest/TestAudioInputSource.cpp
new file mode 100644
index 0000000000..f3f18b26a9
--- /dev/null
+++ b/dom/media/gtest/TestAudioInputSource.cpp
@@ -0,0 +1,269 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "AudioInputSource.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include "MockCubeb.h"
+#include "mozilla/gtest/WaitFor.h"
+#include "nsContentUtils.h"
+
+using namespace mozilla;
+using testing::ContainerEq;
+
+namespace {
+#define DispatchFunction(f) \
+ NS_DispatchToCurrentThread(NS_NewRunnableFunction(__func__, f))
+} // namespace
+
+class MockEventListener : public AudioInputSource::EventListener {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MockEventListener, override);
+ MOCK_METHOD1(AudioDeviceChanged, void(AudioInputSource::Id));
+ MOCK_METHOD2(AudioStateCallback,
+ void(AudioInputSource::Id,
+ AudioInputSource::EventListener::State));
+
+ private:
+ ~MockEventListener() = default;
+};
+
+TEST(TestAudioInputSource, StartAndStop)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const AudioInputSource::Id sourceId = 1;
+ const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+ const uint32_t channels = 2;
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+ const TrackRate sourceRate = 44100;
+ const TrackRate targetRate = 48000;
+
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started))
+ .Times(2);
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(4);
+
+ RefPtr<AudioInputSource> ais = MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true, testPrincipal,
+ sourceRate, targetRate);
+ ASSERT_TRUE(ais);
+
+ // Make sure start and stop works.
+ {
+ DispatchFunction([&] { ais->Start(); });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->GetInputDeviceID(), deviceId);
+ EXPECT_EQ(stream->InputChannels(), channels);
+ EXPECT_EQ(stream->SampleRate(), static_cast<uint32_t>(sourceRate));
+
+ Unused << WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { ais->Stop(); });
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+ }
+
+ // Make sure restart is ok.
+ {
+ DispatchFunction([&] { ais->Start(); });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->GetInputDeviceID(), deviceId);
+ EXPECT_EQ(stream->InputChannels(), channels);
+ EXPECT_EQ(stream->SampleRate(), static_cast<uint32_t>(sourceRate));
+
+ Unused << WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { ais->Stop(); });
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+ }
+
+ ais = nullptr; // Drop the SharedThreadPool here.
+}
+
+TEST(TestAudioInputSource, DataOutputBeforeStartAndAfterStop)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const AudioInputSource::Id sourceId = 1;
+ const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+ const uint32_t channels = 2;
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+ const TrackRate sourceRate = 44100;
+ const TrackRate targetRate = 48000;
+
+ const TrackTime requestFrames = 2 * WEBAUDIO_BLOCK_SIZE;
+
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(2);
+
+ RefPtr<AudioInputSource> ais = MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true, testPrincipal,
+ sourceRate, targetRate);
+ ASSERT_TRUE(ais);
+
+ // It's ok to call GetAudioSegment before starting
+ {
+ AudioSegment data =
+ ais->GetAudioSegment(requestFrames, AudioInputSource::Consumer::Same);
+ EXPECT_EQ(data.GetDuration(), requestFrames);
+ EXPECT_TRUE(data.IsNull());
+ }
+
+ DispatchFunction([&] { ais->Start(); });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->InputChannels(), channels);
+
+ stream->SetInputRecordingEnabled(true);
+
+ Unused << WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { ais->Stop(); });
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+
+ // Check the data output
+ {
+ nsTArray<AudioDataValue> record = stream->TakeRecordedInput();
+ size_t frames = record.Length() / channels;
+ AudioSegment deinterleaved;
+ deinterleaved.AppendFromInterleavedBuffer(record.Elements(), frames,
+ channels, testPrincipal);
+ AudioDriftCorrection driftCorrector(sourceRate, targetRate, testPrincipal);
+ AudioSegment expectedSegment = driftCorrector.RequestFrames(
+ deinterleaved, static_cast<uint32_t>(requestFrames));
+
+ CopyableTArray<AudioDataValue> expected;
+ size_t expectedSamples =
+ expectedSegment.WriteToInterleavedBuffer(expected, channels);
+
+ AudioSegment actualSegment =
+ ais->GetAudioSegment(requestFrames, AudioInputSource::Consumer::Same);
+ EXPECT_EQ(actualSegment.GetDuration(), requestFrames);
+ CopyableTArray<AudioDataValue> actual;
+ size_t actualSamples =
+ actualSegment.WriteToInterleavedBuffer(actual, channels);
+
+ EXPECT_EQ(actualSamples, expectedSamples);
+ EXPECT_EQ(actualSamples / channels, static_cast<size_t>(requestFrames));
+ EXPECT_THAT(actual, ContainerEq(expected));
+ }
+
+ ais = nullptr; // Drop the SharedThreadPool here.
+}
+
+TEST(TestAudioInputSource, ErrorCallback)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const AudioInputSource::Id sourceId = 1;
+ const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+ const uint32_t channels = 2;
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+ const TrackRate sourceRate = 44100;
+ const TrackRate targetRate = 48000;
+
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Error));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(2);
+
+ RefPtr<AudioInputSource> ais = MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true, testPrincipal,
+ sourceRate, targetRate);
+ ASSERT_TRUE(ais);
+
+ DispatchFunction([&] { ais->Start(); });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->InputChannels(), channels);
+
+ Unused << WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { stream->ForceError(); });
+ WaitFor(stream->ErrorForcedEvent());
+
+ DispatchFunction([&] { ais->Stop(); });
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+
+ ais = nullptr; // Drop the SharedThreadPool here.
+}
+
+TEST(TestAudioInputSource, DeviceChangedCallback)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const AudioInputSource::Id sourceId = 1;
+ const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+ const uint32_t channels = 2;
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+ const TrackRate sourceRate = 44100;
+ const TrackRate targetRate = 48000;
+
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener, AudioDeviceChanged(sourceId));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(2);
+
+ RefPtr<AudioInputSource> ais = MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true, testPrincipal,
+ sourceRate, targetRate);
+ ASSERT_TRUE(ais);
+
+ DispatchFunction([&] { ais->Start(); });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->InputChannels(), channels);
+
+ Unused << WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { stream->ForceDeviceChanged(); });
+ WaitFor(stream->DeviceChangeForcedEvent());
+
+ DispatchFunction([&] { ais->Stop(); });
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+
+ ais = nullptr; // Drop the SharedThreadPool here.
+}
diff --git a/dom/media/gtest/TestAudioMixer.cpp b/dom/media/gtest/TestAudioMixer.cpp
new file mode 100644
index 0000000000..2b1b5cd6a8
--- /dev/null
+++ b/dom/media/gtest/TestAudioMixer.cpp
@@ -0,0 +1,177 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AudioMixer.h"
+#include "gtest/gtest.h"
+
+using mozilla::AudioDataValue;
+using mozilla::AudioSampleFormat;
+
+namespace audio_mixer {
+
+struct MixerConsumer : public mozilla::MixerCallbackReceiver {
+ /* In this test, the different audio streams and channels are always created
+ * to cancel each other out, so the mixed result must be all zeros. */
+ void MixerCallback(mozilla::AudioChunk* aMixedBuffer, uint32_t aSampleRate) {
+ bool silent = true;
+ ASSERT_EQ(aMixedBuffer->mBufferFormat, mozilla::AUDIO_FORMAT_FLOAT32);
+ for (uint32_t c = 0; c < aMixedBuffer->ChannelCount(); c++) {
+ const float* channelData = aMixedBuffer->ChannelData<AudioDataValue>()[c];
+ for (uint32_t i = 0; i < aMixedBuffer->mDuration; i++) {
+ if (channelData[i] != 0.0) {
+ fprintf(stderr, "Sample at %u in channel %u is not silent: %f\n", i,
+ c, channelData[i]);
+ silent = false;
+ }
+ }
+ }
+ ASSERT_TRUE(silent);
+ }
+};
+
+/* Helper function to give us the maximum and minimum value that don't clip,
+ * for a given sample format (integer or floating-point). */
+template <typename T>
+T GetLowValue();
+
+template <typename T>
+T GetHighValue();
+
+template <>
+float GetLowValue<float>() {
+ return -1.0;
+}
+
+template <>
+short GetLowValue<short>() {
+ return -INT16_MAX;
+}
+
+template <>
+float GetHighValue<float>() {
+ return 1.0;
+}
+
+template <>
+short GetHighValue<short>() {
+ return INT16_MAX;
+}
+
+void FillBuffer(AudioDataValue* aBuffer, uint32_t aLength,
+ AudioDataValue aValue) {
+ AudioDataValue* end = aBuffer + aLength;
+ while (aBuffer != end) {
+ *aBuffer++ = aValue;
+ }
+}
+
+TEST(AudioMixer, Test)
+{
+ const uint32_t CHANNEL_LENGTH = 256;
+ const uint32_t AUDIO_RATE = 44100;
+ MixerConsumer consumer;
+ AudioDataValue a[CHANNEL_LENGTH * 2];
+ AudioDataValue b[CHANNEL_LENGTH * 2];
+ FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
+ FillBuffer(a + CHANNEL_LENGTH, CHANNEL_LENGTH,
+ GetHighValue<AudioDataValue>());
+ FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
+ FillBuffer(b + CHANNEL_LENGTH, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
+
+ {
+ int iterations = 2;
+ mozilla::AudioMixer mixer;
+
+ fprintf(stderr, "Test AudioMixer constant buffer length.\n");
+
+ while (iterations--) {
+ mixer.StartMixing();
+ mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ }
+ }
+
+ {
+ mozilla::AudioMixer mixer;
+
+ fprintf(stderr, "Test AudioMixer variable buffer length.\n");
+
+ FillBuffer(a, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
+ FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2,
+ GetLowValue<AudioDataValue>());
+ FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
+ FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2,
+ GetHighValue<AudioDataValue>());
+ mixer.StartMixing();
+ mixer.Mix(a, 2, CHANNEL_LENGTH / 2, AUDIO_RATE);
+ mixer.Mix(b, 2, CHANNEL_LENGTH / 2, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
+ FillBuffer(a + CHANNEL_LENGTH, CHANNEL_LENGTH,
+ GetHighValue<AudioDataValue>());
+ FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
+ FillBuffer(b + CHANNEL_LENGTH, CHANNEL_LENGTH,
+ GetLowValue<AudioDataValue>());
+ mixer.StartMixing();
+ mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ FillBuffer(a, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
+ FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2,
+ GetLowValue<AudioDataValue>());
+ FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
+ FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2,
+ GetHighValue<AudioDataValue>());
+ mixer.StartMixing();
+ mixer.Mix(a, 2, CHANNEL_LENGTH / 2, AUDIO_RATE);
+ mixer.Mix(b, 2, CHANNEL_LENGTH / 2, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ }
+
+ FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
+ FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
+
+ {
+ mozilla::AudioMixer mixer;
+
+ fprintf(stderr, "Test AudioMixer variable channel count.\n");
+
+ mixer.StartMixing();
+ mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(b, 1, CHANNEL_LENGTH, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ mixer.StartMixing();
+ mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(b, 1, CHANNEL_LENGTH, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ mixer.StartMixing();
+ mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(b, 1, CHANNEL_LENGTH, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ }
+
+ {
+ mozilla::AudioMixer mixer;
+ fprintf(stderr, "Test AudioMixer variable stream count.\n");
+
+ mixer.StartMixing();
+ mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ mixer.StartMixing();
+ mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ mixer.StartMixing();
+ mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
+ consumer.MixerCallback(mixer.MixedChunk(), AUDIO_RATE);
+ }
+}
+
+} // namespace audio_mixer
diff --git a/dom/media/gtest/TestAudioPacketizer.cpp b/dom/media/gtest/TestAudioPacketizer.cpp
new file mode 100644
index 0000000000..96a2d6f08c
--- /dev/null
+++ b/dom/media/gtest/TestAudioPacketizer.cpp
@@ -0,0 +1,163 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdint.h>
+#include <math.h>
+#include <memory>
+#include "../AudioPacketizer.h"
+#include "gtest/gtest.h"
+
+using namespace mozilla;
+
+template <typename T>
+class AutoBuffer {
+ public:
+ explicit AutoBuffer(size_t aLength) { mStorage = new T[aLength]; }
+ ~AutoBuffer() { delete[] mStorage; }
+ T* Get() { return mStorage; }
+
+ private:
+ T* mStorage;
+};
+
+int16_t Sequence(int16_t* aBuffer, uint32_t aSize, uint32_t aStart = 0) {
+ uint32_t i;
+ for (i = 0; i < aSize; i++) {
+ aBuffer[i] = aStart + i;
+ }
+ return aStart + i;
+}
+
+void IsSequence(std::unique_ptr<int16_t[]> aBuffer, uint32_t aSize,
+ uint32_t aStart = 0) {
+ for (uint32_t i = 0; i < aSize; i++) {
+ ASSERT_TRUE(aBuffer[i] == static_cast<int64_t>(aStart + i))
+ << "Buffer is not a sequence at offset " << i << '\n';
+ }
+ // Buffer is a sequence.
+}
+
+void Zero(std::unique_ptr<int16_t[]> aBuffer, uint32_t aSize) {
+ for (uint32_t i = 0; i < aSize; i++) {
+ ASSERT_TRUE(aBuffer[i] == 0)
+ << "Buffer is not null at offset " << i << '\n';
+ }
+}
+
+double sine(uint32_t aPhase) { return sin(aPhase * 2 * M_PI * 440 / 44100); }
+
+TEST(AudioPacketizer, Test)
+{
+ for (int16_t channels = 1; channels < 2; channels++) {
+ // Test that the packetizer returns zero on underrun
+ {
+ AudioPacketizer<int16_t, int16_t> ap(441, channels);
+ for (int16_t i = 0; i < 10; i++) {
+ std::unique_ptr<int16_t[]> out(ap.Output());
+ Zero(std::move(out), 441);
+ }
+ }
+ // Simple test, with input/output buffer size aligned on the packet size,
+ // alternating Input and Output calls.
+ {
+ AudioPacketizer<int16_t, int16_t> ap(441, channels);
+ int16_t seqEnd = 0;
+ for (int16_t i = 0; i < 10; i++) {
+ AutoBuffer<int16_t> b(441 * channels);
+ int16_t prevEnd = seqEnd;
+ seqEnd = Sequence(b.Get(), channels * 441, prevEnd);
+ ap.Input(b.Get(), 441);
+ std::unique_ptr<int16_t[]> out(ap.Output());
+ IsSequence(std::move(out), 441 * channels, prevEnd);
+ }
+ }
+ // Simple test, with input/output buffer size aligned on the packet size,
+ // alternating two Input and Output calls.
+ {
+ AudioPacketizer<int16_t, int16_t> ap(441, channels);
+ int16_t seqEnd = 0;
+ for (int16_t i = 0; i < 10; i++) {
+ AutoBuffer<int16_t> b(441 * channels);
+ AutoBuffer<int16_t> b1(441 * channels);
+ int16_t prevEnd0 = seqEnd;
+ seqEnd = Sequence(b.Get(), 441 * channels, prevEnd0);
+ int16_t prevEnd1 = seqEnd;
+ seqEnd = Sequence(b1.Get(), 441 * channels, seqEnd);
+ ap.Input(b.Get(), 441);
+ ap.Input(b1.Get(), 441);
+ std::unique_ptr<int16_t[]> out(ap.Output());
+ std::unique_ptr<int16_t[]> out2(ap.Output());
+ IsSequence(std::move(out), 441 * channels, prevEnd0);
+ IsSequence(std::move(out2), 441 * channels, prevEnd1);
+ }
+ }
+ // Input/output buffer size not aligned on the packet size,
+ // alternating two Input and Output calls.
+ {
+ AudioPacketizer<int16_t, int16_t> ap(441, channels);
+ int16_t prevEnd = 0;
+ int16_t prevSeq = 0;
+ for (int16_t i = 0; i < 10; i++) {
+ AutoBuffer<int16_t> b(480 * channels);
+ AutoBuffer<int16_t> b1(480 * channels);
+ prevSeq = Sequence(b.Get(), 480 * channels, prevSeq);
+ prevSeq = Sequence(b1.Get(), 480 * channels, prevSeq);
+ ap.Input(b.Get(), 480);
+ ap.Input(b1.Get(), 480);
+ std::unique_ptr<int16_t[]> out(ap.Output());
+ std::unique_ptr<int16_t[]> out2(ap.Output());
+ IsSequence(std::move(out), 441 * channels, prevEnd);
+ prevEnd += 441 * channels;
+ IsSequence(std::move(out2), 441 * channels, prevEnd);
+ prevEnd += 441 * channels;
+ }
+ printf("Available: %d\n", ap.PacketsAvailable());
+ }
+
+ // "Real-life" test case: streaming a sine wave through a packetizer, and
+ // checking that we have the right output.
+ // 128 is, for example, the size of a Web Audio API block, and 441 is the
+ // size of a webrtc.org packet when the sample rate is 44100 (10ms)
+ {
+ AudioPacketizer<int16_t, int16_t> ap(441, channels);
+ AutoBuffer<int16_t> b(128 * channels);
+ uint32_t phase = 0;
+ uint32_t outPhase = 0;
+ for (int16_t i = 0; i < 1000; i++) {
+ for (int32_t j = 0; j < 128; j++) {
+ for (int32_t c = 0; c < channels; c++) {
+ // int16_t sinewave at 440Hz/44100Hz sample rate
+ b.Get()[j * channels + c] = (2 << 14) * sine(phase);
+ }
+ phase++;
+ }
+ ap.Input(b.Get(), 128);
+ while (ap.PacketsAvailable()) {
+ std::unique_ptr<int16_t[]> packet(ap.Output());
+ for (uint32_t k = 0; k < ap.mPacketSize; k++) {
+ for (int32_t c = 0; c < channels; c++) {
+ ASSERT_TRUE(packet[k * channels + c] ==
+ static_cast<int16_t>(((2 << 14) * sine(outPhase))));
+ }
+ outPhase++;
+ }
+ }
+ }
+ }
+ // Test that clearing the packetizer empties it and starts returning zeros.
+ {
+ AudioPacketizer<int16_t, int16_t> ap(441, channels);
+ AutoBuffer<int16_t> b(440 * channels);
+ Sequence(b.Get(), 440 * channels);
+ ap.Input(b.Get(), 440);
+ EXPECT_EQ(ap.FramesAvailable(), 440U);
+ ap.Clear();
+ EXPECT_EQ(ap.FramesAvailable(), 0U);
+ EXPECT_TRUE(ap.Empty());
+ std::unique_ptr<int16_t[]> out(ap.Output());
+ Zero(std::move(out), 441);
+ }
+ }
+}
diff --git a/dom/media/gtest/TestAudioRingBuffer.cpp b/dom/media/gtest/TestAudioRingBuffer.cpp
new file mode 100644
index 0000000000..082323efd1
--- /dev/null
+++ b/dom/media/gtest/TestAudioRingBuffer.cpp
@@ -0,0 +1,1287 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AudioRingBuffer.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "mozilla/PodOperations.h"
+
+using namespace mozilla;
+using testing::ElementsAre;
+
+// Basic Write/WriteSilence/Read/Clear accounting for the FLOAT32 sample
+// format. An 11-sample allocation exposes 10 usable samples
+// (AvailableWrite() starts at 10), and AvailableRead() + AvailableWrite()
+// stays at 10 throughout.
+TEST(TestAudioRingBuffer, BasicFloat)
+{
+  AudioRingBuffer ringBuffer(11 * sizeof(float));
+  ringBuffer.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+
+  uint32_t rv = ringBuffer.WriteSilence(4);
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+
+  float in[4] = {.1, .2, .3, .4};
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 2u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 8u);
+
+  // Only 2 of the 4 requested silent frames fit; the buffer is now full.
+  rv = ringBuffer.WriteSilence(4);
+  EXPECT_EQ(rv, 2u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 0u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 10u);
+
+  // Writing to a full buffer accepts nothing.
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 0u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 0u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 10u);
+
+  // The first 4 frames read back are the leading silence.
+  float out[4] = {};
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 4u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 6u);
+  for (float f : out) {
+    EXPECT_FLOAT_EQ(f, 0.0);
+  }
+
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 8u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 2u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_FLOAT_EQ(in[i], out[i]);
+  }
+
+  // Short read: only the 2 trailing silent frames remain.
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 2u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < 2; ++i) {
+    EXPECT_FLOAT_EQ(out[i], 0.0);
+  }
+
+  // Clear() resets the buffer to empty.
+  rv = ringBuffer.Clear();
+  EXPECT_EQ(rv, 0u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+}
+
+// Basic Write/WriteSilence/Read/Clear accounting for the S16 sample format;
+// mirrors the BasicFloat test above. 11 samples are allocated but only 10
+// are usable (AvailableWrite() starts at 10).
+TEST(TestAudioRingBuffer, BasicShort)
+{
+  AudioRingBuffer ringBuffer(11 * sizeof(short));
+  ringBuffer.SetSampleFormat(AUDIO_FORMAT_S16);
+
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+
+  uint32_t rv = ringBuffer.WriteSilence(4);
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+
+  short in[4] = {1, 2, 3, 4};
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 2u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 8u);
+
+  // Only 2 of the 4 requested silent frames fit; the buffer is now full.
+  rv = ringBuffer.WriteSilence(4);
+  EXPECT_EQ(rv, 2u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 0u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 10u);
+
+  // Writing to a full buffer accepts nothing.
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 0u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 0u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 10u);
+
+  short out[4] = {};
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 4u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 6u);
+  // The first 4 frames read back are the leading silence. Iterate with the
+  // buffer's own element type; the previous `float` loop variable was a
+  // copy-paste leftover from the float variant of this test.
+  for (short s : out) {
+    EXPECT_EQ(s, 0);
+  }
+
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 8u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 2u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_EQ(in[i], out[i]);
+  }
+
+  // Short read: only the 2 trailing silent frames remain.
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 2u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < 2; ++i) {
+    EXPECT_EQ(out[i], 0);
+  }
+
+  // Clear() resets the buffer to empty.
+  rv = ringBuffer.Clear();
+  EXPECT_EQ(rv, 0u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+}
+
+// Interleaves writes and reads so the internal read/write indices advance
+// past the end of the 11-sample (10 usable) allocation, exercising index
+// wrap-around. The "WriteIndex" comments track the running (unwrapped)
+// write position.
+TEST(TestAudioRingBuffer, BasicFloat2)
+{
+  AudioRingBuffer ringBuffer(11 * sizeof(float));
+  ringBuffer.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+
+  float in[4] = {.1, .2, .3, .4};
+  uint32_t rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 2u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 8u);
+
+  float out[4] = {};
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_FLOAT_EQ(in[i], out[i]);
+  }
+
+  // WriteIndex = 12
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 2u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 8u);
+
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_FLOAT_EQ(in[i], out[i]);
+  }
+
+  // Short read: only 4 of the 8 requested frames are buffered.
+  rv = ringBuffer.Read(Span(out, 8));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_FLOAT_EQ(in[i], out[i]);
+  }
+
+  // Reading from an empty buffer returns nothing and leaves `out` untouched.
+  rv = ringBuffer.Read(Span(out, 8));
+  EXPECT_EQ(rv, 0u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_FLOAT_EQ(in[i], out[i]);
+  }
+
+  // WriteIndex = 16
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 2u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 8u);
+
+  // Partial write: only 2 of 4 frames fit, then the buffer is full.
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 2u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 0u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 10u);
+
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 0u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 0u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 10u);
+}
+
+// S16 counterpart of BasicFloat2: interleaved writes and reads push the
+// internal indices past the end of the 11-sample (10 usable) allocation,
+// exercising index wrap-around.
+TEST(TestAudioRingBuffer, BasicShort2)
+{
+  AudioRingBuffer ringBuffer(11 * sizeof(int16_t));
+  ringBuffer.SetSampleFormat(AUDIO_FORMAT_S16);
+
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+
+  int16_t in[4] = {1, 2, 3, 4};
+  uint32_t rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 2u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 8u);
+
+  int16_t out[4] = {};
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_EQ(in[i], out[i]);
+  }
+
+  // WriteIndex = 12
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 2u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 8u);
+
+  rv = ringBuffer.Read(Span(out, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_EQ(in[i], out[i]);
+  }
+
+  // Short read: only 4 of the 8 requested frames are buffered.
+  rv = ringBuffer.Read(Span(out, 8));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_EQ(in[i], out[i]);
+  }
+
+  // Reading from an empty buffer returns nothing and leaves `out` untouched.
+  rv = ringBuffer.Read(Span(out, 8));
+  EXPECT_EQ(rv, 0u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_EQ(in[i], out[i]);
+  }
+
+  // WriteIndex = 16
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 6u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 4u);
+
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 4u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 2u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 8u);
+
+  // Partial write: only 2 of 4 frames fit, then the buffer is full.
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 2u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 0u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 10u);
+
+  rv = ringBuffer.Write(Span(in, 4));
+  EXPECT_EQ(rv, 0u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 0u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 10u);
+}
+
+// ReadNoCopy() hands the reader spans pointing directly into the buffer's
+// storage; the lambda accumulates into `out` via out_ptr, so it works even
+// when the buffered data wraps and arrives as more than one contiguous span.
+TEST(TestAudioRingBuffer, NoCopyFloat)
+{
+  AudioRingBuffer ringBuffer(11 * sizeof(float));
+  ringBuffer.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  float in[8] = {.0, .1, .2, .3, .4, .5, .6, .7};
+  ringBuffer.Write(Span(in, 6));
+  //  v ReadIndex
+  // [x0: .0, x1: .1, x2: .2, x3: .3, x4: .4,
+  //  x5: .5, x6: .0, x7: .0, x8: .0, x9: .0, x10: .0]
+
+  float out[10] = {};
+  float* out_ptr = out;
+
+  uint32_t rv =
+      ringBuffer.ReadNoCopy([&out_ptr](const Span<const float> aInBuffer) {
+        PodMove(out_ptr, aInBuffer.data(), aInBuffer.Length());
+        out_ptr += aInBuffer.Length();
+        return aInBuffer.Length();
+      });
+  EXPECT_EQ(rv, 6u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_FLOAT_EQ(out[i], in[i]);
+  }
+
+  // This write wraps: frames land at indices 6..10 and then 0..2.
+  ringBuffer.Write(Span(in, 8));
+  // Now the buffer contains:
+  // [x0: .5, x1: .6, x2: .7, x3: .3, x4: .4,
+  //  x5: .5, x6: .0, x7: .1, x8: .2, x9: .3, x10: .4
+  //                   ^ ReadIndex
+  out_ptr = out;  // reset the pointer before lambdas reuse
+  rv = ringBuffer.ReadNoCopy([&out_ptr](const Span<const float> aInBuffer) {
+    PodMove(out_ptr, aInBuffer.data(), aInBuffer.Length());
+    out_ptr += aInBuffer.Length();
+    return aInBuffer.Length();
+  });
+  EXPECT_EQ(rv, 8u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_FLOAT_EQ(out[i], in[i]);
+  }
+}
+
+// S16 counterpart of NoCopyFloat: ReadNoCopy() hands out spans pointing
+// directly into the buffer's storage, including across a wrap.
+TEST(TestAudioRingBuffer, NoCopyShort)
+{
+  AudioRingBuffer ringBuffer(11 * sizeof(short));
+  ringBuffer.SetSampleFormat(AUDIO_FORMAT_S16);
+
+  short in[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+  ringBuffer.Write(Span(in, 6));
+  //  v ReadIndex
+  // [x0: 0, x1: 1, x2: 2, x3: 3, x4: 4,
+  //  x5: 5, x6: 0, x7: 0, x8: 0, x9: 0, x10: 0]
+
+  short out[10] = {};
+  short* out_ptr = out;
+
+  uint32_t rv =
+      ringBuffer.ReadNoCopy([&out_ptr](const Span<const short> aInBuffer) {
+        PodMove(out_ptr, aInBuffer.data(), aInBuffer.Length());
+        out_ptr += aInBuffer.Length();
+        return aInBuffer.Length();
+      });
+  EXPECT_EQ(rv, 6u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_EQ(out[i], in[i]);
+  }
+
+  // This write wraps: frames land at indices 6..10 and then 0..2.
+  ringBuffer.Write(Span(in, 8));
+  // Now the buffer contains:
+  // [x0: 5, x1: 6, x2: 7, x3: 3, x4: 4,
+  //  x5: 5, x6: 0, x7: 1, x8: 2, x9: 3, x10: 4
+  //                 ^ ReadIndex
+  out_ptr = out;  // reset the pointer before lambdas reuse
+  rv = ringBuffer.ReadNoCopy([&out_ptr](const Span<const short> aInBuffer) {
+    PodMove(out_ptr, aInBuffer.data(), aInBuffer.Length());
+    out_ptr += aInBuffer.Length();
+    return aInBuffer.Length();
+  });
+  EXPECT_EQ(rv, 8u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_EQ(out[i], in[i]);
+  }
+}
+
+// ReadNoCopy() with a callback that consumes at most total_frames frames per
+// invocation. Returning fewer frames than offered must stop the read and
+// leave the unconsumed frames in the buffer (see the final call: only 2 of
+// the requested 3 frames exist, so total_frames is left at 1).
+TEST(TestAudioRingBuffer, NoCopyFloat2)
+{
+  AudioRingBuffer ringBuffer(11 * sizeof(float));
+  ringBuffer.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  float in[8] = {.0, .1, .2, .3, .4, .5, .6, .7};
+  ringBuffer.Write(Span(in, 6));
+  //  v ReadIndex
+  // [x0: .0, x1: .1, x2: .2, x3: .3, x4: .4,
+  //  x5: .5, x6: .0, x7: .0, x8: .0, x9: .0, x10: .0]
+
+  float out[10] = {};
+  float* out_ptr = out;
+  uint32_t total_frames = 3;
+
+  uint32_t rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const float>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  //                            v ReadIndex
+  // [x0: .0, x1: .1, x2: .2, x3: .3, x4: .4,
+  //  x5: .5, x6: .0, x7: .0, x8: .0, x9: .0, x10: .0]
+  EXPECT_EQ(rv, 3u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 7u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 3u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_FLOAT_EQ(out[i], in[i]);
+  }
+
+  total_frames = 3;
+  rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const float>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  // [x0: .0, x1: .1, x2: .2, x3: .3, x4: .4,
+  //  x5: .5, x6: .0, x7: .0, x8: .0, x9: .0, x10: .0]
+  //                   ^ ReadIndex
+  EXPECT_EQ(rv, 3u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_FLOAT_EQ(out[i + 3], in[i + 3]);
+  }
+
+  ringBuffer.Write(Span(in, 8));
+  // Now the buffer contains:
+  // [x0: .5, x1: .6, x2: .7, x3: .3, x4: .4,
+  //  x5: .5, x6: .0, x7: .1, x8: .2, x9: .3, x10: .4
+  //                   ^ ReadIndex
+
+  // reset the pointer before lambdas reuse
+  out_ptr = out;
+  total_frames = 3;
+  rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const float>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  // Now the buffer contains:
+  // [x0: .5, x1: .6, x2: .7, x3: .3, x4: .4,
+  //  x5: .5, x6: .0, x7: .1, x8: .2, x9: .3, x10: .4
+  //                                   ^ ReadIndex
+  EXPECT_EQ(rv, 3u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 5u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 5u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_FLOAT_EQ(out[i], in[i]);
+  }
+
+  total_frames = 3;
+  rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const float>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  // Now the buffer contains:
+  //  v ReadIndex
+  // [x0: .5, x1: .6, x2: .7, x3: .3, x4: .4,
+  //  x5: .5, x6: .0, x7: .1, x8: .2, x9: .3, x10: .4
+  EXPECT_EQ(rv, 3u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 8u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 2u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_FLOAT_EQ(out[i + 3], in[i + 3]);
+  }
+
+  // Only 2 frames remain: the callback asks for 3 but gets 2.
+  total_frames = 3;
+  rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const float>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  // Now the buffer contains:
+  //                    v ReadIndex
+  // [x0: .5, x1: .6, x2: .7, x3: .3, x4: .4,
+  //  x5: .5, x6: .0, x7: .1, x8: .2, x9: .3, x10: .4
+  EXPECT_EQ(rv, 2u);
+  EXPECT_EQ(total_frames, 1u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_FLOAT_EQ(out[i + 6], in[i + 6]);
+  }
+}
+
+// S16 counterpart of NoCopyFloat2: the ReadNoCopy() callback consumes at
+// most total_frames frames per invocation; returning fewer frames than
+// offered stops the read and leaves the rest buffered.
+TEST(TestAudioRingBuffer, NoCopyShort2)
+{
+  AudioRingBuffer ringBuffer(11 * sizeof(short));
+  ringBuffer.SetSampleFormat(AUDIO_FORMAT_S16);
+
+  short in[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+  ringBuffer.Write(Span(in, 6));
+  //  v ReadIndex
+  // [x0: 0, x1: 1, x2: 2, x3: 3, x4: 4,
+  //  x5: 5, x6: 0, x7: 0, x8: 0, x9: 0, x10: 0]
+
+  short out[10] = {};
+  short* out_ptr = out;
+  uint32_t total_frames = 3;
+
+  uint32_t rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const short>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  //                        v ReadIndex
+  // [x0: 0, x1: 1, x2: 2, x3: 3, x4: 4,
+  //  x5: 5, x6: 0, x7: 0, x8: 0, x9: 0, x10: 0]
+  EXPECT_EQ(rv, 3u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 7u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 3u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_EQ(out[i], in[i]);
+  }
+
+  total_frames = 3;
+  rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const short>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  // [x0: 0, x1: 1, x2: 2, x3: 3, x4: 4,
+  //  x5: 5, x6: 0, x7: 0, x8: 0, x9: 0, x10: 0]
+  //                 ^ ReadIndex
+  EXPECT_EQ(rv, 3u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_EQ(out[i + 3], in[i + 3]);
+  }
+
+  ringBuffer.Write(Span(in, 8));
+  // Now the buffer contains:
+  // [x0: 5, x1: 6, x2: 7, x3: 3, x4: 4,
+  //  x5: 5, x6: 0, x7: 1, x8: 2, x9: 3, x10: 4
+  //                 ^ ReadIndex
+
+  // reset the pointer before lambdas reuse
+  out_ptr = out;
+  total_frames = 3;
+  rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const short>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  // Now the buffer contains:
+  // [x0: 5, x1: 6, x2: 7, x3: 3, x4: 4,
+  //  x5: 5, x6: 0, x7: 1, x8: 2, x9: 3, x10: 4
+  //                               ^ ReadIndex
+  EXPECT_EQ(rv, 3u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 5u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 5u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_EQ(out[i], in[i]);
+  }
+
+  total_frames = 3;
+  rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const short>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  // Now the buffer contains:
+  //  v ReadIndex
+  // [x0: 5, x1: 6, x2: 7, x3: 3, x4: 4,
+  //  x5: 5, x6: 0, x7: 1, x8: 2, x9: 3, x10: 4
+  EXPECT_EQ(rv, 3u);
+  EXPECT_TRUE(!ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 8u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 2u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_EQ(out[i + 3], in[i + 3]);
+  }
+
+  // Only 2 frames remain: the callback asks for 3 but gets 2.
+  total_frames = 3;
+  rv = ringBuffer.ReadNoCopy(
+      [&out_ptr, &total_frames](const Span<const short>& aInBuffer) {
+        uint32_t inFramesUsed =
+            std::min<uint32_t>(total_frames, aInBuffer.Length());
+        PodMove(out_ptr, aInBuffer.data(), inFramesUsed);
+        out_ptr += inFramesUsed;
+        total_frames -= inFramesUsed;
+        return inFramesUsed;
+      });
+  // Now the buffer contains:
+  //                  v ReadIndex
+  // [x0: 5, x1: 6, x2: 7, x3: 3, x4: 4,
+  //  x5: 5, x6: 0, x7: 1, x8: 2, x9: 3, x10: 4
+  EXPECT_EQ(rv, 2u);
+  EXPECT_EQ(total_frames, 1u);
+  EXPECT_TRUE(ringBuffer.IsEmpty());
+  EXPECT_TRUE(!ringBuffer.IsFull());
+  EXPECT_EQ(ringBuffer.AvailableWrite(), 10u);
+  EXPECT_EQ(ringBuffer.AvailableRead(), 0u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_EQ(out[i + 6], in[i + 6]);
+  }
+}
+
+// Discard() drops buffered frames without copying them out, and can never
+// drop more than AvailableRead() frames.
+TEST(TestAudioRingBuffer, DiscardFloat)
+{
+  AudioRingBuffer rb(11 * sizeof(float));
+  rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  float samples[8] = {.0, .1, .2, .3, .4, .5, .6, .7};
+  rb.Write(Span(samples, 8));
+
+  uint32_t frames = rb.Discard(3);
+  EXPECT_EQ(frames, 3u);
+  EXPECT_FALSE(rb.IsEmpty());
+  EXPECT_FALSE(rb.IsFull());
+  EXPECT_EQ(rb.AvailableWrite(), 5u);
+  EXPECT_EQ(rb.AvailableRead(), 5u);
+
+  // The next read starts right after the discarded frames.
+  float readback[8] = {};
+  frames = rb.Read(Span(readback, 3));
+  EXPECT_EQ(frames, 3u);
+  EXPECT_FALSE(rb.IsEmpty());
+  EXPECT_FALSE(rb.IsFull());
+  EXPECT_EQ(rb.AvailableWrite(), 8u);
+  EXPECT_EQ(rb.AvailableRead(), 2u);
+  for (uint32_t i = 0; i < frames; ++i) {
+    EXPECT_FLOAT_EQ(readback[i], samples[i + 3]);
+  }
+
+  // Asking to discard more than is buffered drops only what is there.
+  frames = rb.Discard(3);
+  EXPECT_EQ(frames, 2u);
+  EXPECT_TRUE(rb.IsEmpty());
+  EXPECT_FALSE(rb.IsFull());
+  EXPECT_EQ(rb.AvailableWrite(), 10u);
+  EXPECT_EQ(rb.AvailableRead(), 0u);
+
+  rb.WriteSilence(4);
+  frames = rb.Discard(6);
+  EXPECT_EQ(frames, 4u);
+  EXPECT_TRUE(rb.IsEmpty());
+  EXPECT_FALSE(rb.IsFull());
+  EXPECT_EQ(rb.AvailableWrite(), 10u);
+  EXPECT_EQ(rb.AvailableRead(), 0u);
+}
+
+// S16 counterpart of DiscardFloat: Discard() drops frames without copying
+// them and is clamped to AvailableRead().
+TEST(TestAudioRingBuffer, DiscardShort)
+{
+  AudioRingBuffer rb(11 * sizeof(short));
+  rb.SetSampleFormat(AUDIO_FORMAT_S16);
+
+  short samples[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+  rb.Write(Span(samples, 8));
+
+  uint32_t frames = rb.Discard(3);
+  EXPECT_EQ(frames, 3u);
+  EXPECT_FALSE(rb.IsEmpty());
+  EXPECT_FALSE(rb.IsFull());
+  EXPECT_EQ(rb.AvailableWrite(), 5u);
+  EXPECT_EQ(rb.AvailableRead(), 5u);
+
+  // The next read starts right after the discarded frames.
+  short readback[8] = {};
+  frames = rb.Read(Span(readback, 3));
+  EXPECT_EQ(frames, 3u);
+  EXPECT_FALSE(rb.IsEmpty());
+  EXPECT_FALSE(rb.IsFull());
+  EXPECT_EQ(rb.AvailableWrite(), 8u);
+  EXPECT_EQ(rb.AvailableRead(), 2u);
+  for (uint32_t i = 0; i < frames; ++i) {
+    EXPECT_EQ(readback[i], samples[i + 3]);
+  }
+
+  // Asking to discard more than is buffered drops only what is there.
+  frames = rb.Discard(3);
+  EXPECT_EQ(frames, 2u);
+  EXPECT_TRUE(rb.IsEmpty());
+  EXPECT_FALSE(rb.IsFull());
+  EXPECT_EQ(rb.AvailableWrite(), 10u);
+  EXPECT_EQ(rb.AvailableRead(), 0u);
+
+  rb.WriteSilence(4);
+  frames = rb.Discard(6);
+  EXPECT_EQ(frames, 4u);
+  EXPECT_TRUE(rb.IsEmpty());
+  EXPECT_FALSE(rb.IsFull());
+  EXPECT_EQ(rb.AvailableWrite(), 10u);
+  EXPECT_EQ(rb.AvailableRead(), 0u);
+}
+
+// Copies samples from one ring buffer into another via
+// Write(const AudioRingBuffer&, uint32_t).
+// NOTE(review): this suite is named TestRingBuffer, unlike the
+// TestAudioRingBuffer suite used by the other tests in this file — confirm
+// whether that is intentional (renaming would change the registered test
+// names).
+TEST(TestRingBuffer, WriteFromRing1)
+{
+  AudioRingBuffer source(11 * sizeof(float));
+  source.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+  AudioRingBuffer sink(11 * sizeof(float));
+  sink.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  float samples[4] = {.1, .2, .3, .4};
+  uint32_t count = source.Write(Span<const float>(samples, 4));
+  EXPECT_EQ(count, 4u);
+
+  EXPECT_EQ(sink.AvailableRead(), 0u);
+  count = sink.Write(source, 4);
+  EXPECT_EQ(count, 4u);
+  EXPECT_EQ(sink.AvailableRead(), 4u);
+
+  float readback[4] = {};
+  count = sink.Read(Span<float>(readback, 4));
+  EXPECT_EQ(count, 4u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_FLOAT_EQ(samples[i], readback[i]);
+  }
+}
+
+// Same as WriteFromRing1, but the destination's internal indices are first
+// advanced (WriteSilence + Clear) so the ring-to-ring copy does not start
+// at index 0.
+TEST(TestRingBuffer, WriteFromRing2)
+{
+  AudioRingBuffer source(11 * sizeof(float));
+  source.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+  AudioRingBuffer sink(11 * sizeof(float));
+  sink.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  // Advance the sink's indices without leaving any data behind.
+  sink.WriteSilence(8);
+  sink.Clear();
+
+  float samples[4] = {.1, .2, .3, .4};
+  uint32_t count = source.Write(Span<const float>(samples, 4));
+  EXPECT_EQ(count, 4u);
+  count = sink.Write(source, 4);
+  EXPECT_EQ(count, 4u);
+  EXPECT_EQ(sink.AvailableRead(), 4u);
+
+  float readback[4] = {};
+  count = sink.Read(Span<float>(readback, 4));
+  EXPECT_EQ(count, 4u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_FLOAT_EQ(samples[i], readback[i]);
+  }
+}
+
+// Same as WriteFromRing2, but the destination's indices are advanced twice
+// (8 then 4 silent frames, cleared each time) before the ring-to-ring copy.
+TEST(TestRingBuffer, WriteFromRing3)
+{
+  AudioRingBuffer source(11 * sizeof(float));
+  source.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+  AudioRingBuffer sink(11 * sizeof(float));
+  sink.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  // Advance the sink's indices without leaving any data behind.
+  sink.WriteSilence(8);
+  sink.Clear();
+  sink.WriteSilence(4);
+  sink.Clear();
+
+  float samples[4] = {.1, .2, .3, .4};
+  uint32_t count = source.Write(Span<const float>(samples, 4));
+  EXPECT_EQ(count, 4u);
+  count = sink.Write(source, 4);
+  EXPECT_EQ(count, 4u);
+  EXPECT_EQ(sink.AvailableRead(), 4u);
+
+  float readback[4] = {};
+  count = sink.Read(Span<float>(readback, 4));
+  EXPECT_EQ(count, 4u);
+  for (uint32_t i = 0; i < 4; ++i) {
+    EXPECT_FLOAT_EQ(samples[i], readback[i]);
+  }
+}
+
+// Ring-to-ring copy for S16: Write(AudioRingBuffer&, count) copies from the
+// source's read position without consuming it — the source is only drained
+// by an explicit Discard().
+TEST(TestAudioRingBuffer, WriteFromRingShort)
+{
+  AudioRingBuffer ringBuffer1(11 * sizeof(short));
+  ringBuffer1.SetSampleFormat(AUDIO_FORMAT_S16);
+
+  short in[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+  uint32_t rv = ringBuffer1.Write(Span(in, 8));
+  EXPECT_EQ(rv, 8u);
+
+  AudioRingBuffer ringBuffer2(11 * sizeof(short));
+  ringBuffer2.SetSampleFormat(AUDIO_FORMAT_S16);
+
+  rv = ringBuffer2.Write(ringBuffer1, 4);
+  EXPECT_EQ(rv, 4u);
+  EXPECT_EQ(ringBuffer2.AvailableRead(), 4u);
+  // The source still holds all 8 frames: copying did not consume it.
+  EXPECT_EQ(ringBuffer1.AvailableRead(), 8u);
+
+  short out[4] = {};
+  rv = ringBuffer2.Read(Span(out, 4));
+  // Assert the count too (it was previously only used as a loop bound).
+  EXPECT_EQ(rv, 4u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_EQ(out[i], in[i]);
+  }
+
+  rv = ringBuffer2.Write(ringBuffer1, 4);
+  EXPECT_EQ(rv, 4u);
+  EXPECT_EQ(ringBuffer2.AvailableRead(), 4u);
+  EXPECT_EQ(ringBuffer1.AvailableRead(), 8u);
+
+  // Drop the frames already copied so the next copy picks up in[4..7].
+  ringBuffer1.Discard(4);
+  rv = ringBuffer2.Write(ringBuffer1, 4);
+  EXPECT_EQ(rv, 4u);
+  EXPECT_EQ(ringBuffer2.AvailableRead(), 8u);
+  EXPECT_EQ(ringBuffer1.AvailableRead(), 4u);
+
+  short out2[8] = {};
+  rv = ringBuffer2.Read(Span(out2, 8));
+  EXPECT_EQ(rv, 8u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_EQ(out2[i], in[i]);
+  }
+}
+
+// Ring-to-ring copy for FLOAT32: Write(AudioRingBuffer&, count) copies from
+// the source's read position without consuming it — the source is only
+// drained by an explicit Discard().
+TEST(TestAudioRingBuffer, WriteFromRingFloat)
+{
+  AudioRingBuffer ringBuffer1(11 * sizeof(float));
+  ringBuffer1.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  float in[8] = {.0, .1, .2, .3, .4, .5, .6, .7};
+  uint32_t rv = ringBuffer1.Write(Span(in, 8));
+  EXPECT_EQ(rv, 8u);
+
+  AudioRingBuffer ringBuffer2(11 * sizeof(float));
+  ringBuffer2.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  rv = ringBuffer2.Write(ringBuffer1, 4);
+  EXPECT_EQ(rv, 4u);
+  EXPECT_EQ(ringBuffer2.AvailableRead(), 4u);
+  // The source still holds all 8 frames: copying did not consume it.
+  EXPECT_EQ(ringBuffer1.AvailableRead(), 8u);
+
+  float out[4] = {};
+  rv = ringBuffer2.Read(Span(out, 4));
+  // Assert the count too (it was previously only used as a loop bound).
+  EXPECT_EQ(rv, 4u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_FLOAT_EQ(out[i], in[i]);
+  }
+
+  rv = ringBuffer2.Write(ringBuffer1, 4);
+  EXPECT_EQ(rv, 4u);
+  EXPECT_EQ(ringBuffer2.AvailableRead(), 4u);
+  EXPECT_EQ(ringBuffer1.AvailableRead(), 8u);
+
+  // Drop the frames already copied so the next copy picks up in[4..7].
+  ringBuffer1.Discard(4);
+  rv = ringBuffer2.Write(ringBuffer1, 4);
+  EXPECT_EQ(rv, 4u);
+  EXPECT_EQ(ringBuffer2.AvailableRead(), 8u);
+  EXPECT_EQ(ringBuffer1.AvailableRead(), 4u);
+
+  float out2[8] = {};
+  rv = ringBuffer2.Read(Span(out2, 8));
+  EXPECT_EQ(rv, 8u);
+  for (uint32_t i = 0; i < rv; ++i) {
+    EXPECT_FLOAT_EQ(out2[i], in[i]);
+  }
+}
+
+// Prepending silence after one frame has already been consumed forces the
+// silent frames to wrap around the start of the 9-sample allocation.
+TEST(TestAudioRingBuffer, PrependSilenceWrapsFloat)
+{
+  AudioRingBuffer rb(9 * sizeof(float));
+  rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  float samples[6] = {.2, .3, .4, .5, .6, .7};
+  uint32_t count = rb.Write(Span(samples, 6));
+  EXPECT_EQ(count, 6u);
+
+  float readback[8] = {};
+  auto dest = Span(readback, 8);
+  count = rb.Read(dest.Subspan(0, 1));
+  EXPECT_EQ(count, 1u);
+
+  // The two silent frames land at ring-buffer indices 0 and 8, wrapping
+  // around the start.
+  count = rb.PrependSilence(2);
+  EXPECT_EQ(count, 2u);
+
+  count = rb.Read(dest.Subspan(1, 7));
+  EXPECT_EQ(count, 7u);
+
+  EXPECT_THAT(readback, ElementsAre(.2, 0, 0, .3, .4, .5, .6, .7));
+}
+
+// S16 counterpart of PrependSilenceWrapsFloat: the prepended silence wraps
+// around the start of the 9-sample allocation.
+TEST(TestAudioRingBuffer, PrependSilenceWrapsShort)
+{
+  AudioRingBuffer rb(9 * sizeof(short));
+  rb.SetSampleFormat(AUDIO_FORMAT_S16);
+
+  short samples[6] = {2, 3, 4, 5, 6, 7};
+  uint32_t count = rb.Write(Span(samples, 6));
+  EXPECT_EQ(count, 6u);
+
+  short readback[8] = {};
+  auto dest = Span(readback, 8);
+  count = rb.Read(dest.Subspan(0, 1));
+  EXPECT_EQ(count, 1u);
+
+  // The two silent frames land at ring-buffer indices 0 and 8, wrapping
+  // around the start.
+  count = rb.PrependSilence(2);
+  EXPECT_EQ(count, 2u);
+
+  count = rb.Read(dest.Subspan(1, 7));
+  EXPECT_EQ(count, 7u);
+
+  EXPECT_THAT(readback, ElementsAre(2, 0, 0, 3, 4, 5, 6, 7));
+}
+
+// Prepending silence when there is free room before the read index: the
+// silent frames fit at indices 2 and 3 without wrapping.
+TEST(TestAudioRingBuffer, PrependSilenceNoWrapFloat)
+{
+  AudioRingBuffer rb(9 * sizeof(float));
+  rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+  float samples[6] = {.2, .3, .4, .5, .6, .7};
+  uint32_t count = rb.Write(Span(samples, 6));
+  EXPECT_EQ(count, 6u);
+
+  float readback[8] = {};
+  auto dest = Span(readback, 8);
+  count = rb.Read(dest.To(4));
+  EXPECT_EQ(count, 4u);
+
+  // The silent frames occupy ring-buffer indices 2 and 3.
+  count = rb.PrependSilence(2);
+  EXPECT_EQ(count, 2u);
+
+  count = rb.Read(dest.Subspan(4, 4));
+  EXPECT_EQ(count, 4u);
+
+  EXPECT_THAT(readback, ElementsAre(.2, .3, .4, .5, 0, 0, .6, .7));
+}
+
+TEST(TestAudioRingBuffer, PrependSilenceNoWrapShort)
+{
+ AudioRingBuffer rb(9 * sizeof(short));
+ rb.SetSampleFormat(AUDIO_FORMAT_S16);
+
+ short in[6] = {2, 3, 4, 5, 6, 7};
+ uint32_t rv = rb.Write(Span(in, 6));
+ EXPECT_EQ(rv, 6u);
+
+ short out[8] = {};
+ auto outSpan = Span(out, 8);
+ rv = rb.Read(outSpan.To(4));
+ EXPECT_EQ(rv, 4u);
+
+ // PrependSilence will put the silent samples at indices 2 and 3 of the ring
+ // buffer.
+ rv = rb.PrependSilence(2);
+ EXPECT_EQ(rv, 2u);
+
+ rv = rb.Read(outSpan.Subspan(4, 4));
+ EXPECT_EQ(rv, 4u);
+
+ EXPECT_THAT(out, ElementsAre(2, 3, 4, 5, 0, 0, 6, 7));
+}
+
+TEST(TestAudioRingBuffer, SetLengthBytesNoWrapFloat)
+{
+ AudioRingBuffer rb(6 * sizeof(float));
+ rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+ float in[5] = {.1, .2, .3, .4, .5};
+ uint32_t rv = rb.Write(Span(in, 5));
+ EXPECT_EQ(rv, 5u);
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 0u);
+ EXPECT_EQ(rb.Capacity(), 6u);
+
+ EXPECT_TRUE(rb.SetLengthBytes(11 * sizeof(float)));
+ float out[10] = {};
+ rv = rb.Read(Span(out, 10));
+ EXPECT_EQ(rv, 5u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 10u);
+ EXPECT_EQ(rb.Capacity(), 11u);
+ EXPECT_THAT(out, ElementsAre(.1, .2, .3, .4, .5, 0, 0, 0, 0, 0));
+}
+
+TEST(TestAudioRingBuffer, SetLengthBytesNoWrapShort)
+{
+ AudioRingBuffer rb(6 * sizeof(short));
+ rb.SetSampleFormat(AUDIO_FORMAT_S16);
+
+ short in[5] = {1, 2, 3, 4, 5};
+ uint32_t rv = rb.Write(Span(in, 5));
+ EXPECT_EQ(rv, 5u);
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 0u);
+ EXPECT_EQ(rb.Capacity(), 6u);
+
+ EXPECT_TRUE(rb.SetLengthBytes(11 * sizeof(short)));
+ short out[10] = {};
+ rv = rb.Read(Span(out, 10));
+ EXPECT_EQ(rv, 5u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 10u);
+ EXPECT_EQ(rb.Capacity(), 11u);
+ EXPECT_THAT(out, ElementsAre(1, 2, 3, 4, 5, 0, 0, 0, 0, 0));
+}
+
+TEST(TestAudioRingBuffer, SetLengthBytesWrap1PartFloat)
+{
+ AudioRingBuffer rb(6 * sizeof(float));
+ rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+ EXPECT_EQ(rb.WriteSilence(3), 3u);
+ EXPECT_EQ(rb.AvailableRead(), 3u);
+ EXPECT_EQ(rb.AvailableWrite(), 2u);
+ EXPECT_EQ(rb.Capacity(), 6u);
+
+ float outSilence[3] = {};
+ EXPECT_EQ(rb.Read(Span(outSilence, 3)), 3u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 5u);
+
+ float in[5] = {.1, .2, .3, .4, .5};
+ EXPECT_EQ(rb.Write(Span(in, 5)), 5u);
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 0u);
+
+ EXPECT_TRUE(rb.SetLengthBytes(11 * sizeof(float)));
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 5u);
+
+ float in2[2] = {.6, .7};
+ EXPECT_EQ(rb.Write(Span(in2, 2)), 2u);
+ EXPECT_EQ(rb.AvailableRead(), 7u);
+ EXPECT_EQ(rb.AvailableWrite(), 3u);
+
+ float out[10] = {};
+ EXPECT_EQ(rb.Read(Span(out, 10)), 7u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 10u);
+ EXPECT_EQ(rb.Capacity(), 11u);
+ EXPECT_THAT(out, ElementsAre(.1, .2, .3, .4, .5, .6, .7, 0, 0, 0));
+}
+
+TEST(TestAudioRingBuffer, SetLengthBytesWrap1PartShort)
+{
+ AudioRingBuffer rb(6 * sizeof(short));
+ rb.SetSampleFormat(AUDIO_FORMAT_S16);
+
+ EXPECT_EQ(rb.WriteSilence(3), 3u);
+ EXPECT_EQ(rb.AvailableRead(), 3u);
+ EXPECT_EQ(rb.AvailableWrite(), 2u);
+ EXPECT_EQ(rb.Capacity(), 6u);
+
+ short outSilence[3] = {};
+ EXPECT_EQ(rb.Read(Span(outSilence, 3)), 3u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 5u);
+
+ short in[5] = {1, 2, 3, 4, 5};
+ EXPECT_EQ(rb.Write(Span(in, 5)), 5u);
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 0u);
+
+ EXPECT_TRUE(rb.SetLengthBytes(11 * sizeof(short)));
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 5u);
+
+ short in2[2] = {6, 7};
+ EXPECT_EQ(rb.Write(Span(in2, 2)), 2u);
+ EXPECT_EQ(rb.AvailableRead(), 7u);
+ EXPECT_EQ(rb.AvailableWrite(), 3u);
+
+ short out[10] = {};
+ EXPECT_EQ(rb.Read(Span(out, 10)), 7u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 10u);
+ EXPECT_EQ(rb.Capacity(), 11u);
+ EXPECT_THAT(out, ElementsAre(1, 2, 3, 4, 5, 6, 7, 0, 0, 0));
+}
+
+TEST(TestAudioRingBuffer, SetLengthBytesWrap2PartsFloat)
+{
+ AudioRingBuffer rb(6 * sizeof(float));
+ rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+ EXPECT_EQ(rb.WriteSilence(3), 3u);
+ EXPECT_EQ(rb.AvailableRead(), 3u);
+ EXPECT_EQ(rb.AvailableWrite(), 2u);
+ EXPECT_EQ(rb.Capacity(), 6u);
+
+ float outSilence[3] = {};
+ EXPECT_EQ(rb.Read(Span(outSilence, 3)), 3u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 5u);
+
+ float in[5] = {.1, .2, .3, .4, .5};
+ EXPECT_EQ(rb.Write(Span(in, 5)), 5u);
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 0u);
+
+ EXPECT_TRUE(rb.SetLengthBytes(8 * sizeof(float)));
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 2u);
+
+ float in2[2] = {.6, .7};
+ EXPECT_EQ(rb.Write(Span(in2, 2)), 2u);
+ EXPECT_EQ(rb.AvailableRead(), 7u);
+ EXPECT_EQ(rb.AvailableWrite(), 0u);
+
+ float out[8] = {};
+ EXPECT_EQ(rb.Read(Span(out, 8)), 7u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 7u);
+ EXPECT_EQ(rb.Capacity(), 8u);
+ EXPECT_THAT(out, ElementsAre(.1, .2, .3, .4, .5, .6, .7, 0));
+}
+
+TEST(TestAudioRingBuffer, SetLengthBytesWrap2PartsShort)
+{
+ AudioRingBuffer rb(6 * sizeof(short));
+ rb.SetSampleFormat(AUDIO_FORMAT_S16);
+
+ EXPECT_EQ(rb.WriteSilence(3), 3u);
+ EXPECT_EQ(rb.AvailableRead(), 3u);
+ EXPECT_EQ(rb.AvailableWrite(), 2u);
+ EXPECT_EQ(rb.Capacity(), 6u);
+
+ short outSilence[3] = {};
+ EXPECT_EQ(rb.Read(Span(outSilence, 3)), 3u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 5u);
+
+ short in[5] = {1, 2, 3, 4, 5};
+ EXPECT_EQ(rb.Write(Span(in, 5)), 5u);
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 0u);
+
+ EXPECT_TRUE(rb.SetLengthBytes(8 * sizeof(short)));
+ EXPECT_EQ(rb.AvailableRead(), 5u);
+ EXPECT_EQ(rb.AvailableWrite(), 2u);
+
+ short in2[2] = {6, 7};
+ EXPECT_EQ(rb.Write(Span(in2, 2)), 2u);
+ EXPECT_EQ(rb.AvailableRead(), 7u);
+ EXPECT_EQ(rb.AvailableWrite(), 0u);
+
+ short out[8] = {};
+ EXPECT_EQ(rb.Read(Span(out, 8)), 7u);
+ EXPECT_EQ(rb.AvailableRead(), 0u);
+ EXPECT_EQ(rb.AvailableWrite(), 7u);
+ EXPECT_EQ(rb.Capacity(), 8u);
+ EXPECT_THAT(out, ElementsAre(1, 2, 3, 4, 5, 6, 7, 0));
+}
diff --git a/dom/media/gtest/TestAudioSegment.cpp b/dom/media/gtest/TestAudioSegment.cpp
new file mode 100644
index 0000000000..ee44839283
--- /dev/null
+++ b/dom/media/gtest/TestAudioSegment.cpp
@@ -0,0 +1,470 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AudioSegment.h"
+#include <iostream>
+#include "gtest/gtest.h"
+
+#include "AudioGenerator.h"
+
+using namespace mozilla;
+
+namespace audio_segment {
+
+/* Helper function to give us the maximum and minimum value that don't clip,
+ * for a given sample format (integer or floating-point). */
+template <typename T>
+T GetLowValue();
+
+template <typename T>
+T GetHighValue();
+
+template <typename T>
+T GetSilentValue();
+
+template <>
+float GetLowValue<float>() {
+ return -1.0;
+}
+
+template <>
+int16_t GetLowValue<short>() {
+ return -INT16_MAX;
+}
+
+template <>
+float GetHighValue<float>() {
+ return 1.0;
+}
+
+template <>
+int16_t GetHighValue<short>() {
+ return INT16_MAX;
+}
+
+template <>
+float GetSilentValue() {
+ return 0.0;
+}
+
+template <>
+int16_t GetSilentValue() {
+ return 0;
+}
+
+// Get an array of planar audio buffers that has the inverse of the index of the
+// channel (1-indexed) as samples.
+template <typename T>
+const T* const* GetPlanarChannelArray(size_t aChannels, size_t aSize) {
+ T** channels = new T*[aChannels];
+ for (size_t c = 0; c < aChannels; c++) {
+ channels[c] = new T[aSize];
+ for (size_t i = 0; i < aSize; i++) {
+ channels[c][i] = FloatToAudioSample<T>(1. / (c + 1));
+ }
+ }
+ return channels;
+}
+
+template <typename T>
+void DeletePlanarChannelsArray(const T* const* aArrays, size_t aChannels) {
+ for (size_t channel = 0; channel < aChannels; channel++) {
+ delete[] aArrays[channel];
+ }
+ delete[] aArrays;
+}
+
+template <typename T>
+T** GetPlanarArray(size_t aChannels, size_t aSize) {
+ T** channels = new T*[aChannels];
+ for (size_t c = 0; c < aChannels; c++) {
+ channels[c] = new T[aSize];
+ for (size_t i = 0; i < aSize; i++) {
+ channels[c][i] = 0.0f;
+ }
+ }
+ return channels;
+}
+
+template <typename T>
+void DeletePlanarArray(T** aArrays, size_t aChannels) {
+ for (size_t channel = 0; channel < aChannels; channel++) {
+ delete[] aArrays[channel];
+ }
+ delete[] aArrays;
+}
+
+// Get an array of audio samples that have the inverse of the index of the
+// channel (1-indexed) as samples.
+template <typename T>
+const T* GetInterleavedChannelArray(size_t aChannels, size_t aSize) {
+ size_t sampleCount = aChannels * aSize;
+ T* samples = new T[sampleCount];
+ for (size_t i = 0; i < sampleCount; i++) {
+ uint32_t channel = (i % aChannels) + 1;
+ samples[i] = FloatToAudioSample<T>(1. / channel);
+ }
+ return samples;
+}
+
+template <typename T>
+void DeleteInterleavedChannelArray(const T* aArray) {
+ delete[] aArray;
+}
+
+bool FuzzyEqual(float aLhs, float aRhs) { return std::abs(aLhs - aRhs) < 0.01; }
+
+template <typename SrcT, typename DstT>
+void TestInterleaveAndConvert() {
+ size_t arraySize = 1024;
+ size_t maxChannels = 8; // 7.1
+ for (uint32_t channels = 1; channels < maxChannels; channels++) {
+ const SrcT* const* src = GetPlanarChannelArray<SrcT>(channels, arraySize);
+ DstT* dst = new DstT[channels * arraySize];
+
+ InterleaveAndConvertBuffer(src, arraySize, 1.0, channels, dst);
+
+ uint32_t channelIndex = 0;
+ for (size_t i = 0; i < arraySize * channels; i++) {
+ ASSERT_TRUE(FuzzyEqual(
+ dst[i], FloatToAudioSample<DstT>(1. / (channelIndex + 1))));
+ channelIndex++;
+ channelIndex %= channels;
+ }
+
+ DeletePlanarChannelsArray(src, channels);
+ delete[] dst;
+ }
+}
+
+template <typename SrcT, typename DstT>
+void TestDeinterleaveAndConvert() {
+ size_t arraySize = 1024;
+ size_t maxChannels = 8; // 7.1
+ for (uint32_t channels = 1; channels < maxChannels; channels++) {
+ const SrcT* src = GetInterleavedChannelArray<SrcT>(channels, arraySize);
+ DstT** dst = GetPlanarArray<DstT>(channels, arraySize);
+
+ DeinterleaveAndConvertBuffer(src, arraySize, channels, dst);
+
+ for (size_t channel = 0; channel < channels; channel++) {
+ for (size_t i = 0; i < arraySize; i++) {
+ ASSERT_TRUE(FuzzyEqual(dst[channel][i],
+ FloatToAudioSample<DstT>(1. / (channel + 1))));
+ }
+ }
+
+ DeleteInterleavedChannelArray(src);
+ DeletePlanarArray(dst, channels);
+ }
+}
+
+uint8_t gSilence[4096] = {0};
+
+template <typename T>
+T* SilentChannel() {
+ return reinterpret_cast<T*>(gSilence);
+}
+
+template <typename T>
+void TestUpmixStereo() {
+ size_t arraySize = 1024;
+ nsTArray<T*> channels;
+ nsTArray<const T*> channelsptr;
+
+ channels.SetLength(1);
+ channelsptr.SetLength(1);
+
+ channels[0] = new T[arraySize];
+
+ for (size_t i = 0; i < arraySize; i++) {
+ channels[0][i] = GetHighValue<T>();
+ }
+ channelsptr[0] = channels[0];
+
+ AudioChannelsUpMix(&channelsptr, 2, SilentChannel<T>());
+
+ for (size_t channel = 0; channel < 2; channel++) {
+ for (size_t i = 0; i < arraySize; i++) {
+ ASSERT_TRUE(channelsptr[channel][i] == GetHighValue<T>());
+ }
+ }
+ delete[] channels[0];
+}
+
+template <typename T>
+void TestDownmixStereo() {
+  const size_t arraySize = 1024;
+  nsTArray<const T*> inputptr;
+  nsTArray<T*> input;
+  T** output;
+
+  output = new T*[1];
+  output[0] = new T[arraySize];
+
+  input.SetLength(2);
+  inputptr.SetLength(2);
+
+  for (size_t channel = 0; channel < input.Length(); channel++) {
+    input[channel] = new T[arraySize];
+    for (size_t i = 0; i < arraySize; i++) {
+      input[channel][i] = channel == 0 ? GetLowValue<T>() : GetHighValue<T>();
+    }
+    inputptr[channel] = input[channel];
+  }
+
+  AudioChannelsDownMix<T, T>(inputptr, Span(output, 1), arraySize);
+
+  for (size_t i = 0; i < arraySize; i++) {
+    // The low and high channels average out to silence after the downmix.
+    ASSERT_TRUE(output[0][i] == GetSilentValue<T>());
+  }
+
+  delete[] output[0];
+  delete[] output;
+}
+
+TEST(AudioSegment, Test)
+{
+ TestInterleaveAndConvert<float, float>();
+ TestInterleaveAndConvert<float, int16_t>();
+ TestInterleaveAndConvert<int16_t, float>();
+ TestInterleaveAndConvert<int16_t, int16_t>();
+ TestDeinterleaveAndConvert<float, float>();
+ TestDeinterleaveAndConvert<float, int16_t>();
+ TestDeinterleaveAndConvert<int16_t, float>();
+ TestDeinterleaveAndConvert<int16_t, int16_t>();
+ TestUpmixStereo<float>();
+ TestUpmixStereo<int16_t>();
+ TestDownmixStereo<float>();
+ TestDownmixStereo<int16_t>();
+}
+
+template <class T, uint32_t Channels>
+void fillChunk(AudioChunk* aChunk, int aDuration) {
+ static_assert(Channels != 0, "Filling 0 channels is a no-op");
+
+ aChunk->mDuration = aDuration;
+
+ AutoTArray<nsTArray<T>, Channels> buffer;
+ buffer.SetLength(Channels);
+ aChunk->mChannelData.ClearAndRetainStorage();
+ aChunk->mChannelData.SetCapacity(Channels);
+ for (nsTArray<T>& channel : buffer) {
+ T* ch = channel.AppendElements(aDuration);
+ for (int i = 0; i < aDuration; ++i) {
+ ch[i] = GetHighValue<T>();
+ }
+ aChunk->mChannelData.AppendElement(ch);
+ }
+
+ aChunk->mBuffer = new mozilla::SharedChannelArrayBuffer<T>(std::move(buffer));
+ aChunk->mBufferFormat = AudioSampleTypeToFormat<T>::Format;
+}
+
+TEST(AudioSegment, FlushAfter_ZeroDuration)
+{
+ AudioChunk c;
+ fillChunk<float, 2>(&c, 10);
+
+ AudioSegment s;
+ s.AppendAndConsumeChunk(std::move(c));
+ s.FlushAfter(0);
+ EXPECT_EQ(s.GetDuration(), 0);
+}
+
+TEST(AudioSegment, FlushAfter_SmallerDuration)
+{
+ // It was crashing when the first chunk was silence (null) and FlushAfter
+ // was called for a duration, smaller or equal to the duration of the
+ // first chunk.
+ TrackTime duration = 10;
+ TrackTime smaller_duration = 8;
+ AudioChunk c1;
+ c1.SetNull(duration);
+ AudioChunk c2;
+ fillChunk<float, 2>(&c2, duration);
+
+ AudioSegment s;
+ s.AppendAndConsumeChunk(std::move(c1));
+ s.AppendAndConsumeChunk(std::move(c2));
+ s.FlushAfter(smaller_duration);
+ EXPECT_EQ(s.GetDuration(), smaller_duration) << "Check new duration";
+
+ TrackTime chunkByChunkDuration = 0;
+ for (AudioSegment::ChunkIterator iter(s); !iter.IsEnded(); iter.Next()) {
+ chunkByChunkDuration += iter->GetDuration();
+ }
+ EXPECT_EQ(s.GetDuration(), chunkByChunkDuration)
+ << "Confirm duration chunk by chunk";
+}
+
+TEST(AudioSegment, MemoizedOutputChannelCount)
+{
+ AudioSegment s;
+ EXPECT_EQ(s.MaxChannelCount(), 0U) << "0 channels on init";
+
+ s.AppendNullData(1);
+ EXPECT_EQ(s.MaxChannelCount(), 0U) << "Null data has 0 channels";
+
+ s.Clear();
+ EXPECT_EQ(s.MaxChannelCount(), 0U) << "Still 0 after clearing";
+
+ AudioChunk c1;
+ fillChunk<float, 1>(&c1, 1);
+ s.AppendAndConsumeChunk(std::move(c1));
+ EXPECT_EQ(s.MaxChannelCount(), 1U) << "A single chunk's channel count";
+
+ AudioChunk c2;
+ fillChunk<float, 2>(&c2, 1);
+ s.AppendAndConsumeChunk(std::move(c2));
+ EXPECT_EQ(s.MaxChannelCount(), 2U) << "The max of two chunks' channel count";
+
+ s.ForgetUpTo(2);
+ EXPECT_EQ(s.MaxChannelCount(), 2U) << "Memoized value with null chunks";
+
+ s.Clear();
+ EXPECT_EQ(s.MaxChannelCount(), 2U) << "Still memoized after clearing";
+
+ AudioChunk c3;
+ fillChunk<float, 1>(&c3, 1);
+ s.AppendAndConsumeChunk(std::move(c3));
+ EXPECT_EQ(s.MaxChannelCount(), 1U) << "Real chunk trumps memoized value";
+
+ s.Clear();
+ EXPECT_EQ(s.MaxChannelCount(), 1U) << "Memoized value was updated";
+}
+
+TEST(AudioSegment, AppendAndConsumeChunk)
+{
+ AudioChunk c;
+ fillChunk<float, 2>(&c, 10);
+ AudioChunk temp(c);
+ EXPECT_TRUE(c.mBuffer->IsShared());
+
+ AudioSegment s;
+ s.AppendAndConsumeChunk(std::move(temp));
+ EXPECT_FALSE(s.IsEmpty());
+ EXPECT_TRUE(c.mBuffer->IsShared());
+
+ s.Clear();
+ EXPECT_FALSE(c.mBuffer->IsShared());
+}
+
+TEST(AudioSegment, AppendAndConsumeEmptyChunk)
+{
+ AudioChunk c;
+ AudioSegment s;
+ s.AppendAndConsumeChunk(std::move(c));
+ EXPECT_TRUE(s.IsEmpty());
+}
+
+TEST(AudioSegment, AppendAndConsumeNonEmptyZeroDurationChunk)
+{
+ AudioChunk c;
+ fillChunk<float, 2>(&c, 0);
+ AudioChunk temp(c);
+ EXPECT_TRUE(c.mBuffer->IsShared());
+
+ AudioSegment s;
+ s.AppendAndConsumeChunk(std::move(temp));
+ EXPECT_TRUE(s.IsEmpty());
+ EXPECT_FALSE(c.mBuffer->IsShared());
+}
+
+TEST(AudioSegment, CombineChunksInAppendAndConsumeChunk)
+{
+ AudioChunk source;
+ fillChunk<float, 2>(&source, 10);
+
+ auto checkChunks = [&](const AudioSegment& aSegement,
+ const nsTArray<TrackTime>& aDurations) {
+ size_t i = 0;
+ for (AudioSegment::ConstChunkIterator iter(aSegement); !iter.IsEnded();
+ iter.Next()) {
+ EXPECT_EQ(iter->GetDuration(), aDurations[i++]);
+ }
+ EXPECT_EQ(i, aDurations.Length());
+ };
+
+ // The chunks can be merged if their duration are adjacent.
+ {
+ AudioChunk c1(source);
+ c1.SliceTo(2, 5);
+
+ AudioChunk c2(source);
+ c2.SliceTo(5, 9);
+
+ AudioSegment s;
+ s.AppendAndConsumeChunk(std::move(c1));
+ EXPECT_EQ(s.GetDuration(), 3);
+
+ s.AppendAndConsumeChunk(std::move(c2));
+ EXPECT_EQ(s.GetDuration(), 7);
+
+ checkChunks(s, {7});
+ }
+ // Otherwise, they cannot be merged.
+ {
+ // If durations of chunks are overlapped, they cannot be merged.
+ AudioChunk c1(source);
+ c1.SliceTo(2, 5);
+
+ AudioChunk c2(source);
+ c2.SliceTo(4, 9);
+
+ AudioSegment s;
+ s.AppendAndConsumeChunk(std::move(c1));
+ EXPECT_EQ(s.GetDuration(), 3);
+
+ s.AppendAndConsumeChunk(std::move(c2));
+ EXPECT_EQ(s.GetDuration(), 8);
+
+ checkChunks(s, {3, 5});
+ }
+ {
+ // If durations of chunks are discontinuous, they cannot be merged.
+ AudioChunk c1(source);
+ c1.SliceTo(2, 4);
+
+ AudioChunk c2(source);
+ c2.SliceTo(5, 9);
+
+ AudioSegment s;
+ s.AppendAndConsumeChunk(std::move(c1));
+ EXPECT_EQ(s.GetDuration(), 2);
+
+ s.AppendAndConsumeChunk(std::move(c2));
+ EXPECT_EQ(s.GetDuration(), 6);
+
+ checkChunks(s, {2, 4});
+ }
+}
+
+TEST(AudioSegment, ConvertFromAndToInterleaved)
+{
+ const uint32_t channels = 2;
+ const uint32_t rate = 44100;
+ AudioGenerator<AudioDataValue> generator(channels, rate);
+
+ const size_t frames = 10;
+ const size_t bufferSize = frames * channels;
+ nsTArray<AudioDataValue> buffer(bufferSize);
+ buffer.AppendElements(bufferSize);
+
+ generator.GenerateInterleaved(buffer.Elements(), frames);
+
+ AudioSegment data;
+ data.AppendFromInterleavedBuffer(buffer.Elements(), frames, channels,
+ PRINCIPAL_HANDLE_NONE);
+
+ nsTArray<AudioDataValue> interleaved;
+ size_t sampleCount = data.WriteToInterleavedBuffer(interleaved, channels);
+
+ EXPECT_EQ(sampleCount, bufferSize);
+ EXPECT_EQ(interleaved, buffer);
+}
+
+} // namespace audio_segment
diff --git a/dom/media/gtest/TestAudioSinkWrapper.cpp b/dom/media/gtest/TestAudioSinkWrapper.cpp
new file mode 100644
index 0000000000..87f17042a8
--- /dev/null
+++ b/dom/media/gtest/TestAudioSinkWrapper.cpp
@@ -0,0 +1,148 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AudioSink.h"
+#include "AudioSinkWrapper.h"
+#include "CubebUtils.h"
+#include "MockCubeb.h"
+#include "TimeUnits.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "mozilla/gtest/WaitFor.h"
+#include "nsThreadManager.h"
+#include "nsThreadUtils.h"
+
+using namespace mozilla;
+
+// This is a crashtest to check that AudioSinkWrapper::mEndedPromiseHolder is
+// not settled twice when sync and async AudioSink initializations race.
+TEST(TestAudioSinkWrapper, AsyncInitFailureWithSyncInitSuccess)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ MediaQueue<AudioData> audioQueue;
+ MediaInfo info;
+ info.EnableAudio();
+ auto audioSinkCreator = [&]() {
+ return UniquePtr<AudioSink>{new AudioSink(AbstractThread::GetCurrent(),
+ audioQueue, info.mAudio,
+ /*resistFingerprinting*/ false)};
+ };
+ const double initialVolume = 0.0; // so that there is initially no AudioSink
+ RefPtr wrapper = new AudioSinkWrapper(
+ AbstractThread::GetCurrent(), audioQueue, std::move(audioSinkCreator),
+ initialVolume, /*playbackRate*/ 1.0, /*preservesPitch*/ true,
+ /*sinkDevice*/ nullptr);
+
+ wrapper->Start(media::TimeUnit::Zero(), info);
+ // The first AudioSink init occurs on a background thread. Listen for this,
+ // but don't process any events on the current thread so that the
+ // AudioSinkWrapper does not yet handle the result of AudioSink
+ // initialization.
+ RefPtr backgroundQueue =
+ nsThreadManager::get().CreateBackgroundTaskQueue(__func__);
+ Monitor monitor(__func__);
+ bool initDone = false;
+ MediaEventListener initListener = cubeb->StreamInitEvent().Connect(
+ backgroundQueue, [&](RefPtr<SmartMockCubebStream> aStream) {
+ EXPECT_EQ(aStream, nullptr);
+ MonitorAutoLock lock(monitor);
+ initDone = true;
+ lock.Notify();
+ });
+ cubeb->ForceStreamInitError();
+ wrapper->SetVolume(0.5); // triggers async sink init, which fails
+ {
+ // Wait for the async init to complete.
+ MonitorAutoLock lock(monitor);
+ while (!initDone) {
+ lock.Wait();
+ }
+ }
+ initListener.Disconnect();
+ wrapper->SetPlaying(false);
+ // The second AudioSink init is synchronous.
+ nsIThread* currentThread = NS_GetCurrentThread();
+ RefPtr<SmartMockCubebStream> stream;
+ initListener = cubeb->StreamInitEvent().Connect(
+ currentThread, [&](RefPtr<SmartMockCubebStream> aStream) {
+ stream = std::move(aStream);
+ });
+ wrapper->SetPlaying(true); // sync sink init, which succeeds
+ // Let AudioSinkWrapper handle the (first) AudioSink initialization failure
+ // and allow `stream` to be set.
+ NS_ProcessPendingEvents(currentThread);
+ initListener.Disconnect();
+ cubeb_state state = CUBEB_STATE_STARTED;
+ MediaEventListener stateListener = stream->StateEvent().Connect(
+ currentThread, [&](cubeb_state aState) { state = aState; });
+ // Run AudioSinkWrapper::OnAudioEnded().
+ // This test passes if there is no crash. Bug 1845811.
+ audioQueue.Finish();
+ SpinEventLoopUntil("stream state change"_ns,
+ [&] { return state != CUBEB_STATE_STARTED; });
+ stateListener.Disconnect();
+ EXPECT_EQ(state, CUBEB_STATE_DRAINED);
+ wrapper->Stop();
+ wrapper->Shutdown();
+}
+
+// This is a crashtest to check that AudioSinkWrapper::mEndedPromiseHolder is
+// not settled twice when the audio ends during async AudioSink initialization.
+TEST(TestAudioSinkWrapper, AsyncInitWithEndOfAudio)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ MediaQueue<AudioData> audioQueue;
+ MediaInfo info;
+ info.EnableAudio();
+ auto audioSinkCreator = [&]() {
+ return UniquePtr<AudioSink>{new AudioSink(AbstractThread::GetCurrent(),
+ audioQueue, info.mAudio,
+ /*resistFingerprinting*/ false)};
+ };
+ const double initialVolume = 0.0; // so that there is initially no AudioSink
+ RefPtr wrapper = new AudioSinkWrapper(
+ AbstractThread::GetCurrent(), audioQueue, std::move(audioSinkCreator),
+ initialVolume, /*playbackRate*/ 1.0, /*preservesPitch*/ true,
+ /*sinkDevice*/ nullptr);
+
+ wrapper->Start(media::TimeUnit::Zero(), info);
+ // The first AudioSink init occurs on a background thread. Listen for this,
+ // but don't process any events on the current thread so that the
+ // AudioSinkWrapper does not yet use the initialized AudioSink.
+ RefPtr backgroundQueue =
+ nsThreadManager::get().CreateBackgroundTaskQueue(__func__);
+ Monitor monitor(__func__);
+ RefPtr<SmartMockCubebStream> stream;
+ MediaEventListener initListener = cubeb->StreamInitEvent().Connect(
+ backgroundQueue, [&](RefPtr<SmartMockCubebStream> aStream) {
+ EXPECT_NE(aStream, nullptr);
+ MonitorAutoLock lock(monitor);
+ stream = std::move(aStream);
+ lock.Notify();
+ });
+ wrapper->SetVolume(0.5); // triggers async sink init
+ {
+ // Wait for the async init to complete.
+ MonitorAutoLock lock(monitor);
+ while (!stream) {
+ lock.Wait();
+ }
+ }
+ initListener.Disconnect();
+ // Finish the audio before AudioSinkWrapper considers using the initialized
+ // AudioSink.
+ audioQueue.Finish();
+ // Wait for AudioSinkWrapper to destroy the initialized stream.
+ // This test passes if there is no crash. Bug 1846854.
+ WaitFor(cubeb->StreamDestroyEvent());
+ wrapper->Stop();
+ wrapper->Shutdown();
+}
diff --git a/dom/media/gtest/TestAudioTrackEncoder.cpp b/dom/media/gtest/TestAudioTrackEncoder.cpp
new file mode 100644
index 0000000000..e0bfa6a696
--- /dev/null
+++ b/dom/media/gtest/TestAudioTrackEncoder.cpp
@@ -0,0 +1,298 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "OpusTrackEncoder.h"
+
+#include "AudioGenerator.h"
+#include "AudioSampleFormat.h"
+
+using namespace mozilla;
+
+class TestOpusTrackEncoder : public OpusTrackEncoder {
+ public:
+ TestOpusTrackEncoder(TrackRate aTrackRate,
+ MediaQueue<EncodedFrame>& aEncodedDataQueue)
+ : OpusTrackEncoder(aTrackRate, aEncodedDataQueue) {}
+
+ // Return true if it has successfully initialized the Opus encoder.
+ bool TestOpusRawCreation(int aChannels) {
+ if (Init(aChannels) == NS_OK) {
+ if (IsInitialized()) {
+ return true;
+ }
+ }
+ return false;
+ }
+};
+
+static bool TestOpusInit(int aChannels, TrackRate aSamplingRate) {
+ MediaQueue<EncodedFrame> frames;
+ TestOpusTrackEncoder encoder(aSamplingRate, frames);
+ return encoder.TestOpusRawCreation(aChannels);
+}
+
+TEST(OpusAudioTrackEncoder, InitRaw)
+{
+ // Expect false with 0 or negative channels of input signal.
+ EXPECT_FALSE(TestOpusInit(0, 16000));
+ EXPECT_FALSE(TestOpusInit(-1, 16000));
+
+ // The Opus format supports up to 8 channels, and supports multitrack audio up
+ // to 255 channels, but the current implementation supports only mono and
+ // stereo, and downmixes any more than that.
+ // Expect false with channels of input signal exceed the max supported number.
+ EXPECT_FALSE(TestOpusInit(8 + 1, 16000));
+
+ // Should accept channels within valid range.
+ for (int i = 1; i <= 8; i++) {
+ EXPECT_TRUE(TestOpusInit(i, 16000));
+ }
+
+ // Expect false with 0 or negative sampling rate of input signal.
+ EXPECT_FALSE(TestOpusInit(1, 0));
+ EXPECT_FALSE(TestOpusInit(1, -1));
+
+ // Verify sample rate bounds checking.
+ EXPECT_FALSE(TestOpusInit(2, 2000));
+ EXPECT_FALSE(TestOpusInit(2, 4000));
+ EXPECT_FALSE(TestOpusInit(2, 7999));
+ EXPECT_TRUE(TestOpusInit(2, 8000));
+ EXPECT_TRUE(TestOpusInit(2, 192000));
+ EXPECT_FALSE(TestOpusInit(2, 192001));
+ EXPECT_FALSE(TestOpusInit(2, 200000));
+}
+
+TEST(OpusAudioTrackEncoder, Init)
+{
+ {
+    // The encoder does not normally receive enough info from null data to
+ // init. However, multiple attempts to do so, with sufficiently long
+ // duration segments, should result in a default-init. The first attempt
+ // should never do this though, even if the duration is long:
+ MediaQueue<EncodedFrame> frames;
+ OpusTrackEncoder encoder(48000, frames);
+ AudioSegment segment;
+ segment.AppendNullData(48000 * 100);
+ encoder.TryInit(segment, segment.GetDuration());
+ EXPECT_FALSE(encoder.IsInitialized());
+
+ // Multiple init attempts should result in best effort init:
+ encoder.TryInit(segment, segment.GetDuration());
+ EXPECT_TRUE(encoder.IsInitialized());
+ }
+
+ {
+ // For non-null segments we should init immediately
+ MediaQueue<EncodedFrame> frames;
+ OpusTrackEncoder encoder(48000, frames);
+ AudioSegment segment;
+ AudioGenerator<AudioDataValue> generator(2, 48000);
+ generator.Generate(segment, 1);
+ encoder.TryInit(segment, segment.GetDuration());
+ EXPECT_TRUE(encoder.IsInitialized());
+ }
+
+ {
+ // Test low sample rate bound
+ MediaQueue<EncodedFrame> frames;
+ OpusTrackEncoder encoder(7999, frames);
+ AudioSegment segment;
+ AudioGenerator<AudioDataValue> generator(2, 7999);
+ generator.Generate(segment, 1);
+ encoder.TryInit(segment, segment.GetDuration());
+ EXPECT_FALSE(encoder.IsInitialized());
+ }
+
+ {
+ // Test low sample rate bound
+ MediaQueue<EncodedFrame> frames;
+ OpusTrackEncoder encoder(8000, frames);
+ AudioSegment segment;
+ AudioGenerator<AudioDataValue> generator(2, 8000);
+ generator.Generate(segment, 1);
+ encoder.TryInit(segment, segment.GetDuration());
+ EXPECT_TRUE(encoder.IsInitialized());
+ }
+
+ {
+ // Test high sample rate bound
+ MediaQueue<EncodedFrame> frames;
+ OpusTrackEncoder encoder(192001, frames);
+ AudioSegment segment;
+ AudioGenerator<AudioDataValue> generator(2, 192001);
+ generator.Generate(segment, 1);
+ encoder.TryInit(segment, segment.GetDuration());
+ EXPECT_FALSE(encoder.IsInitialized());
+ }
+
+ {
+ // Test high sample rate bound
+ MediaQueue<EncodedFrame> frames;
+ OpusTrackEncoder encoder(192000, frames);
+ AudioSegment segment;
+ AudioGenerator<AudioDataValue> generator(2, 192000);
+ generator.Generate(segment, 1);
+ encoder.TryInit(segment, segment.GetDuration());
+ EXPECT_TRUE(encoder.IsInitialized());
+ }
+
+ {
+ // Test that it takes 10s to trigger default-init.
+ MediaQueue<EncodedFrame> frames;
+ OpusTrackEncoder encoder(48000, frames);
+ AudioSegment longSegment;
+ longSegment.AppendNullData(48000 * 10 - 1);
+ AudioSegment shortSegment;
+ shortSegment.AppendNullData(1);
+ encoder.TryInit(longSegment, longSegment.GetDuration());
+ EXPECT_FALSE(encoder.IsInitialized());
+ encoder.TryInit(shortSegment, shortSegment.GetDuration());
+ EXPECT_FALSE(encoder.IsInitialized());
+ encoder.TryInit(shortSegment, shortSegment.GetDuration());
+ EXPECT_TRUE(encoder.IsInitialized());
+ }
+}
+
+static int TestOpusResampler(TrackRate aSamplingRate) {
+ MediaQueue<EncodedFrame> frames;
+ OpusTrackEncoder encoder(aSamplingRate, frames);
+ return encoder.mOutputSampleRate;
+}
+
+TEST(OpusAudioTrackEncoder, Resample)
+{
+ // Sampling rates of data to be fed to Opus encoder, should remain unchanged
+ // if it is one of Opus supported rates (8000, 12000, 16000, 24000 and 48000
+ // (kHz)) at initialization.
+ EXPECT_TRUE(TestOpusResampler(8000) == 8000);
+ EXPECT_TRUE(TestOpusResampler(12000) == 12000);
+ EXPECT_TRUE(TestOpusResampler(16000) == 16000);
+ EXPECT_TRUE(TestOpusResampler(24000) == 24000);
+ EXPECT_TRUE(TestOpusResampler(48000) == 48000);
+
+ // Otherwise, it should be resampled to 48kHz by resampler.
+ EXPECT_TRUE(TestOpusResampler(9600) == 48000);
+ EXPECT_TRUE(TestOpusResampler(44100) == 48000);
+}
+
+TEST(OpusAudioTrackEncoder, FetchMetadata)
+{
+ const int32_t channels = 1;
+ const TrackRate sampleRate = 44100;
+ MediaQueue<EncodedFrame> frames;
+ TestOpusTrackEncoder encoder(sampleRate, frames);
+ EXPECT_TRUE(encoder.TestOpusRawCreation(channels));
+
+ RefPtr<TrackMetadataBase> metadata = encoder.GetMetadata();
+ ASSERT_EQ(TrackMetadataBase::METADATA_OPUS, metadata->GetKind());
+
+ RefPtr<OpusMetadata> opusMeta = static_cast<OpusMetadata*>(metadata.get());
+ EXPECT_EQ(channels, opusMeta->mChannels);
+ EXPECT_EQ(sampleRate, opusMeta->mSamplingFrequency);
+}
+
+TEST(OpusAudioTrackEncoder, FrameEncode)
+{
+ const int32_t channels = 1;
+ const TrackRate sampleRate = 44100;
+ MediaQueue<EncodedFrame> frames;
+ TestOpusTrackEncoder encoder(sampleRate, frames);
+ EXPECT_TRUE(encoder.TestOpusRawCreation(channels));
+
+ // Generate five seconds of raw audio data.
+ AudioGenerator<AudioDataValue> generator(channels, sampleRate);
+ AudioSegment segment;
+ const int32_t samples = sampleRate * 5;
+ generator.Generate(segment, samples);
+
+ encoder.AppendAudioSegment(std::move(segment));
+ encoder.NotifyEndOfStream();
+
+ EXPECT_TRUE(encoder.IsEncodingComplete());
+ EXPECT_TRUE(frames.IsFinished());
+
+ // Verify that encoded data is 5 seconds long.
+ uint64_t totalDuration = 0;
+ while (RefPtr<EncodedFrame> frame = frames.PopFront()) {
+ totalDuration += frame->mDuration;
+ }
+ // 44100 as used above gets resampled to 48000 for opus.
+ const uint64_t five = 48000 * 5;
+ EXPECT_EQ(five + encoder.GetLookahead(), totalDuration);
+}
+
+TEST(OpusAudioTrackEncoder, DefaultInitDuration)
+{
+ const TrackRate rate = 44100;
+ MediaQueue<EncodedFrame> frames;
+ OpusTrackEncoder encoder(rate, frames);
+ AudioGenerator<AudioDataValue> generator(2, rate);
+ AudioSegment segment;
+ // 15 seconds should trigger the default-init rate.
+ // The default-init timeout is evaluated once per chunk, so keep chunks
+ // reasonably short.
+ for (int i = 0; i < 150; ++i) {
+ generator.Generate(segment, rate / 10);
+ }
+ encoder.AppendAudioSegment(std::move(segment));
+ encoder.NotifyEndOfStream();
+
+ EXPECT_TRUE(encoder.IsEncodingComplete());
+ EXPECT_TRUE(frames.IsFinished());
+
+ // Verify that encoded data is 15 seconds long.
+ uint64_t totalDuration = 0;
+ while (RefPtr<EncodedFrame> frame = frames.PopFront()) {
+ totalDuration += frame->mDuration;
+ }
+ // 44100 as used above gets resampled to 48000 for opus.
+ const uint64_t fifteen = 48000 * 15;
+ EXPECT_EQ(totalDuration, fifteen + encoder.GetLookahead());
+}
+
+uint64_t TestSampleRate(TrackRate aSampleRate, uint64_t aInputFrames) {
+  MediaQueue<EncodedFrame> frames;
+  OpusTrackEncoder encoder(aSampleRate, frames);
+  AudioGenerator<AudioDataValue> generator(2, aSampleRate);
+  AudioSegment segment;
+  const uint64_t chunkSize = aSampleRate / 10;
+  const uint64_t chunks = aInputFrames / chunkSize;
+  // Feed the input in ~100ms chunks, then append whatever remainder does
+  // not fill a whole chunk, so that exactly aInputFrames frames are
+  // generated in total.
+  for (size_t i = 0; i < chunks; ++i) {
+    generator.Generate(segment, chunkSize);
+  }
+  generator.Generate(segment, aInputFrames % chunkSize);
+  encoder.AppendAudioSegment(std::move(segment));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(frames.IsFinished());
+
+  // Sum the duration of all encoded frames.
+  uint64_t totalDuration = 0;
+  while (RefPtr<EncodedFrame> frame = frames.PopFront()) {
+    totalDuration += frame->mDuration;
+  }
+  return totalDuration - encoder.GetLookahead();
+}
+
+TEST(OpusAudioTrackEncoder, DurationSampleRates)
+{
+ // Factors of 48k
+ EXPECT_EQ(TestSampleRate(48000, 48000 * 3 / 2), 48000U * 3 / 2);
+ EXPECT_EQ(TestSampleRate(24000, 24000 * 3 / 2), 48000U * 3 / 2);
+ EXPECT_EQ(TestSampleRate(16000, 16000 * 3 / 2), 48000U * 3 / 2);
+ EXPECT_EQ(TestSampleRate(12000, 12000 * 3 / 2), 48000U * 3 / 2);
+ EXPECT_EQ(TestSampleRate(8000, 8000 * 3 / 2), 48000U * 3 / 2);
+
+ // Non-factors of 48k, resampled
+ EXPECT_EQ(TestSampleRate(44100, 44100 * 3 / 2), 48000U * 3 / 2);
+ EXPECT_EQ(TestSampleRate(32000, 32000 * 3 / 2), 48000U * 3 / 2);
+ EXPECT_EQ(TestSampleRate(96000, 96000 * 3 / 2), 48000U * 3 / 2);
+ EXPECT_EQ(TestSampleRate(33330, 33330 * 3 / 2), 48000U * 3 / 2);
+}
diff --git a/dom/media/gtest/TestAudioTrackGraph.cpp b/dom/media/gtest/TestAudioTrackGraph.cpp
new file mode 100644
index 0000000000..457c50e731
--- /dev/null
+++ b/dom/media/gtest/TestAudioTrackGraph.cpp
@@ -0,0 +1,2726 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaTrackGraphImpl.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest.h"
+
+#include "CrossGraphPort.h"
+#include "DeviceInputTrack.h"
+#ifdef MOZ_WEBRTC
+# include "MediaEngineWebRTCAudio.h"
+#endif // MOZ_WEBRTC
+#include "MockCubeb.h"
+#include "mozilla/gtest/WaitFor.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "WavDumper.h"
+
+using namespace mozilla;
+
+// Short-hand for InvokeAsync on the current thread.
+#define Invoke(f) InvokeAsync(GetCurrentSerialEventTarget(), __func__, f)
+
+// Short-hand for DispatchToCurrentThread with a function.
+#define DispatchFunction(f) \
+  NS_DispatchToCurrentThread(NS_NewRunnableFunction(__func__, f))
+
+// Short-hand for DispatchToCurrentThread with a method with arguments.
+// NOTE: the named-variadic `args...` / `##args` form is a GCC/Clang extension.
+#define DispatchMethod(t, m, args...) \
+  NS_DispatchToCurrentThread(NewRunnableMethod(__func__, t, m, ##args))
+
+namespace {
+#ifdef MOZ_WEBRTC
+/*
+ * Common ControlMessages. Each is appended to the graph from the main thread;
+ * Run() executes when the graph processes its message queue.
+ */
+// Starts an AudioInputProcessing for the given AudioProcessingTrack.
+struct StartInputProcessing : public ControlMessage {
+  const RefPtr<AudioProcessingTrack> mProcessingTrack;
+  const RefPtr<AudioInputProcessing> mInputProcessing;
+
+  StartInputProcessing(AudioProcessingTrack* aTrack,
+                       AudioInputProcessing* aInputProcessing)
+      : ControlMessage(aTrack),
+        mProcessingTrack(aTrack),
+        mInputProcessing(aInputProcessing) {}
+  void Run() override { mInputProcessing->Start(mTrack->Graph()); }
+};
+
+// Stops an AudioInputProcessing.
+struct StopInputProcessing : public ControlMessage {
+  const RefPtr<AudioInputProcessing> mInputProcessing;
+
+  explicit StopInputProcessing(AudioProcessingTrack* aTrack,
+                               AudioInputProcessing* aInputProcessing)
+      : ControlMessage(aTrack), mInputProcessing(aInputProcessing) {}
+  void Run() override { mInputProcessing->Stop(mTrack->Graph()); }
+};
+
+// Toggles pass-through mode, asserting that the requested value actually
+// differs from the current one (catches redundant toggles in tests).
+struct SetPassThrough : public ControlMessage {
+  const RefPtr<AudioInputProcessing> mInputProcessing;
+  const bool mPassThrough;
+
+  SetPassThrough(MediaTrack* aTrack, AudioInputProcessing* aInputProcessing,
+                 bool aPassThrough)
+      : ControlMessage(aTrack),
+        mInputProcessing(aInputProcessing),
+        mPassThrough(aPassThrough) {}
+  void Run() override {
+    EXPECT_EQ(mInputProcessing->PassThrough(mTrack->Graph()), !mPassThrough);
+    mInputProcessing->SetPassThrough(mTrack->Graph(), mPassThrough);
+  }
+};
+
+// Requests a new input channel count for the given device.
+struct SetRequestedInputChannelCount : public ControlMessage {
+  const CubebUtils::AudioDeviceID mDeviceId;
+  const RefPtr<AudioInputProcessing> mInputProcessing;
+  const uint32_t mChannelCount;
+
+  SetRequestedInputChannelCount(MediaTrack* aTrack,
+                                CubebUtils::AudioDeviceID aDeviceId,
+                                AudioInputProcessing* aInputProcessing,
+                                uint32_t aChannelCount)
+      : ControlMessage(aTrack),
+        mDeviceId(aDeviceId),
+        mInputProcessing(aInputProcessing),
+        mChannelCount(aChannelCount) {}
+  void Run() override {
+    mInputProcessing->SetRequestedInputChannelCount(mTrack->Graph(), mDeviceId,
+                                                    mChannelCount);
+  }
+};
+#endif // MOZ_WEBRTC
+
+// Tells MockCubeb to iterate its fake audio clock faster than real time
+// (see MockCubeb::GoFaster), so tests waiting on audio progress finish sooner.
+class GoFaster : public ControlMessage {
+  MockCubeb* mCubeb;
+
+ public:
+  explicit GoFaster(MockCubeb* aCubeb)
+      : ControlMessage(nullptr), mCubeb(aCubeb) {}
+  void Run() override { mCubeb->GoFaster(); }
+};
+
+// Hands an AudioInputSource to a NonNativeInputTrack and starts its audio.
+struct StartNonNativeInput : public ControlMessage {
+  const RefPtr<NonNativeInputTrack> mInputTrack;
+  RefPtr<AudioInputSource> mInputSource;
+
+  StartNonNativeInput(NonNativeInputTrack* aInputTrack,
+                      RefPtr<AudioInputSource>&& aInputSource)
+      : ControlMessage(aInputTrack),
+        mInputTrack(aInputTrack),
+        mInputSource(std::move(aInputSource)) {}
+  void Run() override { mInputTrack->StartAudio(std::move(mInputSource)); }
+};
+
+// Stops the audio of a NonNativeInputTrack.
+struct StopNonNativeInput : public ControlMessage {
+  const RefPtr<NonNativeInputTrack> mInputTrack;
+
+  explicit StopNonNativeInput(NonNativeInputTrack* aInputTrack)
+      : ControlMessage(aInputTrack), mInputTrack(aInputTrack) {}
+  void Run() override { mInputTrack->StopAudio(); }
+};
+
+} // namespace
+
+/*
+ * The set of tests here is a bit special. In part because they're async and
+ * depend on the graph thread to function. In part because they depend on main
+ * thread stable state to send messages to the graph.
+ *
+ * Any message sent from the main thread to the graph through the graph's
+ * various APIs are scheduled to run in stable state. Stable state occurs after
+ * a task in the main thread eventloop has run to completion.
+ *
+ * Since gtests are generally sync and on main thread, calling into the graph
+ * may schedule a stable state runnable but with no task in the eventloop to
+ * trigger stable state. Therefore care must be taken to always call into the
+ * graph from a task, typically via InvokeAsync or a dispatch to main thread.
+ */
+
+// Checks that MediaTrackGraphImpl::GetInstance keys graphs on the output
+// device ID: different IDs yield different graph instances, while repeating
+// the same ID (with the same window and rate) returns the existing instance.
+TEST(TestAudioTrackGraph, DifferentDeviceIDs)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  // Graph for the default (nullptr) output device.
+  MediaTrackGraph* g1 = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::AUDIO_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      /*OutputDeviceID*/ nullptr, GetMainThreadSerialEventTarget());
+
+  // Graph for output device 1.
+  MediaTrackGraph* g2 = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::AUDIO_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      /*OutputDeviceID*/ reinterpret_cast<cubeb_devid>(1),
+      GetMainThreadSerialEventTarget());
+
+  // Requesting the default device again must return the same graph as g1.
+  MediaTrackGraph* g1_2 = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::AUDIO_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      /*OutputDeviceID*/ nullptr, GetMainThreadSerialEventTarget());
+
+  // Requesting device 1 again must return the same graph as g2.
+  MediaTrackGraph* g2_2 = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::AUDIO_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      /*OutputDeviceID*/ reinterpret_cast<cubeb_devid>(1),
+      GetMainThreadSerialEventTarget());
+
+  EXPECT_NE(g1, g2) << "Different graphs due to different device ids";
+  EXPECT_EQ(g1, g1_2) << "Same graphs for same device ids";
+  EXPECT_EQ(g2, g2_2) << "Same graphs for same device ids";
+
+  for (MediaTrackGraph* g : {g1, g2}) {
+    // Dummy track to make graph rolling. Add it and remove it to remove the
+    // graph from the global hash table and let it shutdown.
+
+    using SourceTrackPromise = MozPromise<SourceMediaTrack*, nsresult, true>;
+    auto p = Invoke([g] {
+      return SourceTrackPromise::CreateAndResolve(
+          g->CreateSourceTrack(MediaSegment::AUDIO), __func__);
+    });
+
+    WaitFor(cubeb->StreamInitEvent());
+    RefPtr<SourceMediaTrack> dummySource = WaitFor(p).unwrap();
+
+    DispatchMethod(dummySource, &SourceMediaTrack::Destroy);
+
+    WaitFor(cubeb->StreamDestroyEvent());
+  }
+}
+
+// Checks that the output device ID passed to GetInstance is the one the graph
+// hands to cubeb_stream_init when it creates its audio stream.
+TEST(TestAudioTrackGraph, SetOutputDeviceID)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  // Set the output device id in GetInstance method confirm that it is the one
+  // used in cubeb_stream_init.
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::AUDIO_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      /*OutputDeviceID*/ reinterpret_cast<cubeb_devid>(2),
+      GetMainThreadSerialEventTarget());
+
+  // Dummy track to make graph rolling. Add it and remove it to remove the
+  // graph from the global hash table and let it shutdown.
+  RefPtr<SourceMediaTrack> dummySource;
+  DispatchFunction(
+      [&] { dummySource = graph->CreateSourceTrack(MediaSegment::AUDIO); });
+
+  RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+
+  EXPECT_EQ(stream->GetOutputDeviceID(), reinterpret_cast<cubeb_devid>(2))
+      << "After init confirm the expected output device id";
+
+  // Test has finished, destroy the track to shutdown the MTG.
+  DispatchMethod(dummySource, &SourceMediaTrack::Destroy);
+  WaitFor(cubeb->StreamDestroyEvent());
+}
+
+// Checks that a stream name set on a system-thread driver survives the switch
+// to an AudioCallbackDriver, and that renaming an already-running stream is
+// observed by cubeb.
+TEST(TestAudioTrackGraph, StreamName)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  // Initialize a graph with a system thread driver to check that the stream
+  // name survives the driver switch.
+  MediaTrackGraphImpl* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      /*OutputDeviceID*/ reinterpret_cast<cubeb_devid>(1),
+      GetMainThreadSerialEventTarget());
+  nsLiteralCString name1("name1");
+  graph->CurrentDriver()->SetStreamName(name1);
+
+  // Dummy track to start the graph rolling and switch to an
+  // AudioCallbackDriver.
+  RefPtr<SourceMediaTrack> dummySource;
+  DispatchFunction(
+      [&] { dummySource = graph->CreateSourceTrack(MediaSegment::AUDIO); });
+
+  RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_STREQ(stream->StreamName(), name1.get());
+
+  // Test a name change on an existing stream.
+  nsLiteralCString name2("name2");
+  DispatchFunction([&] {
+    graph->QueueControlMessageWithNoShutdown(
+        [&] { graph->CurrentDriver()->SetStreamName(name2); });
+  });
+  nsCString name = WaitFor(stream->NameSetEvent());
+  EXPECT_EQ(name, name2);
+
+  // Test has finished. Destroy the track to shutdown the MTG.
+  DispatchMethod(dummySource, &SourceMediaTrack::Destroy);
+  WaitFor(cubeb->StreamDestroyEvent());
+}
+
+// Checks that once NotifyWhenDeviceStarted resolves, the graph is driven by a
+// running AudioCallbackDriver.
+TEST(TestAudioTrackGraph, NotifyDeviceStarted)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::AUDIO_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  RefPtr<SourceMediaTrack> dummySource;
+  Unused << WaitFor(Invoke([&] {
+    // Dummy track to make graph rolling. Add it and remove it to remove the
+    // graph from the global hash table and let it shutdown.
+    dummySource = graph->CreateSourceTrack(MediaSegment::AUDIO);
+
+    return graph->NotifyWhenDeviceStarted(nullptr);
+  }));
+
+  {
+    // Use a distinct name (was `graph`) to avoid shadowing the outer variable.
+    MediaTrackGraphImpl* graphImpl = dummySource->GraphImpl();
+    MonitorAutoLock lock(graphImpl->GetMonitor());
+    EXPECT_TRUE(graphImpl->CurrentDriver()->AsAudioCallbackDriver());
+    EXPECT_TRUE(graphImpl->CurrentDriver()->ThreadRunning());
+  }
+
+  // Test has finished, destroy the track to shutdown the MTG.
+  DispatchMethod(dummySource, &SourceMediaTrack::Destroy);
+  WaitFor(cubeb->StreamDestroyEvent());
+}
+
+// Exercises NonNativeInputTrack::StartAudio/StopAudio: the track reports zero
+// input channels and an Unknown device preference before start and after
+// stop, creates an input-only cubeb stream while started, and can be
+// restarted after a stop.
+TEST(TestAudioTrackGraph, NonNativeInputTrackStartAndStop)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+
+  // Add a NonNativeInputTrack to graph, making graph create an output-only
+  // AudioCallbackDriver since NonNativeInputTrack is an audio-type MediaTrack.
+  RefPtr<NonNativeInputTrack> track;
+  DispatchFunction([&] {
+    track = new NonNativeInputTrack(graph->GraphRate(), deviceId,
+                                    PRINCIPAL_HANDLE_NONE);
+    graph->AddTrack(track);
+  });
+
+  RefPtr<SmartMockCubebStream> driverStream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_FALSE(driverStream->mHasInput);
+  EXPECT_TRUE(driverStream->mHasOutput);
+
+  // Main test below:
+  {
+    const AudioInputSource::Id sourceId = 1;
+    const uint32_t channels = 2;
+    const TrackRate rate = 48000;
+
+    // Start and stop the audio in NonNativeInputTrack.
+    {
+      // Snapshot of the track's input state, queried on the graph thread.
+      struct DeviceInfo {
+        uint32_t mChannelCount;
+        AudioInputType mType;
+      };
+      using DeviceQueryPromise =
+          MozPromise<DeviceInfo, nsresult, /* IsExclusive = */ true>;
+
+      // Reads NumberOfChannels/DevicePreference on the graph thread and
+      // resolves the promise with the result.
+      struct DeviceQueryMessage : public ControlMessage {
+        const NonNativeInputTrack* mInputTrack;
+        MozPromiseHolder<DeviceQueryPromise> mHolder;
+
+        DeviceQueryMessage(NonNativeInputTrack* aInputTrack,
+                           MozPromiseHolder<DeviceQueryPromise>&& aHolder)
+            : ControlMessage(aInputTrack),
+              mInputTrack(aInputTrack),
+              mHolder(std::move(aHolder)) {}
+        void Run() override {
+          DeviceInfo info = {mInputTrack->NumberOfChannels(),
+                             mInputTrack->DevicePreference()};
+          // Resolve from a dispatched runnable rather than directly here on
+          // the graph thread.
+          mTrack->GraphImpl()->Dispatch(NS_NewRunnableFunction(
+              "TestAudioTrackGraph::DeviceQueryMessage",
+              [holder = std::move(mHolder), devInfo = info]() mutable {
+                holder.Resolve(devInfo, __func__);
+              }));
+        }
+      };
+
+      // No input channels and device preference before start.
+      {
+        MozPromiseHolder<DeviceQueryPromise> h;
+        RefPtr<DeviceQueryPromise> p = h.Ensure(__func__);
+        DispatchFunction([&] {
+          track->GraphImpl()->AppendMessage(
+              MakeUnique<DeviceQueryMessage>(track.get(), std::move(h)));
+        });
+        Result<DeviceInfo, nsresult> r = WaitFor(p);
+        ASSERT_TRUE(r.isOk());
+        DeviceInfo info = r.unwrap();
+
+        EXPECT_EQ(info.mChannelCount, 0U);
+        EXPECT_EQ(info.mType, AudioInputType::Unknown);
+      }
+
+      DispatchFunction([&] {
+        track->GraphImpl()->AppendMessage(MakeUnique<StartNonNativeInput>(
+            track.get(), MakeRefPtr<AudioInputSource>(
+                             MakeRefPtr<AudioInputSourceListener>(track.get()),
+                             sourceId, deviceId, channels, true /* voice */,
+                             PRINCIPAL_HANDLE_NONE, rate, graph->GraphRate())));
+      });
+      // Starting the audio creates a second, input-only cubeb stream for the
+      // non-native device, at the device rate rather than the graph rate.
+      RefPtr<SmartMockCubebStream> nonNativeStream =
+          WaitFor(cubeb->StreamInitEvent());
+      EXPECT_TRUE(nonNativeStream->mHasInput);
+      EXPECT_FALSE(nonNativeStream->mHasOutput);
+      EXPECT_EQ(nonNativeStream->GetInputDeviceID(), deviceId);
+      EXPECT_EQ(nonNativeStream->InputChannels(), channels);
+      EXPECT_EQ(nonNativeStream->SampleRate(), static_cast<uint32_t>(rate));
+
+      // Input channels and device preference should be set after start.
+      {
+        MozPromiseHolder<DeviceQueryPromise> h;
+        RefPtr<DeviceQueryPromise> p = h.Ensure(__func__);
+        DispatchFunction([&] {
+          track->GraphImpl()->AppendMessage(
+              MakeUnique<DeviceQueryMessage>(track.get(), std::move(h)));
+        });
+        Result<DeviceInfo, nsresult> r = WaitFor(p);
+        ASSERT_TRUE(r.isOk());
+        DeviceInfo info = r.unwrap();
+
+        EXPECT_EQ(info.mChannelCount, channels);
+        EXPECT_EQ(info.mType, AudioInputType::Voice);
+      }
+
+      // Wait until the non-native stream has actually produced some frames.
+      Unused << WaitFor(nonNativeStream->FramesProcessedEvent());
+
+      DispatchFunction([&] {
+        track->GraphImpl()->AppendMessage(
+            MakeUnique<StopNonNativeInput>(track.get()));
+      });
+      RefPtr<SmartMockCubebStream> destroyedStream =
+          WaitFor(cubeb->StreamDestroyEvent());
+      EXPECT_EQ(destroyedStream.get(), nonNativeStream.get());
+
+      // No input channels and device preference after stop.
+      {
+        MozPromiseHolder<DeviceQueryPromise> h;
+        RefPtr<DeviceQueryPromise> p = h.Ensure(__func__);
+        DispatchFunction([&] {
+          track->GraphImpl()->AppendMessage(
+              MakeUnique<DeviceQueryMessage>(track.get(), std::move(h)));
+        });
+        Result<DeviceInfo, nsresult> r = WaitFor(p);
+        ASSERT_TRUE(r.isOk());
+        DeviceInfo info = r.unwrap();
+
+        EXPECT_EQ(info.mChannelCount, 0U);
+        EXPECT_EQ(info.mType, AudioInputType::Unknown);
+      }
+    }
+
+    // Make sure the NonNativeInputTrack can restart and stop its audio.
+    {
+      DispatchFunction([&] {
+        track->GraphImpl()->AppendMessage(MakeUnique<StartNonNativeInput>(
+            track.get(), MakeRefPtr<AudioInputSource>(
+                             MakeRefPtr<AudioInputSourceListener>(track.get()),
+                             sourceId, deviceId, channels, true,
+                             PRINCIPAL_HANDLE_NONE, rate, graph->GraphRate())));
+      });
+      RefPtr<SmartMockCubebStream> nonNativeStream =
+          WaitFor(cubeb->StreamInitEvent());
+      EXPECT_TRUE(nonNativeStream->mHasInput);
+      EXPECT_FALSE(nonNativeStream->mHasOutput);
+      EXPECT_EQ(nonNativeStream->GetInputDeviceID(), deviceId);
+      EXPECT_EQ(nonNativeStream->InputChannels(), channels);
+      EXPECT_EQ(nonNativeStream->SampleRate(), static_cast<uint32_t>(rate));
+
+      Unused << WaitFor(nonNativeStream->FramesProcessedEvent());
+
+      DispatchFunction([&] {
+        track->GraphImpl()->AppendMessage(
+            MakeUnique<StopNonNativeInput>(track.get()));
+      });
+      RefPtr<SmartMockCubebStream> destroyedStream =
+          WaitFor(cubeb->StreamDestroyEvent());
+      EXPECT_EQ(destroyedStream.get(), nonNativeStream.get());
+    }
+  }
+
+  // Clean up.
+  DispatchFunction([&] { track->Destroy(); });
+  RefPtr<SmartMockCubebStream> destroyedStream =
+      WaitFor(cubeb->StreamDestroyEvent());
+  EXPECT_EQ(destroyedStream.get(), driverStream.get());
+}
+
+// Checks that a forced cubeb error on a non-native input stream destroys that
+// stream, and that a subsequent StopAudio on the already-stopped track is
+// harmless.
+TEST(TestAudioTrackGraph, NonNativeInputTrackErrorCallback)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+
+  // Add a NonNativeInputTrack to graph, making graph create an output-only
+  // AudioCallbackDriver since NonNativeInputTrack is an audio-type MediaTrack.
+  RefPtr<NonNativeInputTrack> track;
+  DispatchFunction([&] {
+    track = new NonNativeInputTrack(graph->GraphRate(), deviceId,
+                                    PRINCIPAL_HANDLE_NONE);
+    graph->AddTrack(track);
+  });
+
+  RefPtr<SmartMockCubebStream> driverStream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_FALSE(driverStream->mHasInput);
+  EXPECT_TRUE(driverStream->mHasOutput);
+
+  // Main test below:
+  {
+    const AudioInputSource::Id sourceId = 1;
+    const uint32_t channels = 2;
+    const TrackRate rate = 48000;
+
+    // Launch and start the non-native audio stream.
+    DispatchFunction([&] {
+      track->GraphImpl()->AppendMessage(MakeUnique<StartNonNativeInput>(
+          track.get(), MakeRefPtr<AudioInputSource>(
+                           MakeRefPtr<AudioInputSourceListener>(track.get()),
+                           sourceId, deviceId, channels, true,
+                           PRINCIPAL_HANDLE_NONE, rate, graph->GraphRate())));
+    });
+    RefPtr<SmartMockCubebStream> nonNativeStream =
+        WaitFor(cubeb->StreamInitEvent());
+    EXPECT_TRUE(nonNativeStream->mHasInput);
+    EXPECT_FALSE(nonNativeStream->mHasOutput);
+    EXPECT_EQ(nonNativeStream->GetInputDeviceID(), deviceId);
+    EXPECT_EQ(nonNativeStream->InputChannels(), channels);
+    EXPECT_EQ(nonNativeStream->SampleRate(), static_cast<uint32_t>(rate));
+
+    // Make sure the audio stream is running.
+    Unused << WaitFor(nonNativeStream->FramesProcessedEvent());
+
+    // Force an error. This results in the audio stream destroying.
+    DispatchFunction([&] { nonNativeStream->ForceError(); });
+    WaitFor(nonNativeStream->ErrorForcedEvent());
+
+    RefPtr<SmartMockCubebStream> destroyedStream =
+        WaitFor(cubeb->StreamDestroyEvent());
+    EXPECT_EQ(destroyedStream.get(), nonNativeStream.get());
+  }
+
+  // Make sure it's ok to call audio stop again.
+  DispatchFunction([&] {
+    track->GraphImpl()->AppendMessage(
+        MakeUnique<StopNonNativeInput>(track.get()));
+  });
+
+  // Clean up.
+  DispatchFunction([&] { track->Destroy(); });
+  RefPtr<SmartMockCubebStream> destroyedStream =
+      WaitFor(cubeb->StreamDestroyEvent());
+  EXPECT_EQ(destroyedStream.get(), driverStream.get());
+}
+
+// Minimal DeviceInputConsumerTrack for tests: forwards the data of its single
+// device input (or silence when none is connected) into its own AudioSegment.
+class TestDeviceInputConsumerTrack : public DeviceInputConsumerTrack {
+ public:
+  // Creates a track at the graph's rate and registers it. Main thread only.
+  static TestDeviceInputConsumerTrack* Create(MediaTrackGraph* aGraph) {
+    MOZ_ASSERT(NS_IsMainThread());
+    TestDeviceInputConsumerTrack* track =
+        new TestDeviceInputConsumerTrack(aGraph->GraphRate());
+    aGraph->AddTrack(track);
+    return track;
+  }
+
+  // Disconnects any device input before destroying the track. Main thread
+  // only.
+  void Destroy() {
+    MOZ_ASSERT(NS_IsMainThread());
+    DisconnectDeviceInput();
+    DeviceInputConsumerTrack::Destroy();
+  }
+
+  // Appends input-device data (or null data when no input is connected) for
+  // the [aFrom, aTo) interval.
+  void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override {
+    if (aFrom >= aTo) {
+      return;
+    }
+
+    if (mInputs.IsEmpty()) {
+      GetData<AudioSegment>()->AppendNullData(aTo - aFrom);
+    } else {
+      MOZ_ASSERT(mInputs.Length() == 1);
+      AudioSegment data;
+      DeviceInputConsumerTrack::GetInputSourceData(data, mInputs[0], aFrom,
+                                                   aTo);
+      GetData<AudioSegment>()->AppendFrom(&data);
+    }
+  }
+
+  // Reports the channel count of the connected device input, or 0 when no
+  // input is connected.
+  uint32_t NumberOfChannels() const override {
+    if (mInputs.IsEmpty()) {
+      return 0;
+    }
+    DeviceInputTrack* t = mInputs[0]->GetSource()->AsDeviceInputTrack();
+    MOZ_ASSERT(t);
+    return t->NumberOfChannels();
+  }
+
+ private:
+  explicit TestDeviceInputConsumerTrack(TrackRate aSampleRate)
+      : DeviceInputConsumerTrack(aSampleRate) {}
+};
+
+// Checks that a forced device-changed event on a native or non-native input
+// stream is delivered exactly once to the corresponding AudioDataListener.
+TEST(TestAudioTrackGraph, DeviceChangedCallback)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graphImpl = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  // Test-only listener that counts DeviceChanged() notifications.
+  class TestAudioDataListener : public AudioDataListener {
+   public:
+    TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice)
+        : mChannelCount(aChannelCount),
+          mIsVoice(aIsVoice),
+          mDeviceChangedCount(0) {}
+
+    uint32_t RequestedInputChannelCount(MediaTrackGraph* aGraph) override {
+      return mChannelCount;
+    }
+    bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
+      return mIsVoice;
+    };
+    void DeviceChanged(MediaTrackGraph* aGraph) override {
+      ++mDeviceChangedCount;
+    }
+    void Disconnect(MediaTrackGraph* aGraph) override{/* Ignored */};
+    uint32_t DeviceChangedCount() { return mDeviceChangedCount; }
+
+   private:
+    ~TestAudioDataListener() = default;
+    const uint32_t mChannelCount;
+    const bool mIsVoice;
+    // Atomic: incremented off-main-thread, read on the main thread.
+    std::atomic<uint32_t> mDeviceChangedCount;
+  };
+
+  // Create a full-duplex AudioCallbackDriver by creating a NativeInputTrack.
+  const CubebUtils::AudioDeviceID device1 = (CubebUtils::AudioDeviceID)1;
+  RefPtr<TestAudioDataListener> listener1 = new TestAudioDataListener(1, false);
+  RefPtr<TestDeviceInputConsumerTrack> track1 =
+      TestDeviceInputConsumerTrack::Create(graphImpl);
+  track1->ConnectDeviceInput(device1, listener1.get(), PRINCIPAL_HANDLE_NONE);
+
+  EXPECT_TRUE(track1->ConnectToNativeDevice());
+  EXPECT_FALSE(track1->ConnectToNonNativeDevice());
+  auto started =
+      Invoke([&] { return graphImpl->NotifyWhenDeviceStarted(nullptr); });
+  RefPtr<SmartMockCubebStream> stream1 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream1->mHasInput);
+  EXPECT_TRUE(stream1->mHasOutput);
+  EXPECT_EQ(stream1->GetInputDeviceID(), device1);
+  Unused << WaitFor(started);
+
+  // Create a NonNativeInputTrack, and make sure its DeviceChangeCallback works.
+  const CubebUtils::AudioDeviceID device2 = (CubebUtils::AudioDeviceID)2;
+  RefPtr<TestAudioDataListener> listener2 = new TestAudioDataListener(2, true);
+  RefPtr<TestDeviceInputConsumerTrack> track2 =
+      TestDeviceInputConsumerTrack::Create(graphImpl);
+  track2->ConnectDeviceInput(device2, listener2.get(), PRINCIPAL_HANDLE_NONE);
+
+  EXPECT_FALSE(track2->ConnectToNativeDevice());
+  EXPECT_TRUE(track2->ConnectToNonNativeDevice());
+  RefPtr<SmartMockCubebStream> stream2 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream2->mHasInput);
+  EXPECT_FALSE(stream2->mHasOutput);
+  EXPECT_EQ(stream2->GetInputDeviceID(), device2);
+
+  // Produce a device-changed event for the NonNativeInputTrack.
+  DispatchFunction([&] { stream2->ForceDeviceChanged(); });
+  WaitFor(stream2->DeviceChangeForcedEvent());
+
+  // Produce a device-changed event for the NativeInputTrack.
+  DispatchFunction([&] { stream1->ForceDeviceChanged(); });
+  WaitFor(stream1->DeviceChangeForcedEvent());
+
+  // Destroy the NonNativeInputTrack.
+  DispatchFunction([&] {
+    track2->DisconnectDeviceInput();
+    track2->Destroy();
+  });
+  RefPtr<SmartMockCubebStream> destroyedStream =
+      WaitFor(cubeb->StreamDestroyEvent());
+  EXPECT_EQ(destroyedStream.get(), stream2.get());
+
+  // Make sure we only have one device-changed event for the
+  // NonNativeInputTrack's listener.
+  EXPECT_EQ(listener2->DeviceChangedCount(), 1U);
+
+  // Destroy the NativeInputTrack.
+  DispatchFunction([&] {
+    track1->DisconnectDeviceInput();
+    track1->Destroy();
+  });
+  destroyedStream = WaitFor(cubeb->StreamDestroyEvent());
+  EXPECT_EQ(destroyedStream.get(), stream1.get());
+
+  // Make sure we only have one device-changed event for the NativeInputTrack.
+  EXPECT_EQ(listener1->DeviceChangedCount(), 1U);
+}
+
+// The native audio stream (a.k.a. GraphDriver) and the non-native audio stream
+// should always be the same as the max requested input channel of its paired
+// DeviceInputTracks. This test checks if the audio stream paired with the
+// DeviceInputTrack will follow the max requested input channel or not.
+//
+// The main focus for this test is to make sure DeviceInputTrack::OpenAudio and
+// ::CloseAudio work as we expect. In addition, this test confirms that
+// MediaTrackGraph::ReevaluateInputDevice works correctly by using a
+// test-only AudioDataListener.
+//
+// This test is pretty similar to RestartAudioIfProcessingMaxChannelCountChanged
+// below, which tests the same thing but using AudioProcessingTrack.
+// AudioProcessingTrack is the consumer of the DeviceInputTrack used in wild.
+// It has its own customized AudioDataListener. However, it only tests when
+// MOZ_WEBRTC is defined.
+TEST(TestAudioTrackGraph, RestartAudioIfMaxChannelCountChanged)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+ auto unforcer = WaitFor(cubeb->ForceAudioThread()).unwrap();
+ Unused << unforcer;
+
+ MediaTrackGraph* graphImpl = MediaTrackGraphImpl::GetInstance(
+ MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+ CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+ nullptr, GetMainThreadSerialEventTarget());
+
+ // A test-only AudioDataListener that simulates AudioInputProcessing's setter
+ // and getter for the input channel count.
+ class TestAudioDataListener : public AudioDataListener {
+ public:
+ TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice)
+ : mChannelCount(aChannelCount), mIsVoice(aIsVoice) {}
+ // Main thread API
+ void SetInputChannelCount(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDevice,
+ uint32_t aChannelCount) {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ struct Message : public ControlMessage {
+ MediaTrackGraph* mGraph;
+ TestAudioDataListener* mListener;
+ CubebUtils::AudioDeviceID mDevice;
+ uint32_t mChannelCount;
+
+ Message(MediaTrackGraph* aGraph, TestAudioDataListener* aListener,
+ CubebUtils::AudioDeviceID aDevice, uint32_t aChannelCount)
+ : ControlMessage(nullptr),
+ mGraph(aGraph),
+ mListener(aListener),
+ mDevice(aDevice),
+ mChannelCount(aChannelCount) {}
+ void Run() override {
+ mListener->mChannelCount = mChannelCount;
+ mGraph->ReevaluateInputDevice(mDevice);
+ }
+ };
+
+ static_cast<MediaTrackGraphImpl*>(aGraph)->AppendMessage(
+ MakeUnique<Message>(aGraph, this, aDevice, aChannelCount));
+ }
+ // Graph thread APIs: AudioDataListenerInterface implementations.
+ uint32_t RequestedInputChannelCount(MediaTrackGraph* aGraph) override {
+ aGraph->AssertOnGraphThread();
+ return mChannelCount;
+ }
+ bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
+ return mIsVoice;
+ };
+ void DeviceChanged(MediaTrackGraph* aGraph) override { /* Ignored */
+ }
+ void Disconnect(MediaTrackGraph* aGraph) override{/* Ignored */};
+
+ private:
+ ~TestAudioDataListener() = default;
+
+ // Graph thread-only.
+ uint32_t mChannelCount;
+ // Any thread.
+ const bool mIsVoice;
+ };
+
+ // Request a new input channel count and expect to have a new stream.
+ auto setNewChannelCount = [&](const RefPtr<TestAudioDataListener>& aListener,
+ RefPtr<SmartMockCubebStream>& aStream,
+ uint32_t aChannelCount) {
+ ASSERT_TRUE(!!aListener);
+ ASSERT_TRUE(!!aStream);
+ ASSERT_TRUE(aStream->mHasInput);
+ ASSERT_NE(aChannelCount, 0U);
+
+ const CubebUtils::AudioDeviceID device = aStream->GetInputDeviceID();
+
+ bool destroyed = false;
+ MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
+ AbstractThread::GetCurrent(),
+ [&](const RefPtr<SmartMockCubebStream>& aDestroyed) {
+ destroyed = aDestroyed.get() == aStream.get();
+ });
+
+ RefPtr<SmartMockCubebStream> newStream;
+ MediaEventListener restartListener = cubeb->StreamInitEvent().Connect(
+ AbstractThread::GetCurrent(),
+ [&](const RefPtr<SmartMockCubebStream>& aCreated) {
+ newStream = aCreated;
+ });
+
+ DispatchFunction([&] {
+ aListener->SetInputChannelCount(graphImpl, device, aChannelCount);
+ });
+
+ SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+ "TEST(TestAudioTrackGraph, RestartAudioIfMaxChannelCountChanged) #1"_ns,
+ [&] { return destroyed && newStream; });
+
+ destroyListener.Disconnect();
+ restartListener.Disconnect();
+
+ aStream = newStream;
+ };
+
+ // Open a new track and expect to have a new stream.
+ auto openTrack = [&](RefPtr<SmartMockCubebStream>& aCurrentStream,
+ RefPtr<TestDeviceInputConsumerTrack>& aTrack,
+ const RefPtr<TestAudioDataListener>& aListener,
+ CubebUtils::AudioDeviceID aDevice) {
+ ASSERT_TRUE(!!aCurrentStream);
+ ASSERT_TRUE(aCurrentStream->mHasInput);
+ ASSERT_TRUE(!aTrack);
+ ASSERT_TRUE(!!aListener);
+
+ bool destroyed = false;
+ MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
+ AbstractThread::GetCurrent(),
+ [&](const RefPtr<SmartMockCubebStream>& aDestroyed) {
+ destroyed = aDestroyed.get() == aCurrentStream.get();
+ });
+
+ RefPtr<SmartMockCubebStream> newStream;
+ MediaEventListener restartListener = cubeb->StreamInitEvent().Connect(
+ AbstractThread::GetCurrent(),
+ [&](const RefPtr<SmartMockCubebStream>& aCreated) {
+ newStream = aCreated;
+ });
+
+ aTrack = TestDeviceInputConsumerTrack::Create(graphImpl);
+ aTrack->ConnectDeviceInput(aDevice, aListener.get(), PRINCIPAL_HANDLE_NONE);
+
+ SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+ "TEST(TestAudioTrackGraph, RestartAudioIfMaxChannelCountChanged) #2"_ns,
+ [&] { return destroyed && newStream; });
+
+ destroyListener.Disconnect();
+ restartListener.Disconnect();
+
+ aCurrentStream = newStream;
+ };
+
+ // Test for the native input device first then non-native device. The
+ // non-native device will be destroyed before the native device in case of
+ // causing a driver switching.
+
+ // Test for the native device.
+ const CubebUtils::AudioDeviceID nativeDevice = (CubebUtils::AudioDeviceID)1;
+ RefPtr<TestDeviceInputConsumerTrack> track1;
+ RefPtr<TestAudioDataListener> listener1;
+ RefPtr<SmartMockCubebStream> nativeStream;
+ RefPtr<TestDeviceInputConsumerTrack> track2;
+ RefPtr<TestAudioDataListener> listener2;
+ {
+ // Open a 1-channel NativeInputTrack.
+ listener1 = new TestAudioDataListener(1, false);
+ track1 = TestDeviceInputConsumerTrack::Create(graphImpl);
+ track1->ConnectDeviceInput(nativeDevice, listener1.get(),
+ PRINCIPAL_HANDLE_NONE);
+
+ EXPECT_TRUE(track1->ConnectToNativeDevice());
+ EXPECT_FALSE(track1->ConnectToNonNativeDevice());
+ auto started =
+ Invoke([&] { return graphImpl->NotifyWhenDeviceStarted(nullptr); });
+ nativeStream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(nativeStream->mHasInput);
+ EXPECT_TRUE(nativeStream->mHasOutput);
+ EXPECT_EQ(nativeStream->GetInputDeviceID(), nativeDevice);
+ Unused << WaitFor(started);
+
+ // Open a 2-channel NativeInputTrack and wait for a new driver since the
+ // max-channel for the native device becomes 2 now.
+ listener2 = new TestAudioDataListener(2, false);
+ openTrack(nativeStream, track2, listener2, nativeDevice);
+ EXPECT_EQ(nativeStream->InputChannels(), 2U);
+
+ // Set the second NativeInputTrack to 1-channel and wait for a new driver
+ // since the max-channel for the native device becomes 1 now.
+ setNewChannelCount(listener2, nativeStream, 1);
+ EXPECT_EQ(nativeStream->InputChannels(), 1U);
+
+ // Set the first NativeInputTrack to 2-channel and wait for a new driver
+ // since the max input channel for the native device becomes 2 now.
+ setNewChannelCount(listener1, nativeStream, 2);
+ EXPECT_EQ(nativeStream->InputChannels(), 2U);
+ }
+
+ // Test for the non-native device.
+ {
+ const CubebUtils::AudioDeviceID nonNativeDevice =
+ (CubebUtils::AudioDeviceID)2;
+
+ // Open a 1-channel NonNativeInputTrack.
+ RefPtr<TestAudioDataListener> listener3 =
+ new TestAudioDataListener(1, false);
+ RefPtr<TestDeviceInputConsumerTrack> track3 =
+ TestDeviceInputConsumerTrack::Create(graphImpl);
+ track3->ConnectDeviceInput(nonNativeDevice, listener3.get(),
+ PRINCIPAL_HANDLE_NONE);
+ EXPECT_FALSE(track3->ConnectToNativeDevice());
+ EXPECT_TRUE(track3->ConnectToNonNativeDevice());
+
+ RefPtr<SmartMockCubebStream> nonNativeStream =
+ WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(nonNativeStream->mHasInput);
+ EXPECT_FALSE(nonNativeStream->mHasOutput);
+ EXPECT_EQ(nonNativeStream->GetInputDeviceID(), nonNativeDevice);
+ EXPECT_EQ(nonNativeStream->InputChannels(), 1U);
+
+ // Open a 2-channel NonNativeInputTrack and wait for a new stream since
+ // the max-channel for the non-native device becomes 2 now.
+ RefPtr<TestAudioDataListener> listener4 =
+ new TestAudioDataListener(2, false);
+ RefPtr<TestDeviceInputConsumerTrack> track4;
+ openTrack(nonNativeStream, track4, listener4, nonNativeDevice);
+ EXPECT_EQ(nonNativeStream->InputChannels(), 2U);
+ EXPECT_EQ(nonNativeStream->GetInputDeviceID(), nonNativeDevice);
+
+ // Set the second NonNativeInputTrack to 1-channel and wait for a new
+ // driver since the max-channel for the non-native device becomes 1 now.
+ setNewChannelCount(listener4, nonNativeStream, 1);
+ EXPECT_EQ(nonNativeStream->InputChannels(), 1U);
+
+ // Set the first NonNativeInputTrack to 2-channel and wait for a new
+ // driver since the max input channel for the non-native device becomes 2
+ // now.
+ setNewChannelCount(listener3, nonNativeStream, 2);
+ EXPECT_EQ(nonNativeStream->InputChannels(), 2U);
+
+ // Close the second NonNativeInputTrack (1-channel) then the first one
+ // (2-channel) so we won't result in another stream creation.
+ DispatchFunction([&] {
+ track4->DisconnectDeviceInput();
+ track4->Destroy();
+ });
+ DispatchFunction([&] {
+ track3->DisconnectDeviceInput();
+ track3->Destroy();
+ });
+ RefPtr<SmartMockCubebStream> destroyedStream =
+ WaitFor(cubeb->StreamDestroyEvent());
+ EXPECT_EQ(destroyedStream.get(), nonNativeStream.get());
+ }
+
+ // Tear down for the native device.
+ {
+ // Close the second NativeInputTrack (1-channel) then the first one
+ // (2-channel) so we won't have driver switching.
+ DispatchFunction([&] {
+ track2->DisconnectDeviceInput();
+ track2->Destroy();
+ });
+ DispatchFunction([&] {
+ track1->DisconnectDeviceInput();
+ track1->Destroy();
+ });
+ RefPtr<SmartMockCubebStream> destroyedStream =
+ WaitFor(cubeb->StreamDestroyEvent());
+ EXPECT_EQ(destroyedStream.get(), nativeStream.get());
+ }
+}
+
+// This test is pretty similar to SwitchNativeAudioProcessingTrack below, which
+// tests the same thing but using AudioProcessingTrack. AudioProcessingTrack is
+// the consumer of the DeviceInputTrack used in the wild. It has its own
+// customized AudioDataListener. However, it is only tested when MOZ_WEBRTC is
+// defined.
+TEST(TestAudioTrackGraph, SwitchNativeInputDevice)
+{
+  // Open consumer tracks on three devices, then close them one at a time and
+  // check that the graph promotes the next open device to be the native input
+  // (full-duplex) device each time.
+
+  // Minimal AudioDataListener: reports a fixed requested channel count and
+  // voice-ness, and counts DeviceChanged() notifications.
+  class TestAudioDataListener : public AudioDataListener {
+   public:
+    TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice)
+        : mChannelCount(aChannelCount),
+          mIsVoice(aIsVoice),
+          mDeviceChangedCount(0) {}
+
+    uint32_t RequestedInputChannelCount(MediaTrackGraph* aGraph) override {
+      return mChannelCount;
+    }
+    bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
+      return mIsVoice;
+    };
+    void DeviceChanged(MediaTrackGraph* aGraph) override {
+      ++mDeviceChangedCount;
+    }
+    void Disconnect(MediaTrackGraph* aGraph) override{/* Ignored */};
+    uint32_t DeviceChangedCount() { return mDeviceChangedCount; }
+
+   private:
+    ~TestAudioDataListener() = default;
+    const uint32_t mChannelCount;
+    const bool mIsVoice;
+    // Atomic: DeviceChanged() fires on the graph thread while
+    // DeviceChangedCount() may be read on the main thread.
+    std::atomic<uint32_t> mDeviceChangedCount;
+  };
+
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  // Close the current native device's track, then wait until (1) both the old
+  // native stream and the next device's non-native stream have been destroyed
+  // (destroyed >= 2), and (2) a new stream with input (and, asserted, output)
+  // has been created for the next device. That new stream becomes the new
+  // native stream, returned through aNextNativeStream.
+  auto switchNativeDevice =
+      [&](RefPtr<SmartMockCubebStream>&& aCurrentNativeStream,
+          RefPtr<TestDeviceInputConsumerTrack>& aCurrentNativeTrack,
+          RefPtr<SmartMockCubebStream>& aNextNativeStream,
+          RefPtr<TestDeviceInputConsumerTrack>& aNextNativeTrack) {
+        ASSERT_TRUE(aCurrentNativeStream->mHasInput);
+        ASSERT_TRUE(aCurrentNativeStream->mHasOutput);
+        ASSERT_TRUE(aNextNativeStream->mHasInput);
+        ASSERT_FALSE(aNextNativeStream->mHasOutput);
+
+        std::cerr << "Switching native input from device "
+                  << aCurrentNativeStream->GetInputDeviceID() << " to "
+                  << aNextNativeStream->GetInputDeviceID() << std::endl;
+
+        uint32_t destroyed = 0;
+        MediaEventListener destroyListener =
+            cubeb->StreamDestroyEvent().Connect(
+                AbstractThread::GetCurrent(),
+                [&](const RefPtr<SmartMockCubebStream>& aDestroyed) {
+                  if (aDestroyed.get() == aCurrentNativeStream.get() ||
+                      aDestroyed.get() == aNextNativeStream.get()) {
+                    std::cerr << "cubeb stream " << aDestroyed.get()
+                              << " (device " << aDestroyed->GetInputDeviceID()
+                              << ") has been destroyed" << std::endl;
+                    destroyed += 1;
+                  }
+                });
+
+        RefPtr<SmartMockCubebStream> newStream;
+        MediaEventListener restartListener = cubeb->StreamInitEvent().Connect(
+            AbstractThread::GetCurrent(),
+            [&](const RefPtr<SmartMockCubebStream>& aCreated) {
+              // Make sure new stream has input, to prevent from getting a
+              // temporary output-only AudioCallbackDriver after closing current
+              // native device but before setting a new native input.
+              if (aCreated->mHasInput) {
+                ASSERT_TRUE(aCreated->mHasOutput);
+                newStream = aCreated;
+              }
+            });
+
+        std::cerr << "Close device " << aCurrentNativeStream->GetInputDeviceID()
+                  << std::endl;
+        DispatchFunction([&] {
+          aCurrentNativeTrack->DisconnectDeviceInput();
+          aCurrentNativeTrack->Destroy();
+        });
+
+        std::cerr << "Wait for the switching" << std::endl;
+        SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+            "TEST(TestAudioTrackGraph, SwitchNativeInputDevice)"_ns,
+            [&] { return destroyed >= 2 && newStream; });
+
+        destroyListener.Disconnect();
+        restartListener.Disconnect();
+
+        aCurrentNativeStream = nullptr;
+        aNextNativeStream = newStream;
+
+        std::cerr << "Now the native input is device "
+                  << aNextNativeStream->GetInputDeviceID() << std::endl;
+      };
+
+  // Open a DeviceInputConsumerTrack for device 1.
+  const CubebUtils::AudioDeviceID device1 = (CubebUtils::AudioDeviceID)1;
+  RefPtr<TestDeviceInputConsumerTrack> track1 =
+      TestDeviceInputConsumerTrack::Create(graph);
+  RefPtr<TestAudioDataListener> listener1 = new TestAudioDataListener(1, false);
+  track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
+  EXPECT_EQ(track1->DeviceId().value(), device1);
+
+  auto started =
+      Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
+
+  // Device 1 is the first opened device, so it gets the native, full-duplex
+  // stream.
+  RefPtr<SmartMockCubebStream> stream1 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream1->mHasInput);
+  EXPECT_TRUE(stream1->mHasOutput);
+  EXPECT_EQ(stream1->InputChannels(), 1U);
+  EXPECT_EQ(stream1->GetInputDeviceID(), device1);
+  Unused << WaitFor(started);
+  std::cerr << "Device " << device1 << " is opened (stream " << stream1.get()
+            << ")" << std::endl;
+
+  // Open a DeviceInputConsumerTrack for device 2 (non-native: input-only).
+  const CubebUtils::AudioDeviceID device2 = (CubebUtils::AudioDeviceID)2;
+  RefPtr<TestDeviceInputConsumerTrack> track2 =
+      TestDeviceInputConsumerTrack::Create(graph);
+  RefPtr<TestAudioDataListener> listener2 = new TestAudioDataListener(2, false);
+  track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
+  EXPECT_EQ(track2->DeviceId().value(), device2);
+
+  RefPtr<SmartMockCubebStream> stream2 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream2->mHasInput);
+  EXPECT_FALSE(stream2->mHasOutput);
+  EXPECT_EQ(stream2->InputChannels(), 2U);
+  EXPECT_EQ(stream2->GetInputDeviceID(), device2);
+  std::cerr << "Device " << device2 << " is opened (stream " << stream2.get()
+            << ")" << std::endl;
+
+  // Open a DeviceInputConsumerTrack for device 3 (non-native: input-only).
+  const CubebUtils::AudioDeviceID device3 = (CubebUtils::AudioDeviceID)3;
+  RefPtr<TestDeviceInputConsumerTrack> track3 =
+      TestDeviceInputConsumerTrack::Create(graph);
+  RefPtr<TestAudioDataListener> listener3 = new TestAudioDataListener(1, false);
+  track3->ConnectDeviceInput(device3, listener3, PRINCIPAL_HANDLE_NONE);
+  EXPECT_EQ(track3->DeviceId().value(), device3);
+
+  RefPtr<SmartMockCubebStream> stream3 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream3->mHasInput);
+  EXPECT_FALSE(stream3->mHasOutput);
+  EXPECT_EQ(stream3->InputChannels(), 1U);
+  EXPECT_EQ(stream3->GetInputDeviceID(), device3);
+  std::cerr << "Device " << device3 << " is opened (stream " << stream3.get()
+            << ")" << std::endl;
+
+  // Close device 1, so the native input device is switched from device 1 to
+  // device 2.
+  switchNativeDevice(std::move(stream1), track1, stream2, track2);
+  EXPECT_TRUE(stream2->mHasInput);
+  EXPECT_TRUE(stream2->mHasOutput);
+  EXPECT_EQ(stream2->InputChannels(), 2U);
+  EXPECT_EQ(stream2->GetInputDeviceID(), device2);
+  {
+    NativeInputTrack* native = track2->Graph()->GetNativeInputTrackMainThread();
+    ASSERT_TRUE(!!native);
+    EXPECT_EQ(native->mDeviceId, device2);
+  }
+
+  // Close device 2, so the native input device is switched from device 2 to
+  // device 3.
+  switchNativeDevice(std::move(stream2), track2, stream3, track3);
+  EXPECT_TRUE(stream3->mHasInput);
+  EXPECT_TRUE(stream3->mHasOutput);
+  EXPECT_EQ(stream3->InputChannels(), 1U);
+  EXPECT_EQ(stream3->GetInputDeviceID(), device3);
+  {
+    NativeInputTrack* native = track3->Graph()->GetNativeInputTrackMainThread();
+    ASSERT_TRUE(!!native);
+    EXPECT_EQ(native->mDeviceId, device3);
+  }
+
+  // Clean up. Closing the last device leaves the graph with no native input.
+  std::cerr << "Close device " << device3 << std::endl;
+  DispatchFunction([&] {
+    track3->DisconnectDeviceInput();
+    track3->Destroy();
+  });
+  RefPtr<SmartMockCubebStream> destroyedStream =
+      WaitFor(cubeb->StreamDestroyEvent());
+  EXPECT_EQ(destroyedStream.get(), stream3.get());
+  {
+    NativeInputTrack* native = graph->GetNativeInputTrackMainThread();
+    ASSERT_TRUE(!native);
+  }
+  std::cerr << "No native input now" << std::endl;
+}
+
+#ifdef MOZ_WEBRTC
+TEST(TestAudioTrackGraph, ErrorCallback)
+{
+  // Checks that a forced cubeb state_callback error does not crash: a
+  // replacement cubeb_stream is created and receives output from the graph.
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+
+  // Dummy track to make graph rolling. Add it and remove it to remove the
+  // graph from the global hash table and let it shutdown.
+  //
+  // We open an input through this track so that there's something triggering
+  // EnsureNextIteration on the fallback driver after the callback driver has
+  // gotten the error, and to check that a replacement cubeb_stream receives
+  // output from the graph.
+  RefPtr<AudioProcessingTrack> processingTrack;
+  RefPtr<AudioInputProcessing> listener;
+  auto started = Invoke([&] {
+    processingTrack = AudioProcessingTrack::Create(graph);
+    listener = new AudioInputProcessing(2);
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<SetPassThrough>(processingTrack, listener, true));
+    processingTrack->SetInputProcessing(listener);
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(processingTrack, listener));
+    processingTrack->ConnectDeviceInput(deviceId, listener,
+                                        PRINCIPAL_HANDLE_NONE);
+    EXPECT_EQ(processingTrack->DeviceId().value(), deviceId);
+    processingTrack->AddAudioOutput(reinterpret_cast<void*>(1), nullptr);
+    return graph->NotifyWhenDeviceStarted(nullptr);
+  });
+
+  RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+  Result<bool, nsresult> rv = WaitFor(started);
+  EXPECT_TRUE(rv.unwrapOr(false));
+
+  // Force a cubeb state_callback error and see that we don't crash.
+  DispatchFunction([&] { stream->ForceError(); });
+
+  // Wait for the error to take effect, and the driver to restart and receive
+  // output.
+  bool errored = false;
+  MediaEventListener errorListener = stream->ErrorForcedEvent().Connect(
+      AbstractThread::GetCurrent(), [&] { errored = true; });
+  // The replacement stream for the errored one.
+  stream = WaitFor(cubeb->StreamInitEvent());
+  WaitFor(stream->FramesVerifiedEvent());
+  // The error event is notified after CUBEB_STATE_ERROR triggers other
+  // threads to init a new cubeb_stream, so there is a theoretical chance that
+  // `errored` might not be set when `stream` is set.
+  errorListener.Disconnect();
+  EXPECT_TRUE(errored);
+
+  // Clean up.
+  DispatchFunction([&] {
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(processingTrack, listener));
+    processingTrack->DisconnectDeviceInput();
+    processingTrack->Destroy();
+  });
+  WaitFor(cubeb->StreamDestroyEvent());
+}
+
+TEST(TestAudioTrackGraph, AudioProcessingTrack)
+{
+  // Routes a full-duplex AudioProcessingTrack (pass-through) to an output and
+  // verifies the output waveform: its estimated frequency must match the
+  // mock input tone, with bounded pre-silence and discontinuities.
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+  auto unforcer = WaitFor(cubeb->ForceAudioThread()).unwrap();
+  Unused << unforcer;
+
+  // Start on a system clock driver, then switch to full-duplex in one go. If we
+  // did output-then-full-duplex we'd risk a second NotifyWhenDeviceStarted
+  // resolving early after checking the first audio driver only.
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+
+  RefPtr<AudioProcessingTrack> processingTrack;
+  RefPtr<ProcessedMediaTrack> outputTrack;
+  RefPtr<MediaInputPort> port;
+  RefPtr<AudioInputProcessing> listener;
+  auto p = Invoke([&] {
+    processingTrack = AudioProcessingTrack::Create(graph);
+    outputTrack = graph->CreateForwardedInputTrack(MediaSegment::AUDIO);
+    outputTrack->QueueSetAutoend(false);
+    outputTrack->AddAudioOutput(reinterpret_cast<void*>(1), nullptr);
+    port = outputTrack->AllocateInputPort(processingTrack);
+    /* Primary graph: Open Audio Input through SourceMediaTrack */
+    listener = new AudioInputProcessing(2);
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<SetPassThrough>(processingTrack, listener, true));
+    processingTrack->SetInputProcessing(listener);
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(processingTrack, listener));
+    // Device id does not matter. Ignore.
+    processingTrack->ConnectDeviceInput(deviceId, listener,
+                                        PRINCIPAL_HANDLE_NONE);
+    return graph->NotifyWhenDeviceStarted(nullptr);
+  });
+
+  RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream->mHasInput);
+  Unused << WaitFor(p);
+
+  // Wait for a second worth of audio data. GoFaster is dispatched through a
+  // ControlMessage so that it is called in the first audio driver iteration.
+  // Otherwise the audio driver might be going very fast while the fallback
+  // system clock driver is still in an iteration.
+  DispatchFunction([&] {
+    processingTrack->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb));
+  });
+  uint32_t totalFrames = 0;
+  WaitUntil(stream->FramesVerifiedEvent(), [&](uint32_t aFrames) {
+    totalFrames += aFrames;
+    return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+  });
+  cubeb->DontGoFaster();
+
+  // Clean up.
+  DispatchFunction([&] {
+    outputTrack->RemoveAudioOutput((void*)1);
+    outputTrack->Destroy();
+    port->Destroy();
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(processingTrack, listener));
+    processingTrack->DisconnectDeviceInput();
+    processingTrack->Destroy();
+  });
+
+  // OutputVerificationEvent fires once the stream is destroyed, summarizing
+  // the output the mock stream observed.
+  uint32_t inputRate = stream->SampleRate();
+  uint32_t inputFrequency = stream->InputFrequency();
+  uint64_t preSilenceSamples;
+  uint32_t estimatedFreq;
+  uint32_t nrDiscontinuities;
+  std::tie(preSilenceSamples, estimatedFreq, nrDiscontinuities) =
+      WaitFor(stream->OutputVerificationEvent());
+
+  EXPECT_EQ(estimatedFreq, inputFrequency);
+  std::cerr << "PreSilence: " << preSilenceSamples << std::endl;
+  // We buffer 128 frames. See DeviceInputTrack::ProcessInput.
+  EXPECT_GE(preSilenceSamples, 128U);
+  // If the fallback system clock driver is doing a graph iteration before the
+  // first audio driver iteration comes in, that iteration is ignored and
+  // results in zeros. It takes one fallback driver iteration *after* the audio
+  // driver has started to complete the switch, *usually* resulting two
+  // 10ms-iterations of silence; sometimes only one.
+  EXPECT_LE(preSilenceSamples, 128U + 2 * inputRate / 100 /* 2*10ms */);
+  // The waveform from AudioGenerator starts at 0, but we don't control its
+  // ending, so we expect a discontinuity there.
+  EXPECT_LE(nrDiscontinuities, 1U);
+}
+
+TEST(TestAudioTrackGraph, ReConnectDeviceInput)
+{
+  // Disconnects and reconnects the device input while the graph keeps
+  // running, checking that no asserts fire and the reconnected output
+  // verifies cleanly.
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  // 48k is a native processing rate, and avoids a resampling pass compared
+  // to 44.1k. The resampler may take a few frames to stabilize, which show
+  // as unexpected discontinuities in the test.
+  const TrackRate rate = 48000;
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1, rate, nullptr,
+      GetMainThreadSerialEventTarget());
+
+  const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+
+  RefPtr<AudioProcessingTrack> processingTrack;
+  RefPtr<ProcessedMediaTrack> outputTrack;
+  RefPtr<MediaInputPort> port;
+  RefPtr<AudioInputProcessing> listener;
+  auto p = Invoke([&] {
+    processingTrack = AudioProcessingTrack::Create(graph);
+    outputTrack = graph->CreateForwardedInputTrack(MediaSegment::AUDIO);
+    outputTrack->QueueSetAutoend(false);
+    outputTrack->AddAudioOutput(reinterpret_cast<void*>(1), nullptr);
+    port = outputTrack->AllocateInputPort(processingTrack);
+    // Note: not set to pass-through here, unlike most other tests in this
+    // file, so the processing pipeline buffers are exercised.
+    listener = new AudioInputProcessing(2);
+    processingTrack->SetInputProcessing(listener);
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(processingTrack, listener));
+    processingTrack->ConnectDeviceInput(deviceId, listener,
+                                        PRINCIPAL_HANDLE_NONE);
+    return graph->NotifyWhenDeviceStarted(nullptr);
+  });
+
+  RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream->mHasInput);
+  Unused << WaitFor(p);
+
+  // Set a drift factor so that we don't produce perfect 10ms-chunks. This
+  // will exercise whatever buffers are in the audio processing pipeline, and
+  // the bookkeeping surrounding them.
+  stream->SetDriftFactor(1.111);
+
+  // Wait for a second worth of audio data. GoFaster is dispatched through a
+  // ControlMessage so that it is called in the first audio driver iteration.
+  // Otherwise the audio driver might be going very fast while the fallback
+  // system clock driver is still in an iteration.
+  DispatchFunction([&] {
+    processingTrack->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb));
+  });
+  {
+    uint32_t totalFrames = 0;
+    WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+      totalFrames += aFrames;
+      return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+    });
+  }
+  cubeb->DontGoFaster();
+
+  // Close the input to see that no asserts go off due to bad state.
+  DispatchFunction([&] { processingTrack->DisconnectDeviceInput(); });
+
+  // Disconnecting the input creates a new, output-only stream.
+  stream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_FALSE(stream->mHasInput);
+  Unused << WaitFor(
+      Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); }));
+
+  // Output-only. Wait for another second before unmuting.
+  DispatchFunction([&] {
+    processingTrack->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb));
+  });
+  {
+    uint32_t totalFrames = 0;
+    WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+      totalFrames += aFrames;
+      return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+    });
+  }
+  cubeb->DontGoFaster();
+
+  // Re-open the input to again see that no asserts go off due to bad state.
+  DispatchFunction([&] {
+    // Device id does not matter. Ignore.
+    processingTrack->ConnectDeviceInput(deviceId, listener,
+                                        PRINCIPAL_HANDLE_NONE);
+  });
+
+  // Reconnecting the input creates a new, full-duplex stream.
+  stream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream->mHasInput);
+  Unused << WaitFor(
+      Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); }));
+
+  // Full-duplex. Wait for another second before finishing.
+  DispatchFunction([&] {
+    processingTrack->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb));
+  });
+  {
+    uint32_t totalFrames = 0;
+    WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+      totalFrames += aFrames;
+      return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+    });
+  }
+  cubeb->DontGoFaster();
+
+  // Clean up.
+  DispatchFunction([&] {
+    outputTrack->RemoveAudioOutput((void*)1);
+    outputTrack->Destroy();
+    port->Destroy();
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(processingTrack, listener));
+    processingTrack->DisconnectDeviceInput();
+    processingTrack->Destroy();
+  });
+
+  uint32_t inputRate = stream->SampleRate();
+  uint32_t inputFrequency = stream->InputFrequency();
+  uint64_t preSilenceSamples;
+  uint32_t estimatedFreq;
+  uint32_t nrDiscontinuities;
+  std::tie(preSilenceSamples, estimatedFreq, nrDiscontinuities) =
+      WaitFor(stream->OutputVerificationEvent());
+
+  EXPECT_EQ(estimatedFreq, inputFrequency);
+  std::cerr << "PreSilence: " << preSilenceSamples << std::endl;
+  // We buffer 10ms worth of frames in non-passthrough mode, plus up to 128
+  // frames as we round up to the nearest block. See
+  // AudioInputProcessing::Process and DeviceInputTrack::ProcessInput.
+  EXPECT_GE(preSilenceSamples, 128U + inputRate / 100);
+  // If the fallback system clock driver is doing a graph iteration before the
+  // first audio driver iteration comes in, that iteration is ignored and
+  // results in zeros. It takes one fallback driver iteration *after* the audio
+  // driver has started to complete the switch, *usually* resulting two
+  // 10ms-iterations of silence; sometimes only one.
+  EXPECT_LE(preSilenceSamples, 128U + 3 * inputRate / 100 /* 3*10ms */);
+  // The waveform from AudioGenerator starts at 0, but we don't control its
+  // ending, so we expect a discontinuity there. Note that this check is only
+  // for the waveform on the stream *after* re-opening the input.
+  EXPECT_LE(nrDiscontinuities, 1U);
+}
+
+// Sum the signal to mono and compute the root mean square, in float32,
+// regardless of the input format.
+float rmsf32(AudioDataValue* aSamples, uint32_t aChannels, uint32_t aFrames) {
+  float sumOfSquares = 0.;
+  const AudioDataValue* cursor = aSamples;
+  for (uint32_t frame = 0; frame < aFrames; ++frame) {
+    // Downmix this frame to mono by summing its channels.
+    float monoSample = 0.;
+    for (uint32_t channel = 0; channel < aChannels; ++channel) {
+      monoSample += AudioSampleToFloat(*cursor++);
+    }
+    sumOfSquares += monoSample * monoSample;
+  }
+  return sqrt(sumOfSquares / aFrames);
+}
+
+TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
+{
+  // Toggles the track's DisabledTrackMode every second and checks the
+  // recorded output alternates between non-silence and silence accordingly.
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+
+  RefPtr<AudioProcessingTrack> processingTrack;
+  RefPtr<ProcessedMediaTrack> outputTrack;
+  RefPtr<MediaInputPort> port;
+  RefPtr<AudioInputProcessing> listener;
+  auto p = Invoke([&] {
+    processingTrack = AudioProcessingTrack::Create(graph);
+    outputTrack = graph->CreateForwardedInputTrack(MediaSegment::AUDIO);
+    outputTrack->QueueSetAutoend(false);
+    outputTrack->AddAudioOutput(reinterpret_cast<void*>(1), nullptr);
+    port = outputTrack->AllocateInputPort(processingTrack);
+    /* Primary graph: Open Audio Input through SourceMediaTrack */
+    listener = new AudioInputProcessing(2);
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<SetPassThrough>(processingTrack, listener, true));
+    processingTrack->SetInputProcessing(listener);
+    processingTrack->ConnectDeviceInput(deviceId, listener,
+                                        PRINCIPAL_HANDLE_NONE);
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(processingTrack, listener));
+    return graph->NotifyWhenDeviceStarted(nullptr);
+  });
+
+  RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream->mHasInput);
+  Unused << WaitFor(p);
+
+  // Record the output so it can be inspected for silence after the fact.
+  stream->SetOutputRecordingEnabled(true);
+
+  // Wait for a second worth of audio data.
+  uint32_t totalFrames = 0;
+  WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+    totalFrames += aFrames;
+    return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+  });
+
+  const uint32_t ITERATION_COUNT = 5;
+  uint32_t iterations = ITERATION_COUNT;
+  DisabledTrackMode currentMode = DisabledTrackMode::SILENCE_BLACK;
+  while (iterations--) {
+    // toggle the track enabled mode, wait a second, do this ITERATION_COUNT
+    // times
+    DispatchFunction([&] {
+      processingTrack->SetDisabledTrackMode(currentMode);
+      if (currentMode == DisabledTrackMode::SILENCE_BLACK) {
+        currentMode = DisabledTrackMode::ENABLED;
+      } else {
+        currentMode = DisabledTrackMode::SILENCE_BLACK;
+      }
+    });
+
+    totalFrames = 0;
+    WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+      totalFrames += aFrames;
+      return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+    });
+  }
+
+  // Clean up.
+  DispatchFunction([&] {
+    outputTrack->RemoveAudioOutput((void*)1);
+    outputTrack->Destroy();
+    port->Destroy();
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(processingTrack, listener));
+    processingTrack->DisconnectDeviceInput();
+    processingTrack->Destroy();
+  });
+
+  uint64_t preSilenceSamples;
+  uint32_t estimatedFreq;
+  uint32_t nrDiscontinuities;
+  std::tie(preSilenceSamples, estimatedFreq, nrDiscontinuities) =
+      WaitFor(stream->OutputVerificationEvent());
+
+  auto data = stream->TakeRecordedOutput();
+
+  // check that there is non-silence and silence at the expected time in the
+  // stereo recording, while allowing for a bit of scheduling uncertainty, by
+  // checking half a second after the theoretical muting/unmuting.
+  // non-silence starts around: 0s, 2s, 4s
+  // silence start around: 1s, 3s, 5s
+  // To detect silence or non-silence, we compute the RMS of the signal for
+  // 100ms.
+  float noisyTime_s[] = {0.5, 2.5, 4.5};
+  float silenceTime_s[] = {1.5, 3.5, 5.5};
+
+  uint32_t rate = graph->GraphRate();
+  for (float& time : noisyTime_s) {
+    uint32_t startIdx = time * rate * 2 /* stereo */;
+    EXPECT_NE(rmsf32(&(data[startIdx]), 2, rate / 10), 0.0);
+  }
+
+  for (float& time : silenceTime_s) {
+    uint32_t startIdx = time * rate * 2 /* stereo */;
+    EXPECT_EQ(rmsf32(&(data[startIdx]), 2, rate / 10), 0.0);
+  }
+}
+
+TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
+{
+  // Changing the requested input channel count of a native or non-native
+  // device must destroy its current cubeb stream and create a new one with
+  // the requested channel count.
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  // Open a 2-channel native input stream.
+  const CubebUtils::AudioDeviceID device1 = (CubebUtils::AudioDeviceID)1;
+  RefPtr<AudioProcessingTrack> track1 = AudioProcessingTrack::Create(graph);
+  RefPtr<AudioInputProcessing> listener1 = new AudioInputProcessing(2);
+  track1->SetInputProcessing(listener1);
+  track1->GraphImpl()->AppendMessage(
+      MakeUnique<SetPassThrough>(track1, listener1, true));
+  track1->GraphImpl()->AppendMessage(
+      MakeUnique<StartInputProcessing>(track1, listener1));
+  track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
+  EXPECT_EQ(track1->DeviceId().value(), device1);
+
+  auto started =
+      Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
+
+  RefPtr<SmartMockCubebStream> stream1 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream1->mHasInput);
+  EXPECT_TRUE(stream1->mHasOutput);
+  EXPECT_EQ(stream1->InputChannels(), 2U);
+  EXPECT_EQ(stream1->GetInputDeviceID(), device1);
+  Unused << WaitFor(started);
+
+  // Open a 1-channel non-native input stream.
+  const CubebUtils::AudioDeviceID device2 = (CubebUtils::AudioDeviceID)2;
+  RefPtr<AudioProcessingTrack> track2 = AudioProcessingTrack::Create(graph);
+  RefPtr<AudioInputProcessing> listener2 = new AudioInputProcessing(1);
+  track2->SetInputProcessing(listener2);
+  track2->GraphImpl()->AppendMessage(
+      MakeUnique<SetPassThrough>(track2, listener2, true));
+  track2->GraphImpl()->AppendMessage(
+      MakeUnique<StartInputProcessing>(track2, listener2));
+  track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
+  EXPECT_EQ(track2->DeviceId().value(), device2);
+
+  RefPtr<SmartMockCubebStream> stream2 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream2->mHasInput);
+  EXPECT_FALSE(stream2->mHasOutput);
+  EXPECT_EQ(stream2->InputChannels(), 1U);
+  EXPECT_EQ(stream2->GetInputDeviceID(), device2);
+
+  // Request a new input channel count. This should re-create new input stream
+  // accordingly. Waits until the old stream is destroyed and a new one is
+  // created, then hands the new stream back through aStream.
+  auto setNewChannelCount = [&](const RefPtr<AudioProcessingTrack> aTrack,
+                                const RefPtr<AudioInputProcessing>& aListener,
+                                RefPtr<SmartMockCubebStream>& aStream,
+                                uint32_t aChannelCount) {
+    bool destroyed = false;
+    MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
+        AbstractThread::GetCurrent(),
+        [&](const RefPtr<SmartMockCubebStream>& aDestroyed) {
+          destroyed = aDestroyed.get() == aStream.get();
+        });
+
+    RefPtr<SmartMockCubebStream> newStream;
+    MediaEventListener restartListener = cubeb->StreamInitEvent().Connect(
+        AbstractThread::GetCurrent(),
+        [&](const RefPtr<SmartMockCubebStream>& aCreated) {
+          newStream = aCreated;
+        });
+
+    DispatchFunction([&] {
+      aTrack->GraphImpl()->AppendMessage(
+          MakeUnique<SetRequestedInputChannelCount>(aTrack, *aTrack->DeviceId(),
+                                                    aListener, aChannelCount));
+    });
+
+    SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+        "TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)"_ns,
+        [&] { return destroyed && newStream; });
+
+    destroyListener.Disconnect();
+    restartListener.Disconnect();
+
+    aStream = newStream;
+  };
+
+  // Set the native input stream's input channel count to 1.
+  setNewChannelCount(track1, listener1, stream1, 1);
+  EXPECT_TRUE(stream1->mHasInput);
+  EXPECT_TRUE(stream1->mHasOutput);
+  EXPECT_EQ(stream1->InputChannels(), 1U);
+  EXPECT_EQ(stream1->GetInputDeviceID(), device1);
+
+  // Set the non-native input stream's input channel count to 2.
+  setNewChannelCount(track2, listener2, stream2, 2);
+  EXPECT_TRUE(stream2->mHasInput);
+  EXPECT_FALSE(stream2->mHasOutput);
+  EXPECT_EQ(stream2->InputChannels(), 2U);
+  EXPECT_EQ(stream2->GetInputDeviceID(), device2);
+
+  // Close the non-native input stream.
+  DispatchFunction([&] {
+    track2->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(track2, listener2));
+    track2->DisconnectDeviceInput();
+    track2->Destroy();
+  });
+  RefPtr<SmartMockCubebStream> destroyed = WaitFor(cubeb->StreamDestroyEvent());
+  EXPECT_EQ(destroyed.get(), stream2.get());
+
+  // Close the native input stream.
+  DispatchFunction([&] {
+    track1->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(track1, listener1));
+    track1->DisconnectDeviceInput();
+    track1->Destroy();
+  });
+  destroyed = WaitFor(cubeb->StreamDestroyEvent());
+  EXPECT_EQ(destroyed.get(), stream1.get());
+}
+
+// The input channel count of the native audio stream (a.k.a. GraphDriver) and
+// of the non-native audio stream should always match the max requested input
+// channel count of their paired AudioProcessingTracks. This test checks
+// whether the audio stream paired with the AudioProcessingTrack follows the
+// max requested input channel count or not.
+//
+// This test is pretty similar to RestartAudioIfMaxChannelCountChanged above,
+// which makes sure the related DeviceInputTrack operations for the test here
+// works correctly. Instead of using a test-only AudioDataListener, we use
+// AudioInputProcessing here to simulate the real world use case.
+TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+  auto unforcer = WaitFor(cubeb->ForceAudioThread()).unwrap();
+  Unused << unforcer;
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  // Request a new input channel count and expect to have a new stream.
+  auto setNewChannelCount = [&](const RefPtr<AudioProcessingTrack>& aTrack,
+                                const RefPtr<AudioInputProcessing>& aListener,
+                                RefPtr<SmartMockCubebStream>& aStream,
+                                uint32_t aChannelCount) {
+    ASSERT_TRUE(!!aTrack);
+    ASSERT_TRUE(!!aListener);
+    ASSERT_TRUE(!!aStream);
+    ASSERT_TRUE(aStream->mHasInput);
+    ASSERT_NE(aChannelCount, 0U);
+
+    const CubebUtils::AudioDeviceID device = *aTrack->DeviceId();
+
+    // A stream restart is observed as the destruction of the current stream
+    // plus the creation of a replacement; track both events.
+    bool destroyed = false;
+    MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
+        AbstractThread::GetCurrent(),
+        [&](const RefPtr<SmartMockCubebStream>& aDestroyed) {
+          destroyed = aDestroyed.get() == aStream.get();
+        });
+
+    RefPtr<SmartMockCubebStream> newStream;
+    MediaEventListener restartListener = cubeb->StreamInitEvent().Connect(
+        AbstractThread::GetCurrent(),
+        [&](const RefPtr<SmartMockCubebStream>& aCreated) {
+          newStream = aCreated;
+        });
+
+    DispatchFunction([&] {
+      aTrack->GraphImpl()->AppendMessage(
+          MakeUnique<SetRequestedInputChannelCount>(aTrack, device, aListener,
+                                                    aChannelCount));
+    });
+
+    // Wait until the old stream is gone and its replacement exists.
+    SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+        "TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged) #1"_ns,
+        [&] { return destroyed && newStream; });
+
+    destroyListener.Disconnect();
+    restartListener.Disconnect();
+
+    aStream = newStream;
+  };
+
+  // Open a new track and expect to have a new stream.
+  auto openTrack = [&](RefPtr<SmartMockCubebStream>& aCurrentStream,
+                       RefPtr<AudioProcessingTrack>& aTrack,
+                       RefPtr<AudioInputProcessing>& aListener,
+                       CubebUtils::AudioDeviceID aDevice,
+                       uint32_t aChannelCount) {
+    ASSERT_TRUE(!!aCurrentStream);
+    ASSERT_TRUE(aCurrentStream->mHasInput);
+    ASSERT_TRUE(aChannelCount > aCurrentStream->InputChannels());
+    ASSERT_TRUE(!aTrack);
+    ASSERT_TRUE(!aListener);
+
+    // As above, raising the max channel count for the device must tear down
+    // the current stream and create a new one.
+    bool destroyed = false;
+    MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
+        AbstractThread::GetCurrent(),
+        [&](const RefPtr<SmartMockCubebStream>& aDestroyed) {
+          destroyed = aDestroyed.get() == aCurrentStream.get();
+        });
+
+    RefPtr<SmartMockCubebStream> newStream;
+    MediaEventListener restartListener = cubeb->StreamInitEvent().Connect(
+        AbstractThread::GetCurrent(),
+        [&](const RefPtr<SmartMockCubebStream>& aCreated) {
+          newStream = aCreated;
+        });
+
+    // Set up the track and its processing listener, then connect the device
+    // input from a dispatched runnable.
+    aTrack = AudioProcessingTrack::Create(graph);
+    aListener = new AudioInputProcessing(aChannelCount);
+    aTrack->SetInputProcessing(aListener);
+    aTrack->GraphImpl()->AppendMessage(
+        MakeUnique<SetPassThrough>(aTrack, aListener, true));
+    aTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(aTrack, aListener));
+
+    DispatchFunction([&] {
+      aTrack->ConnectDeviceInput(aDevice, aListener, PRINCIPAL_HANDLE_NONE);
+    });
+
+    SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+        "TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged) #2"_ns,
+        [&] { return destroyed && newStream; });
+
+    destroyListener.Disconnect();
+    restartListener.Disconnect();
+
+    aCurrentStream = newStream;
+  };
+
+  // Test for the native input device first then non-native device. The
+  // non-native device will be destroyed before the native device in case of
+  // causing a native-device-switching.
+
+  // Test for the native device.
+  const CubebUtils::AudioDeviceID nativeDevice = (CubebUtils::AudioDeviceID)1;
+  RefPtr<AudioProcessingTrack> track1;
+  RefPtr<AudioInputProcessing> listener1;
+  RefPtr<SmartMockCubebStream> nativeStream;
+  RefPtr<AudioProcessingTrack> track2;
+  RefPtr<AudioInputProcessing> listener2;
+  {
+    // Open a 1-channel AudioProcessingTrack for the native device.
+    track1 = AudioProcessingTrack::Create(graph);
+    listener1 = new AudioInputProcessing(1);
+    track1->SetInputProcessing(listener1);
+    track1->GraphImpl()->AppendMessage(
+        MakeUnique<SetPassThrough>(track1, listener1, true));
+    track1->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(track1, listener1));
+    track1->ConnectDeviceInput(nativeDevice, listener1, PRINCIPAL_HANDLE_NONE);
+    EXPECT_EQ(track1->DeviceId().value(), nativeDevice);
+
+    auto started =
+        Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
+
+    nativeStream = WaitFor(cubeb->StreamInitEvent());
+    EXPECT_TRUE(nativeStream->mHasInput);
+    EXPECT_TRUE(nativeStream->mHasOutput);
+    EXPECT_EQ(nativeStream->InputChannels(), 1U);
+    EXPECT_EQ(nativeStream->GetInputDeviceID(), nativeDevice);
+    Unused << WaitFor(started);
+
+    // Open a 2-channel AudioProcessingTrack for the native device and wait for
+    // a new driver since the max-channel for the native device becomes 2 now.
+    openTrack(nativeStream, track2, listener2, nativeDevice, 2);
+    EXPECT_EQ(nativeStream->InputChannels(), 2U);
+
+    // Set the second AudioProcessingTrack for the native device to 1-channel
+    // and wait for a new driver since the max-channel for the native device
+    // becomes 1 now.
+    setNewChannelCount(track2, listener2, nativeStream, 1);
+    EXPECT_EQ(nativeStream->InputChannels(), 1U);
+
+    // Set the first AudioProcessingTrack for the native device to 2-channel and
+    // wait for a new driver since the max input channel for the native device
+    // becomes 2 now.
+    setNewChannelCount(track1, listener1, nativeStream, 2);
+    EXPECT_EQ(nativeStream->InputChannels(), 2U);
+  }
+
+  // Test for the non-native device.
+  {
+    const CubebUtils::AudioDeviceID nonNativeDevice =
+        (CubebUtils::AudioDeviceID)2;
+
+    // Open a 1-channel AudioProcessingTrack for the non-native device.
+    RefPtr<AudioProcessingTrack> track3 = AudioProcessingTrack::Create(graph);
+    RefPtr<AudioInputProcessing> listener3 = new AudioInputProcessing(1);
+    track3->SetInputProcessing(listener3);
+    track3->GraphImpl()->AppendMessage(
+        MakeUnique<SetPassThrough>(track3, listener3, true));
+    track3->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(track3, listener3));
+    track3->ConnectDeviceInput(nonNativeDevice, listener3,
+                               PRINCIPAL_HANDLE_NONE);
+    EXPECT_EQ(track3->DeviceId().value(), nonNativeDevice);
+
+    RefPtr<SmartMockCubebStream> nonNativeStream =
+        WaitFor(cubeb->StreamInitEvent());
+    EXPECT_TRUE(nonNativeStream->mHasInput);
+    EXPECT_FALSE(nonNativeStream->mHasOutput);
+    EXPECT_EQ(nonNativeStream->InputChannels(), 1U);
+    EXPECT_EQ(nonNativeStream->GetInputDeviceID(), nonNativeDevice);
+
+    // Open a 2-channel AudioProcessingTrack for the non-native device and wait
+    // for a new stream since the max-channel for the non-native device becomes
+    // 2 now.
+    RefPtr<AudioProcessingTrack> track4;
+    RefPtr<AudioInputProcessing> listener4;
+    openTrack(nonNativeStream, track4, listener4, nonNativeDevice, 2);
+    EXPECT_EQ(nonNativeStream->InputChannels(), 2U);
+    EXPECT_EQ(nonNativeStream->GetInputDeviceID(), nonNativeDevice);
+
+    // Set the second AudioProcessingTrack for the non-native to 1-channel and
+    // wait for a new driver since the max-channel for the non-native device
+    // becomes 1 now.
+    setNewChannelCount(track4, listener4, nonNativeStream, 1);
+    EXPECT_EQ(nonNativeStream->InputChannels(), 1U);
+    EXPECT_EQ(nonNativeStream->GetInputDeviceID(), nonNativeDevice);
+
+    // Set the first AudioProcessingTrack for the non-native device to 2-channel
+    // and wait for a new driver since the max input channel for the non-native
+    // device becomes 2 now.
+    setNewChannelCount(track3, listener3, nonNativeStream, 2);
+    EXPECT_EQ(nonNativeStream->InputChannels(), 2U);
+    EXPECT_EQ(nonNativeStream->GetInputDeviceID(), nonNativeDevice);
+
+    // Close the second AudioProcessingTrack (1-channel) for the non-native
+    // device then the first one (2-channel) so we won't result in another
+    // stream creation.
+    DispatchFunction([&] {
+      track4->GraphImpl()->AppendMessage(
+          MakeUnique<StopInputProcessing>(track4, listener4));
+      track4->DisconnectDeviceInput();
+      track4->Destroy();
+    });
+    DispatchFunction([&] {
+      track3->GraphImpl()->AppendMessage(
+          MakeUnique<StopInputProcessing>(track3, listener3));
+      track3->DisconnectDeviceInput();
+      track3->Destroy();
+    });
+    RefPtr<SmartMockCubebStream> destroyedStream =
+        WaitFor(cubeb->StreamDestroyEvent());
+    EXPECT_EQ(destroyedStream.get(), nonNativeStream.get());
+  }
+
+  // Tear down for the native device.
+  {
+    // Close the second AudioProcessingTrack (1-channel) for the native device
+    // then the first one (2-channel) so we won't have driver switching.
+    DispatchFunction([&] {
+      track2->GraphImpl()->AppendMessage(
+          MakeUnique<StopInputProcessing>(track2, listener2));
+      track2->DisconnectDeviceInput();
+      track2->Destroy();
+    });
+    DispatchFunction([&] {
+      track1->GraphImpl()->AppendMessage(
+          MakeUnique<StopInputProcessing>(track1, listener1));
+      track1->DisconnectDeviceInput();
+      track1->Destroy();
+    });
+    RefPtr<SmartMockCubebStream> destroyedStream =
+        WaitFor(cubeb->StreamDestroyEvent());
+    EXPECT_EQ(destroyedStream.get(), nativeStream.get());
+  }
+}
+
+TEST(TestAudioTrackGraph, SetInputChannelCountBeforeAudioCallbackDriver)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  // Set the input channel count of AudioInputProcessing, which will force
+  // MediaTrackGraph to re-evaluate input device, when the MediaTrackGraph is
+  // driven by the SystemClockDriver.
+
+  const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+  RefPtr<AudioProcessingTrack> track;
+  RefPtr<AudioInputProcessing> listener;
+  {
+    MozPromiseHolder<GenericPromise> h;
+    RefPtr<GenericPromise> p = h.Ensure(__func__);
+
+    // ControlMessage that, when it runs on the graph thread, dispatches a
+    // runnable to resolve the promise. Waiting on the promise therefore
+    // guarantees all messages appended before it have been processed.
+    struct GuardMessage : public ControlMessage {
+      MozPromiseHolder<GenericPromise> mHolder;
+
+      GuardMessage(MediaTrack* aTrack,
+                   MozPromiseHolder<GenericPromise>&& aHolder)
+          : ControlMessage(aTrack), mHolder(std::move(aHolder)) {}
+      void Run() override {
+        mTrack->GraphImpl()->Dispatch(NS_NewRunnableFunction(
+            "TestAudioTrackGraph::SetInputChannel::Message::Resolver",
+            [holder = std::move(mHolder)]() mutable {
+              holder.Resolve(true, __func__);
+            }));
+      }
+    };
+
+    DispatchFunction([&] {
+      track = AudioProcessingTrack::Create(graph);
+      listener = new AudioInputProcessing(2);
+      track->GraphImpl()->AppendMessage(
+          MakeUnique<SetPassThrough>(track, listener, true));
+      track->SetInputProcessing(listener);
+      track->GraphImpl()->AppendMessage(
+          MakeUnique<SetRequestedInputChannelCount>(track, deviceId, listener,
+                                                    1));
+      track->GraphImpl()->AppendMessage(
+          MakeUnique<GuardMessage>(track, std::move(h)));
+    });
+
+    Unused << WaitFor(p);
+  }
+
+  // Open a full-duplex AudioCallbackDriver.
+
+  RefPtr<MediaInputPort> port;
+  DispatchFunction([&] {
+    track->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(track, listener));
+    track->ConnectDeviceInput(deviceId, listener, PRINCIPAL_HANDLE_NONE);
+  });
+
+  // MediaTrackGraph will create a output-only AudioCallbackDriver in
+  // CheckDriver before we open an audio input above, since AudioProcessingTrack
+  // is a audio-type MediaTrack, so we need to wait here until the duplex
+  // AudioCallbackDriver is created.
+  RefPtr<SmartMockCubebStream> stream;
+  SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+      "TEST(TestAudioTrackGraph, SetInputChannelCountBeforeAudioCallbackDriver)"_ns,
+      [&] {
+        // Each iteration consumes one StreamInitEvent; keep spinning until a
+        // stream with an input side shows up.
+        stream = WaitFor(cubeb->StreamInitEvent());
+        EXPECT_TRUE(stream->mHasOutput);
+        return stream->mHasInput;
+      });
+  // The channel count requested before the driver existed must be in effect.
+  EXPECT_EQ(stream->InputChannels(), 1U);
+
+  Unused << WaitFor(
+      Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); }));
+
+  // Clean up.
+  DispatchFunction([&] {
+    track->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(track, listener));
+    track->DisconnectDeviceInput();
+    track->Destroy();
+  });
+  Unused << WaitFor(cubeb->StreamDestroyEvent());
+}
+
+TEST(TestAudioTrackGraph, StartAudioDeviceBeforeStartingAudioProcessing)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  // Create a duplex AudioCallbackDriver
+  const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+  RefPtr<AudioProcessingTrack> track;
+  RefPtr<AudioInputProcessing> listener;
+  DispatchFunction([&] {
+    track = AudioProcessingTrack::Create(graph);
+    listener = new AudioInputProcessing(2);
+    track->GraphImpl()->AppendMessage(
+        MakeUnique<SetPassThrough>(track, listener, true));
+    track->SetInputProcessing(listener);
+    // Start audio device without starting audio processing.
+    track->ConnectDeviceInput(deviceId, listener, PRINCIPAL_HANDLE_NONE);
+  });
+
+  RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream->mHasInput);
+  EXPECT_TRUE(stream->mHasOutput);
+
+  // Wait for a second to make sure audio output callback has been fired.
+  // NOTE(review): GoFaster presumably removes real-time pacing from MockCubeb
+  // callbacks so a second of audio elapses quickly — confirm in MockCubeb.h.
+  DispatchFunction(
+      [&] { track->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb)); });
+  {
+    // Accumulate processed frames until one graph-rate second has elapsed.
+    uint32_t totalFrames = 0;
+    WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+      totalFrames += aFrames;
+      return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+    });
+  }
+  cubeb->DontGoFaster();
+
+  // Start the audio processing.
+  DispatchFunction([&] {
+    track->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(track, listener));
+  });
+
+  // Wait for a second to make sure audio output callback has been fired.
+  DispatchFunction(
+      [&] { track->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb)); });
+  {
+    uint32_t totalFrames = 0;
+    WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+      totalFrames += aFrames;
+      return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+    });
+  }
+  cubeb->DontGoFaster();
+
+  // Clean up.
+  DispatchFunction([&] {
+    track->DisconnectDeviceInput();
+    track->Destroy();
+  });
+  Unused << WaitFor(cubeb->StreamDestroyEvent());
+}
+
+TEST(TestAudioTrackGraph, StopAudioProcessingBeforeStoppingAudioDevice)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  // Create a duplex AudioCallbackDriver
+  const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+  RefPtr<AudioProcessingTrack> track;
+  RefPtr<AudioInputProcessing> listener;
+  DispatchFunction([&] {
+    track = AudioProcessingTrack::Create(graph);
+    listener = new AudioInputProcessing(2);
+    track->GraphImpl()->AppendMessage(
+        MakeUnique<SetPassThrough>(track, listener, true));
+    track->SetInputProcessing(listener);
+    track->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(track, listener));
+    track->ConnectDeviceInput(deviceId, listener, PRINCIPAL_HANDLE_NONE);
+  });
+
+  RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream->mHasInput);
+  EXPECT_TRUE(stream->mHasOutput);
+
+  // Wait for a second to make sure audio output callback has been fired.
+  DispatchFunction(
+      [&] { track->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb)); });
+  {
+    // Accumulate processed frames until one graph-rate second has elapsed.
+    uint32_t totalFrames = 0;
+    WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+      totalFrames += aFrames;
+      return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+    });
+  }
+  cubeb->DontGoFaster();
+
+  // Stop the audio processing
+  DispatchFunction([&] {
+    track->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(track, listener));
+  });
+
+  // Wait for a second to make sure audio output callback has been fired.
+  // The device stays open, so frames keep being processed after processing
+  // has stopped.
+  DispatchFunction(
+      [&] { track->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb)); });
+  {
+    uint32_t totalFrames = 0;
+    WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+      totalFrames += aFrames;
+      return totalFrames > static_cast<uint32_t>(graph->GraphRate());
+    });
+  }
+  cubeb->DontGoFaster();
+
+  // Clean up.
+  DispatchFunction([&] {
+    track->DisconnectDeviceInput();
+    track->Destroy();
+  });
+  Unused << WaitFor(cubeb->StreamDestroyEvent());
+}
+
+// This test is pretty similar to SwitchNativeInputDevice above, which makes
+// sure the related DeviceInputTrack operations used by this test work
+// correctly. Instead of using a test-only DeviceInputTrack consumer, we use
+// AudioProcessingTrack here to simulate the real world use case.
+TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
+{
+  MockCubeb* cubeb = new MockCubeb();
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+      CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+      nullptr, GetMainThreadSerialEventTarget());
+
+  // Closes the current native track and waits until the next device has been
+  // promoted to native, i.e. until its input-only stream has been replaced by
+  // a duplex stream.
+  auto switchNativeDevice =
+      [&](RefPtr<SmartMockCubebStream>&& aCurrentNativeStream,
+          RefPtr<AudioProcessingTrack>& aCurrentNativeTrack,
+          RefPtr<AudioInputProcessing>& aCurrentNativeListener,
+          RefPtr<SmartMockCubebStream>& aNextNativeStream,
+          RefPtr<AudioProcessingTrack>& aNextNativeTrack) {
+        ASSERT_TRUE(aCurrentNativeStream->mHasInput);
+        ASSERT_TRUE(aCurrentNativeStream->mHasOutput);
+        ASSERT_TRUE(aNextNativeStream->mHasInput);
+        ASSERT_FALSE(aNextNativeStream->mHasOutput);
+
+        std::cerr << "Switching native input from device "
+                  << aCurrentNativeStream->GetInputDeviceID() << " to "
+                  << aNextNativeStream->GetInputDeviceID() << std::endl;
+
+        // Both the current native stream (duplex) and the next device's
+        // input-only stream are expected to be destroyed during the switch.
+        uint32_t destroyed = 0;
+        MediaEventListener destroyListener =
+            cubeb->StreamDestroyEvent().Connect(
+                AbstractThread::GetCurrent(),
+                [&](const RefPtr<SmartMockCubebStream>& aDestroyed) {
+                  if (aDestroyed.get() == aCurrentNativeStream.get() ||
+                      aDestroyed.get() == aNextNativeStream.get()) {
+                    std::cerr << "cubeb stream " << aDestroyed.get()
+                              << " (device " << aDestroyed->GetInputDeviceID()
+                              << ") has been destroyed" << std::endl;
+                    destroyed += 1;
+                  }
+                });
+
+        RefPtr<SmartMockCubebStream> newStream;
+        MediaEventListener restartListener = cubeb->StreamInitEvent().Connect(
+            AbstractThread::GetCurrent(),
+            [&](const RefPtr<SmartMockCubebStream>& aCreated) {
+              // Make sure new stream has input, to prevent from getting a
+              // temporary output-only AudioCallbackDriver after closing current
+              // native device but before setting a new native input.
+              if (aCreated->mHasInput) {
+                ASSERT_TRUE(aCreated->mHasOutput);
+                newStream = aCreated;
+              }
+            });
+
+        std::cerr << "Close device " << aCurrentNativeStream->GetInputDeviceID()
+                  << std::endl;
+        DispatchFunction([&] {
+          aCurrentNativeTrack->GraphImpl()->AppendMessage(
+              MakeUnique<StopInputProcessing>(aCurrentNativeTrack,
+                                              aCurrentNativeListener));
+          aCurrentNativeTrack->DisconnectDeviceInput();
+          aCurrentNativeTrack->Destroy();
+        });
+
+        std::cerr << "Wait for the switching" << std::endl;
+        SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+            "TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)"_ns,
+            [&] { return destroyed >= 2 && newStream; });
+
+        destroyListener.Disconnect();
+        restartListener.Disconnect();
+
+        aCurrentNativeStream = nullptr;
+        aNextNativeStream = newStream;
+
+        std::cerr << "Now the native input is device "
+                  << aNextNativeStream->GetInputDeviceID() << std::endl;
+      };
+
+  // Open a AudioProcessingTrack for device 1.
+  const CubebUtils::AudioDeviceID device1 = (CubebUtils::AudioDeviceID)1;
+  RefPtr<AudioProcessingTrack> track1 = AudioProcessingTrack::Create(graph);
+  RefPtr<AudioInputProcessing> listener1 = new AudioInputProcessing(1);
+  track1->SetInputProcessing(listener1);
+  track1->GraphImpl()->AppendMessage(
+      MakeUnique<SetPassThrough>(track1, listener1, true));
+  track1->GraphImpl()->AppendMessage(
+      MakeUnique<StartInputProcessing>(track1, listener1));
+  track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
+  EXPECT_EQ(track1->DeviceId().value(), device1);
+
+  auto started =
+      Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
+
+  RefPtr<SmartMockCubebStream> stream1 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream1->mHasInput);
+  EXPECT_TRUE(stream1->mHasOutput);
+  EXPECT_EQ(stream1->InputChannels(), 1U);
+  EXPECT_EQ(stream1->GetInputDeviceID(), device1);
+  Unused << WaitFor(started);
+  std::cerr << "Device " << device1 << " is opened (stream " << stream1.get()
+            << ")" << std::endl;
+
+  // Open a AudioProcessingTrack for device 2.
+  const CubebUtils::AudioDeviceID device2 = (CubebUtils::AudioDeviceID)2;
+  RefPtr<AudioProcessingTrack> track2 = AudioProcessingTrack::Create(graph);
+  RefPtr<AudioInputProcessing> listener2 = new AudioInputProcessing(2);
+  track2->SetInputProcessing(listener2);
+  track2->GraphImpl()->AppendMessage(
+      MakeUnique<SetPassThrough>(track2, listener2, true));
+  track2->GraphImpl()->AppendMessage(
+      MakeUnique<StartInputProcessing>(track2, listener2));
+  track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
+  EXPECT_EQ(track2->DeviceId().value(), device2);
+
+  RefPtr<SmartMockCubebStream> stream2 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream2->mHasInput);
+  EXPECT_FALSE(stream2->mHasOutput);
+  EXPECT_EQ(stream2->InputChannels(), 2U);
+  EXPECT_EQ(stream2->GetInputDeviceID(), device2);
+  std::cerr << "Device " << device2 << " is opened (stream " << stream2.get()
+            << ")" << std::endl;
+
+  // Open a AudioProcessingTrack for device 3.
+  const CubebUtils::AudioDeviceID device3 = (CubebUtils::AudioDeviceID)3;
+  RefPtr<AudioProcessingTrack> track3 = AudioProcessingTrack::Create(graph);
+  RefPtr<AudioInputProcessing> listener3 = new AudioInputProcessing(1);
+  track3->SetInputProcessing(listener3);
+  track3->GraphImpl()->AppendMessage(
+      MakeUnique<SetPassThrough>(track3, listener3, true));
+  track3->GraphImpl()->AppendMessage(
+      MakeUnique<StartInputProcessing>(track3, listener3));
+  track3->ConnectDeviceInput(device3, listener3, PRINCIPAL_HANDLE_NONE);
+  EXPECT_EQ(track3->DeviceId().value(), device3);
+
+  RefPtr<SmartMockCubebStream> stream3 = WaitFor(cubeb->StreamInitEvent());
+  EXPECT_TRUE(stream3->mHasInput);
+  EXPECT_FALSE(stream3->mHasOutput);
+  EXPECT_EQ(stream3->InputChannels(), 1U);
+  EXPECT_EQ(stream3->GetInputDeviceID(), device3);
+  std::cerr << "Device " << device3 << " is opened (stream " << stream3.get()
+            << ")" << std::endl;
+
+  // Close device 1, so the native input device is switched from device 1 to
+  // device 2.
+  switchNativeDevice(std::move(stream1), track1, listener1, stream2, track2);
+  EXPECT_TRUE(stream2->mHasInput);
+  EXPECT_TRUE(stream2->mHasOutput);
+  EXPECT_EQ(stream2->InputChannels(), 2U);
+  EXPECT_EQ(stream2->GetInputDeviceID(), device2);
+  {
+    NativeInputTrack* native = track2->Graph()->GetNativeInputTrackMainThread();
+    ASSERT_TRUE(!!native);
+    EXPECT_EQ(native->mDeviceId, device2);
+  }
+
+  // Close device 2, so the native input device is switched from device 2 to
+  // device 3.
+  switchNativeDevice(std::move(stream2), track2, listener2, stream3, track3);
+  EXPECT_TRUE(stream3->mHasInput);
+  EXPECT_TRUE(stream3->mHasOutput);
+  EXPECT_EQ(stream3->InputChannels(), 1U);
+  EXPECT_EQ(stream3->GetInputDeviceID(), device3);
+  {
+    NativeInputTrack* native = track3->Graph()->GetNativeInputTrackMainThread();
+    ASSERT_TRUE(!!native);
+    EXPECT_EQ(native->mDeviceId, device3);
+  }
+
+  // Clean up.
+  std::cerr << "Close device " << device3 << std::endl;
+  DispatchFunction([&] {
+    track3->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(track3, listener3));
+    track3->DisconnectDeviceInput();
+    track3->Destroy();
+  });
+  RefPtr<SmartMockCubebStream> destroyedStream =
+      WaitFor(cubeb->StreamDestroyEvent());
+  EXPECT_EQ(destroyedStream.get(), stream3.get());
+  {
+    // With the last input track gone there must be no native input track left.
+    NativeInputTrack* native = graph->GetNativeInputTrackMainThread();
+    ASSERT_TRUE(!native);
+  }
+  std::cerr << "No native input now" << std::endl;
+}
+
+// MediaTrackListener that records, from each NotifyOutput callback, whether
+// the graph's current driver is an AudioCallbackDriver still running on its
+// fallback driver. Tests poll OnFallback() to wait until real audio callbacks
+// have taken over.
+class OnFallbackListener : public MediaTrackListener {
+  const RefPtr<MediaTrack> mTrack;
+  // Starts true; updated on every NotifyOutput while the current driver is an
+  // AudioCallbackDriver. Atomic so it can be read from another thread.
+  Atomic<bool> mOnFallback{true};
+
+ public:
+  explicit OnFallbackListener(MediaTrack* aTrack) : mTrack(aTrack) {}
+
+  // Last observed fallback state.
+  bool OnFallback() { return mOnFallback; }
+
+  void NotifyOutput(MediaTrackGraph*, TrackTime) override {
+    if (auto* ad =
+            mTrack->GraphImpl()->CurrentDriver()->AsAudioCallbackDriver()) {
+      mOnFallback = ad->OnFallback();
+    }
+  }
+};
+
+// Drives a primary (input) graph and a partner (output) graph connected via
+// CrossGraphTransmitter/CrossGraphReceiver, manually clocking both MockCubeb
+// streams. aDriftFactor scales the partner stream's clock relative to the
+// primary's to simulate clock drift. After aRunTimeSeconds of simulated
+// audio, the partner stream's output is verified: estimated sine frequency,
+// pre-silence length, and number of discontinuities (bounded using
+// aNumExpectedUnderruns).
+void TestCrossGraphPort(uint32_t aInputRate, uint32_t aOutputRate,
+                        float aDriftFactor, uint32_t aRunTimeSeconds = 10,
+                        uint32_t aNumExpectedUnderruns = 0) {
+  std::cerr << "TestCrossGraphPort input: " << aInputRate
+            << ", output: " << aOutputRate << ", driftFactor: " << aDriftFactor
+            << std::endl;
+
+  MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
+  CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+  /* Primary graph: Create the graph. */
+  MediaTrackGraph* primary = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER,
+      /*Window ID*/ 1, aInputRate, nullptr, GetMainThreadSerialEventTarget());
+
+  /* Partner graph: Create the graph. */
+  MediaTrackGraph* partner = MediaTrackGraphImpl::GetInstance(
+      MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1, aOutputRate,
+      /*OutputDeviceID*/ reinterpret_cast<cubeb_devid>(1),
+      GetMainThreadSerialEventTarget());
+
+  const CubebUtils::AudioDeviceID inputDeviceId = (CubebUtils::AudioDeviceID)1;
+
+  RefPtr<AudioProcessingTrack> processingTrack;
+  RefPtr<AudioInputProcessing> listener;
+  RefPtr<OnFallbackListener> primaryFallbackListener;
+  DispatchFunction([&] {
+    /* Primary graph: Create input track and open it */
+    processingTrack = AudioProcessingTrack::Create(primary);
+    listener = new AudioInputProcessing(2);
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<SetPassThrough>(processingTrack, listener, true));
+    processingTrack->SetInputProcessing(listener);
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StartInputProcessing>(processingTrack, listener));
+    processingTrack->ConnectDeviceInput(inputDeviceId, listener,
+                                        PRINCIPAL_HANDLE_NONE);
+    primaryFallbackListener = new OnFallbackListener(processingTrack);
+    processingTrack->AddListener(primaryFallbackListener);
+  });
+
+  RefPtr<SmartMockCubebStream> inputStream = WaitFor(cubeb->StreamInitEvent());
+
+  // Wait for the primary AudioCallbackDriver to come into effect.
+  while (primaryFallbackListener->OnFallback()) {
+    EXPECT_EQ(inputStream->ManualDataCallback(0),
+              MockCubebStream::KeepProcessing::Yes);
+    std::this_thread::sleep_for(std::chrono::milliseconds(1));
+  }
+
+  RefPtr<CrossGraphTransmitter> transmitter;
+  RefPtr<MediaInputPort> port;
+  RefPtr<CrossGraphReceiver> receiver;
+  RefPtr<OnFallbackListener> partnerFallbackListener;
+  DispatchFunction([&] {
+    processingTrack->RemoveListener(primaryFallbackListener);
+
+    /* Partner graph: Create CrossGraphReceiver */
+    receiver = partner->CreateCrossGraphReceiver(primary->GraphRate());
+
+    /* Primary graph: Create CrossGraphTransmitter */
+    transmitter = primary->CreateCrossGraphTransmitter(receiver);
+
+    /* How the input track connects to another ProcessedMediaTrack.
+     * Check in MediaManager how it is connected to AudioStreamTrack. */
+    port = transmitter->AllocateInputPort(processingTrack);
+    receiver->AddAudioOutput((void*)1, partner->PrimaryOutputDeviceID(), 0);
+
+    partnerFallbackListener = new OnFallbackListener(receiver);
+    receiver->AddListener(partnerFallbackListener);
+  });
+
+  RefPtr<SmartMockCubebStream> partnerStream =
+      WaitFor(cubeb->StreamInitEvent());
+
+  // Process the CrossGraphTransmitter on the primary graph.
+  EXPECT_EQ(inputStream->ManualDataCallback(0),
+            MockCubebStream::KeepProcessing::Yes);
+
+  // Wait for the partner AudioCallbackDriver to come into effect.
+  while (partnerFallbackListener->OnFallback()) {
+    EXPECT_EQ(partnerStream->ManualDataCallback(0),
+              MockCubebStream::KeepProcessing::Yes);
+    std::this_thread::sleep_for(std::chrono::milliseconds(1));
+  }
+
+  DispatchFunction([&] { receiver->RemoveListener(partnerFallbackListener); });
+  while (NS_ProcessNextEvent(nullptr, false)) {
+  }
+
+  // Track the terminal state of both streams so we can wait for them to stop
+  // at the end of the test.
+  nsIThread* currentThread = NS_GetCurrentThread();
+  cubeb_state inputState = CUBEB_STATE_STARTED;
+  MediaEventListener inputStateListener = inputStream->StateEvent().Connect(
+      currentThread, [&](cubeb_state aState) { inputState = aState; });
+  cubeb_state partnerState = CUBEB_STATE_STARTED;
+  MediaEventListener partnerStateListener = partnerStream->StateEvent().Connect(
+      currentThread, [&](cubeb_state aState) { partnerState = aState; });
+
+  const media::TimeUnit runtime = media::TimeUnit::FromSeconds(aRunTimeSeconds);
+  // 10ms per iteration.
+  const media::TimeUnit step = media::TimeUnit::FromSeconds(0.01);
+  {
+    // Advance both streams in lockstep; the partner's clock advances
+    // aDriftFactor times as fast to simulate drift between the devices.
+    media::TimeUnit pos = media::TimeUnit::Zero();
+    long inputFrames = 0;
+    long outputFrames = 0;
+    while (pos < runtime) {
+      pos += step;
+      const long newInputFrames = pos.ToTicksAtRate(aInputRate);
+      const long newOutputFrames =
+          (pos.MultDouble(aDriftFactor)).ToTicksAtRate(aOutputRate);
+      EXPECT_EQ(inputStream->ManualDataCallback(newInputFrames - inputFrames),
+                MockCubebStream::KeepProcessing::Yes);
+      EXPECT_EQ(
+          partnerStream->ManualDataCallback(newOutputFrames - outputFrames),
+          MockCubebStream::KeepProcessing::Yes);
+
+      inputFrames = newInputFrames;
+      outputFrames = newOutputFrames;
+    }
+  }
+
+  DispatchFunction([&] {
+    // Clean up on MainThread
+    receiver->RemoveAudioOutput((void*)1);
+    receiver->Destroy();
+    transmitter->Destroy();
+    port->Destroy();
+    processingTrack->GraphImpl()->AppendMessage(
+        MakeUnique<StopInputProcessing>(processingTrack, listener));
+    processingTrack->DisconnectDeviceInput();
+    processingTrack->Destroy();
+  });
+
+  while (NS_ProcessNextEvent(nullptr, false)) {
+  }
+
+  // Run one more zero-frame callback after cleanup, then a real callback that
+  // is expected to be the streams' last.
+  EXPECT_EQ(inputStream->ManualDataCallback(0),
+            MockCubebStream::KeepProcessing::Yes);
+  EXPECT_EQ(partnerStream->ManualDataCallback(0),
+            MockCubebStream::KeepProcessing::Yes);
+
+  EXPECT_EQ(inputStream->ManualDataCallback(128),
+            MockCubebStream::KeepProcessing::No);
+  EXPECT_EQ(partnerStream->ManualDataCallback(128),
+            MockCubebStream::KeepProcessing::No);
+
+  uint32_t inputFrequency = inputStream->InputFrequency();
+
+  uint64_t preSilenceSamples;
+  float estimatedFreq;
+  uint32_t nrDiscontinuities;
+  std::tie(preSilenceSamples, estimatedFreq, nrDiscontinuities) =
+      WaitFor(partnerStream->OutputVerificationEvent());
+
+  EXPECT_NEAR(estimatedFreq, inputFrequency / aDriftFactor, 5);
+  // Note that pre-silence is in the output rate. The buffering is on the input
+  // side. There is one block buffered in NativeInputTrack. Then
+  // AudioDriftCorrection sets its pre-buffering so that *after* the first
+  // resample of real input data, the buffer contains enough data to match the
+  // desired level, which is initially 50ms. I.e. silence = buffering -
+  // inputStep + outputStep. Note that the steps here are rounded up to block
+  // size.
+  const media::TimeUnit inputBuffering(WEBAUDIO_BLOCK_SIZE, aInputRate);
+  const media::TimeUnit buffering =
+      media::TimeUnit::FromSeconds(0.05).ToBase(aInputRate);
+  const media::TimeUnit inputStepSize(
+      MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(
+          step.ToTicksAtRate(aInputRate)),
+      aInputRate);
+  const media::TimeUnit outputStepSize =
+      media::TimeUnit(MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(
+                          step.ToBase(aOutputRate)
+                              .MultDouble(aDriftFactor)
+                              .ToTicksAtRate(aOutputRate)),
+                      aOutputRate)
+          .ToBase(aInputRate);
+  const uint32_t expectedPreSilence =
+      (outputStepSize + inputBuffering + buffering - inputStepSize)
+          .ToBase(aInputRate)
+          .ToBase<media::TimeUnit::CeilingPolicy>(aOutputRate)
+          .ToTicksAtRate(aOutputRate);
+  // Use a margin of 0.1% of the expected pre-silence, since the resampler is
+  // adapting to drift and will process the pre-silence frames. Because of
+  // rounding errors, we don't use a margin lower than 1.
+  const uint32_t margin = std::max(1U, expectedPreSilence / 1000);
+  EXPECT_NEAR(preSilenceSamples, expectedPreSilence, margin);
+  // The waveform from AudioGenerator starts at 0, but we don't control its
+  // ending, so we expect a discontinuity there. For each expected underrun
+  // there could be an additional 2 discontinuities (start and end of the silent
+  // period).
+  EXPECT_LE(nrDiscontinuities, 1U + 2 * aNumExpectedUnderruns);
+
+  SpinEventLoopUntil("streams have stopped"_ns, [&] {
+    return inputState == CUBEB_STATE_STOPPED &&
+           partnerState == CUBEB_STATE_STOPPED;
+  });
+  inputStateListener.Disconnect();
+  partnerStateListener.Disconnect();
+}
+
+TEST(TestAudioTrackGraph, CrossGraphPort)
+{
+ TestCrossGraphPort(44100, 44100, 1);
+ TestCrossGraphPort(44100, 44100, 1.006);
+ TestCrossGraphPort(44100, 44100, 0.994);
+
+ TestCrossGraphPort(48000, 44100, 1);
+ TestCrossGraphPort(48000, 44100, 1.006);
+ TestCrossGraphPort(48000, 44100, 0.994);
+
+ TestCrossGraphPort(44100, 48000, 1);
+ TestCrossGraphPort(44100, 48000, 1.006);
+ TestCrossGraphPort(44100, 48000, 0.994);
+
+ TestCrossGraphPort(52110, 17781, 1);
+ TestCrossGraphPort(52110, 17781, 1.006);
+ TestCrossGraphPort(52110, 17781, 0.994);
+}
+
+TEST(TestAudioTrackGraph, CrossGraphPortUnderrun)
+{
+ TestCrossGraphPort(44100, 44100, 1.01, 30, 1);
+ TestCrossGraphPort(44100, 44100, 1.03, 40, 3);
+
+ TestCrossGraphPort(48000, 44100, 1.01, 30, 1);
+ TestCrossGraphPort(48000, 44100, 1.03, 40, 3);
+
+ TestCrossGraphPort(44100, 48000, 1.01, 30, 1);
+ TestCrossGraphPort(44100, 48000, 1.03, 40, 3);
+
+ TestCrossGraphPort(52110, 17781, 1.01, 30, 1);
+ TestCrossGraphPort(52110, 17781, 1.03, 40, 3);
+}
+
+TEST(TestAudioTrackGraph, SecondaryOutputDevice)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const TrackRate primaryRate = 48000;
+ const TrackRate secondaryRate = 44100; // for secondary output device
+
+ MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+ MediaTrackGraph::SYSTEM_THREAD_DRIVER,
+ /*Window ID*/ 1, primaryRate, nullptr, GetMainThreadSerialEventTarget());
+
+ RefPtr<AudioProcessingTrack> processingTrack;
+ RefPtr<AudioInputProcessing> listener;
+ DispatchFunction([&] {
+ /* Create an input track and connect it to a device */
+ processingTrack = AudioProcessingTrack::Create(graph);
+ listener = new AudioInputProcessing(2);
+ processingTrack->GraphImpl()->AppendMessage(
+ MakeUnique<SetPassThrough>(processingTrack, listener, true));
+ processingTrack->SetInputProcessing(listener);
+ processingTrack->GraphImpl()->AppendMessage(
+ MakeUnique<StartInputProcessing>(processingTrack, listener));
+ processingTrack->ConnectDeviceInput(nullptr, listener,
+ PRINCIPAL_HANDLE_NONE);
+ });
+ RefPtr<SmartMockCubebStream> primaryStream =
+ WaitFor(cubeb->StreamInitEvent());
+
+ const void* secondaryDeviceID = CubebUtils::AudioDeviceID(2);
+ DispatchFunction([&] {
+ processingTrack->AddAudioOutput(nullptr, secondaryDeviceID, secondaryRate);
+ processingTrack->SetAudioOutputVolume(nullptr, 0.f);
+ });
+ RefPtr<SmartMockCubebStream> secondaryStream =
+ WaitFor(cubeb->StreamInitEvent());
+ EXPECT_EQ(secondaryStream->GetOutputDeviceID(), secondaryDeviceID);
+ EXPECT_EQ(static_cast<TrackRate>(secondaryStream->SampleRate()),
+ secondaryRate);
+
+ nsIThread* currentThread = NS_GetCurrentThread();
+ uint32_t audioFrames = 0; // excludes pre-silence
+ MediaEventListener audioListener =
+ secondaryStream->FramesVerifiedEvent().Connect(
+ currentThread, [&](uint32_t aFrames) { audioFrames += aFrames; });
+
+ // Wait for 100ms of pre-silence to verify that SetAudioOutputVolume() is
+ // effective.
+ uint32_t processedFrames = 0;
+ WaitUntil(secondaryStream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+ processedFrames += aFrames;
+ return processedFrames > static_cast<uint32_t>(secondaryRate / 10);
+ });
+ EXPECT_EQ(audioFrames, 0U) << "audio frames at zero volume";
+
+ secondaryStream->SetOutputRecordingEnabled(true);
+ DispatchFunction(
+ [&] { processingTrack->SetAudioOutputVolume(nullptr, 1.f); });
+
+ // Wait for enough audio after initial silence to check the frequency.
+ SpinEventLoopUntil("200ms of audio"_ns, [&] {
+ return audioFrames > static_cast<uint32_t>(secondaryRate / 5);
+ });
+ audioListener.Disconnect();
+
+ // Stop recording now so as not to record the discontinuity when the
+ // CrossGraphReceiver is removed from the secondary graph before its
+ // AudioCallbackDriver is stopped.
+ secondaryStream->SetOutputRecordingEnabled(false);
+
+ DispatchFunction([&] { processingTrack->RemoveAudioOutput(nullptr); });
+ WaitFor(secondaryStream->OutputVerificationEvent());
+ // The frequency from OutputVerificationEvent() is estimated by
+ // AudioVerifier from a zero-crossing count. When the discontinuity from
+ // the volume change is resampled, the discontinuity presents as
+ // oscillations, which increase the zero-crossing count and corrupt the
+ // frequency estimate. Trim off sufficient leading from the output to
+ // remove this discontinuity.
+ uint32_t channelCount = secondaryStream->OutputChannels();
+ nsTArray<AudioDataValue> output = secondaryStream->TakeRecordedOutput();
+ size_t leadingIndex = 0;
+ for (; leadingIndex < output.Length() && output[leadingIndex] == 0.f;
+ leadingIndex += channelCount) {
+ };
+ leadingIndex += 10 * channelCount; // skip discontinuity oscillations
+ EXPECT_LT(leadingIndex, output.Length());
+ auto trimmed = Span(output).From(std::min(leadingIndex, output.Length()));
+ size_t frameCount = trimmed.Length() / channelCount;
+ uint32_t inputFrequency = primaryStream->InputFrequency();
+ AudioVerifier<AudioDataValue> verifier(secondaryRate, inputFrequency);
+ verifier.AppendDataInterleaved(trimmed.Elements(), frameCount, channelCount);
+ EXPECT_EQ(verifier.EstimatedFreq(), inputFrequency);
+ // AudioVerifier considers the previous value before the initial sample to
+ // be zero and so considers any initial sample >> 0 to be a discontinuity.
+ EXPECT_EQ(verifier.CountDiscontinuities(), 1U);
+
+ DispatchFunction([&] {
+ // Clean up
+ processingTrack->GraphImpl()->AppendMessage(
+ MakeUnique<StopInputProcessing>(processingTrack, listener));
+ processingTrack->DisconnectDeviceInput();
+ processingTrack->Destroy();
+ });
+ WaitFor(primaryStream->OutputVerificationEvent());
+}
+#endif // MOZ_WEBRTC
+
+#undef Invoke
+#undef DispatchFunction
+#undef DispatchMethod
diff --git a/dom/media/gtest/TestBenchmarkStorage.cpp b/dom/media/gtest/TestBenchmarkStorage.cpp
new file mode 100644
index 0000000000..0f1eb7e4c4
--- /dev/null
+++ b/dom/media/gtest/TestBenchmarkStorage.cpp
@@ -0,0 +1,92 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/BenchmarkStorageParent.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest.h"
+
+using ::testing::Return;
+using namespace mozilla;
+
+TEST(BenchmarkStorage, MovingAverage)
+{
+ int32_t av = 0;
+ int32_t win = 0;
+ int32_t val = 100;
+ BenchmarkStorageParent::MovingAverage(av, win, val);
+ EXPECT_EQ(av, val) << "1st average";
+ EXPECT_EQ(win, 1) << "1st window";
+
+ av = 50;
+ win = 1;
+ val = 100;
+ BenchmarkStorageParent::MovingAverage(av, win, val);
+ EXPECT_EQ(av, 75) << "2nd average";
+ EXPECT_EQ(win, 2) << "2nd window";
+
+ av = 100;
+ win = 9;
+ val = 90;
+ BenchmarkStorageParent::MovingAverage(av, win, val);
+ EXPECT_EQ(av, 99) << "9th average";
+ EXPECT_EQ(win, 10) << "9th window";
+
+ av = 90;
+ win = 19;
+ val = 90;
+ BenchmarkStorageParent::MovingAverage(av, win, val);
+ EXPECT_EQ(av, 90) << "19th average";
+ EXPECT_EQ(win, 20) << "19th window";
+
+ av = 90;
+ win = 20;
+ val = 100;
+ BenchmarkStorageParent::MovingAverage(av, win, val);
+ EXPECT_EQ(av, 91) << "20th average";
+ EXPECT_EQ(win, 20) << "20th window";
+}
+
+TEST(BenchmarkStorage, ParseStoredValue)
+{
+ int32_t win = 0;
+ int32_t score = BenchmarkStorageParent::ParseStoredValue(1100, win);
+ EXPECT_EQ(win, 1) << "Window";
+ EXPECT_EQ(score, 100) << "Score/Percentage";
+
+ win = 0;
+ score = BenchmarkStorageParent::ParseStoredValue(10099, win);
+ EXPECT_EQ(win, 10) << "Window";
+ EXPECT_EQ(score, 99) << "Score/Percentage";
+
+ win = 0;
+ score = BenchmarkStorageParent::ParseStoredValue(15038, win);
+ EXPECT_EQ(win, 15) << "Window";
+ EXPECT_EQ(score, 38) << "Score/Percentage";
+
+ win = 0;
+ score = BenchmarkStorageParent::ParseStoredValue(20099, win);
+ EXPECT_EQ(win, 20) << "Window";
+ EXPECT_EQ(score, 99) << "Score/Percentage";
+}
+
+TEST(BenchmarkStorage, PrepareStoredValue)
+{
+ int32_t stored_value = BenchmarkStorageParent::PrepareStoredValue(80, 1);
+ EXPECT_EQ(stored_value, 1080) << "Window";
+
+ stored_value = BenchmarkStorageParent::PrepareStoredValue(100, 6);
+ EXPECT_EQ(stored_value, 6100) << "Window";
+
+ stored_value = BenchmarkStorageParent::PrepareStoredValue(1, 10);
+ EXPECT_EQ(stored_value, 10001) << "Window";
+
+ stored_value = BenchmarkStorageParent::PrepareStoredValue(88, 13);
+ EXPECT_EQ(stored_value, 13088) << "Window";
+
+ stored_value = BenchmarkStorageParent::PrepareStoredValue(100, 20);
+ EXPECT_EQ(stored_value, 20100) << "Window";
+}
diff --git a/dom/media/gtest/TestBitWriter.cpp b/dom/media/gtest/TestBitWriter.cpp
new file mode 100644
index 0000000000..7cc94fb9a0
--- /dev/null
+++ b/dom/media/gtest/TestBitWriter.cpp
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdint.h>
+#include "gtest/gtest.h"
+#include "BitReader.h"
+#include "BitWriter.h"
+#include "H264.h"
+
+using namespace mozilla;
+
+TEST(BitWriter, BitWriter)
+{
+ RefPtr<MediaByteBuffer> test = new MediaByteBuffer();
+ BitWriter b(test);
+ b.WriteBit(false);
+ b.WriteBits(~1ULL, 1); // ensure that extra bits don't modify byte buffer.
+ b.WriteBits(3, 1);
+ b.WriteUE(1280 / 16 - 1);
+ b.WriteUE(720 / 16 - 1);
+ b.WriteUE(1280);
+ b.WriteUE(720);
+ b.WriteBit(true);
+ b.WriteBit(false);
+ b.WriteBit(true);
+ b.WriteU8(7);
+ b.WriteU32(16356);
+ b.WriteU64(116356);
+ b.WriteBits(~(0ULL) & ~1ULL, 16);
+ b.WriteULEB128(16ULL);
+ b.WriteULEB128(31895793ULL);
+ b.WriteULEB128(426894039235654ULL);
+ const uint32_t length = b.BitCount();
+ b.CloseWithRbspTrailing();
+
+ BitReader c(test);
+
+ EXPECT_EQ(c.ReadBit(), false);
+ EXPECT_EQ(c.ReadBit(), false);
+ EXPECT_EQ(c.ReadBit(), true);
+ EXPECT_EQ(c.ReadUE(), 1280u / 16 - 1);
+ EXPECT_EQ(c.ReadUE(), 720u / 16 - 1);
+ EXPECT_EQ(c.ReadUE(), 1280u);
+ EXPECT_EQ(c.ReadUE(), 720u);
+ EXPECT_EQ(c.ReadBit(), true);
+ EXPECT_EQ(c.ReadBit(), false);
+ EXPECT_EQ(c.ReadBit(), true);
+ EXPECT_EQ(c.ReadBits(8), 7u);
+ EXPECT_EQ(c.ReadU32(), 16356u);
+ EXPECT_EQ(c.ReadU64(), 116356u);
+ EXPECT_EQ(c.ReadBits(16), 0xfffeu);
+ EXPECT_EQ(c.ReadULEB128(), 16ull);
+ EXPECT_EQ(c.ReadULEB128(), 31895793ull);
+ EXPECT_EQ(c.ReadULEB128(), 426894039235654ull);
+ EXPECT_EQ(length, BitReader::GetBitLength(test));
+}
+
+TEST(BitWriter, AdvanceBytes)
+{
+ RefPtr<MediaByteBuffer> test = new MediaByteBuffer();
+ BitWriter b(test);
+ b.WriteBits(0xff, 8);
+ EXPECT_EQ(test->Length(), 1u);
+
+ uint8_t data[] = {0xfe, 0xfd};
+ test->AppendElements(data, sizeof(data));
+ EXPECT_EQ(test->Length(), 3u);
+ b.AdvanceBytes(2);
+
+ b.WriteBits(0xfc, 8);
+ EXPECT_EQ(test->Length(), 4u);
+
+ BitReader c(test);
+ EXPECT_EQ(c.ReadU32(), 0xfffefdfc);
+}
+
+TEST(BitWriter, SPS)
+{
+ uint8_t sps_pps[] = {0x01, 0x4d, 0x40, 0x0c, 0xff, 0xe1, 0x00, 0x1b, 0x67,
+ 0x4d, 0x40, 0x0c, 0xe8, 0x80, 0x80, 0x9d, 0x80, 0xb5,
+ 0x01, 0x01, 0x01, 0x40, 0x00, 0x00, 0x03, 0x00, 0x40,
+ 0x00, 0x00, 0x0f, 0x03, 0xc5, 0x0a, 0x44, 0x80, 0x01,
+ 0x00, 0x04, 0x68, 0xeb, 0xef, 0x20};
+
+ RefPtr<MediaByteBuffer> extraData = new MediaByteBuffer();
+ extraData->AppendElements(sps_pps, sizeof(sps_pps));
+ SPSData spsdata1;
+ bool success = H264::DecodeSPSFromExtraData(extraData, spsdata1);
+ EXPECT_EQ(success, true);
+
+ auto testOutput = [&](uint8_t aProfile, uint8_t aConstraints, uint8_t aLevel,
+ gfx::IntSize aSize, char const* aDesc) {
+ RefPtr<MediaByteBuffer> extraData =
+ H264::CreateExtraData(aProfile, aConstraints, aLevel, aSize);
+ SPSData spsData;
+ success = H264::DecodeSPSFromExtraData(extraData, spsData);
+ EXPECT_EQ(success, true) << aDesc;
+ EXPECT_EQ(spsData.profile_idc, aProfile) << aDesc;
+ EXPECT_EQ(spsData.constraint_set0_flag, (aConstraints >> 7) & 1) << aDesc;
+ EXPECT_EQ(spsData.constraint_set1_flag, (aConstraints >> 6) & 1) << aDesc;
+ EXPECT_EQ(spsData.constraint_set2_flag, (aConstraints >> 5) & 1) << aDesc;
+ EXPECT_EQ(spsData.constraint_set3_flag, (aConstraints >> 4) & 1) << aDesc;
+ EXPECT_EQ(spsData.constraint_set4_flag, (aConstraints >> 3) & 1) << aDesc;
+ EXPECT_EQ(spsData.constraint_set5_flag, (aConstraints >> 2) & 1) << aDesc;
+
+ EXPECT_EQ(spsData.level_idc, aLevel) << aDesc;
+ EXPECT_TRUE(!aSize.IsEmpty());
+ EXPECT_EQ(spsData.pic_width, static_cast<uint32_t>(aSize.width)) << aDesc;
+ EXPECT_EQ(spsData.pic_height, static_cast<uint32_t>(aSize.height)) << aDesc;
+ };
+
+ testOutput(0x42, 0x40, 0x1E, {1920, 1080}, "Constrained Baseline Profile");
+ testOutput(0x4D, 0x00, 0x0B, {300, 300}, "Main Profile");
+ testOutput(0x64, 0x0C, 0x33, {1280, 720}, "Constrained High Profile");
+}
diff --git a/dom/media/gtest/TestBlankVideoDataCreator.cpp b/dom/media/gtest/TestBlankVideoDataCreator.cpp
new file mode 100644
index 0000000000..b30f1cecbe
--- /dev/null
+++ b/dom/media/gtest/TestBlankVideoDataCreator.cpp
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "BlankDecoderModule.h"
+#include "ImageContainer.h"
+
+using namespace mozilla;
+
+TEST(BlankVideoDataCreator, ShouldNotOverflow)
+{
+ RefPtr<MediaRawData> mrd = new MediaRawData();
+ const uint32_t width = 1;
+ const uint32_t height = 1;
+ BlankVideoDataCreator creater(width, height, nullptr);
+ RefPtr<MediaData> data = creater.Create(mrd);
+ EXPECT_NE(data.get(), nullptr);
+}
+
+TEST(BlankVideoDataCreator, ShouldOverflow)
+{
+ RefPtr<MediaRawData> mrd = new MediaRawData();
+ const uint32_t width = UINT_MAX;
+ const uint32_t height = UINT_MAX;
+ BlankVideoDataCreator creater(width, height, nullptr);
+ RefPtr<MediaData> data = creater.Create(mrd);
+ EXPECT_EQ(data.get(), nullptr);
+}
diff --git a/dom/media/gtest/TestBufferReader.cpp b/dom/media/gtest/TestBufferReader.cpp
new file mode 100644
index 0000000000..827e55335d
--- /dev/null
+++ b/dom/media/gtest/TestBufferReader.cpp
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "BufferReader.h"
+
+using namespace mozilla;
+
+TEST(BufferReader, ReaderCursor)
+{
+ // Allocate a buffer and create a BufferReader.
+ const size_t BUFFER_SIZE = 10;
+ uint8_t buffer[BUFFER_SIZE] = {0};
+
+ const uint8_t* const HEAD = reinterpret_cast<uint8_t*>(buffer);
+ const uint8_t* const TAIL = HEAD + BUFFER_SIZE;
+
+ BufferReader reader(HEAD, BUFFER_SIZE);
+ ASSERT_EQ(reader.Offset(), static_cast<size_t>(0));
+ ASSERT_EQ(reader.Peek(BUFFER_SIZE), HEAD);
+
+ // Keep reading to the end, and make sure the final read failed.
+ const size_t READ_SIZE = 4;
+ ASSERT_NE(BUFFER_SIZE % READ_SIZE, static_cast<size_t>(0));
+ for (const uint8_t* ptr = reader.Peek(0); ptr != nullptr;
+ ptr = reader.Read(READ_SIZE)) {
+ }
+
+ // Check the reading cursor of the BufferReader is correct
+ // after reading and seeking.
+ const uint8_t* tail = reader.Peek(0);
+ const uint8_t* head = reader.Seek(0);
+
+ EXPECT_EQ(head, HEAD);
+ EXPECT_EQ(tail, TAIL);
+}
+
+TEST(BufferReader, UnalignedRead)
+{
+ // Allocate a buffer and create a BufferReader.
+ const size_t BUFFER_SIZE = 5;
+ uint8_t buffer[BUFFER_SIZE] = {0};
+
+ const uint8_t* const HEAD = reinterpret_cast<uint8_t*>(buffer);
+
+ BufferReader reader(HEAD, BUFFER_SIZE);
+ // adjust the offset so that it's unaligned
+ reader.Read(1);
+ // read an int which needs 4 byte alignment
+ reader.ReadType<uint32_t>();
+}
diff --git a/dom/media/gtest/TestCDMStorage.cpp b/dom/media/gtest/TestCDMStorage.cpp
new file mode 100644
index 0000000000..d6249cc95f
--- /dev/null
+++ b/dom/media/gtest/TestCDMStorage.cpp
@@ -0,0 +1,1342 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ChromiumCDMCallback.h"
+#include "ChromiumCDMParent.h"
+#include "GMPServiceParent.h"
+#include "GMPTestMonitor.h"
+#include "MediaResult.h"
+#include "gtest/gtest.h"
+#include "mozilla/gtest/MozAssertions.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/SchedulerGroup.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "nsIFile.h"
+#include "nsCRTGlue.h"
+#include "nsDirectoryServiceDefs.h"
+#include "nsDirectoryServiceUtils.h"
+#include "nsNSSComponent.h" //For EnsureNSSInitializedChromeOrContent
+#include "nsThreadUtils.h"
+
+using namespace mozilla;
+using namespace mozilla::gmp;
+
+static already_AddRefed<nsIThread> GetGMPThread() {
+ RefPtr<GeckoMediaPluginService> service =
+ GeckoMediaPluginService::GetGeckoMediaPluginService();
+ nsCOMPtr<nsIThread> thread;
+ EXPECT_NS_SUCCEEDED(service->GetThread(getter_AddRefs(thread)));
+ return thread.forget();
+}
+
+/**
+ * Enumerate files under |aPath| (non-recursive).
+ */
+template <typename T>
+static nsresult EnumerateDir(nsIFile* aPath, T&& aDirIter) {
+ nsCOMPtr<nsIDirectoryEnumerator> iter;
+ nsresult rv = aPath->GetDirectoryEntries(getter_AddRefs(iter));
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ nsCOMPtr<nsIFile> entry;
+ while (NS_SUCCEEDED(iter->GetNextFile(getter_AddRefs(entry))) && entry) {
+ aDirIter(entry);
+ }
+ return NS_OK;
+}
+
+/**
+ * Enumerate files under $profileDir/gmp/$platform/gmp-fake/$aDir/
+ * (non-recursive).
+ */
+template <typename T>
+static nsresult EnumerateCDMStorageDir(const nsACString& aDir, T&& aDirIter) {
+ RefPtr<GeckoMediaPluginServiceParent> service =
+ GeckoMediaPluginServiceParent::GetSingleton();
+ MOZ_ASSERT(service);
+
+ // $profileDir/gmp/$platform/
+ nsCOMPtr<nsIFile> path;
+ nsresult rv = service->GetStorageDir(getter_AddRefs(path));
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ // $profileDir/gmp/$platform/gmp-fake/
+ rv = path->Append(u"gmp-fake"_ns);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ // $profileDir/gmp/$platform/gmp-fake/$aDir/
+ rv = path->AppendNative(aDir);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ return EnumerateDir(path, aDirIter);
+}
+
+class GMPShutdownObserver : public nsIRunnable, public nsIObserver {
+ public:
+ GMPShutdownObserver(already_AddRefed<nsIRunnable> aShutdownTask,
+ already_AddRefed<nsIRunnable> Continuation,
+ const nsACString& aNodeId)
+ : mShutdownTask(aShutdownTask),
+ mContinuation(Continuation),
+ mNodeId(NS_ConvertUTF8toUTF16(aNodeId)) {}
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ NS_IMETHOD Run() override {
+ MOZ_ASSERT(NS_IsMainThread());
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ EXPECT_TRUE(observerService);
+ observerService->AddObserver(this, "gmp-shutdown", false);
+
+ nsCOMPtr<nsIThread> thread(GetGMPThread());
+ thread->Dispatch(mShutdownTask, NS_DISPATCH_NORMAL);
+ return NS_OK;
+ }
+
+ NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic,
+ const char16_t* aSomeData) override {
+ if (!strcmp(aTopic, "gmp-shutdown") &&
+ mNodeId.Equals(nsDependentString(aSomeData))) {
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ EXPECT_TRUE(observerService);
+ observerService->RemoveObserver(this, "gmp-shutdown");
+ nsCOMPtr<nsIThread> thread(GetGMPThread());
+ thread->Dispatch(mContinuation, NS_DISPATCH_NORMAL);
+ }
+ return NS_OK;
+ }
+
+ private:
+ virtual ~GMPShutdownObserver() = default;
+ nsCOMPtr<nsIRunnable> mShutdownTask;
+ nsCOMPtr<nsIRunnable> mContinuation;
+ const nsString mNodeId;
+};
+
+NS_IMPL_ISUPPORTS(GMPShutdownObserver, nsIRunnable, nsIObserver)
+
+class NotifyObserversTask : public Runnable {
+ public:
+ explicit NotifyObserversTask(const char* aTopic)
+ : mozilla::Runnable("NotifyObserversTask"), mTopic(aTopic) {}
+ NS_IMETHOD Run() override {
+ MOZ_ASSERT(NS_IsMainThread());
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ if (observerService) {
+ observerService->NotifyObservers(nullptr, mTopic, nullptr);
+ }
+ return NS_OK;
+ }
+ const char* mTopic;
+};
+
+class ClearCDMStorageTask : public nsIRunnable, public nsIObserver {
+ public:
+ ClearCDMStorageTask(already_AddRefed<nsIRunnable> Continuation,
+ nsIThread* aTarget, PRTime aSince)
+ : mContinuation(Continuation), mTarget(aTarget), mSince(aSince) {}
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ NS_IMETHOD Run() override {
+ MOZ_ASSERT(NS_IsMainThread());
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ EXPECT_TRUE(observerService);
+ observerService->AddObserver(this, "gmp-clear-storage-complete", false);
+ if (observerService) {
+ nsAutoString str;
+ if (mSince >= 0) {
+ str.AppendInt(static_cast<int64_t>(mSince));
+ }
+ observerService->NotifyObservers(nullptr, "browser:purge-session-history",
+ str.Data());
+ }
+ return NS_OK;
+ }
+
+ NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic,
+ const char16_t* aSomeData) override {
+ if (!strcmp(aTopic, "gmp-clear-storage-complete")) {
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ EXPECT_TRUE(observerService);
+ observerService->RemoveObserver(this, "gmp-clear-storage-complete");
+ mTarget->Dispatch(mContinuation, NS_DISPATCH_NORMAL);
+ }
+ return NS_OK;
+ }
+
+ private:
+ virtual ~ClearCDMStorageTask() = default;
+ nsCOMPtr<nsIRunnable> mContinuation;
+ nsCOMPtr<nsIThread> mTarget;
+ const PRTime mSince;
+};
+
+NS_IMPL_ISUPPORTS(ClearCDMStorageTask, nsIRunnable, nsIObserver)
+
+static void ClearCDMStorage(already_AddRefed<nsIRunnable> aContinuation,
+ nsIThread* aTarget, PRTime aSince = -1) {
+ RefPtr<ClearCDMStorageTask> task(
+ new ClearCDMStorageTask(std::move(aContinuation), aTarget, aSince));
+ SchedulerGroup::Dispatch(task.forget());
+}
+
+static void SimulatePBModeExit() {
+ NS_DispatchAndSpinEventLoopUntilComplete(
+ "SimulatePBModeExit"_ns, GetMainThreadSerialEventTarget(),
+ MakeAndAddRef<NotifyObserversTask>("last-pb-context-exited"));
+}
+
+class TestGetNodeIdCallback : public GetNodeIdCallback {
+ public:
+ TestGetNodeIdCallback(nsCString& aNodeId, nsresult& aResult)
+ : mNodeId(aNodeId), mResult(aResult) {}
+
+ void Done(nsresult aResult, const nsACString& aNodeId) {
+ mResult = aResult;
+ mNodeId = aNodeId;
+ }
+
+ private:
+ nsCString& mNodeId;
+ nsresult& mResult;
+};
+
+static NodeIdParts GetNodeIdParts(const nsAString& aOrigin,
+ const nsAString& aTopLevelOrigin,
+ const nsAString& aGmpName, bool aInPBMode) {
+ OriginAttributes attrs;
+ attrs.mPrivateBrowsingId = aInPBMode ? 1 : 0;
+
+ nsAutoCString suffix;
+ attrs.CreateSuffix(suffix);
+
+ nsAutoString origin;
+ origin.Assign(aOrigin);
+ origin.Append(NS_ConvertUTF8toUTF16(suffix));
+
+ nsAutoString topLevelOrigin;
+ topLevelOrigin.Assign(aTopLevelOrigin);
+ topLevelOrigin.Append(NS_ConvertUTF8toUTF16(suffix));
+ return NodeIdParts{origin, topLevelOrigin, nsString(aGmpName)};
+}
+
+static nsCString GetNodeId(const nsAString& aOrigin,
+ const nsAString& aTopLevelOrigin, bool aInPBMode) {
+ RefPtr<GeckoMediaPluginServiceParent> service =
+ GeckoMediaPluginServiceParent::GetSingleton();
+ EXPECT_TRUE(service);
+ nsCString nodeId;
+ nsresult result;
+ UniquePtr<GetNodeIdCallback> callback(
+ new TestGetNodeIdCallback(nodeId, result));
+
+ OriginAttributes attrs;
+ attrs.mPrivateBrowsingId = aInPBMode ? 1 : 0;
+
+ nsAutoCString suffix;
+ attrs.CreateSuffix(suffix);
+
+ nsAutoString origin;
+ origin.Assign(aOrigin);
+ origin.Append(NS_ConvertUTF8toUTF16(suffix));
+
+ nsAutoString topLevelOrigin;
+ topLevelOrigin.Assign(aTopLevelOrigin);
+ topLevelOrigin.Append(NS_ConvertUTF8toUTF16(suffix));
+
+ // We rely on the fact that the GetNodeId implementation for
+ // GeckoMediaPluginServiceParent is synchronous.
+ nsresult rv = service->GetNodeId(origin, topLevelOrigin, u"gmp-fake"_ns,
+ std::move(callback));
+ EXPECT_TRUE(NS_SUCCEEDED(rv) && NS_SUCCEEDED(result));
+ return nodeId;
+}
+
+static bool IsCDMStorageIsEmpty() {
+ RefPtr<GeckoMediaPluginServiceParent> service =
+ GeckoMediaPluginServiceParent::GetSingleton();
+ MOZ_ASSERT(service);
+ nsCOMPtr<nsIFile> storage;
+ nsresult rv = service->GetStorageDir(getter_AddRefs(storage));
+ EXPECT_NS_SUCCEEDED(rv);
+ bool exists = false;
+ if (storage) {
+ storage->Exists(&exists);
+ }
+ return !exists;
+}
+
+static void AssertIsOnGMPThread() {
+ RefPtr<GeckoMediaPluginService> service =
+ GeckoMediaPluginService::GetGeckoMediaPluginService();
+ MOZ_ASSERT(service);
+ nsCOMPtr<nsIThread> thread;
+ service->GetThread(getter_AddRefs(thread));
+ MOZ_ASSERT(thread);
+ nsCOMPtr<nsIThread> currentThread;
+ DebugOnly<nsresult> rv = NS_GetCurrentThread(getter_AddRefs(currentThread));
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+ MOZ_ASSERT(currentThread == thread);
+}
+
+class CDMStorageTest {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CDMStorageTest)
+
+ void DoTest(void (CDMStorageTest::*aTestMethod)()) {
+ EnsureNSSInitializedChromeOrContent();
+ nsCOMPtr<nsIThread> thread(GetGMPThread());
+ ClearCDMStorage(
+ NewRunnableMethod("CDMStorageTest::DoTest", this, aTestMethod), thread);
+ AwaitFinished();
+ }
+
+ CDMStorageTest() : mMonitor("CDMStorageTest"), mFinished(false) {}
+
+ void Update(const nsCString& aMessage) {
+ nsTArray<uint8_t> msg;
+ msg.AppendElements(aMessage.get(), aMessage.Length());
+ mCDM->UpdateSession("fake-session-id"_ns, 1, msg);
+ }
+
+ void TestGetNodeId() {
+ AssertIsOnGMPThread();
+
+ EXPECT_TRUE(IsCDMStorageIsEmpty());
+
+ const nsString origin1 = u"http://example1.com"_ns;
+ const nsString origin2 = u"http://example2.org"_ns;
+
+ nsCString PBnodeId1 = GetNodeId(origin1, origin2, true);
+ nsCString PBnodeId2 = GetNodeId(origin1, origin2, true);
+
+ // Node ids for the same origins should be the same in PB mode.
+ EXPECT_TRUE(PBnodeId1.Equals(PBnodeId2));
+
+ nsCString PBnodeId3 = GetNodeId(origin2, origin1, true);
+
+ // Node ids with origin and top level origin swapped should be different.
+ EXPECT_TRUE(!PBnodeId3.Equals(PBnodeId1));
+
+ // Getting node ids in PB mode should not result in the node id being
+ // stored.
+ EXPECT_TRUE(IsCDMStorageIsEmpty());
+
+ nsCString nodeId1 = GetNodeId(origin1, origin2, false);
+ nsCString nodeId2 = GetNodeId(origin1, origin2, false);
+
+ // NodeIds for the same origin pair in non-pb mode should be the same.
+ EXPECT_TRUE(nodeId1.Equals(nodeId2));
+
+ // Node ids for a given origin pair should be different for the PB origins
+ // should be the same in PB mode.
+ EXPECT_TRUE(!PBnodeId1.Equals(nodeId1));
+ EXPECT_TRUE(!PBnodeId2.Equals(nodeId2));
+
+ nsCOMPtr<nsIThread> thread(GetGMPThread());
+ ClearCDMStorage(NewRunnableMethod<nsCString>(
+ "CDMStorageTest::TestGetNodeId_Continuation", this,
+ &CDMStorageTest::TestGetNodeId_Continuation, nodeId1),
+ thread);
+ }
+
+ void TestGetNodeId_Continuation(nsCString aNodeId1) {
+ EXPECT_TRUE(IsCDMStorageIsEmpty());
+
+ // Once we clear storage, the node ids generated for the same origin-pair
+ // should be different.
+ const nsString origin1 = u"http://example1.com"_ns;
+ const nsString origin2 = u"http://example2.org"_ns;
+ nsCString nodeId3 = GetNodeId(origin1, origin2, false);
+ EXPECT_TRUE(!aNodeId1.Equals(nodeId3));
+
+ SetFinished();
+ }
+
+ void CreateDecryptor(const nsAString& aOrigin,
+ const nsAString& aTopLevelOrigin, bool aInPBMode,
+ const nsCString& aUpdate) {
+ nsTArray<nsCString> updates;
+ updates.AppendElement(aUpdate);
+ CreateDecryptor(aOrigin, aTopLevelOrigin, aInPBMode, std::move(updates));
+ }
+
+ void CreateDecryptor(const nsAString& aOrigin,
+ const nsAString& aTopLevelOrigin, bool aInPBMode,
+ nsTArray<nsCString>&& aUpdates) {
+ CreateDecryptor(
+ GetNodeIdParts(aOrigin, aTopLevelOrigin, u"gmp-fake"_ns, aInPBMode),
+ std::move(aUpdates));
+ }
+
+ void CreateDecryptor(const NodeIdParts& aNodeId,
+ nsTArray<nsCString>&& aUpdates) {
+ RefPtr<GeckoMediaPluginService> service =
+ GeckoMediaPluginService::GetGeckoMediaPluginService();
+ EXPECT_TRUE(service);
+
+ nsCString keySystem{"fake"_ns};
+
+ RefPtr<CDMStorageTest> self = this;
+ RefPtr<gmp::GetCDMParentPromise> promise =
+ service->GetCDM(aNodeId, keySystem, nullptr);
+ nsCOMPtr<nsISerialEventTarget> thread = GetGMPThread();
+ promise->Then(
+ thread, __func__,
+ [self, updates = std::move(aUpdates),
+ thread](RefPtr<gmp::ChromiumCDMParent> cdm) mutable {
+ self->mCDM = cdm;
+ EXPECT_TRUE(!!self->mCDM);
+ self->mCallback.reset(new CallbackProxy(self));
+ nsCString failureReason;
+ self->mCDM
+ ->Init(self->mCallback.get(), false, true,
+ GetMainThreadSerialEventTarget())
+ ->Then(
+ thread, __func__,
+ [self, updates = std::move(updates)] {
+ for (const auto& update : updates) {
+ self->Update(update);
+ }
+ },
+ [](MediaResult rv) { EXPECT_TRUE(false); });
+ },
+ [](MediaResult rv) { EXPECT_TRUE(false); });
+ }
+
+ void TestBasicStorage() {
+ AssertIsOnGMPThread();
+ EXPECT_TRUE(IsCDMStorageIsEmpty());
+
+ RefPtr<GeckoMediaPluginService> service =
+ GeckoMediaPluginService::GetGeckoMediaPluginService();
+
+ // Send a message to the fake GMP for it to run its own tests internally.
+ // It sends us a "test-storage complete" message when its passed, or
+ // some other message if its tests fail.
+ Expect("test-storage complete"_ns,
+ NewRunnableMethod("CDMStorageTest::SetFinished", this,
+ &CDMStorageTest::SetFinished));
+
+ CreateDecryptor(u"http://example1.com"_ns, u"http://example2.com"_ns, false,
+ "test-storage"_ns);
+ }
+
+  /**
+   * 1. Generate storage data for some sites.
+   * 2. Forget about one of the sites.
+   * 3. Check if the storage data for the forgotten site are erased correctly.
+   * 4. Check if the storage data for other sites remain unchanged.
+   */
+  void TestForgetThisSite() {
+    AssertIsOnGMPThread();
+    EXPECT_TRUE(IsCDMStorageIsEmpty());
+
+    // Populate storage for the first site, then continue with another site.
+    Expect("test-storage complete"_ns,
+           NewRunnableMethod("CDMStorageTest::TestForgetThisSite_AnotherSite",
+                             this,
+                             &CDMStorageTest::TestForgetThisSite_AnotherSite));
+
+    CreateDecryptor(u"http://example1.com"_ns, u"http://example2.com"_ns, false,
+                    "test-storage"_ns);
+  }
+
+  // Second stage: populate storage for an unrelated site, then move on to
+  // collecting the pre-removal node-id snapshot.
+  void TestForgetThisSite_AnotherSite() {
+    Shutdown();
+
+    Expect("test-storage complete"_ns,
+           NewRunnableMethod(
+               "CDMStorageTest::TestForgetThisSite_CollectSiteInfo", this,
+               &CDMStorageTest::TestForgetThisSite_CollectSiteInfo));
+
+    CreateDecryptor(u"http://example3.com"_ns, u"http://example4.com"_ns, false,
+                    "test-storage"_ns);
+  }
+
+  // Bundles the site being forgotten with the origin-attribute pattern used
+  // to match it, plus the node-id salts expected to survive the removal.
+  struct NodeInfo {
+    explicit NodeInfo(const nsACString& aSite,
+                      const mozilla::OriginAttributesPattern& aPattern)
+        : siteToForget(aSite), mPattern(aPattern) {}
+    nsCString siteToForget;  // site whose storage should be purged
+    mozilla::OriginAttributesPattern mPattern;
+    nsTArray<nsCString> mExpectedRemainingNodeIds;  // salts expected to remain
+  };
+
+  // Functor for EnumerateCDMStorageDir("id"): records the salt of every
+  // node-id directory that does NOT match the site scheduled for removal.
+  class NodeIdCollector {
+   public:
+    explicit NodeIdCollector(NodeInfo* aInfo) : mNodeInfo(aInfo) {}
+    void operator()(nsIFile* aFile) {
+      nsCString salt;
+      nsresult rv = ReadSalt(aFile, salt);
+      ASSERT_NS_SUCCEEDED(rv);
+      if (MatchOrigin(aFile, mNodeInfo->siteToForget, mNodeInfo->mPattern)) {
+        return;  // belongs to the site being forgotten; should disappear
+      }
+      mNodeInfo->mExpectedRemainingNodeIds.AppendElement(salt);
+    }
+
+   private:
+    NodeInfo* mNodeInfo;
+  };
+
+  // Snapshot which node ids exist before the removal, then hop to the main
+  // thread to invoke "Forget this site".
+  void TestForgetThisSite_CollectSiteInfo() {
+    mozilla::OriginAttributesPattern pattern;
+
+    auto siteInfo = MakeUnique<NodeInfo>("http://example1.com"_ns, pattern);
+    // Collect nodeIds that are expected to remain for later comparison.
+    EnumerateCDMStorageDir("id"_ns, NodeIdCollector(siteInfo.get()));
+    // "Forget this site" must be invoked on the main thread.
+    SchedulerGroup::Dispatch(NewRunnableMethod<UniquePtr<NodeInfo>&&>(
+        "CDMStorageTest::TestForgetThisSite_Forget", this,
+        &CDMStorageTest::TestForgetThisSite_Forget, std::move(siteInfo)));
+  }
+
+  // Runs on the main thread: performs the actual "forget", then bounces
+  // back to the GMP thread to verify the result and finish the test.
+  void TestForgetThisSite_Forget(UniquePtr<NodeInfo>&& aSiteInfo) {
+    RefPtr<GeckoMediaPluginServiceParent> service =
+        GeckoMediaPluginServiceParent::GetSingleton();
+    service->ForgetThisSiteNative(
+        NS_ConvertUTF8toUTF16(aSiteInfo->siteToForget), aSiteInfo->mPattern);
+
+    nsCOMPtr<nsIThread> thread;
+    service->GetThread(getter_AddRefs(thread));
+
+    nsCOMPtr<nsIRunnable> verifyTask = NewRunnableMethod<UniquePtr<NodeInfo>&&>(
+        "CDMStorageTest::TestForgetThisSite_Verify", this,
+        &CDMStorageTest::TestForgetThisSite_Verify, std::move(aSiteInfo));
+    thread->Dispatch(verifyTask, NS_DISPATCH_NORMAL);
+
+    nsCOMPtr<nsIRunnable> finishTask = NewRunnableMethod(
+        "CDMStorageTest::SetFinished", this, &CDMStorageTest::SetFinished);
+    thread->Dispatch(finishTask, NS_DISPATCH_NORMAL);
+  }
+
+  // Functor for EnumerateCDMStorageDir("id") run after "forget this site":
+  // asserts that no surviving node id matches the forgotten site, and that
+  // the survivors are exactly those collected by NodeIdCollector beforehand.
+  class NodeIdVerifier {
+   public:
+    explicit NodeIdVerifier(const NodeInfo* aInfo)
+        : mNodeInfo(aInfo),
+          mExpectedRemainingNodeIds(aInfo->mExpectedRemainingNodeIds.Clone()) {}
+    void operator()(nsIFile* aFile) {
+      nsCString salt;
+      nsresult rv = ReadSalt(aFile, salt);
+      ASSERT_NS_SUCCEEDED(rv);
+      // Shouldn't match the origin if we clear correctly.
+      EXPECT_FALSE(
+          MatchOrigin(aFile, mNodeInfo->siteToForget, mNodeInfo->mPattern))
+          << "Found files persisted that match against a site that should "
+             "have been removed!";
+      // Check if remaining nodeIDs are as expected.
+      EXPECT_TRUE(mExpectedRemainingNodeIds.RemoveElement(salt))
+          << "Failed to remove salt from expected remaining node ids. This "
+             "indicates storage that should be forgotten is still persisted!";
+    }
+    // The destructor performs the final check: every expected survivor must
+    // have been seen during enumeration.
+    ~NodeIdVerifier() {
+      EXPECT_TRUE(mExpectedRemainingNodeIds.IsEmpty())
+          << "Some expected remaining node ids were not checked against. This "
+             "indicates that data we expected to find in storage was missing!";
+    }
+
+   private:
+    const NodeInfo* mNodeInfo;
+    nsTArray<nsCString> mExpectedRemainingNodeIds;
+  };
+
+  // Functor for EnumerateCDMStorageDir("storage") run after "forget this
+  // site": the remaining storage directory names (salts) must be exactly the
+  // expected surviving node ids.
+  class StorageVerifier {
+   public:
+    explicit StorageVerifier(const NodeInfo* aInfo)
+        : mExpectedRemainingNodeIds(aInfo->mExpectedRemainingNodeIds.Clone()) {}
+    void operator()(nsIFile* aFile) {
+      // Storage dirs are named by the node-id salt.
+      nsCString salt;
+      nsresult rv = aFile->GetNativeLeafName(salt);
+      ASSERT_NS_SUCCEEDED(rv);
+      EXPECT_TRUE(mExpectedRemainingNodeIds.RemoveElement(salt))
+          << "Failed to remove salt from expected remaining node ids. This "
+             "indicates storage that should be forgotten is still persisted!";
+    }
+    // Destructor asserts every expected survivor was actually enumerated.
+    ~StorageVerifier() {
+      EXPECT_TRUE(mExpectedRemainingNodeIds.IsEmpty())
+          << "Some expected remaining node ids were not checked against. This "
+             "indicates that data we expected to find in storage was missing!";
+    }
+
+   private:
+    nsTArray<nsCString> mExpectedRemainingNodeIds;
+  };
+
+  // Verify both the "id" and "storage" directories after the removal; the
+  // verifier functors' destructors complete the checks.
+  void TestForgetThisSite_Verify(UniquePtr<NodeInfo>&& aSiteInfo) {
+    EXPECT_NS_SUCCEEDED(
+        EnumerateCDMStorageDir("id"_ns, NodeIdVerifier(aSiteInfo.get())));
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir(
+        "storage"_ns, StorageVerifier(aSiteInfo.get())));
+  }
+
+  /**
+   * 1. Generate storage data for some sites.
+   * 2. Forget about base domain example1.com
+   * 3. Check if the storage data for the forgotten site are erased correctly.
+   * 4. Check if the storage data for other sites remain unchanged.
+   */
+  void TestForgetThisBaseDomain() {
+    AssertIsOnGMPThread();
+    EXPECT_TRUE(IsCDMStorageIsEmpty());
+
+    // First site: base domain example1.com appears in the origin.
+    Expect("test-storage complete"_ns,
+           NewRunnableMethod(
+               "CDMStorageTest::TestForgetThisBaseDomain_SecondSite", this,
+               &CDMStorageTest::TestForgetThisBaseDomain_SecondSite));
+
+    CreateDecryptor(u"http://media.example1.com"_ns,
+                    u"http://tld.example2.com"_ns, false, "test-storage"_ns);
+  }
+
+  // Second site: example1.com appears only in the top-level origin, so this
+  // storage should also be removed by the base-domain forget.
+  void TestForgetThisBaseDomain_SecondSite() {
+    Shutdown();
+
+    Expect("test-storage complete"_ns,
+           NewRunnableMethod(
+               "CDMStorageTest::TestForgetThisBaseDomain_ThirdSite", this,
+               &CDMStorageTest::TestForgetThisBaseDomain_ThirdSite));
+
+    CreateDecryptor(u"http://media.somewhereelse.com"_ns,
+                    u"http://home.example1.com"_ns, false, "test-storage"_ns);
+  }
+
+  // Third site: "long-example1.com" is a different base domain and must
+  // survive the removal of "example1.com".
+  void TestForgetThisBaseDomain_ThirdSite() {
+    Shutdown();
+
+    Expect("test-storage complete"_ns,
+           NewRunnableMethod(
+               "CDMStorageTest::TestForgetThisBaseDomain_CollectSiteInfo",
+               this,
+               &CDMStorageTest::TestForgetThisBaseDomain_CollectSiteInfo));
+
+    CreateDecryptor(u"http://media.example3.com"_ns,
+                    u"http://tld.long-example1.com"_ns, false,
+                    "test-storage"_ns);
+  }
+
+  // Pairs the base domain being forgotten with the node-id salts expected to
+  // survive the removal.
+  struct BaseDomainNodeInfo {
+    explicit BaseDomainNodeInfo(const nsACString& aBaseDomain)
+        : baseDomainToForget(aBaseDomain) {}
+    nsCString baseDomainToForget;  // base domain whose storage is purged
+
+    nsTArray<nsCString> mExpectedRemainingNodeIds;  // salts expected to remain
+  };
+
+  // Functor for EnumerateCDMStorageDir("id"): records the salt of every
+  // node-id directory whose origins do NOT involve the doomed base domain.
+  class BaseDomainNodeIdCollector {
+   public:
+    explicit BaseDomainNodeIdCollector(BaseDomainNodeInfo* aInfo)
+        : mNodeInfo(aInfo) {}
+    void operator()(nsIFile* aFile) {
+      nsCString salt;
+      nsresult rv = ReadSalt(aFile, salt);
+      ASSERT_NS_SUCCEEDED(rv);
+      if (MatchBaseDomain(aFile, mNodeInfo->baseDomainToForget)) {
+        return;  // matches the doomed base domain; should be removed
+      }
+      mNodeInfo->mExpectedRemainingNodeIds.AppendElement(salt);
+    }
+
+   private:
+    BaseDomainNodeInfo* mNodeInfo;
+  };
+
+  // Snapshot which node ids exist before the removal, then hop to the main
+  // thread to invoke "ForgetThisBaseDomain".
+  void TestForgetThisBaseDomain_CollectSiteInfo() {
+    auto siteInfo = MakeUnique<BaseDomainNodeInfo>("example1.com"_ns);
+    // Collect nodeIds that are expected to remain for later comparison.
+    EnumerateCDMStorageDir("id"_ns, BaseDomainNodeIdCollector(siteInfo.get()));
+    // "ForgetThisBaseDomain" must be invoked on the main thread.
+    SchedulerGroup::Dispatch(NewRunnableMethod<UniquePtr<BaseDomainNodeInfo>&&>(
+        "CDMStorageTest::TestForgetThisBaseDomain_Forget", this,
+        &CDMStorageTest::TestForgetThisBaseDomain_Forget, std::move(siteInfo)));
+  }
+
+  // Runs on the main thread: performs the base-domain removal, then bounces
+  // back to the GMP thread to verify the result and finish the test.
+  void TestForgetThisBaseDomain_Forget(
+      UniquePtr<BaseDomainNodeInfo>&& aSiteInfo) {
+    RefPtr<GeckoMediaPluginServiceParent> service =
+        GeckoMediaPluginServiceParent::GetSingleton();
+    service->ForgetThisBaseDomain(
+        NS_ConvertUTF8toUTF16(aSiteInfo->baseDomainToForget));
+
+    nsCOMPtr<nsIThread> thread;
+    service->GetThread(getter_AddRefs(thread));
+
+    nsCOMPtr<nsIRunnable> verifyTask =
+        NewRunnableMethod<UniquePtr<BaseDomainNodeInfo>&&>(
+            "CDMStorageTest::TestForgetThisBaseDomain_Verify", this,
+            &CDMStorageTest::TestForgetThisBaseDomain_Verify,
+            std::move(aSiteInfo));
+    thread->Dispatch(verifyTask, NS_DISPATCH_NORMAL);
+
+    nsCOMPtr<nsIRunnable> finishTask = NewRunnableMethod(
+        "CDMStorageTest::SetFinished", this, &CDMStorageTest::SetFinished);
+    thread->Dispatch(finishTask, NS_DISPATCH_NORMAL);
+  }
+
+  // Functor for EnumerateCDMStorageDir("id") run after the base-domain
+  // forget: asserts no surviving node id involves the doomed base domain,
+  // and that the survivors are exactly those collected beforehand.
+  class BaseDomainNodeIdVerifier {
+   public:
+    explicit BaseDomainNodeIdVerifier(const BaseDomainNodeInfo* aInfo)
+        : mNodeInfo(aInfo),
+          mExpectedRemainingNodeIds(aInfo->mExpectedRemainingNodeIds.Clone()) {}
+    void operator()(nsIFile* aFile) {
+      nsCString salt;
+      nsresult rv = ReadSalt(aFile, salt);
+      ASSERT_NS_SUCCEEDED(rv);
+      // Shouldn't match the origin if we clear correctly.
+      EXPECT_FALSE(MatchBaseDomain(aFile, mNodeInfo->baseDomainToForget))
+          << "Found files persisted that match against a domain that should "
+             "have been removed!";
+      // Check if remaining nodeIDs are as expected.
+      EXPECT_TRUE(mExpectedRemainingNodeIds.RemoveElement(salt))
+          << "Failed to remove salt from expected remaining node ids. This "
+             "indicates storage that should be forgotten is still persisted!";
+    }
+    // Destructor asserts every expected survivor was actually enumerated.
+    ~BaseDomainNodeIdVerifier() {
+      EXPECT_TRUE(mExpectedRemainingNodeIds.IsEmpty())
+          << "Some expected remaining node ids were not checked against. This "
+             "indicates that data we expected to find in storage was missing!";
+    }
+
+   private:
+    const BaseDomainNodeInfo* mNodeInfo;
+    nsTArray<nsCString> mExpectedRemainingNodeIds;
+  };
+
+  // Functor for EnumerateCDMStorageDir("storage") run after the base-domain
+  // forget: the remaining storage directory names (salts) must be exactly
+  // the expected surviving node ids.
+  // (Fix: dropped two stray empty statements `;` left after the EXPECTs.)
+  class BaseDomainStorageVerifier {
+   public:
+    explicit BaseDomainStorageVerifier(const BaseDomainNodeInfo* aInfo)
+        : mExpectedRemainingNodeIds(aInfo->mExpectedRemainingNodeIds.Clone()) {}
+    void operator()(nsIFile* aFile) {
+      // Storage dirs are named by the node-id salt.
+      nsCString salt;
+      nsresult rv = aFile->GetNativeLeafName(salt);
+      ASSERT_NS_SUCCEEDED(rv);
+      EXPECT_TRUE(mExpectedRemainingNodeIds.RemoveElement(salt))
+          << "Failed to remove salt from expected remaining node ids. This "
+             "indicates storage that should be forgotten is still persisted!";
+    }
+    // Destructor asserts every expected survivor was actually enumerated.
+    ~BaseDomainStorageVerifier() {
+      EXPECT_TRUE(mExpectedRemainingNodeIds.IsEmpty())
+          << "Some expected remaining node ids were not checked against. This "
+             "indicates that data we expected to find in storage was missing!";
+    }
+
+   private:
+    nsTArray<nsCString> mExpectedRemainingNodeIds;
+  };
+
+  // Verify both the "id" and "storage" directories after the base-domain
+  // removal; the verifier functors' destructors complete the checks.
+  void TestForgetThisBaseDomain_Verify(
+      UniquePtr<BaseDomainNodeInfo>&& aSiteInfo) {
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir(
+        "id"_ns, BaseDomainNodeIdVerifier(aSiteInfo.get())));
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir(
+        "storage"_ns, BaseDomainStorageVerifier(aSiteInfo.get())));
+  }
+
+  /**
+   * 1. Generate some storage data.
+   * 2. Find the max mtime |t| in $profileDir/gmp/$platform/gmp-fake/id/.
+   * 3. Pass |t| to clear recent history.
+   * 4. Check if all directories in $profileDir/gmp/$platform/gmp-fake/id/ and
+   *    $profileDir/gmp/$platform/gmp-fake/storage are removed.
+   */
+  void TestClearRecentHistory1() {
+    AssertIsOnGMPThread();
+    EXPECT_TRUE(IsCDMStorageIsEmpty());
+
+    // Populate storage first; the clearing happens in the continuation.
+    Expect("test-storage complete"_ns,
+           NewRunnableMethod("CDMStorageTest::TestClearRecentHistory1_Clear",
+                             this,
+                             &CDMStorageTest::TestClearRecentHistory1_Clear));
+
+    CreateDecryptor(u"http://example1.com"_ns, u"http://example2.com"_ns, false,
+                    "test-storage"_ns);
+  }
+
+  /**
+   * 1. Generate some storage data.
+   * 2. Find the max mtime |t| in $profileDir/gmp/$platform/gmp-fake/storage/.
+   * 3. Pass |t| to clear recent history.
+   * 4. Check if all directories in $profileDir/gmp/$platform/gmp-fake/id/ and
+   *    $profileDir/gmp/$platform/gmp-fake/storage are removed.
+   */
+  void TestClearRecentHistory2() {
+    AssertIsOnGMPThread();
+    EXPECT_TRUE(IsCDMStorageIsEmpty());
+
+    // Populate storage first; the clearing happens in the continuation.
+    Expect("test-storage complete"_ns,
+           NewRunnableMethod("CDMStorageTest::TestClearRecentHistory2_Clear",
+                             this,
+                             &CDMStorageTest::TestClearRecentHistory2_Clear));
+
+    CreateDecryptor(u"http://example1.com"_ns, u"http://example2.com"_ns, false,
+                    "test-storage"_ns);
+  }
+
+  /**
+   * 1. Generate some storage data.
+   * 2. Find the max mtime |t| in $profileDir/gmp/$platform/gmp-fake/storage/.
+   * 3. Pass |t+1| to clear recent history.
+   * 4. Check if all directories in $profileDir/gmp/$platform/gmp-fake/id/ and
+   *    $profileDir/gmp/$platform/gmp-fake/storage remain unchanged.
+   */
+  void TestClearRecentHistory3() {
+    AssertIsOnGMPThread();
+    EXPECT_TRUE(IsCDMStorageIsEmpty());
+
+    // Populate storage first; the clearing happens in the continuation.
+    Expect("test-storage complete"_ns,
+           NewRunnableMethod("CDMStorageTest::TestClearRecentHistory3_Clear",
+                             this,
+                             &CDMStorageTest::TestClearRecentHistory3_Clear));
+
+    CreateDecryptor(u"http://example1.com"_ns, u"http://example2.com"_ns, false,
+                    "test-storage"_ns);
+  }
+
+  // Recursively finds the newest last-modified time under a directory tree;
+  // applied to each entry via EnumerateCDMStorageDir/EnumerateDir.
+  class MaxMTimeFinder {
+   public:
+    MaxMTimeFinder() : mMaxTime(0) {}
+    void operator()(nsIFile* aFile) {
+      PRTime lastModified;
+      if (NS_SUCCEEDED(aFile->GetLastModifiedTime(&lastModified)) &&
+          lastModified > mMaxTime) {
+        mMaxTime = lastModified;
+      }
+      // Recurse into subdirectories with the same functor.
+      EnumerateDir(aFile, *this);
+    }
+    PRTime GetResult() const { return mMaxTime; }
+
+   private:
+    PRTime mMaxTime;
+  };
+
+  // Clear everything at least as old as the newest mtime under "id"; all
+  // storage should then be gone.
+  void TestClearRecentHistory1_Clear() {
+    MaxMTimeFinder finder;
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir("id"_ns, finder));
+
+    nsCOMPtr<nsIRunnable> check = NewRunnableMethod(
+        "CDMStorageTest::TestClearRecentHistory_CheckEmpty", this,
+        &CDMStorageTest::TestClearRecentHistory_CheckEmpty);
+    nsCOMPtr<nsIThread> gmpThread(GetGMPThread());
+    ClearCDMStorage(check.forget(), gmpThread, finder.GetResult());
+  }
+
+  // Same as above but keyed off the newest mtime under "storage".
+  void TestClearRecentHistory2_Clear() {
+    MaxMTimeFinder finder;
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir("storage"_ns, finder));
+
+    nsCOMPtr<nsIRunnable> check = NewRunnableMethod(
+        "CDMStorageTest::TestClearRecentHistory_CheckEmpty", this,
+        &CDMStorageTest::TestClearRecentHistory_CheckEmpty);
+    nsCOMPtr<nsIThread> gmpThread(GetGMPThread());
+    ClearCDMStorage(check.forget(), gmpThread, finder.GetResult());
+  }
+
+  // Clear only data newer than the newest mtime (t+1); nothing qualifies,
+  // so all storage should survive.
+  void TestClearRecentHistory3_Clear() {
+    MaxMTimeFinder finder;
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir("storage"_ns, finder));
+
+    nsCOMPtr<nsIRunnable> check = NewRunnableMethod(
+        "CDMStorageTest::TestClearRecentHistory_CheckNonEmpty", this,
+        &CDMStorageTest::TestClearRecentHistory_CheckNonEmpty);
+    nsCOMPtr<nsIThread> gmpThread(GetGMPThread());
+    ClearCDMStorage(check.forget(), gmpThread, finder.GetResult() + 1);
+  }
+
+  // Counts the entries seen by EnumerateCDMStorageDir.
+  class FileCounter {
+   public:
+    FileCounter() = default;
+    void operator()(nsIFile* aFile) { mCount++; }
+    int GetCount() const { return mCount; }
+
+   private:
+    int mCount = 0;
+  };
+
+  // After clearing: both the "id" and "storage" directories must be empty.
+  void TestClearRecentHistory_CheckEmpty() {
+    // There should be no files under $profileDir/gmp/$platform/gmp-fake/id/
+    FileCounter idCounter;
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir("id"_ns, idCounter));
+    EXPECT_EQ(idCounter.GetCount(), 0);
+
+    // ... nor under $profileDir/gmp/$platform/gmp-fake/storage/
+    FileCounter storageCounter;
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir("storage"_ns, storageCounter));
+    EXPECT_EQ(storageCounter.GetCount(), 0);
+
+    SetFinished();
+  }
+
+  // After a no-op clear: the single site's data must still be present.
+  void TestClearRecentHistory_CheckNonEmpty() {
+    // One directory expected under $profileDir/gmp/$platform/gmp-fake/id/
+    FileCounter idCounter;
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir("id"_ns, idCounter));
+    EXPECT_EQ(idCounter.GetCount(), 1);
+
+    // ... and one under $profileDir/gmp/$platform/gmp-fake/storage/
+    FileCounter storageCounter;
+    EXPECT_NS_SUCCEEDED(EnumerateCDMStorageDir("storage"_ns, storageCounter));
+    EXPECT_EQ(storageCounter.GetCount(), 1);
+
+    SetFinished();
+  }
+
+  // Write a timestamped record on one origin pair, then (in the
+  // continuation) verify a different origin pair cannot read it.
+  void TestCrossOriginStorage() {
+    EXPECT_TRUE(!mCDM);
+
+    // Send the decryptor the message "store recordid $time"
+    // Wait for the decryptor to send us "stored recordid $time"
+    auto t = time(0);
+    nsCString response("stored crossOriginTestRecordId ");
+    response.AppendInt((int64_t)t);
+    Expect(
+        response,
+        NewRunnableMethod(
+            "CDMStorageTest::TestCrossOriginStorage_RecordStoredContinuation",
+            this,
+            &CDMStorageTest::TestCrossOriginStorage_RecordStoredContinuation));
+
+    nsCString update("store crossOriginTestRecordId ");
+    update.AppendInt((int64_t)t);
+
+    // Open decryptor on one origin, write a record, and test that that
+    // record can't be read on another origin.
+    CreateDecryptor(u"http://example3.com"_ns, u"http://example4.com"_ns, false,
+                    update);
+  }
+
+  // Reopen on a different origin pair; the record written by the first
+  // origin must not be readable there (length 0).
+  void TestCrossOriginStorage_RecordStoredContinuation() {
+    Shutdown();
+
+    nsCOMPtr<nsIRunnable> done = NewRunnableMethod(
+        "CDMStorageTest::SetFinished", this, &CDMStorageTest::SetFinished);
+    Expect(nsLiteralCString(
+               "retrieve crossOriginTestRecordId succeeded (length 0 bytes)"),
+           done.forget());
+
+    CreateDecryptor(u"http://example5.com"_ns, u"http://example6.com"_ns, false,
+                    "retrieve crossOriginTestRecordId"_ns);
+  }
+
+  // Private-browsing storage: data written in PB mode must survive within
+  // the PB session but be purged when the session ends.
+  void TestPBStorage() {
+    // Send the decryptor the message "store recordid $time"
+    // Wait for the decryptor to send us "stored recordid $time"
+    nsCString response("stored pbdata test-pb-data");
+    Expect(response,
+           NewRunnableMethod(
+               "CDMStorageTest::TestPBStorage_RecordStoredContinuation", this,
+               &CDMStorageTest::TestPBStorage_RecordStoredContinuation));
+
+    // Open decryptor on one origin, write a record, close decryptor,
+    // open another, and test that record can be read, close decryptor,
+    // then send pb-last-context-closed notification, then open decryptor
+    // and check that it can't read that data; it should have been purged.
+    CreateDecryptor(u"http://pb1.com"_ns, u"http://pb2.com"_ns, true,
+                    "store pbdata test-pb-data"_ns);
+  }
+
+  // Within the same PB session the stored record must still be readable.
+  void TestPBStorage_RecordStoredContinuation() {
+    Shutdown();
+
+    nsCOMPtr<nsIRunnable> next = NewRunnableMethod(
+        "CDMStorageTest::TestPBStorage_RecordRetrievedContinuation", this,
+        &CDMStorageTest::TestPBStorage_RecordRetrievedContinuation);
+    Expect("retrieve pbdata succeeded (length 12 bytes)"_ns, next.forget());
+
+    CreateDecryptor(u"http://pb1.com"_ns, u"http://pb2.com"_ns, true,
+                    "retrieve pbdata"_ns);
+  }
+
+  // After simulating the end of the private-browsing session, the PB data
+  // must have been purged (length 0).
+  void TestPBStorage_RecordRetrievedContinuation() {
+    Shutdown();
+    SimulatePBModeExit();
+
+    nsCOMPtr<nsIRunnable> done = NewRunnableMethod(
+        "CDMStorageTest::SetFinished", this, &CDMStorageTest::SetFinished);
+    Expect("retrieve pbdata succeeded (length 0 bytes)"_ns, done.forget());
+
+    CreateDecryptor(u"http://pb1.com"_ns, u"http://pb2.com"_ns, true,
+                    "retrieve pbdata"_ns);
+  }
+
+#if defined(XP_WIN)
+  // Drives the fake GMP's output-protection API tests (Windows-only).
+  void TestOutputProtection() {
+    Shutdown();
+
+    nsCOMPtr<nsIRunnable> done = NewRunnableMethod(
+        "CDMStorageTest::SetFinished", this, &CDMStorageTest::SetFinished);
+    Expect("OP tests completed"_ns, done.forget());
+
+    CreateDecryptor(u"http://example15.com"_ns, u"http://example16.com"_ns,
+                    false, "test-op-apis"_ns);
+  }
+#endif
+
+  // Store a record whose name is far longer than Windows' 260-char MAX_PATH
+  // (but still under GMP_MAX_RECORD_NAME_SIZE) to exercise long-path
+  // handling in the storage backend.
+  void TestLongRecordNames() {
+    constexpr auto longRecordName =
+        "A_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "very_very_very_very_very_very_very_very_very_"
+        "very_very_very_very_very_very_"
+        "long_record_name"_ns;
+
+    constexpr auto data = "Just_some_arbitrary_data."_ns;
+
+    MOZ_ASSERT(longRecordName.Length() < GMP_MAX_RECORD_NAME_SIZE);
+    MOZ_ASSERT(longRecordName.Length() > 260);  // Windows MAX_PATH
+
+    // Expect the echo "stored <name> <data>" and finish when it arrives.
+    nsCString response("stored ");
+    response.Append(longRecordName);
+    response.AppendLiteral(" ");
+    response.Append(data);
+    Expect(response, NewRunnableMethod("CDMStorageTest::SetFinished", this,
+                                       &CDMStorageTest::SetFinished));
+
+    // Drive the CDM with "store <name> <data>".
+    nsCString update("store ");
+    update.Append(longRecordName);
+    update.AppendLiteral(" ");
+    update.Append(data);
+    CreateDecryptor(u"http://fuz.com"_ns, u"http://baz.com"_ns, false, update);
+  }
+
+  // Queue an expected CDM message together with the continuation to run
+  // when SessionMessage observes it.
+  void Expect(const nsCString& aMessage,
+              already_AddRefed<nsIRunnable> aContinuation) {
+    ExpectedMessage expected(aMessage, std::move(aContinuation));
+    mExpected.AppendElement(std::move(expected));
+  }
+
+  // Block the current thread (spinning its event loop) until SetFinished()
+  // has run, then reset the flag for the next test stage.
+  void AwaitFinished() {
+    mozilla::SpinEventLoopUntil("CDMStorageTest::AwaitFinished"_ns,
+                                [&]() -> bool { return mFinished; });
+    mFinished = false;
+  }
+
+  // Shut down the current CDM and, once the GMP identified by mNodeId has
+  // reported shutdown, run aContinuation. Requires an active CDM with a
+  // non-empty node id.
+  void ShutdownThen(already_AddRefed<nsIRunnable> aContinuation) {
+    EXPECT_TRUE(!!mCDM);
+    if (!mCDM) {
+      return;
+    }
+    EXPECT_FALSE(mNodeId.IsEmpty());
+    RefPtr<GMPShutdownObserver> task(new GMPShutdownObserver(
+        NewRunnableMethod("CDMStorageTest::Shutdown", this,
+                          &CDMStorageTest::Shutdown),
+        std::move(aContinuation), mNodeId));
+    SchedulerGroup::Dispatch(task.forget());
+  }
+
+  // Tear down the current CDM (if any) and clear the node id so a fresh
+  // decryptor can be created for the next stage.
+  void Shutdown() {
+    if (mCDM) {
+      mCDM->Shutdown();
+      mCDM = nullptr;
+      mNodeId.Truncate();
+    }
+  }
+
+  // No-op; dispatched purely to nudge the event loop so AwaitFinished's
+  // SpinEventLoopUntil re-evaluates its predicate.
+  void Dummy() {}
+
+  // Mark the test stage complete, shut down the CDM, and wake the spinning
+  // event loop via the Dummy task.
+  void SetFinished() {
+    mFinished = true;
+    Shutdown();
+    nsCOMPtr<nsIRunnable> task = NewRunnableMethod(
+        "CDMStorageTest::Dummy", this, &CDMStorageTest::Dummy);
+    SchedulerGroup::Dispatch(task.forget());
+  }
+
+  // Called (via CallbackProxy) when the CDM sends a session message.
+  // Matches it against the head of the expectation queue and, on match,
+  // dispatches the associated continuation.
+  // Fix: the original indexed mExpected[0] even when the queue was empty
+  // (EXPECT_TRUE does not abort), an out-of-bounds access on any unexpected
+  // message; bail out early instead.
+  void SessionMessage(const nsACString& aSessionId, uint32_t aMessageType,
+                      const nsTArray<uint8_t>& aMessage) {
+    MonitorAutoLock mon(mMonitor);
+
+    nsCString msg((const char*)aMessage.Elements(), aMessage.Length());
+    EXPECT_TRUE(mExpected.Length() > 0);
+    if (mExpected.IsEmpty()) {
+      // Unexpected message; the EXPECT above already failed the test.
+      return;
+    }
+    bool matches = mExpected[0].mMessage.Equals(msg);
+    EXPECT_STREQ(mExpected[0].mMessage.get(), msg.get());
+    if (matches) {
+      nsCOMPtr<nsIRunnable> continuation = mExpected[0].mContinuation;
+      mExpected.RemoveElementAt(0);
+      if (continuation) {
+        NS_DispatchToCurrentThread(continuation);
+      }
+    }
+  }
+
+  // The CDM terminated unexpectedly; shut it down and drop our reference.
+  void Terminated() {
+    if (!mCDM) {
+      return;
+    }
+    mCDM->Shutdown();
+    mCDM = nullptr;
+  }
+
+ private:
+  ~CDMStorageTest() = default;
+
+  // A message we expect the CDM to send, plus what to do when it arrives.
+  struct ExpectedMessage {
+    ExpectedMessage(const nsCString& aMessage,
+                    already_AddRefed<nsIRunnable> aContinuation)
+        : mMessage(aMessage), mContinuation(aContinuation) {}
+    nsCString mMessage;
+    nsCOMPtr<nsIRunnable> mContinuation;
+  };
+
+  nsTArray<ExpectedMessage> mExpected;  // FIFO of pending expectations
+
+  RefPtr<gmp::ChromiumCDMParent> mCDM;
+  Monitor mMonitor MOZ_UNANNOTATED;  // held in SessionMessage around mExpected
+  Atomic<bool> mFinished;  // set by SetFinished; polled by AwaitFinished
+  nsCString mNodeId;
+
+  // Minimal ChromiumCDMCallback implementation that forwards only the
+  // callbacks this test cares about (SessionMessage, Terminated, Shutdown)
+  // to the owning CDMStorageTest; everything else is a no-op.
+  class CallbackProxy : public ChromiumCDMCallback {
+   public:
+    explicit CallbackProxy(CDMStorageTest* aRunner) : mRunner(aRunner) {}
+
+    void SetSessionId(uint32_t aPromiseId,
+                      const nsCString& aSessionId) override {}
+
+    void ResolveLoadSessionPromise(uint32_t aPromiseId,
+                                   bool aSuccessful) override {}
+
+    void ResolvePromiseWithKeyStatus(uint32_t aPromiseId,
+                                     uint32_t aKeyStatus) override {}
+
+    void ResolvePromise(uint32_t aPromiseId) override {}
+
+    void RejectPromise(uint32_t aPromiseId, ErrorResult&& aError,
+                       const nsCString& aErrorMessage) override {}
+
+    // The only callback the storage tests assert on.
+    void SessionMessage(const nsACString& aSessionId, uint32_t aMessageType,
+                        nsTArray<uint8_t>&& aMessage) override {
+      mRunner->SessionMessage(aSessionId, aMessageType, std::move(aMessage));
+    }
+
+    void SessionKeysChange(
+        const nsCString& aSessionId,
+        nsTArray<mozilla::gmp::CDMKeyInformation>&& aKeysInfo) override {}
+
+    void ExpirationChange(const nsCString& aSessionId,
+                          double aSecondsSinceEpoch) override {}
+
+    void SessionClosed(const nsCString& aSessionId) override {}
+
+    void QueryOutputProtectionStatus() override {}
+
+    void Terminated() override { mRunner->Terminated(); }
+
+    void Shutdown() override { mRunner->Shutdown(); }
+
+   private:
+    // Warning: Weak ref.
+    CDMStorageTest* mRunner;
+  };
+
+  UniquePtr<CallbackProxy> mCallback;
+}; // class CDMStorageTest
+
+// Create a uniquely (randomly) named directory under the OS temp dir and
+// return it in aOut. Returns the first failing nsresult, NS_OK on success.
+// Fix: the nsresult from nsIFile::Append was previously ignored.
+static nsresult CreateTestDirectory(nsCOMPtr<nsIFile>& aOut) {
+  nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(aOut));
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+  // Random 32-character leaf name to avoid collisions between test runs.
+  nsCString dirName;
+  dirName.SetLength(32);
+  NS_MakeRandomString(dirName.BeginWriting(), 32);
+  rv = aOut->Append(NS_ConvertUTF8toUTF16(dirName));
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+  rv = aOut->Create(nsIFile::DIRECTORY_TYPE, 0755);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+  return NS_OK;
+}
+
+// MatchBaseDomain must match when the node's own origin is a subdomain of
+// the base domain being removed.
+void TestMatchBaseDomain_MatchOrigin() {
+  nsCOMPtr<nsIFile> testDir;
+  EXPECT_NS_SUCCEEDED(CreateTestDirectory(testDir));
+
+  EXPECT_NS_SUCCEEDED(
+      WriteToFile(testDir, "origin"_ns,
+                  "https://video.subdomain.removeme.github.io"_ns));
+  EXPECT_NS_SUCCEEDED(WriteToFile(testDir, "topLevelOrigin"_ns,
+                                  "https://embedder.example.com"_ns));
+  EXPECT_TRUE(MatchBaseDomain(testDir, "removeme.github.io"_ns));
+  testDir->Remove(true);
+}
+
+// MatchBaseDomain must match on the top-level origin too, including when
+// the origins carry origin-attribute suffixes and a multi-label TLD.
+void TestMatchBaseDomain_MatchTLD() {
+  nsCOMPtr<nsIFile> testDir;
+  EXPECT_NS_SUCCEEDED(CreateTestDirectory(testDir));
+
+  EXPECT_NS_SUCCEEDED(
+      WriteToFile(testDir, "origin"_ns,
+                  "https://video.example.com^userContextId=4"_ns));
+  EXPECT_NS_SUCCEEDED(
+      WriteToFile(testDir, "topLevelOrigin"_ns,
+                  "https://evil.web.megacorp.co.uk^privateBrowsingId=1"_ns));
+  EXPECT_TRUE(MatchBaseDomain(testDir, "megacorp.co.uk"_ns));
+  testDir->Remove(true);
+}
+
+// MatchBaseDomain must not match a base domain that is merely a suffix of
+// the stored host ("example.com" vs "longer-example.com").
+void TestMatchBaseDomain_NoMatch() {
+  nsCOMPtr<nsIFile> testDir;
+  EXPECT_NS_SUCCEEDED(CreateTestDirectory(testDir));
+
+  EXPECT_NS_SUCCEEDED(
+      WriteToFile(testDir, "origin"_ns,
+                  "https://video.example.com^userContextId=4"_ns));
+  EXPECT_NS_SUCCEEDED(
+      WriteToFile(testDir, "topLevelOrigin"_ns,
+                  "https://evil.web.megacorp.co.uk^privateBrowsingId=1"_ns));
+  EXPECT_FALSE(MatchBaseDomain(testDir, "longer-example.com"_ns));
+  testDir->Remove(true);
+}
+
+TEST(GeckoMediaPlugins, MatchBaseDomain_MatchOrigin)
+{
+  TestMatchBaseDomain_MatchOrigin();
+}
+
+TEST(GeckoMediaPlugins, MatchBaseDomain_MatchTLD)
+{
+  TestMatchBaseDomain_MatchTLD();
+}
+
+TEST(GeckoMediaPlugins, MatchBaseDomain_NoMatch)
+{
+  TestMatchBaseDomain_NoMatch();
+}
+
+// Bug 1776767 - Skip all GMP tests on Windows ASAN
+// Each test constructs a fresh CDMStorageTest and drives one scenario via
+// DoTest, which blocks until the scenario calls SetFinished().
+#if !(defined(XP_WIN) && defined(MOZ_ASAN))
+TEST(GeckoMediaPlugins, CDMStorageGetNodeId)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestGetNodeId);
+}
+
+TEST(GeckoMediaPlugins, CDMStorageBasic)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestBasicStorage);
+}
+
+TEST(GeckoMediaPlugins, CDMStorageForgetThisSite)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestForgetThisSite);
+}
+
+TEST(GeckoMediaPlugins, CDMStorageForgetThisBaseDomain)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestForgetThisBaseDomain);
+}
+
+TEST(GeckoMediaPlugins, CDMStorageClearRecentHistory1)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestClearRecentHistory1);
+}
+
+TEST(GeckoMediaPlugins, CDMStorageClearRecentHistory2)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestClearRecentHistory2);
+}
+
+TEST(GeckoMediaPlugins, CDMStorageClearRecentHistory3)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestClearRecentHistory3);
+}
+
+TEST(GeckoMediaPlugins, CDMStorageCrossOrigin)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestCrossOriginStorage);
+}
+
+TEST(GeckoMediaPlugins, CDMStoragePrivateBrowsing)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestPBStorage);
+}
+
+TEST(GeckoMediaPlugins, CDMStorageLongRecordNames)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestLongRecordNames);
+}
+
+# if defined(XP_WIN)
+TEST(GeckoMediaPlugins, GMPOutputProtection)
+{
+  RefPtr<CDMStorageTest> runner = new CDMStorageTest();
+  runner->DoTest(&CDMStorageTest::TestOutputProtection);
+}
+# endif  // defined(XP_WIN)
+#endif  // !(defined(XP_WIN) && defined(MOZ_ASAN))
diff --git a/dom/media/gtest/TestCubebInputStream.cpp b/dom/media/gtest/TestCubebInputStream.cpp
new file mode 100644
index 0000000000..d5fb73edc9
--- /dev/null
+++ b/dom/media/gtest/TestCubebInputStream.cpp
@@ -0,0 +1,187 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "CubebInputStream.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "mozilla/gtest/WaitFor.h"
+#include "MockCubeb.h"
+
+using namespace mozilla;
+
+namespace {
+#define DispatchFunction(f) \
+ NS_DispatchToCurrentThread(NS_NewRunnableFunction(__func__, f))
+} // namespace
+
+class MockListener : public CubebInputStream::Listener {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MockListener, override);
+ MOCK_METHOD2(DataCallback, long(const void* aBuffer, long aFrames));
+ MOCK_METHOD1(StateCallback, void(cubeb_state aState));
+ MOCK_METHOD0(DeviceChangedCallback, void());
+
+ private:
+ ~MockListener() = default;
+};
+
+TEST(TestCubebInputStream, DataCallback)
+{
+ using ::testing::Ne;
+ using ::testing::NotNull;
+
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const CubebUtils::AudioDeviceID deviceId = nullptr;
+ const uint32_t channels = 2;
+
+ uint32_t rate = 0;
+ ASSERT_EQ(cubeb_get_preferred_sample_rate(cubeb->AsCubebContext(), &rate),
+ CUBEB_OK);
+
+ nsTArray<AudioDataValue> data;
+ auto listener = MakeRefPtr<MockListener>();
+ EXPECT_CALL(*listener, DataCallback(NotNull(), Ne(0)))
+ .WillRepeatedly([&](const void* aBuffer, long aFrames) {
+ const AudioDataValue* source =
+ reinterpret_cast<const AudioDataValue*>(aBuffer);
+ size_t sampleCount =
+ static_cast<size_t>(aFrames) * static_cast<size_t>(channels);
+ data.AppendElements(source, sampleCount);
+ return aFrames;
+ });
+
+ EXPECT_CALL(*listener, StateCallback(CUBEB_STATE_STARTED));
+ EXPECT_CALL(*listener, StateCallback(CUBEB_STATE_STOPPED)).Times(2);
+
+ EXPECT_CALL(*listener, DeviceChangedCallback).Times(0);
+
+ UniquePtr<CubebInputStream> cis;
+ DispatchFunction([&] {
+ cis = CubebInputStream::Create(deviceId, channels, rate, true,
+ listener.get());
+ ASSERT_TRUE(cis);
+ });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+
+ stream->SetInputRecordingEnabled(true);
+
+ DispatchFunction([&] { ASSERT_EQ(cis->Start(), CUBEB_OK); });
+ WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { ASSERT_EQ(cis->Stop(), CUBEB_OK); });
+ WaitFor(stream->OutputVerificationEvent());
+
+ nsTArray<AudioDataValue> record = stream->TakeRecordedInput();
+
+ DispatchFunction([&] { cis = nullptr; });
+ WaitFor(cubeb->StreamDestroyEvent());
+
+ ASSERT_EQ(data, record);
+}
+
+TEST(TestCubebInputStream, ErrorCallback)
+{
+ using ::testing::Ne;
+ using ::testing::NotNull;
+ using ::testing::ReturnArg;
+
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const CubebUtils::AudioDeviceID deviceId = nullptr;
+ const uint32_t channels = 2;
+
+ uint32_t rate = 0;
+ ASSERT_EQ(cubeb_get_preferred_sample_rate(cubeb->AsCubebContext(), &rate),
+ CUBEB_OK);
+
+ auto listener = MakeRefPtr<MockListener>();
+ EXPECT_CALL(*listener, DataCallback(NotNull(), Ne(0)))
+ .WillRepeatedly(ReturnArg<1>());
+
+ EXPECT_CALL(*listener, StateCallback(CUBEB_STATE_STARTED));
+ EXPECT_CALL(*listener, StateCallback(CUBEB_STATE_ERROR));
+ EXPECT_CALL(*listener, StateCallback(CUBEB_STATE_STOPPED));
+
+ EXPECT_CALL(*listener, DeviceChangedCallback).Times(0);
+
+ UniquePtr<CubebInputStream> cis;
+ DispatchFunction([&] {
+ cis = CubebInputStream::Create(deviceId, channels, rate, true,
+ listener.get());
+ ASSERT_TRUE(cis);
+ });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+
+ DispatchFunction([&] { ASSERT_EQ(cis->Start(), CUBEB_OK); });
+ WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { stream->ForceError(); });
+ WaitFor(stream->ErrorForcedEvent());
+
+ // If stream ran into an error state, then it should be stopped.
+
+ DispatchFunction([&] { cis = nullptr; });
+ WaitFor(cubeb->StreamDestroyEvent());
+}
+
+TEST(TestCubebInputStream, DeviceChangedCallback)
+{
+ using ::testing::Ne;
+ using ::testing::NotNull;
+ using ::testing::ReturnArg;
+
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const CubebUtils::AudioDeviceID deviceId = nullptr;
+ const uint32_t channels = 2;
+
+ uint32_t rate = 0;
+ ASSERT_EQ(cubeb_get_preferred_sample_rate(cubeb->AsCubebContext(), &rate),
+ CUBEB_OK);
+
+ auto listener = MakeRefPtr<MockListener>();
+ EXPECT_CALL(*listener, DataCallback(NotNull(), Ne(0)))
+ .WillRepeatedly(ReturnArg<1>());
+
+ // In real world, the stream might run into an error state when the
+ // device-changed event is fired (e.g., the last default output device is
+ // unplugged). But it's fine to not check here since we can control how
+ // MockCubeb behaves.
+ EXPECT_CALL(*listener, StateCallback(CUBEB_STATE_STARTED));
+ EXPECT_CALL(*listener, StateCallback(CUBEB_STATE_STOPPED)).Times(2);
+
+ EXPECT_CALL(*listener, DeviceChangedCallback);
+
+ UniquePtr<CubebInputStream> cis;
+ DispatchFunction([&] {
+ cis = CubebInputStream::Create(deviceId, channels, rate, true,
+ listener.get());
+ ASSERT_TRUE(cis);
+ });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+
+ DispatchFunction([&] { ASSERT_EQ(cis->Start(), CUBEB_OK); });
+ WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { stream->ForceDeviceChanged(); });
+ WaitFor(stream->DeviceChangeForcedEvent());
+
+ // The stream can keep running when its device is changed.
+ DispatchFunction([&] { ASSERT_EQ(cis->Stop(), CUBEB_OK); });
+ cubeb_state state = WaitFor(stream->StateEvent());
+ EXPECT_EQ(state, CUBEB_STATE_STOPPED);
+
+ DispatchFunction([&] { cis = nullptr; });
+ WaitFor(cubeb->StreamDestroyEvent());
+}
diff --git a/dom/media/gtest/TestDataMutex.cpp b/dom/media/gtest/TestDataMutex.cpp
new file mode 100644
index 0000000000..11f3e395c9
--- /dev/null
+++ b/dom/media/gtest/TestDataMutex.cpp
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "mozilla/DataMutex.h"
+#include "nsTArray.h"
+
+using mozilla::DataMutex;
+
+struct A {
+ void Set(int a) { mValue = a; }
+ int mValue;
+};
+
+TEST(DataMutex, Basic)
+{
+ {
+ DataMutex<uint32_t> i(1, "1");
+ i.Mutex().AssertNotCurrentThreadOwns();
+ {
+ auto x = i.Lock();
+ i.Mutex().AssertCurrentThreadOwns();
+ *x = 4;
+ ASSERT_EQ(*x, 4u);
+ }
+ i.Mutex().AssertNotCurrentThreadOwns();
+ }
+ {
+ DataMutex<A> a({4}, "StructA");
+ auto x = a.Lock();
+ ASSERT_EQ(x->mValue, 4);
+ x->Set(8);
+ ASSERT_EQ(x->mValue, 8);
+ }
+ {
+ DataMutex<nsTArray<uint32_t>> _a("array");
+ auto a = _a.Lock();
+ auto& x = a.ref();
+ ASSERT_EQ(x.Length(), 0u);
+ x.AppendElement(1u);
+ ASSERT_EQ(x.Length(), 1u);
+ ASSERT_EQ(x[0], 1u);
+ }
+}
diff --git a/dom/media/gtest/TestDecoderBenchmark.cpp b/dom/media/gtest/TestDecoderBenchmark.cpp
new file mode 100644
index 0000000000..85091a4946
--- /dev/null
+++ b/dom/media/gtest/TestDecoderBenchmark.cpp
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DecoderBenchmark.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest.h"
+
+using ::testing::Return;
+using namespace mozilla;
+
+TEST(DecoderBenchmark, CreateKey)
+{
+ DecoderBenchmarkInfo info{"video/av1"_ns, 1, 1, 1, 8};
+ EXPECT_EQ(KeyUtil::CreateKey(info),
+ "ResolutionLevel0-FrameRateLevel0-8bit"_ns)
+ << "Min level";
+
+ DecoderBenchmarkInfo info1{"video/av1"_ns, 5000, 5000, 100, 8};
+ EXPECT_EQ(KeyUtil::CreateKey(info1),
+ "ResolutionLevel7-FrameRateLevel4-8bit"_ns)
+ << "Max level";
+
+ DecoderBenchmarkInfo info2{"video/av1"_ns, 854, 480, 30, 8};
+ EXPECT_EQ(KeyUtil::CreateKey(info2),
+ "ResolutionLevel3-FrameRateLevel2-8bit"_ns)
+ << "On the top of 4th resolution level";
+
+ DecoderBenchmarkInfo info3{"video/av1"_ns, 1270, 710, 24, 8};
+ EXPECT_EQ(KeyUtil::CreateKey(info3),
+ "ResolutionLevel4-FrameRateLevel1-8bit"_ns)
+ << "Closer to 5th resolution level - bellow";
+
+ DecoderBenchmarkInfo info4{"video/av1"_ns, 1290, 730, 24, 8};
+ EXPECT_EQ(KeyUtil::CreateKey(info4),
+ "ResolutionLevel4-FrameRateLevel1-8bit"_ns)
+ << "Closer to 5th resolution level - above";
+
+ DecoderBenchmarkInfo info5{"video/av1"_ns, 854, 480, 20, 8};
+ EXPECT_EQ(KeyUtil::CreateKey(info5),
+ "ResolutionLevel3-FrameRateLevel1-8bit"_ns)
+ << "Closer to 2nd frame rate level - bellow";
+
+ DecoderBenchmarkInfo info6{"video/av1"_ns, 854, 480, 26, 8};
+ EXPECT_EQ(KeyUtil::CreateKey(info6),
+ "ResolutionLevel3-FrameRateLevel1-8bit"_ns)
+ << "Closer to 2nd frame rate level - above";
+
+ DecoderBenchmarkInfo info7{"video/av1"_ns, 1280, 720, 24, 10};
+ EXPECT_EQ(KeyUtil::CreateKey(info7),
+ "ResolutionLevel4-FrameRateLevel1-non8bit"_ns)
+ << "Bit depth 10 bits";
+
+ DecoderBenchmarkInfo info8{"video/av1"_ns, 1280, 720, 24, 12};
+ EXPECT_EQ(KeyUtil::CreateKey(info8),
+ "ResolutionLevel4-FrameRateLevel1-non8bit"_ns)
+ << "Bit depth 12 bits";
+
+ DecoderBenchmarkInfo info9{"video/av1"_ns, 1280, 720, 24, 16};
+ EXPECT_EQ(KeyUtil::CreateKey(info9),
+ "ResolutionLevel4-FrameRateLevel1-non8bit"_ns)
+ << "Bit depth 16 bits";
+}
diff --git a/dom/media/gtest/TestDeviceInputTrack.cpp b/dom/media/gtest/TestDeviceInputTrack.cpp
new file mode 100644
index 0000000000..6eb8c08774
--- /dev/null
+++ b/dom/media/gtest/TestDeviceInputTrack.cpp
@@ -0,0 +1,558 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "DeviceInputTrack.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include "AudioGenerator.h"
+#include "MediaTrackGraphImpl.h"
+#include "MockCubeb.h"
+#include "mozilla/gtest/WaitFor.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "nsContentUtils.h"
+
+using namespace mozilla;
+using testing::NiceMock;
+using testing::Return;
+
+namespace {
+#define DispatchFunction(f) \
+ NS_DispatchToCurrentThread(NS_NewRunnableFunction(__func__, f))
+} // namespace
+
+class MockGraphImpl : public MediaTrackGraphImpl {
+ public:
+ explicit MockGraphImpl(TrackRate aRate)
+ : MediaTrackGraphImpl(0, aRate, nullptr, NS_GetCurrentThread()) {
+ ON_CALL(*this, OnGraphThread).WillByDefault(Return(true));
+ }
+
+ void Init(uint32_t aChannels) {
+ MediaTrackGraphImpl::Init(OFFLINE_THREAD_DRIVER, DIRECT_DRIVER, aChannels);
+ // We have to call `Destroy()` manually in order to break the reference.
+ // The reason we don't assign a null driver is because we would add a track
+ // to the graph, then it would trigger graph's `EnsureNextIteration()` that
+ // requires a non-null driver.
+ SetCurrentDriver(new NiceMock<MockDriver>());
+ }
+
+ MOCK_CONST_METHOD0(OnGraphThread, bool());
+ MOCK_METHOD1(AppendMessage, void(UniquePtr<ControlMessageInterface>));
+
+ protected:
+ ~MockGraphImpl() = default;
+
+ class MockDriver : public GraphDriver {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MockDriver, override);
+
+ MockDriver() : GraphDriver(nullptr, nullptr, 0) {
+ ON_CALL(*this, OnThread).WillByDefault(Return(true));
+ ON_CALL(*this, ThreadRunning).WillByDefault(Return(true));
+ }
+
+ MOCK_METHOD0(Start, void());
+ MOCK_METHOD0(Shutdown, void());
+ MOCK_METHOD0(IterationDuration, uint32_t());
+ MOCK_METHOD0(EnsureNextIteration, void());
+ MOCK_CONST_METHOD0(OnThread, bool());
+ MOCK_CONST_METHOD0(ThreadRunning, bool());
+
+ protected:
+ ~MockDriver() = default;
+ };
+};
+
+class TestDeviceInputTrack : public testing::Test {
+ protected:
+ TestDeviceInputTrack() : mChannels(2), mRate(44100) {}
+
+ void SetUp() override {
+ mGraph = MakeRefPtr<NiceMock<MockGraphImpl>>(mRate);
+ mGraph->Init(mChannels);
+ }
+
+ void TearDown() override { mGraph->Destroy(); }
+
+ const uint32_t mChannels;
+ const TrackRate mRate;
+ RefPtr<MockGraphImpl> mGraph;
+};
+
+TEST_F(TestDeviceInputTrack, DeviceInputConsumerTrack) {
+ class TestDeviceInputConsumerTrack : public DeviceInputConsumerTrack {
+ public:
+ static TestDeviceInputConsumerTrack* Create(MediaTrackGraph* aGraph) {
+ MOZ_ASSERT(NS_IsMainThread());
+ TestDeviceInputConsumerTrack* track =
+ new TestDeviceInputConsumerTrack(aGraph->GraphRate());
+ aGraph->AddTrack(track);
+ return track;
+ }
+
+ void Destroy() {
+ MOZ_ASSERT(NS_IsMainThread());
+ DisconnectDeviceInput();
+ DeviceInputConsumerTrack::Destroy();
+ }
+
+ void ProcessInput(GraphTime aFrom, GraphTime aTo,
+ uint32_t aFlags) override{/* Ignored */};
+
+ uint32_t NumberOfChannels() const override {
+ if (mInputs.IsEmpty()) {
+ return 0;
+ }
+ DeviceInputTrack* t = mInputs[0]->GetSource()->AsDeviceInputTrack();
+ MOZ_ASSERT(t);
+ return t->NumberOfChannels();
+ }
+
+ private:
+ explicit TestDeviceInputConsumerTrack(TrackRate aSampleRate)
+ : DeviceInputConsumerTrack(aSampleRate) {}
+ };
+
+ class TestAudioDataListener : public AudioDataListener {
+ public:
+ TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice)
+ : mChannelCount(aChannelCount), mIsVoice(aIsVoice) {}
+ // Graph thread APIs: AudioDataListenerInterface implementations.
+ uint32_t RequestedInputChannelCount(MediaTrackGraph* aGraph) override {
+ aGraph->AssertOnGraphThread();
+ return mChannelCount;
+ }
+ bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
+ return mIsVoice;
+ };
+ void DeviceChanged(MediaTrackGraph* aGraph) override { /* Ignored */
+ }
+ void Disconnect(MediaTrackGraph* aGraph) override{/* Ignored */};
+
+ private:
+ ~TestAudioDataListener() = default;
+
+ // Graph thread-only.
+ uint32_t mChannelCount;
+ // Any thread.
+ const bool mIsVoice;
+ };
+
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+
+ const CubebUtils::AudioDeviceID device1 = (void*)1;
+ RefPtr<TestAudioDataListener> listener1 = new TestAudioDataListener(1, false);
+ RefPtr<TestDeviceInputConsumerTrack> track1 =
+ TestDeviceInputConsumerTrack::Create(mGraph);
+ track1->ConnectDeviceInput(device1, listener1.get(), testPrincipal);
+ EXPECT_TRUE(track1->ConnectToNativeDevice());
+ EXPECT_FALSE(track1->ConnectToNonNativeDevice());
+
+ const CubebUtils::AudioDeviceID device2 = (void*)2;
+ RefPtr<TestAudioDataListener> listener2 = new TestAudioDataListener(2, false);
+ RefPtr<TestDeviceInputConsumerTrack> track2 =
+ TestDeviceInputConsumerTrack::Create(mGraph);
+ track2->ConnectDeviceInput(device2, listener2.get(), testPrincipal);
+ EXPECT_FALSE(track2->ConnectToNativeDevice());
+ EXPECT_TRUE(track2->ConnectToNonNativeDevice());
+
+ track2->Destroy();
+ mGraph->RemoveTrackGraphThread(track2);
+
+ track1->Destroy();
+ mGraph->RemoveTrackGraphThread(track1);
+}
+
+TEST_F(TestDeviceInputTrack, NativeInputTrackData) {
+ const uint32_t flags = 0;
+ const CubebUtils::AudioDeviceID deviceId = (void*)1;
+
+ AudioGenerator<AudioDataValue> generator(mChannels, mRate);
+ const size_t nrFrames = 10;
+ const size_t bufferSize = nrFrames * mChannels;
+ nsTArray<AudioDataValue> buffer(bufferSize);
+ buffer.AppendElements(bufferSize);
+
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+
+ // Setup: Create a NativeInputTrack and add it to mGraph
+ RefPtr<NativeInputTrack> track =
+ new NativeInputTrack(mGraph->GraphRate(), deviceId, testPrincipal);
+ mGraph->AddTrack(track);
+
+ // Main test below:
+
+ generator.GenerateInterleaved(buffer.Elements(), nrFrames);
+ track->NotifyInputData(mGraph.get(), buffer.Elements(), nrFrames, mRate,
+ mChannels, 0);
+
+ track->ProcessInput(0, WEBAUDIO_BLOCK_SIZE + nrFrames, flags);
+ EXPECT_EQ(static_cast<size_t>(track->GetEnd()),
+ static_cast<size_t>(WEBAUDIO_BLOCK_SIZE) + nrFrames);
+
+ // Check pre-buffering: null data with PRINCIPAL_HANDLE_NONE principal
+ AudioSegment preBuffering;
+ preBuffering.AppendSlice(*track->GetData(), 0, WEBAUDIO_BLOCK_SIZE);
+ EXPECT_TRUE(preBuffering.IsNull());
+ for (AudioSegment::ConstChunkIterator iter(preBuffering); !iter.IsEnded();
+ iter.Next()) {
+ const AudioChunk& chunk = *iter;
+ EXPECT_EQ(chunk.mPrincipalHandle, PRINCIPAL_HANDLE_NONE);
+ }
+
+ // Check rest of the data
+ AudioSegment data;
+ data.AppendSlice(*track->GetData(), WEBAUDIO_BLOCK_SIZE,
+ WEBAUDIO_BLOCK_SIZE + nrFrames);
+ nsTArray<AudioDataValue> interleaved;
+ size_t sampleCount = data.WriteToInterleavedBuffer(interleaved, mChannels);
+ EXPECT_EQ(sampleCount, bufferSize);
+ EXPECT_EQ(interleaved, buffer);
+
+ // Check principal in data
+ for (AudioSegment::ConstChunkIterator iter(data); !iter.IsEnded();
+ iter.Next()) {
+ const AudioChunk& chunk = *iter;
+ EXPECT_EQ(chunk.mPrincipalHandle, testPrincipal);
+ }
+
+ // Tear down: Destroy the NativeInputTrack and remove it from mGraph.
+ track->Destroy();
+ mGraph->RemoveTrackGraphThread(track);
+}
+
+class MockEventListener : public AudioInputSource::EventListener {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MockEventListener, override);
+ MOCK_METHOD1(AudioDeviceChanged, void(AudioInputSource::Id));
+ MOCK_METHOD2(AudioStateCallback,
+ void(AudioInputSource::Id,
+ AudioInputSource::EventListener::State));
+
+ private:
+ ~MockEventListener() = default;
+};
+
+TEST_F(TestDeviceInputTrack, StartAndStop) {
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ // Non native input settings
+ const AudioInputSource::Id sourceId = 1;
+ const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+ const uint32_t channels = 2;
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+ const TrackRate rate = 48000;
+
+ // Setup: Create a NonNativeInputTrack and add it to mGraph.
+ RefPtr<NonNativeInputTrack> track =
+ new NonNativeInputTrack(mGraph->GraphRate(), deviceId, testPrincipal);
+ mGraph->AddTrack(track);
+
+ // Main test below:
+
+ // Make sure the NonNativeInputTrack can start and stop its audio correctly.
+ {
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(2);
+
+ // No input channels and device preference before start.
+ EXPECT_EQ(track->NumberOfChannels(), 0U);
+ EXPECT_EQ(track->DevicePreference(), AudioInputType::Unknown);
+
+ DispatchFunction([&] {
+ track->StartAudio(MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true /* voice */,
+ testPrincipal, rate, mGraph->GraphRate()));
+ });
+
+ // Wait for stream creation.
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+
+ // Make sure the audio stream and the track's settings are correct.
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->GetInputDeviceID(), deviceId);
+ EXPECT_EQ(stream->InputChannels(), channels);
+ EXPECT_EQ(stream->SampleRate(), static_cast<uint32_t>(rate));
+ EXPECT_EQ(track->NumberOfChannels(), channels);
+ EXPECT_EQ(track->DevicePreference(), AudioInputType::Voice);
+
+ // Wait for stream callbacks.
+ Unused << WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { track->StopAudio(); });
+
+ // Wait for stream destroy.
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+
+ // No input channels and device preference after stop.
+ EXPECT_EQ(track->NumberOfChannels(), 0U);
+ EXPECT_EQ(track->DevicePreference(), AudioInputType::Unknown);
+ }
+
+ // Make sure the NonNativeInputTrack can restart its audio correctly.
+ {
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(2);
+
+ DispatchFunction([&] {
+ track->StartAudio(MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true,
+ testPrincipal, rate, mGraph->GraphRate()));
+ });
+
+ // Wait for stream creation.
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->GetInputDeviceID(), deviceId);
+ EXPECT_EQ(stream->InputChannels(), channels);
+ EXPECT_EQ(stream->SampleRate(), static_cast<uint32_t>(rate));
+
+ // Wait for stream callbacks.
+ Unused << WaitFor(stream->FramesProcessedEvent());
+
+ DispatchFunction([&] { track->StopAudio(); });
+
+ // Wait for stream destroy.
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+ }
+
+ // Tear down: Destroy the NativeInputTrack and remove it from mGraph.
+ track->Destroy();
+ mGraph->RemoveTrackGraphThread(track);
+}
+
+TEST_F(TestDeviceInputTrack, NonNativeInputTrackData) {
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ // Graph settings
+ const uint32_t flags = 0;
+ const GraphTime frames = 440;
+
+ // Non native input settings
+ const AudioInputSource::Id sourceId = 1;
+ const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+ const uint32_t channels = 2;
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+ const TrackRate rate = 48000;
+
+ // Setup: Create a NonNativeInputTrack and add it to mGraph.
+ RefPtr<NonNativeInputTrack> track =
+ new NonNativeInputTrack(mGraph->GraphRate(), deviceId, testPrincipal);
+ mGraph->AddTrack(track);
+
+ // Main test below:
+
+ // Make sure we get null data if the track is not started yet.
+ GraphTime current = 0;
+ GraphTime next = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(frames);
+ ASSERT_NE(current, next); // Make sure we have data produced in ProcessInput.
+
+ track->ProcessInput(current, next, flags);
+ {
+ AudioSegment data;
+ data.AppendSegment(track->GetData<AudioSegment>());
+ EXPECT_TRUE(data.IsNull());
+ }
+
+ // Make sure we get the AudioInputSource's data once we start the track.
+
+ current = next;
+ next = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(2 * frames);
+ ASSERT_NE(current, next); // Make sure we have data produced in ProcessInput.
+
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(2);
+
+ DispatchFunction([&] {
+ track->StartAudio(MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true, testPrincipal,
+ rate, mGraph->GraphRate()));
+ });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->GetInputDeviceID(), deviceId);
+ EXPECT_EQ(stream->InputChannels(), channels);
+ EXPECT_EQ(stream->SampleRate(), static_cast<uint32_t>(rate));
+
+ // Check audio data.
+ Unused << WaitFor(stream->FramesProcessedEvent());
+ track->ProcessInput(current, next, flags);
+ {
+ AudioSegment data;
+ data.AppendSlice(*track->GetData<AudioSegment>(), current, next);
+ EXPECT_FALSE(data.IsNull());
+ for (AudioSegment::ConstChunkIterator iter(data); !iter.IsEnded();
+ iter.Next()) {
+ EXPECT_EQ(iter->mChannelData.Length(), channels);
+ EXPECT_EQ(iter->mPrincipalHandle, testPrincipal);
+ }
+ }
+
+ // Stop the track and make sure it produces null data again.
+ current = next;
+ next = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(3 * frames);
+ ASSERT_NE(current, next); // Make sure we have data produced in ProcessInput.
+
+ DispatchFunction([&] { track->StopAudio(); });
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+
+ track->ProcessInput(current, next, flags);
+ {
+ AudioSegment data;
+ data.AppendSlice(*track->GetData<AudioSegment>(), current, next);
+ EXPECT_TRUE(data.IsNull());
+ }
+
+ // Tear down: Destroy the NonNativeInputTrack and remove it from mGraph.
+ track->Destroy();
+ mGraph->RemoveTrackGraphThread(track);
+}
+
+TEST_F(TestDeviceInputTrack, NonNativeDeviceChangedCallback) {
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ // Non native input settings
+ const AudioInputSource::Id sourceId = 1;
+ const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+ const uint32_t channels = 2;
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+ const TrackRate rate = 48000;
+
+ // Setup: Create a NonNativeInputTrack and add it to mGraph.
+ RefPtr<NonNativeInputTrack> track =
+ new NonNativeInputTrack(mGraph->GraphRate(), deviceId, testPrincipal);
+ mGraph->AddTrack(track);
+
+ // Main test below:
+
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener, AudioDeviceChanged(sourceId));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(2);
+
+ // Launch and start an audio stream.
+ DispatchFunction([&] {
+ track->StartAudio(MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true, testPrincipal,
+ rate, mGraph->GraphRate()));
+ });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->GetInputDeviceID(), deviceId);
+ EXPECT_EQ(stream->InputChannels(), channels);
+ EXPECT_EQ(stream->SampleRate(), static_cast<uint32_t>(rate));
+
+ // Make sure the stream is running.
+ Unused << WaitFor(stream->FramesProcessedEvent());
+
+ // Fire a device-changed callback.
+ DispatchFunction([&] { stream->ForceDeviceChanged(); });
+ WaitFor(stream->DeviceChangeForcedEvent());
+
+ // Stop and destroy the stream.
+ DispatchFunction([&] { track->StopAudio(); });
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+
+ // Tear down: Destroy the NonNativeInputTrack and remove it from mGraph.
+ track->Destroy();
+ mGraph->RemoveTrackGraphThread(track);
+}
+
+TEST_F(TestDeviceInputTrack, NonNativeErrorCallback) {
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ // Non native input settings
+ const AudioInputSource::Id sourceId = 1;
+ const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+ const uint32_t channels = 2;
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+ const TrackRate rate = 48000;
+
+ // Setup: Create a NonNativeInputTrack and add it to mGraph.
+ RefPtr<NonNativeInputTrack> track =
+ new NonNativeInputTrack(mGraph->GraphRate(), deviceId, testPrincipal);
+ mGraph->AddTrack(track);
+
+ // Main test below:
+
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Error));
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(2);
+
+ // Launch and start an audio stream.
+ DispatchFunction([&] {
+ track->StartAudio(MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true, testPrincipal,
+ rate, mGraph->GraphRate()));
+ });
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_FALSE(stream->mHasOutput);
+ EXPECT_EQ(stream->GetInputDeviceID(), deviceId);
+ EXPECT_EQ(stream->InputChannels(), channels);
+ EXPECT_EQ(stream->SampleRate(), static_cast<uint32_t>(rate));
+
+ // Make sure the stream is running.
+ Unused << WaitFor(stream->FramesProcessedEvent());
+
+ // Force an error in the MockCubeb.
+ DispatchFunction([&] { stream->ForceError(); });
+ WaitFor(stream->ErrorForcedEvent());
+
+ // Stop and destroy the stream.
+ DispatchFunction([&] { track->StopAudio(); });
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+
+ // Tear down: Destroy the NonNativeInputTrack and remove it from mGraph.
+ track->Destroy();
+ mGraph->RemoveTrackGraphThread(track);
+}
diff --git a/dom/media/gtest/TestDriftCompensation.cpp b/dom/media/gtest/TestDriftCompensation.cpp
new file mode 100644
index 0000000000..055a74ff5f
--- /dev/null
+++ b/dom/media/gtest/TestDriftCompensation.cpp
@@ -0,0 +1,86 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "DriftCompensation.h"
+#include "mozilla/SpinEventLoopUntil.h"
+
+using namespace mozilla;
+
+class DriftCompensatorTest : public ::testing::Test {
+ public:
+ const TrackRate mRate = 44100;
+ const TimeStamp mStart;
+ const RefPtr<DriftCompensator> mComp;
+
+ DriftCompensatorTest()
+ : mStart(TimeStamp::Now()),
+ mComp(MakeRefPtr<DriftCompensator>(GetCurrentSerialEventTarget(),
+ mRate)) {
+ mComp->NotifyAudioStart(mStart);
+ // NotifyAudioStart dispatched a runnable to update the audio mStart time on
+ // the video thread. Because this is a test, the video thread is the current
+ // thread. We spin the event loop until we know the mStart time is updated.
+ {
+ bool updated = false;
+ NS_DispatchToCurrentThread(
+ NS_NewRunnableFunction(__func__, [&] { updated = true; }));
+ SpinEventLoopUntil("DriftCompensatorTest::DriftCompensatorTest"_ns,
+ [&] { return updated; });
+ }
+ }
+
+ // Past() is half as far from `mStart` as `aNow`.
+ TimeStamp Past(TimeStamp aNow) {
+ return mStart + (aNow - mStart) / (int64_t)2;
+ }
+
+ // Future() is twice as far from `mStart` as `aNow`.
+ TimeStamp Future(TimeStamp aNow) { return mStart + (aNow - mStart) * 2; }
+};
+
+TEST_F(DriftCompensatorTest, Initialized) {
+ EXPECT_EQ(mComp->GetVideoTime(mStart, mStart), mStart);
+}
+
+TEST_F(DriftCompensatorTest, SlowerAudio) {
+ // 10s of audio took 20 seconds of wall clock to play out
+ mComp->NotifyAudio(mRate * 10);
+ TimeStamp now = mStart + TimeDuration::FromSeconds(20);
+ EXPECT_EQ((mComp->GetVideoTime(now, mStart) - mStart).ToSeconds(), 0.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, Past(now)) - mStart).ToSeconds(), 5.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, now) - mStart).ToSeconds(), 10.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, Future(now)) - mStart).ToSeconds(), 20.0);
+}
+
+TEST_F(DriftCompensatorTest, NoDrift) {
+ // 10s of audio took 10 seconds of wall clock to play out
+ mComp->NotifyAudio(mRate * 10);
+ TimeStamp now = mStart + TimeDuration::FromSeconds(10);
+ EXPECT_EQ((mComp->GetVideoTime(now, mStart) - mStart).ToSeconds(), 0.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, Past(now)) - mStart).ToSeconds(), 5.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, now) - mStart).ToSeconds(), 10.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, Future(now)) - mStart).ToSeconds(), 20.0);
+}
+
+TEST_F(DriftCompensatorTest, NoProgress) {
+ // 10s of audio took 0 seconds of wall clock to play out
+ mComp->NotifyAudio(mRate * 10);
+ TimeStamp now = mStart;
+ TimeStamp future = mStart + TimeDuration::FromSeconds(5);
+ EXPECT_EQ((mComp->GetVideoTime(now, mStart) - mStart).ToSeconds(), 0.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, future) - mStart).ToSeconds(), 5.0);
+}
+
+TEST_F(DriftCompensatorTest, FasterAudio) {
+ // 20s of audio took 10 seconds of wall clock to play out
+ mComp->NotifyAudio(mRate * 20);
+ TimeStamp now = mStart + TimeDuration::FromSeconds(10);
+ EXPECT_EQ((mComp->GetVideoTime(now, mStart) - mStart).ToSeconds(), 0.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, Past(now)) - mStart).ToSeconds(), 10.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, now) - mStart).ToSeconds(), 20.0);
+ EXPECT_EQ((mComp->GetVideoTime(now, Future(now)) - mStart).ToSeconds(), 40.0);
+}
diff --git a/dom/media/gtest/TestGMPCrossOrigin.cpp b/dom/media/gtest/TestGMPCrossOrigin.cpp
new file mode 100644
index 0000000000..8abb7694ac
--- /dev/null
+++ b/dom/media/gtest/TestGMPCrossOrigin.cpp
@@ -0,0 +1,212 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "mozilla/gtest/MozAssertions.h"
+#include "mozilla/StaticPtr.h"
+#include "GMPTestMonitor.h"
+#include "GMPVideoDecoderProxy.h"
+#include "GMPVideoEncoderProxy.h"
+#include "GMPServiceParent.h"
+#include "nsAppDirectoryServiceDefs.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/DebugOnly.h"
+#include "nsThreadUtils.h"
+
+using namespace mozilla;
+using namespace mozilla::gmp;
+
+struct GMPTestRunner {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(GMPTestRunner)
+
+ GMPTestRunner() = default;
+ void DoTest(void (GMPTestRunner::*aTestMethod)(GMPTestMonitor&));
+ void RunTestGMPTestCodec1(GMPTestMonitor& aMonitor);
+ void RunTestGMPTestCodec2(GMPTestMonitor& aMonitor);
+ void RunTestGMPTestCodec3(GMPTestMonitor& aMonitor);
+ void RunTestGMPCrossOrigin1(GMPTestMonitor& aMonitor);
+ void RunTestGMPCrossOrigin2(GMPTestMonitor& aMonitor);
+ void RunTestGMPCrossOrigin3(GMPTestMonitor& aMonitor);
+ void RunTestGMPCrossOrigin4(GMPTestMonitor& aMonitor);
+
+ private:
+ ~GMPTestRunner() = default;
+};
+
+template <class T, class Base,
+ nsresult (NS_STDCALL GeckoMediaPluginService::*Getter)(
+ GMPCrashHelper*, nsTArray<nsCString>*, const nsACString&,
+ UniquePtr<Base>&&)>
+class RunTestGMPVideoCodec : public Base {
+ public:
+ void Done(T* aGMP, GMPVideoHost* aHost) override {
+ EXPECT_TRUE(aGMP);
+ EXPECT_TRUE(aHost);
+ if (aGMP) {
+ aGMP->Close();
+ }
+ mMonitor.SetFinished();
+ }
+
+ static void Run(GMPTestMonitor& aMonitor, const nsCString& aOrigin) {
+ UniquePtr<GMPCallbackType> callback(new RunTestGMPVideoCodec(aMonitor));
+ Get(aOrigin, std::move(callback));
+ }
+
+ protected:
+ typedef T GMPCodecType;
+ typedef Base GMPCallbackType;
+
+ explicit RunTestGMPVideoCodec(GMPTestMonitor& aMonitor)
+ : mMonitor(aMonitor) {}
+
+ static nsresult Get(const nsACString& aNodeId, UniquePtr<Base>&& aCallback) {
+ nsTArray<nsCString> tags;
+ tags.AppendElement("h264"_ns);
+ tags.AppendElement("fake"_ns);
+
+ RefPtr<GeckoMediaPluginService> service =
+ GeckoMediaPluginService::GetGeckoMediaPluginService();
+ return ((*service).*Getter)(nullptr, &tags, aNodeId, std::move(aCallback));
+ }
+
+ GMPTestMonitor& mMonitor;
+};
+
+typedef RunTestGMPVideoCodec<GMPVideoDecoderProxy, GetGMPVideoDecoderCallback,
+ &GeckoMediaPluginService::GetGMPVideoDecoder>
+ RunTestGMPVideoDecoder;
+typedef RunTestGMPVideoCodec<GMPVideoEncoderProxy, GetGMPVideoEncoderCallback,
+ &GeckoMediaPluginService::GetGMPVideoEncoder>
+ RunTestGMPVideoEncoder;
+
+void GMPTestRunner::RunTestGMPTestCodec1(GMPTestMonitor& aMonitor) {
+ RunTestGMPVideoDecoder::Run(aMonitor, "o"_ns);
+}
+
+void GMPTestRunner::RunTestGMPTestCodec2(GMPTestMonitor& aMonitor) {
+ RunTestGMPVideoDecoder::Run(aMonitor, ""_ns);
+}
+
+void GMPTestRunner::RunTestGMPTestCodec3(GMPTestMonitor& aMonitor) {
+ RunTestGMPVideoEncoder::Run(aMonitor, ""_ns);
+}
+
+template <class Base>
+class RunTestGMPCrossOrigin : public Base {
+ public:
+ void Done(typename Base::GMPCodecType* aGMP, GMPVideoHost* aHost) override {
+ EXPECT_TRUE(aGMP);
+
+ UniquePtr<typename Base::GMPCallbackType> callback(
+ new Step2(Base::mMonitor, aGMP, mShouldBeEqual));
+ nsresult rv = Base::Get(mOrigin2, std::move(callback));
+ EXPECT_NS_SUCCEEDED(rv);
+ if (NS_FAILED(rv)) {
+ Base::mMonitor.SetFinished();
+ }
+ }
+
+ static void Run(GMPTestMonitor& aMonitor, const nsCString& aOrigin1,
+ const nsCString& aOrigin2) {
+ UniquePtr<typename Base::GMPCallbackType> callback(
+ new RunTestGMPCrossOrigin<Base>(aMonitor, aOrigin1, aOrigin2));
+ nsresult rv = Base::Get(aOrigin1, std::move(callback));
+ EXPECT_NS_SUCCEEDED(rv);
+ if (NS_FAILED(rv)) {
+ aMonitor.SetFinished();
+ }
+ }
+
+ private:
+ RunTestGMPCrossOrigin(GMPTestMonitor& aMonitor, const nsCString& aOrigin1,
+ const nsCString& aOrigin2)
+ : Base(aMonitor),
+ mGMP(nullptr),
+ mOrigin2(aOrigin2),
+ mShouldBeEqual(aOrigin1.Equals(aOrigin2)) {}
+
+ class Step2 : public Base {
+ public:
+ Step2(GMPTestMonitor& aMonitor, typename Base::GMPCodecType* aGMP,
+ bool aShouldBeEqual)
+ : Base(aMonitor), mGMP(aGMP), mShouldBeEqual(aShouldBeEqual) {}
+ void Done(typename Base::GMPCodecType* aGMP, GMPVideoHost* aHost) override {
+ EXPECT_TRUE(aGMP);
+ if (aGMP) {
+ EXPECT_TRUE(mGMP && (mGMP->GetPluginId() == aGMP->GetPluginId()) ==
+ mShouldBeEqual);
+ }
+ if (mGMP) {
+ mGMP->Close();
+ }
+ Base::Done(aGMP, aHost);
+ }
+
+ private:
+ typename Base::GMPCodecType* mGMP;
+ bool mShouldBeEqual;
+ };
+
+ typename Base::GMPCodecType* mGMP;
+ nsCString mOrigin2;
+ bool mShouldBeEqual;
+};
+
+typedef RunTestGMPCrossOrigin<RunTestGMPVideoDecoder>
+ RunTestGMPVideoDecoderCrossOrigin;
+typedef RunTestGMPCrossOrigin<RunTestGMPVideoEncoder>
+ RunTestGMPVideoEncoderCrossOrigin;
+
+void GMPTestRunner::RunTestGMPCrossOrigin1(GMPTestMonitor& aMonitor) {
+ RunTestGMPVideoDecoderCrossOrigin::Run(aMonitor, "origin1"_ns, "origin2"_ns);
+}
+
+void GMPTestRunner::RunTestGMPCrossOrigin2(GMPTestMonitor& aMonitor) {
+ RunTestGMPVideoEncoderCrossOrigin::Run(aMonitor, "origin1"_ns, "origin2"_ns);
+}
+
+void GMPTestRunner::RunTestGMPCrossOrigin3(GMPTestMonitor& aMonitor) {
+ RunTestGMPVideoDecoderCrossOrigin::Run(aMonitor, "origin1"_ns, "origin1"_ns);
+}
+
+void GMPTestRunner::RunTestGMPCrossOrigin4(GMPTestMonitor& aMonitor) {
+ RunTestGMPVideoEncoderCrossOrigin::Run(aMonitor, "origin1"_ns, "origin1"_ns);
+}
+
+void GMPTestRunner::DoTest(
+ void (GMPTestRunner::*aTestMethod)(GMPTestMonitor&)) {
+ RefPtr<GeckoMediaPluginService> service =
+ GeckoMediaPluginService::GetGeckoMediaPluginService();
+ nsCOMPtr<nsIThread> thread;
+ EXPECT_NS_SUCCEEDED(service->GetThread(getter_AddRefs(thread)));
+
+ GMPTestMonitor monitor;
+ thread->Dispatch(NewRunnableMethod<GMPTestMonitor&>(
+ "GMPTestRunner::DoTest", this, aTestMethod, monitor),
+ NS_DISPATCH_NORMAL);
+ monitor.AwaitFinished();
+}
+
+// Bug 1776767 - Skip all GMP tests on Windows ASAN
+#if !(defined(XP_WIN) && defined(MOZ_ASAN))
+TEST(GeckoMediaPlugins, GMPTestCodec)
+{
+ RefPtr<GMPTestRunner> runner = new GMPTestRunner();
+ runner->DoTest(&GMPTestRunner::RunTestGMPTestCodec1);
+ runner->DoTest(&GMPTestRunner::RunTestGMPTestCodec2);
+ runner->DoTest(&GMPTestRunner::RunTestGMPTestCodec3);
+}
+
+TEST(GeckoMediaPlugins, GMPCrossOrigin)
+{
+ RefPtr<GMPTestRunner> runner = new GMPTestRunner();
+ runner->DoTest(&GMPTestRunner::RunTestGMPCrossOrigin1);
+ runner->DoTest(&GMPTestRunner::RunTestGMPCrossOrigin2);
+ runner->DoTest(&GMPTestRunner::RunTestGMPCrossOrigin3);
+ runner->DoTest(&GMPTestRunner::RunTestGMPCrossOrigin4);
+}
+#endif // !(defined(XP_WIN) && defined(MOZ_ASAN))
diff --git a/dom/media/gtest/TestGMPRemoveAndDelete.cpp b/dom/media/gtest/TestGMPRemoveAndDelete.cpp
new file mode 100644
index 0000000000..9f2118ea8a
--- /dev/null
+++ b/dom/media/gtest/TestGMPRemoveAndDelete.cpp
@@ -0,0 +1,472 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "GMPService.h"
+#include "GMPServiceParent.h"
+#include "GMPTestMonitor.h"
+#include "GMPUtils.h"
+#include "GMPVideoDecoderProxy.h"
+#include "gmp-api/gmp-video-host.h"
+#include "gtest/gtest.h"
+#include "mozilla/Services.h"
+#include "mozilla/StaticPtr.h"
+#include "nsDirectoryServiceDefs.h"
+#include "nsIObserverService.h"
+
+#define GMP_DIR_NAME u"gmp-fakeopenh264"_ns
+#define GMP_OLD_VERSION u"1.0"_ns
+#define GMP_NEW_VERSION u"1.1"_ns
+
+#define GMP_DELETED_TOPIC "gmp-directory-deleted"
+
+#define EXPECT_OK(X) EXPECT_TRUE(NS_SUCCEEDED(X))
+
+using namespace mozilla;
+using namespace mozilla::gmp;
+
+class GMPRemoveTest : public nsIObserver, public GMPVideoDecoderCallbackProxy {
+ public:
+ GMPRemoveTest();
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ // Called when a GMP plugin directory has been successfully deleted.
+ // |aData| will contain the directory path.
+ NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic,
+ const char16_t* aData) override;
+
+ // Create a new GMP plugin directory that we can trash and add it to the GMP
+ // service. Remove the original plugin directory. Original plugin directory
+ // gets re-added at destruction.
+ void Setup();
+
+ bool CreateVideoDecoder(nsCString aNodeId = ""_ns);
+ void CloseVideoDecoder();
+
+ void DeletePluginDirectory(bool aCanDefer);
+
+ // Decode a dummy frame.
+ GMPErr Decode();
+
+ // Wait until TestMonitor has been signaled.
+ void Wait();
+
+ // Did we get a Terminated() callback from the plugin?
+ bool IsTerminated();
+
+ // From GMPVideoDecoderCallbackProxy
+ // Set mDecodeResult; unblock TestMonitor.
+ virtual void Decoded(GMPVideoi420Frame* aDecodedFrame) override;
+ virtual void Error(GMPErr aError) override;
+
+ // From GMPVideoDecoderCallbackProxy
+ // We expect this to be called when a plugin has been forcibly closed.
+ virtual void Terminated() override;
+
+ // Ignored GMPVideoDecoderCallbackProxy members
+ virtual void ReceivedDecodedReferenceFrame(
+ const uint64_t aPictureId) override {}
+ virtual void ReceivedDecodedFrame(const uint64_t aPictureId) override {}
+ virtual void InputDataExhausted() override {}
+ virtual void DrainComplete() override {}
+ virtual void ResetComplete() override {}
+
+ private:
+ virtual ~GMPRemoveTest();
+
+ void gmp_Decode();
+ void gmp_GetVideoDecoder(nsCString aNodeId,
+ GMPVideoDecoderProxy** aOutDecoder,
+ GMPVideoHost** aOutHost);
+ void GeneratePlugin();
+
+ GMPTestMonitor mTestMonitor;
+ nsCOMPtr<nsIThread> mGMPThread;
+
+ bool mIsTerminated;
+
+ // Path to the cloned GMP we have created.
+ nsString mTmpPath;
+ nsCOMPtr<nsIFile> mTmpDir;
+
+ // Path to the original GMP. Store so that we can re-add it after we're done
+ // testing.
+ nsString mOriginalPath;
+
+ GMPVideoDecoderProxy* mDecoder;
+ GMPVideoHost* mHost;
+ GMPErr mDecodeResult;
+};
+
+/*
+ * Simple test that the plugin is deleted when forcibly removed and deleted.
+ */
+TEST(GeckoMediaPlugins, RemoveAndDeleteForcedSimple)
+{
+ RefPtr<GMPRemoveTest> test(new GMPRemoveTest());
+
+ test->Setup();
+ test->DeletePluginDirectory(false /* force immediate */);
+ test->Wait();
+}
+
+/*
+ * Simple test that the plugin is deleted when deferred deletion is allowed.
+ */
+TEST(GeckoMediaPlugins, RemoveAndDeleteDeferredSimple)
+{
+ RefPtr<GMPRemoveTest> test(new GMPRemoveTest());
+
+ test->Setup();
+ test->DeletePluginDirectory(true /* can defer */);
+ test->Wait();
+}
+
+/*
+ * Test that the plugin is unavailable immediately after a forced
+ * RemoveAndDelete, and that the plugin is deleted afterwards.
+ */
+// Bug 1115253 - disable test in win64 to reduce failure rate
+#if !defined(_WIN64)
+TEST(GeckoMediaPlugins, RemoveAndDeleteForcedInUse)
+{
+ RefPtr<GMPRemoveTest> test(new GMPRemoveTest());
+
+ test->Setup();
+ EXPECT_TRUE(test->CreateVideoDecoder("thisOrigin"_ns));
+
+ // Test that we can decode a frame.
+ GMPErr err = test->Decode();
+ EXPECT_EQ(err, GMPNoErr);
+
+ test->DeletePluginDirectory(false /* force immediate */);
+ test->Wait();
+
+ // Test that the VideoDecoder is no longer available.
+ EXPECT_FALSE(test->CreateVideoDecoder("thisOrigin"_ns));
+
+ // Test that we were notified of the plugin's destruction.
+ EXPECT_TRUE(test->IsTerminated());
+}
+
+/*
+ * Test that the plugin is still usable after a deferred RemoveAndDelete, and
+ * that the plugin is deleted afterwards.
+ */
+TEST(GeckoMediaPlugins, RemoveAndDeleteDeferredInUse)
+{
+ RefPtr<GMPRemoveTest> test(new GMPRemoveTest());
+
+ test->Setup();
+ EXPECT_TRUE(test->CreateVideoDecoder("thisOrigin"_ns));
+
+ // Make sure decoding works before we do anything.
+ GMPErr err = test->Decode();
+ EXPECT_EQ(err, GMPNoErr);
+
+ test->DeletePluginDirectory(true /* can defer */);
+
+ // Test that decoding still works.
+ err = test->Decode();
+ EXPECT_EQ(err, GMPNoErr);
+
+ // Test that this origin is still able to fetch the video decoder.
+ EXPECT_TRUE(test->CreateVideoDecoder("thisOrigin"_ns));
+
+ test->CloseVideoDecoder();
+ test->Wait();
+}
+#endif
+
+static StaticRefPtr<GeckoMediaPluginService> gService;
+static StaticRefPtr<GeckoMediaPluginServiceParent> gServiceParent;
+
+static GeckoMediaPluginService* GetService() {
+ if (!gService) {
+ RefPtr<GeckoMediaPluginService> service =
+ GeckoMediaPluginService::GetGeckoMediaPluginService();
+ gService = service;
+ }
+
+ return gService.get();
+}
+
+static GeckoMediaPluginServiceParent* GetServiceParent() {
+ if (!gServiceParent) {
+ RefPtr<GeckoMediaPluginServiceParent> parent =
+ GeckoMediaPluginServiceParent::GetSingleton();
+ gServiceParent = parent;
+ }
+
+ return gServiceParent.get();
+}
+
+NS_IMPL_ISUPPORTS(GMPRemoveTest, nsIObserver)
+
+GMPRemoveTest::GMPRemoveTest()
+ : mIsTerminated(false), mDecoder(nullptr), mHost(nullptr) {}
+
+GMPRemoveTest::~GMPRemoveTest() {
+ bool exists;
+ EXPECT_TRUE(NS_SUCCEEDED(mTmpDir->Exists(&exists)) && !exists);
+
+ EXPECT_OK(GetServiceParent()->AddPluginDirectory(mOriginalPath));
+}
+
+void GMPRemoveTest::Setup() {
+ GeneratePlugin();
+ GetService()->GetThread(getter_AddRefs(mGMPThread));
+
+ // Spin the event loop until the GMP service has had a chance to complete
+ // adding GMPs from MOZ_GMP_PATH. Otherwise, the RemovePluginDirectory()
+ // below may complete before we're finished adding GMPs from MOZ_GMP_PATH,
+ // and we'll end up not removing the GMP, and the test will fail.
+ nsCOMPtr<nsISerialEventTarget> thread(GetServiceParent()->GetGMPThread());
+ EXPECT_TRUE(thread);
+ GMPTestMonitor* mon = &mTestMonitor;
+ GetServiceParent()->EnsureInitialized()->Then(
+ thread, __func__, [mon]() { mon->SetFinished(); },
+ [mon]() { mon->SetFinished(); });
+ mTestMonitor.AwaitFinished();
+
+ nsCOMPtr<nsIObserverService> obs = mozilla::services::GetObserverService();
+ obs->AddObserver(this, GMP_DELETED_TOPIC, false /* strong ref */);
+ EXPECT_OK(GetServiceParent()->RemovePluginDirectory(mOriginalPath));
+
+ GetServiceParent()->AsyncAddPluginDirectory(mTmpPath)->Then(
+ thread, __func__, [mon]() { mon->SetFinished(); },
+ [mon]() { mon->SetFinished(); });
+ mTestMonitor.AwaitFinished();
+}
+
+bool GMPRemoveTest::CreateVideoDecoder(nsCString aNodeId) {
+ GMPVideoHost* host;
+ GMPVideoDecoderProxy* decoder = nullptr;
+
+ mGMPThread->Dispatch(
+ NewNonOwningRunnableMethod<nsCString, GMPVideoDecoderProxy**,
+ GMPVideoHost**>(
+ "GMPRemoveTest::gmp_GetVideoDecoder", this,
+ &GMPRemoveTest::gmp_GetVideoDecoder, aNodeId, &decoder, &host),
+ NS_DISPATCH_NORMAL);
+
+ mTestMonitor.AwaitFinished();
+
+ if (!decoder) {
+ return false;
+ }
+
+ GMPVideoCodec codec;
+ memset(&codec, 0, sizeof(codec));
+ codec.mGMPApiVersion = 33;
+
+ nsTArray<uint8_t> empty;
+ NS_DispatchAndSpinEventLoopUntilComplete(
+ "GMPVideoDecoderProxy::InitDecode"_ns, mGMPThread,
+ NewNonOwningRunnableMethod<const GMPVideoCodec&, const nsTArray<uint8_t>&,
+ GMPVideoDecoderCallbackProxy*, int32_t>(
+ "GMPVideoDecoderProxy::InitDecode", decoder,
+ &GMPVideoDecoderProxy::InitDecode, codec, empty, this,
+ 1 /* core count */));
+
+ if (mDecoder) {
+ CloseVideoDecoder();
+ }
+
+ mDecoder = decoder;
+ mHost = host;
+
+ return true;
+}
+
+void GMPRemoveTest::gmp_GetVideoDecoder(nsCString aNodeId,
+ GMPVideoDecoderProxy** aOutDecoder,
+ GMPVideoHost** aOutHost) {
+ nsTArray<nsCString> tags;
+ tags.AppendElement("h264"_ns);
+ tags.AppendElement("fake"_ns);
+
+ class Callback : public GetGMPVideoDecoderCallback {
+ public:
+ Callback(GMPTestMonitor* aMonitor, GMPVideoDecoderProxy** aDecoder,
+ GMPVideoHost** aHost)
+ : mMonitor(aMonitor), mDecoder(aDecoder), mHost(aHost) {}
+ virtual void Done(GMPVideoDecoderProxy* aDecoder,
+ GMPVideoHost* aHost) override {
+ *mDecoder = aDecoder;
+ *mHost = aHost;
+ mMonitor->SetFinished();
+ }
+
+ private:
+ GMPTestMonitor* mMonitor;
+ GMPVideoDecoderProxy** mDecoder;
+ GMPVideoHost** mHost;
+ };
+
+ UniquePtr<GetGMPVideoDecoderCallback> cb(
+ new Callback(&mTestMonitor, aOutDecoder, aOutHost));
+
+ if (NS_FAILED(GetService()->GetGMPVideoDecoder(nullptr, &tags, aNodeId,
+ std::move(cb)))) {
+ mTestMonitor.SetFinished();
+ }
+}
+
+void GMPRemoveTest::CloseVideoDecoder() {
+ NS_DispatchAndSpinEventLoopUntilComplete(
+ "GMPVideoDecoderProxy::Close"_ns, mGMPThread,
+ NewNonOwningRunnableMethod("GMPVideoDecoderProxy::Close", mDecoder,
+ &GMPVideoDecoderProxy::Close));
+
+ mDecoder = nullptr;
+ mHost = nullptr;
+}
+
+void GMPRemoveTest::DeletePluginDirectory(bool aCanDefer) {
+ GetServiceParent()->RemoveAndDeletePluginDirectory(mTmpPath, aCanDefer);
+}
+
+GMPErr GMPRemoveTest::Decode() {
+ mGMPThread->Dispatch(
+ NewNonOwningRunnableMethod("GMPRemoveTest::gmp_Decode", this,
+ &GMPRemoveTest::gmp_Decode),
+ NS_DISPATCH_NORMAL);
+
+ mTestMonitor.AwaitFinished();
+ return mDecodeResult;
+}
+
+void GMPRemoveTest::gmp_Decode() {
+// from gmp-fake.cpp
+#pragma pack(push, 1)
+ struct EncodedFrame {
+ struct SPSNalu {
+ uint32_t size_;
+ uint8_t payload[14];
+ } sps_nalu;
+ struct PPSNalu {
+ uint32_t size_;
+ uint8_t payload[4];
+ } pps_nalu;
+ struct IDRNalu {
+ uint32_t size_;
+ uint8_t h264_compat_;
+ uint32_t magic_;
+ uint32_t width_;
+ uint32_t height_;
+ uint8_t y_;
+ uint8_t u_;
+ uint8_t v_;
+ uint64_t timestamp_;
+ } idr_nalu;
+ };
+#pragma pack(pop)
+
+ GMPVideoFrame* absFrame;
+ GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &absFrame);
+ EXPECT_EQ(err, GMPNoErr);
+
+ GMPUniquePtr<GMPVideoEncodedFrame> frame(
+ static_cast<GMPVideoEncodedFrame*>(absFrame));
+ err = frame->CreateEmptyFrame(sizeof(EncodedFrame) /* size */);
+ EXPECT_EQ(err, GMPNoErr);
+
+ EncodedFrame* frameData = reinterpret_cast<EncodedFrame*>(frame->Buffer());
+ frameData->sps_nalu.size_ = sizeof(EncodedFrame::SPSNalu) - sizeof(uint32_t);
+ frameData->pps_nalu.size_ = sizeof(EncodedFrame::PPSNalu) - sizeof(uint32_t);
+ frameData->idr_nalu.size_ = sizeof(EncodedFrame::IDRNalu) - sizeof(uint32_t);
+ frameData->idr_nalu.h264_compat_ = 5;
+ frameData->idr_nalu.magic_ = 0x004000b8;
+ frameData->idr_nalu.width_ = frameData->idr_nalu.height_ = 16;
+
+ nsTArray<uint8_t> empty;
+ nsresult rv =
+ mDecoder->Decode(std::move(frame), false /* aMissingFrames */, empty);
+ EXPECT_OK(rv);
+}
+
+void GMPRemoveTest::Wait() { mTestMonitor.AwaitFinished(); }
+
+bool GMPRemoveTest::IsTerminated() { return mIsTerminated; }
+
+// nsIObserver
+NS_IMETHODIMP
+GMPRemoveTest::Observe(nsISupports* aSubject, const char* aTopic,
+ const char16_t* aData) {
+ EXPECT_TRUE(!strcmp(GMP_DELETED_TOPIC, aTopic));
+
+ nsString data(aData);
+ if (mTmpPath.Equals(data)) {
+ mTestMonitor.SetFinished();
+ nsCOMPtr<nsIObserverService> obs = mozilla::services::GetObserverService();
+ obs->RemoveObserver(this, GMP_DELETED_TOPIC);
+ }
+
+ return NS_OK;
+}
+
+// GMPVideoDecoderCallbackProxy
+void GMPRemoveTest::Decoded(GMPVideoi420Frame* aDecodedFrame) {
+ aDecodedFrame->Destroy();
+ mDecodeResult = GMPNoErr;
+ mTestMonitor.SetFinished();
+}
+
+// GMPVideoDecoderCallbackProxy
+void GMPRemoveTest::Error(GMPErr aError) {
+ mDecodeResult = aError;
+ mTestMonitor.SetFinished();
+}
+
+// GMPVideoDecoderCallbackProxy
+void GMPRemoveTest::Terminated() {
+ mIsTerminated = true;
+ if (mDecoder) {
+ mDecoder->Close();
+ mDecoder = nullptr;
+ }
+}
+
+void GMPRemoveTest::GeneratePlugin() {
+ nsresult rv;
+ nsCOMPtr<nsIFile> gmpDir;
+ nsCOMPtr<nsIFile> origDir;
+ nsCOMPtr<nsIFile> tmpDir;
+
+ rv = NS_GetSpecialDirectory(NS_GRE_DIR, getter_AddRefs(gmpDir));
+ EXPECT_OK(rv);
+ rv = gmpDir->Append(GMP_DIR_NAME);
+ EXPECT_OK(rv);
+
+ rv = gmpDir->Clone(getter_AddRefs(origDir));
+ EXPECT_OK(rv);
+ rv = origDir->Append(GMP_OLD_VERSION);
+ EXPECT_OK(rv);
+
+ rv = gmpDir->Clone(getter_AddRefs(tmpDir));
+ EXPECT_OK(rv);
+ rv = tmpDir->Append(GMP_NEW_VERSION);
+ EXPECT_OK(rv);
+ bool exists = false;
+ rv = tmpDir->Exists(&exists);
+ EXPECT_OK(rv);
+ if (exists) {
+ rv = tmpDir->Remove(true);
+ EXPECT_OK(rv);
+ }
+ rv = origDir->CopyTo(gmpDir, GMP_NEW_VERSION);
+ EXPECT_OK(rv);
+
+ rv = gmpDir->Clone(getter_AddRefs(tmpDir));
+ EXPECT_OK(rv);
+ rv = tmpDir->Append(GMP_NEW_VERSION);
+ EXPECT_OK(rv);
+
+ EXPECT_OK(origDir->GetPath(mOriginalPath));
+ EXPECT_OK(tmpDir->GetPath(mTmpPath));
+ mTmpDir = tmpDir;
+}
diff --git a/dom/media/gtest/TestGMPUtils.cpp b/dom/media/gtest/TestGMPUtils.cpp
new file mode 100644
index 0000000000..589b47b581
--- /dev/null
+++ b/dom/media/gtest/TestGMPUtils.cpp
@@ -0,0 +1,84 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "GMPUtils.h"
+#include "mozilla/ArrayUtils.h"
+#include "nsString.h"
+
+#include <string>
+#include <vector>
+
+using namespace mozilla;
+
+void TestSplitAt(const char* aInput, const char* aDelims,
+ size_t aNumExpectedTokens, const char* aExpectedTokens[]) {
+ nsCString input(aInput);
+ nsTArray<nsCString> tokens;
+ SplitAt(aDelims, input, tokens);
+ EXPECT_EQ(tokens.Length(), aNumExpectedTokens)
+ << "Should get expected number of tokens";
+ for (size_t i = 0; i < tokens.Length(); i++) {
+ EXPECT_TRUE(tokens[i].EqualsASCII(aExpectedTokens[i]))
+ << "Tokenize fail; expected=" << aExpectedTokens[i]
+ << " got=" << tokens[i].BeginReading();
+ }
+}
+
+TEST(GeckoMediaPlugins, TestSplitAt)
+{
+ {
+ const char* input = "1,2,3,4";
+ const char* delims = ",";
+ const char* tokens[] = {"1", "2", "3", "4"};
+ TestSplitAt(input, delims, MOZ_ARRAY_LENGTH(tokens), tokens);
+ }
+
+ {
+ const char* input = "a simple, comma, seperated, list";
+ const char* delims = ",";
+ const char* tokens[] = {"a simple", " comma", " seperated", " list"};
+ TestSplitAt(input, delims, MOZ_ARRAY_LENGTH(tokens), tokens);
+ }
+
+ {
+ const char* input = // Various platform line endings...
+ "line1\r\n" // Windows
+ "line2\r" // Old MacOSX
+ "line3\n" // Unix
+ "line4";
+ const char* delims = "\r\n";
+ const char* tokens[] = {"line1", "line2", "line3", "line4"};
+ TestSplitAt(input, delims, MOZ_ARRAY_LENGTH(tokens), tokens);
+ }
+}
+
+TEST(GeckoMediaPlugins, ToHexString)
+{
+ struct Test {
+ nsTArray<uint8_t> bytes;
+ std::string hex;
+ };
+
+ static const Test tests[] = {
+ {{0x00, 0x00}, "0000"},
+ {{0xff, 0xff}, "ffff"},
+ {{0xff, 0x00}, "ff00"},
+ {{0x00, 0xff}, "00ff"},
+ {{0xf0, 0x10}, "f010"},
+ {{0x05, 0x50}, "0550"},
+ {{0xf}, "0f"},
+ {{0x10}, "10"},
+ {{}, ""},
+ {{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb,
+ 0xcc, 0xdd, 0xee, 0xff},
+ "00112233445566778899aabbccddeeff"},
+ };
+
+ for (const Test& test : tests) {
+ EXPECT_STREQ(test.hex.c_str(), ToHexString(test.bytes).get());
+ }
+}
diff --git a/dom/media/gtest/TestGroupId.cpp b/dom/media/gtest/TestGroupId.cpp
new file mode 100644
index 0000000000..efd0bae20a
--- /dev/null
+++ b/dom/media/gtest/TestGroupId.cpp
@@ -0,0 +1,302 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AudioDeviceInfo.h"
+#include "MediaManager.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/UniquePtr.h"
+#include "nsTArray.h"
+#include "webrtc/MediaEngineFake.h"
+
+using ::testing::Return;
+using namespace mozilla;
+
+void PrintTo(const nsString& aValue, ::std::ostream* aStream) {
+ NS_ConvertUTF16toUTF8 str(aValue);
+ (*aStream) << str.get();
+}
+void PrintTo(const nsCString& aValue, ::std::ostream* aStream) {
+ (*aStream) << aValue.get();
+}
+
+RefPtr<AudioDeviceInfo> MakeAudioDeviceInfo(const nsAString& aName,
+ const nsAString& aGroupId,
+ uint16_t aType) {
+ return MakeRefPtr<AudioDeviceInfo>(
+ nullptr, aName, aGroupId, u"Vendor"_ns, aType,
+ AudioDeviceInfo::STATE_ENABLED, AudioDeviceInfo::PREF_NONE,
+ AudioDeviceInfo::FMT_F32LE, AudioDeviceInfo::FMT_F32LE, 2u, 44100u,
+ 44100u, 44100u, 0, 0);
+}
+
+RefPtr<MediaDevice> MakeCameraDevice(const nsString& aName,
+ const nsString& aGroupId) {
+ return new MediaDevice(new MediaEngineFake(), dom::MediaSourceEnum::Camera,
+ aName, u""_ns, aGroupId, MediaDevice::IsScary::No,
+ MediaDevice::OsPromptable::No);
+}
+
+RefPtr<MediaDevice> MakeMicDevice(const nsString& aName,
+ const nsString& aGroupId) {
+ return new MediaDevice(
+ new MediaEngineFake(),
+ MakeAudioDeviceInfo(aName, aGroupId, AudioDeviceInfo::TYPE_INPUT),
+ u""_ns);
+}
+
+RefPtr<MediaDevice> MakeSpeakerDevice(const nsString& aName,
+ const nsString& aGroupId) {
+ return new MediaDevice(
+ new MediaEngineFake(),
+ MakeAudioDeviceInfo(aName, aGroupId, AudioDeviceInfo::TYPE_OUTPUT),
+ u"ID"_ns);
+}
+
+/* Verify that when an audio input device name contains the video input device
+ * name the video device group id is updated to become equal to the audio
+ * device group id. */
+TEST(TestGroupId, MatchInput_PartOfName)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, u"Cam-Model-GroupId"_ns));
+
+ auto mic =
+ MakeMicDevice(u"Vendor Model Analog Stereo"_ns, u"Mic-Model-GroupId"_ns);
+ devices.AppendElement(mic);
+ audios.AppendElement(mic);
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, devices[1]->mRawGroupID)
+ << "Video group id is the same as audio input group id.";
+}
+
+/* Verify that when an audio input device name is the same as the video input
+ * device name the video device group id is updated to become equal to the audio
+ * device group id. */
+TEST(TestGroupId, MatchInput_FullName)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, u"Cam-Model-GroupId"_ns));
+
+ auto mic = MakeMicDevice(u"Vendor Model"_ns, u"Mic-Model-GroupId"_ns);
+ devices.AppendElement(mic);
+ audios.AppendElement(mic);
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, devices[1]->mRawGroupID)
+ << "Video group id is the same as audio input group id.";
+}
+
+/* Verify that when an audio input device name does not contain the video input
+ * device name the video device group id does not change. */
+TEST(TestGroupId, NoMatchInput)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ nsString Cam_Model_GroupId = u"Cam-Model-GroupId"_ns;
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, Cam_Model_GroupId));
+
+ audios.AppendElement(
+ MakeMicDevice(u"Model Analog Stereo"_ns, u"Mic-Model-GroupId"_ns));
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, Cam_Model_GroupId)
+ << "Video group id has not been updated.";
+ EXPECT_NE(devices[0]->mRawGroupID, audios[0]->mRawGroupID)
+ << "Video group id is different than audio input group id.";
+}
+
+/* Verify that when more than one audio input and more than one audio output
+ * device name contain the video input device name the video device group id
+ * does not change. */
+TEST(TestGroupId, NoMatch_TwoIdenticalDevices)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ nsString Cam_Model_GroupId = u"Cam-Model-GroupId"_ns;
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, Cam_Model_GroupId));
+
+ audios.AppendElement(
+ MakeMicDevice(u"Vendor Model Analog Stereo"_ns, u"Mic-Model-GroupId"_ns));
+ audios.AppendElement(
+ MakeMicDevice(u"Vendor Model Analog Stereo"_ns, u"Mic-Model-GroupId"_ns));
+
+ audios.AppendElement(MakeSpeakerDevice(u"Vendor Model Analog Stereo"_ns,
+ u"Speaker-Model-GroupId"_ns));
+ audios.AppendElement(MakeSpeakerDevice(u"Vendor Model Analog Stereo"_ns,
+ u"Speaker-Model-GroupId"_ns));
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, Cam_Model_GroupId)
+ << "Video group id has not been updated.";
+ EXPECT_NE(devices[0]->mRawGroupID, audios[0]->mRawGroupID)
+ << "Video group id is different from audio input group id.";
+ EXPECT_NE(devices[0]->mRawGroupID, audios[2]->mRawGroupID)
+ << "Video group id is different from audio output group id.";
+}
+
+/* Verify that when more than one audio input device name contains the video
+ * input device name the video device group id is not updated by audio input
+ * device group id but it continues looking at audio output devices where it
+ * finds a match so video input group id is updated by audio output group id. */
+TEST(TestGroupId, Match_TwoIdenticalInputsMatchOutput)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ nsString Cam_Model_GroupId = u"Cam-Model-GroupId"_ns;
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, Cam_Model_GroupId));
+
+ audios.AppendElement(
+ MakeMicDevice(u"Vendor Model Analog Stereo"_ns, u"Mic-Model-GroupId"_ns));
+ audios.AppendElement(
+ MakeMicDevice(u"Vendor Model Analog Stereo"_ns, u"Mic-Model-GroupId"_ns));
+
+ audios.AppendElement(MakeSpeakerDevice(u"Vendor Model Analog Stereo"_ns,
+ u"Speaker-Model-GroupId"_ns));
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, audios[2]->mRawGroupID)
+ << "Video group id is the same as audio output group id.";
+}
+
+/* Verify that when more than one audio input and more than one audio output
+ * device names contain the video input device name the video device group id
+ * does not change. */
+TEST(TestGroupId, NoMatch_ThreeIdenticalDevices)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ nsString Cam_Model_GroupId = u"Cam-Model-GroupId"_ns;
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, Cam_Model_GroupId));
+
+ audios.AppendElement(
+ MakeMicDevice(u"Vendor Model Analog Stereo"_ns, u"Mic-Model-GroupId"_ns));
+ audios.AppendElement(
+ MakeMicDevice(u"Vendor Model Analog Stereo"_ns, u"Mic-Model-GroupId"_ns));
+ audios.AppendElement(
+ MakeMicDevice(u"Vendor Model Analog Stereo"_ns, u"Mic-Model-GroupId"_ns));
+
+ audios.AppendElement(MakeSpeakerDevice(u"Vendor Model Analog Stereo"_ns,
+ u"Speaker-Model-GroupId"_ns));
+ audios.AppendElement(MakeSpeakerDevice(u"Vendor Model Analog Stereo"_ns,
+ u"Speaker-Model-GroupId"_ns));
+ audios.AppendElement(MakeSpeakerDevice(u"Vendor Model Analog Stereo"_ns,
+ u"Speaker-Model-GroupId"_ns));
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, Cam_Model_GroupId)
+ << "Video group id has not been updated.";
+ EXPECT_NE(devices[0]->mRawGroupID, audios[0]->mRawGroupID)
+ << "Video group id is different from audio input group id.";
+ EXPECT_NE(devices[0]->mRawGroupID, audios[3]->mRawGroupID)
+ << "Video group id is different from audio output group id.";
+}
+
+/* Verify that when an audio output device name contains the video input device
+ * name the video device group id is updated to become equal to the audio
+ * device group id. */
+TEST(TestGroupId, MatchOutput)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, u"Cam-Model-GroupId"_ns));
+
+ audios.AppendElement(
+ MakeMicDevice(u"Mic Analog Stereo"_ns, u"Mic-Model-GroupId"_ns));
+
+ audios.AppendElement(MakeSpeakerDevice(u"Vendor Model Analog Stereo"_ns,
+ u"Speaker-Model-GroupId"_ns));
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, audios[1]->mRawGroupID)
+ << "Video group id is the same as audio output group id.";
+}
+
+/* Verify that when an audio input device name is the same as audio output
+ * device and video input device name the video device group id is updated to
+ * become equal to the audio input device group id. */
+TEST(TestGroupId, InputOutputSameName)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, u"Cam-Model-GroupId"_ns));
+
+ audios.AppendElement(
+ MakeMicDevice(u"Vendor Model"_ns, u"Mic-Model-GroupId"_ns));
+
+ audios.AppendElement(
+ MakeSpeakerDevice(u"Vendor Model"_ns, u"Speaker-Model-GroupId"_ns));
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, audios[0]->mRawGroupID)
+ << "Video input group id is the same as audio input group id.";
+}
+
+/* Verify that when an audio input device name contains the video input device
+ * name and the audio input group id is an empty string, the video device group id
+ * is updated to become equal to the audio device group id. */
+TEST(TestGroupId, InputEmptyGroupId)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, u"Cam-Model-GroupId"_ns));
+
+ audios.AppendElement(MakeMicDevice(u"Vendor Model"_ns, u""_ns));
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, audios[0]->mRawGroupID)
+ << "Video input group id is the same as audio input group id.";
+}
+
+/* Verify that when an audio output device name contains the video input device
+ * name and the audio output group id is an empty string, the video device group id
+ * is updated to become equal to the audio output device group id. */
+TEST(TestGroupId, OutputEmptyGroupId)
+{
+ MediaManager::MediaDeviceSet devices;
+ MediaManager::MediaDeviceSet audios;
+
+ devices.AppendElement(
+ MakeCameraDevice(u"Vendor Model"_ns, u"Cam-Model-GroupId"_ns));
+
+ audios.AppendElement(MakeSpeakerDevice(u"Vendor Model"_ns, u""_ns));
+
+ MediaManager::GuessVideoDeviceGroupIDs(devices, audios);
+
+ EXPECT_EQ(devices[0]->mRawGroupID, audios[0]->mRawGroupID)
+ << "Video input group id is the same as audio output group id.";
+}
diff --git a/dom/media/gtest/TestIntervalSet.cpp b/dom/media/gtest/TestIntervalSet.cpp
new file mode 100644
index 0000000000..11d0428f6c
--- /dev/null
+++ b/dom/media/gtest/TestIntervalSet.cpp
@@ -0,0 +1,819 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "mozilla/ErrorResult.h"
+#include "mozilla/dom/TimeRanges.h"
+#include "TimeUnits.h"
+#include "Intervals.h"
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+using namespace mozilla;
+
+typedef media::Interval<uint8_t> ByteInterval;
+typedef media::Interval<int> IntInterval;
+typedef media::IntervalSet<int> IntIntervals;
+
+// Helper: builds an Interval<uint8_t> from 32-bit bounds, exercising
+// Interval's return-by-value (copy/move) path.
+ByteInterval CreateByteInterval(int32_t aStart, int32_t aEnd) {
+ ByteInterval test(aStart, aEnd);
+ return test;
+}
+
+// Helper: builds a one-interval IntervalSet<uint8_t>, exercising operator+=
+// and the set's return-by-value path.
+media::IntervalSet<uint8_t> CreateByteIntervalSet(int32_t aStart,
+ int32_t aEnd) {
+ media::IntervalSet<uint8_t> test;
+ test += ByteInterval(aStart, aEnd);
+ return test;
+}
+
+// Compile-time exercise of Interval/IntervalSet constructors, copy
+// construction, and the Interval+Set / Set+Interval operators. No runtime
+// assertions; the test passes if it builds and runs without crashing.
+TEST(IntervalSet, Constructors)
+{
+ const int32_t start = 1;
+ const int32_t end = 2;
+ const int32_t fuzz = 0;
+
+ // Compiler exercise.
+ ByteInterval test1(start, end);
+ ByteInterval test2(test1);
+ ByteInterval test3(start, end, fuzz);
+ ByteInterval test4(test3);
+ ByteInterval test5 = CreateByteInterval(start, end);
+
+ media::IntervalSet<uint8_t> blah1(test1);
+ media::IntervalSet<uint8_t> blah2 = blah1;
+ media::IntervalSet<uint8_t> blah3 = blah1 + test1;
+ media::IntervalSet<uint8_t> blah4 = test1 + blah1;
+ media::IntervalSet<uint8_t> blah5 = CreateByteIntervalSet(start, end);
+ // Silence unused-variable warnings.
+ (void)test1;
+ (void)test2;
+ (void)test3;
+ (void)test4;
+ (void)test5;
+ (void)blah1;
+ (void)blah2;
+ (void)blah3;
+ (void)blah4;
+ (void)blah5;
+}
+
+// Helper: builds a TimeInterval from microsecond bounds, exercising
+// TimeUnit's copy constructor and operator= along the way.
+media::TimeInterval CreateTimeInterval(int32_t aStart, int32_t aEnd) {
+ // Copy constructor test
+ media::TimeUnit start = media::TimeUnit::FromMicroseconds(aStart);
+ media::TimeUnit end;
+ // operator= test
+ end = media::TimeUnit::FromMicroseconds(aEnd);
+ media::TimeInterval ti(start, end);
+ return ti;
+}
+
+// Helper: builds a one-interval TimeIntervals set from microsecond bounds.
+media::TimeIntervals CreateTimeIntervals(int32_t aStart, int32_t aEnd) {
+ media::TimeIntervals test;
+ test += CreateTimeInterval(aStart, aEnd);
+ return test;
+}
+
+// Same constructor exercise as above, but for the TimeUnit-based
+// TimeInterval/TimeIntervals specializations. Also checks that constructing
+// a set from a zero-length interval yields an empty set.
+TEST(IntervalSet, TimeIntervalsConstructors)
+{
+ const auto start = media::TimeUnit::FromMicroseconds(1);
+ const auto end = media::TimeUnit::FromMicroseconds(2);
+ const media::TimeUnit fuzz;
+
+ // Compiler exercise.
+ media::TimeInterval test1(start, end);
+ media::TimeInterval test2(test1);
+ media::TimeInterval test3(start, end, fuzz);
+ media::TimeInterval test4(test3);
+ media::TimeInterval test5 =
+ CreateTimeInterval(start.ToMicroseconds(), end.ToMicroseconds());
+
+ media::TimeIntervals blah1(test1);
+ media::TimeIntervals blah2(blah1);
+ media::TimeIntervals blah3 = blah1 + test1;
+ media::TimeIntervals blah4 = test1 + blah1;
+ media::TimeIntervals blah5 =
+ CreateTimeIntervals(start.ToMicroseconds(), end.ToMicroseconds());
+ // Silence unused-variable warnings.
+ (void)test1;
+ (void)test2;
+ (void)test3;
+ (void)test4;
+ (void)test5;
+ (void)blah1;
+ (void)blah2;
+ (void)blah3;
+ (void)blah4;
+ (void)blah5;
+
+ media::TimeIntervals i0{media::TimeInterval(media::TimeUnit::FromSeconds(0),
+ media::TimeUnit::FromSeconds(0))};
+ EXPECT_TRUE(i0.IsEmpty()); // Constructing with an empty time interval.
+}
+
+// Interval::Length() is end - start (end is exclusive).
+TEST(IntervalSet, Length)
+{
+ IntInterval i(15, 25);
+ EXPECT_EQ(10, i.Length());
+}
+
+// Interval::Intersects with overlapping, touching, disjoint, and empty
+// intervals. [start, end) semantics: touching at a boundary is no overlap.
+TEST(IntervalSet, Intersects)
+{
+ EXPECT_TRUE(IntInterval(1, 5).Intersects(IntInterval(3, 4)));
+ EXPECT_TRUE(IntInterval(1, 5).Intersects(IntInterval(3, 7)));
+ EXPECT_TRUE(IntInterval(1, 5).Intersects(IntInterval(-1, 3)));
+ EXPECT_TRUE(IntInterval(1, 5).Intersects(IntInterval(-1, 7)));
+ EXPECT_FALSE(IntInterval(1, 5).Intersects(IntInterval(6, 7)));
+ EXPECT_FALSE(IntInterval(1, 5).Intersects(IntInterval(-1, 0)));
+ // End boundary is exclusive of the interval.
+ EXPECT_FALSE(IntInterval(1, 5).Intersects(IntInterval(5, 7)));
+ EXPECT_FALSE(IntInterval(1, 5).Intersects(IntInterval(0, 1)));
+ // Empty identical interval do not intersect.
+ EXPECT_FALSE(IntInterval(1, 1).Intersects(IntInterval(1, 1)));
+ // Empty interval do not intersect.
+ EXPECT_FALSE(IntInterval(1, 1).Intersects(IntInterval(2, 2)));
+}
+
+// Interval::Intersection: overlapping intervals yield the overlap; touching
+// or empty intervals yield an empty result.
+TEST(IntervalSet, Intersection)
+{
+ IntInterval i0(10, 20);
+ IntInterval i1(15, 25);
+ IntInterval i = i0.Intersection(i1);
+ EXPECT_EQ(15, i.mStart);
+ EXPECT_EQ(20, i.mEnd);
+ // Touching at 20 only: empty intersection.
+ IntInterval j0(10, 20);
+ IntInterval j1(20, 25);
+ IntInterval j = j0.Intersection(j1);
+ EXPECT_TRUE(j.IsEmpty());
+ // Two identical empty intervals: still empty.
+ IntInterval k0(2, 2);
+ IntInterval k1(2, 2);
+ IntInterval k = k0.Intersection(k1);
+ EXPECT_TRUE(k.IsEmpty());
+}
+
+// Interval equality compares both bounds: equal intervals compare equal,
+// and differing in either the start or the end point makes them unequal.
+TEST(IntervalSet, Equals)
+{
+ IntInterval i0(10, 20);
+ IntInterval i1(10, 20);
+ EXPECT_EQ(i0, i1);
+
+ // Differs in start point only.
+ IntInterval i2(5, 20);
+ EXPECT_NE(i0, i2);
+
+ // Differs in end point only. The original re-checked i0 != i2 here,
+ // leaving i3 unused and the end-point comparison untested.
+ IntInterval i3(10, 15);
+ EXPECT_NE(i0, i3);
+}
+
+// media::Intersection over whole sets: each overlap between the two sets
+// becomes one interval in the result, in order.
+TEST(IntervalSet, IntersectionIntervalSet)
+{
+ IntIntervals i0;
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(20, 25);
+ i0 += IntInterval(40, 60);
+
+ IntIntervals i1;
+ i1.Add(IntInterval(7, 15));
+ i1.Add(IntInterval(16, 27));
+ i1.Add(IntInterval(45, 50));
+ i1.Add(IntInterval(53, 57));
+
+ IntIntervals i = media::Intersection(i0, i1);
+
+ EXPECT_EQ(4u, i.Length());
+
+ EXPECT_EQ(7, i[0].mStart);
+ EXPECT_EQ(10, i[0].mEnd);
+
+ EXPECT_EQ(20, i[1].mStart);
+ EXPECT_EQ(25, i[1].mEnd);
+
+ EXPECT_EQ(45, i[2].mStart);
+ EXPECT_EQ(50, i[2].mEnd);
+
+ EXPECT_EQ(53, i[3].mStart);
+ EXPECT_EQ(57, i[3].mEnd);
+}
+
+// Assert two interval sets are element-wise identical (same length, same
+// bounds at each index). Bails out early on a length mismatch so the index
+// loop below cannot run out of range.
+template <typename T>
+static void Compare(const media::IntervalSet<T>& aI1,
+ const media::IntervalSet<T>& aI2) {
+ EXPECT_EQ(aI1.Length(), aI2.Length());
+ if (aI1.Length() != aI2.Length()) {
+ return;
+ }
+ for (uint32_t i = 0; i < aI1.Length(); i++) {
+ EXPECT_EQ(aI1[i].mStart, aI2[i].mStart);
+ EXPECT_EQ(aI1[i].mEnd, aI2[i].mEnd);
+ }
+}
+
+// For every insertion-order permutation of the intervals in aI1 and aI2,
+// check that (a) rebuilding each set yields the same normalized set and
+// (b) their intersection always matches the reference intersection.
+// Cost is O(|aI1|! * |aI2|!), so callers keep the sets small.
+static void GeneratePermutations(const IntIntervals& aI1,
+ const IntIntervals& aI2) {
+ IntIntervals i_ref = media::Intersection(aI1, aI2);
+ // Test all permutations possible
+ std::vector<uint32_t> comb1;
+ for (uint32_t i = 0; i < aI1.Length(); i++) {
+ comb1.push_back(i);
+ }
+ std::vector<uint32_t> comb2;
+ for (uint32_t i = 0; i < aI2.Length(); i++) {
+ comb2.push_back(i);
+ }
+
+ do {
+ do {
+ // Create intervals according to new indexes.
+ IntIntervals i_0;
+ for (uint32_t i = 0; i < comb1.size(); i++) {
+ i_0 += aI1[comb1[i]];
+ }
+ // Test that intervals are always normalized.
+ Compare(aI1, i_0);
+ IntIntervals i_1;
+ for (uint32_t i = 0; i < comb2.size(); i++) {
+ i_1 += aI2[comb2[i]];
+ }
+ Compare(aI2, i_1);
+ // Check intersections yield the same result.
+ Compare(i_0.Intersection(i_1), i_ref);
+ } while (std::next_permutation(comb2.begin(), comb2.end()));
+ } while (std::next_permutation(comb1.begin(), comb1.end()));
+}
+
+// Permutation-driven intersection check with already-normalized
+// (disjoint, ordered) inputs.
+TEST(IntervalSet, IntersectionNormalizedIntervalSet)
+{
+ IntIntervals i0;
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(20, 25);
+ i0 += IntInterval(40, 60);
+
+ IntIntervals i1;
+ i1.Add(IntInterval(7, 15));
+ i1.Add(IntInterval(16, 27));
+ i1.Add(IntInterval(45, 50));
+ i1.Add(IntInterval(53, 57));
+
+ GeneratePermutations(i0, i1);
+}
+
+// Permutation-driven intersection check where the inputs contain
+// overlapping intervals that must be merged during normalization.
+TEST(IntervalSet, IntersectionUnorderedNonNormalizedIntervalSet)
+{
+ IntIntervals i0;
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(8, 25);
+ i0 += IntInterval(24, 60);
+
+ IntIntervals i1;
+ i1.Add(IntInterval(7, 15));
+ i1.Add(IntInterval(10, 27));
+ i1.Add(IntInterval(45, 50));
+ i1.Add(IntInterval(53, 57));
+
+ GeneratePermutations(i0, i1);
+}
+
+// In-place IntervalSet::Intersection with a single interval: the set
+// (overlapping pieces merged) collapses to just the overlap with i1.
+TEST(IntervalSet, IntersectionNonNormalizedInterval)
+{
+ IntIntervals i0;
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(8, 25);
+ i0 += IntInterval(30, 60);
+
+ media::Interval<int> i1(9, 15);
+ i0.Intersection(i1);
+ EXPECT_EQ(1u, i0.Length());
+ EXPECT_EQ(i0[0].mStart, i1.mStart);
+ EXPECT_EQ(i0[0].mEnd, i1.mEnd);
+}
+
+// Same as above but with heavily-overlapping, unordered, duplicated input
+// intervals; the result must still be exactly the overlap with i1.
+TEST(IntervalSet, IntersectionUnorderedNonNormalizedInterval)
+{
+ IntIntervals i0;
+ i0 += IntInterval(1, 3);
+ i0 += IntInterval(1, 10);
+ i0 += IntInterval(9, 12);
+ i0 += IntInterval(12, 15);
+ i0 += IntInterval(8, 25);
+ i0 += IntInterval(30, 60);
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(30, 60);
+
+ media::Interval<int> i1(9, 15);
+ i0.Intersection(i1);
+ EXPECT_EQ(1u, i0.Length());
+ EXPECT_EQ(i0[0].mStart, i1.mStart);
+ EXPECT_EQ(i0[0].mEnd, i1.mEnd);
+}
+
+// Returns a copy by value, used below to exercise rvalue/move overloads.
+static IntIntervals Duplicate(const IntIntervals& aValue) {
+ IntIntervals value(aValue);
+ return value;
+}
+
+// Sets stay normalized (sorted, overlaps merged) through the various
+// add operators, including the move overloads; then checks that adding a
+// fuzzed TimeInterval can merge two adjacent time ranges.
+TEST(IntervalSet, Normalize)
+{
+ IntIntervals i;
+ // Test IntervalSet<T> + Interval<T> operator.
+ i = i + IntInterval(20, 30);
+ // Test Internal<T> + IntervalSet<T> operator.
+ i = IntInterval(2, 7) + i;
+ // Test Interval<T> + IntervalSet<T> operator
+ i = IntInterval(1, 8) + i;
+ IntIntervals interval;
+ interval += IntInterval(5, 10);
+ // Test += with rval move.
+ i += Duplicate(interval);
+ // Test = with move and add with move.
+ i = Duplicate(interval) + i;
+
+ // [1,8) + [2,7) + [5,10) merge into [1,10); [20,30) stays separate.
+ EXPECT_EQ(2u, i.Length());
+
+ EXPECT_EQ(1, i[0].mStart);
+ EXPECT_EQ(10, i[0].mEnd);
+
+ EXPECT_EQ(20, i[1].mStart);
+ EXPECT_EQ(30, i[1].mEnd);
+
+ media::TimeIntervals ti;
+ ti += media::TimeInterval(media::TimeUnit::FromSeconds(0.0),
+ media::TimeUnit::FromSeconds(3.203333));
+ ti += media::TimeInterval(media::TimeUnit::FromSeconds(3.203366),
+ media::TimeUnit::FromSeconds(10.010065));
+ EXPECT_EQ(2u, ti.Length());
+ // Re-adding the first range with a 35ms fuzz bridges the 33us gap.
+ ti += media::TimeInterval(ti.Start(0), ti.End(0),
+ media::TimeUnit::FromMicroseconds(35000));
+ EXPECT_EQ(1u, ti.Length());
+}
+
+// IntervalSet::Contains(value): start inclusive, end exclusive, gaps excluded.
+TEST(IntervalSet, ContainValue)
+{
+ IntIntervals i0;
+ i0 += IntInterval(0, 10);
+ i0 += IntInterval(15, 20);
+ i0 += IntInterval(30, 50);
+ EXPECT_TRUE(i0.Contains(0)); // start is inclusive.
+ EXPECT_TRUE(i0.Contains(17));
+ EXPECT_FALSE(i0.Contains(20)); // end boundary is exclusive.
+ EXPECT_FALSE(i0.Contains(25));
+}
+
+// Contains(value) honors a per-interval fuzz: the fuzzed interval's bounds
+// are widened by the fuzz amount.
+TEST(IntervalSet, ContainValueWithFuzz)
+{
+ IntIntervals i0;
+ i0 += IntInterval(0, 10);
+ i0 += IntInterval(15, 20, 1);
+ i0 += IntInterval(30, 50);
+ EXPECT_TRUE(i0.Contains(0)); // start is inclusive.
+ EXPECT_TRUE(i0.Contains(17));
+ EXPECT_TRUE(
+ i0.Contains(20)); // end boundary is exclusive but we have a fuzz of 1.
+ EXPECT_FALSE(i0.Contains(25));
+}
+
+// Contains(interval): true only when some single member interval fully
+// covers the argument; spanning a gap is not containment.
+TEST(IntervalSet, ContainInterval)
+{
+ IntIntervals i0;
+ i0 += IntInterval(0, 10);
+ i0 += IntInterval(15, 20);
+ i0 += IntInterval(30, 50);
+ EXPECT_TRUE(i0.Contains(IntInterval(2, 8)));
+ EXPECT_TRUE(i0.Contains(IntInterval(31, 50)));
+ EXPECT_TRUE(i0.Contains(IntInterval(0, 10)));
+ EXPECT_FALSE(i0.Contains(IntInterval(0, 11)));
+ EXPECT_TRUE(i0.Contains(IntInterval(0, 5)));
+ EXPECT_FALSE(i0.Contains(IntInterval(8, 15)));
+ EXPECT_FALSE(i0.Contains(IntInterval(15, 30)));
+ EXPECT_FALSE(i0.Contains(IntInterval(30, 55)));
+}
+
+// Contains(interval) with fuzz on either side of the comparison: fuzz on
+// the argument (i0 cases) or fuzz on the set's own intervals (i1 cases)
+// both widen the containment check.
+TEST(IntervalSet, ContainIntervalWithFuzz)
+{
+ IntIntervals i0;
+ i0 += IntInterval(0, 10);
+ i0 += IntInterval(15, 20);
+ i0 += IntInterval(30, 50);
+ EXPECT_TRUE(i0.Contains(IntInterval(2, 8)));
+ EXPECT_TRUE(i0.Contains(IntInterval(31, 50)));
+ EXPECT_TRUE(i0.Contains(IntInterval(0, 11, 1)));
+ EXPECT_TRUE(i0.Contains(IntInterval(0, 5)));
+ EXPECT_FALSE(i0.Contains(IntInterval(8, 15)));
+ EXPECT_FALSE(i0.Contains(IntInterval(15, 21)));
+ EXPECT_FALSE(i0.Contains(IntInterval(15, 30)));
+ EXPECT_FALSE(i0.Contains(IntInterval(30, 55)));
+
+ IntIntervals i1;
+ i1 += IntInterval(0, 10, 1);
+ i1 += IntInterval(15, 20, 1);
+ i1 += IntInterval(30, 50, 1);
+ EXPECT_TRUE(i1.Contains(IntInterval(2, 8)));
+ EXPECT_TRUE(i1.Contains(IntInterval(29, 51)));
+ EXPECT_TRUE(i1.Contains(IntInterval(0, 11, 1)));
+ EXPECT_TRUE(i1.Contains(IntInterval(15, 21)));
+}
+
+// Interval::Span returns the smallest interval covering both inputs,
+// including any gap between them.
+TEST(IntervalSet, Span)
+{
+ IntInterval i0(0, 10);
+ IntInterval i1(20, 30);
+ IntInterval i{i0.Span(i1)};
+
+ EXPECT_EQ(i.mStart, 0);
+ EXPECT_EQ(i.mEnd, 30);
+}
+
+// media::Union merges overlapping/abutting intervals across both sets but
+// preserves genuine gaps (e.g. [7,15) and [16,27) stay separate).
+TEST(IntervalSet, Union)
+{
+ IntIntervals i0;
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(20, 25);
+ i0 += IntInterval(40, 60);
+
+ IntIntervals i1;
+ i1.Add(IntInterval(7, 15));
+ i1.Add(IntInterval(16, 27));
+ i1.Add(IntInterval(45, 50));
+ i1.Add(IntInterval(53, 57));
+
+ IntIntervals i = media::Union(i0, i1);
+
+ EXPECT_EQ(3u, i.Length());
+
+ EXPECT_EQ(5, i[0].mStart);
+ EXPECT_EQ(15, i[0].mEnd);
+
+ EXPECT_EQ(16, i[1].mStart);
+ EXPECT_EQ(27, i[1].mEnd);
+
+ EXPECT_EQ(40, i[2].mStart);
+ EXPECT_EQ(60, i[2].mEnd);
+}
+
+// Same union as above with the inputs inserted out of order: the result
+// must be identical, proving insertion order is irrelevant.
+TEST(IntervalSet, UnionNotOrdered)
+{
+ IntIntervals i0;
+ i0 += IntInterval(20, 25);
+ i0 += IntInterval(40, 60);
+ i0 += IntInterval(5, 10);
+
+ IntIntervals i1;
+ i1.Add(IntInterval(16, 27));
+ i1.Add(IntInterval(7, 15));
+ i1.Add(IntInterval(53, 57));
+ i1.Add(IntInterval(45, 50));
+
+ IntIntervals i = media::Union(i0, i1);
+
+ EXPECT_EQ(3u, i.Length());
+
+ EXPECT_EQ(5, i[0].mStart);
+ EXPECT_EQ(15, i[0].mEnd);
+
+ EXPECT_EQ(16, i[1].mStart);
+ EXPECT_EQ(27, i[1].mEnd);
+
+ EXPECT_EQ(40, i[2].mStart);
+ EXPECT_EQ(60, i[2].mEnd);
+}
+
+// Normalization takes fuzz into account: [5,10)+fuzz1 touches [11,25), so
+// the two merge into [5,25).
+TEST(IntervalSet, NormalizeFuzz)
+{
+ IntIntervals i0;
+ i0 += IntInterval(11, 25, 0);
+ i0 += IntInterval(5, 10, 1);
+ i0 += IntInterval(40, 60, 1);
+
+ EXPECT_EQ(2u, i0.Length());
+
+ EXPECT_EQ(5, i0[0].mStart);
+ EXPECT_EQ(25, i0[0].mEnd);
+
+ EXPECT_EQ(40, i0[1].mStart);
+ EXPECT_EQ(60, i0[1].mEnd);
+}
+
+// Union where fuzz merges near-adjacent intervals both inside each input
+// set and across the two sets.
+TEST(IntervalSet, UnionFuzz)
+{
+ IntIntervals i0;
+ i0 += IntInterval(5, 10, 1);
+ i0 += IntInterval(11, 25, 0);
+ i0 += IntInterval(40, 60, 1);
+ EXPECT_EQ(2u, i0.Length());
+ EXPECT_EQ(5, i0[0].mStart);
+ EXPECT_EQ(25, i0[0].mEnd);
+ EXPECT_EQ(40, i0[1].mStart);
+ EXPECT_EQ(60, i0[1].mEnd);
+
+ IntIntervals i1;
+ i1.Add(IntInterval(7, 15, 1));
+ i1.Add(IntInterval(16, 27, 1));
+ i1.Add(IntInterval(45, 50, 1));
+ i1.Add(IntInterval(53, 57, 1));
+ EXPECT_EQ(3u, i1.Length());
+ EXPECT_EQ(7, i1[0].mStart);
+ EXPECT_EQ(27, i1[0].mEnd);
+ EXPECT_EQ(45, i1[1].mStart);
+ EXPECT_EQ(50, i1[1].mEnd);
+ EXPECT_EQ(53, i1[2].mStart);
+ EXPECT_EQ(57, i1[2].mEnd);
+
+ IntIntervals i = media::Union(i0, i1);
+
+ EXPECT_EQ(2u, i.Length());
+
+ EXPECT_EQ(5, i[0].mStart);
+ EXPECT_EQ(27, i[0].mEnd);
+
+ EXPECT_EQ(40, i[1].mStart);
+ EXPECT_EQ(60, i[1].mEnd);
+}
+
+// Interval::Contiguous: intervals are contiguous when touching exactly, or
+// when the gap is covered by either side's fuzz.
+TEST(IntervalSet, Contiguous)
+{
+ EXPECT_FALSE(IntInterval(5, 10).Contiguous(IntInterval(11, 25)));
+ EXPECT_TRUE(IntInterval(5, 10).Contiguous(IntInterval(10, 25)));
+ EXPECT_TRUE(IntInterval(5, 10, 1).Contiguous(IntInterval(11, 25)));
+ EXPECT_TRUE(IntInterval(5, 10).Contiguous(IntInterval(11, 25, 1)));
+}
+
+// Round-trip TimeIntervals -> dom::TimeRanges with second-granularity
+// values: length and every start/end must agree between the two views.
+TEST(IntervalSet, TimeRangesSeconds)
+{
+ media::TimeIntervals i0;
+ i0 += media::TimeInterval(media::TimeUnit::FromSeconds(20),
+ media::TimeUnit::FromSeconds(25));
+ i0 += media::TimeInterval(media::TimeUnit::FromSeconds(40),
+ media::TimeUnit::FromSeconds(60));
+ i0 += media::TimeInterval(media::TimeUnit::FromSeconds(5),
+ media::TimeUnit::FromSeconds(10));
+
+ media::TimeIntervals i1;
+ i1.Add(media::TimeInterval(media::TimeUnit::FromSeconds(16),
+ media::TimeUnit::FromSeconds(27)));
+ i1.Add(media::TimeInterval(media::TimeUnit::FromSeconds(7),
+ media::TimeUnit::FromSeconds(15)));
+ i1.Add(media::TimeInterval(media::TimeUnit::FromSeconds(53),
+ media::TimeUnit::FromSeconds(57)));
+ i1.Add(media::TimeInterval(media::TimeUnit::FromSeconds(45),
+ media::TimeUnit::FromSeconds(50)));
+
+ media::TimeIntervals i(i0 + i1);
+ RefPtr<dom::TimeRanges> tr = new dom::TimeRanges(i);
+ EXPECT_EQ(tr->Length(), i.Length());
+ for (dom::TimeRanges::index_type index = 0; index < tr->Length(); index++) {
+ ErrorResult rv;
+ EXPECT_EQ(tr->Start(index, rv), i[index].mStart.ToSeconds());
+ EXPECT_EQ(tr->Start(index, rv), i.Start(index).ToSeconds());
+ EXPECT_EQ(tr->End(index, rv), i[index].mEnd.ToSeconds());
+ EXPECT_EQ(tr->End(index, rv), i.End(index).ToSeconds());
+ }
+}
+
+// Assert that aTr (once normalized via a self-union) matches aTi range for
+// range. Works on a copy so the caller's TimeRanges is left untouched.
+static void CheckTimeRanges(dom::TimeRanges* aTr,
+ const media::TimeIntervals& aTi) {
+ RefPtr<dom::TimeRanges> tr = new dom::TimeRanges;
+ tr->Union(aTr, 0); // This will normalize the time range.
+ EXPECT_EQ(tr->Length(), aTi.Length());
+ for (dom::TimeRanges::index_type i = 0; i < tr->Length(); i++) {
+ ErrorResult rv;
+ EXPECT_EQ(tr->Start(i, rv), aTi[i].mStart.ToSeconds());
+ EXPECT_EQ(tr->Start(i, rv), aTi.Start(i).ToSeconds());
+ EXPECT_EQ(tr->End(i, rv), aTi[i].mEnd.ToSeconds());
+ EXPECT_EQ(tr->End(i, rv), aTi.End(i).ToSeconds());
+ }
+}
+
+// Conversions between dom::TimeRanges and media::TimeIntervals in both
+// directions preserve the (normalized) ranges.
+TEST(IntervalSet, TimeRangesConversion)
+{
+ RefPtr<dom::TimeRanges> tr = new dom::TimeRanges();
+ tr->Add(20, 25);
+ tr->Add(40, 60);
+ tr->Add(5, 10);
+ tr->Add(16, 27);
+ tr->Add(53, 57);
+ tr->Add(45, 50);
+
+ // explicit copy constructor and ToTimeIntervals.
+ media::TimeIntervals i1(tr->ToTimeIntervals());
+ CheckTimeRanges(tr, i1);
+
+ // ctor(const TimeIntervals&)
+ RefPtr<dom::TimeRanges> tr2 = new dom::TimeRanges(tr->ToTimeIntervals());
+ CheckTimeRanges(tr2, i1);
+}
+
+// Round-trip with microsecond-granularity values, before and after
+// TimeRanges::Normalize(), plus a check that infinite end times survive
+// the TimeRanges <-> TimeIntervals conversion.
+TEST(IntervalSet, TimeRangesMicroseconds)
+{
+ media::TimeIntervals i0;
+
+ i0 += media::TimeInterval(media::TimeUnit::FromMicroseconds(20),
+ media::TimeUnit::FromMicroseconds(25));
+ i0 += media::TimeInterval(media::TimeUnit::FromMicroseconds(40),
+ media::TimeUnit::FromMicroseconds(60));
+ i0 += media::TimeInterval(media::TimeUnit::FromMicroseconds(5),
+ media::TimeUnit::FromMicroseconds(10));
+
+ media::TimeIntervals i1;
+ i1.Add(media::TimeInterval(media::TimeUnit::FromMicroseconds(16),
+ media::TimeUnit::FromMicroseconds(27)));
+ i1.Add(media::TimeInterval(media::TimeUnit::FromMicroseconds(7),
+ media::TimeUnit::FromMicroseconds(15)));
+ i1.Add(media::TimeInterval(media::TimeUnit::FromMicroseconds(53),
+ media::TimeUnit::FromMicroseconds(57)));
+ i1.Add(media::TimeInterval(media::TimeUnit::FromMicroseconds(45),
+ media::TimeUnit::FromMicroseconds(50)));
+
+ media::TimeIntervals i(i0 + i1);
+ RefPtr<dom::TimeRanges> tr = new dom::TimeRanges(i);
+ EXPECT_EQ(tr->Length(), i.Length());
+ for (dom::TimeRanges::index_type index = 0; index < tr->Length(); index++) {
+ ErrorResult rv;
+ EXPECT_EQ(tr->Start(index, rv), i[index].mStart.ToSeconds());
+ EXPECT_EQ(tr->Start(index, rv), i.Start(index).ToSeconds());
+ EXPECT_EQ(tr->End(index, rv), i[index].mEnd.ToSeconds());
+ EXPECT_EQ(tr->End(index, rv), i.End(index).ToSeconds());
+ }
+
+ // Normalizing an already-normalized TimeRanges must be a no-op.
+ tr->Normalize();
+ EXPECT_EQ(tr->Length(), i.Length());
+ for (dom::TimeRanges::index_type index = 0; index < tr->Length(); index++) {
+ ErrorResult rv;
+ EXPECT_EQ(tr->Start(index, rv), i[index].mStart.ToSeconds());
+ EXPECT_EQ(tr->Start(index, rv), i.Start(index).ToSeconds());
+ EXPECT_EQ(tr->End(index, rv), i[index].mEnd.ToSeconds());
+ EXPECT_EQ(tr->End(index, rv), i.End(index).ToSeconds());
+ }
+
+ // Check infinity values aren't lost in the conversion.
+ tr = new dom::TimeRanges();
+ tr->Add(0, 30);
+ tr->Add(50, std::numeric_limits<double>::infinity());
+ media::TimeIntervals i_oo = tr->ToTimeIntervals();
+ RefPtr<dom::TimeRanges> tr2 = new dom::TimeRanges(i_oo);
+ EXPECT_EQ(tr->Length(), tr2->Length());
+ for (dom::TimeRanges::index_type index = 0; index < tr->Length(); index++) {
+ ErrorResult rv;
+ EXPECT_EQ(tr->Start(index, rv), tr2->Start(index, rv));
+ EXPECT_EQ(tr->End(index, rv), tr2->End(index, rv));
+ }
+}
+
+// Minimal arithmetic, ordered type used to instantiate Interval/IntervalSet
+// with a non-primitive T. Only mArg1 participates in the comparison
+// operators; mArg2/mArg3 just ride along through +/-.
+template <typename T>
+class Foo {
+ public:
+ Foo() : mArg1(1), mArg2(2), mArg3(3) {}
+
+ Foo(T a1, T a2, T a3) : mArg1(a1), mArg2(a2), mArg3(a3) {}
+
+ Foo<T> operator+(const Foo<T>& aOther) const {
+ Foo<T> blah;
+ blah.mArg1 += aOther.mArg1;
+ blah.mArg2 += aOther.mArg2;
+ blah.mArg3 += aOther.mArg3;
+ return blah;
+ }
+ Foo<T> operator-(const Foo<T>& aOther) const {
+ Foo<T> blah;
+ blah.mArg1 -= aOther.mArg1;
+ blah.mArg2 -= aOther.mArg2;
+ blah.mArg3 -= aOther.mArg3;
+ return blah;
+ }
+ // Ordering/equality consider mArg1 only.
+ bool operator<(const Foo<T>& aOther) const { return mArg1 < aOther.mArg1; }
+ bool operator==(const Foo<T>& aOther) const { return mArg1 == aOther.mArg1; }
+ bool operator<=(const Foo<T>& aOther) const { return mArg1 <= aOther.mArg1; }
+
+ private:
+ int32_t mArg1;
+ int32_t mArg2;
+ int32_t mArg3;
+};
+
+// IntervalSet works with a user-defined element type: adding the same
+// interval repeatedly through every add path still normalizes to one entry.
+TEST(IntervalSet, FooIntervalSet)
+{
+ media::Interval<Foo<int>> i(Foo<int>(), Foo<int>(4, 5, 6));
+ media::IntervalSet<Foo<int>> is;
+ is += i;
+ is += i;
+ is.Add(i);
+ is = is + i;
+ is = i + is;
+ EXPECT_EQ(1u, is.Length());
+ EXPECT_EQ(Foo<int>(), is[0].mStart);
+ EXPECT_EQ(Foo<int>(4, 5, 6), is[0].mEnd);
+}
+
+// Compile-time check that nsTArray relocates IntervalSet elements with the
+// move constructor. The previous assertion messages said "Must use copy
+// constructor", contradicting the asserted strategy; an unused local
+// Interval<int> has also been removed.
+TEST(IntervalSet, StaticAssert)
+{
+ static_assert(
+ std::is_same_v<nsTArray_RelocationStrategy<IntIntervals>::Type,
+ nsTArray_RelocateUsingMoveConstructor<IntIntervals>>,
+ "Must use move constructor");
+ static_assert(
+ std::is_same_v<
+ nsTArray_RelocationStrategy<media::TimeIntervals>::Type,
+ nsTArray_RelocateUsingMoveConstructor<media::TimeIntervals>>,
+ "Must use move constructor");
+}
+
+// Set/interval subtraction: trimming, complete removal, hole punching, and
+// fuzz propagation through a subtraction.
+// NOTE(review): the test name is misspelled ("Substraction" rather than
+// "Subtraction"); left as-is because renaming changes the reported test id.
+TEST(IntervalSet, Substraction)
+{
+ IntIntervals i0;
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(20, 25);
+ i0 += IntInterval(40, 60);
+
+ // Subtrahend overlaps only the tail of the first interval.
+ IntInterval i1(8, 15);
+ i0 -= i1;
+
+ EXPECT_EQ(3u, i0.Length());
+ EXPECT_EQ(5, i0[0].mStart);
+ EXPECT_EQ(8, i0[0].mEnd);
+ EXPECT_EQ(20, i0[1].mStart);
+ EXPECT_EQ(25, i0[1].mEnd);
+ EXPECT_EQ(40, i0[2].mStart);
+ EXPECT_EQ(60, i0[2].mEnd);
+
+ // Subtrahend covers the whole set: result is empty.
+ i0 = IntIntervals();
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(20, 25);
+ i0 += IntInterval(40, 60);
+ i1 = IntInterval(0, 60);
+ i0 -= i1;
+ EXPECT_TRUE(i0.IsEmpty());
+
+ // Subtrahend covers everything up to the middle of the last interval.
+ i0 = IntIntervals();
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(20, 25);
+ i0 += IntInterval(40, 60);
+ i1 = IntInterval(0, 45);
+ i0 -= i1;
+ EXPECT_EQ(1u, i0.Length());
+ EXPECT_EQ(45, i0[0].mStart);
+ EXPECT_EQ(60, i0[0].mEnd);
+
+ // Subtrahend spans the middle: first and last intervals are trimmed.
+ i0 = IntIntervals();
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(20, 25);
+ i0 += IntInterval(40, 60);
+ i1 = IntInterval(8, 45);
+ i0 -= i1;
+ EXPECT_EQ(2u, i0.Length());
+ EXPECT_EQ(5, i0[0].mStart);
+ EXPECT_EQ(8, i0[0].mEnd);
+ EXPECT_EQ(45, i0[1].mStart);
+ EXPECT_EQ(60, i0[1].mEnd);
+
+ // Subtrahend runs past the end: only the head of the first interval stays.
+ i0 = IntIntervals();
+ i0 += IntInterval(5, 10);
+ i0 += IntInterval(20, 25);
+ i0 += IntInterval(40, 60);
+ i1 = IntInterval(8, 70);
+ i0 -= i1;
+ EXPECT_EQ(1u, i0.Length());
+ EXPECT_EQ(5, i0[0].mStart);
+ EXPECT_EQ(8, i0[0].mEnd);
+
+ // Subtracting an interior interval punches a hole.
+ i0 = IntIntervals();
+ i0 += IntInterval(0, 10);
+ IntIntervals i2;
+ i2 += IntInterval(4, 6);
+ i0 -= i2;
+ EXPECT_EQ(2u, i0.Length());
+ EXPECT_EQ(0, i0[0].mStart);
+ EXPECT_EQ(4, i0[0].mEnd);
+ EXPECT_EQ(6, i0[1].mStart);
+ EXPECT_EQ(10, i0[1].mEnd);
+
+ i0 = IntIntervals();
+ i0 += IntInterval(0, 1);
+ i0 += IntInterval(3, 10);
+ EXPECT_EQ(2u, i0.Length());
+ // This fuzz should collapse i0 into [0,10).
+ i0.SetFuzz(1);
+ EXPECT_EQ(1u, i0.Length());
+ EXPECT_EQ(1, i0[0].mFuzz);
+ i2 = IntInterval(4, 6);
+ i0 -= i2;
+ EXPECT_EQ(2u, i0.Length());
+ EXPECT_EQ(0, i0[0].mStart);
+ EXPECT_EQ(4, i0[0].mEnd);
+ EXPECT_EQ(6, i0[1].mStart);
+ EXPECT_EQ(10, i0[1].mEnd);
+ // Fuzz survives the subtraction on both remaining pieces.
+ EXPECT_EQ(1, i0[0].mFuzz);
+ EXPECT_EQ(1, i0[1].mFuzz);
+
+ i0 = IntIntervals();
+ i0 += IntInterval(0, 10);
+ // [4,6) with fuzz 1 used to fail because the complementary interval set
+ // [0,4)+[6,10) would collapse into [0,10).
+ i2 = IntInterval(4, 6);
+ i2.SetFuzz(1);
+ i0 -= i2;
+ EXPECT_EQ(2u, i0.Length());
+ EXPECT_EQ(0, i0[0].mStart);
+ EXPECT_EQ(4, i0[0].mEnd);
+ EXPECT_EQ(6, i0[1].mStart);
+ EXPECT_EQ(10, i0[1].mEnd);
+}
diff --git a/dom/media/gtest/TestKeyValueStorage.cpp b/dom/media/gtest/TestKeyValueStorage.cpp
new file mode 100644
index 0000000000..7ba65343e3
--- /dev/null
+++ b/dom/media/gtest/TestKeyValueStorage.cpp
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/KeyValueStorage.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest.h"
+
+#include "GMPTestMonitor.h"
+
+using ::testing::Return;
+using namespace mozilla;
+
+// Put then Get on the same key resolves with the stored value. The promise
+// chain runs on the current serial event target; GMPTestMonitor blocks the
+// test until SetFinished() is called from a callback.
+TEST(TestKeyValueStorage, BasicPutGet)
+{
+ auto kvs = MakeRefPtr<KeyValueStorage>();
+
+ nsCString name("database_name");
+ nsCString key("key1");
+ int32_t value = 100;
+
+ GMPTestMonitor mon;
+
+ kvs->Put(name, key, value)
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [&](bool) { return kvs->Get(name, key); },
+ [](nsresult rv) {
+ // Reject path: fail the test and forward a rejected GetPromise
+ // so the chain still settles.
+ EXPECT_TRUE(false) << "Put promise has been rejected";
+ return KeyValueStorage::GetPromise::CreateAndReject(rv, __func__);
+ })
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [&](int32_t aValue) {
+ EXPECT_EQ(aValue, value) << "Values are the same";
+ mon.SetFinished();
+ },
+ [&](nsresult rv) {
+ EXPECT_TRUE(false) << "Get Promise has been rejected";
+ mon.SetFinished();
+ });
+
+ mon.AwaitFinished();
+}
+
+// Get on a missing key resolves (not rejects) with the sentinel value -1.
+TEST(TestKeyValueStorage, GetNonExistedKey)
+{
+ auto kvs = MakeRefPtr<KeyValueStorage>();
+
+ nsCString name("database_name");
+ nsCString key("NonExistedKey");
+
+ GMPTestMonitor mon;
+
+ kvs->Get(name, key)->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [&mon](int32_t aValue) {
+ EXPECT_EQ(aValue, -1) << "When key does not exist return -1";
+ mon.SetFinished();
+ },
+ [&mon](nsresult rv) {
+ EXPECT_TRUE(false) << "Get Promise has been rejected";
+ mon.SetFinished();
+ });
+
+ mon.AwaitFinished();
+}
+
+// Put, Clear the database, then Get: the key must be gone, i.e. Get
+// resolves with the missing-key sentinel -1.
+TEST(TestKeyValueStorage, Clear)
+{
+ auto kvs = MakeRefPtr<KeyValueStorage>();
+
+ nsCString name("database_name");
+ nsCString key("key1");
+ int32_t value = 100;
+
+ GMPTestMonitor mon;
+
+ kvs->Put(name, key, value)
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [&](bool) { return kvs->Clear(name); },
+ [](nsresult rv) {
+ EXPECT_TRUE(false) << "Put promise has been rejected";
+ return GenericPromise::CreateAndReject(rv, __func__);
+ })
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [&](bool) { return kvs->Get(name, key); },
+ [](nsresult rv) {
+ EXPECT_TRUE(false) << "Clear promise has been rejected";
+ return KeyValueStorage::GetPromise::CreateAndReject(rv, __func__);
+ })
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [&](int32_t aValue) {
+ EXPECT_EQ(aValue, -1) << "After clear the key does not exist";
+ mon.SetFinished();
+ },
+ [&](nsresult rv) {
+ EXPECT_TRUE(false) << "Get Promise has been rejected";
+ mon.SetFinished();
+ });
+
+ mon.AwaitFinished();
+}
diff --git a/dom/media/gtest/TestMP3Demuxer.cpp b/dom/media/gtest/TestMP3Demuxer.cpp
new file mode 100644
index 0000000000..f6c589b835
--- /dev/null
+++ b/dom/media/gtest/TestMP3Demuxer.cpp
@@ -0,0 +1,579 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <gtest/gtest.h>
+#include <vector>
+
+#include "MP3Demuxer.h"
+#include "mozilla/ArrayUtils.h"
+#include "MockMediaResource.h"
+
+class MockMP3MediaResource;
+class MockMP3StreamMediaResource;
+namespace mozilla {
+DDLoggedTypeNameAndBase(::MockMP3MediaResource, MockMediaResource);
+DDLoggedTypeNameAndBase(::MockMP3StreamMediaResource, MockMP3MediaResource);
+} // namespace mozilla
+
+using namespace mozilla;
+using media::TimeUnit;
+
+// Regular MP3 file mock resource.
+class MockMP3MediaResource
+ : public MockMediaResource,
+ public DecoderDoctorLifeLogger<MockMP3MediaResource> {
+ public:
+ // aFileName: test asset opened later via MockMediaResource::Open().
+ explicit MockMP3MediaResource(const char* aFileName)
+ : MockMediaResource(aFileName) {}
+
+ protected:
+ // Refcounted object; destruction happens through Release(), hence the
+ // non-public virtual destructor.
+ virtual ~MockMP3MediaResource() = default;
+};
+
+// MP3 stream mock resource.
+class MockMP3StreamMediaResource
+ : public MockMP3MediaResource,
+ public DecoderDoctorLifeLogger<MockMP3StreamMediaResource> {
+ public:
+ explicit MockMP3StreamMediaResource(const char* aFileName)
+ : MockMP3MediaResource(aFileName) {}
+
+ // Report an unknown content length (-1) to make the demuxer treat this
+ // resource as a live/endless stream.
+ int64_t GetLength() override { return -1; }
+
+ protected:
+ virtual ~MockMP3StreamMediaResource() = default;
+};
+
+// Expected properties of one MP3 test asset, together with the live
+// resource/demuxer pair the tests run against.
+struct MP3Resource {
+ enum class HeaderType { NONE, XING, VBRI };
+ struct Duration {
+ int64_t mMicroseconds;
+ float mTolerableRate;
+
+ Duration(int64_t aMicroseconds, float aTolerableRate)
+ : mMicroseconds(aMicroseconds), mTolerableRate(aTolerableRate) {}
+ // Absolute tolerance in microseconds derived from the relative rate.
+ int64_t Tolerance() const {
+ return AssertedCast<int64_t>(mTolerableRate *
+ static_cast<float>(mMicroseconds));
+ }
+ };
+
+ const char* mFilePath{};
+ bool mIsVBR{};
+ HeaderType mHeaderType{HeaderType::NONE};
+ // -1 for stream resources whose length is unknown.
+ int64_t mFileSize{};
+ uint32_t mMPEGLayer{};
+ uint32_t mMPEGVersion{};
+ uint8_t mID3MajorVersion{};
+ uint8_t mID3MinorVersion{};
+ uint8_t mID3Flags{};
+ uint32_t mID3Size{};
+
+ // Nothing() when no duration can be estimated (e.g. CBR streams).
+ Maybe<Duration> mDuration;
+ // Relative tolerance applied to seek-position checks.
+ float mSeekError{};
+ uint32_t mSampleRate{};
+ uint32_t mSamplesPerFrame{};
+ uint32_t mNumSamples{};
+ uint32_t mPadding{};
+ uint32_t mEncoderDelay{};
+ uint32_t mBitrate{};
+ uint32_t mSlotSize{};
+ int32_t mPrivate{};
+
+ // The first n frame offsets.
+ std::vector<int32_t> mSyncOffsets;
+ RefPtr<MockMP3MediaResource> mResource;
+ RefPtr<MP3TrackDemuxer> mDemuxer;
+};
+
+// Fixture that builds the list of MP3 test targets. Each asset is added
+// twice: once as a regular file resource and once as a stream resource
+// (unknown length), so every test runs against both code paths.
+class MP3DemuxerTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ {
+ MP3Resource res;
+ res.mFilePath = "noise.mp3";
+ res.mIsVBR = false;
+ res.mHeaderType = MP3Resource::HeaderType::NONE;
+ res.mFileSize = 965257;
+ res.mMPEGLayer = 3;
+ res.mMPEGVersion = 1;
+ res.mID3MajorVersion = 3;
+ res.mID3MinorVersion = 0;
+ res.mID3Flags = 0;
+ res.mID3Size = 2141;
+ // The tolerance comes from the fact that this file has ID3v1 information
+ // at the end, this trips our CBR duration calculation. The file has
+ // however the correct duration when decoded / demuxed completely.
+ res.mDuration = Some(MP3Resource::Duration{30093063, 0.00015f});
+ res.mSeekError = 0.02f;
+ res.mSampleRate = 44100;
+ res.mSamplesPerFrame = 1152;
+ res.mNumSamples = 1327104;
+ res.mPadding = 0;
+ res.mEncoderDelay = 0;
+ res.mBitrate = 256000;
+ res.mSlotSize = 1;
+ res.mPrivate = 0;
+ const int syncs[] = {2151, 2987, 3823, 4659, 5495, 6331};
+ res.mSyncOffsets.insert(res.mSyncOffsets.begin(), syncs, syncs + 6);
+
+ // No content length can be estimated for CBR stream resources.
+ MP3Resource streamRes = res;
+ streamRes.mFileSize = -1;
+ streamRes.mDuration = Nothing();
+
+ res.mResource = new MockMP3MediaResource(res.mFilePath);
+ res.mDemuxer = new MP3TrackDemuxer(res.mResource);
+ mTargets.push_back(res);
+
+ streamRes.mResource = new MockMP3StreamMediaResource(streamRes.mFilePath);
+ streamRes.mDemuxer = new MP3TrackDemuxer(streamRes.mResource);
+ mTargets.push_back(streamRes);
+ }
+
+ {
+ MP3Resource res;
+ // This file trips up the MP3 demuxer if ID3v2 tags aren't properly
+ // skipped. If skipping is not properly implemented, depending on the
+ // strictness of the MPEG frame parser a false sync will be detected
+ // somewhere within the metadata at or after 112087, or failing that, at
+ // the artificially added extraneous header at 114532.
+ res.mFilePath = "id3v2header.mp3";
+ res.mIsVBR = false;
+ res.mHeaderType = MP3Resource::HeaderType::NONE;
+ res.mFileSize = 191302;
+ res.mMPEGLayer = 3;
+ res.mMPEGVersion = 1;
+ res.mID3MajorVersion = 3;
+ res.mID3MinorVersion = 0;
+ res.mID3Flags = 0;
+ res.mID3Size = 115304;
+ // The tolerance comes from the fact that this file has ID3v1 information
+ // at the end, this trips our CBR duration calculation. The file has
+ // however the correct duration when decoded / demuxed completely.
+ res.mDuration = Some(MP3Resource::Duration{3160833, 0.0017f});
+ res.mSeekError = 0.02f;
+ res.mSampleRate = 44100;
+ res.mSamplesPerFrame = 1152;
+ res.mNumSamples = 139392;
+ res.mPadding = 0;
+ res.mEncoderDelay = 0;
+ res.mBitrate = 192000;
+ res.mSlotSize = 1;
+ res.mPrivate = 1;
+ const int syncs[] = {115314, 115941, 116568, 117195, 117822, 118449};
+ res.mSyncOffsets.insert(res.mSyncOffsets.begin(), syncs, syncs + 6);
+
+ // No content length can be estimated for CBR stream resources.
+ MP3Resource streamRes = res;
+ streamRes.mFileSize = -1;
+ streamRes.mDuration = Nothing();
+
+ res.mResource = new MockMP3MediaResource(res.mFilePath);
+ res.mDemuxer = new MP3TrackDemuxer(res.mResource);
+ mTargets.push_back(res);
+
+ streamRes.mResource = new MockMP3StreamMediaResource(streamRes.mFilePath);
+ streamRes.mDemuxer = new MP3TrackDemuxer(streamRes.mResource);
+ mTargets.push_back(streamRes);
+ }
+
+ {
+ MP3Resource res;
+ res.mFilePath = "noise_vbr.mp3";
+ res.mIsVBR = true;
+ res.mHeaderType = MP3Resource::HeaderType::XING;
+ res.mFileSize = 583679;
+ res.mMPEGLayer = 3;
+ res.mMPEGVersion = 1;
+ res.mID3MajorVersion = 3;
+ res.mID3MinorVersion = 0;
+ res.mID3Flags = 0;
+ res.mID3Size = 2221;
+ res.mDuration = Some(MP3Resource::Duration{30081065, 0.f});
+ res.mSeekError = 0.02f;
+ res.mSampleRate = 44100;
+ res.mSamplesPerFrame = 1152;
+ res.mNumSamples = 1326575;
+ res.mPadding = 576;
+ res.mEncoderDelay = 2257;
+ res.mBitrate = 154000;
+ res.mSlotSize = 1;
+ res.mPrivate = 0;
+ const int syncs[] = {2231, 2648, 2752, 3796, 4318, 4735};
+ res.mSyncOffsets.insert(res.mSyncOffsets.begin(), syncs, syncs + 6);
+
+ // VBR stream resources contain header info on total frames numbers, which
+ // is used to estimate the total duration.
+ MP3Resource streamRes = res;
+ streamRes.mFileSize = -1;
+
+ res.mResource = new MockMP3MediaResource(res.mFilePath);
+ res.mDemuxer = new MP3TrackDemuxer(res.mResource);
+ mTargets.push_back(res);
+
+ streamRes.mResource = new MockMP3StreamMediaResource(streamRes.mFilePath);
+ streamRes.mDemuxer = new MP3TrackDemuxer(streamRes.mResource);
+ mTargets.push_back(streamRes);
+ }
+
+ {
+ MP3Resource res;
+ res.mFilePath = "small-shot.mp3";
+ res.mIsVBR = true;
+ res.mHeaderType = MP3Resource::HeaderType::XING;
+ res.mFileSize = 6825;
+ res.mMPEGLayer = 3;
+ res.mMPEGVersion = 1;
+ res.mID3MajorVersion = 4;
+ res.mID3MinorVersion = 0;
+ res.mID3Flags = 0;
+ res.mID3Size = 24;
+ res.mDuration = Some(MP3Resource::Duration{301473, 0.f});
+ res.mSeekError = 0.2f;
+ res.mSampleRate = 44100;
+ res.mSamplesPerFrame = 1152;
+ res.mNumSamples = 12;
+ res.mPadding = 0;
+ res.mEncoderDelay = 1152 + 529;
+ res.mBitrate = 256000;
+ res.mSlotSize = 1;
+ res.mPrivate = 0;
+ const int syncs[] = {34, 556, 1078, 1601, 2123, 2646, 3168,
+ 3691, 4213, 4736, 5258, 5781, 6303};
+ res.mSyncOffsets.insert(res.mSyncOffsets.begin(), syncs, syncs + 13);
+
+ // No content length can be estimated for CBR stream resources.
+ MP3Resource streamRes = res;
+ streamRes.mFileSize = -1;
+
+ res.mResource = new MockMP3MediaResource(res.mFilePath);
+ res.mDemuxer = new MP3TrackDemuxer(res.mResource);
+ mTargets.push_back(res);
+
+ streamRes.mResource = new MockMP3StreamMediaResource(streamRes.mFilePath);
+ streamRes.mDemuxer = new MP3TrackDemuxer(streamRes.mResource);
+ mTargets.push_back(streamRes);
+ }
+
+ {
+ MP3Resource res;
+ // This file contains a false frame sync at 34, just after the ID3 tag,
+ // which should be identified as a false positive and skipped.
+ res.mFilePath = "small-shot-false-positive.mp3";
+ res.mIsVBR = true;
+ res.mHeaderType = MP3Resource::HeaderType::XING;
+ res.mFileSize = 6845;
+ res.mMPEGLayer = 3;
+ res.mMPEGVersion = 1;
+ res.mID3MajorVersion = 4;
+ res.mID3MinorVersion = 0;
+ res.mID3Flags = 0;
+ res.mID3Size = 24;
+ res.mDuration = Some(MP3Resource::Duration{301473, 0.f});
+ res.mSeekError = 0.2f;
+ res.mSampleRate = 44100;
+ res.mSamplesPerFrame = 1152;
+ res.mNumSamples = 12;
+ res.mPadding = 0;
+ res.mEncoderDelay = 1681;
+ res.mBitrate = 256000;
+ res.mSlotSize = 1;
+ res.mPrivate = 0;
+ const int syncs[] = {54, 576, 1098, 1621, 2143, 2666, 3188,
+ 3711, 4233, 4756, 5278, 5801, 6323};
+ res.mSyncOffsets.insert(res.mSyncOffsets.begin(), syncs, syncs + 13);
+
+ // No content length can be estimated for CBR stream resources.
+ MP3Resource streamRes = res;
+ streamRes.mFileSize = -1;
+
+ res.mResource = new MockMP3MediaResource(res.mFilePath);
+ res.mDemuxer = new MP3TrackDemuxer(res.mResource);
+ mTargets.push_back(res);
+
+ streamRes.mResource = new MockMP3StreamMediaResource(streamRes.mFilePath);
+ streamRes.mDemuxer = new MP3TrackDemuxer(streamRes.mResource);
+ mTargets.push_back(streamRes);
+ }
+
+ {
+ MP3Resource res;
+ res.mFilePath = "small-shot-partial-xing.mp3";
+ res.mIsVBR = true;
+ res.mHeaderType = MP3Resource::HeaderType::XING;
+ res.mFileSize = 6825;
+ res.mMPEGLayer = 3;
+ res.mMPEGVersion = 1;
+ res.mID3MajorVersion = 4;
+ res.mID3MinorVersion = 0;
+ res.mID3Flags = 0;
+ res.mID3Size = 24;
+ res.mDuration = Some(MP3Resource::Duration{301473, 0.f});
+ res.mSeekError = 0.2f;
+ res.mSampleRate = 44100;
+ res.mSamplesPerFrame = 1152;
+ res.mNumSamples = 12;
+ res.mPadding = 0;
+ res.mEncoderDelay = 1681;
+ res.mBitrate = 256000;
+ res.mSlotSize = 1;
+ res.mPrivate = 0;
+ const int syncs[] = {34, 556, 1078, 1601, 2123, 2646, 3168,
+ 3691, 4213, 4736, 5258, 5781, 6303};
+ res.mSyncOffsets.insert(res.mSyncOffsets.begin(), syncs, syncs + 13);
+
+ // No content length can be estimated for CBR stream resources.
+ MP3Resource streamRes = res;
+ streamRes.mFileSize = -1;
+
+ res.mResource = new MockMP3MediaResource(res.mFilePath);
+ res.mDemuxer = new MP3TrackDemuxer(res.mResource);
+ mTargets.push_back(res);
+
+ streamRes.mResource = new MockMP3StreamMediaResource(streamRes.mFilePath);
+ streamRes.mDemuxer = new MP3TrackDemuxer(streamRes.mResource);
+ mTargets.push_back(streamRes);
+ }
+
+ {
+ MP3Resource res;
+ res.mFilePath = "test_vbri.mp3";
+ res.mIsVBR = true;
+ res.mHeaderType = MP3Resource::HeaderType::VBRI;
+ res.mFileSize = 16519;
+ res.mMPEGLayer = 3;
+ res.mMPEGVersion = 1;
+ res.mID3MajorVersion = 3;
+ res.mID3MinorVersion = 0;
+ res.mID3Flags = 0;
+ res.mID3Size = 4202;
+ res.mDuration = Some(MP3Resource::Duration{731428, 0.f});
+ res.mSeekError = 0.02f;
+ res.mSampleRate = 44100;
+ res.mSamplesPerFrame = 1152;
+ res.mNumSamples = 29;
+ res.mPadding = 0;
+ res.mEncoderDelay = 1152;
+ res.mBitrate = 0;
+ res.mSlotSize = 1;
+ res.mPrivate = 0;
+ const int syncs[] = {4212, 4734, 5047, 5464, 5986, 6403};
+ res.mSyncOffsets.insert(res.mSyncOffsets.begin(), syncs, syncs + 6);
+
+ // VBR stream resources contain header info on total frames numbers, which
+ // is used to estimate the total duration.
+ MP3Resource streamRes = res;
+ streamRes.mFileSize = -1;
+
+ res.mResource = new MockMP3MediaResource(res.mFilePath);
+ res.mDemuxer = new MP3TrackDemuxer(res.mResource);
+ mTargets.push_back(res);
+
+ streamRes.mResource = new MockMP3StreamMediaResource(streamRes.mFilePath);
+ streamRes.mDemuxer = new MP3TrackDemuxer(streamRes.mResource);
+ mTargets.push_back(streamRes);
+ }
+
+ // Open every resource and initialize its demuxer before any test runs.
+ for (auto& target : mTargets) {
+ ASSERT_EQ(NS_OK, target.mResource->Open());
+ ASSERT_TRUE(target.mDemuxer->Init());
+ }
+ }
+
+ std::vector<MP3Resource> mTargets;
+};
+
+// Every target file carries an ID3v2 tag; check version, flags and size
+// against the expected values recorded in the fixture.
+TEST_F(MP3DemuxerTest, ID3Tags) {
+ for (const auto& target : mTargets) {
+ // Demux one frame first so the demuxer has parsed the stream metadata.
+ RefPtr<MediaRawData> frame(target.mDemuxer->DemuxSample());
+ ASSERT_TRUE(frame);
+
+ const auto& id3 = target.mDemuxer->ID3Header();
+ ASSERT_TRUE(id3.IsValid());
+
+ EXPECT_EQ(target.mID3MajorVersion, id3.MajorVersion());
+ EXPECT_EQ(target.mID3MinorVersion, id3.MinorVersion());
+ EXPECT_EQ(target.mID3Flags, id3.Flags());
+ EXPECT_EQ(target.mID3Size, id3.Size());
+ }
+}
+
+// Checks that the detected VBR header type (XING / VBRI / NONE) matches the
+// expectation recorded for each target.
+TEST_F(MP3DemuxerTest, VBRHeader) {
+ for (const auto& target : mTargets) {
+ // Demux one frame so VBR info has been parsed.
+ RefPtr<MediaRawData> frame(target.mDemuxer->DemuxSample());
+ ASSERT_TRUE(frame);
+
+ const auto& vbr = target.mDemuxer->VBRInfo();
+
+ if (target.mHeaderType == MP3Resource::HeaderType::XING) {
+ EXPECT_EQ(FrameParser::VBRHeader::XING, vbr.Type());
+ } else if (target.mHeaderType == MP3Resource::HeaderType::VBRI) {
+ EXPECT_TRUE(target.mIsVBR);
+ EXPECT_EQ(FrameParser::VBRHeader::VBRI, vbr.Type());
+ } else { // MP3Resource::HeaderType::NONE
+ EXPECT_EQ(FrameParser::VBRHeader::NONE, vbr.Type());
+ // Without a VBR header there is no frame-count information.
+ EXPECT_FALSE(vbr.NumAudioFrames());
+ }
+ }
+}
+
+// Demuxes every frame of every target and validates per-frame header fields,
+// the recorded sync offsets, padding / encoder delay, and (for VBR files)
+// the average bitrate.
+TEST_F(MP3DemuxerTest, FrameParsing) {
+ for (const auto& target : mTargets) {
+ printf("Testing: %s\n", target.mFilePath);
+ RefPtr<MediaRawData> frameData(target.mDemuxer->DemuxSample());
+ ASSERT_TRUE(frameData);
+ EXPECT_EQ(target.mFileSize, target.mDemuxer->StreamLength());
+
+ const auto& id3 = target.mDemuxer->ID3Header();
+ ASSERT_TRUE(id3.IsValid());
+
+ // Start at the ID3 tag size; every demuxed frame adds its byte length.
+ int64_t parsedLength = id3.Size();
+ uint64_t bitrateSum = 0;
+ uint32_t numFrames = 0;
+ uint32_t numSamples = 0;
+
+ while (frameData) {
+ if (static_cast<int64_t>(target.mSyncOffsets.size()) > numFrames) {
+ // Test sync offsets.
+ EXPECT_EQ(target.mSyncOffsets[numFrames], frameData->mOffset);
+ }
+
+ ++numFrames;
+ parsedLength += AssertedCast<int64_t>(frameData->Size());
+
+ const auto& frame = target.mDemuxer->LastFrame();
+ const auto& header = frame.Header();
+ ASSERT_TRUE(header.IsValid());
+
+ numSamples += header.SamplesPerFrame();
+
+ EXPECT_EQ(target.mMPEGLayer, header.Layer());
+ EXPECT_EQ(target.mSampleRate, header.SampleRate());
+ EXPECT_EQ(target.mSamplesPerFrame, header.SamplesPerFrame());
+ EXPECT_EQ(target.mSlotSize, header.SlotSize());
+ EXPECT_EQ(target.mPrivate, header.Private());
+
+ if (target.mIsVBR) {
+ // Used to compute the average bitrate for VBR streams.
+ bitrateSum += target.mBitrate;
+ } else {
+ EXPECT_EQ(target.mBitrate, header.Bitrate());
+ }
+
+ frameData = target.mDemuxer->DemuxSample();
+ }
+
+ EXPECT_EQ(target.mPadding, target.mDemuxer->PaddingFrames());
+ EXPECT_EQ(target.mEncoderDelay, target.mDemuxer->EncoderDelayFrames());
+ // numSamples is unsigned, so a >= 0 check would be vacuously true. At
+ // least one frame was demuxed (ASSERT_TRUE above), so strictly-greater
+ // is the meaningful assertion.
+ EXPECT_GT(numSamples, 0u);
+
+ // There may be trailing headers which we don't parse, so the stream length
+ // is the upper bound.
+ if (target.mFileSize > 0) {
+ EXPECT_GE(target.mFileSize, parsedLength);
+ }
+
+ if (target.mIsVBR) {
+ ASSERT_TRUE(numFrames);
+ EXPECT_EQ(target.mBitrate, bitrateSum / numFrames);
+ }
+ }
+}
+
+// Validates the duration estimate while demuxing (within tolerance), the
+// exact duration at end of stream, and seeking past the end of the stream.
+TEST_F(MP3DemuxerTest, Duration) {
+ for (const auto& target : mTargets) {
+ printf("Testing: %s\n", target.mFilePath);
+ RefPtr<MediaRawData> frameData(target.mDemuxer->DemuxSample());
+ ASSERT_TRUE(frameData);
+ EXPECT_EQ(target.mFileSize, target.mDemuxer->StreamLength());
+
+ while (frameData) {
+ if (target.mDuration) {
+ ASSERT_TRUE(target.mDemuxer->Duration());
+ // While demuxing, the estimate only has to be within tolerance.
+ EXPECT_NEAR(target.mDuration->mMicroseconds,
+ target.mDemuxer->Duration()->ToMicroseconds(),
+ target.mDuration->Tolerance());
+ } else {
+ EXPECT_FALSE(target.mDemuxer->Duration());
+ }
+ frameData = target.mDemuxer->DemuxSample();
+ }
+ if (target.mDuration) {
+ // At the end, the durations should always be exact.
+ EXPECT_EQ(target.mDuration->mMicroseconds,
+ target.mDemuxer->Duration()->ToMicroseconds());
+ }
+ }
+
+ // Seek out of range tests.
+ for (const auto& target : mTargets) {
+ printf("Testing %s\n", target.mFilePath);
+ // Skip tests for stream media resources because of lacking duration.
+ if (target.mFileSize <= 0) {
+ continue;
+ }
+
+ target.mDemuxer->Reset();
+ RefPtr<MediaRawData> frameData(target.mDemuxer->DemuxSample());
+ ASSERT_TRUE(frameData);
+
+ ASSERT_TRUE(target.mDemuxer->Duration());
+ const auto duration = target.mDemuxer->Duration().value();
+ const auto pos = duration + TimeUnit::FromMicroseconds(1e6);
+
+ // Attempt to seek 1 second past the end of stream.
+ target.mDemuxer->Seek(pos);
+ // The seek should bring us to the end of the stream.
+ EXPECT_NEAR(duration.ToMicroseconds(),
+ target.mDemuxer->SeekPosition().ToMicroseconds(),
+ target.mSeekError * duration.ToMicroseconds());
+
+ // Since we're at the end of the stream, there should be no frames left.
+ frameData = target.mDemuxer->DemuxSample();
+ ASSERT_FALSE(frameData);
+ }
+}
+
+// Seeks forward in 1-second steps through every target, checking that the
+// reached position stays within the per-target seek-error tolerance, both
+// with and without Reset() between seeks.
+TEST_F(MP3DemuxerTest, Seek) {
+ for (const auto& target : mTargets) {
+ RefPtr<MediaRawData> frameData(target.mDemuxer->DemuxSample());
+ ASSERT_TRUE(frameData);
+
+ const auto seekTime = TimeUnit::FromSeconds(1);
+ auto pos = target.mDemuxer->SeekPosition();
+
+ while (frameData) {
+ EXPECT_NEAR(pos.ToMicroseconds(),
+ target.mDemuxer->SeekPosition().ToMicroseconds(),
+ target.mSeekError * pos.ToMicroseconds());
+
+ pos += seekTime;
+ target.mDemuxer->Seek(pos);
+ frameData = target.mDemuxer->DemuxSample();
+ }
+ }
+
+ // Seeking should work with in-between resets, too.
+ for (const auto& target : mTargets) {
+ target.mDemuxer->Reset();
+ RefPtr<MediaRawData> frameData(target.mDemuxer->DemuxSample());
+ ASSERT_TRUE(frameData);
+
+ const auto seekTime = TimeUnit::FromSeconds(1);
+ auto pos = target.mDemuxer->SeekPosition();
+
+ while (frameData) {
+ EXPECT_NEAR(pos.ToMicroseconds(),
+ target.mDemuxer->SeekPosition().ToMicroseconds(),
+ target.mSeekError * pos.ToMicroseconds());
+
+ pos += seekTime;
+ target.mDemuxer->Reset();
+ target.mDemuxer->Seek(pos);
+ frameData = target.mDemuxer->DemuxSample();
+ }
+ }
+}
diff --git a/dom/media/gtest/TestMP4Demuxer.cpp b/dom/media/gtest/TestMP4Demuxer.cpp
new file mode 100644
index 0000000000..43dfdf19a4
--- /dev/null
+++ b/dom/media/gtest/TestMP4Demuxer.cpp
@@ -0,0 +1,613 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "MP4Demuxer.h"
+#include "mozilla/MozPromise.h"
+#include "MediaDataDemuxer.h"
+#include "mozilla/SharedThreadPool.h"
+#include "mozilla/TaskQueue.h"
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Unused.h"
+#include "MockMediaResource.h"
+#include "VideoUtils.h"
+
+using namespace mozilla;
+using media::TimeUnit;
+
+// Shared rejection handler for the promise chains below: flag a test failure
+// and shut the task queue down so RunTestAndWait() can return instead of
+// hanging. Requires a RefPtr<MP4DemuxerBinding> named `binding` in scope.
+#define DO_FAIL \
+ [binding]() -> void { \
+ EXPECT_TRUE(false); \
+ binding->mTaskQueue->BeginShutdown(); \
+ }
+
+// Test harness bundling an MP4 demuxer, its mock resource, a task queue to
+// run the async demuxing on, and helpers that recursively walk all samples /
+// keyframes via MozPromise chains.
+class MP4DemuxerBinding {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MP4DemuxerBinding);
+
+ RefPtr<MockMediaResource> resource;
+ RefPtr<MP4Demuxer> mDemuxer;
+ RefPtr<TaskQueue> mTaskQueue;
+ RefPtr<MediaTrackDemuxer> mAudioTrack;
+ RefPtr<MediaTrackDemuxer> mVideoTrack;
+ // Index of the next sample CheckTrackKeyFrame() scans for a keyframe.
+ uint32_t mIndex;
+ nsTArray<RefPtr<MediaRawData>> mSamples;
+ nsTArray<int64_t> mKeyFrameTimecodes;
+ MozPromiseHolder<GenericPromise> mCheckTrackKeyFramePromise;
+ MozPromiseHolder<GenericPromise> mCheckTrackSamples;
+
+ explicit MP4DemuxerBinding(const char* aFileName = "dash_dashinit.mp4")
+ : resource(new MockMediaResource(aFileName)),
+ mDemuxer(new MP4Demuxer(resource)),
+ mTaskQueue(TaskQueue::Create(
+ GetMediaThreadPool(MediaThreadType::SUPERVISOR), "TestMP4Demuxer")),
+ mIndex(0) {
+ EXPECT_EQ(NS_OK, resource->Open());
+ }
+
+ // Initializes the demuxer, runs aFunction on the task queue once Init
+ // resolves, then blocks until the task queue has been shut down (the test
+ // body is responsible for calling BeginShutdown()).
+ template <typename Function>
+ void RunTestAndWait(const Function& aFunction) {
+ Function func(aFunction);
+ RefPtr<MP4DemuxerBinding> binding = this;
+ mDemuxer->Init()->Then(mTaskQueue, __func__, std::move(func), DO_FAIL);
+ mTaskQueue->AwaitShutdownAndIdle();
+ }
+
+ // Walks mSamples from mIndex looking for the next keyframe, seeks the
+ // track to its time and verifies the first demuxed sample matches, then
+ // recurses. Resolves the returned promise once no keyframe is left.
+ RefPtr<GenericPromise> CheckTrackKeyFrame(MediaTrackDemuxer* aTrackDemuxer) {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ RefPtr<MediaTrackDemuxer> track = aTrackDemuxer;
+ RefPtr<MP4DemuxerBinding> binding = this;
+
+ auto time = TimeUnit::Invalid();
+ while (mIndex < mSamples.Length()) {
+ uint32_t i = mIndex++;
+ if (mSamples[i]->mKeyframe) {
+ time = mSamples[i]->mTime;
+ break;
+ }
+ }
+
+ RefPtr<GenericPromise> p = mCheckTrackKeyFramePromise.Ensure(__func__);
+
+ // No further keyframe found: the scan is complete.
+ if (!time.IsValid()) {
+ mCheckTrackKeyFramePromise.Resolve(true, __func__);
+ return p;
+ }
+
+ DispatchTask([track, time, binding]() {
+ track->Seek(time)->Then(
+ binding->mTaskQueue, __func__,
+ [track, time, binding]() {
+ track->GetSamples()->Then(
+ binding->mTaskQueue, __func__,
+ [track, time,
+ binding](RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {
+ // Seeking to a keyframe time must land exactly on it.
+ EXPECT_EQ(time, aSamples->GetSamples()[0]->mTime);
+ binding->CheckTrackKeyFrame(track);
+ },
+ DO_FAIL);
+ },
+ DO_FAIL);
+ });
+
+ return p;
+ }
+
+ // Drains all samples of the track into mSamples by recursing until
+ // GetSamples() rejects. On END_OF_STREAM, verifies timecodes are strictly
+ // increasing and records keyframe timecodes; then resolves the promise.
+ RefPtr<GenericPromise> CheckTrackSamples(MediaTrackDemuxer* aTrackDemuxer) {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ RefPtr<MediaTrackDemuxer> track = aTrackDemuxer;
+ RefPtr<MP4DemuxerBinding> binding = this;
+
+ RefPtr<GenericPromise> p = mCheckTrackSamples.Ensure(__func__);
+
+ DispatchTask([track, binding]() {
+ track->GetSamples()->Then(
+ binding->mTaskQueue, __func__,
+ [track, binding](RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {
+ if (aSamples->GetSamples().Length()) {
+ binding->mSamples.AppendElements(aSamples->GetSamples());
+ binding->CheckTrackSamples(track);
+ }
+ },
+ [binding](const MediaResult& aError) {
+ if (aError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
+ EXPECT_TRUE(binding->mSamples.Length() > 1);
+ for (uint32_t i = 0; i < (binding->mSamples.Length() - 1); i++) {
+ EXPECT_LT(binding->mSamples[i]->mTimecode,
+ binding->mSamples[i + 1]->mTimecode);
+ if (binding->mSamples[i]->mKeyframe) {
+ binding->mKeyFrameTimecodes.AppendElement(
+ binding->mSamples[i]->mTimecode.ToMicroseconds());
+ }
+ }
+ binding->mCheckTrackSamples.Resolve(true, __func__);
+ } else {
+ EXPECT_TRUE(false);
+ binding->mCheckTrackSamples.Reject(aError, __func__);
+ }
+ });
+ });
+
+ return p;
+ }
+
+ private:
+ // Runs aFun asynchronously on the binding's task queue.
+ template <typename FunctionType>
+ void DispatchTask(FunctionType aFun) {
+ RefPtr<Runnable> r =
+ NS_NewRunnableFunction("MP4DemuxerBinding::DispatchTask", aFun);
+ Unused << mTaskQueue->Dispatch(r.forget());
+ }
+
+ // Refcounted; destroyed via Release().
+ virtual ~MP4DemuxerBinding() = default;
+};
+
+// Drains all video samples, then seeks to each keyframe time in turn and
+// verifies the demuxer lands exactly on it.
+TEST(MP4Demuxer, Seek)
+{
+ RefPtr<MP4DemuxerBinding> binding = new MP4DemuxerBinding();
+
+ binding->RunTestAndWait([binding]() {
+ binding->mVideoTrack =
+ binding->mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
+ binding->CheckTrackSamples(binding->mVideoTrack)
+ ->Then(
+ binding->mTaskQueue, __func__,
+ [binding]() {
+ binding->CheckTrackKeyFrame(binding->mVideoTrack)
+ ->Then(
+ binding->mTaskQueue, __func__,
+ [binding]() { binding->mTaskQueue->BeginShutdown(); },
+ DO_FAIL);
+ },
+ DO_FAIL);
+ });
+}
+
+// Serializes a CryptoSample into the compact text form the CENC tests
+// compare against: "<ivSize> <keyId hex> <iv hex> <plain,encrypted>..." for
+// encrypted samples, or the literal "no crypto" for clear ones.
+static nsCString ToCryptoString(const CryptoSample& aCrypto) {
+ nsCString res;
+ if (aCrypto.IsEncrypted()) {
+ res.AppendPrintf("%d ", aCrypto.mIVSize);
+ for (size_t i = 0; i < aCrypto.mKeyId.Length(); i++) {
+ res.AppendPrintf("%02x", aCrypto.mKeyId[i]);
+ }
+ res.AppendLiteral(" ");
+ for (size_t i = 0; i < aCrypto.mIV.Length(); i++) {
+ res.AppendPrintf("%02x", aCrypto.mIV[i]);
+ }
+ // Plain/encrypted byte counts come in matched pairs per subsample.
+ EXPECT_EQ(aCrypto.mPlainSizes.Length(), aCrypto.mEncryptedSizes.Length());
+ for (size_t i = 0; i < aCrypto.mPlainSizes.Length(); i++) {
+ res.AppendPrintf(" %d,%d", aCrypto.mPlainSizes[i],
+ aCrypto.mEncryptedSizes[i]);
+ }
+ } else {
+ res.AppendLiteral("no crypto");
+ }
+ return res;
+}
+
+TEST(MP4Demuxer, CENCFragVideo)
+{
+ const char* video[] = {
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000000 "
+ "5,684 5,16980",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000450 "
+ "5,1826",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000004c3 "
+ "5,1215",
+ "16 7e571d037e571d037e571d037e571d03 0000000000000000000000000000050f "
+ "5,1302",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000561 "
+ "5,939",
+ "16 7e571d037e571d037e571d037e571d03 0000000000000000000000000000059c "
+ "5,763",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000005cc "
+ "5,672",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000005f6 "
+ "5,748",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000625 "
+ "5,1025",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000666 "
+ "5,730",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000694 "
+ "5,897",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000006cd "
+ "5,643",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000006f6 "
+ "5,556",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000719 "
+ "5,527",
+ "16 7e571d037e571d037e571d037e571d03 0000000000000000000000000000073a "
+ "5,606",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000760 "
+ "5,701",
+ "16 7e571d037e571d037e571d037e571d03 0000000000000000000000000000078c "
+ "5,531",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000007ae "
+ "5,562",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000007d2 "
+ "5,576",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000007f6 "
+ "5,514",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000817 "
+ "5,404",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000831 "
+ "5,635",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000859 "
+ "5,433",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000875 "
+ "5,478",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000893 "
+ "5,474",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000008b1 "
+ "5,462",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000008ce "
+ "5,473",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000008ec "
+ "5,437",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000908 "
+ "5,418",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000923 "
+ "5,475",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000941 "
+ "5,23133",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000ee7 "
+ "5,475",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000f05 "
+ "5,402",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000f1f "
+ "5,415",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000f39 "
+ "5,408",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000f53 "
+ "5,442",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000f6f "
+ "5,385",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000f88 "
+ "5,368",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000f9f "
+ "5,354",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000fb6 "
+ "5,400",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000fcf "
+ "5,399",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000000fe8 "
+ "5,1098",
+ "16 7e571d037e571d037e571d037e571d03 0000000000000000000000000000102d "
+ "5,1508",
+ "16 7e571d037e571d037e571d037e571d03 0000000000000000000000000000108c "
+ "5,1345",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000010e1 "
+ "5,1945",
+ "16 7e571d037e571d037e571d037e571d03 0000000000000000000000000000115b "
+ "5,1824",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000011cd "
+ "5,2133",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000001253 "
+ "5,2486",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000012ef "
+ "5,1739",
+ "16 7e571d037e571d037e571d037e571d03 0000000000000000000000000000135c "
+ "5,1836",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000013cf "
+ "5,2367",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000001463 "
+ "5,2571",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000001504 "
+ "5,3008",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000015c0 "
+ "5,3255",
+ "16 7e571d037e571d037e571d037e571d03 0000000000000000000000000000168c "
+ "5,3225",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000001756 "
+ "5,3118",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000001819 "
+ "5,2407",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000018b0 "
+ "5,2400",
+ "16 7e571d037e571d037e571d037e571d03 00000000000000000000000000001946 "
+ "5,2158",
+ "16 7e571d037e571d037e571d037e571d03 000000000000000000000000000019cd "
+ "5,2392",
+ };
+
+ RefPtr<MP4DemuxerBinding> binding = new MP4DemuxerBinding("gizmo-frag.mp4");
+
+ binding->RunTestAndWait([binding, video]() {
+ // grab all video samples.
+ binding->mVideoTrack =
+ binding->mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
+ binding->CheckTrackSamples(binding->mVideoTrack)
+ ->Then(
+ binding->mTaskQueue, __func__,
+ [binding, video]() {
+ // Compare every sample's crypto data against the expected
+ // strings. The index must only advance once per iteration:
+ // the previous `video[i++]` combined with the for-loop's own
+ // increment skipped every other sample, leaving half of them
+ // unchecked.
+ for (uint32_t i = 0; i < binding->mSamples.Length(); i++) {
+ nsCString text = ToCryptoString(binding->mSamples[i]->mCrypto);
+ EXPECT_STREQ(video[i], text.get());
+ }
+ EXPECT_EQ(ArrayLength(video), binding->mSamples.Length());
+ binding->mTaskQueue->BeginShutdown();
+ },
+ DO_FAIL);
+ });
+}
+
+TEST(MP4Demuxer, CENCFragAudio)
+{
+ const char* audio[] = {
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000000 "
+ "0,281",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000012 "
+ "0,257",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000023 "
+ "0,246",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000033 "
+ "0,257",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000044 "
+ "0,260",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000055 "
+ "0,260",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000066 "
+ "0,272",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000077 "
+ "0,280",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000089 "
+ "0,284",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000009b "
+ "0,290",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000000ae "
+ "0,278",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000000c0 "
+ "0,268",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000000d1 "
+ "0,307",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000000e5 "
+ "0,290",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000000f8 "
+ "0,304",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000010b "
+ "0,316",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000011f "
+ "0,308",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000133 "
+ "0,301",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000146 "
+ "0,318",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000015a "
+ "0,311",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000016e "
+ "0,303",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000181 "
+ "0,325",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000196 "
+ "0,334",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000001ab "
+ "0,344",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000001c1 "
+ "0,344",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000001d7 "
+ "0,387",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000001f0 "
+ "0,396",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000209 "
+ "0,368",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000220 "
+ "0,373",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000238 "
+ "0,425",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000253 "
+ "0,428",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000026e "
+ "0,426",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000289 "
+ "0,427",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000002a4 "
+ "0,424",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000002bf "
+ "0,447",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000002db "
+ "0,446",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000002f7 "
+ "0,442",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000313 "
+ "0,444",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000032f "
+ "0,374",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000347 "
+ "0,405",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000361 "
+ "0,372",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000379 "
+ "0,395",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000392 "
+ "0,435",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000003ae "
+ "0,426",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000003c9 "
+ "0,430",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000003e4 "
+ "0,390",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000003fd "
+ "0,335",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000412 "
+ "0,339",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000428 "
+ "0,352",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000043e "
+ "0,364",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000455 "
+ "0,398",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000046e "
+ "0,451",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000048b "
+ "0,448",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000004a7 "
+ "0,436",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000004c3 "
+ "0,424",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000004de "
+ "0,428",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000004f9 "
+ "0,413",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000513 "
+ "0,430",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000052e "
+ "0,450",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000054b "
+ "0,386",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000564 "
+ "0,320",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000578 "
+ "0,347",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000058e "
+ "0,382",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000005a6 "
+ "0,437",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000005c2 "
+ "0,387",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000005db "
+ "0,340",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000005f1 "
+ "0,337",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000607 "
+ "0,389",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000620 "
+ "0,428",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000063b "
+ "0,426",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000656 "
+ "0,446",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000672 "
+ "0,456",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000068f "
+ "0,468",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000006ad "
+ "0,468",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000006cb "
+ "0,463",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000006e8 "
+ "0,467",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000706 "
+ "0,460",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000723 "
+ "0,446",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000073f "
+ "0,453",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000075c "
+ "0,448",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000778 "
+ "0,446",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000794 "
+ "0,439",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000007b0 "
+ "0,436",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000007cc "
+ "0,441",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000007e8 "
+ "0,465",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000806 "
+ "0,448",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000822 "
+ "0,448",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000083e "
+ "0,469",
+ "16 7e571d047e571d047e571d047e571d04 0000000000000000000000000000085c "
+ "0,431",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000877 "
+ "0,437",
+ "16 7e571d047e571d047e571d047e571d04 00000000000000000000000000000893 "
+ "0,474",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000008b1 "
+ "0,436",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000008cd "
+ "0,433",
+ "16 7e571d047e571d047e571d047e571d04 000000000000000000000000000008e9 "
+ "0,481",
+ };
+
+ RefPtr<MP4DemuxerBinding> binding = new MP4DemuxerBinding("gizmo-frag.mp4");
+
+ binding->RunTestAndWait([binding, audio]() {
+ // grab all audio samples.
+ binding->mAudioTrack =
+ binding->mDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0);
+ binding->CheckTrackSamples(binding->mAudioTrack)
+ ->Then(
+ binding->mTaskQueue, __func__,
+ [binding, audio]() {
+ EXPECT_TRUE(binding->mSamples.Length() > 1);
+ for (uint32_t i = 0; i < binding->mSamples.Length(); i++) {
+ nsCString text = ToCryptoString(binding->mSamples[i]->mCrypto);
+            EXPECT_STREQ(audio[i], text.get());
+ }
+ EXPECT_EQ(ArrayLength(audio), binding->mSamples.Length());
+ binding->mTaskQueue->BeginShutdown();
+ },
+ DO_FAIL);
+ });
+}
+
+TEST(MP4Demuxer, GetNextKeyframe)
+{
+ RefPtr<MP4DemuxerBinding> binding = new MP4DemuxerBinding("gizmo-frag.mp4");
+
+ binding->RunTestAndWait([binding]() {
+ // Insert a [0,end] buffered range, to simulate Moof's being buffered
+ // via MSE.
+ auto len = binding->resource->GetLength();
+ binding->resource->MockAddBufferedRange(0, len);
+
+    // gizmo-frag has two keyframes; one at dts=cts=0, and another at
+ // dts=cts=1000000. Verify we get expected results.
+ TimeUnit time;
+ binding->mVideoTrack =
+ binding->mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
+ binding->mVideoTrack->Reset();
+ binding->mVideoTrack->GetNextRandomAccessPoint(&time);
+ EXPECT_EQ(time.ToMicroseconds(), 0);
+ binding->mVideoTrack->GetSamples()->Then(
+ binding->mTaskQueue, __func__,
+ [binding]() {
+ TimeUnit time;
+ binding->mVideoTrack->GetNextRandomAccessPoint(&time);
+ EXPECT_EQ(time.ToMicroseconds(), 1000000);
+ binding->mTaskQueue->BeginShutdown();
+ },
+ DO_FAIL);
+ });
+}
+
+TEST(MP4Demuxer, ZeroInLastMoov)
+{
+ RefPtr<MP4DemuxerBinding> binding =
+ new MP4DemuxerBinding("short-zero-in-moov.mp4");
+ binding->RunTestAndWait([binding]() {
+ // It demuxes without error. That is sufficient.
+ binding->mTaskQueue->BeginShutdown();
+ });
+}
+
+TEST(MP4Demuxer, ZeroInMoovQuickTime)
+{
+ RefPtr<MP4DemuxerBinding> binding =
+ new MP4DemuxerBinding("short-zero-inband.mov");
+ binding->RunTestAndWait([binding]() {
+ // It demuxes without error. That is sufficient.
+ binding->mTaskQueue->BeginShutdown();
+ });
+}
+
+TEST(MP4Demuxer, IgnoreMinus1Duration)
+{
+ RefPtr<MP4DemuxerBinding> binding =
+ new MP4DemuxerBinding("negative_duration.mp4");
+ binding->RunTestAndWait([binding]() {
+ // It demuxes without error. That is sufficient.
+ binding->mTaskQueue->BeginShutdown();
+ });
+}
+
+#undef DO_FAIL
diff --git a/dom/media/gtest/TestMediaCodecsSupport.cpp b/dom/media/gtest/TestMediaCodecsSupport.cpp
new file mode 100644
index 0000000000..193e79c937
--- /dev/null
+++ b/dom/media/gtest/TestMediaCodecsSupport.cpp
@@ -0,0 +1,239 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "MediaCodecsSupport.h"
+
+using namespace mozilla;
+using namespace media;
+
+TEST(MediaCodecsSupport, BasicDecodeSupportSet)
+{
+ DecodeSupportSet support{};
+ EXPECT_TRUE(support != DecodeSupport::SoftwareDecode);
+ EXPECT_TRUE(support != DecodeSupport::HardwareDecode);
+ EXPECT_TRUE(!support.contains(DecodeSupport::SoftwareDecode));
+ EXPECT_TRUE(!support.contains(DecodeSupport::HardwareDecode));
+ EXPECT_TRUE(support.isEmpty());
+
+ support += DecodeSupport::SoftwareDecode;
+ EXPECT_TRUE(support == DecodeSupport::SoftwareDecode);
+ EXPECT_TRUE(support != DecodeSupport::HardwareDecode);
+ EXPECT_TRUE(support.contains(DecodeSupport::SoftwareDecode));
+ EXPECT_TRUE(!support.contains(DecodeSupport::HardwareDecode));
+ EXPECT_TRUE(!support.isEmpty());
+
+ support += DecodeSupport::HardwareDecode;
+ EXPECT_TRUE(support != DecodeSupport::SoftwareDecode);
+ EXPECT_TRUE(support != DecodeSupport::HardwareDecode);
+ EXPECT_TRUE(support.contains(DecodeSupport::SoftwareDecode));
+ EXPECT_TRUE(support.contains(DecodeSupport::HardwareDecode));
+ EXPECT_TRUE(!support.isEmpty());
+}
+
+// Test MCSInfo::GetDecodeSupportSet function.
+// This function is used to retrieve SW/HW support information for a
+// given codec from a MediaCodecsSupported EnumSet.
+// We validate that SW, HW, SW+HW, or lack of support information is
+// properly returned.
+TEST(MediaCodecsSupport, GetDecodeSupportSet)
+{
+ // Mock VP8 SW support, VP9 HW support, H264 SW+HW support
+ MediaCodecsSupported supported{MediaCodecsSupport::VP8SoftwareDecode,
+ MediaCodecsSupport::VP9HardwareDecode,
+ MediaCodecsSupport::H264SoftwareDecode,
+ MediaCodecsSupport::H264HardwareDecode};
+
+ MediaCodec codec; // Codec used to generate + filter results
+ DecodeSupportSet RV; // Return value to check for validity
+
+ // Check only SW support returned for VP8
+ codec = MediaCodec::VP8;
+ RV = MCSInfo::GetDecodeSupportSet(codec, supported);
+ EXPECT_TRUE(RV.contains(DecodeSupport::SoftwareDecode));
+ EXPECT_TRUE(RV.size() == 1);
+
+ // Check only HW support returned for VP9
+ codec = MediaCodec::VP9;
+ RV = MCSInfo::GetDecodeSupportSet(codec, supported);
+ EXPECT_TRUE(RV.contains(DecodeSupport::HardwareDecode));
+ EXPECT_TRUE(RV.size() == 1);
+
+ // Check for both SW/HW support returned for H264
+ codec = MediaCodec::H264;
+ RV = MCSInfo::GetDecodeSupportSet(codec, supported);
+ EXPECT_TRUE(RV.contains(DecodeSupport::SoftwareDecode));
+ EXPECT_TRUE(RV.contains(DecodeSupport::HardwareDecode));
+ EXPECT_TRUE(RV.size() == 2);
+
+ // Check empty return if codec not in list of codecs
+ codec = MediaCodec::AV1;
+ RV = MCSInfo::GetDecodeSupportSet(codec, supported);
+ EXPECT_TRUE(RV.size() == 0);
+}
+
+// Test MCSInfo::GetDecodeMediaCodecsSupported function.
+// This function is used to generate codec-specific SW/HW
+// support information from a generic codec identifier enum and
+// generic SW/HW support information.
+// We validate that SW, HW, SW+HW, or lack of support information is
+// properly returned.
+TEST(MediaCodecsSupport, GetDecodeMediaCodecsSupported)
+{
+ MediaCodec codec; // Codec used to generate / filter results
+ MediaCodecsSupported RV; // Return value to check for validity
+ DecodeSupportSet dss; // Non codec-specific SW / HW support information
+
+ // Check SW support returned for VP8
+ codec = MediaCodec::VP8;
+ dss = DecodeSupportSet{DecodeSupport::SoftwareDecode};
+ RV = MCSInfo::GetDecodeMediaCodecsSupported(codec, dss);
+ EXPECT_TRUE(RV.contains(MediaCodecsSupport::VP8SoftwareDecode));
+ EXPECT_TRUE(RV.size() == 1);
+
+ // Check HW support returned for AV1
+ codec = MediaCodec::AV1;
+ dss = DecodeSupportSet{DecodeSupport::HardwareDecode};
+ RV = MCSInfo::GetDecodeMediaCodecsSupported(codec, dss);
+ EXPECT_TRUE(RV.contains(MediaCodecsSupport::AV1HardwareDecode));
+ EXPECT_TRUE(RV.size() == 1);
+
+ // Check SW + HW support returned for VP9
+ codec = MediaCodec::VP9;
+ dss = DecodeSupportSet{DecodeSupport::SoftwareDecode,
+ DecodeSupport::HardwareDecode};
+ RV = MCSInfo::GetDecodeMediaCodecsSupported(codec, dss);
+ EXPECT_TRUE(RV.contains(MediaCodecsSupport::VP9SoftwareDecode));
+ EXPECT_TRUE(RV.contains(MediaCodecsSupport::VP9HardwareDecode));
+ EXPECT_TRUE(RV.size() == 2);
+
+ // Check empty return if codec not supported
+ codec = MediaCodec::AV1;
+ dss = DecodeSupportSet{};
+ RV = MCSInfo::GetDecodeMediaCodecsSupported(codec, dss);
+ EXPECT_TRUE(RV.size() == 0);
+}
+
+// Test MCSInfo::AddSupport function.
+// This function is used to store codec support data.
+// Incoming support data will be merged with any data that
+// has already been stored.
+TEST(MediaCodecsSupport, AddSupport)
+{
+ // Make sure we're not storing any existing support information.
+ MCSInfo::ResetSupport();
+ EXPECT_TRUE(MCSInfo::GetSupport().size() == 0);
+
+ // Add codec support one at a time via individual calls
+ MCSInfo::AddSupport(MediaCodecsSupport::AACSoftwareDecode);
+ MCSInfo::AddSupport(MediaCodecsSupport::VP9SoftwareDecode);
+ MCSInfo::AddSupport(MediaCodecsSupport::AV1HardwareDecode);
+
+ // Add multiple codec support via MediaCodecsSupported EnumSet
+ MCSInfo::AddSupport(
+ MediaCodecsSupported{MediaCodecsSupport::H264SoftwareDecode,
+ MediaCodecsSupport::H264HardwareDecode});
+
+ // Query MCSInfo for supported codecs
+ MediaCodecsSupported supported = MCSInfo::GetSupport();
+ DecodeSupportSet dss;
+
+ // AAC should only report software decode support
+ dss = MCSInfo::GetDecodeSupportSet(MediaCodec::AAC, supported);
+ EXPECT_TRUE(dss.size() == 1);
+ EXPECT_TRUE(dss.contains(DecodeSupport::SoftwareDecode));
+
+ // AV1 should only report hardware decode support
+ dss = MCSInfo::GetDecodeSupportSet(MediaCodec::AV1, supported);
+ EXPECT_TRUE(dss.size() == 1);
+ EXPECT_TRUE(dss.contains(DecodeSupport::HardwareDecode));
+
+ // H264 should report both SW + HW decode support
+ dss = MCSInfo::GetDecodeSupportSet(MediaCodec::H264, supported);
+ EXPECT_TRUE(dss.size() == 2);
+ EXPECT_TRUE(dss.contains(DecodeSupport::SoftwareDecode));
+ EXPECT_TRUE(dss.contains(DecodeSupport::HardwareDecode));
+
+ // Vorbis should report no decode support
+ dss = MCSInfo::GetDecodeSupportSet(MediaCodec::Vorbis, supported);
+ EXPECT_TRUE(dss.size() == 0);
+}
+
+// Test MCSInfo::GetMediaCodecsSupportedString function.
+// This function returns a human-readable string containing codec
+// names and SW/HW playback support information.
+TEST(MediaCodecsSupport, GetMediaCodecsSupportedString)
+{
+ // Make sure we're not storing any existing support information.
+ MCSInfo::ResetSupport();
+ EXPECT_TRUE(MCSInfo::GetSupport().size() == 0);
+
+ // Add H264 SW/HW support + VP8 Software decode support.
+ MCSInfo::AddSupport({MediaCodecsSupport::H264SoftwareDecode,
+ MediaCodecsSupport::H264HardwareDecode,
+ MediaCodecsSupport::VP8SoftwareDecode,
+ MediaCodecsSupport::VP9HardwareDecode});
+
+ nsCString supportString;
+ nsCString targetString;
+ MCSInfo::GetMediaCodecsSupportedString(supportString, MCSInfo::GetSupport());
+
+ // MCSInfo should return support text for all possible codecs
+ for (const auto& it : MCSInfo::GetAllCodecDefinitions()) {
+ if (it.codec == MediaCodec::SENTINEL) {
+ break;
+ }
+ nsCString cn(it.commonName);
+ // H264/VP8/VP9 support text should reflect args to MCSInfo::AddSupport
+ if (cn == "H264"_ns) {
+ targetString += "H264 SW HW"_ns;
+ } else if (cn.Equals("VP8"_ns)) {
+ targetString += "VP8 SW"_ns;
+ } else if (cn.Equals("VP9"_ns)) {
+ targetString += "VP9 HW"_ns;
+ } else {
+ targetString += nsCString(it.commonName) + " NONE"_ns;
+ }
+ targetString += "\n"_ns;
+ }
+ // MCSInfo support string should not have a trailing newline
+ if (!targetString.IsEmpty()) {
+ targetString.Truncate(targetString.Length() - 1);
+ }
+ EXPECT_TRUE(supportString.Equals(targetString));
+}
+
+// Test MCSInfo::GetMediaCodecFromMimeType function.
+// This function returns a MediaCodec enum for a given MIME type string.
+TEST(MediaCodecsSupport, GetMediaCodecFromMimeType)
+{
+ std::vector<std::pair<nsCString, MediaCodec>> testPairs = {
+// Video codecs
+#ifdef MOZ_AV1
+ {"video/av1"_ns, MediaCodec::AV1},
+#endif
+ {"video/avc"_ns, MediaCodec::H264},
+ {"video/mp4"_ns, MediaCodec::H264},
+ {"video/theora"_ns, MediaCodec::Theora},
+ {"video/vp8"_ns, MediaCodec::VP8},
+ {"video/vp9"_ns, MediaCodec::VP9},
+ // Audio codecs
+ {"audio/mp4a-latm"_ns, MediaCodec::AAC},
+ {"audio/flac"_ns, MediaCodec::FLAC},
+ {"audio/mpeg"_ns, MediaCodec::MP3},
+ {"audio/opus"_ns, MediaCodec::Opus},
+ {"audio/vorbis"_ns, MediaCodec::Vorbis},
+ {"audio/x-wav"_ns, MediaCodec::Wave},
+      // Non-existent codecs that should fail
+ {"audio/jukebox"_ns, MediaCodec::SENTINEL},
+ {"video/stopmotion"_ns, MediaCodec::SENTINEL},
+ {"漢字"_ns, MediaCodec::SENTINEL},
+ {"/"_ns, MediaCodec::SENTINEL},
+ {""_ns, MediaCodec::SENTINEL},
+ };
+ for (auto& p : testPairs) {
+ EXPECT_TRUE(MCSInfo::GetMediaCodecFromMimeType(p.first) == p.second);
+ }
+}
diff --git a/dom/media/gtest/TestMediaDataDecoder.cpp b/dom/media/gtest/TestMediaDataDecoder.cpp
new file mode 100644
index 0000000000..79a92842b6
--- /dev/null
+++ b/dom/media/gtest/TestMediaDataDecoder.cpp
@@ -0,0 +1,97 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "Benchmark.h"
+#include "MockMediaResource.h"
+#include "DecoderTraits.h"
+#include "MediaContainerType.h"
+#include "MP4Demuxer.h"
+#include "MP4Decoder.h"
+#include "WebMDecoder.h"
+#include "WebMDemuxer.h"
+#include "mozilla/AbstractThread.h"
+#include "mozilla/gtest/MozAssertions.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "nsMimeTypes.h"
+
+using namespace mozilla;
+
+class BenchmarkRunner {
+ public:
+ explicit BenchmarkRunner(Benchmark* aBenchmark) : mBenchmark(aBenchmark) {}
+
+ uint32_t Run() {
+ bool done = false;
+ uint32_t result = 0;
+
+ mBenchmark->Init();
+ mBenchmark->Run()->Then(
+ // Non DocGroup-version of AbstractThread::MainThread() is fine for
+ // testing.
+ AbstractThread::MainThread(), __func__,
+ [&](uint32_t aDecodeFps) {
+ result = aDecodeFps;
+ done = true;
+ },
+ [&]() { done = true; });
+
+ // Wait until benchmark completes.
+ SpinEventLoopUntil("BenchmarkRunner::Run"_ns, [&]() { return done; });
+ return result;
+ }
+
+ private:
+ RefPtr<Benchmark> mBenchmark;
+};
+
+TEST(MediaDataDecoder, H264)
+{
+ if (!MP4Decoder::IsSupportedType(MediaContainerType(MEDIAMIMETYPE(VIDEO_MP4)),
+ /* DecoderDoctorDiagnostics* */ nullptr)) {
+ EXPECT_TRUE(true);
+ } else {
+ RefPtr<MockMediaResource> resource = new MockMediaResource("gizmo.mp4");
+ nsresult rv = resource->Open();
+ EXPECT_NS_SUCCEEDED(rv);
+
+ BenchmarkRunner runner(new Benchmark(new MP4Demuxer(resource)));
+ EXPECT_GT(runner.Run(), 0u);
+ }
+}
+
+// Decoding AV1 via ffvpx is supported on Linux only.
+#if defined(MOZ_AV1) && defined(MOZ_WIDGET_GTK) && defined(MOZ_FFVPX) && \
+ !defined(MOZ_FFVPX_AUDIOONLY)
+TEST(MediaDataDecoder, AV1)
+{
+ if (!MP4Decoder::IsSupportedType(MediaContainerType(MEDIAMIMETYPE(VIDEO_MP4)),
+ /* DecoderDoctorDiagnostics* */ nullptr)) {
+ EXPECT_TRUE(true);
+ } else {
+ RefPtr<MockMediaResource> resource = new MockMediaResource("av1.mp4");
+ nsresult rv = resource->Open();
+ EXPECT_NS_SUCCEEDED(rv);
+
+ BenchmarkRunner runner(new Benchmark(new MP4Demuxer(resource)));
+ EXPECT_GT(runner.Run(), 0u);
+ }
+}
+#endif
+
+TEST(MediaDataDecoder, VP9)
+{
+ if (!WebMDecoder::IsSupportedType(
+ MediaContainerType(MEDIAMIMETYPE(VIDEO_WEBM)))) {
+ EXPECT_TRUE(true);
+ } else {
+ RefPtr<MockMediaResource> resource = new MockMediaResource("vp9cake.webm");
+ nsresult rv = resource->Open();
+ EXPECT_NS_SUCCEEDED(rv);
+
+ BenchmarkRunner runner(new Benchmark(new WebMDemuxer(resource)));
+ EXPECT_GT(runner.Run(), 0u);
+ }
+}
diff --git a/dom/media/gtest/TestMediaDataEncoder.cpp b/dom/media/gtest/TestMediaDataEncoder.cpp
new file mode 100644
index 0000000000..bdab94cfe5
--- /dev/null
+++ b/dom/media/gtest/TestMediaDataEncoder.cpp
@@ -0,0 +1,775 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "AnnexB.h"
+#include "H264.h"
+#include "ImageContainer.h"
+#include "mozilla/AbstractThread.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "mozilla/media/MediaUtils.h" // For media::Await
+#include "PEMFactory.h"
+#include "TimeUnits.h"
+#include "VideoUtils.h"
+#include "VPXDecoder.h"
+#include <algorithm>
+
+#define RUN_IF_SUPPORTED(codecType, test) \
+ do { \
+ RefPtr<PEMFactory> f(new PEMFactory()); \
+ if (f->SupportsCodec(codecType)) { \
+ test(); \
+ } \
+ } while (0)
+
+#define BLOCK_SIZE 64
+#define WIDTH 640
+#define HEIGHT 480
+#define NUM_FRAMES 150UL
+#define FRAME_RATE 30
+#define FRAME_DURATION (1000000 / FRAME_RATE)
+#define BIT_RATE (1000 * 1000) // 1Mbps
+#define BIT_RATE_MODE MediaDataEncoder::BitrateMode::Variable
+#define KEYFRAME_INTERVAL FRAME_RATE // 1 keyframe per second
+
+using namespace mozilla;
+
+static gfx::IntSize kImageSize(WIDTH, HEIGHT);
+// Set codec to avc1.42001E - Base profile, constraint 0, level 30.
+const H264Specific kH264SpecificAnnexB(H264_PROFILE_BASE, H264_LEVEL_3,
+ H264BitStreamFormat::ANNEXB);
+const H264Specific kH264SpecificAVCC(H264_PROFILE_BASE, H264_LEVEL_3,
+ H264BitStreamFormat::AVC);
+
+class MediaDataEncoderTest : public testing::Test {
+ protected:
+ void SetUp() override {
+ Preferences::SetBool("media.ffmpeg.encoder.enabled", true);
+ Preferences::SetInt("logging.FFmpegVideo", 5);
+ mData.Init(kImageSize);
+ }
+
+ void TearDown() override { mData.Deinit(); }
+
+ public:
+ struct FrameSource final {
+ layers::PlanarYCbCrData mYUV;
+ UniquePtr<uint8_t[]> mBuffer;
+ RefPtr<layers::BufferRecycleBin> mRecycleBin;
+ int16_t mColorStep = 4;
+
+ void Init(const gfx::IntSize& aSize) {
+ mYUV.mPictureRect = gfx::IntRect(0, 0, aSize.width, aSize.height);
+ mYUV.mYStride = aSize.width;
+ mYUV.mCbCrStride = (aSize.width + 1) / 2;
+ mYUV.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+ auto ySize = mYUV.YDataSize();
+ auto cbcrSize = mYUV.CbCrDataSize();
+ size_t bufferSize =
+ mYUV.mYStride * ySize.height + 2 * mYUV.mCbCrStride * cbcrSize.height;
+ mBuffer = MakeUnique<uint8_t[]>(bufferSize);
+ std::fill_n(mBuffer.get(), bufferSize, 0x7F);
+ mYUV.mYChannel = mBuffer.get();
+ mYUV.mCbChannel = mYUV.mYChannel + mYUV.mYStride * ySize.height;
+ mYUV.mCrChannel = mYUV.mCbChannel + mYUV.mCbCrStride * cbcrSize.height;
+ mYUV.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+ mRecycleBin = new layers::BufferRecycleBin();
+ }
+
+ void Deinit() {
+ mBuffer.reset();
+ mRecycleBin = nullptr;
+ }
+
+ already_AddRefed<MediaData> GetFrame(const size_t aIndex) {
+ Draw(aIndex);
+ RefPtr<layers::PlanarYCbCrImage> img =
+ new layers::RecyclingPlanarYCbCrImage(mRecycleBin);
+ img->CopyData(mYUV);
+ RefPtr<MediaData> frame = VideoData::CreateFromImage(
+ kImageSize, 0,
+ // The precise time unit should be media::TimeUnit(1, FRAME_RATE)
+ // instead of media::TimeUnit(FRAME_DURATION, USECS_PER_S)
+ // (FRAME_DURATION microseconds), but this setting forces us to take
+ // care some potential rounding issue, e.g., when converting to a time
+ // unit based in FRAME_RATE by TimeUnit::ToTicksAtRate(FRAME_RATE),
+ // the time unit would be calculated from 999990 / 1000000, which
+ // could be zero.
+ media::TimeUnit::FromMicroseconds(AssertedCast<int64_t>(aIndex) *
+ FRAME_DURATION),
+ media::TimeUnit::FromMicroseconds(FRAME_DURATION), img,
+ (aIndex & 0xF) == 0,
+ media::TimeUnit::FromMicroseconds(AssertedCast<int64_t>(aIndex) *
+ FRAME_DURATION));
+ return frame.forget();
+ }
+
+ void DrawChessboard(uint8_t* aAddr, const size_t aWidth,
+ const size_t aHeight, const size_t aOffset) {
+ uint8_t pixels[2][BLOCK_SIZE];
+ size_t x = aOffset % BLOCK_SIZE;
+ if ((aOffset / BLOCK_SIZE) & 1) {
+ x = BLOCK_SIZE - x;
+ }
+ for (size_t i = 0; i < x; i++) {
+ pixels[0][i] = 0x00;
+ pixels[1][i] = 0xFF;
+ }
+ for (size_t i = x; i < BLOCK_SIZE; i++) {
+ pixels[0][i] = 0xFF;
+ pixels[1][i] = 0x00;
+ }
+
+ uint8_t* p = aAddr;
+ for (size_t row = 0; row < aHeight; row++) {
+ for (size_t col = 0; col < aWidth; col += BLOCK_SIZE) {
+ memcpy(p, pixels[((row / BLOCK_SIZE) + (col / BLOCK_SIZE)) % 2],
+ BLOCK_SIZE);
+ p += BLOCK_SIZE;
+ }
+ }
+ }
+
+ void Draw(const size_t aIndex) {
+ auto ySize = mYUV.YDataSize();
+ DrawChessboard(mYUV.mYChannel, ySize.width, ySize.height, aIndex << 1);
+ int16_t color = AssertedCast<int16_t>(mYUV.mCbChannel[0] + mColorStep);
+ if (color > 255 || color < 0) {
+ mColorStep = AssertedCast<int16_t>(-mColorStep);
+ color = AssertedCast<int16_t>(mYUV.mCbChannel[0] + mColorStep);
+ }
+
+ size_t size = (mYUV.mCrChannel - mYUV.mCbChannel);
+
+ std::fill_n(mYUV.mCbChannel, size, static_cast<uint8_t>(color));
+ std::fill_n(mYUV.mCrChannel, size, 0xFF - static_cast<uint8_t>(color));
+ }
+ };
+
+ public:
+ FrameSource mData;
+};
+
+template <typename T>
+already_AddRefed<MediaDataEncoder> CreateVideoEncoder(
+ CodecType aCodec, MediaDataEncoder::Usage aUsage,
+ MediaDataEncoder::PixelFormat aPixelFormat, int32_t aWidth, int32_t aHeight,
+ MediaDataEncoder::ScalabilityMode aScalabilityMode,
+ const Maybe<T>& aSpecific) {
+ RefPtr<PEMFactory> f(new PEMFactory());
+
+ if (!f->SupportsCodec(aCodec)) {
+ return nullptr;
+ }
+
+ const RefPtr<TaskQueue> taskQueue(
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::PLATFORM_ENCODER),
+ "TestMediaDataEncoder"));
+
+ RefPtr<MediaDataEncoder> e;
+#ifdef MOZ_WIDGET_ANDROID
+ const MediaDataEncoder::HardwarePreference pref =
+ MediaDataEncoder::HardwarePreference::None;
+#else
+ const MediaDataEncoder::HardwarePreference pref =
+ MediaDataEncoder::HardwarePreference::None;
+#endif
+ e = f->CreateEncoder(
+ EncoderConfig(aCodec, gfx::IntSize{aWidth, aHeight}, aUsage, aPixelFormat,
+ aPixelFormat, FRAME_RATE /* FPS */,
+ KEYFRAME_INTERVAL /* keyframe interval */,
+ BIT_RATE /* bitrate */, BIT_RATE_MODE, pref,
+ aScalabilityMode, aSpecific),
+ taskQueue);
+
+ return e.forget();
+}
+
+static already_AddRefed<MediaDataEncoder> CreateH264Encoder(
+ MediaDataEncoder::Usage aUsage = MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat aPixelFormat =
+ MediaDataEncoder::PixelFormat::YUV420P,
+ int32_t aWidth = WIDTH, int32_t aHeight = HEIGHT,
+ MediaDataEncoder::ScalabilityMode aScalabilityMode =
+ MediaDataEncoder::ScalabilityMode::None,
+ const Maybe<H264Specific>& aSpecific = Some(kH264SpecificAnnexB)) {
+ return CreateVideoEncoder(CodecType::H264, aUsage, aPixelFormat, aWidth,
+ aHeight, aScalabilityMode, aSpecific);
+}
+
+void WaitForShutdown(const RefPtr<MediaDataEncoder>& aEncoder) {
+ MOZ_ASSERT(aEncoder);
+
+ Maybe<bool> result;
+ // media::Await() supports exclusive promises only, but ShutdownPromise is
+ // not.
+ aEncoder->Shutdown()->Then(
+ AbstractThread::MainThread(), __func__,
+ [&result](bool rv) {
+ EXPECT_TRUE(rv);
+ result = Some(true);
+ },
+ []() { FAIL() << "Shutdown should never be rejected"; });
+ SpinEventLoopUntil("TestMediaDataEncoder.cpp:WaitForShutdown"_ns,
+ [&result]() { return result; });
+}
+
+TEST_F(MediaDataEncoderTest, H264Create) {
+ RUN_IF_SUPPORTED(CodecType::H264, []() {
+ RefPtr<MediaDataEncoder> e = CreateH264Encoder();
+ EXPECT_TRUE(e);
+ WaitForShutdown(e);
+ });
+}
+
+static bool EnsureInit(const RefPtr<MediaDataEncoder>& aEncoder) {
+ if (!aEncoder) {
+ return false;
+ }
+
+  bool succeeded = false;
+ media::Await(
+ GetMediaThreadPool(MediaThreadType::SUPERVISOR), aEncoder->Init(),
+ [&succeeded](TrackInfo::TrackType t) {
+ EXPECT_EQ(TrackInfo::TrackType::kVideoTrack, t);
+ succeeded = true;
+ },
+ [&succeeded](const MediaResult& r) { succeeded = false; });
+ return succeeded;
+}
+
+TEST_F(MediaDataEncoderTest, H264Inits) {
+ RUN_IF_SUPPORTED(CodecType::H264, []() {
+ // w/o codec specific: should fail for h264.
+ RefPtr<MediaDataEncoder> e =
+ CreateH264Encoder(MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
+ MediaDataEncoder::ScalabilityMode::None, Nothing());
+ EXPECT_FALSE(e);
+
+ // w/ codec specific
+ e = CreateH264Encoder();
+ EXPECT_TRUE(EnsureInit(e));
+ WaitForShutdown(e);
+ });
+}
+
+static MediaDataEncoder::EncodedData Encode(
+ const RefPtr<MediaDataEncoder>& aEncoder, const size_t aNumFrames,
+ MediaDataEncoderTest::FrameSource& aSource) {
+ MediaDataEncoder::EncodedData output;
+  bool succeeded = false;
+ for (size_t i = 0; i < aNumFrames; i++) {
+ RefPtr<MediaData> frame = aSource.GetFrame(i);
+ media::Await(
+ GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ aEncoder->Encode(frame),
+ [&output, &succeeded](MediaDataEncoder::EncodedData encoded) {
+ output.AppendElements(std::move(encoded));
+ succeeded = true;
+ },
+ [&succeeded](const MediaResult& r) { succeeded = false; });
+ EXPECT_TRUE(succeeded);
+ if (!succeeded) {
+ return output;
+ }
+ }
+
+ size_t pending = 0;
+ do {
+ media::Await(
+ GetMediaThreadPool(MediaThreadType::SUPERVISOR), aEncoder->Drain(),
+ [&pending, &output, &succeeded](MediaDataEncoder::EncodedData encoded) {
+ pending = encoded.Length();
+ output.AppendElements(std::move(encoded));
+ succeeded = true;
+ },
+ [&succeeded](const MediaResult& r) { succeeded = false; });
+ EXPECT_TRUE(succeeded);
+ if (!succeeded) {
+ return output;
+ }
+ } while (pending > 0);
+
+ return output;
+}
+
+TEST_F(MediaDataEncoderTest, H264Encodes) {
+ RUN_IF_SUPPORTED(CodecType::H264, [this]() {
+ // Encode one frame and output in AnnexB format.
+ RefPtr<MediaDataEncoder> e = CreateH264Encoder();
+ EnsureInit(e);
+ MediaDataEncoder::EncodedData output = Encode(e, 1UL, mData);
+ EXPECT_EQ(output.Length(), 1UL);
+ EXPECT_TRUE(AnnexB::IsAnnexB(output[0]));
+ WaitForShutdown(e);
+
+ // Encode multiple frames and output in AnnexB format.
+ e = CreateH264Encoder();
+ EnsureInit(e);
+ output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (auto frame : output) {
+ EXPECT_TRUE(AnnexB::IsAnnexB(frame));
+ }
+ WaitForShutdown(e);
+
+ // Encode one frame and output in avcC format.
+ e = CreateH264Encoder(MediaDataEncoder::Usage::Record,
+ MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
+ MediaDataEncoder::ScalabilityMode::None,
+ Some(kH264SpecificAVCC));
+ EnsureInit(e);
+ output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+    EXPECT_TRUE(AnnexB::IsAVCC(output[0]));  // Only 1st frame has extra data.
+ for (auto frame : output) {
+ EXPECT_FALSE(AnnexB::IsAnnexB(frame));
+ }
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, H264Duration) {
+ RUN_IF_SUPPORTED(CodecType::H264, [this]() {
+ RefPtr<MediaDataEncoder> e = CreateH264Encoder();
+ EnsureInit(e);
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (const auto& frame : output) {
+ EXPECT_GT(frame->mDuration, media::TimeUnit::Zero());
+ }
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, InvalidSize) {
+ RUN_IF_SUPPORTED(CodecType::H264, []() {
+ RefPtr<MediaDataEncoder> e0x0 = CreateH264Encoder(
+ MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, 0, 0,
+ MediaDataEncoder::ScalabilityMode::None, Some(kH264SpecificAnnexB));
+ EXPECT_EQ(e0x0, nullptr);
+
+ RefPtr<MediaDataEncoder> e0x1 = CreateH264Encoder(
+ MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, 0, 1,
+ MediaDataEncoder::ScalabilityMode::None, Some(kH264SpecificAnnexB));
+ EXPECT_EQ(e0x1, nullptr);
+
+ RefPtr<MediaDataEncoder> e1x0 = CreateH264Encoder(
+ MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, 1, 0,
+ MediaDataEncoder::ScalabilityMode::None, Some(kH264SpecificAnnexB));
+ EXPECT_EQ(e1x0, nullptr);
+ });
+}
+
+#ifdef MOZ_WIDGET_ANDROID
+TEST_F(MediaDataEncoderTest, AndroidNotSupportedSize) {
+ RUN_IF_SUPPORTED(CodecType::H264, []() {
+ RefPtr<MediaDataEncoder> e = CreateH264Encoder(
+ MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, 1, 1,
+ MediaDataEncoder::ScalabilityMode::None, Some(kH264SpecificAnnexB));
+ EXPECT_NE(e, nullptr);
+ EXPECT_FALSE(EnsureInit(e));
+ });
+}
+#endif
+
+#if defined(XP_LINUX) && !defined(ANDROID) && \
+ (defined(MOZ_FFMPEG) || defined(MOZ_FFVPX))
+TEST_F(MediaDataEncoderTest, H264AVCC) {
+ RUN_IF_SUPPORTED(CodecType::H264, [this]() {
+    // Encode frames in avcC format.
+ RefPtr<MediaDataEncoder> e = CreateH264Encoder(
+ MediaDataEncoder::Usage::Record, MediaDataEncoder::PixelFormat::YUV420P,
+ WIDTH, HEIGHT, MediaDataEncoder::ScalabilityMode::None,
+ Some(kH264SpecificAVCC));
+ EnsureInit(e);
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (auto frame : output) {
+ EXPECT_FALSE(AnnexB::IsAnnexB(frame));
+ if (frame->mKeyframe) {
+ AnnexB::IsAVCC(frame);
+ AVCCConfig config = AVCCConfig::Parse(frame).unwrap();
+ EXPECT_EQ(config.mAVCProfileIndication,
+ static_cast<decltype(config.mAVCProfileIndication)>(
+ kH264SpecificAVCC.mProfile));
+ EXPECT_EQ(config.mAVCLevelIndication,
+ static_cast<decltype(config.mAVCLevelIndication)>(
+ kH264SpecificAVCC.mLevel));
+ }
+ }
+ WaitForShutdown(e);
+ });
+}
+#endif
+
+static already_AddRefed<MediaDataEncoder> CreateVP8Encoder(
+ MediaDataEncoder::Usage aUsage = MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat aPixelFormat =
+ MediaDataEncoder::PixelFormat::YUV420P,
+ int32_t aWidth = WIDTH, int32_t aHeight = HEIGHT,
+ MediaDataEncoder::ScalabilityMode aScalabilityMode =
+ MediaDataEncoder::ScalabilityMode::None,
+ const Maybe<VP8Specific>& aSpecific = Some(VP8Specific())) {
+ return CreateVideoEncoder(CodecType::VP8, aUsage, aPixelFormat, aWidth,
+ aHeight, aScalabilityMode, aSpecific);
+}
+
+static already_AddRefed<MediaDataEncoder> CreateVP9Encoder(
+ MediaDataEncoder::Usage aUsage = MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat aPixelFormat =
+ MediaDataEncoder::PixelFormat::YUV420P,
+ int32_t aWidth = WIDTH, int32_t aHeight = HEIGHT,
+ MediaDataEncoder::ScalabilityMode aScalabilityMode =
+ MediaDataEncoder::ScalabilityMode::None,
+ const Maybe<VP9Specific>& aSpecific = Some(VP9Specific())) {
+ return CreateVideoEncoder(CodecType::VP9, aUsage, aPixelFormat, aWidth,
+ aHeight, aScalabilityMode, aSpecific);
+}
+
+TEST_F(MediaDataEncoderTest, VP8Create) {
+ RUN_IF_SUPPORTED(CodecType::VP8, []() {
+ RefPtr<MediaDataEncoder> e = CreateVP8Encoder();
+ EXPECT_TRUE(e);
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP8Inits) {
+ RUN_IF_SUPPORTED(CodecType::VP8, []() {
+ // w/o codec specific.
+ RefPtr<MediaDataEncoder> e =
+ CreateVP8Encoder(MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
+ MediaDataEncoder::ScalabilityMode::None, Nothing());
+ EXPECT_TRUE(EnsureInit(e));
+ WaitForShutdown(e);
+
+ // w/ codec specific
+ e = CreateVP8Encoder();
+ EXPECT_TRUE(EnsureInit(e));
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP8Encodes) {
+ RUN_IF_SUPPORTED(CodecType::VP8, [this]() {
+ // Encode one VPX frame.
+ RefPtr<MediaDataEncoder> e = CreateVP8Encoder();
+ EnsureInit(e);
+ MediaDataEncoder::EncodedData output = Encode(e, 1UL, mData);
+ EXPECT_EQ(output.Length(), 1UL);
+ VPXDecoder::VPXStreamInfo info;
+ EXPECT_TRUE(
+ VPXDecoder::GetStreamInfo(*output[0], info, VPXDecoder::Codec::VP8));
+ EXPECT_EQ(info.mKeyFrame, output[0]->mKeyframe);
+ if (info.mKeyFrame) {
+ EXPECT_EQ(info.mImage, kImageSize);
+ }
+ WaitForShutdown(e);
+
+ // Encode multiple VPX frames.
+ e = CreateVP8Encoder();
+ EnsureInit(e);
+ output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (auto frame : output) {
+ VPXDecoder::VPXStreamInfo info;
+ EXPECT_TRUE(
+ VPXDecoder::GetStreamInfo(*frame, info, VPXDecoder::Codec::VP8));
+ EXPECT_EQ(info.mKeyFrame, frame->mKeyframe);
+ if (info.mKeyFrame) {
+ EXPECT_EQ(info.mImage, kImageSize);
+ }
+ }
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP8Duration) {
+ RUN_IF_SUPPORTED(CodecType::VP8, [this]() {
+ RefPtr<MediaDataEncoder> e = CreateVP8Encoder();
+ EnsureInit(e);
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (const auto& frame : output) {
+ EXPECT_GT(frame->mDuration, media::TimeUnit::Zero());
+ }
+ WaitForShutdown(e);
+ });
+}
+
+#if defined(XP_LINUX) && !defined(ANDROID) && \
+ (defined(MOZ_FFMPEG) || defined(MOZ_FFVPX))
+TEST_F(MediaDataEncoderTest, VP8EncodeAfterDrain) {
+ RUN_IF_SUPPORTED(CodecType::VP8, [this]() {
+ RefPtr<MediaDataEncoder> e = CreateVP8Encoder();
+ EnsureInit(e);
+
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (auto frame : output) {
+ VPXDecoder::VPXStreamInfo info;
+ EXPECT_TRUE(
+ VPXDecoder::GetStreamInfo(*frame, info, VPXDecoder::Codec::VP8));
+ EXPECT_EQ(info.mKeyFrame, frame->mKeyframe);
+ if (info.mKeyFrame) {
+ EXPECT_EQ(info.mImage, kImageSize);
+ }
+ }
+ output.Clear();
+
+ output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (auto frame : output) {
+ VPXDecoder::VPXStreamInfo info;
+ EXPECT_TRUE(
+ VPXDecoder::GetStreamInfo(*frame, info, VPXDecoder::Codec::VP8));
+ EXPECT_EQ(info.mKeyFrame, frame->mKeyframe);
+ if (info.mKeyFrame) {
+ EXPECT_EQ(info.mImage, kImageSize);
+ }
+ }
+
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP8EncodeWithScalabilityModeL1T2) {
+ RUN_IF_SUPPORTED(CodecType::VP8, [this]() {
+ VP8Specific specific(VPXComplexity::Normal, /* mComplexity */
+ true, /* mResilience */
+ 2, /* mNumTemporalLayers */
+ true, /* mDenoising */
+ false, /* mAutoResize */
+ false /* mFrameDropping */
+ );
+ RefPtr<MediaDataEncoder> e = CreateVP8Encoder(
+ MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
+ MediaDataEncoder::ScalabilityMode::L1T2, Some(specific));
+ EnsureInit(e);
+
+ const nsTArray<uint8_t> pattern({0, 1});
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (size_t i = 0; i < output.Length(); ++i) {
+ const RefPtr<MediaRawData> frame = output[i];
+
+ EXPECT_TRUE(frame->mTemporalLayerId);
+ size_t idx = i % pattern.Length();
+ EXPECT_EQ(frame->mTemporalLayerId.value(), pattern[idx]);
+ }
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP8EncodeWithScalabilityModeL1T3) {
+ RUN_IF_SUPPORTED(CodecType::VP8, [this]() {
+ VP8Specific specific(VPXComplexity::Normal, /* mComplexity */
+ true, /* mResilience */
+ 3, /* mNumTemporalLayers */
+ true, /* mDenoising */
+ false, /* mAutoResize */
+ false /* mFrameDropping */
+ );
+ RefPtr<MediaDataEncoder> e = CreateVP8Encoder(
+ MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
+ MediaDataEncoder::ScalabilityMode::L1T3, Some(specific));
+ EnsureInit(e);
+
+ const nsTArray<uint8_t> pattern({0, 2, 1, 2});
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (size_t i = 0; i < output.Length(); ++i) {
+ const RefPtr<MediaRawData> frame = output[i];
+
+ EXPECT_TRUE(frame->mTemporalLayerId);
+ size_t idx = i % pattern.Length();
+ EXPECT_EQ(frame->mTemporalLayerId.value(), pattern[idx]);
+ }
+ WaitForShutdown(e);
+ });
+}
+#endif
+
+TEST_F(MediaDataEncoderTest, VP9Create) {
+ RUN_IF_SUPPORTED(CodecType::VP9, []() {
+ RefPtr<MediaDataEncoder> e = CreateVP9Encoder();
+ EXPECT_TRUE(e);
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP9Inits) {
+ RUN_IF_SUPPORTED(CodecType::VP9, []() {
+ // w/o codec specific.
+ RefPtr<MediaDataEncoder> e =
+ CreateVP9Encoder(MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
+ MediaDataEncoder::ScalabilityMode::None, Nothing());
+ EXPECT_TRUE(EnsureInit(e));
+ WaitForShutdown(e);
+
+ // w/ codec specific
+ e = CreateVP9Encoder();
+ EXPECT_TRUE(EnsureInit(e));
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP9Encodes) {
+ RUN_IF_SUPPORTED(CodecType::VP9, [this]() {
+ RefPtr<MediaDataEncoder> e = CreateVP9Encoder();
+ EnsureInit(e);
+ MediaDataEncoder::EncodedData output = Encode(e, 1UL, mData);
+ EXPECT_EQ(output.Length(), 1UL);
+ VPXDecoder::VPXStreamInfo info;
+ EXPECT_TRUE(
+ VPXDecoder::GetStreamInfo(*output[0], info, VPXDecoder::Codec::VP9));
+ EXPECT_EQ(info.mKeyFrame, output[0]->mKeyframe);
+ if (info.mKeyFrame) {
+ EXPECT_EQ(info.mImage, kImageSize);
+ }
+ WaitForShutdown(e);
+
+ e = CreateVP9Encoder();
+ EnsureInit(e);
+ output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (auto frame : output) {
+ VPXDecoder::VPXStreamInfo info;
+ EXPECT_TRUE(
+ VPXDecoder::GetStreamInfo(*frame, info, VPXDecoder::Codec::VP9));
+ EXPECT_EQ(info.mKeyFrame, frame->mKeyframe);
+ if (info.mKeyFrame) {
+ EXPECT_EQ(info.mImage, kImageSize);
+ }
+ }
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP9Duration) {
+ RUN_IF_SUPPORTED(CodecType::VP9, [this]() {
+ RefPtr<MediaDataEncoder> e = CreateVP9Encoder();
+ EnsureInit(e);
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (const auto& frame : output) {
+ EXPECT_GT(frame->mDuration, media::TimeUnit::Zero());
+ }
+ WaitForShutdown(e);
+ });
+}
+
+#if defined(XP_LINUX) && !defined(ANDROID) && \
+ (defined(MOZ_FFMPEG) || defined(MOZ_FFVPX))
+TEST_F(MediaDataEncoderTest, VP9EncodeAfterDrain) {
+ RUN_IF_SUPPORTED(CodecType::VP9, [this]() {
+ RefPtr<MediaDataEncoder> e = CreateVP9Encoder();
+ EnsureInit(e);
+
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (auto frame : output) {
+ VPXDecoder::VPXStreamInfo info;
+ EXPECT_TRUE(
+ VPXDecoder::GetStreamInfo(*frame, info, VPXDecoder::Codec::VP9));
+ EXPECT_EQ(info.mKeyFrame, frame->mKeyframe);
+ if (info.mKeyFrame) {
+ EXPECT_EQ(info.mImage, kImageSize);
+ }
+ }
+ output.Clear();
+
+ output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (auto frame : output) {
+ VPXDecoder::VPXStreamInfo info;
+ EXPECT_TRUE(
+ VPXDecoder::GetStreamInfo(*frame, info, VPXDecoder::Codec::VP9));
+ EXPECT_EQ(info.mKeyFrame, frame->mKeyframe);
+ if (info.mKeyFrame) {
+ EXPECT_EQ(info.mImage, kImageSize);
+ }
+ }
+
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP9EncodeWithScalabilityModeL1T2) {
+ RUN_IF_SUPPORTED(CodecType::VP9, [this]() {
+ VP9Specific specific(VPXComplexity::Normal, /* mComplexity */
+ true, /* mResilience */
+ 2, /* mNumTemporalLayers */
+ true, /* mDenoising */
+ false, /* mAutoResize */
+ false, /* mFrameDropping */
+ true, /* mAdaptiveQp */
+ 1, /* mNumSpatialLayers */
+ false /* mFlexible */
+ );
+
+ RefPtr<MediaDataEncoder> e = CreateVP9Encoder(
+ MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
+ MediaDataEncoder::ScalabilityMode::L1T2, Some(specific));
+ EnsureInit(e);
+
+ const nsTArray<uint8_t> pattern({0, 1});
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (size_t i = 0; i < output.Length(); ++i) {
+ const RefPtr<MediaRawData> frame = output[i];
+ EXPECT_TRUE(frame->mTemporalLayerId);
+ size_t idx = i % pattern.Length();
+ EXPECT_EQ(frame->mTemporalLayerId.value(), pattern[idx]);
+ }
+ WaitForShutdown(e);
+ });
+}
+
+TEST_F(MediaDataEncoderTest, VP9EncodeWithScalabilityModeL1T3) {
+ RUN_IF_SUPPORTED(CodecType::VP9, [this]() {
+ VP9Specific specific(VPXComplexity::Normal, /* mComplexity */
+ true, /* mResilience */
+ 3, /* mNumTemporalLayers */
+ true, /* mDenoising */
+ false, /* mAutoResize */
+ false, /* mFrameDropping */
+ true, /* mAdaptiveQp */
+ 1, /* mNumSpatialLayers */
+ false /* mFlexible */
+ );
+
+ RefPtr<MediaDataEncoder> e = CreateVP9Encoder(
+ MediaDataEncoder::Usage::Realtime,
+ MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
+ MediaDataEncoder::ScalabilityMode::L1T3, Some(specific));
+ EnsureInit(e);
+
+ const nsTArray<uint8_t> pattern({0, 2, 1, 2});
+ MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
+ EXPECT_EQ(output.Length(), NUM_FRAMES);
+ for (size_t i = 0; i < output.Length(); ++i) {
+ const RefPtr<MediaRawData> frame = output[i];
+ EXPECT_TRUE(frame->mTemporalLayerId);
+ size_t idx = i % pattern.Length();
+ EXPECT_EQ(frame->mTemporalLayerId.value(), pattern[idx]);
+ }
+ WaitForShutdown(e);
+ });
+}
+#endif
diff --git a/dom/media/gtest/TestMediaEventSource.cpp b/dom/media/gtest/TestMediaEventSource.cpp
new file mode 100644
index 0000000000..811e2bec9f
--- /dev/null
+++ b/dom/media/gtest/TestMediaEventSource.cpp
@@ -0,0 +1,490 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozilla/SharedThreadPool.h"
+#include "mozilla/TaskQueue.h"
+#include "mozilla/UniquePtr.h"
+#include "MediaEventSource.h"
+#include "VideoUtils.h"
+
+using namespace mozilla;
+
+/*
+ * Test if listeners receive the event data correctly.
+ */
+TEST(MediaEventSource, SingleListener)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource SingleListener");
+
+ MediaEventProducer<int> source;
+ int i = 0;
+
+ auto func = [&](int j) { i += j; };
+ MediaEventListener listener = source.Connect(queue, func);
+
+  // Call Notify 3 times. The listener should also be called 3 times.
+ source.Notify(3);
+ source.Notify(5);
+ source.Notify(7);
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+
+ // Verify the event data is passed correctly to the listener.
+ EXPECT_EQ(i, 15); // 3 + 5 + 7
+ listener.Disconnect();
+}
+
+TEST(MediaEventSource, MultiListener)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource MultiListener");
+
+ MediaEventProducer<int> source;
+ int i = 0;
+ int j = 0;
+
+ auto func1 = [&](int k) { i = k * 2; };
+ auto func2 = [&](int k) { j = k * 3; };
+ MediaEventListener listener1 = source.Connect(queue, func1);
+ MediaEventListener listener2 = source.Connect(queue, func2);
+
+ // Both listeners should receive the event.
+ source.Notify(11);
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+
+ // Verify the event data is passed correctly to the listener.
+ EXPECT_EQ(i, 22); // 11 * 2
+ EXPECT_EQ(j, 33); // 11 * 3
+
+ listener1.Disconnect();
+ listener2.Disconnect();
+}
+
+/*
+ * Test if disconnecting a listener prevents events from coming.
+ */
+TEST(MediaEventSource, DisconnectAfterNotification)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource DisconnectAfterNotification");
+
+ MediaEventProducer<int> source;
+ int i = 0;
+
+ MediaEventListener listener;
+ auto func = [&](int j) {
+ i += j;
+ listener.Disconnect();
+ };
+ listener = source.Connect(queue, func);
+
+ // Call Notify() twice. Since we disconnect the listener when receiving
+ // the 1st event, the 2nd event should not reach the listener.
+ source.Notify(11);
+ source.Notify(11);
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+
+ // Check only the 1st event is received.
+ EXPECT_EQ(i, 11);
+}
+
+TEST(MediaEventSource, DisconnectBeforeNotification)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource DisconnectBeforeNotification");
+
+ MediaEventProducer<int> source;
+ int i = 0;
+ int j = 0;
+
+ auto func1 = [&](int k) { i = k * 2; };
+ auto func2 = [&](int k) { j = k * 3; };
+ MediaEventListener listener1 = source.Connect(queue, func1);
+ MediaEventListener listener2 = source.Connect(queue, func2);
+
+ // Disconnect listener2 before notification. Only listener1 should receive
+ // the event.
+ listener2.Disconnect();
+ source.Notify(11);
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+
+ EXPECT_EQ(i, 22); // 11 * 2
+ EXPECT_EQ(j, 0); // event not received
+
+ listener1.Disconnect();
+}
+
+/*
+ * Test we don't hit the assertion when calling Connect() and Disconnect()
+ * repeatedly.
+ */
+TEST(MediaEventSource, DisconnectAndConnect)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource DisconnectAndConnect");
+
+ MediaEventProducerExc<int> source;
+ MediaEventListener listener = source.Connect(queue, []() {});
+ listener.Disconnect();
+ listener = source.Connect(queue, []() {});
+ listener.Disconnect();
+}
+
+/*
+ * Test void event type.
+ */
+TEST(MediaEventSource, VoidEventType)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource VoidEventType");
+
+ MediaEventProducer<void> source;
+ int i = 0;
+
+ // Test function object.
+ auto func = [&]() { ++i; };
+ MediaEventListener listener1 = source.Connect(queue, func);
+
+ // Test member function.
+ struct Foo {
+ Foo() : j(1) {}
+ void OnNotify() { j *= 2; }
+ int j;
+ } foo;
+ MediaEventListener listener2 = source.Connect(queue, &foo, &Foo::OnNotify);
+
+  // Call Notify 2 times. The listener should also be called 2 times.
+ source.Notify();
+ source.Notify();
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+
+ // Verify the event data is passed correctly to the listener.
+ EXPECT_EQ(i, 2); // ++i called twice
+ EXPECT_EQ(foo.j, 4); // |j *= 2| called twice
+ listener1.Disconnect();
+ listener2.Disconnect();
+}
+
+/*
+ * Test listeners can take various event types (T, T&&, const T& and void).
+ */
+TEST(MediaEventSource, ListenerType1)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource ListenerType1");
+
+ MediaEventProducer<int> source;
+ int i = 0;
+
+ // Test various argument types.
+ auto func1 = [&](int&& j) { i += j; };
+ auto func2 = [&](const int& j) { i += j; };
+ auto func3 = [&]() { i += 1; };
+ MediaEventListener listener1 = source.Connect(queue, func1);
+ MediaEventListener listener2 = source.Connect(queue, func2);
+ MediaEventListener listener3 = source.Connect(queue, func3);
+
+ source.Notify(1);
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+
+ EXPECT_EQ(i, 3);
+
+ listener1.Disconnect();
+ listener2.Disconnect();
+ listener3.Disconnect();
+}
+
+TEST(MediaEventSource, ListenerType2)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource ListenerType2");
+
+ MediaEventProducer<int> source;
+
+ struct Foo {
+ Foo() : mInt(0) {}
+ void OnNotify1(int&& i) { mInt += i; }
+ void OnNotify2(const int& i) { mInt += i; }
+ void OnNotify3() { mInt += 1; }
+ void OnNotify4(int i) const { mInt += i; }
+ void OnNotify5(int i) volatile { mInt = mInt + i; }
+ mutable int mInt;
+ } foo;
+
+ // Test member functions which might be CV qualified.
+ MediaEventListener listener1 = source.Connect(queue, &foo, &Foo::OnNotify1);
+ MediaEventListener listener2 = source.Connect(queue, &foo, &Foo::OnNotify2);
+ MediaEventListener listener3 = source.Connect(queue, &foo, &Foo::OnNotify3);
+ MediaEventListener listener4 = source.Connect(queue, &foo, &Foo::OnNotify4);
+ MediaEventListener listener5 = source.Connect(queue, &foo, &Foo::OnNotify5);
+
+ source.Notify(1);
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+
+ EXPECT_EQ(foo.mInt, 5);
+
+ listener1.Disconnect();
+ listener2.Disconnect();
+ listener3.Disconnect();
+ listener4.Disconnect();
+ listener5.Disconnect();
+}
+
+struct SomeEvent {
+ explicit SomeEvent(int& aCount) : mCount(aCount) {}
+ // Increment mCount when copy constructor is called to know how many times
+ // the event data is copied.
+ SomeEvent(const SomeEvent& aOther) : mCount(aOther.mCount) { ++mCount; }
+ SomeEvent(SomeEvent&& aOther) : mCount(aOther.mCount) {}
+ int& mCount;
+};
+
+/*
+ * Test we don't have unnecessary copies of the event data.
+ */
+TEST(MediaEventSource, CopyEvent1)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource CopyEvent1");
+
+ MediaEventProducer<SomeEvent> source;
+ int i = 0;
+
+ auto func = [](SomeEvent&& aEvent) {};
+ struct Foo {
+ void OnNotify(SomeEvent&& aEvent) {}
+ } foo;
+
+ MediaEventListener listener1 = source.Connect(queue, func);
+ MediaEventListener listener2 = source.Connect(queue, &foo, &Foo::OnNotify);
+
+ // We expect i to be 2 since SomeEvent should be copied only once when
+ // passing to each listener.
+ source.Notify(SomeEvent(i));
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+ EXPECT_EQ(i, 2);
+ listener1.Disconnect();
+ listener2.Disconnect();
+}
+
+TEST(MediaEventSource, CopyEvent2)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource CopyEvent2");
+
+ MediaEventProducer<SomeEvent> source;
+ int i = 0;
+
+ auto func = []() {};
+ struct Foo {
+ void OnNotify() {}
+ } foo;
+
+ MediaEventListener listener1 = source.Connect(queue, func);
+ MediaEventListener listener2 = source.Connect(queue, &foo, &Foo::OnNotify);
+
+ // SomeEvent won't be copied at all since the listeners take no arguments.
+ source.Notify(SomeEvent(i));
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+ EXPECT_EQ(i, 0);
+ listener1.Disconnect();
+ listener2.Disconnect();
+}
+
+/*
+ * Test move-only types.
+ */
+TEST(MediaEventSource, MoveOnly)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource MoveOnly");
+
+ MediaEventProducerExc<UniquePtr<int>> source;
+
+ auto func = [](UniquePtr<int>&& aEvent) { EXPECT_EQ(*aEvent, 20); };
+ MediaEventListener listener = source.Connect(queue, func);
+
+ // It is OK to pass an rvalue which is move-only.
+ source.Notify(UniquePtr<int>(new int(20)));
+ // It is an error to pass an lvalue which is move-only.
+ // UniquePtr<int> event(new int(30));
+ // source.Notify(event);
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+ listener.Disconnect();
+}
+
+struct RefCounter {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefCounter)
+ explicit RefCounter(int aVal) : mVal(aVal) {}
+ int mVal;
+
+ private:
+ ~RefCounter() = default;
+};
+
+/*
+ * Test that the event data is copied rather than moved in NonExclusive mode,
+ * because each listener must receive its own copy.
+ */
+TEST(MediaEventSource, NoMove)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource NoMove");
+
+ MediaEventProducer<RefPtr<RefCounter>> source;
+
+ auto func1 = [](RefPtr<RefCounter>&& aEvent) { EXPECT_EQ(aEvent->mVal, 20); };
+ auto func2 = [](RefPtr<RefCounter>&& aEvent) { EXPECT_EQ(aEvent->mVal, 20); };
+ MediaEventListener listener1 = source.Connect(queue, func1);
+ MediaEventListener listener2 = source.Connect(queue, func2);
+
+ // We should copy this rvalue instead of move it in NonExclusive mode.
+ RefPtr<RefCounter> val = new RefCounter(20);
+ source.Notify(std::move(val));
+
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+ listener1.Disconnect();
+ listener2.Disconnect();
+}
+
+/*
+ * Rvalue lambda should be moved instead of copied.
+ */
+TEST(MediaEventSource, MoveLambda)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource MoveLambda");
+
+ MediaEventProducer<void> source;
+
+ int counter = 0;
+ SomeEvent someEvent(counter);
+
+ auto func = [someEvent]() {};
+ // someEvent is copied when captured by the lambda.
+ EXPECT_EQ(someEvent.mCount, 1);
+
+ // someEvent should be copied for we pass |func| as an lvalue.
+ MediaEventListener listener1 = source.Connect(queue, func);
+ EXPECT_EQ(someEvent.mCount, 2);
+
+ // someEvent should be moved for we pass |func| as an rvalue.
+ MediaEventListener listener2 = source.Connect(queue, std::move(func));
+ EXPECT_EQ(someEvent.mCount, 2);
+
+ listener1.Disconnect();
+ listener2.Disconnect();
+}
+
+template <typename Bool>
+struct DestroyChecker {
+ explicit DestroyChecker(Bool* aIsDestroyed) : mIsDestroyed(aIsDestroyed) {
+ EXPECT_FALSE(*mIsDestroyed);
+ }
+ ~DestroyChecker() {
+ EXPECT_FALSE(*mIsDestroyed);
+ *mIsDestroyed = true;
+ }
+
+ private:
+ Bool* const mIsDestroyed;
+};
+
+class ClassForDestroyCheck final : private DestroyChecker<bool> {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ClassForDestroyCheck);
+
+ explicit ClassForDestroyCheck(bool* aIsDestroyed)
+ : DestroyChecker(aIsDestroyed) {}
+
+ int32_t RefCountNums() const { return mRefCnt; }
+
+ protected:
+ ~ClassForDestroyCheck() = default;
+};
+
+TEST(MediaEventSource, ResetFuncReferenceAfterDisconnect)
+{
+ const RefPtr<TaskQueue> queue = TaskQueue::Create(
+ GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource ResetFuncReferenceAfterDisconnect");
+ MediaEventProducer<void> source;
+
+ // Using a class that supports refcounting to check the object destruction.
+ bool isDestroyed = false;
+ auto object = MakeRefPtr<ClassForDestroyCheck>(&isDestroyed);
+ EXPECT_FALSE(isDestroyed);
+ EXPECT_EQ(object->RefCountNums(), 1);
+
+ // Function holds a strong reference to object.
+ MediaEventListener listener = source.Connect(queue, [ptr = object] {});
+ EXPECT_FALSE(isDestroyed);
+ EXPECT_EQ(object->RefCountNums(), 2);
+
+ // This should destroy the function and release the object reference from the
+  // function on the task queue.
+ listener.Disconnect();
+ queue->BeginShutdown();
+ queue->AwaitShutdownAndIdle();
+ EXPECT_FALSE(isDestroyed);
+ EXPECT_EQ(object->RefCountNums(), 1);
+
+ // No one is holding reference to object, it should be destroyed
+ // immediately.
+ object = nullptr;
+ EXPECT_TRUE(isDestroyed);
+}
+
+TEST(MediaEventSource, ResetTargetAfterDisconnect)
+{
+ RefPtr<TaskQueue> queue =
+ TaskQueue::Create(GetMediaThreadPool(MediaThreadType::SUPERVISOR),
+ "TestMediaEventSource ResetTargetAfterDisconnect");
+ MediaEventProducer<void> source;
+ MediaEventListener listener = source.Connect(queue, [] {});
+
+ // MediaEventListener::Disconnect eventually gives up its target
+ listener.Disconnect();
+ queue->AwaitIdle();
+
+ // `queue` should be the last reference to the TaskQueue, meaning that this
+ // Release destroys it.
+ EXPECT_EQ(queue.forget().take()->Release(), 0u);
+}
diff --git a/dom/media/gtest/TestMediaMIMETypes.cpp b/dom/media/gtest/TestMediaMIMETypes.cpp
new file mode 100644
index 0000000000..d36e3bf586
--- /dev/null
+++ b/dom/media/gtest/TestMediaMIMETypes.cpp
@@ -0,0 +1,284 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "MediaMIMETypes.h"
+#include "mozilla/Unused.h"
+
+using namespace mozilla;
+
+// A compile-time MEDIAMIMETYPE literal must round-trip through
+// DependentMediaMIMEType and compare equal to a MediaMIMEType built from it.
+TEST(MediaMIMETypes, DependentMIMEType)
+{
+  static const struct {
+    const char* mString;
+    DependentMediaMIMEType mDependentMediaMIMEType;
+  } tests[] = {{"audio/mp4", MEDIAMIMETYPE("audio/mp4")},
+               {"video/mp4", MEDIAMIMETYPE("video/mp4")},
+               {"application/x-mp4", MEDIAMIMETYPE("application/x-mp4")}};
+  for (const auto& test : tests) {
+    EXPECT_TRUE(test.mDependentMediaMIMEType.AsDependentString().EqualsASCII(
+        test.mString));
+    MediaMIMEType mimetype(test.mDependentMediaMIMEType);
+    EXPECT_TRUE(mimetype.AsString().Equals(
+        test.mDependentMediaMIMEType.AsDependentString()));
+    EXPECT_EQ(mimetype, test.mDependentMediaMIMEType);
+    EXPECT_EQ(mimetype, MediaMIMEType(test.mDependentMediaMIMEType));
+  }
+}
+
+// Malformed inputs (missing major or minor type, or unknown "a/b") must make
+// MakeMediaMIMEType return Nothing.
+TEST(MediaMIMETypes, MakeMediaMIMEType_bad)
+{
+  static const char* tests[] = {"",       " ",   "/",    "audio",
+                                "audio/", "mp4", "/mp4", "a/b"};
+
+  for (const auto& test : tests) {
+    Maybe<MediaMIMEType> type = MakeMediaMIMEType(test);
+    EXPECT_TRUE(type.isNothing())
+        << "MakeMediaMIMEType(\"" << test << "\").isNothing()";
+  }
+}
+
+// Table-driven check of MediaMIMEType parsing: normalization (lower-casing,
+// stripping parameters), major-type classification, and equality with a
+// MEDIAMIMETYPE literal.
+TEST(MediaMIMETypes, MediaMIMEType)
+{
+  static const struct {
+    const char* mTypeString;
+    const char* mAsString;
+    bool mApplication;
+    bool mAudio;
+    bool mVideo;
+    bool mEqualsLiteralVideoSlashMp4;  // tests `== "video/mp4"`
+  } tests[] = {
+      // in                    AsString         app    audio  video  ==v/mp4
+      {"video/mp4", "video/mp4", false, false, true, true},
+      {"video/mp4; codecs=0", "video/mp4", false, false, true, true},
+      {"VIDEO/MP4", "video/mp4", false, false, true, true},
+      {"audio/mp4", "audio/mp4", false, true, false, false},
+      {"application/x", "application/x", true, false, false, false}};
+
+  for (const auto& test : tests) {
+    Maybe<MediaMIMEType> type = MakeMediaMIMEType(test.mTypeString);
+    EXPECT_TRUE(type.isSome())
+        << "MakeMediaMIMEType(\"" << test.mTypeString << "\").isSome()";
+    EXPECT_TRUE(type->AsString().EqualsASCII(test.mAsString))
+        << "MakeMediaMIMEType(\"" << test.mTypeString << "\")->AsString() == \""
+        << test.mAsString << "\"";
+    EXPECT_EQ(test.mApplication, type->HasApplicationMajorType())
+        << "MakeMediaMIMEType(\"" << test.mTypeString
+        << "\")->HasApplicationMajorType() == "
+        << (test.mApplication ? "true" : "false");
+    EXPECT_EQ(test.mAudio, type->HasAudioMajorType())
+        << "MakeMediaMIMEType(\"" << test.mTypeString
+        << "\")->HasAudioMajorType() == " << (test.mAudio ? "true" : "false");
+    EXPECT_EQ(test.mVideo, type->HasVideoMajorType())
+        << "MakeMediaMIMEType(\"" << test.mTypeString
+        << "\")->HasVideoMajorType() == " << (test.mVideo ? "true" : "false");
+    EXPECT_EQ(test.mEqualsLiteralVideoSlashMp4,
+              *type == MEDIAMIMETYPE("video/mp4"))
+        << "*MakeMediaMIMEType(\"" << test.mTypeString
+        << "\") == MEDIAMIMETYPE(\"video/mp4\")";
+  }
+}
+
+// Exercises MediaCodecs parsing of comma-separated codec lists: emptiness,
+// Contains/ContainsPrefix semantics, Range() iteration, ContainsAll, and a
+// non-ASCII (two-octet UTF-16 code unit) comparison case.
+// NOTE(review): the Euro-sign literals below were mojibake ("€" etc.,
+// i.e. UTF-8 bytes re-decoded as Latin-1); restored to the characters the
+// adjacent comments name (U+20AC, U+20AD, U+21AC).
+TEST(MediaMIMETypes, MediaCodecs)
+{
+  MediaCodecs empty("");
+  EXPECT_TRUE(empty.IsEmpty());
+  EXPECT_TRUE(empty.AsString().EqualsLiteral(""));
+  EXPECT_FALSE(empty.Contains(u""_ns));
+  EXPECT_FALSE(empty.Contains(u"c1"_ns));
+  EXPECT_FALSE(empty.ContainsPrefix(u""_ns));
+  EXPECT_FALSE(empty.ContainsPrefix(u"c1"_ns));
+  int iterations = 0;
+  for (const auto& codec : empty.Range()) {
+    ++iterations;
+    Unused << codec;
+  }
+  EXPECT_EQ(0, iterations);
+
+  // A lone space parses as one empty codec entry.
+  MediaCodecs space(" ");
+  EXPECT_FALSE(space.IsEmpty());
+  EXPECT_TRUE(space.AsString().EqualsLiteral(" "));
+  EXPECT_TRUE(space.Contains(u""_ns));
+  EXPECT_FALSE(space.Contains(u"c1"_ns));
+  EXPECT_TRUE(space.ContainsPrefix(u""_ns));
+  EXPECT_FALSE(space.ContainsPrefix(u"c"_ns));
+  EXPECT_FALSE(space.ContainsPrefix(u"c1"_ns));
+  iterations = 0;
+  for (const auto& codec : space.Range()) {
+    ++iterations;
+    EXPECT_TRUE(codec.IsEmpty());
+  }
+  EXPECT_EQ(1, iterations);
+
+  // Entries are trimmed of surrounding whitespace.
+  MediaCodecs one(" c1 ");
+  EXPECT_FALSE(one.IsEmpty());
+  EXPECT_TRUE(one.AsString().EqualsLiteral(" c1 "));
+  EXPECT_FALSE(one.Contains(u""_ns));
+  EXPECT_TRUE(one.Contains(u"c1"_ns));
+  EXPECT_TRUE(one.ContainsPrefix(u""_ns));
+  EXPECT_TRUE(one.ContainsPrefix(u"c"_ns));
+  EXPECT_TRUE(one.ContainsPrefix(u"c1"_ns));
+  EXPECT_FALSE(one.ContainsPrefix(u"c1x"_ns));
+  EXPECT_FALSE(one.ContainsPrefix(u"c1 "_ns));
+  iterations = 0;
+  for (const auto& codec : one.Range()) {
+    ++iterations;
+    EXPECT_TRUE(codec.EqualsLiteral("c1"));
+  }
+  EXPECT_EQ(1, iterations);
+
+  MediaCodecs two(" c1 , c2 ");
+  EXPECT_FALSE(two.IsEmpty());
+  EXPECT_TRUE(two.AsString().EqualsLiteral(" c1 , c2 "));
+  EXPECT_FALSE(two.Contains(u""_ns));
+  EXPECT_TRUE(two.Contains(u"c1"_ns));
+  EXPECT_TRUE(two.Contains(u"c2"_ns));
+  EXPECT_TRUE(two.ContainsPrefix(u""_ns));
+  EXPECT_TRUE(two.ContainsPrefix(u"c"_ns));
+  EXPECT_FALSE(two.ContainsPrefix(u"1"_ns));
+  EXPECT_TRUE(two.ContainsPrefix(u"c1"_ns));
+  EXPECT_TRUE(two.ContainsPrefix(u"c2"_ns));
+  EXPECT_FALSE(two.ContainsPrefix(u"c1x"_ns));
+  EXPECT_FALSE(two.ContainsPrefix(u"c2x"_ns));
+  iterations = 0;
+  for (const auto& codec : two.Range()) {
+    ++iterations;
+    // Expect "c1" on the first pass and "c2" on the second.
+    char buffer[] = "c0";
+    buffer[1] += iterations;
+    EXPECT_TRUE(codec.EqualsASCII(buffer));
+  }
+  EXPECT_EQ(2, iterations);
+
+  EXPECT_TRUE(two.ContainsAll(two));
+  EXPECT_TRUE(two.ContainsAll(one));
+  EXPECT_FALSE(one.ContainsAll(two));
+
+  // Check wide char case where both octets/bytes are relevant. Note we don't
+  // use `EqualsLiteral` here because at the time of writing it will place the
+  // literal into a narrow string which then doesn't compare correctly with
+  // the wide representation from MediaCodecs.
+  MediaCodecs euroSign(" € ");  // U+20AC
+  EXPECT_FALSE(euroSign.IsEmpty());
+  EXPECT_TRUE(euroSign.AsString().Equals(u" € "_ns));
+  EXPECT_FALSE(euroSign.Contains(u""_ns));
+  EXPECT_TRUE(euroSign.Contains(u"€"_ns));
+  EXPECT_FALSE(euroSign.Contains(u"€€"_ns));
+  EXPECT_TRUE(euroSign.ContainsPrefix(u""_ns));
+  EXPECT_TRUE(euroSign.ContainsPrefix(u"€"_ns));
+  EXPECT_FALSE(euroSign.ContainsPrefix(
+      u"₭"_ns));  // U+20AD -- ensure second octet is compared
+  EXPECT_FALSE(euroSign.ContainsPrefix(
+      u"↬"_ns));  // U+21AC -- ensure first octet is compared
+  EXPECT_FALSE(euroSign.ContainsPrefix(u"€ "_ns));
+  iterations = 0;
+  for (const auto& codec : euroSign.Range()) {
+    ++iterations;
+    EXPECT_TRUE(codec.Equals(u"€"_ns));
+  }
+  EXPECT_EQ(1, iterations);
+}
+
+// Same malformed inputs as MakeMediaMIMEType_bad must also fail the extended
+// (parameter-aware) parser.
+TEST(MediaMIMETypes, MakeMediaExtendedMIMEType_bad)
+{
+  static const char* tests[] = {"",       " ",   "/",    "audio",
+                                "audio/", "mp4", "/mp4", "a/b"};
+
+  for (const auto& test : tests) {
+    Maybe<MediaExtendedMIMEType> type = MakeMediaExtendedMIMEType(test);
+    EXPECT_TRUE(type.isNothing())
+        << "MakeMediaExtendedMIMEType(\"" << test << "\").isNothing()";
+  }
+}
+
+// Exercises MediaExtendedMIMEType: original-string preservation, normalized
+// Type(), major-type flags, codecs presence, and the width/height/framerate/
+// bitrate parameters (absent in the table cases, all present in the final
+// hand-written case, which also shows parameter names are case-insensitive).
+TEST(MediaMIMETypes, MediaExtendedMIMEType)
+{
+  // Some generic tests first.
+  static const struct {
+    const char* mTypeString;
+    const char* mTypeAsString;
+    bool mApplication;
+    bool mAudio;
+    bool mVideo;
+    bool mEqualsLiteralVideoSlashMp4;  // tests `== "video/mp4"`
+    bool mHaveCodecs;
+  } tests[] = {
+      // in                    Type().AsString  app    audio  video  ==v/mp4
+      // codecs
+      {"video/mp4", "video/mp4", false, false, true, true, false},
+      {"video/mp4; codecs=0", "video/mp4", false, false, true, true, true},
+      {"VIDEO/MP4", "video/mp4", false, false, true, true, false},
+      {"audio/mp4", "audio/mp4", false, true, false, false, false},
+      {"video/webm", "video/webm", false, false, true, false, false},
+      {"audio/webm", "audio/webm", false, true, false, false, false},
+      {"application/x", "application/x", true, false, false, false, false}};
+
+  for (const auto& test : tests) {
+    Maybe<MediaExtendedMIMEType> type =
+        MakeMediaExtendedMIMEType(test.mTypeString);
+    EXPECT_TRUE(type.isSome())
+        << "MakeMediaExtendedMIMEType(\"" << test.mTypeString << "\").isSome()";
+    EXPECT_TRUE(type->OriginalString().EqualsASCII(test.mTypeString))
+        << "MakeMediaExtendedMIMEType(\"" << test.mTypeString
+        << "\")->AsString() == \"" << test.mTypeAsString << "\"";
+    EXPECT_TRUE(type->Type().AsString().EqualsASCII(test.mTypeAsString))
+        << "MakeMediaExtendedMIMEType(\"" << test.mTypeString
+        << "\")->AsString() == \"" << test.mTypeAsString << "\"";
+    EXPECT_EQ(test.mApplication, type->Type().HasApplicationMajorType())
+        << "MakeMediaExtendedMIMEType(\"" << test.mTypeString
+        << "\")->Type().HasApplicationMajorType() == "
+        << (test.mApplication ? "true" : "false");
+    EXPECT_EQ(test.mAudio, type->Type().HasAudioMajorType())
+        << "MakeMediaExtendedMIMEType(\"" << test.mTypeString
+        << "\")->Type().HasAudioMajorType() == "
+        << (test.mAudio ? "true" : "false");
+    EXPECT_EQ(test.mVideo, type->Type().HasVideoMajorType())
+        << "MakeMediaExtendedMIMEType(\"" << test.mTypeString
+        << "\")->Type().HasVideoMajorType() == "
+        << (test.mVideo ? "true" : "false");
+    EXPECT_EQ(test.mEqualsLiteralVideoSlashMp4,
+              type->Type() == MEDIAMIMETYPE("video/mp4"))
+        << "*MakeMediaExtendedMIMEType(\"" << test.mTypeString
+        << "\")->Type() == MEDIAMIMETYPE(\"video/mp4\")";
+    EXPECT_EQ(test.mHaveCodecs, type->HaveCodecs())
+        << "MakeMediaExtendedMIMEType(\"" << test.mTypeString
+        << "\")->HaveCodecs() == " << (test.mHaveCodecs ? "true" : "false");
+    EXPECT_NE(test.mHaveCodecs, type->Codecs().IsEmpty())
+        << "MakeMediaExtendedMIMEType(\"" << test.mTypeString
+        << "\")->Codecs.IsEmpty() != " << (test.mHaveCodecs ? "true" : "false");
+    EXPECT_FALSE(type->GetWidth()) << "MakeMediaExtendedMIMEType(\""
+                                   << test.mTypeString << "\")->GetWidth()";
+    EXPECT_FALSE(type->GetHeight()) << "MakeMediaExtendedMIMEType(\""
+                                    << test.mTypeString << "\")->GetHeight()";
+    EXPECT_FALSE(type->GetFramerate())
+        << "MakeMediaExtendedMIMEType(\"" << test.mTypeString
+        << "\")->GetFramerate()";
+    EXPECT_FALSE(type->GetBitrate()) << "MakeMediaExtendedMIMEType(\""
+                                     << test.mTypeString << "\")->GetBitrate()";
+  }
+
+  // Test all extra parameters.
+  Maybe<MediaExtendedMIMEType> type = MakeMediaExtendedMIMEType(
+      "video/mp4; codecs=\"a,b\"; width=1024; Height=768; FrameRate=60; "
+      "BITRATE=100000");
+  EXPECT_TRUE(type->HaveCodecs());
+  EXPECT_FALSE(type->Codecs().IsEmpty());
+  EXPECT_TRUE(type->Codecs().AsString().EqualsASCII("a,b"));
+  EXPECT_TRUE(type->Codecs() == "a,b");
+  EXPECT_TRUE(type->Codecs().Contains(u"a"_ns));
+  EXPECT_TRUE(type->Codecs().Contains(u"b"_ns));
+  EXPECT_TRUE(type->Codecs().ContainsPrefix(u"a"_ns));
+  EXPECT_TRUE(type->Codecs().ContainsPrefix(u"b"_ns));
+  EXPECT_FALSE(type->Codecs().ContainsPrefix(u"ab"_ns));
+  EXPECT_FALSE(type->Codecs().ContainsPrefix(u"ba"_ns));
+  EXPECT_FALSE(type->Codecs().ContainsPrefix(u"a,b"_ns));
+  EXPECT_TRUE(!!type->GetWidth());
+  EXPECT_EQ(1024, *type->GetWidth());
+  EXPECT_TRUE(!!type->GetHeight());
+  EXPECT_EQ(768, *type->GetHeight());
+  EXPECT_TRUE(!!type->GetFramerate());
+  EXPECT_EQ(60, *type->GetFramerate());
+  EXPECT_TRUE(!!type->GetBitrate());
+  EXPECT_EQ(100000, *type->GetBitrate());
+}
diff --git a/dom/media/gtest/TestMediaQueue.cpp b/dom/media/gtest/TestMediaQueue.cpp
new file mode 100644
index 0000000000..5b049dc7fe
--- /dev/null
+++ b/dom/media/gtest/TestMediaQueue.cpp
@@ -0,0 +1,288 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <gtest/gtest.h>
+
+#include "MediaData.h"
+#include "MediaQueue.h"
+
+using namespace mozilla;
+using mozilla::media::TimeUnit;
+
+// Creates a MediaData of the requested type covering [aStartTime, aEndTime]
+// microseconds. Returns a raw pointer with no reference taken; callers must
+// adopt it into a RefPtr (or Push it into a MediaQueue, which takes ownership).
+MediaData* CreateDataRawPtr(
+    int64_t aStartTime, int64_t aEndTime,
+    MediaData::Type aType = MediaData::Type::NULL_DATA) {
+  const TimeUnit startTime = TimeUnit::FromMicroseconds(aStartTime);
+  const TimeUnit endTime = TimeUnit::FromMicroseconds(aEndTime);
+  MediaData* data;
+  if (aType == MediaData::Type::AUDIO_DATA) {
+    // An empty sample buffer suffices; only the timestamps matter here.
+    AlignedAudioBuffer samples;
+    data = new AudioData(0, startTime, std::move(samples), 2, 44100);
+    data->mDuration = endTime - startTime;
+  } else if (aType == MediaData::Type::VIDEO_DATA) {
+    data = new VideoData(0, startTime, endTime - startTime, true, startTime,
+                         gfx::IntSize(), 0);
+  } else {
+    data = new NullData(0, startTime, endTime - startTime);
+  }
+  return data;
+}
+
+// Convenience wrapper around CreateDataRawPtr that returns an owning
+// already_AddRefed<MediaData> (NULL_DATA type).
+already_AddRefed<MediaData> CreateData(int64_t aStartTime, int64_t aEndTime) {
+  RefPtr<MediaData> data = CreateDataRawPtr(aStartTime, aEndTime);
+  return data.forget();
+}
+
+// Used to avoid the compile error `comparison of integers of different signs`
+// when comparing 'const unsigned long' and 'const int'.
+#define EXPECT_EQUAL_SIZE_T(lhs, rhs) EXPECT_EQ(size_t(lhs), size_t(rhs))
+
+// PopFront/PopBack must return the expected element and shrink the queue,
+// both for a single element and for FIFO order with several elements.
+TEST(MediaQueue, BasicPopOperations)
+{
+  MediaQueue<MediaData> queue;
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 0);
+
+  // Test only one element
+  const RefPtr<MediaData> data = CreateDataRawPtr(0, 10);
+  queue.Push(data.get());
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 1);
+
+  RefPtr<MediaData> rv = queue.PopFront();
+  EXPECT_EQ(rv, data);
+
+  queue.Push(data.get());
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 1);
+
+  rv = queue.PopBack();
+  EXPECT_EQ(rv, data);
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 0);
+
+  // Test multiple elements
+  const RefPtr<MediaData> data1 = CreateDataRawPtr(0, 10);
+  const RefPtr<MediaData> data2 = CreateDataRawPtr(11, 20);
+  const RefPtr<MediaData> data3 = CreateDataRawPtr(21, 30);
+  queue.Push(data1.get());
+  queue.Push(data2.get());
+  queue.Push(data3.get());
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 3);
+
+  // Front is the oldest element...
+  rv = queue.PopFront();
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 2);
+  EXPECT_EQ(rv, data1);
+
+  // ...back is the newest...
+  rv = queue.PopBack();
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 1);
+  EXPECT_EQ(rv, data3);
+
+  // ...leaving the middle element last.
+  rv = queue.PopBack();
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 0);
+  EXPECT_EQ(rv, data2);
+}
+
+// PeekFront/PeekBack must return the boundary elements without removing them.
+TEST(MediaQueue, BasicPeekOperations)
+{
+  MediaQueue<MediaData> queue;
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 0);
+
+  // Test only one element
+  const RefPtr<MediaData> data1 = CreateDataRawPtr(0, 10);
+  queue.Push(data1.get());
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 1);
+
+  RefPtr<MediaData> rv = queue.PeekFront();
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 1);
+  EXPECT_EQ(rv, data1);
+
+  rv = queue.PeekBack();
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 1);
+  EXPECT_EQ(rv, data1);
+
+  // Test multiple elements
+  const RefPtr<MediaData> data2 = CreateDataRawPtr(11, 20);
+  const RefPtr<MediaData> data3 = CreateDataRawPtr(21, 30);
+  queue.Push(data2.get());
+  queue.Push(data3.get());
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 3);
+
+  rv = queue.PeekFront();
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 3);
+  EXPECT_EQ(rv, data1);
+
+  rv = queue.PeekBack();
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 3);
+  EXPECT_EQ(rv, data3);
+}
+
+// Finish() flips the queue's finished flag.
+TEST(MediaQueue, FinishQueue)
+{
+  MediaQueue<MediaData> queue;
+  EXPECT_FALSE(queue.IsFinished());
+
+  queue.Finish();
+  EXPECT_TRUE(queue.IsFinished());
+}
+
+// AtEndOfStream() requires the queue to be both finished AND drained;
+// a finished queue with remaining data is not yet at end of stream.
+TEST(MediaQueue, EndOfStream)
+{
+  MediaQueue<MediaData> queue;
+  EXPECT_FALSE(queue.IsFinished());
+
+  queue.Push(CreateData(0, 10));
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 1);
+
+  queue.Finish();
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 1);
+  EXPECT_TRUE(queue.IsFinished());
+  EXPECT_FALSE(queue.AtEndOfStream());
+
+  RefPtr<MediaData> rv = queue.PopFront();
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 0);
+  EXPECT_TRUE(queue.IsFinished());
+  EXPECT_TRUE(queue.AtEndOfStream());
+}
+
+// Duration() of [0,10],[11,20],[21,30] is 30 — note the per-sample durations
+// sum to 28, so Duration() apparently spans from the front's start time to the
+// back's end time (gaps included) — inferred from this expectation; confirm
+// against MediaQueue::Duration.
+TEST(MediaQueue, QueueDuration)
+{
+  MediaQueue<MediaData> queue;
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 0);
+
+  queue.Push(CreateData(0, 10));
+  queue.Push(CreateData(11, 20));
+  queue.Push(CreateData(21, 30));
+  EXPECT_EQUAL_SIZE_T(queue.GetSize(), 3);
+
+  const int64_t rv = queue.Duration();
+  EXPECT_EQ(rv, 30);
+}
+
+// GetElementsAfter() returns elements whose end time is strictly after the
+// target time; single-element cases on either side of the boundary.
+TEST(MediaQueue, CallGetElementAfterOnSingleElement)
+{
+  MediaQueue<MediaData> queue;
+  queue.Push(CreateData(0, 10));
+
+  // Target time is earlier than data's end time
+  TimeUnit targetTime = TimeUnit::FromMicroseconds(5);
+  nsTArray<RefPtr<MediaData>> foundResult;
+  queue.GetElementsAfter(targetTime, &foundResult);
+  EXPECT_EQUAL_SIZE_T(foundResult.Length(), 1);
+  EXPECT_TRUE(foundResult[0]->GetEndTime() > targetTime);
+
+  // Target time is later than data's end time
+  targetTime = TimeUnit::FromMicroseconds(15);
+  nsTArray<RefPtr<MediaData>> emptyResult;
+  queue.GetElementsAfter(targetTime, &emptyResult);
+  EXPECT_TRUE(emptyResult.IsEmpty());
+}
+
+// GetElementsAfter() with several elements: mid-sample target, exact-boundary
+// target (end time == target excluded), and a target past every element.
+TEST(MediaQueue, CallGetElementAfterOnMultipleElements)
+{
+  MediaQueue<MediaData> queue;
+  queue.Push(CreateData(0, 10));
+  queue.Push(CreateData(11, 20));
+  queue.Push(CreateData(21, 30));
+  queue.Push(CreateData(31, 40));
+  queue.Push(CreateData(41, 50));
+
+  // Should find [21,30], [31,40] and [41,50]
+  TimeUnit targetTime = TimeUnit::FromMicroseconds(25);
+  nsTArray<RefPtr<MediaData>> foundResult;
+  queue.GetElementsAfter(targetTime, &foundResult);
+  EXPECT_EQUAL_SIZE_T(foundResult.Length(), 3);
+  for (const auto& data : foundResult) {
+    EXPECT_TRUE(data->GetEndTime() > targetTime);
+  }
+
+  // Should find [31,40] and [41,50]
+  targetTime = TimeUnit::FromMicroseconds(30);
+  foundResult.Clear();
+  queue.GetElementsAfter(targetTime, &foundResult);
+  EXPECT_EQUAL_SIZE_T(foundResult.Length(), 2);
+  for (const auto& data : foundResult) {
+    EXPECT_TRUE(data->GetEndTime() > targetTime);
+  }
+
+  // Should find no data.
+  targetTime = TimeUnit::FromMicroseconds(60);
+  nsTArray<RefPtr<MediaData>> emptyResult;
+  queue.GetElementsAfter(targetTime, &emptyResult);
+  EXPECT_TRUE(emptyResult.IsEmpty());
+}
+
+// SetOffset() shifts timestamps of audio/video data pushed AFTER the call,
+// leaves previously-pushed data untouched, and is cleared by Reset().
+TEST(MediaQueue, TimestampAdjustmentForSupportDataType)
+{
+  const size_t kOffSet = 30;
+  {
+    MediaQueue<AudioData> audioQueue;
+    audioQueue.Push(
+        CreateDataRawPtr(0, 10, MediaData::Type::AUDIO_DATA)->As<AudioData>());
+    audioQueue.SetOffset(TimeUnit::FromMicroseconds(kOffSet));
+    audioQueue.Push(
+        CreateDataRawPtr(0, 10, MediaData::Type::AUDIO_DATA)->As<AudioData>());
+
+    // Data stored before setting the offset shouldn't be changed
+    RefPtr<AudioData> data = audioQueue.PopFront();
+    EXPECT_EQ(data->mTime, TimeUnit::FromMicroseconds(0));
+    EXPECT_EQ(data->GetEndTime(), TimeUnit::FromMicroseconds(10));
+
+    // Data stored after setting the offset should be changed
+    data = audioQueue.PopFront();
+    EXPECT_EQ(data->mTime, TimeUnit::FromMicroseconds(0 + kOffSet));
+    EXPECT_EQ(data->GetEndTime(), TimeUnit::FromMicroseconds(10 + kOffSet));
+
+    // Reset will clean the offset.
+    audioQueue.Reset();
+    audioQueue.Push(
+        CreateDataRawPtr(0, 10, MediaData::Type::AUDIO_DATA)->As<AudioData>());
+    data = audioQueue.PopFront();
+    EXPECT_EQ(data->mTime, TimeUnit::FromMicroseconds(0));
+    EXPECT_EQ(data->GetEndTime(), TimeUnit::FromMicroseconds(10));
+  }
+
+  // Check another supported type
+  MediaQueue<VideoData> videoQueue;
+  videoQueue.Push(
+      CreateDataRawPtr(0, 10, MediaData::Type::VIDEO_DATA)->As<VideoData>());
+  videoQueue.SetOffset(TimeUnit::FromMicroseconds(kOffSet));
+  videoQueue.Push(
+      CreateDataRawPtr(0, 10, MediaData::Type::VIDEO_DATA)->As<VideoData>());
+
+  // Data stored before setting the offset shouldn't be changed
+  RefPtr<VideoData> data = videoQueue.PopFront();
+  EXPECT_EQ(data->mTime, TimeUnit::FromMicroseconds(0));
+  EXPECT_EQ(data->GetEndTime(), TimeUnit::FromMicroseconds(10));
+
+  // Data stored after setting the offset should be changed
+  data = videoQueue.PopFront();
+  EXPECT_EQ(data->mTime, TimeUnit::FromMicroseconds(0 + kOffSet));
+  EXPECT_EQ(data->GetEndTime(), TimeUnit::FromMicroseconds(10 + kOffSet));
+
+  // Reset will clean the offset.
+  videoQueue.Reset();
+  videoQueue.Push(
+      CreateDataRawPtr(0, 10, MediaData::Type::VIDEO_DATA)->As<VideoData>());
+  data = videoQueue.PopFront();
+  EXPECT_EQ(data->mTime, TimeUnit::FromMicroseconds(0));
+  EXPECT_EQ(data->GetEndTime(), TimeUnit::FromMicroseconds(10));
+}
+
+// SetOffset() has no effect on NULL_DATA (an unsupported type for timestamp
+// adjustment): both elements keep their original timestamps.
+TEST(MediaQueue, TimestampAdjustmentForNotSupportDataType)
+{
+  const size_t kOffSet = 30;
+
+  MediaQueue<MediaData> queue;
+  queue.Push(CreateDataRawPtr(0, 10));
+  queue.SetOffset(TimeUnit::FromMicroseconds(kOffSet));
+  queue.Push(CreateDataRawPtr(0, 10));
+
+  // Offset won't affect any data at all.
+  RefPtr<MediaData> data = queue.PopFront();
+  EXPECT_EQ(data->mTime, TimeUnit::FromMicroseconds(0));
+  EXPECT_EQ(data->GetEndTime(), TimeUnit::FromMicroseconds(10));
+
+  data = queue.PopFront();
+  EXPECT_EQ(data->mTime, TimeUnit::FromMicroseconds(0));
+  EXPECT_EQ(data->GetEndTime(), TimeUnit::FromMicroseconds(10));
+}
+
+#undef EXPECT_EQUAL_SIZE_T
diff --git a/dom/media/gtest/TestMediaSpan.cpp b/dom/media/gtest/TestMediaSpan.cpp
new file mode 100644
index 0000000000..e6edcb944b
--- /dev/null
+++ b/dom/media/gtest/TestMediaSpan.cpp
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <gtest/gtest.h>
+#include <stdint.h>
+
+#include "MediaSpan.h"
+
+#include "mozilla/ArrayUtils.h"
+
+using namespace mozilla;
+
+// Builds a MediaByteBuffer holding the consecutive byte values
+// aStart..aEnd inclusive (so its length is aEnd - aStart + 1).
+already_AddRefed<MediaByteBuffer> makeBuffer(uint8_t aStart, uint8_t aEnd) {
+  RefPtr<MediaByteBuffer> buffer(new MediaByteBuffer);
+  for (uint8_t i = aStart; i <= aEnd; i++) {
+    buffer->AppendElement(i);
+  }
+  return buffer.forget();
+}
+
+// Returns true iff aSpan, starting at index aAt, contains exactly the byte
+// values aStart..aEnd inclusive (and is long enough to hold them).
+bool IsRangeAt(const MediaSpan& aSpan, uint8_t aStart, uint8_t aEnd,
+               size_t aAt) {
+  size_t length = size_t(aEnd) - size_t(aStart) + 1;
+  if (aAt + length > aSpan.Length()) {
+    return false;
+  }
+  for (size_t i = 0; i < length; i++) {
+    if (aSpan[aAt + i] != uint8_t(aStart + i)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Shorthand for IsRangeAt() starting at the beginning of the span.
+bool IsRange(const MediaSpan& aSpan, uint8_t aStart, uint8_t aEnd) {
+  return IsRangeAt(aSpan, aStart, aEnd, 0);
+}
+
+// Appending to a span derived via From() must be copy-on-write: the derived
+// span grows, the original span is untouched.
+TEST(MediaSpan, AppendToFromSpan)
+{
+  RefPtr<MediaByteBuffer> buffer1 = makeBuffer(0, 9);
+  MediaSpan span1 = MediaSpan(buffer1);
+  EXPECT_EQ(span1.Length(), size_t(10));
+  EXPECT_TRUE(IsRange(span1, 0, 9));
+
+  MediaSpan span2 = span1.From(5);
+
+  EXPECT_EQ(span2.Length(), size_t(5));
+  EXPECT_TRUE(IsRange(span2, 5, 9));
+  RefPtr<MediaByteBuffer> buffer2 = makeBuffer(10, 19);
+  EXPECT_EQ(buffer2->Length(), size_t(10));
+  span2.Append(buffer2);
+
+  // Span2 should be: [5...19]
+  EXPECT_EQ(span2.Length(), size_t(15));
+  EXPECT_TRUE(IsRange(span2, 5, 19));
+
+  // Span1 should not be modified by the append to span2.
+  EXPECT_EQ(span1.Length(), size_t(10));
+  EXPECT_TRUE(IsRange(span1, 0, 9));
+}
+
+// Appending to a span derived via To() appends after the truncation point,
+// producing a discontiguous value sequence; the original span is untouched.
+TEST(MediaSpan, AppendToToSpan)
+{
+  RefPtr<MediaByteBuffer> buffer1 = makeBuffer(0, 9);
+  MediaSpan span1 = MediaSpan(buffer1);
+  EXPECT_EQ(span1.Length(), size_t(10));
+  EXPECT_TRUE(IsRange(span1, 0, 9));
+
+  MediaSpan span2 = span1.To(5);
+
+  // Span2 should be [0...4]
+  EXPECT_EQ(span2.Length(), size_t(5));
+  EXPECT_TRUE(IsRange(span2, 0, 4));
+  RefPtr<MediaByteBuffer> buffer2 = makeBuffer(10, 19);
+  EXPECT_EQ(buffer2->Length(), size_t(10));
+  span2.Append(buffer2);
+
+  // Span2 should be: [0...4][10...19]
+  EXPECT_EQ(span2.Length(), size_t(15));
+  EXPECT_TRUE(IsRangeAt(span2, 0, 4, 0));
+  EXPECT_TRUE(IsRangeAt(span2, 10, 19, 5));
+
+  // Span1 should not be modified by the append to span2.
+  EXPECT_EQ(span1.Length(), size_t(10));
+  EXPECT_TRUE(IsRange(span1, 0, 9));
+}
+
+// RemoveFront() trims only the span it is called on; a copy made beforehand
+// keeps the full range.
+TEST(MediaSpan, RemoveFront)
+{
+  RefPtr<MediaByteBuffer> buffer1 = makeBuffer(0, 9);
+  MediaSpan span1 = MediaSpan(buffer1);
+  EXPECT_EQ(span1.Length(), size_t(10));
+  EXPECT_TRUE(IsRange(span1, 0, 9));
+
+  MediaSpan span2(span1);
+  EXPECT_EQ(span2.Length(), size_t(10));
+
+  span2.RemoveFront(5);
+
+  // Span2 should now be [5...9]
+  EXPECT_EQ(span2.Length(), size_t(5));
+  EXPECT_TRUE(IsRange(span2, 5, 9));
+
+  // Span1 should be unaffected.
+  EXPECT_EQ(span1.Length(), size_t(10));
+  EXPECT_TRUE(IsRange(span1, 0, 9));
+}
diff --git a/dom/media/gtest/TestMediaUtils.cpp b/dom/media/gtest/TestMediaUtils.cpp
new file mode 100644
index 0000000000..33a32b7ea0
--- /dev/null
+++ b/dom/media/gtest/TestMediaUtils.cpp
@@ -0,0 +1,240 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "MediaUtils.h"
+#include "mozilla/AppShutdown.h"
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/gtest/MozHelpers.h"
+
+using namespace mozilla;
+using namespace mozilla::gtest;
+using namespace mozilla::media;
+
+// Spawning the death test child process aborts on Android.
+#if !defined(ANDROID)
+
+// Kept here for reference as it can be handy during development.
+# define DISABLE_CRASH_REPORTING \
+ gtest::DisableCrashReporter(); \
+ ZERO_GDB_SLEEP();
+
+// Death-test body: a ShutdownBlockingTicket created on the main thread before
+// AppShutdown must block the AppShutdown phase until the ticket is released
+// (here, from a background task). Runs in a child process; exits 0 on success.
+void DoCreateTicketBeforeAppShutdownOnMain() {
+  auto reporter = ScopedTestResultReporter::Create(ExitMode::ExitOnDtor);
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownNetTeardown);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTeardown);
+
+  Monitor mon("TestMonitor");
+  bool pastAppShutdown = false;
+  bool backgroundTaskFinished = false;
+
+  UniquePtr ticket = ShutdownBlockingTicket::Create(
+      u"Test"_ns, NS_LITERAL_STRING_FROM_CSTRING(__FILE__), __LINE__);
+
+  MOZ_ALWAYS_SUCCEEDS(
+      NS_DispatchBackgroundTask(NS_NewRunnableFunction(__func__, [&] {
+        // Wait up to 200ms; AppShutdown must NOT have completed while the
+        // ticket is still alive.
+        TimeStamp now = TimeStamp::Now();
+        TimeStamp end = now + TimeDuration::FromSeconds(0.2);
+        MonitorAutoLock lock(mon);
+        while (!pastAppShutdown && (end - now) > TimeDuration()) {
+          lock.Wait(end - now);
+          now = TimeStamp::Now();
+        }
+        EXPECT_FALSE(pastAppShutdown);
+        // Releasing the ticket unblocks shutdown on the main thread.
+        ticket = nullptr;
+        while (!pastAppShutdown) {
+          lock.Wait();
+        }
+        EXPECT_TRUE(pastAppShutdown);
+        backgroundTaskFinished = true;
+        lock.Notify();
+      })));
+
+  // Blocks here until the background task drops the ticket.
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdown);
+
+  {
+    MonitorAutoLock lock(mon);
+    pastAppShutdown = true;
+    lock.Notify();
+    while (!backgroundTaskFinished) {
+      lock.Wait();
+    }
+  }
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownQM);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTelemetry);
+
+  NS_ShutdownXPCOM(nullptr);
+}
+
+// Death-test body: once the AppShutdown phase has been passed on the main
+// thread, creating a ShutdownBlockingTicket must fail (return null).
+void DoCreateTicketAfterAppShutdownOnMain() {
+  auto reporter = ScopedTestResultReporter::Create(ExitMode::ExitOnDtor);
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownNetTeardown);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTeardown);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdown);
+
+  auto ticket = ShutdownBlockingTicket::Create(
+      u"Test"_ns, NS_LITERAL_STRING_FROM_CSTRING(__FILE__), __LINE__);
+  EXPECT_FALSE(ticket);
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownQM);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTelemetry);
+
+  NS_ShutdownXPCOM(nullptr);
+}
+
+// Death-test body: same as DoCreateTicketBeforeAppShutdownOnMain, but the
+// ticket is created on a background (non-main) thread before AppShutdown and
+// must still block the AppShutdown phase until released.
+void DoCreateTicketBeforeAppShutdownOffMain() {
+  auto reporter = ScopedTestResultReporter::Create(ExitMode::ExitOnDtor);
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownNetTeardown);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTeardown);
+
+  Monitor mon("TestMonitor");
+  bool pastAppShutdown = false;
+  bool ticketCreated = false;
+  bool backgroundTaskFinished = false;
+
+  MOZ_ALWAYS_SUCCEEDS(
+      NS_DispatchBackgroundTask(NS_NewRunnableFunction(__func__, [&] {
+        MonitorAutoLock lock(mon);
+        auto ticket = ShutdownBlockingTicket::Create(
+            u"Test"_ns, NS_LITERAL_STRING_FROM_CSTRING(__FILE__), __LINE__);
+        EXPECT_TRUE(ticket);
+        ticketCreated = true;
+        lock.Notify();
+
+        // Wait up to 200ms; shutdown must not complete while the ticket lives.
+        TimeStamp now = TimeStamp::Now();
+        TimeStamp end = now + TimeDuration::FromSeconds(0.2);
+        while (!pastAppShutdown && (end - now) > TimeDuration()) {
+          lock.Wait(end - now);
+          now = TimeStamp::Now();
+        }
+        EXPECT_FALSE(pastAppShutdown);
+        ticket = nullptr;
+        while (!pastAppShutdown) {
+          lock.Wait();
+        }
+        EXPECT_TRUE(pastAppShutdown);
+        backgroundTaskFinished = true;
+        lock.Notify();
+      })));
+
+  // Don't advance shutdown until the ticket definitely exists.
+  {
+    MonitorAutoLock lock(mon);
+    while (!ticketCreated) {
+      lock.Wait();
+    }
+  }
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdown);
+
+  MonitorAutoLock lock(mon);
+  pastAppShutdown = true;
+  lock.Notify();
+  while (!backgroundTaskFinished) {
+    lock.Wait();
+  }
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownQM);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTelemetry);
+
+  NS_ShutdownXPCOM(nullptr);
+}
+
+// Death-test body: after the AppShutdown phase, ticket creation must fail
+// even when attempted from a background thread (dispatched synchronously so
+// the result can be checked on the main thread).
+void DoCreateTicketAfterAppShutdownOffMain() {
+  auto reporter = ScopedTestResultReporter::Create(ExitMode::ExitOnDtor);
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownNetTeardown);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTeardown);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdown);
+
+  UniquePtr<ShutdownBlockingTicket> ticket;
+  MOZ_ALWAYS_SUCCEEDS(NS_DispatchBackgroundTask(
+      MakeAndAddRef<SyncRunnable>(NS_NewRunnableFunction(__func__, [&] {
+        ticket = ShutdownBlockingTicket::Create(
+            u"Test"_ns, NS_LITERAL_STRING_FROM_CSTRING(__FILE__), __LINE__);
+      }))));
+
+  EXPECT_FALSE(ticket);
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownQM);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTelemetry);
+
+  NS_ShutdownXPCOM(nullptr);
+}
+
+// Death-test body: two tickets sharing the same blocker name must each block
+// shutdown independently — releasing the first must not unblock the second.
+// The second ticket is released from a 100ms timer, and the test verifies
+// shutdown took at least that long.
+void DoTwoTicketsWithSameNameBothBlockShutdown() {
+  auto reporter = ScopedTestResultReporter::Create(ExitMode::ExitOnDtor);
+
+  const auto name = u"Test"_ns;
+  auto ticket1 = ShutdownBlockingTicket::Create(
+      name, NS_LITERAL_STRING_FROM_CSTRING(__FILE__), __LINE__);
+  EXPECT_TRUE(ticket1);
+  auto ticket2 = ShutdownBlockingTicket::Create(
+      name, NS_LITERAL_STRING_FROM_CSTRING(__FILE__), __LINE__);
+  EXPECT_TRUE(ticket2);
+
+  ticket1 = nullptr;
+
+  // A copyable holder for the std::function in NS_NewTimerWithCallback.
+  auto ticket2Holder =
+      MakeRefPtr<Refcountable<UniquePtr<ShutdownBlockingTicket>>>(
+          ticket2.release());
+
+  const auto waitBeforeDestroyingTicket = TimeDuration::FromMilliseconds(100);
+  TimeStamp before = TimeStamp::Now();
+  // The timer callback owns the holder; when it fires, the last reference to
+  // ticket2 is dropped and shutdown can proceed.
+  auto timerResult = NS_NewTimerWithCallback(
+      [t = std::move(ticket2Holder)](nsITimer* aTimer) {},
+      waitBeforeDestroyingTicket, nsITimer::TYPE_ONE_SHOT, __func__);
+  ASSERT_TRUE(timerResult.isOk());
+
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownNetTeardown);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTeardown);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdown);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownQM);
+  AppShutdown::AdvanceShutdownPhase(ShutdownPhase::AppShutdownTelemetry);
+
+  NS_ShutdownXPCOM(nullptr);
+  TimeStamp after = TimeStamp::Now();
+  EXPECT_GT((after - before).ToMilliseconds(),
+            waitBeforeDestroyingTicket.ToMilliseconds());
+}
+
+// Each Do* body above mutates global shutdown state, so it runs in a child
+// process via EXPECT_EXIT and must exit with code 0.
+TEST(ShutdownBlockingTicketDeathTest, CreateTicketBeforeAppShutdownOnMain)
+{
+  EXPECT_EXIT(DoCreateTicketBeforeAppShutdownOnMain(),
+              testing::ExitedWithCode(0), "");
+}
+
+TEST(ShutdownBlockingTicketDeathTest, CreateTicketAfterAppShutdownOnMain)
+{
+  EXPECT_EXIT(DoCreateTicketAfterAppShutdownOnMain(),
+              testing::ExitedWithCode(0), "");
+}
+
+TEST(ShutdownBlockingTicketDeathTest, CreateTicketBeforeAppShutdownOffMain)
+{
+  EXPECT_EXIT(DoCreateTicketBeforeAppShutdownOffMain(),
+              testing::ExitedWithCode(0), "");
+}
+
+TEST(ShutdownBlockingTicketDeathTest, CreateTicketAfterAppShutdownOffMain)
+{
+  EXPECT_EXIT(DoCreateTicketAfterAppShutdownOffMain(),
+              testing::ExitedWithCode(0), "");
+}
+
+TEST(ShutdownBlockingTicketDeathTest, TwoTicketsWithSameNameBothBlockShutdown)
+{
+  EXPECT_EXIT(DoTwoTicketsWithSameNameBothBlockShutdown(),
+              testing::ExitedWithCode(0), "");
+}
+
+# undef DISABLE_CRASH_REPORTING
+
+#endif
diff --git a/dom/media/gtest/TestMuxer.cpp b/dom/media/gtest/TestMuxer.cpp
new file mode 100644
index 0000000000..1c6c128eef
--- /dev/null
+++ b/dom/media/gtest/TestMuxer.cpp
@@ -0,0 +1,212 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <vector>
+
+#include "ContainerWriter.h"
+#include "EncodedFrame.h"
+#include "gtest/gtest.h"
+#include "gmock/gmock.h"
+#include "Muxer.h"
+#include "OpusTrackEncoder.h"
+#include "WebMWriter.h"
+
+using namespace mozilla;
+using media::TimeUnit;
+using testing::_;
+using testing::ElementsAre;
+using testing::Return;
+using testing::StaticAssertTypeEq;
+
+// Builds an OpusMetadata with the given channel count and sample rate, and
+// zero-filled id/comment headers of the requested sizes.
+static RefPtr<TrackMetadataBase> CreateOpusMetadata(int32_t aChannels,
+                                                    float aSamplingFrequency,
+                                                    size_t aIdHeaderSize,
+                                                    size_t aCommentHeaderSize) {
+  auto meta = MakeRefPtr<OpusMetadata>();
+  meta->mChannels = aChannels;
+  meta->mSamplingFrequency = aSamplingFrequency;
+  meta->mIdHeader.SetLength(aIdHeaderSize);
+  for (auto& byte : meta->mIdHeader) {
+    byte = 0;
+  }
+  meta->mCommentHeader.SetLength(aCommentHeaderSize);
+  for (auto& byte : meta->mCommentHeader) {
+    byte = 0;
+  }
+  return meta;
+}
+
+// Builds a VP8Metadata whose display size matches its coded frame size.
+static RefPtr<TrackMetadataBase> CreateVP8Metadata(int32_t aWidth,
+                                                   int32_t aHeight) {
+  auto meta = MakeRefPtr<VP8Metadata>();
+  meta->mWidth = aWidth;
+  meta->mDisplayWidth = aWidth;
+  meta->mHeight = aHeight;
+  meta->mDisplayHeight = aHeight;
+  return meta;
+}
+
+// Creates an EncodedFrame of aDataSize default-initialized bytes. Opus frames
+// use a 48 kHz timebase (duration converted to samples); all other frame
+// types use a microsecond timebase.
+static RefPtr<EncodedFrame> CreateFrame(EncodedFrame::FrameType aType,
+                                        const TimeUnit& aTime,
+                                        const TimeUnit& aDuration,
+                                        size_t aDataSize) {
+  auto data = MakeRefPtr<EncodedFrame::FrameData>();
+  data->SetLength(aDataSize);
+  if (aType == EncodedFrame::OPUS_AUDIO_FRAME) {
+    // Opus duration is in samples, so figure out how many samples will put us
+    // closest to aDurationUs without going over.
+    return MakeRefPtr<EncodedFrame>(aTime,
+                                    TimeUnitToFrames(aDuration, 48000).value(),
+                                    48000, aType, std::move(data));
+  }
+  return MakeRefPtr<EncodedFrame>(
+      aTime, TimeUnitToFrames(aDuration, USECS_PER_S).value(), USECS_PER_S,
+      aType, std::move(data));
+}
+
+// GMock stand-in for ContainerWriter so tests can assert the exact call
+// sequence the Muxer makes and stub each call's return value.
+class MockContainerWriter : public ContainerWriter {
+ public:
+  MOCK_METHOD2(WriteEncodedTrack,
+               nsresult(const nsTArray<RefPtr<EncodedFrame>>&, uint32_t));
+  MOCK_METHOD1(SetMetadata,
+               nsresult(const nsTArray<RefPtr<TrackMetadataBase>>&));
+  MOCK_METHOD0(IsWritingComplete, bool());
+  MOCK_METHOD2(GetContainerData,
+               nsresult(nsTArray<nsTArray<uint8_t>>*, uint32_t));
+};
+
+// With the video queue finished up-front, the muxer operates audio-only: it
+// sets metadata once, writes the single audio frame with END_OF_STREAM, and
+// produces header then payload data.
+TEST(MuxerTest, AudioOnly)
+{
+  MediaQueue<EncodedFrame> audioQueue;
+  MediaQueue<EncodedFrame> videoQueue;
+  videoQueue.Finish();
+  // Raw pointer stays valid for setting expectations; the Muxer owns it via
+  // WrapUnique.
+  MockContainerWriter* writer = new MockContainerWriter();
+  Muxer muxer(WrapUnique<ContainerWriter>(writer), audioQueue, videoQueue);
+
+  // Prepare data
+
+  auto opusMeta = CreateOpusMetadata(1, 48000, 16, 16);
+  auto audioFrame =
+      CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, TimeUnit::FromSeconds(0),
+                  TimeUnit::FromSeconds(0.2), 4096);
+
+  // Expectations
+
+  EXPECT_CALL(*writer, SetMetadata(ElementsAre(opusMeta)))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, WriteEncodedTrack(ElementsAre(audioFrame),
+                                         ContainerWriter::END_OF_STREAM))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::GET_HEADER))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::FLUSH_NEEDED))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, IsWritingComplete()).Times(0);
+
+  // Test
+
+  EXPECT_EQ(muxer.SetMetadata(nsTArray<RefPtr<TrackMetadataBase>>({opusMeta})),
+            NS_OK);
+  audioQueue.Push(audioFrame);
+  audioQueue.Finish();
+  nsTArray<nsTArray<uint8_t>> buffers;
+  EXPECT_EQ(muxer.GetData(&buffers), NS_OK);
+}
+
+// With both tracks present, the muxer writes both metadata entries and, per
+// the expectation below, the video frame ahead of the audio frame even though
+// both start at t=0.
+TEST(MuxerTest, AudioVideo)
+{
+  MediaQueue<EncodedFrame> audioQueue;
+  MediaQueue<EncodedFrame> videoQueue;
+  MockContainerWriter* writer = new MockContainerWriter();
+  Muxer muxer(WrapUnique<ContainerWriter>(writer), audioQueue, videoQueue);
+
+  // Prepare data
+
+  auto opusMeta = CreateOpusMetadata(1, 48000, 16, 16);
+  auto vp8Meta = CreateVP8Metadata(640, 480);
+  auto audioFrame =
+      CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, TimeUnit::FromSeconds(0),
+                  TimeUnit::FromSeconds(0.2), 4096);
+  auto videoFrame =
+      CreateFrame(EncodedFrame::VP8_I_FRAME, TimeUnit::FromSeconds(0),
+                  TimeUnit::FromSeconds(0.05), 65536);
+
+  // Expectations
+
+  EXPECT_CALL(*writer, SetMetadata(ElementsAre(opusMeta, vp8Meta)))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, WriteEncodedTrack(ElementsAre(videoFrame, audioFrame),
+                                         ContainerWriter::END_OF_STREAM))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::GET_HEADER))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::FLUSH_NEEDED))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, IsWritingComplete()).Times(0);
+
+  // Test
+
+  EXPECT_EQ(muxer.SetMetadata(
+                nsTArray<RefPtr<TrackMetadataBase>>({opusMeta, vp8Meta})),
+            NS_OK);
+  audioQueue.Push(audioFrame);
+  audioQueue.Finish();
+  videoQueue.Push(videoFrame);
+  videoQueue.Finish();
+  nsTArray<nsTArray<uint8_t>> buffers;
+  EXPECT_EQ(muxer.GetData(&buffers), NS_OK);
+}
+
+// Frames pushed in mixed order across the two queues (a0, v0, v50 before a48)
+// must still be handed to the writer in the single expected order
+// (v0, a0, a48, v50).
+TEST(MuxerTest, AudioVideoOutOfOrder)
+{
+  MediaQueue<EncodedFrame> audioQueue;
+  MediaQueue<EncodedFrame> videoQueue;
+  MockContainerWriter* writer = new MockContainerWriter();
+  Muxer muxer(WrapUnique<ContainerWriter>(writer), audioQueue, videoQueue);
+
+  // Prepare data
+
+  auto opusMeta = CreateOpusMetadata(1, 48000, 16, 16);
+  auto vp8Meta = CreateVP8Metadata(640, 480);
+  auto a0 =
+      CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, TimeUnit::FromMicroseconds(0),
+                  TimeUnit::FromMicroseconds(48), 4096);
+  auto v0 =
+      CreateFrame(EncodedFrame::VP8_I_FRAME, TimeUnit::FromMicroseconds(0),
+                  TimeUnit::FromMicroseconds(50), 65536);
+  auto a48 = CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME,
+                         TimeUnit::FromMicroseconds(48),
+                         TimeUnit::FromMicroseconds(48), 4096);
+  auto v50 =
+      CreateFrame(EncodedFrame::VP8_I_FRAME, TimeUnit::FromMicroseconds(50),
+                  TimeUnit::FromMicroseconds(50), 65536);
+
+  // Expectations
+
+  EXPECT_CALL(*writer, SetMetadata(ElementsAre(opusMeta, vp8Meta)))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, WriteEncodedTrack(ElementsAre(v0, a0, a48, v50),
+                                         ContainerWriter::END_OF_STREAM))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::GET_HEADER))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::FLUSH_NEEDED))
+      .WillOnce(Return(NS_OK));
+  EXPECT_CALL(*writer, IsWritingComplete()).Times(0);
+
+  // Test
+
+  EXPECT_EQ(muxer.SetMetadata(
+                nsTArray<RefPtr<TrackMetadataBase>>({opusMeta, vp8Meta})),
+            NS_OK);
+  audioQueue.Push(a0);
+  videoQueue.Push(v0);
+  videoQueue.Push(v50);
+  videoQueue.Finish();
+  audioQueue.Push(a48);
+  audioQueue.Finish();
+  nsTArray<nsTArray<uint8_t>> buffers;
+  EXPECT_EQ(muxer.GetData(&buffers), NS_OK);
+}
diff --git a/dom/media/gtest/TestOggWriter.cpp b/dom/media/gtest/TestOggWriter.cpp
new file mode 100644
index 0000000000..d9df697cfe
--- /dev/null
+++ b/dom/media/gtest/TestOggWriter.cpp
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "OggWriter.h"
+#include "OpusTrackEncoder.h"
+
+using namespace mozilla;
+
+// Writing multiple 4kB-pages should return all of them on getting.
+TEST(TestOggWriter, MultiPageInput)
+{
+  // Minimal Opus metadata; header contents don't matter for this test.
+  auto opusMeta = MakeRefPtr<OpusMetadata>();
+  opusMeta->mChannels = 1;
+  opusMeta->mSamplingFrequency = 48000;
+  opusMeta->mIdHeader.AppendElement(1);
+  opusMeta->mCommentHeader.AppendElement(1);
+  AutoTArray<RefPtr<TrackMetadataBase>, 1> metadata;
+  metadata.AppendElement(std::move(opusMeta));
+
+  OggWriter ogg;
+  MOZ_ALWAYS_SUCCEEDS(ogg.SetMetadata(metadata));
+  {
+    // Drain the header pages first so only payload remains for the check.
+    nsTArray<nsTArray<uint8_t>> buffer;
+    MOZ_ALWAYS_SUCCEEDS(
+        ogg.GetContainerData(&buffer, ContainerWriter::GET_HEADER));
+  }
+
+  size_t inputBytes = 0;
+  const size_t USECS_PER_MS = 1000;
+  auto frameData = MakeRefPtr<EncodedFrame::FrameData>();
+  frameData->SetLength(320);  // 320B per 20ms == 128kbps
+  PodZero(frameData->Elements(), frameData->Length());
+  // 50 frames at 320B = 16kB = 4 4kB-pages
+  for (int i = 0; i < 50; ++i) {
+    auto frame = MakeRefPtr<EncodedFrame>(
+        media::TimeUnit::FromMicroseconds(20 * USECS_PER_MS * i),
+        48000 / 1000 * 20 /* 20ms */, 48000, EncodedFrame::OPUS_AUDIO_FRAME,
+        frameData);
+    AutoTArray<RefPtr<EncodedFrame>, 1> frames;
+    frames.AppendElement(std::move(frame));
+    uint32_t flags = 0;
+    if (i == 49) {
+      flags |= ContainerWriter::END_OF_STREAM;
+    }
+    MOZ_ALWAYS_SUCCEEDS(ogg.WriteEncodedTrack(frames, flags));
+    inputBytes += frameData->Length();
+  }
+
+  nsTArray<nsTArray<uint8_t>> buffer;
+  MOZ_ALWAYS_SUCCEEDS(
+      ogg.GetContainerData(&buffer, ContainerWriter::FLUSH_NEEDED));
+  size_t outputBytes = 0;
+  for (const auto& b : buffer) {
+    outputBytes += b.Length();
+  }
+
+  // Output is the 16000 input bytes plus 208 bytes of difference —
+  // presumably Ogg page framing overhead; confirm against OggWriter.
+  EXPECT_EQ(inputBytes, 16000U);
+  EXPECT_EQ(outputBytes, 16208U);
+}
diff --git a/dom/media/gtest/TestOpusParser.cpp b/dom/media/gtest/TestOpusParser.cpp
new file mode 100644
index 0000000000..639fe7cfc0
--- /dev/null
+++ b/dom/media/gtest/TestOpusParser.cpp
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "OpusParser.h"
+#include <algorithm>
+
+using namespace mozilla;
+
+TEST(OpusParser, Mapping2)
+{
+  // Channel counts valid for Opus channel mapping family 2 (Ambisonics):
+  // (n+1)^2 and (n+1)^2 + 2 for ambisonic orders n = 0..14 (RFC 8486).
+  uint8_t validChannels[] = {1, 3, 4, 6, 9, 11, 16, 18, 25, 27,
+                             36, 38, 49, 51, 64, 66, 81, 83, 100, 102,
+                             121, 123, 144, 146, 169, 171, 196, 198, 225, 227};
+  // Iterate with an int so the full uint8_t range including 255 is covered.
+  // The previous `uint8_t channels < 255` loop never tested 255, and
+  // `<= 255` on a uint8_t would wrap and never terminate.
+  for (int i = 0; i <= 255; i++) {
+    uint8_t channels = static_cast<uint8_t>(i);
+    bool found = OpusParser::IsValidMapping2ChannelsCount(channels);
+    bool foundTable =
+        std::find(std::begin(validChannels), std::end(validChannels),
+                  channels) != std::end(validChannels);
+    EXPECT_EQ(found, foundTable);
+  }
+}
diff --git a/dom/media/gtest/TestPacer.cpp b/dom/media/gtest/TestPacer.cpp
new file mode 100644
index 0000000000..df9bb4c9e5
--- /dev/null
+++ b/dom/media/gtest/TestPacer.cpp
@@ -0,0 +1,189 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "mozilla/gtest/WaitFor.h"
+#include "Pacer.h"
+#include "VideoUtils.h"
+
+using namespace mozilla;
+
+// Harness owning a Pacer<T> on a WebRTC-worker task queue, with helpers for
+// enqueueing items from the test thread and for orderly shutdown.
+template <typename T>
+class PacerTest {
+ protected:
+  explicit PacerTest(TimeDuration aDuplicationInterval)
+      : mTaskQueue(TaskQueue::Create(
+            GetMediaThreadPool(MediaThreadType::WEBRTC_WORKER), "PacerTest")),
+        mPacer(MakeRefPtr<Pacer<T>>(mTaskQueue, aDuplicationInterval)),
+        mInterval(aDuplicationInterval) {}
+
+  // Helper for calling `mPacer->Enqueue(...)`. Dispatches an event to the
+  // current thread which will enqueue the event to make sure that any listeners
+  // registered by a call to `WaitFor(...)` have been registered before events
+  // start being processed on a background queue.
+  void EnqueueSoon(T aItem, TimeStamp aTime) {
+    MOZ_ALWAYS_SUCCEEDS(NS_DispatchToCurrentThread(NS_NewRunnableFunction(
+        "PacerTest::EnqueueSoon",
+        [pacer = mPacer, aItem = std::move(aItem), aTime] {
+          pacer->Enqueue(std::move(aItem), aTime);
+        })));
+  }
+
+  // Shuts down the pacer, then the task queue once the pacer is done.
+  void TearDown() {
+    mPacer->Shutdown()->Then(mTaskQueue, __func__,
+                             [tq = mTaskQueue] { tq->BeginShutdown(); });
+  }
+
+  const RefPtr<TaskQueue> mTaskQueue;
+  const RefPtr<Pacer<T>> mPacer;
+  const TimeDuration mInterval;
+};
+
+// Concrete int-typed fixture bridging PacerTest<int> into gtest's TEST_F.
+class PacerTestInt : public PacerTest<int>, public ::testing::Test {
+ protected:
+  explicit PacerTestInt(TimeDuration aDuplicationInterval)
+      : PacerTest<int>(aDuplicationInterval) {}
+
+  void TearDown() override { PacerTest::TearDown(); }
+};
+
+// 10 s duplication interval: long enough that no duplicates fire in-test.
+class PacerTestIntLongDuplication : public PacerTestInt {
+ protected:
+  PacerTestIntLongDuplication() : PacerTestInt(TimeDuration::FromSeconds(10)) {}
+};
+
+// 10 ms duplication interval: short enough to observe duplicated items.
+class PacerTestIntTenMsDuplication : public PacerTestInt {
+ protected:
+  PacerTestIntTenMsDuplication()
+      : PacerTestInt(TimeDuration::FromMilliseconds(10)) {}
+};
+
+// A single item enqueued for a future time is delivered no earlier than that
+// time, carrying its target timestamp.
+TEST_F(PacerTestIntLongDuplication, Single) {
+  auto now = TimeStamp::Now();
+  auto d1 = TimeDuration::FromMilliseconds(100);
+  EnqueueSoon(1, now + d1);
+
+  auto [i, time] = WaitFor(TakeN(mPacer->PacedItemEvent(), 1)).unwrap()[0];
+  EXPECT_GE(TimeStamp::Now() - now, d1);
+  EXPECT_EQ(i, 1);
+  EXPECT_EQ(time - now, d1);
+}
+
+// An item whose timestamp is already in the past is still delivered, and
+// keeps its original (past) timestamp.
+TEST_F(PacerTestIntLongDuplication, Past) {
+  auto now = TimeStamp::Now();
+  auto d1 = TimeDuration::FromMilliseconds(100);
+  EnqueueSoon(1, now - d1);
+
+  auto [i, time] = WaitFor(TakeN(mPacer->PacedItemEvent(), 1)).unwrap()[0];
+  EXPECT_GE(TimeStamp::Now() - now, -d1);
+  EXPECT_EQ(i, 1);
+  EXPECT_EQ(time - now, -d1);
+}
+
+// Enqueueing item 3 (at d2) after item 2 (at the later d3) results in only
+// items 1 and 3 being delivered — enqueueing an earlier-timestamped item
+// appears to drop queued later items (time reset); confirm against Pacer.
+TEST_F(PacerTestIntLongDuplication, TimeReset) {
+  auto now = TimeStamp::Now();
+  auto d1 = TimeDuration::FromMilliseconds(100);
+  auto d2 = TimeDuration::FromMilliseconds(200);
+  auto d3 = TimeDuration::FromMilliseconds(300);
+  EnqueueSoon(1, now + d1);
+  EnqueueSoon(2, now + d3);
+  EnqueueSoon(3, now + d2);
+
+  auto items = WaitFor(TakeN(mPacer->PacedItemEvent(), 2)).unwrap();
+
+  {
+    auto [i, time] = items[0];
+    EXPECT_GE(TimeStamp::Now() - now, d1);
+    EXPECT_EQ(i, 1);
+    EXPECT_EQ(time - now, d1);
+  }
+  {
+    auto [i, time] = items[1];
+    EXPECT_GE(TimeStamp::Now() - now, d2);
+    EXPECT_EQ(i, 3);
+    EXPECT_EQ(time - now, d2);
+  }
+}
+
+// With a 10 ms duplication interval, a single item is delivered once at its
+// target time and then re-delivered one interval later.
+TEST_F(PacerTestIntTenMsDuplication, SingleDuplication) {
+  auto now = TimeStamp::Now();
+  auto d1 = TimeDuration::FromMilliseconds(100);
+  EnqueueSoon(1, now + d1);
+
+  auto items = WaitFor(TakeN(mPacer->PacedItemEvent(), 2)).unwrap();
+
+  {
+    auto [i, time] = items[0];
+    EXPECT_GE(TimeStamp::Now() - now, d1);
+    EXPECT_EQ(i, 1);
+    EXPECT_EQ(time - now, d1);
+  }
+  {
+    auto [i, time] = items[1];
+    EXPECT_GE(TimeStamp::Now() - now, d1 + mInterval);
+    EXPECT_EQ(i, 1);
+    EXPECT_EQ(time - now, d1 + mInterval);
+  }
+}
+
+// Item 2 lands 1 us BEFORE item 1's duplication deadline, so it supersedes
+// item 1's duplicate; item 2 is then duplicated itself.
+TEST_F(PacerTestIntTenMsDuplication, RacyDuplication1) {
+  auto now = TimeStamp::Now();
+  auto d1 = TimeDuration::FromMilliseconds(100);
+  auto d2 = d1 + mInterval - TimeDuration::FromMicroseconds(1);
+  EnqueueSoon(1, now + d1);
+  EnqueueSoon(2, now + d2);
+
+  auto items = WaitFor(TakeN(mPacer->PacedItemEvent(), 3)).unwrap();
+
+  {
+    auto [i, time] = items[0];
+    EXPECT_GE(TimeStamp::Now() - now, d1);
+    EXPECT_EQ(i, 1);
+    EXPECT_EQ(time - now, d1);
+  }
+  {
+    auto [i, time] = items[1];
+    EXPECT_GE(TimeStamp::Now() - now, d2);
+    EXPECT_EQ(i, 2);
+    EXPECT_EQ(time - now, d2);
+  }
+  {
+    auto [i, time] = items[2];
+    EXPECT_GE(TimeStamp::Now() - now, d2 + mInterval);
+    EXPECT_EQ(i, 2);
+    EXPECT_EQ(time - now, d2 + mInterval);
+  }
+}
+
+// Item 2 lands 1 us AFTER item 1's duplication deadline, so item 1's
+// duplicate fires first, followed by item 2 at its own time.
+TEST_F(PacerTestIntTenMsDuplication, RacyDuplication2) {
+  auto now = TimeStamp::Now();
+  auto d1 = TimeDuration::FromMilliseconds(100);
+  auto d2 = d1 + mInterval + TimeDuration::FromMicroseconds(1);
+  EnqueueSoon(1, now + d1);
+  EnqueueSoon(2, now + d2);
+
+  auto items = WaitFor(TakeN(mPacer->PacedItemEvent(), 3)).unwrap();
+
+  {
+    auto [i, time] = items[0];
+    EXPECT_GE(TimeStamp::Now() - now, d1);
+    EXPECT_EQ(i, 1);
+    EXPECT_EQ(time - now, d1);
+  }
+  {
+    auto [i, time] = items[1];
+    EXPECT_GE(TimeStamp::Now() - now, d1 + mInterval);
+    EXPECT_EQ(i, 1);
+    EXPECT_EQ(time - now, d1 + mInterval);
+  }
+  {
+    auto [i, time] = items[2];
+    EXPECT_GE(TimeStamp::Now() - now, d2);
+    EXPECT_EQ(i, 2);
+    EXPECT_EQ(time - now, d2);
+  }
+}
diff --git a/dom/media/gtest/TestRTCStatsTimestampMaker.cpp b/dom/media/gtest/TestRTCStatsTimestampMaker.cpp
new file mode 100644
index 0000000000..442789f031
--- /dev/null
+++ b/dom/media/gtest/TestRTCStatsTimestampMaker.cpp
@@ -0,0 +1,115 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cmath>
+
+#include "gtest/gtest.h"
+#include "libwebrtcglue/SystemTime.h"
+
+using namespace mozilla;
+using dom::PerformanceService;
+using dom::RTCStatsTimestamp;
+using dom::RTCStatsTimestampMaker;
+
+static constexpr auto kWebrtcTimeOffset = webrtc::Timestamp::Seconds(123456789);
+
+// NTP conversion through the clock must agree (to within 1 ms, allowing for
+// rounding differences) with RTCStatsTimestamp's 1-Jan-1970 conversion.
+TEST(RTCStatsTimestampMakerRealtimeClock, ConvertTimestampToNtpTime)
+{
+  auto maker = RTCStatsTimestampMaker::Create();
+  RTCStatsTimestampMakerRealtimeClock clock(maker);
+  // NTP epoch (1900) to Unix epoch (1970) offset, in milliseconds.
+  constexpr auto ntpTo1Jan1970Ms = webrtc::kNtpJan1970 * 1000LL;
+  for (int i = 1000; i < 20000; i += 93) {
+    const auto t = kWebrtcTimeOffset + webrtc::TimeDelta::Micros(i);
+    const auto ntp = clock.ConvertTimestampToNtpTime(t);
+    // Because of precision differences, these round to a specific millisecond
+    // slightly differently.
+    EXPECT_NEAR(ntp.ToMs() - ntpTo1Jan1970Ms,
+                RTCStatsTimestamp::FromRealtime(maker, t).To1Jan1970().ms(),
+                1.0)
+        << " for i=" << i;
+  }
+}
+
+// Round-tripping realtime -> NTP -> DOM time must agree (to within 1 ms) with
+// converting realtime -> DOM time directly.
+TEST(RTCStatsTimestampMaker, ConvertNtpToDomTime)
+{
+  auto maker = RTCStatsTimestampMaker::Create();
+  RTCStatsTimestampMakerRealtimeClock clock(maker);
+  for (int i = 1000; i < 20000; i += 93) {
+    const auto t = kWebrtcTimeOffset + webrtc::TimeDelta::Micros(i);
+    const auto ntp = clock.ConvertTimestampToNtpTime(t);
+    const auto dom =
+        RTCStatsTimestamp::FromNtp(maker, webrtc::Timestamp::Millis(ntp.ToMs()))
+            .ToDom();
+    // Because of precision differences, these round to a specific millisecond
+    // slightly differently.
+    EXPECT_NEAR(std::lround(dom),
+                std::lround(RTCStatsTimestamp::FromRealtime(maker, t).ToDom()),
+                1.0)
+        << " for i=" << i;
+  }
+}
+
+// MozTime (TimeStamp) conversion must round-trip exactly, to the microsecond.
+TEST(RTCStatsTimestampMaker, ConvertMozTime)
+{
+  auto maker = RTCStatsTimestampMaker::Create();
+  const auto start = TimeStamp::Now();
+  // NOTE(review): `clock` is not referenced in this test — presumably kept
+  // for parity with the other tests; confirm whether construction matters.
+  RTCStatsTimestampMakerRealtimeClock clock(maker);
+  for (int i = 1000; i < 20000; i += 93) {
+    const auto duration = TimeDuration::FromMicroseconds(i);
+    const auto time = RTCStatsTimestamp::FromMozTime(maker, start + duration);
+    EXPECT_EQ(duration.ToMicroseconds(),
+              (time.ToMozTime() - start).ToMicroseconds())
+        << " for i=" << i;
+  }
+}
+
+// Realtime (webrtc::Timestamp) conversion must round-trip to within 1 us.
+TEST(RTCStatsTimestampMaker, ConvertRealtime)
+{
+  auto maker = RTCStatsTimestampMaker::Create();
+  const auto start = kWebrtcTimeOffset;
+  // NOTE(review): `clock` is not referenced in this test — confirm whether
+  // construction matters.
+  RTCStatsTimestampMakerRealtimeClock clock(maker);
+  for (int i = 1000; i < 20000; i += 93) {
+    const auto duration = webrtc::TimeDelta::Micros(i);
+    const auto time = RTCStatsTimestamp::FromRealtime(maker, start + duration);
+    // Because of precision differences, these round to a specific Microsecond
+    // slightly differently.
+    EXPECT_NEAR(duration.us(), (time.ToRealtime() - start).us(), 1)
+        << " for i=" << i;
+  }
+}
+
+// 1-Jan-1970-based conversion must round-trip to within 1 us; the start time
+// includes the page's performance time origin.
+TEST(RTCStatsTimestampMaker, Convert1Jan1970)
+{
+  auto maker = RTCStatsTimestampMaker::Create();
+  const auto start =
+      kWebrtcTimeOffset +
+      webrtc::TimeDelta::Millis(PerformanceService::GetOrCreate()->TimeOrigin(
+          WebrtcSystemTimeBase()));
+  // NOTE(review): `clock` is not referenced in this test — confirm whether
+  // construction matters.
+  RTCStatsTimestampMakerRealtimeClock clock(maker);
+  for (int i = 1000; i < 20000; i += 93) {
+    const auto duration = webrtc::TimeDelta::Micros(i);
+    const auto time = RTCStatsTimestamp::From1Jan1970(maker, start + duration);
+    // Because of precision differences, these round to a specific Microsecond
+    // slightly differently.
+    EXPECT_NEAR(duration.us(), (time.To1Jan1970() - start).us(), 1)
+        << " for i=" << i;
+  }
+}
+
+// DOM-realtime conversion must round-trip to within 1 us.
+TEST(RTCStatsTimestampMaker, ConvertDomRealtime)
+{
+  auto maker = RTCStatsTimestampMaker::Create();
+  const auto start = kWebrtcTimeOffset;
+  // NOTE(review): `clock` is not referenced in this test — confirm whether
+  // construction matters.
+  RTCStatsTimestampMakerRealtimeClock clock(maker);
+  for (int i = 1000; i < 20000; i += 93) {
+    const auto duration = webrtc::TimeDelta::Micros(i);
+    const auto time =
+        RTCStatsTimestamp::FromDomRealtime(maker, start + duration);
+    // Because of precision differences, these round to a specific Microsecond
+    // slightly differently.
+    EXPECT_NEAR(duration.us(), (time.ToDomRealtime() - start).us(), 1)
+        << " for i=" << i;
+  }
+}
diff --git a/dom/media/gtest/TestRust.cpp b/dom/media/gtest/TestRust.cpp
new file mode 100644
index 0000000000..059500767f
--- /dev/null
+++ b/dom/media/gtest/TestRust.cpp
@@ -0,0 +1,10 @@
+#include <stdint.h>
+#include "gtest/gtest.h"
+
+extern "C" uint8_t* test_rust();
+
+// Smoke test that the Rust test crate links and its exported function is
+// callable from C++.
+TEST(rust, CallFromCpp)
+{
+  const char* greeting = reinterpret_cast<char*>(test_rust());
+  EXPECT_STREQ(greeting, "hello from rust.");
+}
diff --git a/dom/media/gtest/TestTimeUnit.cpp b/dom/media/gtest/TestTimeUnit.cpp
new file mode 100644
index 0000000000..173d38ecd5
--- /dev/null
+++ b/dom/media/gtest/TestTimeUnit.cpp
@@ -0,0 +1,295 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include <algorithm>
+#include <vector>
+
+#include "TimeUnits.h"
+
+using namespace mozilla;
+using namespace mozilla::media;
+using TimeUnit = mozilla::media::TimeUnit;
+
+// Arithmetic on TimeUnit (ticks over a base/rate) preserves the base and
+// operates on the tick counts; mixed-base operations keep the left operand's
+// base where shown below.
+TEST(TimeUnit, BasicArithmetic)
+{
+  const TimeUnit a(1000, 44100);
+  {
+    TimeUnit b = a * 10;
+    EXPECT_EQ(b.mBase, 44100);
+    EXPECT_EQ(b.mTicks.value(), a.mTicks.value() * 10);
+    EXPECT_EQ(a * 10, b);
+  }
+  {
+    TimeUnit b = a / 10;
+    EXPECT_EQ(b.mBase, 44100);
+    EXPECT_EQ(b.mTicks.value(), a.mTicks.value() / 10);
+    EXPECT_EQ(a / 10, b);
+  }
+  {
+    TimeUnit b = TimeUnit(10, 44100);
+    b += a;
+    EXPECT_EQ(b.mBase, 44100);
+    EXPECT_EQ(b.mTicks.value(), a.mTicks.value() + 10);
+    EXPECT_EQ(b - a, TimeUnit(10, 44100));
+  }
+  {
+    TimeUnit b = TimeUnit(1010, 44100);
+    b -= a;  // now 10
+    EXPECT_EQ(b.mBase, 44100);
+    EXPECT_EQ(b.mTicks.value(), 10);
+    EXPECT_EQ(a + b, TimeUnit(1010, 44100));
+  }
+  {
+    TimeUnit b = TimeUnit(4010, 44100);
+    TimeUnit c = b % a;  // now 10
+    EXPECT_EQ(c.mBase, 44100);
+    EXPECT_EQ(c.mTicks.value(), 10);
+  }
+  {
+    // Adding 6s in nanoseconds (e.g. coming from script) to a typical number
+    // from an mp4, 9001 in base 90000
+    TimeUnit b = TimeUnit(6000000000, 1000000000);
+    TimeUnit c = TimeUnit(9001, 90000);
+    TimeUnit d = c + b;
+    EXPECT_EQ(d.mBase, 90000);
+    EXPECT_EQ(d.mTicks.value(), 549001);
+  }
+  {
+    // Subtracting 9001 in base 9000 from 6s in nanoseconds (e.g. coming from
+    // script), converting to back to base 9000.
+    TimeUnit b = TimeUnit(6000000000, 1000000000);
+    TimeUnit c = TimeUnit(9001, 90000);
+    TimeUnit d = (b - c).ToBase(90000);
+    EXPECT_EQ(d.mBase, 90000);
+    EXPECT_EQ(d.mTicks.value(), 530999);
+  }
+}
+
+// Factory methods default to a microsecond base (1e6); ToBase converts with
+// an explicit error out-parameter.
+TEST(TimeUnit, Base)
+{
+  {
+    TimeUnit a = TimeUnit::FromSeconds(1);
+    EXPECT_EQ(a.mTicks.value(), 1000000);
+    EXPECT_EQ(a.mBase, 1000000);
+  }
+  {
+    TimeUnit a = TimeUnit::FromMicroseconds(44100000000);
+    EXPECT_EQ(a.mTicks.value(), 44100000000);
+    EXPECT_EQ(a.mBase, 1000000);
+  }
+  {
+    TimeUnit a = TimeUnit::FromSeconds(6.0);
+    EXPECT_EQ(a.mTicks.value(), 6000000);
+    EXPECT_EQ(a.mBase, 1000000);
+    double error;
+    // 6 s converts exactly into base 90000 (540000 ticks), so no error.
+    TimeUnit b = a.ToBase(90000, error);
+    EXPECT_EQ(error, 0);
+    EXPECT_EQ(b.mTicks.value(), 540000);
+    EXPECT_EQ(b.mBase, 90000);
+  }
+}
+
+// Seconds <-> TimeUnit conversions must round-trip within the precision a
+// double can represent for the given base.
+TEST(TimeUnit, Rounding)
+{
+  int64_t usecs = 662617;
+  double seconds = TimeUnit::FromMicroseconds(usecs).ToSeconds();
+  TimeUnit fromSeconds = TimeUnit::FromSeconds(seconds);
+  EXPECT_EQ(fromSeconds.mTicks.value(), usecs);
+  // TimeUnit base is microseconds if not explicitly passed.
+  EXPECT_EQ(fromSeconds.mBase, 1000000);
+  EXPECT_EQ(fromSeconds.ToMicroseconds(), usecs);
+
+  seconds = 4.169470123;
+  int64_t nsecs = 4169470123;
+  EXPECT_EQ(TimeUnit::FromSeconds(seconds, 1e9).ToNanoseconds(), nsecs);
+  EXPECT_EQ(TimeUnit::FromSeconds(seconds, 1e9).ToMicroseconds(), nsecs / 1000);
+
+  seconds = 2312312.16947012;
+  nsecs = 2312312169470120;
+  EXPECT_EQ(TimeUnit::FromSeconds(seconds, 1e9).ToNanoseconds(), nsecs);
+  EXPECT_EQ(TimeUnit::FromSeconds(seconds, 1e9).ToMicroseconds(), nsecs / 1000);
+
+  seconds = 2312312.169470123;
+  nsecs = 2312312169470123;
+  // A double doesn't have enough precision to roundtrip this time value
+  // correctly in this base, but the number of microseconds is still correct.
+  // This value is about 142.5 days however.
+  // This particular calculation results in exactly 1ns of difference after
+  // roundtrip. Enable this after removing the MOZ_CRASH in
+  // TimeUnit::FromSeconds:
+  // EXPECT_EQ(TimeUnit::FromSeconds(seconds, 1e9).ToNanoseconds() - nsecs, 1);
+  EXPECT_EQ(TimeUnit::FromSeconds(seconds, 1e9).ToMicroseconds(), nsecs / 1000);
+}
+
+// Ordering and equality must hold across differing bases, for zero, for
+// infinities, and for large tick counts (both reducible and co-prime bases).
+TEST(TimeUnit, Comparisons)
+{
+  TimeUnit a(0, 1e9);
+  TimeUnit b(1, 1e9);
+  TimeUnit c(1, 1e6);
+
+  EXPECT_GE(b, a);
+  EXPECT_GE(c, a);
+  EXPECT_GE(c, b);
+
+  EXPECT_GT(b, a);
+  EXPECT_GT(c, a);
+  EXPECT_GT(c, b);
+
+  EXPECT_LE(a, b);
+  EXPECT_LE(a, c);
+  EXPECT_LE(b, c);
+
+  EXPECT_LT(a, b);
+  EXPECT_LT(a, c);
+  EXPECT_LT(b, c);
+
+  // Equivalence of zero regardless of the base
+  TimeUnit d(0, 1);
+  TimeUnit e(0, 1000);
+  EXPECT_EQ(a, d);
+  EXPECT_EQ(a, e);
+
+  // Equivalence of time across bases
+  TimeUnit f(1000, 1e9);
+  TimeUnit g(1, 1e6);
+  EXPECT_EQ(f, g);
+
+  // Comparisons with infinity, same base
+  TimeUnit h = TimeUnit::FromInfinity();
+  TimeUnit i = TimeUnit::Zero();
+  EXPECT_LE(i, h);
+  EXPECT_LT(i, h);
+  EXPECT_GE(h, i);
+  EXPECT_GT(h, i);
+
+  // Comparisons with infinity, different base
+  TimeUnit j = TimeUnit::FromInfinity();
+  TimeUnit k = TimeUnit::Zero(1000000);
+  EXPECT_LE(k, j);
+  EXPECT_LT(k, j);
+  EXPECT_GE(j, k);
+  EXPECT_GT(j, k);
+
+  // Comparison of very big numbers, different base that have a gcd that makes
+  // it easy to reduce, to test the fraction reduction code
+  TimeUnit l = TimeUnit(123123120000000, 1000000000);
+  TimeUnit m = TimeUnit(123123120000000, 1000);
+  EXPECT_LE(l, m);
+  EXPECT_LT(l, m);
+  EXPECT_GE(m, l);
+  EXPECT_GT(m, l);
+
+  // Comparison of very big numbers, different base that are co-prime: worst
+  // cast scenario.
+  TimeUnit n = TimeUnit(123123123123123, 1000000000);
+  TimeUnit o = TimeUnit(123123123123123, 1000000001);
+  EXPECT_LE(o, n);
+  EXPECT_LT(o, n);
+  EXPECT_GE(n, o);
+  EXPECT_GT(n, o);
+
+  // Values taken from a real website (this is about 53 years, Date.now() in
+  // 2023).
+  TimeUnit leftBound(74332508253360, 44100);
+  TimeUnit rightBound(74332508297392, 44100);
+  TimeUnit fuzz(250000, 1000000);
+  TimeUnit time(1685544404790205, 1000000);
+
+  EXPECT_LT(leftBound - fuzz, time);
+  EXPECT_GT(time, leftBound - fuzz);
+  EXPECT_GE(rightBound + fuzz, time);
+  // NOTE(review): asymmetric with the GE/LT pairs above — if equality is
+  // meant to pass here, this should arguably be EXPECT_LE; confirm intent.
+  EXPECT_LT(time, rightBound + fuzz);
+
+  TimeUnit zero = TimeUnit::Zero();  // default base 1e6
+  TimeUnit datenow(
+      151737439364679,
+      90000);  // Also from `Date.now()` in a common base for an mp4
+  EXPECT_NE(zero, datenow);
+}
+
+// TimeUnit +/- must mirror IEEE-754 infinity semantics; indeterminate
+// results (inf - inf, inf + -inf) are reported via !IsValid().
+TEST(TimeUnit, InfinityMath)
+{
+  // Operator plus/minus uses floating point behaviour for positive and
+  // negative infinity values, i.e.:
+  // posInf + posInf = inf
+  // posInf + negInf = -nan
+  // posInf + finite = inf
+  // posInf - posInf = -nan
+  // posInf - negInf = inf
+  // posInf - finite = inf
+  // negInf + negInf = -inf
+  // negInf + posInf = -nan
+  // negInf + finite = -inf
+  // negInf - negInf = -nan
+  // negInf - posInf = -inf
+  // negInf - finite = -inf
+  // finite + posInf = inf
+  // finite - posInf = -inf
+  // finite + negInf = -inf
+  // finite - negInf = inf
+
+  const TimeUnit posInf = TimeUnit::FromInfinity();
+  EXPECT_EQ(TimeUnit::FromSeconds(mozilla::PositiveInfinity<double>()), posInf);
+
+  const TimeUnit negInf = TimeUnit::FromNegativeInfinity();
+  EXPECT_EQ(TimeUnit::FromSeconds(mozilla::NegativeInfinity<double>()), negInf);
+
+  EXPECT_EQ(posInf + posInf, posInf);
+  EXPECT_FALSE((posInf + negInf).IsValid());
+  EXPECT_FALSE((posInf - posInf).IsValid());
+  EXPECT_EQ(posInf - negInf, posInf);
+  EXPECT_EQ(negInf + negInf, negInf);
+  EXPECT_FALSE((negInf + posInf).IsValid());
+  EXPECT_FALSE((negInf - negInf).IsValid());
+  EXPECT_EQ(negInf - posInf, negInf);
+
+  const TimeUnit finite = TimeUnit::FromSeconds(42.0);
+  EXPECT_EQ(posInf - finite, posInf);
+  EXPECT_EQ(posInf + finite, posInf);
+  EXPECT_EQ(negInf - finite, negInf);
+  EXPECT_EQ(negInf + finite, negInf);
+
+  EXPECT_EQ(finite + posInf, posInf);
+  EXPECT_EQ(finite - posInf, negInf);
+  EXPECT_EQ(finite + negInf, negInf);
+  EXPECT_EQ(finite - negInf, posInf);
+}
+
+// Hundreds-of-nanoseconds (hns, the Windows media timebase) values derived
+// from frame counts must round-trip through TimeUnit at common sample rates,
+// over 10 hours of simulated time per rate.
+TEST(TimeUnit, BaseConversion)
+{
+  const int64_t packetSize = 1024;  // typical for AAC
+  int64_t sampleRates[] = {16000, 44100, 48000, 88200, 96000};
+  const double hnsPerSeconds = 10000000.;
+  for (auto sampleRate : sampleRates) {
+    int64_t frameCount = 0;
+    TimeUnit pts;
+    do {
+      // Compute a time in hundreds of nanoseconds based of frame count, typical
+      // on Windows platform, checking that it round trips properly.
+      int64_t hns = AssertedCast<int64_t>(
+          std::round(hnsPerSeconds * static_cast<double>(frameCount) /
+                     static_cast<double>(sampleRate)));
+      pts = TimeUnit::FromHns(hns, sampleRate);
+      EXPECT_EQ(
+          AssertedCast<int64_t>(std::round(pts.ToSeconds() * hnsPerSeconds)),
+          hns);
+      frameCount += packetSize;
+    } while (pts.ToSeconds() < 36000);
+  }
+}
+
+// Subtraction across bases keeps the left operand's base and rounds the
+// result to the representable tick count nearest the exact difference.
+TEST(TimeUnit, MinimumRoundingError)
+{
+  TimeUnit a(448, 48000);   // ≈9333 us
+  TimeUnit b(1, 1000000);   // 1 us
+  TimeUnit rv = a - b;      // should be as close to 9332 us as possible
+  EXPECT_EQ(rv.mTicks.value(), 448);  // 448 ticks ≈9333 us is closest to 9332 us
+  EXPECT_EQ(rv.mBase, 48000);
+
+  TimeUnit c(11, 1000000);  // 11 us
+  rv = a - c;               // should be as close to 9322 us as possible
+  EXPECT_EQ(rv.mTicks.value(), 447);  // 447 ticks ≈9312 us is closest to 9322 us
+  EXPECT_EQ(rv.mBase, 48000);
+}
diff --git a/dom/media/gtest/TestVPXDecoding.cpp b/dom/media/gtest/TestVPXDecoding.cpp
new file mode 100644
index 0000000000..d58ca24cc7
--- /dev/null
+++ b/dom/media/gtest/TestVPXDecoding.cpp
@@ -0,0 +1,96 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "mozilla/ArrayUtils.h"
+#include "nsTArray.h"
+#include "VPXDecoder.h"
+
+#include <stdio.h>
+
+using namespace mozilla;
+
+// Reads the whole file at aPath into aBuffer, failing the test on any I/O
+// error. Uses ASSERT_* so a failure aborts the calling test function.
+static void ReadVPXFile(const char* aPath, nsTArray<uint8_t>& aBuffer) {
+  FILE* f = fopen(aPath, "rb");
+  ASSERT_NE(f, (FILE*)nullptr);
+
+  int r = fseek(f, 0, SEEK_END);
+  ASSERT_EQ(r, 0);
+
+  long size = ftell(f);
+  ASSERT_NE(size, -1);
+  aBuffer.SetLength(size);
+
+  r = fseek(f, 0, SEEK_SET);
+  ASSERT_EQ(r, 0);
+
+  size_t got = fread(aBuffer.Elements(), 1, size, f);
+  ASSERT_EQ(got, size_t(size));
+
+  r = fclose(f);
+  ASSERT_EQ(r, 0);
+}
+
+// Validates the 32-byte IVF file header, fills `config` with the coded frame
+// size, strips the file header plus the first 12-byte frame header from
+// `data`, and returns the matching libvpx decoder interface (VP8 or VP9).
+// Returns nullptr if the data is not a supported IVF stream.
+static vpx_codec_iface_t* ParseIVFConfig(nsTArray<uint8_t>& data,
+                                         vpx_codec_dec_cfg_t& config) {
+  if (data.Length() < 32 + 12) {
+    // Not enough data for file & first frame headers.
+    return nullptr;
+  }
+  if (data[0] != 'D' || data[1] != 'K' || data[2] != 'I' || data[3] != 'F') {
+    // Expect 'DKIF' signature.
+    return nullptr;
+  }
+  if (data[4] != 0 || data[5] != 0) {
+    // Expect version==0.
+    return nullptr;
+  }
+  if (data[8] != 'V' || data[9] != 'P' ||
+      (data[10] != '8' && data[10] != '9') || data[11] != '0') {
+    // Expect 'VP80' or 'VP90'.
+    return nullptr;
+  }
+  // Width/height are little-endian 16-bit fields; combine the bytes with
+  // bitwise OR. (The previous logical OR `||` collapsed them to 0/1.)
+  config.w = uint32_t(data[12]) | (uint32_t(data[13]) << 8);
+  config.h = uint32_t(data[14]) | (uint32_t(data[15]) << 8);
+  vpx_codec_iface_t* codec =
+      (data[10] == '8') ? vpx_codec_vp8_dx() : vpx_codec_vp9_dx();
+  // Remove headers, to just leave raw VPx data to be decoded.
+  data.RemoveElementsAt(0, 32 + 12);
+  return codec;
+}
+
+// An input IVF file plus the decode result libvpx is expected to report.
+struct TestFileData {
+  const char* mFilename;
+  vpx_codec_err_t mDecodeResult;
+};
+// Regression inputs from fuzzing bugs; bug numbers are in the filenames.
+static const TestFileData testFiles[] = {
+    {"test_case_1224361.vp8.ivf", VPX_CODEC_OK},
+    {"test_case_1224363.vp8.ivf", VPX_CODEC_CORRUPT_FRAME},
+    {"test_case_1224369.vp8.ivf", VPX_CODEC_CORRUPT_FRAME}};
+
+// Decodes each regression file and checks libvpx reports the expected result
+// (OK or a corrupt-frame error) without crashing.
+TEST(libvpx, test_cases)
+{
+  for (size_t test = 0; test < ArrayLength(testFiles); ++test) {
+    nsTArray<uint8_t> data;
+    ReadVPXFile(testFiles[test].mFilename, data);
+    ASSERT_GT(data.Length(), 0u);
+
+    vpx_codec_dec_cfg_t config;
+    vpx_codec_iface_t* dx = ParseIVFConfig(data, config);
+    ASSERT_TRUE(dx);
+    config.threads = 2;
+
+    vpx_codec_ctx_t ctx;
+    PodZero(&ctx);
+    vpx_codec_err_t r = vpx_codec_dec_init(&ctx, dx, &config, 0);
+    ASSERT_EQ(VPX_CODEC_OK, r);
+
+    r = vpx_codec_decode(&ctx, data.Elements(), data.Length(), nullptr, 0);
+    // The decode result must match the expectation table; some of these
+    // inputs are known-corrupt.
+    EXPECT_EQ(testFiles[test].mDecodeResult, r);
+
+    r = vpx_codec_destroy(&ctx);
+    EXPECT_EQ(VPX_CODEC_OK, r);
+  }
+}
diff --git a/dom/media/gtest/TestVideoFrameConverter.cpp b/dom/media/gtest/TestVideoFrameConverter.cpp
new file mode 100644
index 0000000000..6be202d084
--- /dev/null
+++ b/dom/media/gtest/TestVideoFrameConverter.cpp
@@ -0,0 +1,508 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <iterator>
+
+#include "gtest/gtest.h"
+#include "libwebrtcglue/SystemTime.h"
+#include "mozilla/gtest/WaitFor.h"
+#include "MediaEventSource.h"
+#include "VideoFrameConverter.h"
+#include "YUVBufferGenerator.h"
+
+using namespace mozilla;
+
+class VideoFrameConverterTest;
+
// Test helper that subscribes to a VideoFrameConverter's output event and
// re-broadcasts each converted frame paired with the TimeStamp at which it
// was observed, so tests can assert on pacing/timing.
class FrameListener {
 public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(FrameListener)

  // Connects to aSource on the current AbstractThread; the connection stays
  // live until this listener is destroyed.
  explicit FrameListener(MediaEventSourceExc<webrtc::VideoFrame>& aSource) {
    mListener = aSource.Connect(AbstractThread::GetCurrent(), this,
                                &FrameListener::OnVideoFrameConverted);
  }

  // Forwards the frame together with the observation time (now).
  void OnVideoFrameConverted(webrtc::VideoFrame aVideoFrame) {
    mVideoFrameConvertedEvent.Notify(std::move(aVideoFrame), TimeStamp::Now());
  }

  // Event the tests listen on: (converted frame, time it was observed).
  MediaEventSource<webrtc::VideoFrame, TimeStamp>& VideoFrameConvertedEvent() {
    return mVideoFrameConvertedEvent;
  }

 private:
  ~FrameListener() { mListener.Disconnect(); }

  MediaEventListener mListener;
  MediaEventProducer<webrtc::VideoFrame, TimeStamp> mVideoFrameConvertedEvent;
};
+
// VideoFrameConverter subclass that re-exports some protected members so the
// tests can drive frame processing and listener registration directly.
class DebugVideoFrameConverter : public VideoFrameConverter {
 public:
  explicit DebugVideoFrameConverter(
      const dom::RTCStatsTimestampMaker& aTimestampMaker)
      : VideoFrameConverter(aTimestampMaker) {}

  using VideoFrameConverter::QueueForProcessing;
  using VideoFrameConverter::RegisterListener;
};
+
// Fixture wiring a DebugVideoFrameConverter to a FrameListener. The converter
// is registered in the constructor and shut down in TearDown().
class VideoFrameConverterTest : public ::testing::Test {
 protected:
  const dom::RTCStatsTimestampMaker mTimestampMaker;
  RefPtr<DebugVideoFrameConverter> mConverter;
  RefPtr<FrameListener> mListener;

  VideoFrameConverterTest()
      : mTimestampMaker(dom::RTCStatsTimestampMaker::Create()),
        mConverter(MakeAndAddRef<DebugVideoFrameConverter>(mTimestampMaker)),
        mListener(MakeAndAddRef<FrameListener>(
            mConverter->VideoFrameConvertedEvent())) {
    mConverter->RegisterListener();
  }

  void TearDown() override { mConverter->Shutdown(); }

  // Returns a promise resolved with the next aN converted frames, each paired
  // with the time at which the listener observed it.
  RefPtr<TakeNPromise<webrtc::VideoFrame, TimeStamp>> TakeNConvertedFrames(
      size_t aN) {
    return TakeN(mListener->VideoFrameConvertedEvent(), aN);
  }
};
+
// Returns true iff every pixel in the aWidth x aHeight region of the plane
// equals aValue. aStride is the number of bytes per row in aData; bytes
// beyond aWidth in each row are ignored.
static bool IsPlane(const uint8_t* aData, int aWidth, int aHeight, int aStride,
                    uint8_t aValue) {
  for (int row = 0; row < aHeight; ++row) {
    const uint8_t* rowData = aData + row * aStride;
    int col = 0;
    while (col < aWidth && rowData[col] == aValue) {
      ++col;
    }
    if (col != aWidth) {
      return false;  // found a mismatching pixel in this row
    }
  }
  return true;
}
+
+static bool IsFrameBlack(const webrtc::VideoFrame& aFrame) {
+ RefPtr<webrtc::I420BufferInterface> buffer =
+ aFrame.video_frame_buffer()->ToI420().get();
+ return IsPlane(buffer->DataY(), buffer->width(), buffer->height(),
+ buffer->StrideY(), 0x00) &&
+ IsPlane(buffer->DataU(), buffer->ChromaWidth(), buffer->ChromaHeight(),
+ buffer->StrideU(), 0x80) &&
+ IsPlane(buffer->DataV(), buffer->ChromaWidth(), buffer->ChromaHeight(),
+ buffer->StrideV(), 0x80);
+}
+
+VideoChunk GenerateChunk(int32_t aWidth, int32_t aHeight, TimeStamp aTime) {
+ YUVBufferGenerator generator;
+ generator.Init(gfx::IntSize(aWidth, aHeight));
+ VideoFrame f(generator.GenerateI420Image(), gfx::IntSize(aWidth, aHeight));
+ VideoChunk c;
+ c.mFrame.TakeFrom(&f);
+ c.mTimeStamp = aTime;
+ c.mDuration = 0;
+ return c;
+}
+
// A chunk with a current timestamp is converted promptly, keeping its
// dimensions and non-black content.
TEST_F(VideoFrameConverterTest, BasicConversion) {
  auto framesPromise = TakeNConvertedFrames(1);
  TimeStamp now = TimeStamp::Now();
  VideoChunk chunk = GenerateChunk(640, 480, now);
  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(chunk, false);
  auto frames = WaitFor(framesPromise).unwrap();
  ASSERT_EQ(frames.size(), 1U);
  const auto& [frame, conversionTime] = frames[0];
  EXPECT_EQ(frame.width(), 640);
  EXPECT_EQ(frame.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame));
  // Conversion must have happened strictly after the chunk was queued.
  EXPECT_GT(conversionTime - now, TimeDuration::FromMilliseconds(0));
}
+
// A chunk timestamped 100ms in the future is paced: it is not delivered
// until its timestamp has passed.
TEST_F(VideoFrameConverterTest, BasicPacing) {
  auto framesPromise = TakeNConvertedFrames(1);
  TimeStamp now = TimeStamp::Now();
  TimeStamp future = now + TimeDuration::FromMilliseconds(100);
  VideoChunk chunk = GenerateChunk(640, 480, future);
  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(chunk, false);
  auto frames = WaitFor(framesPromise).unwrap();
  // The wait itself must have outlasted the frame's future timestamp.
  EXPECT_GT(TimeStamp::Now() - now, future - now);
  ASSERT_EQ(frames.size(), 1U);
  const auto& [frame, conversionTime] = frames[0];
  EXPECT_EQ(frame.width(), 640);
  EXPECT_EQ(frame.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame));
  EXPECT_GT(conversionTime - now, future - now);
}
+
// Two future-timestamped chunks are paced independently and delivered in
// order, each after its own timestamp.
TEST_F(VideoFrameConverterTest, MultiPacing) {
  auto framesPromise = TakeNConvertedFrames(2);
  TimeStamp now = TimeStamp::Now();
  TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
  TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
  VideoChunk chunk = GenerateChunk(640, 480, future1);
  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(chunk, false);
  chunk = GenerateChunk(640, 480, future2);
  mConverter->QueueVideoChunk(chunk, false);
  auto frames = WaitFor(framesPromise).unwrap();
  EXPECT_GT(TimeStamp::Now(), future2);
  ASSERT_EQ(frames.size(), 2U);
  const auto& [frame0, conversionTime0] = frames[0];
  EXPECT_EQ(frame0.width(), 640);
  EXPECT_EQ(frame0.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame0));
  EXPECT_GT(conversionTime0 - now, future1 - now);

  const auto& [frame1, conversionTime1] = frames[1];
  EXPECT_EQ(frame1.width(), 640);
  EXPECT_EQ(frame1.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame1));
  EXPECT_GT(conversionTime1, future2);
  // The second frame must be delivered after the first.
  EXPECT_GT(conversionTime1 - now, conversionTime0 - now);
}
+
// With only one frame queued, the same-frame timer re-sends that frame after
// one second, re-using the original frame buffer.
TEST_F(VideoFrameConverterTest, Duplication) {
  auto framesPromise = TakeNConvertedFrames(2);
  TimeStamp now = TimeStamp::Now();
  TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
  VideoChunk chunk = GenerateChunk(640, 480, future1);
  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(chunk, false);
  auto frames = WaitFor(framesPromise).unwrap();
  // Getting the duplicate implies at least 100ms + 1s have passed.
  EXPECT_GT(TimeStamp::Now() - now, TimeDuration::FromMilliseconds(1100));
  ASSERT_EQ(frames.size(), 2U);
  const auto& [frame0, conversionTime0] = frames[0];
  EXPECT_EQ(frame0.width(), 640);
  EXPECT_EQ(frame0.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame0));
  EXPECT_GT(conversionTime0, future1);

  const auto& [frame1, conversionTime1] = frames[1];
  EXPECT_EQ(frame1.width(), 640);
  EXPECT_EQ(frame1.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame1));
  EXPECT_GT(conversionTime1 - now, TimeDuration::FromMilliseconds(1100));
  // The duplicate is stamped exactly one second after the original.
  EXPECT_EQ(frame1.timestamp_us() - frame0.timestamp_us(), USECS_PER_S);

  // Check that we re-used the old buffer.
  EXPECT_EQ(frame0.video_frame_buffer(), frame1.video_frame_buffer());
}
+
// Queuing a chunk with an earlier target time than an already-queued chunk
// drops the older (later-targeted) one; only the newer chunk is converted.
TEST_F(VideoFrameConverterTest, DropsOld) {
  auto framesPromise = TakeNConvertedFrames(1);
  TimeStamp now = TimeStamp::Now();
  TimeStamp future1 = now + TimeDuration::FromMilliseconds(1000);
  TimeStamp future2 = now + TimeDuration::FromMilliseconds(100);
  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future1), false);
  mConverter->QueueVideoChunk(GenerateChunk(640, 480, future2), false);
  auto frames = WaitFor(framesPromise).unwrap();
  EXPECT_GT(TimeStamp::Now(), future2);
  ASSERT_EQ(frames.size(), 1U);
  const auto& [frame, conversionTime] = frames[0];
  // The 640x480 chunk (future2) won; the 800x600 one was dropped.
  EXPECT_EQ(frame.width(), 640);
  EXPECT_EQ(frame.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame));
  EXPECT_GT(conversionTime - now, future2 - now);
}
+
+// We check that the disabling code was triggered by sending multiple,
+// different, frames to the converter within one second. While black, it shall
+// treat all frames identical and issue one black frame per second.
+// This version disables before queuing a frame. A frame will have to be
+// invented.
TEST_F(VideoFrameConverterTest, BlackOnDisableCreated) {
  auto framesPromise = TakeNConvertedFrames(2);
  TimeStamp now = TimeStamp::Now();
  TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
  TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
  TimeStamp future3 = now + TimeDuration::FromMilliseconds(400);
  mConverter->SetActive(true);
  // Disable before any frame has been queued; the converter must invent a
  // black frame (at its default 640x480 size) immediately.
  mConverter->SetTrackEnabled(false);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future1), false);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future3), false);
  auto frames = WaitFor(framesPromise).unwrap();
  EXPECT_GT(TimeStamp::Now() - now, TimeDuration::FromSeconds(1));
  ASSERT_EQ(frames.size(), 2U);
  // The first frame was created instantly by SetTrackEnabled().
  const auto& [frame0, conversionTime0] = frames[0];
  EXPECT_EQ(frame0.width(), 640);
  EXPECT_EQ(frame0.height(), 480);
  EXPECT_TRUE(IsFrameBlack(frame0));
  EXPECT_GT(conversionTime0 - now, TimeDuration::FromSeconds(0));
  // The second frame was created by the same-frame timer (after 1s).
  const auto& [frame1, conversionTime1] = frames[1];
  EXPECT_EQ(frame1.width(), 640);
  EXPECT_EQ(frame1.height(), 480);
  EXPECT_TRUE(IsFrameBlack(frame1));
  EXPECT_GT(conversionTime1 - now, TimeDuration::FromSeconds(1));
  // Check that the second frame comes 1s after the first.
  EXPECT_EQ(frame1.timestamp_us(), frame0.timestamp_us() + PR_USEC_PER_SEC);
}
+
+// We check that the disabling code was triggered by sending multiple,
+// different, frames to the converter within one second. While black, it shall
+// treat all frames identical and issue one black frame per second.
+// This version queues a frame before disabling.
TEST_F(VideoFrameConverterTest, BlackOnDisableDuplicated) {
  TimeStamp now = TimeStamp::Now();
  TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
  TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
  TimeStamp future3 = now + TimeDuration::FromMilliseconds(400);
  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future1), false);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future3), false);

  // Wait for the first (real) frame before disabling, so the black frame is
  // created by duplicating it rather than invented from scratch.
  const auto [frame0, conversionTime0] =
      WaitFor(TakeNConvertedFrames(1)).unwrap()[0];
  mConverter->SetTrackEnabled(false);
  // The first frame was queued.
  EXPECT_EQ(frame0.width(), 800);
  EXPECT_EQ(frame0.height(), 600);
  EXPECT_FALSE(IsFrameBlack(frame0));
  EXPECT_GT(conversionTime0 - now, future1 - now);

  auto frames = WaitFor(TakeNConvertedFrames(2)).unwrap();
  ASSERT_EQ(frames.size(), 2U);
  // The second frame was duplicated by SetTrackEnabled; note it keeps the
  // 800x600 size of the duplicated source, but is black.
  const auto& [frame1, conversionTime1] = frames[0];
  EXPECT_EQ(frame1.width(), 800);
  EXPECT_EQ(frame1.height(), 600);
  EXPECT_TRUE(IsFrameBlack(frame1));
  EXPECT_GT(conversionTime1 - now, future1 - now);
  // The third frame was created by the same-frame timer (after 1s).
  const auto& [frame2, conversionTime2] = frames[1];
  EXPECT_EQ(frame2.width(), 800);
  EXPECT_EQ(frame2.height(), 600);
  EXPECT_TRUE(IsFrameBlack(frame2));
  EXPECT_GT(conversionTime2 - now,
            future1 - now + TimeDuration::FromSeconds(1));
  // Check that the third frame comes 1s after the second.
  EXPECT_EQ(frame2.timestamp_us(), frame1.timestamp_us() + PR_USEC_PER_SEC);
}
+
// A chunk whose timestamp jumps back in time signals a reset: any queued
// future frames are cleared instead of being delivered.
TEST_F(VideoFrameConverterTest, ClearFutureFramesOnJumpingBack) {
  TimeStamp start = TimeStamp::Now();
  TimeStamp future1 = start + TimeDuration::FromMilliseconds(100);

  auto framesPromise = TakeNConvertedFrames(1);
  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
  auto frames = WaitFor(framesPromise).unwrap();

  // We are now at t=100ms+. Queue a future frame and jump back in time to
  // signal a reset.

  framesPromise = TakeNConvertedFrames(1);
  TimeStamp step1 = TimeStamp::Now();
  ASSERT_GT(step1 - start, future1 - start);
  TimeStamp future2 = step1 + TimeDuration::FromMilliseconds(200);
  TimeStamp future3 = step1 + TimeDuration::FromMilliseconds(100);
  // Ensure future2 would land before the same-frame timer fires, so if it
  // were (wrongly) delivered we would notice it here.
  ASSERT_LT(future2 - start, future1 + TimeDuration::FromSeconds(1) - start);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);
  // A chunk with a null image and a past timestamp acts as the reset signal.
  VideoChunk nullChunk;
  nullChunk.mFrame = VideoFrame(nullptr, gfx::IntSize(800, 600));
  nullChunk.mTimeStamp = step1;
  mConverter->QueueVideoChunk(nullChunk, false);

  // We queue one more chunk after the reset so we don't have to wait a full
  // second for the same-frame timer. It has a different time and resolution
  // so we can differentiate them.
  mConverter->QueueVideoChunk(GenerateChunk(320, 240, future3), false);

  {
    auto newFrames = WaitFor(framesPromise).unwrap();
    frames.insert(frames.end(), std::make_move_iterator(newFrames.begin()),
                  std::make_move_iterator(newFrames.end()));
  }
  TimeStamp step2 = TimeStamp::Now();
  EXPECT_GT(step2 - start, future3 - start);
  // Only two frames total: the 800x600 future2 chunk was cleared by the
  // reset and never delivered.
  ASSERT_EQ(frames.size(), 2U);
  const auto& [frame0, conversionTime0] = frames[0];
  EXPECT_EQ(frame0.width(), 640);
  EXPECT_EQ(frame0.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame0));
  EXPECT_GT(conversionTime0 - start, future1 - start);
  const auto& [frame1, conversionTime1] = frames[1];
  EXPECT_EQ(frame1.width(), 320);
  EXPECT_EQ(frame1.height(), 240);
  EXPECT_FALSE(IsFrameBlack(frame1));
  EXPECT_GT(conversionTime1 - start, future3 - start);
}
+
+// We check that the no frame is converted while inactive, and that on
+// activating the most recently queued frame gets converted.
TEST_F(VideoFrameConverterTest, NoConversionsWhileInactive) {
  auto framesPromise = TakeNConvertedFrames(1);
  TimeStamp now = TimeStamp::Now();
  TimeStamp future1 = now - TimeDuration::FromMilliseconds(1);
  TimeStamp future2 = now;
  // Queue two chunks while the converter is still inactive; neither should
  // be converted yet.
  mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);

  // SetActive needs to follow the same async path as the frames to be in sync.
  auto q = TaskQueue::Create(GetMediaThreadPool(MediaThreadType::WEBRTC_WORKER),
                             "VideoFrameConverterTest");
  auto timer = MakeRefPtr<MediaTimer>(false);
  timer->WaitFor(TimeDuration::FromMilliseconds(100), __func__)
      ->Then(q, __func__,
             [converter = mConverter] { converter->SetActive(true); });

  auto frames = WaitFor(framesPromise).unwrap();
  ASSERT_EQ(frames.size(), 1U);
  const auto& [frame, conversionTime] = frames[0];
  Unused << conversionTime;
  // Only the most recently queued chunk (800x600) is converted on activation.
  EXPECT_EQ(frame.width(), 800);
  EXPECT_EQ(frame.height(), 600);
  EXPECT_FALSE(IsFrameBlack(frame));
}
+
// Chunk timestamps must propagate into the webrtc::VideoFrame timestamps via
// the fixture's RTCStatsTimestampMaker.
TEST_F(VideoFrameConverterTest, TimestampPropagation) {
  auto framesPromise = TakeNConvertedFrames(2);
  TimeStamp now = TimeStamp::Now();
  TimeDuration d1 = TimeDuration::FromMilliseconds(1);
  TimeDuration d2 = TimeDuration::FromMilliseconds(29);

  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(GenerateChunk(640, 480, now + d1), false);
  mConverter->QueueVideoChunk(GenerateChunk(800, 600, now + d2), false);

  auto frames = WaitFor(framesPromise).unwrap();
  ASSERT_EQ(frames.size(), 2U);
  const auto& [frame0, conversionTime0] = frames[0];
  EXPECT_EQ(frame0.width(), 640);
  EXPECT_EQ(frame0.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame0));
  // The frame's realtime timestamp equals the chunk time mapped through the
  // timestamp maker.
  EXPECT_EQ(frame0.timestamp_us(),
            dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now + d1)
                .ToRealtime()
                .us());
  EXPECT_GE(conversionTime0 - now, d1);

  const auto& [frame1, conversionTime1] = frames[1];
  EXPECT_EQ(frame1.width(), 800);
  EXPECT_EQ(frame1.height(), 600);
  EXPECT_FALSE(IsFrameBlack(frame1));
  EXPECT_EQ(frame1.timestamp_us(),
            dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now + d2)
                .ToRealtime()
                .us());
  EXPECT_GE(conversionTime1 - now, d2);
}
+
// A frame whose timestamp is older than the most recently processed frame
// must be ignored rather than making time appear to go backwards.
TEST_F(VideoFrameConverterTest, IgnoreOldFrames) {
  TimeStamp now = TimeStamp::Now();
  TimeDuration d1 = TimeDuration::FromMilliseconds(100);
  TimeDuration d2 = d1 + TimeDuration::FromMicroseconds(1);

  auto framesPromise = TakeNConvertedFrames(1);
  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(GenerateChunk(640, 480, now + d1), false);
  auto frames = WaitFor(framesPromise).unwrap();

  framesPromise = TakeNConvertedFrames(2);

  // Time is now ~t1. This processes an extra frame using t=now().
  mConverter->SetActive(false);
  mConverter->SetActive(true);

  // This processes a new chunk with an earlier timestamp than the extra frame
  // above. But it gets processed after the extra frame, so time will appear to
  // go backwards. This simulates a frame from the pacer being in flight when we
  // flip SetActive() above. This frame is expected to get ignored.
  Unused << WaitFor(InvokeAsync(mConverter->mTaskQueue, __func__, [&] {
    mConverter->QueueForProcessing(
        GenerateChunk(800, 600, now + d2).mFrame.GetImage(), now + d2,
        gfx::IntSize(800, 600), false);
    return GenericPromise::CreateAndResolve(true, __func__);
  }));

  {
    auto newFrames = WaitFor(framesPromise).unwrap();
    frames.insert(frames.end(), std::make_move_iterator(newFrames.begin()),
                  std::make_move_iterator(newFrames.end()));
  }
  ASSERT_EQ(frames.size(), 3U);
  // Frame 0: the original queued chunk.
  const auto& [frame0, conversionTime0] = frames[0];
  EXPECT_EQ(frame0.width(), 640);
  EXPECT_EQ(frame0.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame0));
  EXPECT_EQ(frame0.timestamp_us(),
            dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now + d1)
                .ToRealtime()
                .us());
  EXPECT_GE(conversionTime0 - now, d1);

  // Frame 1: the extra frame from toggling SetActive; the ignored 800x600
  // frame never shows up.
  const auto& [frame1, conversionTime1] = frames[1];
  EXPECT_EQ(frame1.width(), 640);
  EXPECT_EQ(frame1.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame1));
  EXPECT_GT(frame1.timestamp_us(),
            dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now + d2)
                .ToRealtime()
                .us());
  EXPECT_GE(conversionTime1 - now, d2);

  // Frame 2: produced by the same-frame timer 1s after frame 1.
  const auto& [frame2, conversionTime2] = frames[2];
  EXPECT_EQ(frame2.width(), 640);
  EXPECT_EQ(frame2.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame2));
  EXPECT_EQ(frame2.timestamp_us(), frame1.timestamp_us() + USECS_PER_S);
  EXPECT_GE(conversionTime2 - now, d2 + TimeDuration::FromSeconds(1));
}
+
// Queues a second frame just under 1s after the first, so the same-frame
// timer races with the paced delivery of the second frame.
TEST_F(VideoFrameConverterTest, SameFrameTimerRacingWithPacing) {
  TimeStamp now = TimeStamp::Now();
  TimeDuration d1 = TimeDuration::FromMilliseconds(100);
  TimeDuration d2 =
      d1 + TimeDuration::FromSeconds(1) - TimeDuration::FromMicroseconds(1);

  auto framesPromise = TakeNConvertedFrames(3);
  mConverter->SetActive(true);
  mConverter->QueueVideoChunk(GenerateChunk(640, 480, now + d1), false);
  mConverter->QueueVideoChunk(GenerateChunk(640, 480, now + d2), false);
  auto frames = WaitFor(framesPromise).unwrap();

  // The expected order here (in timestamps) is t1, t2, t2+1s.
  //
  // If the same-frame timer doesn't check what is queued we could end up with
  // t1, t1+1s, t2.

  ASSERT_EQ(frames.size(), 3U);
  const auto& [frame0, conversionTime0] = frames[0];
  EXPECT_EQ(frame0.width(), 640);
  EXPECT_EQ(frame0.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame0));
  EXPECT_EQ(frame0.timestamp_us(),
            dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now + d1)
                .ToRealtime()
                .us());
  EXPECT_GE(conversionTime0 - now, d1);

  const auto& [frame1, conversionTime1] = frames[1];
  EXPECT_EQ(frame1.width(), 640);
  EXPECT_EQ(frame1.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame1));
  EXPECT_EQ(frame1.timestamp_us(),
            dom::RTCStatsTimestamp::FromMozTime(mTimestampMaker, now + d2)
                .ToRealtime()
                .us());
  EXPECT_GE(conversionTime1 - now, d2);

  // The third frame is the same-frame duplicate, 1s after t2.
  const auto& [frame2, conversionTime2] = frames[2];
  EXPECT_EQ(frame2.width(), 640);
  EXPECT_EQ(frame2.height(), 480);
  EXPECT_FALSE(IsFrameBlack(frame2));
  EXPECT_EQ(frame2.timestamp_us(),
            dom::RTCStatsTimestamp::FromMozTime(
                mTimestampMaker, now + d2 + TimeDuration::FromSeconds(1))
                .ToRealtime()
                .us());
  EXPECT_GE(conversionTime2 - now, d2 + TimeDuration::FromSeconds(1));
}
diff --git a/dom/media/gtest/TestVideoSegment.cpp b/dom/media/gtest/TestVideoSegment.cpp
new file mode 100644
index 0000000000..fd9f5ed285
--- /dev/null
+++ b/dom/media/gtest/TestVideoSegment.cpp
@@ -0,0 +1,44 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "VideoSegment.h"
+
+using namespace mozilla;
+
// Forward-declare layers::Image so the RefPtr<layers::Image> declarations
// below don't require the full header. The original declared namespace
// `mozilla::layer` (no trailing 's'), which does not match `layers::Image`
// as used in the tests and therefore had no effect.
namespace mozilla::layers {
class Image;
}  // namespace mozilla::layers
+
// AppendFrame with aForceBlack=true must mark every resulting chunk's frame
// as forced-black.
TEST(VideoSegment, TestAppendFrameForceBlack)
{
  // A null image is fine here; only the force-black flag is inspected.
  RefPtr<layers::Image> testImage = nullptr;

  VideoSegment segment;
  segment.AppendFrame(testImage.forget(), mozilla::gfx::IntSize(640, 480),
                      PRINCIPAL_HANDLE_NONE, true);

  VideoSegment::ChunkIterator iter(segment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;
    EXPECT_TRUE(chunk.mFrame.GetForceBlack());
    iter.Next();
  }
}
+
// AppendFrame with the default aForceBlack=false must leave the force-black
// flag unset on every resulting chunk.
TEST(VideoSegment, TestAppendFrameNotForceBlack)
{
  // A null image is fine here; only the force-black flag is inspected.
  RefPtr<layers::Image> testImage = nullptr;

  VideoSegment segment;
  segment.AppendFrame(testImage.forget(), mozilla::gfx::IntSize(640, 480),
                      PRINCIPAL_HANDLE_NONE);

  VideoSegment::ChunkIterator iter(segment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;
    EXPECT_FALSE(chunk.mFrame.GetForceBlack());
    iter.Next();
  }
}
diff --git a/dom/media/gtest/TestVideoTrackEncoder.cpp b/dom/media/gtest/TestVideoTrackEncoder.cpp
new file mode 100644
index 0000000000..ee39213961
--- /dev/null
+++ b/dom/media/gtest/TestVideoTrackEncoder.cpp
@@ -0,0 +1,1467 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <algorithm>
+
+#include "DriftCompensation.h"
+#include "MediaTrackGraph.h"
+#include "MediaTrackListener.h"
+#include "VP8TrackEncoder.h"
+#include "WebMWriter.h" // TODO: it's weird to include muxer header to get the class definition of VP8 METADATA
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "mozilla/ArrayUtils.h"
+#include "prtime.h"
+
+#include "YUVBufferGenerator.h"
+
+#define VIDEO_TRACK_RATE 90000
+
+using ::testing::_;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::TestWithParam;
+using ::testing::Values;
+
+using namespace mozilla::layers;
+using namespace mozilla;
+
// One encoder-initialization scenario: the frame dimensions to try and
// whether Init() is expected to succeed for them.
struct InitParam {
  bool mShouldSucceed;  // This parameter should cause success or fail result
  int mWidth;           // frame width
  int mHeight;          // frame height
};
+
// DriftCompensator mock whose GetVideoTime defaults to the identity mapping
// (i.e. no drift). Individual tests override the expectation to simulate
// audio/video drift.
class MockDriftCompensator : public DriftCompensator {
 public:
  MockDriftCompensator()
      : DriftCompensator(GetCurrentSerialEventTarget(), VIDEO_TRACK_RATE) {
    ON_CALL(*this, GetVideoTime(_, _))
        .WillByDefault(Invoke([](TimeStamp, TimeStamp t) { return t; }));
  }

  MOCK_METHOD2(GetVideoTime, TimeStamp(TimeStamp, TimeStamp));
};
+
// VP8TrackEncoder wired up with a mocked drift compensator and an owned
// output queue, plus a helper asserting Init() success/failure.
class TestVP8TrackEncoder : public VP8TrackEncoder {
 public:
  explicit TestVP8TrackEncoder(Maybe<float> aKeyFrameIntervalFactor = Nothing())
      : VP8TrackEncoder(MakeRefPtr<NiceMock<MockDriftCompensator>>(),
                        VIDEO_TRACK_RATE, mEncodedVideoQueue,
                        FrameDroppingMode::DISALLOW, aKeyFrameIntervalFactor) {}

  // The drift compensator passed to the base class, as its mock type so
  // tests can override GetVideoTime.
  MockDriftCompensator* DriftCompensator() {
    return static_cast<MockDriftCompensator*>(mDriftCompensator.get());
  }

  // Initializes the encoder at 30fps with the given dimensions and returns
  // success iff the outcome matches aParam.mShouldSucceed.
  ::testing::AssertionResult TestInit(const InitParam& aParam) {
    nsresult result =
        Init(aParam.mWidth, aParam.mHeight, aParam.mWidth, aParam.mHeight, 30);

    if (((NS_FAILED(result) && aParam.mShouldSucceed)) ||
        (NS_SUCCEEDED(result) && !aParam.mShouldSucceed)) {
      return ::testing::AssertionFailure()
             << " width = " << aParam.mWidth << " height = " << aParam.mHeight;
    }

    return ::testing::AssertionSuccess();
  }

  // Queue the encoder writes its encoded output frames to.
  MediaQueue<EncodedFrame> mEncodedVideoQueue;
};
+
+// Init test
// Init() must reject degenerate dimensions and accept standard resolutions.
TEST(VP8VideoTrackEncoder, Initialization)
{
  InitParam params[] = {
      // Failure cases.
      {false, 0, 0},  // Height/ width should be larger than 1.
      {false, 0, 1},  // Height/ width should be larger than 1.
      {false, 1, 0},  // Height/ width should be larger than 1.

      // Success cases
      {true, 640, 480},  // Standard VGA
      {true, 800, 480},  // Standard WVGA
      {true, 960, 540},  // Standard qHD
      {true, 1280, 720}  // Standard HD
  };

  for (const InitParam& param : params) {
    TestVP8TrackEncoder encoder;
    EXPECT_TRUE(encoder.TestInit(param));
  }
}
+
+// Get MetaData test
// GetMetadata() must report the dimensions the encoder was initialized with.
TEST(VP8VideoTrackEncoder, FetchMetaData)
{
  InitParam params[] = {
      // Success cases
      {true, 640, 480},  // Standard VGA
      {true, 800, 480},  // Standard WVGA
      {true, 960, 540},  // Standard qHD
      {true, 1280, 720}  // Standard HD
  };

  for (const InitParam& param : params) {
    TestVP8TrackEncoder encoder;
    EXPECT_TRUE(encoder.TestInit(param));

    // NOTE(review): assumes GetMetadata() returns a VP8Metadata after a
    // successful Init(); a null return would crash the test here.
    RefPtr<TrackMetadataBase> meta = encoder.GetMetadata();
    RefPtr<VP8Metadata> vp8Meta(static_cast<VP8Metadata*>(meta.get()));

    // METADATA should be depend on how to initiate encoder.
    EXPECT_EQ(vp8Meta->mWidth, param.mWidth);
    EXPECT_EQ(vp8Meta->mHeight, param.mHeight);
  }
}
+
+// Encode test
// Feeds I420, NV12 and NV21 images through the encoder and checks that the
// stream finishes cleanly with output available.
TEST(VP8VideoTrackEncoder, FrameEncode)
{
  TestVP8TrackEncoder encoder;
  TimeStamp now = TimeStamp::Now();

  // Create YUV images as source.
  nsTArray<RefPtr<Image>> images;
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  images.AppendElement(generator.GenerateI420Image());
  images.AppendElement(generator.GenerateNV12Image());
  images.AppendElement(generator.GenerateNV21Image());

  // Put generated YUV frame into video segment.
  // Duration of each frame is 1 second.
  VideoSegment segment;
  for (nsTArray<RefPtr<Image>>::size_type i = 0; i < images.Length(); i++) {
    RefPtr<Image> image = images[i];
    segment.AppendFrame(image.forget(), generator.GetSize(),
                        PRINCIPAL_HANDLE_NONE, false,
                        now + TimeDuration::FromSeconds(i));
  }

  encoder.SetStartOffset(now);
  encoder.AppendVideoSegment(std::move(segment));
  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(images.Length()));
  encoder.NotifyEndOfStream();

  EXPECT_TRUE(encoder.IsEncodingComplete());
  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
  // Finished but not yet drained: frames are still queued.
  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
}
+
+// Test that encoding a single frame gives useful output.
// Test that encoding a single frame gives useful output.
TEST(VP8VideoTrackEncoder, SingleFrameEncode)
{
  TestVP8TrackEncoder encoder;
  TimeStamp now = TimeStamp::Now();
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));

  // Pass a half-second frame to the encoder.
  VideoSegment segment;
  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE, false, now);

  encoder.SetStartOffset(now);
  encoder.AppendVideoSegment(std::move(segment));
  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.5));
  encoder.NotifyEndOfStream();

  EXPECT_TRUE(encoder.IsEncodingComplete());
  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());

  // Read out encoded data, and verify.
  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
  EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frame->mFrameType)
      << "We only have one frame, so it should be a keyframe";

  // The single frame covers the full advanced duration of 0.5s.
  const uint64_t halfSecond = PR_USEC_PER_SEC / 2;
  EXPECT_EQ(halfSecond, frame->mDuration);

  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
}
+
+// Test that encoding a couple of identical images gives useful output.
// Test that encoding a couple of identical images gives useful output.
TEST(VP8VideoTrackEncoder, SameFrameEncode)
{
  TestVP8TrackEncoder encoder;
  TimeStamp now = TimeStamp::Now();
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));

  // Pass 15 100ms frames to the encoder, all referencing the same image.
  RefPtr<Image> image = generator.GenerateI420Image();
  VideoSegment segment;
  for (uint32_t i = 0; i < 15; ++i) {
    segment.AppendFrame(do_AddRef(image), generator.GetSize(),
                        PRINCIPAL_HANDLE_NONE, false,
                        now + TimeDuration::FromSeconds(i * 0.1));
  }

  encoder.SetStartOffset(now);
  encoder.AppendVideoSegment(std::move(segment));
  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1.5));
  encoder.NotifyEndOfStream();

  EXPECT_TRUE(encoder.IsEncodingComplete());
  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());

  // Verify total duration being 1.5s.
  uint64_t totalDuration = 0;
  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
    totalDuration += frame->mDuration;
  }
  const uint64_t oneAndAHalf = (PR_USEC_PER_SEC / 2) * 3;
  EXPECT_EQ(oneAndAHalf, totalDuration);
}
+
+// Test encoding a track that has to skip frames.
// Test encoding a track that has to skip frames.
TEST(VP8VideoTrackEncoder, SkippedFrames)
{
  TestVP8TrackEncoder encoder;
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  TimeStamp now = TimeStamp::Now();

  // Pass 100 frames of the shortest possible duration where we don't get
  // rounding errors between input/output rate.
  VideoSegment segment;
  for (uint32_t i = 0; i < 100; ++i) {
    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
                        PRINCIPAL_HANDLE_NONE, false,
                        now + TimeDuration::FromMilliseconds(i));
  }

  encoder.SetStartOffset(now);
  encoder.AppendVideoSegment(std::move(segment));
  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(100));
  encoder.NotifyEndOfStream();

  EXPECT_TRUE(encoder.IsEncodingComplete());
  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());

  // Verify total duration being 100 * 1ms = 100ms.
  uint64_t totalDuration = 0;
  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
    totalDuration += frame->mDuration;
  }
  const uint64_t hundredMillis = PR_USEC_PER_SEC / 10;
  EXPECT_EQ(hundredMillis, totalDuration);
}
+
+// Test encoding a track with frames subject to rounding errors.
// Test encoding a track with frames subject to rounding errors.
TEST(VP8VideoTrackEncoder, RoundingErrorFramesEncode)
{
  TestVP8TrackEncoder encoder;
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  TimeStamp now = TimeStamp::Now();

  // Pass nine frames with timestamps not expressable in 90kHz sample rate,
  // then one frame to make the total duration close to one second.
  VideoSegment segment;
  uint32_t usPerFrame = 99999;  // 99.999ms
  for (uint32_t i = 0; i < 9; ++i) {
    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
                        PRINCIPAL_HANDLE_NONE, false,
                        now + TimeDuration::FromMicroseconds(i * usPerFrame));
  }

  // This last frame has timestamp start + 0.9s and duration 0.1s.
  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE, false,
                      now + TimeDuration::FromSeconds(0.9));

  encoder.SetStartOffset(now);
  encoder.AppendVideoSegment(std::move(segment));
  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1));
  encoder.NotifyEndOfStream();

  EXPECT_TRUE(encoder.IsEncodingComplete());
  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());

  // Verify total duration being 1s.
  uint64_t totalDuration = 0;
  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
    totalDuration += frame->mDuration;
  }
  // Not exact, the stream is encoded in time base 90kHz.
  const uint64_t oneSecond = PR_USEC_PER_SEC - 1;
  EXPECT_EQ(oneSecond, totalDuration);
}
+
+// Test that we're encoding timestamps rather than durations.
// Test that we're encoding timestamps rather than durations.
TEST(VP8VideoTrackEncoder, TimestampFrameEncode)
{
  TestVP8TrackEncoder encoder;
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  TimeStamp now = TimeStamp::Now();

  // Three frames at t=0, t=0.05s and t=0.2s; durations are derived from the
  // gaps between timestamps, not supplied explicitly.
  VideoSegment segment;
  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE, false, now);
  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE, false,
                      now + TimeDuration::FromSeconds(0.05));
  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE, false,
                      now + TimeDuration::FromSeconds(0.2));

  encoder.SetStartOffset(now);
  encoder.AppendVideoSegment(std::move(segment));
  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.3));
  encoder.NotifyEndOfStream();

  EXPECT_TRUE(encoder.IsEncodingComplete());
  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());

  // Verify total duration being 0.3s and individual frames being [0.05s, 0.15s,
  // 0.1s]
  uint64_t expectedDurations[] = {(PR_USEC_PER_SEC / 10) / 2,
                                  (PR_USEC_PER_SEC / 10) * 3 / 2,
                                  (PR_USEC_PER_SEC / 10)};
  uint64_t totalDuration = 0;
  size_t i = 0;
  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
    EXPECT_EQ(expectedDurations[i], frame->mDuration);
    i++;
    totalDuration += frame->mDuration;
  }
  const uint64_t pointThree = (PR_USEC_PER_SEC / 10) * 3;
  EXPECT_EQ(pointThree, totalDuration);
}
+
+// Test that we're compensating for drift when encoding.
+TEST(VP8VideoTrackEncoder, DriftingFrameEncode)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Set up major drift -- audio that goes twice as fast as video.
+  // This should make the given video durations double as they get encoded.
+  // The mock maps every video time t to now + 2 * (t - now).
+  EXPECT_CALL(*encoder.DriftCompensator(), GetVideoTime(_, _))
+      .WillRepeatedly(Invoke(
+          [&](TimeStamp, TimeStamp aTime) { return now + (aTime - now) * 2; }));
+
+  // Same input as TimestampFrameEncode: frames at t=0, t=0.05s and t=0.2s.
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false,
+                      now + TimeDuration::FromSeconds(0.05));
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false,
+                      now + TimeDuration::FromSeconds(0.2));
+
+  encoder.SetStartOffset(now);
+  encoder.AppendVideoSegment(std::move(segment));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.3));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // Verify total duration being 0.6s and individual frames being [0.1s, 0.3s,
+  // 0.2s] -- double the un-drifted durations because of the 2x mock above.
+  uint64_t expectedDurations[] = {(PR_USEC_PER_SEC / 10),
+                                  (PR_USEC_PER_SEC / 10) * 3,
+                                  (PR_USEC_PER_SEC / 10) * 2};
+  uint64_t totalDuration = 0;
+  size_t i = 0;
+  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+    EXPECT_EQ(expectedDurations[i], frame->mDuration);
+    i++;
+    totalDuration += frame->mDuration;
+  }
+  const uint64_t pointSix = (PR_USEC_PER_SEC / 10) * 6;
+  EXPECT_EQ(pointSix, totalDuration);
+}
+
+// Test that suspending an encoding works.
+TEST(VP8VideoTrackEncoder, Suspended)
+{
+  TestVP8TrackEncoder encoder;
+  TimeStamp now = TimeStamp::Now();
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+
+  // Pass 3 frames with duration 0.1s. We suspend before and resume after the
+  // second frame.
+
+  {
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false, now);
+
+    encoder.SetStartOffset(now);
+    encoder.AppendVideoSegment(std::move(segment));
+    encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.1));
+  }
+
+  encoder.Suspend(now + TimeDuration::FromSeconds(0.1));
+
+  {
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromSeconds(0.1));
+    encoder.AppendVideoSegment(std::move(segment));
+    encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.2));
+  }
+
+  encoder.Resume(now + TimeDuration::FromSeconds(0.2));
+
+  {
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromSeconds(0.2));
+    encoder.AppendVideoSegment(std::move(segment));
+    encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.3));
+  }
+
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // Verify that we have two encoded frames and a total duration of 0.2s.
+  // The 0.1s that passed while suspended is not part of the encoding.
+  uint64_t count = 0;
+  uint64_t totalDuration = 0;
+  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+    ++count;
+    totalDuration += frame->mDuration;
+  }
+  const uint64_t two = 2;
+  EXPECT_EQ(two, count);
+  const uint64_t pointTwo = (PR_USEC_PER_SEC / 10) * 2;
+  EXPECT_EQ(pointTwo, totalDuration);
+}
+
+// Test that ending a track while the video track encoder is suspended works.
+TEST(VP8VideoTrackEncoder, SuspendedUntilEnd)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Pass 2 frames with duration 0.1s. We suspend before the second frame.
+
+  {
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false, now);
+
+    encoder.SetStartOffset(now);
+    encoder.AppendVideoSegment(std::move(segment));
+    encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.1));
+  }
+
+  encoder.Suspend(now + TimeDuration::FromSeconds(0.1));
+
+  {
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromSeconds(0.1));
+    encoder.AppendVideoSegment(std::move(segment));
+    encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.2));
+  }
+
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // Verify that we have one encoded frame and a total duration of 0.1s.
+  // The second frame was passed while suspended, so it is not encoded.
+  uint64_t count = 0;
+  uint64_t totalDuration = 0;
+  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+    ++count;
+    totalDuration += frame->mDuration;
+  }
+  const uint64_t one = 1;
+  EXPECT_EQ(one, count);
+  const uint64_t pointOne = PR_USEC_PER_SEC / 10;
+  EXPECT_EQ(pointOne, totalDuration);
+}
+
+// Test that ending a track that was always suspended works.
+TEST(VP8VideoTrackEncoder, AlwaysSuspended)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Suspend and then pass a frame with duration 2s.
+
+  encoder.Suspend(now);
+
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+
+  encoder.SetStartOffset(now);
+  encoder.AppendVideoSegment(std::move(segment));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(2));
+
+  encoder.NotifyEndOfStream();
+
+  // Verify that we have no encoded frames. The queue is both finished and
+  // empty, so it reports end-of-stream immediately.
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// Test that encoding a track that is suspended in the beginning works.
+TEST(VP8VideoTrackEncoder, SuspendedBeginning)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Suspend and pass a frame with duration 0.5s. Then resume and pass one more.
+  encoder.Suspend(now);
+
+  {
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false, now);
+
+    encoder.SetStartOffset(now);
+    encoder.AppendVideoSegment(std::move(segment));
+    encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.5));
+  }
+
+  encoder.Resume(now + TimeDuration::FromSeconds(0.5));
+
+  {
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromSeconds(0.5));
+    encoder.AppendVideoSegment(std::move(segment));
+    encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1));
+  }
+
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // Verify that we have one encoded frame and a total duration of 0.5s
+  // (the 0.5s that passed while suspended is not encoded).
+  uint64_t count = 0;
+  uint64_t totalDuration = 0;
+  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+    ++count;
+    totalDuration += frame->mDuration;
+  }
+  const uint64_t one = 1;
+  EXPECT_EQ(one, count);
+  const uint64_t half = PR_USEC_PER_SEC / 2;
+  EXPECT_EQ(half, totalDuration);
+}
+
+// Test that suspending and resuming in the middle of already pushed data
+// works.
+TEST(VP8VideoTrackEncoder, SuspendedOverlap)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  {
+    // Pass a 1s frame and suspend after 0.5s.
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false, now);
+
+    encoder.SetStartOffset(now);
+    encoder.AppendVideoSegment(std::move(segment));
+  }
+
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.5));
+  encoder.Suspend(now + TimeDuration::FromSeconds(0.5));
+
+  {
+    // Pass another 1s frame and resume after 0.3 of this new frame.
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromSeconds(1));
+    encoder.AppendVideoSegment(std::move(segment));
+  }
+
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1.3));
+  encoder.Resume(now + TimeDuration::FromSeconds(1.3));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(2));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // Verify that we have two encoded frames: 0.5s for the part of the first
+  // frame before the suspend, and 0.7s for the part after the resume
+  // (t=1.3s to t=2s). Time spent suspended is not encoded.
+  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+  const uint64_t pointFive = (PR_USEC_PER_SEC / 10) * 5;
+  EXPECT_EQ(pointFive, frame->mDuration);
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  const uint64_t pointSeven = (PR_USEC_PER_SEC / 10) * 7;
+  EXPECT_EQ(pointSeven, frame->mDuration);
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// Test that ending a track in the middle of already pushed data works.
+TEST(VP8VideoTrackEncoder, PrematureEnding)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Pass a 1s frame and end the track after 0.5s.
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+
+  encoder.SetStartOffset(now);
+  encoder.AppendVideoSegment(std::move(segment));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.5));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // The input frame is truncated to the 0.5s that was advanced before the end
+  // of the stream.
+  uint64_t totalDuration = 0;
+  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+    totalDuration += frame->mDuration;
+  }
+  const uint64_t half = PR_USEC_PER_SEC / 2;
+  EXPECT_EQ(half, totalDuration);
+}
+
+// Test that a track that starts at t > 0 works as expected.
+TEST(VP8VideoTrackEncoder, DelayedStart)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Pass a 2s frame, start (pass first CurrentTime) at 0.5s, end at 1s.
+  // Should result in a 0.5s encoding; the part of the frame before the start
+  // offset is not encoded.
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+
+  encoder.SetStartOffset(now + TimeDuration::FromSeconds(0.5));
+  encoder.AppendVideoSegment(std::move(segment));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  uint64_t totalDuration = 0;
+  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+    totalDuration += frame->mDuration;
+  }
+  const uint64_t half = PR_USEC_PER_SEC / 2;
+  EXPECT_EQ(half, totalDuration);
+}
+
+// Test that a track that starts at t > 0 works as expected, when
+// SetStartOffset comes after AppendVideoSegment.
+TEST(VP8VideoTrackEncoder, DelayedStartOtherEventOrder)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Pass a 2s frame, start (pass first CurrentTime) at 0.5s, end at 1s.
+  // Should result in a 0.5s encoding. Same scenario as DelayedStart, but with
+  // AppendVideoSegment before SetStartOffset; the result must be identical.
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+
+  encoder.AppendVideoSegment(std::move(segment));
+  encoder.SetStartOffset(now + TimeDuration::FromSeconds(0.5));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  uint64_t totalDuration = 0;
+  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+    totalDuration += frame->mDuration;
+  }
+  const uint64_t half = PR_USEC_PER_SEC / 2;
+  EXPECT_EQ(half, totalDuration);
+}
+
+// Test that a track that starts at t >>> 0 works as expected.
+TEST(VP8VideoTrackEncoder, VeryDelayedStart)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Pass a 1s frame, start (pass first CurrentTime) at 10s, end at 10.5s.
+  // Should result in a 0.5s encoding: the single frame at t=0 covers the
+  // whole [10s, 10.5s) encoded window.
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+
+  encoder.SetStartOffset(now + TimeDuration::FromSeconds(10));
+  encoder.AppendVideoSegment(std::move(segment));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(10.5));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  uint64_t totalDuration = 0;
+  while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+    totalDuration += frame->mDuration;
+  }
+  const uint64_t half = PR_USEC_PER_SEC / 2;
+  EXPECT_EQ(half, totalDuration);
+}
+
+// Test that a video frame that hangs around for a long time gets encoded
+// every second.
+TEST(VP8VideoTrackEncoder, LongFramesReEncoded)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Pass a frame at t=0 and start encoding.
+  // Advancing the current time by 6.5s should encode six 1s frames.
+  // Advancing the current time by another 5.5s should encode another five 1s
+  // frames.
+
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+
+  encoder.SetStartOffset(now);
+  encoder.AppendVideoSegment(std::move(segment));
+
+  {
+    // First advance, to t=6.5s: six whole 1s re-encodes come out; the
+    // trailing 0.5s is not emitted yet.
+    encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(6.5));
+
+    EXPECT_FALSE(encoder.IsEncodingComplete());
+    EXPECT_FALSE(encoder.mEncodedVideoQueue.IsFinished());
+
+    uint64_t count = 0;
+    uint64_t totalDuration = 0;
+    while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+      ++count;
+      totalDuration += frame->mDuration;
+    }
+    const uint64_t sixSec = 6 * PR_USEC_PER_SEC;
+    EXPECT_EQ(sixSec, totalDuration);
+    EXPECT_EQ(6U, count);
+  }
+
+  {
+    // Second advance, to t=11s, then end the stream: five more seconds of
+    // encoded output, again in 1s frames.
+    encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(11));
+    encoder.NotifyEndOfStream();
+
+    EXPECT_TRUE(encoder.IsEncodingComplete());
+    EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+
+    uint64_t count = 0;
+    uint64_t totalDuration = 0;
+    while (RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront()) {
+      ++count;
+      totalDuration += frame->mDuration;
+    }
+    const uint64_t fiveSec = 5 * PR_USEC_PER_SEC;
+    EXPECT_EQ(fiveSec, totalDuration);
+    EXPECT_EQ(5U, count);
+  }
+}
+
+// Test that an encoding with no defined key frame interval encodes keyframes
+// as expected. Default interval should be 10s.
+TEST(VP8VideoTrackEncoder, DefaultKeyFrameInterval)
+{
+  // Set the factor high to only test the keyframe-forcing logic
+  TestVP8TrackEncoder encoder(Some(2.0));
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Pass a frame at t=0, and the frame-duplication logic will encode frames
+  // every second. Keyframes are expected at t=0, 10s and 20s.
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+
+  encoder.SetStartOffset(now);
+  encoder.AppendVideoSegment(std::move(segment));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(21.5));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // Duplication logic ensures no frame duration is longer than 1 second.
+  // Note: PR_USEC_PER_SEC / 1000 * NUL is N milliseconds in microseconds.
+
+  // [0, 1000ms) - key-frame.
+  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 1000UL, frame->mDuration);
+  EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frame->mFrameType);
+
+  // [1000ms, 10000ms) - non-key-frames
+  for (int i = 0; i < 9; ++i) {
+    frame = encoder.mEncodedVideoQueue.PopFront();
+    EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 1000UL, frame->mDuration)
+        << "Start time: " << frame->mTime.ToMicroseconds() << "us";
+    EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frame->mFrameType)
+        << "Start time: " << frame->mTime.ToMicroseconds() << "us";
+  }
+
+  // [10000ms, 11000ms) - key-frame
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 1000UL, frame->mDuration);
+  EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frame->mFrameType);
+
+  // [11000ms, 20000ms) - non-key-frames
+  for (int i = 0; i < 9; ++i) {
+    frame = encoder.mEncodedVideoQueue.PopFront();
+    EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 1000UL, frame->mDuration)
+        << "Start time: " << frame->mTime.ToMicroseconds() << "us";
+    EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frame->mFrameType)
+        << "Start time: " << frame->mTime.ToMicroseconds() << "us";
+  }
+
+  // [20000ms, 21000ms) - key-frame
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 1000UL, frame->mDuration);
+  EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frame->mFrameType);
+
+  // [21000ms, 21500ms) - non-key-frame
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 500UL, frame->mDuration);
+  EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frame->mFrameType);
+
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// Test that an encoding which is disabled on a frame timestamp encodes
+// frames as expected.
+TEST(VP8VideoTrackEncoder, DisableOnFrameTime)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Pass a frame in at t=0.
+  // Pass another frame in at t=100ms.
+  // Disable the track at t=100ms.
+  // Stop encoding at t=200ms.
+  // Should yield 2 frames, 1 real; 1 black.
+
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false,
+                      now + TimeDuration::FromMilliseconds(100));
+
+  encoder.SetStartOffset(now);
+  encoder.AppendVideoSegment(std::move(segment));
+
+  // Advancing 100ms, for simplicity.
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(100));
+
+  encoder.Disable(now + TimeDuration::FromMilliseconds(100));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // [0, 100ms) - the real frame.
+  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  // [100ms, 200ms) - the black frame (track disabled at t=100ms).
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// Test that an encoding which is disabled between two frame timestamps
+// encodes frames as expected.
+TEST(VP8VideoTrackEncoder, DisableBetweenFrames)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Pass a frame in at t=0.
+  // Disable the track at t=50ms.
+  // Pass another frame in at t=100ms.
+  // Stop encoding at t=200ms.
+  // Should yield 3 frames, 1 real [0, 50); 2 black [50, 100) and [100, 200).
+
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false,
+                      now + TimeDuration::FromMilliseconds(100));
+
+  encoder.SetStartOffset(now);
+  encoder.AppendVideoSegment(std::move(segment));
+
+  encoder.Disable(now + TimeDuration::FromMilliseconds(50));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // [0, 50ms) - real.
+  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frame->mDuration);
+
+  // [50ms, 100ms) - black; the first frame is cut short by the disable.
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frame->mDuration);
+
+  // [100ms, 200ms) - black.
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// Test that an encoding which is disabled before the first frame becomes
+// black immediately.
+TEST(VP8VideoTrackEncoder, DisableBeforeFirstFrame)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Disable the track at t=0.
+  // Pass a frame in at t=50ms.
+  // Enable the track at t=100ms.
+  // Stop encoding at t=200ms.
+  // Should yield 2 frames, 1 black [0, 100); 1 real [100, 200).
+
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false,
+                      now + TimeDuration::FromMilliseconds(50));
+
+  encoder.SetStartOffset(now);
+  encoder.Disable(now);
+  encoder.AppendVideoSegment(std::move(segment));
+
+  encoder.Enable(now + TimeDuration::FromMilliseconds(100));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // [0, 100ms) - black, covering the whole disabled period.
+  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  // [100ms, 200ms) - real, starting at the enable.
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// Test that an encoding which is enabled on a frame timestamp encodes
+// frames as expected.
+TEST(VP8VideoTrackEncoder, EnableOnFrameTime)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Disable the track at t=0.
+  // Pass a frame in at t=0.
+  // Pass another frame in at t=100ms.
+  // Enable the track at t=100ms.
+  // Stop encoding at t=200ms.
+  // Should yield 2 frames, 1 black; 1 real.
+
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false,
+                      now + TimeDuration::FromMilliseconds(100));
+
+  encoder.SetStartOffset(now);
+  encoder.Disable(now);
+  encoder.AppendVideoSegment(std::move(segment));
+
+  // Advancing 100ms, for simplicity.
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(100));
+
+  encoder.Enable(now + TimeDuration::FromMilliseconds(100));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // [0, 100ms) - the black frame (track disabled from t=0).
+  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  // [100ms, 200ms) - the real frame, starting at the enable.
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// Test that an encoding which is enabled between two frame timestamps encodes
+// frames as expected.
+TEST(VP8VideoTrackEncoder, EnableBetweenFrames)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  // Disable the track at t=0.
+  // Pass a frame in at t=0.
+  // Enable the track at t=50ms.
+  // Pass another frame in at t=100ms.
+  // Stop encoding at t=200ms.
+  // Should yield 3 frames, 1 black [0, 50); 2 real [50, 100) and [100, 200).
+
+  VideoSegment segment;
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false, now);
+  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                      PRINCIPAL_HANDLE_NONE, false,
+                      now + TimeDuration::FromMilliseconds(100));
+
+  encoder.SetStartOffset(now);
+  encoder.Disable(now);
+  encoder.AppendVideoSegment(std::move(segment));
+
+  encoder.Enable(now + TimeDuration::FromMilliseconds(50));
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // [0, 50ms) - black, until the enable at t=50ms.
+  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frame->mDuration);
+
+  // [50ms, 100ms) - real (first input frame, from the enable).
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frame->mDuration);
+
+  // [100ms, 200ms) - real (second input frame).
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// Test that making time go backwards removes any future frames in the
+// encoder.
+TEST(VP8VideoTrackEncoder, BackwardsTimeResets)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  encoder.SetStartOffset(now);
+
+  // Pass frames in at t=0, t=100ms, t=200ms, t=300ms.
+  // Advance time to t=125ms.
+  // Pass frames in at t=150ms, t=250ms, t=350ms.
+  // Stop encoding at t=300ms.
+  // Should yield 4 frames, at t=0, t=100ms, t=150ms, t=250ms.
+
+  {
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false, now);
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(100));
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(200));
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(300));
+
+    encoder.AppendVideoSegment(std::move(segment));
+  }
+
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(125));
+
+  {
+    // t=150ms is earlier than the already-buffered t=200ms and t=300ms
+    // frames; appending it should drop those future frames.
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(150));
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(250));
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(350));
+
+    encoder.AppendVideoSegment(std::move(segment));
+  }
+
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(300));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // [0, 100ms)
+  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  // [100ms, 150ms)
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frame->mDuration);
+
+  // [150ms, 250ms)
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  // [250ms, 300ms)
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frame->mDuration);
+
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// Test that trying to encode a null image removes any future frames in the
+// encoder.
+TEST(VP8VideoTrackEncoder, NullImageResets)
+{
+  TestVP8TrackEncoder encoder;
+  YUVBufferGenerator generator;
+  generator.Init(mozilla::gfx::IntSize(640, 480));
+  TimeStamp now = TimeStamp::Now();
+
+  encoder.SetStartOffset(now);
+
+  // Pass frames in at t=0, t=100ms, t=200ms, t=300ms.
+  // Advance time to t=125ms.
+  // Pass in a null image at t=125ms.
+  // Pass frames in at t=250ms, t=350ms.
+  // Stop encoding at t=300ms.
+  // Should yield 3 frames, at t=0, t=100ms, t=250ms.
+
+  {
+    VideoSegment segment;
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false, now);
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(100));
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(200));
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(300));
+
+    encoder.AppendVideoSegment(std::move(segment));
+  }
+
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(125));
+
+  {
+    // The null image at t=125ms should drop the already-buffered t=200ms and
+    // t=300ms frames; the t=100ms frame then lasts until t=250ms.
+    VideoSegment segment;
+    segment.AppendFrame(nullptr, generator.GetSize(), PRINCIPAL_HANDLE_NONE,
+                        false, now + TimeDuration::FromMilliseconds(125));
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(250));
+    segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+                        PRINCIPAL_HANDLE_NONE, false,
+                        now + TimeDuration::FromMilliseconds(350));
+
+    encoder.AppendVideoSegment(std::move(segment));
+  }
+
+  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(300));
+  encoder.NotifyEndOfStream();
+
+  EXPECT_TRUE(encoder.IsEncodingComplete());
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+  EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+  // [0, 100ms)
+  RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration);
+
+  // [100ms, 250ms)
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frame->mDuration);
+
+  // [250ms, 300ms)
+  frame = encoder.mEncodedVideoQueue.PopFront();
+  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frame->mDuration);
+
+  EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+TEST(VP8VideoTrackEncoder, MaxKeyFrameDistanceLowFramerate)
+{
+ TestVP8TrackEncoder encoder;
+ YUVBufferGenerator generator;
+ generator.Init(mozilla::gfx::IntSize(240, 180));
+ TimeStamp now = TimeStamp::Now();
+
+ encoder.SetStartOffset(now);
+
+ // Pass 10s worth of frames at 2 fps and verify that the key frame interval
+ // is ~7.5s.
+ const TimeDuration duration = TimeDuration::FromSeconds(10);
+ const uint32_t numFrames = 10 * 2;
+ const TimeDuration frameDuration = duration / static_cast<int64_t>(numFrames);
+
+ {
+ VideoSegment segment;
+ for (uint32_t i = 0; i < numFrames; ++i) {
+ segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+ PRINCIPAL_HANDLE_NONE, false,
+ now + frameDuration * i);
+ }
+ encoder.AppendVideoSegment(std::move(segment));
+ }
+
+ encoder.AdvanceCurrentTime(now + duration);
+ encoder.NotifyEndOfStream();
+
+ EXPECT_TRUE(encoder.IsEncodingComplete());
+ EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+ EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+ for (uint32_t i = 0; i < numFrames; ++i) {
+ const RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+ EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 500UL, frame->mDuration)
+ << "Frame " << i << ", with start: " << frame->mTime.ToMicroseconds()
+ << "us";
+ // 7.5s key frame interval at 2 fps becomes the 15th frame.
+ EXPECT_EQ(
+ i % 15 == 0 ? EncodedFrame::VP8_I_FRAME : EncodedFrame::VP8_P_FRAME,
+ frame->mFrameType)
+ << "Frame " << i << ", with start: " << frame->mTime.ToMicroseconds()
+ << "us";
+ }
+
+ EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// This is "High" framerate, as in higher than the test for "Low" framerate.
+// We don't make it too high because the test takes considerably longer to
+// run.
+TEST(VP8VideoTrackEncoder, MaxKeyFrameDistanceHighFramerate)
+{
+ TestVP8TrackEncoder encoder;
+ YUVBufferGenerator generator;
+ generator.Init(mozilla::gfx::IntSize(240, 180));
+ TimeStamp now = TimeStamp::Now();
+
+ encoder.SetStartOffset(now);
+
+ // Pass 10s worth of frames at 8 fps and verify that the key frame interval
+ // is ~7.5s.
+ const TimeDuration duration = TimeDuration::FromSeconds(10);
+ const uint32_t numFrames = 10 * 8;
+ const TimeDuration frameDuration = duration / static_cast<int64_t>(numFrames);
+
+ {
+ VideoSegment segment;
+ for (uint32_t i = 0; i < numFrames; ++i) {
+ segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+ PRINCIPAL_HANDLE_NONE, false,
+ now + frameDuration * i);
+ }
+ encoder.AppendVideoSegment(std::move(segment));
+ }
+
+ encoder.AdvanceCurrentTime(now + duration);
+ encoder.NotifyEndOfStream();
+
+ EXPECT_TRUE(encoder.IsEncodingComplete());
+ EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+ EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+ for (uint32_t i = 0; i < numFrames; ++i) {
+ const RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+ EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 125UL, frame->mDuration)
+ << "Frame " << i << ", with start: " << frame->mTime.ToMicroseconds()
+ << "us";
+ // 7.5s key frame interval at 8 fps becomes the 60th frame.
+ EXPECT_EQ(
+ i % 60 == 0 ? EncodedFrame::VP8_I_FRAME : EncodedFrame::VP8_P_FRAME,
+ frame->mFrameType)
+ << "Frame " << i << ", with start: " << frame->mTime.ToMicroseconds()
+ << "us";
+ }
+
+ EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+TEST(VP8VideoTrackEncoder, MaxKeyFrameDistanceAdaptiveFramerate)
+{
+ TestVP8TrackEncoder encoder;
+ YUVBufferGenerator generator;
+ generator.Init(mozilla::gfx::IntSize(240, 180));
+ TimeStamp now = TimeStamp::Now();
+
+ encoder.SetStartOffset(now);
+
+ // Pass 11s worth of frames at 2 fps and verify that there is a key frame
+ // at 7.5s. Then pass 14s worth of frames at 10 fps and verify that there is
+ // a key frame at 15s (due to re-init) and then one at 22.5s.
+
+ const TimeDuration firstDuration = TimeDuration::FromSeconds(11);
+ const uint32_t firstNumFrames = 11 * 2;
+ const TimeDuration firstFrameDuration =
+ firstDuration / static_cast<int64_t>(firstNumFrames);
+ {
+ VideoSegment segment;
+ for (uint32_t i = 0; i < firstNumFrames; ++i) {
+ segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+ PRINCIPAL_HANDLE_NONE, false,
+ now + firstFrameDuration * i);
+ }
+ encoder.AppendVideoSegment(std::move(segment));
+ }
+ encoder.AdvanceCurrentTime(now + firstDuration);
+
+ const TimeDuration secondDuration = TimeDuration::FromSeconds(14);
+ const uint32_t secondNumFrames = 14 * 10;
+ const TimeDuration secondFrameDuration =
+ secondDuration / static_cast<int64_t>(secondNumFrames);
+ {
+ VideoSegment segment;
+ for (uint32_t i = 0; i < secondNumFrames; ++i) {
+ segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
+ PRINCIPAL_HANDLE_NONE, false,
+ now + firstDuration + secondFrameDuration * i);
+ }
+ encoder.AppendVideoSegment(std::move(segment));
+ }
+ encoder.AdvanceCurrentTime(now + firstDuration + secondDuration);
+
+ encoder.NotifyEndOfStream();
+
+ EXPECT_TRUE(encoder.IsEncodingComplete());
+ EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+ EXPECT_FALSE(encoder.mEncodedVideoQueue.AtEndOfStream());
+
+ // [0, 11s) - keyframe distance is now 7.5s@2fps = 15.
+ for (uint32_t i = 0; i < 22; ++i) {
+ const RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+ EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 500UL, frame->mDuration)
+ << "Frame " << i << ", with start: " << frame->mTime.ToMicroseconds()
+ << "us";
+ // 7.5s key frame interval at 2 fps becomes the 15th frame.
+ EXPECT_EQ(
+ i % 15 == 0 ? EncodedFrame::VP8_I_FRAME : EncodedFrame::VP8_P_FRAME,
+ frame->mFrameType)
+ << "Frame " << i << ", with start: " << frame->mTime.ToMicroseconds()
+ << "us";
+ }
+
+ // Input framerate is now 10fps.
+ // Framerate re-evaluation every 5s, so the keyframe distance changed at
+ // 15s.
+ for (uint32_t i = 22; i < 162; ++i) {
+ const RefPtr<EncodedFrame> frame = encoder.mEncodedVideoQueue.PopFront();
+ EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frame->mDuration)
+ << "Frame " << i << ", with start: " << frame->mTime.ToMicroseconds()
+ << "us";
+ if (i < 22 + 40) {
+ // [11s, 15s) - 40 frames at 10fps but with the 2fps keyframe distance.
+ EXPECT_EQ(
+ i % 15 == 0 ? EncodedFrame::VP8_I_FRAME : EncodedFrame::VP8_P_FRAME,
+ frame->mFrameType)
+ << "Frame " << i << ", with start: " << frame->mTime.ToMicroseconds()
+ << "us";
+ } else {
+ // [15s, 25s) - 100 frames at 10fps. Keyframe distance 75. Starts with
+ // keyframe due to re-init.
+ EXPECT_EQ((i - 22 - 40) % 75 == 0 ? EncodedFrame::VP8_I_FRAME
+ : EncodedFrame::VP8_P_FRAME,
+ frame->mFrameType)
+ << "Frame " << i << ", with start: " << frame->mTime.ToMicroseconds()
+ << "us";
+ }
+ }
+
+ EXPECT_TRUE(encoder.mEncodedVideoQueue.AtEndOfStream());
+}
+
+// EOS test
+TEST(VP8VideoTrackEncoder, EncodeComplete)
+{
+ TestVP8TrackEncoder encoder;
+
+ // NotifyEndOfStream should wrap up the encoding immediately.
+ encoder.NotifyEndOfStream();
+ EXPECT_TRUE(encoder.mEncodedVideoQueue.IsFinished());
+}
diff --git a/dom/media/gtest/TestVideoUtils.cpp b/dom/media/gtest/TestVideoUtils.cpp
new file mode 100644
index 0000000000..d322d15d64
--- /dev/null
+++ b/dom/media/gtest/TestVideoUtils.cpp
@@ -0,0 +1,128 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "nsMimeTypes.h"
+#include "nsString.h"
+#include "VideoUtils.h"
+
+using namespace mozilla;
+
+TEST(MediaMIMETypes, IsMediaMIMEType)
+{
+ EXPECT_TRUE(IsMediaMIMEType(AUDIO_MP4));
+ EXPECT_TRUE(IsMediaMIMEType(VIDEO_MP4));
+ EXPECT_TRUE(IsMediaMIMEType("application/x-mp4"));
+
+ EXPECT_TRUE(IsMediaMIMEType("audio/m"));
+ EXPECT_FALSE(IsMediaMIMEType("audio/"));
+
+ EXPECT_FALSE(IsMediaMIMEType("vide/mp4"));
+ EXPECT_FALSE(IsMediaMIMEType("videos/mp4"));
+
+ // Expect lowercase only.
+ EXPECT_FALSE(IsMediaMIMEType("Video/mp4"));
+}
+
+TEST(StringListRange, MakeStringListRange)
+{
+ static const struct {
+ const char* mList;
+ const char* mExpectedSkipEmpties;
+ const char* mExpectedProcessAll;
+ const char* mExpectedProcessEmpties;
+ } tests[] = {
+ // string skip all empties
+ {"", "", "|", ""},
+ {" ", "", "|", "|"},
+ {",", "", "||", "||"},
+ {" , ", "", "||", "||"},
+ {"a", "a|", "a|", "a|"},
+ {" a ", "a|", "a|", "a|"},
+ {"a,", "a|", "a||", "a||"},
+ {"a, ", "a|", "a||", "a||"},
+ {",a", "a|", "|a|", "|a|"},
+ {" ,a", "a|", "|a|", "|a|"},
+ {"aa,bb", "aa|bb|", "aa|bb|", "aa|bb|"},
+ {" a a , b b ", "a a|b b|", "a a|b b|", "a a|b b|"},
+ {" , ,a 1,, ,b 2,", "a 1|b 2|", "||a 1|||b 2||", "||a 1|||b 2||"}};
+
+ for (const auto& test : tests) {
+ nsCString list(test.mList);
+ nsCString out;
+ for (const auto& item : MakeStringListRange(list)) {
+ out += item;
+ out += "|";
+ }
+ EXPECT_STREQ(test.mExpectedSkipEmpties, out.Data());
+ out.SetLength(0);
+
+ for (const auto& item :
+ MakeStringListRange<StringListRangeEmptyItems::ProcessAll>(list)) {
+ out += item;
+ out += "|";
+ }
+ EXPECT_STREQ(test.mExpectedProcessAll, out.Data());
+ out.SetLength(0);
+
+ for (const auto& item :
+ MakeStringListRange<StringListRangeEmptyItems::ProcessEmptyItems>(
+ list)) {
+ out += item;
+ out += "|";
+ }
+ EXPECT_STREQ(test.mExpectedProcessEmpties, out.Data());
+ }
+}
+
+TEST(StringListRange, StringListContains)
+{
+ static const struct {
+ const char* mList;
+ const char* mItemToSearch;
+ bool mExpectedSkipEmpties;
+ bool mExpectedProcessAll;
+ bool mExpectedProcessEmpties;
+ } tests[] = {// haystack needle skip all empties
+ {"", "", false, true, false},
+ {" ", "", false, true, true},
+ {"", "a", false, false, false},
+ {" ", "a", false, false, false},
+ {",", "a", false, false, false},
+ {" , ", "", false, true, true},
+ {" , ", "a", false, false, false},
+ {"a", "a", true, true, true},
+ {"a", "b", false, false, false},
+ {" a ", "a", true, true, true},
+ {"aa,bb", "aa", true, true, true},
+ {"aa,bb", "bb", true, true, true},
+ {"aa,bb", "cc", false, false, false},
+ {"aa,bb", " aa ", false, false, false},
+ {" a a , b b ", "a a", true, true, true},
+ {" , ,a 1,, ,b 2,", "a 1", true, true, true},
+ {" , ,a 1,, ,b 2,", "b 2", true, true, true},
+ {" , ,a 1,, ,b 2,", "", false, true, true},
+ {" , ,a 1,, ,b 2,", " ", false, false, false},
+ {" , ,a 1,, ,b 2,", "A 1", false, false, false},
+ {" , ,A 1,, ,b 2,", "a 1", false, false, false}};
+
+ for (const auto& test : tests) {
+ nsCString list(test.mList);
+ nsCString itemToSearch(test.mItemToSearch);
+ EXPECT_EQ(test.mExpectedSkipEmpties, StringListContains(list, itemToSearch))
+ << "trying to find \"" << itemToSearch.Data() << "\" in \""
+ << list.Data() << "\" (skipping empties)";
+ EXPECT_EQ(test.mExpectedProcessAll,
+ StringListContains<StringListRangeEmptyItems::ProcessAll>(
+ list, itemToSearch))
+ << "trying to find \"" << itemToSearch.Data() << "\" in \""
+ << list.Data() << "\" (processing everything)";
+ EXPECT_EQ(test.mExpectedProcessEmpties,
+ StringListContains<StringListRangeEmptyItems::ProcessEmptyItems>(
+ list, itemToSearch))
+ << "trying to find \"" << itemToSearch.Data() << "\" in \""
+ << list.Data() << "\" (processing empties)";
+ }
+}
diff --git a/dom/media/gtest/TestWebMBuffered.cpp b/dom/media/gtest/TestWebMBuffered.cpp
new file mode 100644
index 0000000000..35ba00fec7
--- /dev/null
+++ b/dom/media/gtest/TestWebMBuffered.cpp
@@ -0,0 +1,234 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "mozilla/ArrayUtils.h"
+#include <stdio.h>
+#include "nsTArray.h"
+#include "WebMBufferedParser.h"
+
+using namespace mozilla;
+
+std::ostream& operator<<(std::ostream& aStream, nsresult aResult) {
+ return aStream << GetStaticErrorName(aResult);
+}
+
+namespace mozilla {
+std::ostream& operator<<(std::ostream& aStream, const MediaResult& aResult) {
+ aStream << aResult.Code();
+ if (!aResult.Message().IsEmpty()) {
+ aStream << " (" << aResult.Message() << ")";
+ }
+ return aStream;
+}
+} // namespace mozilla
+
+// "test.webm" contains 8 SimpleBlocks in a single Cluster. The blocks with
+// timecodes 100000000 and 133000000 are skipped by WebMBufferedParser
+// because they occur after a block with timecode 160000000 and the parser
+// expects in-order timecodes per the WebM spec. The remaining 6
+// SimpleBlocks have the following attributes:
+static const uint64_t gTimecodes[] = {66000000, 160000000, 166000000,
+ 200000000, 233000000, 320000000};
+static const int64_t gEndOffsets[] = {466, 737, 1209, 1345, 1508, 1980};
+
+TEST(WebMBuffered, BasicTests)
+{
+ WebMBufferedParser parser(0);
+
+ nsTArray<WebMTimeDataOffset> mapping;
+ EXPECT_EQ(parser.Append(nullptr, 0, mapping), NS_OK);
+ EXPECT_TRUE(mapping.IsEmpty());
+ EXPECT_EQ(parser.mStartOffset, 0);
+ EXPECT_EQ(parser.mCurrentOffset, 0);
+
+ unsigned char buf[] = {0x1a, 0x45, 0xdf, 0xa3};
+ EXPECT_EQ(parser.Append(buf, ArrayLength(buf), mapping), NS_OK);
+ EXPECT_TRUE(mapping.IsEmpty());
+ EXPECT_EQ(parser.mStartOffset, 0);
+ EXPECT_EQ(parser.mCurrentOffset, 4);
+}
+
+static void ReadFile(const char* aPath, nsTArray<uint8_t>& aBuffer) {
+ FILE* f = fopen(aPath, "rb");
+ ASSERT_NE(f, (FILE*)nullptr);
+
+ int r = fseek(f, 0, SEEK_END);
+ ASSERT_EQ(r, 0);
+
+ long size = ftell(f);
+ ASSERT_NE(size, -1);
+ aBuffer.SetLength(size);
+
+ r = fseek(f, 0, SEEK_SET);
+ ASSERT_EQ(r, 0);
+
+ size_t got = fread(aBuffer.Elements(), 1, size, f);
+ ASSERT_EQ(got, size_t(size));
+
+ r = fclose(f);
+ ASSERT_EQ(r, 0);
+}
+
+TEST(WebMBuffered, RealData)
+{
+ WebMBufferedParser parser(0);
+
+ nsTArray<uint8_t> webmData;
+ ReadFile("test.webm", webmData);
+
+ nsTArray<WebMTimeDataOffset> mapping;
+ EXPECT_EQ(parser.Append(webmData.Elements(), webmData.Length(), mapping),
+ NS_OK);
+ EXPECT_EQ(mapping.Length(), 6u);
+ EXPECT_EQ(parser.mStartOffset, 0);
+ EXPECT_EQ(parser.mCurrentOffset, int64_t(webmData.Length()));
+ EXPECT_EQ(parser.GetTimecodeScale(), 500000u);
+
+ for (uint32_t i = 0; i < mapping.Length(); ++i) {
+ EXPECT_EQ(mapping[i].mEndOffset, gEndOffsets[i]);
+ EXPECT_EQ(mapping[i].mSyncOffset, 326);
+ EXPECT_EQ(mapping[i].mTimecode, gTimecodes[i]);
+ }
+}
+
+TEST(WebMBuffered, RealDataAppend)
+{
+ WebMBufferedParser parser(0);
+ nsTArray<WebMTimeDataOffset> mapping;
+
+ nsTArray<uint8_t> webmData;
+ ReadFile("test.webm", webmData);
+
+ uint32_t arrayEntries = mapping.Length();
+ size_t offset = 0;
+ while (offset < webmData.Length()) {
+ EXPECT_EQ(parser.Append(webmData.Elements() + offset, 1, mapping), NS_OK);
+ offset += 1;
+ EXPECT_EQ(parser.mCurrentOffset, int64_t(offset));
+ if (mapping.Length() != arrayEntries) {
+ arrayEntries = mapping.Length();
+ ASSERT_LE(arrayEntries, 6u);
+ uint32_t i = arrayEntries - 1;
+ EXPECT_EQ(mapping[i].mEndOffset, gEndOffsets[i]);
+ EXPECT_EQ(mapping[i].mSyncOffset, 326);
+ EXPECT_EQ(mapping[i].mTimecode, gTimecodes[i]);
+ EXPECT_EQ(parser.GetTimecodeScale(), 500000u);
+ }
+ }
+ EXPECT_EQ(mapping.Length(), 6u);
+ EXPECT_EQ(parser.mStartOffset, 0);
+ EXPECT_EQ(parser.mCurrentOffset, int64_t(webmData.Length()));
+ EXPECT_EQ(parser.GetTimecodeScale(), 500000u);
+
+ for (uint32_t i = 0; i < mapping.Length(); ++i) {
+ EXPECT_EQ(mapping[i].mEndOffset, gEndOffsets[i]);
+ EXPECT_EQ(mapping[i].mSyncOffset, 326);
+ EXPECT_EQ(mapping[i].mTimecode, gTimecodes[i]);
+ }
+}
+
+TEST(WebMBuffered, InvalidEBMLMaxIdLength)
+{
+ WebMBufferedParser parser(0);
+
+ nsTArray<uint8_t> webmData;
+ // This file contains EBMLMaxIdLength=3, but a Segment element (and maybe
+ // others) whose Id VInt has length 4.
+ ReadFile("test_InvalidElementId.webm", webmData);
+
+ nsTArray<WebMTimeDataOffset> mapping;
+ EXPECT_EQ(parser.Append(webmData.Elements(), webmData.Length(), mapping),
+ NS_ERROR_FAILURE);
+}
+
+TEST(WebMBuffered, InvalidLargeElementIdLength)
+{
+ WebMBufferedParser parser(0);
+
+ nsTArray<uint8_t> webmData;
+ // This file contains EBMLMaxIdLength=4, but a dummy element whose Id VInt has
+ // length 5.
+ ReadFile("test_InvalidLargeElementId.webm", webmData);
+
+ nsTArray<WebMTimeDataOffset> mapping;
+ EXPECT_EQ(parser.Append(webmData.Elements(), webmData.Length(), mapping),
+ NS_ERROR_FAILURE);
+}
+
+TEST(WebMBuffered, InvalidSmallEBMLMaxIdLength)
+{
+ WebMBufferedParser parser(0);
+
+ nsTArray<uint8_t> webmData;
+ // This file contains EBMLMaxIdLength=3.
+ // Per draft-ietf-cellar-matroska-13 EBMLMaxIdLength MUST be 4. But element
+ // ids can also be between 1 and 5 octets long. 5 only if EBMLMaxIdLength
+  // specifies it. Either way, 3 is too short.
+ ReadFile("test_InvalidSmallEBMLMaxIdLength.webm", webmData);
+
+ nsTArray<WebMTimeDataOffset> mapping;
+ EXPECT_EQ(parser.Append(webmData.Elements(), webmData.Length(), mapping),
+ NS_ERROR_FAILURE);
+}
+
+TEST(WebMBuffered, ValidLargeEBMLMaxIdLength)
+{
+ WebMBufferedParser parser(0);
+
+ nsTArray<uint8_t> webmData;
+ // This file contains EBMLMaxIdLength=5 and a dummy element with a 5 octet
+ // long id. Per draft-ietf-cellar-matroska-13 EBMLMaxIdLength MUST be 4. But
+ // element ids can also be between 1 and 5 octets long. 5 only if
+  // EBMLMaxIdLength specifies it. We tolerate this for compatibility.
+ ReadFile("test_ValidLargeEBMLMaxIdLength.webm", webmData);
+
+ nsTArray<WebMTimeDataOffset> mapping;
+ EXPECT_EQ(parser.Append(webmData.Elements(), webmData.Length(), mapping),
+ NS_OK);
+}
+
+TEST(WebMBuffered, InvalidLargeEBMLMaxIdLength)
+{
+ WebMBufferedParser parser(0);
+
+ nsTArray<uint8_t> webmData;
+ // This file contains EBMLMaxIdLength=6.
+ // Per draft-ietf-cellar-matroska-13 EBMLMaxIdLength MUST be 4. But
+ // element ids can also be between 1 and 5 octets long. 5 only if
+  // EBMLMaxIdLength specifies it. Either way, 6 is too long.
+ ReadFile("test_InvalidLargeEBMLMaxIdLength.webm", webmData);
+
+ nsTArray<WebMTimeDataOffset> mapping;
+ EXPECT_EQ(parser.Append(webmData.Elements(), webmData.Length(), mapping),
+ NS_ERROR_FAILURE);
+}
+
+TEST(WebMBuffered, ValidSmallEBMLMaxSizeLength)
+{
+ WebMBufferedParser parser(0);
+
+ nsTArray<uint8_t> webmData;
+ // This file contains EBMLMaxSizeLength=7 and no element with an element size
+ // longer than 7 bytes.
+ ReadFile("test_ValidSmallEBMLMaxSizeLength.webm", webmData);
+
+ nsTArray<WebMTimeDataOffset> mapping;
+ EXPECT_EQ(parser.Append(webmData.Elements(), webmData.Length(), mapping),
+ NS_OK);
+}
+
+TEST(WebMBuffered, InvalidEBMLMaxSizeLength)
+{
+ WebMBufferedParser parser(0);
+
+ nsTArray<uint8_t> webmData;
+ // This file contains EBMLMaxSizeLength=7, but the Segment element size VInt
+ // has length 8.
+ ReadFile("test_InvalidElementSize.webm", webmData);
+
+ nsTArray<WebMTimeDataOffset> mapping;
+ EXPECT_EQ(parser.Append(webmData.Elements(), webmData.Length(), mapping),
+ NS_ERROR_FAILURE);
+}
diff --git a/dom/media/gtest/TestWebMWriter.cpp b/dom/media/gtest/TestWebMWriter.cpp
new file mode 100644
index 0000000000..837ee6a2c6
--- /dev/null
+++ b/dom/media/gtest/TestWebMWriter.cpp
@@ -0,0 +1,388 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/MathAlgorithms.h"
+#include "nestegg/nestegg.h"
+#include "DriftCompensation.h"
+#include "OpusTrackEncoder.h"
+#include "VP8TrackEncoder.h"
+#include "WebMWriter.h"
+
+using namespace mozilla;
+
+class WebMOpusTrackEncoder : public OpusTrackEncoder {
+ public:
+ explicit WebMOpusTrackEncoder(TrackRate aTrackRate)
+ : OpusTrackEncoder(aTrackRate, mEncodedAudioQueue) {}
+ bool TestOpusCreation(int aChannels) {
+ if (NS_SUCCEEDED(Init(aChannels))) {
+ return true;
+ }
+ return false;
+ }
+ MediaQueue<EncodedFrame> mEncodedAudioQueue;
+};
+
+class WebMVP8TrackEncoder : public VP8TrackEncoder {
+ public:
+ explicit WebMVP8TrackEncoder(TrackRate aTrackRate = 90000)
+ : VP8TrackEncoder(nullptr, aTrackRate, mEncodedVideoQueue,
+ FrameDroppingMode::DISALLOW) {}
+
+ bool TestVP8Creation(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth,
+ int32_t aDisplayHeight) {
+ if (NS_SUCCEEDED(
+ Init(aWidth, aHeight, aDisplayWidth, aDisplayHeight, 30))) {
+ return true;
+ }
+ return false;
+ }
+ MediaQueue<EncodedFrame> mEncodedVideoQueue;
+};
+
+static void GetOpusMetadata(int aChannels, TrackRate aTrackRate,
+ nsTArray<RefPtr<TrackMetadataBase>>& aMeta) {
+ WebMOpusTrackEncoder opusEncoder(aTrackRate);
+ EXPECT_TRUE(opusEncoder.TestOpusCreation(aChannels));
+ aMeta.AppendElement(opusEncoder.GetMetadata());
+}
+
+static void GetVP8Metadata(int32_t aWidth, int32_t aHeight,
+ int32_t aDisplayWidth, int32_t aDisplayHeight,
+ TrackRate aTrackRate,
+ nsTArray<RefPtr<TrackMetadataBase>>& aMeta) {
+ WebMVP8TrackEncoder vp8Encoder;
+ EXPECT_TRUE(vp8Encoder.TestVP8Creation(aWidth, aHeight, aDisplayWidth,
+ aDisplayHeight));
+ aMeta.AppendElement(vp8Encoder.GetMetadata());
+}
+
+const uint64_t FIXED_DURATION = 1000000;
+const uint32_t FIXED_FRAMESIZE = 500;
+
+class TestWebMWriter : public WebMWriter {
+ public:
+ TestWebMWriter() = default;
+
+ // When we append an I-Frame into WebM muxer, the muxer will treat previous
+ // data as "a cluster".
+ // In these test cases, we will call the function many times to enclose the
+ // previous cluster so that we can retrieve data by |GetContainerData|.
+ void AppendDummyFrame(EncodedFrame::FrameType aFrameType,
+ uint64_t aDuration) {
+ nsTArray<RefPtr<EncodedFrame>> encodedVideoData;
+ auto frameData = MakeRefPtr<EncodedFrame::FrameData>();
+ // Create dummy frame data.
+ frameData->SetLength(FIXED_FRAMESIZE);
+ encodedVideoData.AppendElement(
+ MakeRefPtr<EncodedFrame>(mTimestamp, aDuration, PR_USEC_PER_SEC,
+ aFrameType, std::move(frameData)));
+ WriteEncodedTrack(encodedVideoData, 0);
+ mTimestamp += media::TimeUnit::FromMicroseconds(aDuration);
+ }
+
+ bool HaveValidCluster() {
+ nsTArray<nsTArray<uint8_t>> encodedBuf;
+ GetContainerData(&encodedBuf, 0);
+ return !encodedBuf.IsEmpty();
+ }
+
+ // Timestamp accumulator that increased by AppendDummyFrame.
+ // Keep it public that we can do some testcases about it.
+ media::TimeUnit mTimestamp;
+};
+
+TEST(WebMWriter, Metadata)
+{
+ TestWebMWriter writer;
+
+ // The output should be empty since we didn't set any metadata in writer.
+ nsTArray<nsTArray<uint8_t>> encodedBuf;
+ writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER);
+ EXPECT_TRUE(encodedBuf.Length() == 0);
+ writer.GetContainerData(&encodedBuf, ContainerWriter::FLUSH_NEEDED);
+ EXPECT_TRUE(encodedBuf.Length() == 0);
+
+ nsTArray<RefPtr<TrackMetadataBase>> meta;
+
+ TrackRate trackRate = 44100;
+
+ // Get opus metadata.
+ int channel = 1;
+ GetOpusMetadata(channel, trackRate, meta);
+
+ // Get vp8 metadata
+ int32_t width = 640;
+ int32_t height = 480;
+ int32_t displayWidth = 640;
+ int32_t displayHeight = 480;
+ GetVP8Metadata(width, height, displayWidth, displayHeight, trackRate, meta);
+
+ // Set metadata
+ writer.SetMetadata(meta);
+
+ writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER);
+ EXPECT_TRUE(encodedBuf.Length() > 0);
+}
+
+TEST(WebMWriter, Cluster)
+{
+ TestWebMWriter writer;
+ nsTArray<RefPtr<TrackMetadataBase>> meta;
+ TrackRate trackRate = 48000;
+ // Get opus metadata.
+ int channel = 1;
+ GetOpusMetadata(channel, trackRate, meta);
+ // Get vp8 metadata
+ int32_t width = 320;
+ int32_t height = 240;
+ int32_t displayWidth = 320;
+ int32_t displayHeight = 240;
+ GetVP8Metadata(width, height, displayWidth, displayHeight, trackRate, meta);
+ writer.SetMetadata(meta);
+
+ nsTArray<nsTArray<uint8_t>> encodedBuf;
+ writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER);
+ EXPECT_TRUE(encodedBuf.Length() > 0);
+ encodedBuf.Clear();
+
+ // write the first I-Frame.
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
+ EXPECT_TRUE(writer.HaveValidCluster());
+
+ // The second I-Frame.
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
+ EXPECT_TRUE(writer.HaveValidCluster());
+
+ // P-Frame.
+ writer.AppendDummyFrame(EncodedFrame::VP8_P_FRAME, FIXED_DURATION);
+ EXPECT_TRUE(writer.HaveValidCluster());
+
+ // The third I-Frame.
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
+ EXPECT_TRUE(writer.HaveValidCluster());
+}
+
+TEST(WebMWriter, FLUSH_NEEDED)
+{
+ TestWebMWriter writer;
+ nsTArray<RefPtr<TrackMetadataBase>> meta;
+ TrackRate trackRate = 44100;
+ // Get opus metadata.
+ int channel = 2;
+ GetOpusMetadata(channel, trackRate, meta);
+ // Get vp8 metadata
+ int32_t width = 176;
+ int32_t height = 352;
+ int32_t displayWidth = 176;
+ int32_t displayHeight = 352;
+ GetVP8Metadata(width, height, displayWidth, displayHeight, trackRate, meta);
+ writer.SetMetadata(meta);
+ // Have data because the metadata is finished.
+ EXPECT_TRUE(writer.HaveValidCluster());
+
+ // write the first I-Frame.
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
+
+ // P-Frame
+ writer.AppendDummyFrame(EncodedFrame::VP8_P_FRAME, FIXED_DURATION);
+ // Have data because frames were written.
+ EXPECT_TRUE(writer.HaveValidCluster());
+ // No data because the previous check emptied it.
+ EXPECT_FALSE(writer.HaveValidCluster());
+
+ nsTArray<nsTArray<uint8_t>> encodedBuf;
+ // No data because the flag ContainerWriter::FLUSH_NEEDED does nothing.
+ writer.GetContainerData(&encodedBuf, ContainerWriter::FLUSH_NEEDED);
+ EXPECT_TRUE(encodedBuf.IsEmpty());
+ encodedBuf.Clear();
+
+ // P-Frame
+ writer.AppendDummyFrame(EncodedFrame::VP8_P_FRAME, FIXED_DURATION);
+ // Have data because we continue the previous cluster.
+ EXPECT_TRUE(writer.HaveValidCluster());
+
+ // I-Frame
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
+ // Have data with a new cluster.
+ EXPECT_TRUE(writer.HaveValidCluster());
+
+ // I-Frame
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
+ // Have data with a new cluster.
+ EXPECT_TRUE(writer.HaveValidCluster());
+}
+
+struct WebMioData {
+ nsTArray<uint8_t> data;
+ CheckedInt<size_t> offset;
+};
+
+static int webm_read(void* aBuffer, size_t aLength, void* aUserData) {
+ NS_ASSERTION(aUserData, "aUserData must point to a valid WebMioData");
+ WebMioData* ioData = static_cast<WebMioData*>(aUserData);
+
+ // Check the read length.
+ if (aLength > ioData->data.Length()) {
+ return 0;
+ }
+
+ // Check eos.
+ if (ioData->offset.value() >= ioData->data.Length()) {
+ return 0;
+ }
+
+ size_t oldOffset = ioData->offset.value();
+ ioData->offset += aLength;
+ if (!ioData->offset.isValid() ||
+ (ioData->offset.value() > ioData->data.Length())) {
+ return -1;
+ }
+ memcpy(aBuffer, ioData->data.Elements() + oldOffset, aLength);
+ return 1;
+}
+
+static int webm_seek(int64_t aOffset, int aWhence, void* aUserData) {
+ NS_ASSERTION(aUserData, "aUserData must point to a valid WebMioData");
+ WebMioData* ioData = static_cast<WebMioData*>(aUserData);
+
+ if (Abs(aOffset) > ioData->data.Length()) {
+ NS_ERROR("Invalid aOffset");
+ return -1;
+ }
+
+ switch (aWhence) {
+ case NESTEGG_SEEK_END: {
+ CheckedInt<size_t> tempOffset = ioData->data.Length();
+ ioData->offset = tempOffset + aOffset;
+ break;
+ }
+ case NESTEGG_SEEK_CUR:
+ ioData->offset += aOffset;
+ break;
+ case NESTEGG_SEEK_SET:
+ ioData->offset = aOffset;
+ break;
+ default:
+ NS_ERROR("Unknown whence");
+ return -1;
+ }
+
+ if (!ioData->offset.isValid()) {
+ NS_ERROR("Invalid offset");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int64_t webm_tell(void* aUserData) {
+ NS_ASSERTION(aUserData, "aUserData must point to a valid WebMioData");
+ WebMioData* ioData = static_cast<WebMioData*>(aUserData);
+ return ioData->offset.isValid() ? ioData->offset.value() : -1;
+}
+
+TEST(WebMWriter, bug970774_aspect_ratio)
+{
+ TestWebMWriter writer;
+ nsTArray<RefPtr<TrackMetadataBase>> meta;
+ TrackRate trackRate = 44100;
+ // Get opus metadata.
+ int channel = 1;
+ GetOpusMetadata(channel, trackRate, meta);
+ // Set vp8 metadata
+ int32_t width = 640;
+ int32_t height = 480;
+ int32_t displayWidth = 1280;
+ int32_t displayHeight = 960;
+ GetVP8Metadata(width, height, displayWidth, displayHeight, trackRate, meta);
+ writer.SetMetadata(meta);
+
+ // write the first I-Frame.
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
+
+ // write the second I-Frame.
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
+
+ // Get the metadata and the first cluster.
+ nsTArray<nsTArray<uint8_t>> encodedBuf;
+ writer.GetContainerData(&encodedBuf, 0);
+ // Flatten the encodedBuf.
+ WebMioData ioData;
+ ioData.offset = 0;
+ for (uint32_t i = 0; i < encodedBuf.Length(); ++i) {
+ ioData.data.AppendElements(encodedBuf[i]);
+ }
+
+ // Use nestegg to verify the information in metadata.
+ nestegg* context = nullptr;
+ nestegg_io io;
+ io.read = webm_read;
+ io.seek = webm_seek;
+ io.tell = webm_tell;
+ io.userdata = static_cast<void*>(&ioData);
+ int rv = nestegg_init(&context, io, nullptr, -1);
+ EXPECT_EQ(rv, 0);
+ unsigned int ntracks = 0;
+ rv = nestegg_track_count(context, &ntracks);
+ EXPECT_EQ(rv, 0);
+ EXPECT_EQ(ntracks, (unsigned int)2);
+ for (unsigned int track = 0; track < ntracks; ++track) {
+ int id = nestegg_track_codec_id(context, track);
+ EXPECT_NE(id, -1);
+ int type = nestegg_track_type(context, track);
+ if (type == NESTEGG_TRACK_VIDEO) {
+ nestegg_video_params params;
+ rv = nestegg_track_video_params(context, track, &params);
+ EXPECT_EQ(rv, 0);
+ EXPECT_EQ(width, static_cast<int32_t>(params.width));
+ EXPECT_EQ(height, static_cast<int32_t>(params.height));
+ EXPECT_EQ(displayWidth, static_cast<int32_t>(params.display_width));
+ EXPECT_EQ(displayHeight, static_cast<int32_t>(params.display_height));
+ } else if (type == NESTEGG_TRACK_AUDIO) {
+ nestegg_audio_params params;
+ rv = nestegg_track_audio_params(context, track, &params);
+ EXPECT_EQ(rv, 0);
+ EXPECT_EQ(channel, static_cast<int>(params.channels));
+ EXPECT_EQ(static_cast<double>(trackRate), params.rate);
+ }
+ }
+ if (context) {
+ nestegg_destroy(context);
+ }
+}
+
+/**
+ * Test that we don't crash when writing two video frames that are too far apart
+ * to fit in the same cluster (>32767ms).
+ */
+TEST(WebMWriter, LongVideoGap)
+{
+ TestWebMWriter writer;
+ nsTArray<RefPtr<TrackMetadataBase>> meta;
+ TrackRate trackRate = 44100;
+ // Set vp8 metadata
+ int32_t width = 640;
+ int32_t height = 480;
+ int32_t displayWidth = 640;
+ int32_t displayHeight = 480;
+ GetVP8Metadata(width, height, displayWidth, displayHeight, trackRate, meta);
+ writer.SetMetadata(meta);
+
+ // write the first I-Frame.
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME,
+ media::TimeUnit::FromSeconds(33).ToMicroseconds());
+
+ // write the second I-Frame.
+ writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME,
+ media::TimeUnit::FromSeconds(0.33).ToMicroseconds());
+
+ nsTArray<nsTArray<uint8_t>> encodedBuf;
+ writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER);
+ // metadata + 2 frames
+ EXPECT_EQ(encodedBuf.Length(), 3U);
+}
diff --git a/dom/media/gtest/YUVBufferGenerator.cpp b/dom/media/gtest/YUVBufferGenerator.cpp
new file mode 100644
index 0000000000..60c8c6adcb
--- /dev/null
+++ b/dom/media/gtest/YUVBufferGenerator.cpp
@@ -0,0 +1,144 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "YUVBufferGenerator.h"
+
+#include "VideoUtils.h"
+
+using namespace mozilla::layers;
+using namespace mozilla;
+
+void YUVBufferGenerator::Init(const mozilla::gfx::IntSize& aSize) {
+ mImageSize = aSize;
+
+ int yPlaneLen = aSize.width * aSize.height;
+ int cbcrPlaneLen = (yPlaneLen + 1) / 2;
+ int frameLen = yPlaneLen + cbcrPlaneLen;
+
+ // Generate source buffer.
+ mSourceBuffer.SetLength(frameLen);
+
+ // Fill Y plane.
+ memset(mSourceBuffer.Elements(), 0x10, yPlaneLen);
+
+ // Fill Cb/Cr planes.
+ memset(mSourceBuffer.Elements() + yPlaneLen, 0x80, cbcrPlaneLen);
+}
+
+mozilla::gfx::IntSize YUVBufferGenerator::GetSize() const { return mImageSize; }
+
+already_AddRefed<Image> YUVBufferGenerator::GenerateI420Image() {
+ return do_AddRef(CreateI420Image());
+}
+
+already_AddRefed<Image> YUVBufferGenerator::GenerateNV12Image() {
+ return do_AddRef(CreateNV12Image());
+}
+
+already_AddRefed<Image> YUVBufferGenerator::GenerateNV21Image() {
+ return do_AddRef(CreateNV21Image());
+}
+
+Image* YUVBufferGenerator::CreateI420Image() {
+ PlanarYCbCrImage* image =
+ new RecyclingPlanarYCbCrImage(new BufferRecycleBin());
+ PlanarYCbCrData data;
+ data.mPictureRect = gfx::IntRect(0, 0, mImageSize.width, mImageSize.height);
+
+ const uint32_t yPlaneSize = mImageSize.width * mImageSize.height;
+ const uint32_t halfWidth = (mImageSize.width + 1) / 2;
+ const uint32_t halfHeight = (mImageSize.height + 1) / 2;
+ const uint32_t uvPlaneSize = halfWidth * halfHeight;
+
+ // Y plane.
+ uint8_t* y = mSourceBuffer.Elements();
+ data.mYChannel = y;
+ data.mYStride = mImageSize.width;
+ data.mYSkip = 0;
+
+ // Cr plane (aka V).
+ uint8_t* cr = y + yPlaneSize + uvPlaneSize;
+ data.mCrChannel = cr;
+ data.mCrSkip = 0;
+
+ // Cb plane (aka U).
+ uint8_t* cb = y + yPlaneSize;
+ data.mCbChannel = cb;
+ data.mCbSkip = 0;
+
+ // CrCb plane vectors.
+ data.mCbCrStride = halfWidth;
+ data.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+
+ data.mYUVColorSpace = DefaultColorSpace(mImageSize);
+
+ image->CopyData(data);
+ return image;
+}
+
+Image* YUVBufferGenerator::CreateNV12Image() {
+ NVImage* image = new NVImage();
+ PlanarYCbCrData data;
+ data.mPictureRect = gfx::IntRect(0, 0, mImageSize.width, mImageSize.height);
+
+ const uint32_t yPlaneSize = mImageSize.width * mImageSize.height;
+
+ // Y plane.
+ uint8_t* y = mSourceBuffer.Elements();
+ data.mYChannel = y;
+ data.mYStride = mImageSize.width;
+ data.mYSkip = 0;
+
+ // Cb plane (aka U).
+ uint8_t* cb = y + yPlaneSize;
+ data.mCbChannel = cb;
+ data.mCbSkip = 1;
+
+ // Cr plane (aka V).
+ uint8_t* cr = y + yPlaneSize + 1;
+ data.mCrChannel = cr;
+ data.mCrSkip = 1;
+
+ // 4:2:0.
+ data.mCbCrStride = mImageSize.width;
+ data.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+
+ image->SetData(data);
+ return image;
+}
+
+Image* YUVBufferGenerator::CreateNV21Image() {
+ NVImage* image = new NVImage();
+ PlanarYCbCrData data;
+ data.mPictureRect = gfx::IntRect(0, 0, mImageSize.width, mImageSize.height);
+
+ const uint32_t yPlaneSize = mImageSize.width * mImageSize.height;
+
+ // Y plane.
+ uint8_t* y = mSourceBuffer.Elements();
+ data.mYChannel = y;
+ data.mYStride = mImageSize.width;
+ data.mYSkip = 0;
+
+ // Cb plane (aka U).
+ uint8_t* cb = y + yPlaneSize + 1;
+ data.mCbChannel = cb;
+ data.mCbSkip = 1;
+
+ // Cr plane (aka V).
+ uint8_t* cr = y + yPlaneSize;
+ data.mCrChannel = cr;
+ data.mCrSkip = 1;
+
+ // 4:2:0.
+ data.mCbCrStride = mImageSize.width;
+ data.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+
+ data.mYUVColorSpace = DefaultColorSpace(mImageSize);
+
+ image->SetData(data);
+ return image;
+}
diff --git a/dom/media/gtest/YUVBufferGenerator.h b/dom/media/gtest/YUVBufferGenerator.h
new file mode 100644
index 0000000000..cb6ed6b220
--- /dev/null
+++ b/dom/media/gtest/YUVBufferGenerator.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef YUVBufferGenerator_h
+#define YUVBufferGenerator_h
+
+#include "ImageContainer.h"
+#include "mozilla/AlreadyAddRefed.h"
+#include "nsTArray.h"
+#include "Point.h" // mozilla::gfx::IntSize
+
+// A helper object to generate different YUV planes.
+class YUVBufferGenerator {
+ public:
+ void Init(const mozilla::gfx::IntSize& aSize);
+ mozilla::gfx::IntSize GetSize() const;
+ already_AddRefed<mozilla::layers::Image> GenerateI420Image();
+ already_AddRefed<mozilla::layers::Image> GenerateNV12Image();
+ already_AddRefed<mozilla::layers::Image> GenerateNV21Image();
+
+ private:
+ mozilla::layers::Image* CreateI420Image();
+ mozilla::layers::Image* CreateNV12Image();
+ mozilla::layers::Image* CreateNV21Image();
+ mozilla::gfx::IntSize mImageSize;
+ nsTArray<uint8_t> mSourceBuffer;
+};
+
+#endif // YUVBufferGenerator_h
diff --git a/dom/media/gtest/dash_dashinit.mp4 b/dom/media/gtest/dash_dashinit.mp4
new file mode 100644
index 0000000000..d19068f36d
--- /dev/null
+++ b/dom/media/gtest/dash_dashinit.mp4
Binary files differ
diff --git a/dom/media/gtest/hello.rs b/dom/media/gtest/hello.rs
new file mode 100644
index 0000000000..af1308eee6
--- /dev/null
+++ b/dom/media/gtest/hello.rs
@@ -0,0 +1,6 @@
+#[no_mangle]
+pub extern "C" fn test_rust() -> *const u8 {
+ // NB: rust &str aren't null terminated.
+ let greeting = "hello from rust.\0";
+ greeting.as_ptr()
+}
diff --git a/dom/media/gtest/id3v2header.mp3 b/dom/media/gtest/id3v2header.mp3
new file mode 100644
index 0000000000..2f5585d02e
--- /dev/null
+++ b/dom/media/gtest/id3v2header.mp3
Binary files differ
diff --git a/dom/media/gtest/moz.build b/dom/media/gtest/moz.build
new file mode 100644
index 0000000000..581be004ef
--- /dev/null
+++ b/dom/media/gtest/moz.build
@@ -0,0 +1,148 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+include("/dom/media/webrtc/third_party_build/webrtc.mozbuild")
+
+DEFINES["ENABLE_SET_CUBEB_BACKEND"] = True
+DEFINES["VISIBLE_TIMEUNIT_INTERNALS"] = True
+
+LOCAL_INCLUDES += [
+ "/dom/media/driftcontrol",
+ "/dom/media/mediasink",
+ "/dom/media/systemservices",
+ "/dom/media/webrtc",
+ "/dom/media/webrtc/common",
+ "/third_party/libwebrtc",
+ "/third_party/libwebrtc/third_party/abseil-cpp",
+]
+
+UNIFIED_SOURCES += [
+ "MockCubeb.cpp",
+ "MockMediaResource.cpp",
+ "TestAudioBuffer.cpp",
+ "TestAudioBuffers.cpp",
+ "TestAudioCallbackDriver.cpp",
+ "TestAudioCompactor.cpp",
+ "TestAudioDecoderInputTrack.cpp",
+ "TestAudioInputSource.cpp",
+ "TestAudioMixer.cpp",
+ "TestAudioPacketizer.cpp",
+ "TestAudioRingBuffer.cpp",
+ "TestAudioSegment.cpp",
+ "TestAudioSinkWrapper.cpp",
+ "TestAudioTrackEncoder.cpp",
+ "TestAudioTrackGraph.cpp",
+ "TestBenchmarkStorage.cpp",
+ "TestBitWriter.cpp",
+ "TestBlankVideoDataCreator.cpp",
+ "TestBufferReader.cpp",
+ "TestCubebInputStream.cpp",
+ "TestDataMutex.cpp",
+ "TestDecoderBenchmark.cpp",
+ "TestDeviceInputTrack.cpp",
+ "TestDriftCompensation.cpp",
+ "TestGMPUtils.cpp",
+ "TestGroupId.cpp",
+ "TestIntervalSet.cpp",
+ "TestKeyValueStorage.cpp",
+ "TestMediaCodecsSupport.cpp",
+ "TestMediaDataDecoder.cpp",
+ "TestMediaDataEncoder.cpp",
+ "TestMediaEventSource.cpp",
+ "TestMediaMIMETypes.cpp",
+ "TestMediaQueue.cpp",
+ "TestMediaSpan.cpp",
+ "TestMediaUtils.cpp",
+ "TestMP3Demuxer.cpp",
+ "TestMP4Demuxer.cpp",
+ "TestMuxer.cpp",
+ "TestOggWriter.cpp",
+ "TestOpusParser.cpp",
+ "TestPacer.cpp",
+ "TestRust.cpp",
+ "TestTimeUnit.cpp",
+ "TestVideoSegment.cpp",
+ "TestVideoTrackEncoder.cpp",
+ "TestVideoUtils.cpp",
+ "TestVPXDecoding.cpp",
+ "TestWebMBuffered.cpp",
+ "TestWebMWriter.cpp",
+ "YUVBufferGenerator.cpp",
+]
+
+if CONFIG["MOZ_WEBRTC"]:
+ UNIFIED_SOURCES += [
+ "TestAudioInputProcessing.cpp",
+ "TestRTCStatsTimestampMaker.cpp",
+ ]
+
+if CONFIG["OS_TARGET"] != "Android":
+ UNIFIED_SOURCES += [
+ "TestCDMStorage.cpp",
+ "TestGMPCrossOrigin.cpp",
+ "TestGMPRemoveAndDelete.cpp",
+ ]
+
+if CONFIG["MOZ_WEBRTC"] and CONFIG["OS_TARGET"] != "Android":
+ UNIFIED_SOURCES += [
+ "TestAudioDeviceEnumerator.cpp",
+ "TestVideoFrameConverter.cpp",
+ ]
+
+TEST_HARNESS_FILES.gtest += [
+ "../test/av1.mp4",
+ "../test/gizmo-frag.mp4",
+ "../test/gizmo.mp4",
+ "../test/vp9cake.webm",
+ "dash_dashinit.mp4",
+ "id3v2header.mp3",
+ "negative_duration.mp4",
+ "noise.mp3",
+ "noise_vbr.mp3",
+ "short-zero-in-moov.mp4",
+ "short-zero-inband.mov",
+ "small-shot-false-positive.mp3",
+ "small-shot-partial-xing.mp3",
+ "small-shot.mp3",
+ "test.webm",
+ "test_case_1224361.vp8.ivf",
+ "test_case_1224363.vp8.ivf",
+ "test_case_1224369.vp8.ivf",
+ "test_InvalidElementId.webm",
+ "test_InvalidElementSize.webm",
+ "test_InvalidLargeEBMLMaxIdLength.webm",
+ "test_InvalidLargeElementId.webm",
+ "test_InvalidSmallEBMLMaxIdLength.webm",
+ "test_ValidLargeEBMLMaxIdLength.webm",
+ "test_ValidSmallEBMLMaxSizeLength.webm",
+ "test_vbri.mp3",
+]
+
+TEST_DIRS += [
+ "mp4_demuxer",
+]
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+LOCAL_INCLUDES += [
+ "/dom/media",
+ "/dom/media/encoder",
+ "/dom/media/gmp",
+ "/dom/media/mp4",
+ "/dom/media/platforms",
+ "/dom/media/platforms/agnostic",
+ "/dom/media/webrtc",
+ "/gfx/2d/",
+ "/security/certverifier",
+]
+
+FINAL_LIBRARY = "xul-gtest"
+
+if CONFIG["CC_TYPE"] in ("clang", "clang-cl"):
+ CXXFLAGS += [
+ "-Wno-inconsistent-missing-override",
+ "-Wno-unused-private-field",
+ ]
diff --git a/dom/media/gtest/mp4_demuxer/TestInterval.cpp b/dom/media/gtest/mp4_demuxer/TestInterval.cpp
new file mode 100644
index 0000000000..2572b1c392
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/TestInterval.cpp
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "MP4Interval.h"
+
+using mozilla::MP4Interval;
+
+TEST(MP4Interval, Length)
+{
+ MP4Interval<int> i(15, 25);
+ EXPECT_EQ(10, i.Length());
+}
+
+TEST(MP4Interval, Intersection)
+{
+ MP4Interval<int> i0(10, 20);
+ MP4Interval<int> i1(15, 25);
+ MP4Interval<int> i = i0.Intersection(i1);
+ EXPECT_EQ(15, i.start);
+ EXPECT_EQ(20, i.end);
+}
+
+TEST(MP4Interval, Equals)
+{
+ MP4Interval<int> i0(10, 20);
+ MP4Interval<int> i1(10, 20);
+ EXPECT_EQ(i0, i1);
+
+ MP4Interval<int> i2(5, 20);
+ EXPECT_NE(i0, i2);
+
+ MP4Interval<int> i3(10, 15);
+ EXPECT_NE(i0, i2);
+}
+
+TEST(MP4Interval, IntersectionVector)
+{
+ nsTArray<MP4Interval<int>> i0;
+ i0.AppendElement(MP4Interval<int>(5, 10));
+ i0.AppendElement(MP4Interval<int>(20, 25));
+ i0.AppendElement(MP4Interval<int>(40, 60));
+
+ nsTArray<MP4Interval<int>> i1;
+ i1.AppendElement(MP4Interval<int>(7, 15));
+ i1.AppendElement(MP4Interval<int>(16, 27));
+ i1.AppendElement(MP4Interval<int>(45, 50));
+ i1.AppendElement(MP4Interval<int>(53, 57));
+
+ nsTArray<MP4Interval<int>> i;
+ MP4Interval<int>::Intersection(i0, i1, &i);
+
+ EXPECT_EQ(4u, i.Length());
+
+ EXPECT_EQ(7, i[0].start);
+ EXPECT_EQ(10, i[0].end);
+
+ EXPECT_EQ(20, i[1].start);
+ EXPECT_EQ(25, i[1].end);
+
+ EXPECT_EQ(45, i[2].start);
+ EXPECT_EQ(50, i[2].end);
+
+ EXPECT_EQ(53, i[3].start);
+ EXPECT_EQ(57, i[3].end);
+}
+
+TEST(MP4Interval, Normalize)
+{
+ nsTArray<MP4Interval<int>> i;
+ i.AppendElement(MP4Interval<int>(20, 30));
+ i.AppendElement(MP4Interval<int>(1, 8));
+ i.AppendElement(MP4Interval<int>(5, 10));
+ i.AppendElement(MP4Interval<int>(2, 7));
+
+ nsTArray<MP4Interval<int>> o;
+ MP4Interval<int>::Normalize(i, &o);
+
+ EXPECT_EQ(2u, o.Length());
+
+ EXPECT_EQ(1, o[0].start);
+ EXPECT_EQ(10, o[0].end);
+
+ EXPECT_EQ(20, o[1].start);
+ EXPECT_EQ(30, o[1].end);
+}
diff --git a/dom/media/gtest/mp4_demuxer/TestMP4.cpp b/dom/media/gtest/mp4_demuxer/TestMP4.cpp
new file mode 100644
index 0000000000..df58ec42e2
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/TestMP4.cpp
@@ -0,0 +1,133 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "mp4parse.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <algorithm>
+#include <vector>
+
+static intptr_t error_reader(uint8_t* buffer, uintptr_t size, void* userdata) {
+ return -1;
+}
+
+struct read_vector {
+ explicit read_vector(FILE* file, size_t length);
+ explicit read_vector(size_t length);
+
+ size_t location;
+ std::vector<uint8_t> buffer;
+};
+
+read_vector::read_vector(FILE* file, size_t length) : location(0) {
+ buffer.resize(length);
+ size_t read = fread(buffer.data(), sizeof(decltype(buffer)::value_type),
+ buffer.size(), file);
+ buffer.resize(read);
+}
+
+read_vector::read_vector(size_t length) : location(0) {
+ buffer.resize(length, 0);
+}
+
+static intptr_t vector_reader(uint8_t* buffer, uintptr_t size, void* userdata) {
+ if (!buffer || !userdata) {
+ return -1;
+ }
+
+ auto source = reinterpret_cast<read_vector*>(userdata);
+ if (source->location > source->buffer.size()) {
+ return -1;
+ }
+ uintptr_t available =
+ source->buffer.data() ? source->buffer.size() - source->location : 0;
+ uintptr_t length = std::min(available, size);
+ if (length) {
+ memcpy(buffer, source->buffer.data() + source->location, length);
+ source->location += length;
+ }
+ return length;
+}
+
+TEST(rust, MP4MetadataEmpty)
+{
+ Mp4parseStatus rv;
+ Mp4parseIo io;
+ Mp4parseParser* parser = nullptr;
+
+ // Shouldn't be able to read with no context.
+ rv = mp4parse_new(nullptr, nullptr);
+ EXPECT_EQ(rv, MP4PARSE_STATUS_BAD_ARG);
+
+ // Shouldn't be able to wrap an Mp4parseIo with null members.
+ io = {nullptr, nullptr};
+ rv = mp4parse_new(&io, &parser);
+ EXPECT_EQ(rv, MP4PARSE_STATUS_BAD_ARG);
+ EXPECT_EQ(parser, nullptr);
+
+ io = {nullptr, &io};
+ rv = mp4parse_new(&io, &parser);
+ EXPECT_EQ(rv, MP4PARSE_STATUS_BAD_ARG);
+ EXPECT_EQ(parser, nullptr);
+
+ // FIXME: this should probably be accepted.
+ io = {error_reader, nullptr};
+ rv = mp4parse_new(&io, &parser);
+ EXPECT_EQ(rv, MP4PARSE_STATUS_BAD_ARG);
+ EXPECT_EQ(parser, nullptr);
+
+ // Read method errors should propagate.
+ io = {error_reader, &io};
+ rv = mp4parse_new(&io, &parser);
+ ASSERT_EQ(parser, nullptr);
+ EXPECT_EQ(rv, MP4PARSE_STATUS_IO);
+
+ // Short buffers should fail.
+ read_vector buf(0);
+ io = {vector_reader, &buf};
+ rv = mp4parse_new(&io, &parser);
+ ASSERT_EQ(parser, nullptr);
+ EXPECT_EQ(rv, MP4PARSE_STATUS_MOOV_MISSING);
+
+ buf.buffer.reserve(4097);
+ rv = mp4parse_new(&io, &parser);
+ ASSERT_EQ(parser, nullptr);
+ EXPECT_EQ(rv, MP4PARSE_STATUS_MOOV_MISSING);
+
+ // Empty buffers should fail.
+ buf.buffer.resize(4097, 0);
+ rv = mp4parse_new(&io, &parser);
+ ASSERT_EQ(parser, nullptr);
+ EXPECT_EQ(rv, MP4PARSE_STATUS_UNSUPPORTED);
+}
+
+TEST(rust, MP4Metadata)
+{
+ FILE* f = fopen("street.mp4", "rb");
+ ASSERT_TRUE(f != nullptr);
+ // Read just the moov header to work around the parser
+ // treating mid-box eof as an error.
+ // read_vector reader = read_vector(f, 1061);
+ struct stat s;
+ ASSERT_EQ(0, fstat(fileno(f), &s));
+ read_vector reader = read_vector(f, s.st_size);
+ fclose(f);
+
+ Mp4parseIo io = {vector_reader, &reader};
+ Mp4parseParser* parser = nullptr;
+ Mp4parseStatus rv = mp4parse_new(&io, &parser);
+ ASSERT_NE(nullptr, parser);
+ EXPECT_EQ(MP4PARSE_STATUS_OK, rv);
+
+ uint32_t tracks = 0;
+ rv = mp4parse_get_track_count(parser, &tracks);
+ EXPECT_EQ(MP4PARSE_STATUS_OK, rv);
+ EXPECT_EQ(2U, tracks);
+
+ mp4parse_free(parser);
+}
diff --git a/dom/media/gtest/mp4_demuxer/TestParser.cpp b/dom/media/gtest/mp4_demuxer/TestParser.cpp
new file mode 100644
index 0000000000..db4dd6839a
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/TestParser.cpp
@@ -0,0 +1,1022 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "js/Conversions.h"
+#include "MediaData.h"
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/gtest/MozAssertions.h"
+#include "mozilla/Preferences.h"
+
+#include "BufferStream.h"
+#include "MP4Metadata.h"
+#include "MoofParser.h"
+#include "TelemetryFixture.h"
+#include "TelemetryTestHelpers.h"
+
+class TestStream;
+namespace mozilla {
+DDLoggedTypeNameAndBase(::TestStream, ByteStream);
+} // namespace mozilla
+
+using namespace mozilla;
+
+static const uint32_t E = MP4Metadata::NumberTracksError();
+
+class TestStream : public ByteStream,
+ public DecoderDoctorLifeLogger<TestStream> {
+ public:
+ TestStream(const uint8_t* aBuffer, size_t aSize)
+ : mHighestSuccessfulEndOffset(0), mBuffer(aBuffer), mSize(aSize) {}
+ bool ReadAt(int64_t aOffset, void* aData, size_t aLength,
+ size_t* aBytesRead) override {
+ if (aOffset < 0 || aOffset > static_cast<int64_t>(mSize)) {
+ return false;
+ }
+ // After the test, 0 <= aOffset <= mSize <= SIZE_MAX, so it's safe to cast
+ // to size_t.
+ size_t offset = static_cast<size_t>(aOffset);
+ // Don't read past the end (but it's not an error to try).
+ if (aLength > mSize - offset) {
+ aLength = mSize - offset;
+ }
+ // Now, 0 <= offset <= offset + aLength <= mSize <= SIZE_MAX.
+ *aBytesRead = aLength;
+ memcpy(aData, mBuffer + offset, aLength);
+ if (mHighestSuccessfulEndOffset < offset + aLength) {
+ mHighestSuccessfulEndOffset = offset + aLength;
+ }
+ return true;
+ }
+ bool CachedReadAt(int64_t aOffset, void* aData, size_t aLength,
+ size_t* aBytesRead) override {
+ return ReadAt(aOffset, aData, aLength, aBytesRead);
+ }
+ bool Length(int64_t* aLength) override {
+ *aLength = mSize;
+ return true;
+ }
+ void DiscardBefore(int64_t aOffset) override {}
+
+ // Offset past the last character ever read. 0 when nothing read yet.
+ size_t mHighestSuccessfulEndOffset;
+
+ protected:
+ virtual ~TestStream() = default;
+
+ const uint8_t* mBuffer;
+ size_t mSize;
+};
+
+TEST(MP4Metadata, EmptyStream)
+{
+ RefPtr<ByteStream> stream = new TestStream(nullptr, 0);
+
+ MP4Metadata::ResultAndByteBuffer metadataBuffer =
+ MP4Metadata::Metadata(stream);
+ EXPECT_TRUE(NS_OK != metadataBuffer.Result());
+ EXPECT_FALSE(static_cast<bool>(metadataBuffer.Ref()));
+
+ MP4Metadata metadata(stream);
+ EXPECT_TRUE(0u ==
+ metadata.GetNumberTracks(TrackInfo::kUndefinedTrack).Ref() ||
+ E == metadata.GetNumberTracks(TrackInfo::kUndefinedTrack).Ref());
+ EXPECT_TRUE(0u == metadata.GetNumberTracks(TrackInfo::kAudioTrack).Ref() ||
+ E == metadata.GetNumberTracks(TrackInfo::kAudioTrack).Ref());
+ EXPECT_TRUE(0u == metadata.GetNumberTracks(TrackInfo::kVideoTrack).Ref() ||
+ E == metadata.GetNumberTracks(TrackInfo::kVideoTrack).Ref());
+ EXPECT_TRUE(0u == metadata.GetNumberTracks(TrackInfo::kTextTrack).Ref() ||
+ E == metadata.GetNumberTracks(TrackInfo::kTextTrack).Ref());
+ EXPECT_FALSE(metadata.GetTrackInfo(TrackInfo::kAudioTrack, 0).Ref());
+ EXPECT_FALSE(metadata.GetTrackInfo(TrackInfo::kVideoTrack, 0).Ref());
+ EXPECT_FALSE(metadata.GetTrackInfo(TrackInfo::kTextTrack, 0).Ref());
+ // We can seek anywhere in any MPEG4.
+ EXPECT_TRUE(metadata.CanSeek());
+ EXPECT_FALSE(metadata.Crypto().Ref()->valid);
+}
+
+TEST(MoofParser, EmptyStream)
+{
+ RefPtr<ByteStream> stream = new TestStream(nullptr, 0);
+
+ MoofParser parser(stream, AsVariant(ParseAllTracks{}), false);
+ EXPECT_EQ(0u, parser.mOffset);
+ EXPECT_TRUE(parser.ReachedEnd());
+
+ MediaByteRangeSet byteRanges;
+ EXPECT_FALSE(parser.RebuildFragmentedIndex(byteRanges));
+
+ EXPECT_TRUE(parser.GetCompositionRange(byteRanges).IsNull());
+ EXPECT_TRUE(parser.mInitRange.IsEmpty());
+ EXPECT_EQ(0u, parser.mOffset);
+ EXPECT_TRUE(parser.ReachedEnd());
+ RefPtr<MediaByteBuffer> metadataBuffer = parser.Metadata();
+ EXPECT_FALSE(metadataBuffer);
+ EXPECT_TRUE(parser.FirstCompleteMediaSegment().IsEmpty());
+ EXPECT_TRUE(parser.FirstCompleteMediaHeader().IsEmpty());
+}
+
+nsTArray<uint8_t> ReadTestFile(const char* aFilename) {
+ if (!aFilename) {
+ return {};
+ }
+ FILE* f = fopen(aFilename, "rb");
+ if (!f) {
+ return {};
+ }
+
+ if (fseek(f, 0, SEEK_END) != 0) {
+ fclose(f);
+ return {};
+ }
+ long position = ftell(f);
+ // I know EOF==-1, so this test is made obsolete by '<0', but I don't want
+ // the code to rely on that.
+ if (position == 0 || position == EOF || position < 0) {
+ fclose(f);
+ return {};
+ }
+ if (fseek(f, 0, SEEK_SET) != 0) {
+ fclose(f);
+ return {};
+ }
+
+ size_t len = static_cast<size_t>(position);
+ nsTArray<uint8_t> buffer(len);
+ buffer.SetLength(len);
+ size_t read = fread(buffer.Elements(), 1, len, f);
+ fclose(f);
+ if (read != len) {
+ return {};
+ }
+
+ return buffer;
+}
+
+struct TestFileData {
+ const char* mFilename;
+ bool mParseResult;
+ uint32_t mNumberVideoTracks;
+ bool mHasVideoIndice;
+ double mVideoDuration; // For first video track, -1 if N/A, in seconds.
+ int32_t mWidth;
+ int32_t mHeight;
+ uint32_t mNumberAudioTracks;
+ double mAudioDuration; // For first audio track, -1 if N/A, in seconds.
+ bool mHasCrypto; // Note, MP4Metadata only considers pssh box for crypto.
+ uint64_t mMoofReachedOffset; // or 0 for the end.
+ bool mValidMoofForTrack1;
+ bool mValidMoofForAllTracks;
+ int8_t mAudioProfile;
+};
+
+static const TestFileData testFiles[] = {
+ // filename parses? #V hasVideoIndex vDur w h #A aDur hasCrypto? moofOffset
+ // validMoof? audio_profile
+ {"test_case_1156505.mp4", false, 0, false, -1, 0, 0, 0, -1., false, 152,
+ false, false, 0}, // invalid 'trak' box
+ {"test_case_1181213.mp4", true, 1, true, 0.41666666, 320, 240, 1,
+ 0.47746032, true, 0, false, false, 2},
+ {"test_case_1181215.mp4", true, 0, false, -1, 0, 0, 0, -1, false, 0, false,
+ false, 0},
+ {"test_case_1181223.mp4", false, 0, false, 0.41666666, 320, 240, 0, -1,
+ false, 0, false, false, 0},
+ {"test_case_1181719.mp4", false, 0, false, -1, 0, 0, 0, -1, false, 0, false,
+ false, 0},
+ {"test_case_1185230.mp4", true, 2, true, 0.41666666, 320, 240, 2,
+ 0.0000059754907, false, 0, false, false, 2},
+ {"test_case_1187067.mp4", true, 1, true, 0.080000, 160, 90, 0, -1, false, 0,
+ false, false, 0},
+ {"test_case_1200326.mp4", false, 0, false, -1, 0, 0, 0, -1, false, 0, false,
+ false, 0},
+ {"test_case_1204580.mp4", true, 1, true, 0.502500, 320, 180, 0, -1, false,
+ 0, false, false, 0},
+ {"test_case_1216748.mp4", false, 0, false, -1, 0, 0, 0, -1, false, 152,
+ false, false, 0}, // invalid 'trak' box
+ {"test_case_1296473.mp4", false, 0, false, -1, 0, 0, 0, -1, false, 0, false,
+ false, 0},
+ {"test_case_1296532.mp4", true, 1, true, 5.589333, 560, 320, 1, 5.589333,
+ true, 0, true, false, 2},
+ {"test_case_1301065.mp4", true, 0, false, -1, 0, 0, 1, 100079991719, false,
+ 0, false, false, 2},
+ {"test_case_1301065-u32max.mp4", true, 0, false, -1, 0, 0, 1, 97391.548639,
+ false, 0, false, false, 2},
+ {"test_case_1301065-max-ez.mp4", true, 0, false, -1, 0, 0, 1,
+ 209146758.205306, false, 0, false, false, 2},
+ {"test_case_1301065-harder.mp4", true, 0, false, -1, 0, 0, 1,
+ 209146758.205328, false, 0, false, false, 2},
+ {"test_case_1301065-max-ok.mp4", true, 0, false, -1, 0, 0, 1,
+ 9223372036854.775, false, 0, false, false, 2},
+ // The duration overflows int64_t in TestFileData; the parser uses
+ // uint64_t, so
+ // this file is ignored.
+ //{ "test_case_1301065-overfl.mp4", 0, -1, 0, 0, 1, 9223372036854775827,
+ // false, 0,
+ // false, 2
+ // },
+ {"test_case_1301065-i64max.mp4", true, 0, false, -1, 0, 0, 1,
+ std::numeric_limits<double>::infinity(), false, 0, false, false, 2},
+ {"test_case_1301065-i64min.mp4", true, 0, false, -1, 0, 0, 1,
+ -std::numeric_limits<double>::infinity(), false, 0, false, false, 2},
+ {"test_case_1301065-u64max.mp4", true, 0, false, -1, 0, 0, 1, 0, false, 0,
+ false, false, 2},
+ {"test_case_1329061.mov", false, 0, false, -1, 0, 0, 1, 234567981, false, 0,
+ false, false, 2},
+ {"test_case_1351094.mp4", true, 0, false, -1, 0, 0, 0, -1, false, 0, false,
+ false, 0},
+ {"test_case_1389299.mp4", true, 1, true, 5.589333, 560, 320, 1, 5.589333,
+ true, 0, true, false, 2},
+
+ {"test_case_1389527.mp4", true, 1, false, 5.005000, 80, 128, 1, 4.992000,
+ false, 0, false, false, 2},
+ {"test_case_1395244.mp4", true, 1, true, 0.41666666, 320, 240, 1,
+ 0.47746032, false, 0, false, false, 2},
+ {"test_case_1388991.mp4", true, 0, false, -1, 0, 0, 1, 30.000181, false, 0,
+ false, false, 2},
+ {"test_case_1410565.mp4", false, 0, false, 0, 0, 0, 0, 0, false, 955100,
+ false, false, 2}, // negative 'timescale'
+ {"test_case_1513651-2-sample-description-entries.mp4", true, 1, true,
+ 9.843344, 400, 300, 0, -1, true, 0, false, false, 0},
+ {"test_case_1519617-cenc-init-with-track_id-0.mp4", true, 1, true, 0, 1272,
+ 530, 0, -1, false, 0, false, false,
+ 0}, // Uses bad track id 0 and has a sinf but no pssh
+ {"test_case_1519617-track2-trafs-removed.mp4", true, 1, true, 10.032000,
+ 400, 300, 1, 10.032000, false, 0, true, false, 2},
+ {"test_case_1519617-video-has-track_id-0.mp4", true, 1, true, 10.032000,
+ 400, 300, 1, 10.032000, false, 0, false, false, 2}, // Uses bad track id 0
+ // The following file has multiple sample description entries with the same
+ // crypto information. This does not cover multiple entries with different
+ // crypto information which is tracked by
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1714626
+ {"test_case_1714125-2-sample-description-entires-with-identical-crypto.mp4",
+ true, 1, true, 0, 1920, 1080, 0, 0, true, 0, false, false, 0},
+};
+
+TEST(MP4Metadata, test_case_mp4)
+{
+ const TestFileData* tests = nullptr;
+ size_t length = 0;
+
+ tests = testFiles;
+ length = ArrayLength(testFiles);
+
+ for (size_t test = 0; test < length; ++test) {
+ nsTArray<uint8_t> buffer = ReadTestFile(tests[test].mFilename);
+ ASSERT_FALSE(buffer.IsEmpty());
+ RefPtr<ByteStream> stream =
+ new TestStream(buffer.Elements(), buffer.Length());
+
+ MP4Metadata::ResultAndByteBuffer metadataBuffer =
+ MP4Metadata::Metadata(stream);
+ EXPECT_EQ(NS_OK, metadataBuffer.Result());
+ EXPECT_TRUE(metadataBuffer.Ref());
+
+ MP4Metadata metadata(stream);
+ nsresult res = metadata.Parse();
+ EXPECT_EQ(tests[test].mParseResult, NS_SUCCEEDED(res))
+ << tests[test].mFilename;
+ if (!tests[test].mParseResult) {
+ continue;
+ }
+
+ EXPECT_EQ(tests[test].mNumberAudioTracks,
+ metadata.GetNumberTracks(TrackInfo::kAudioTrack).Ref())
+ << tests[test].mFilename;
+ EXPECT_EQ(tests[test].mNumberVideoTracks,
+ metadata.GetNumberTracks(TrackInfo::kVideoTrack).Ref())
+ << tests[test].mFilename;
+ // If there is an error, we should expect an error code instead of zero
+ // for non-Audio/Video tracks.
+ const uint32_t None = (tests[test].mNumberVideoTracks == E) ? E : 0;
+ EXPECT_EQ(None, metadata.GetNumberTracks(TrackInfo::kUndefinedTrack).Ref())
+ << tests[test].mFilename;
+ EXPECT_EQ(None, metadata.GetNumberTracks(TrackInfo::kTextTrack).Ref())
+ << tests[test].mFilename;
+ EXPECT_FALSE(metadata.GetTrackInfo(TrackInfo::kUndefinedTrack, 0).Ref());
+ MP4Metadata::ResultAndTrackInfo trackInfo =
+ metadata.GetTrackInfo(TrackInfo::kVideoTrack, 0);
+ if (!!tests[test].mNumberVideoTracks) {
+ ASSERT_TRUE(!!trackInfo.Ref());
+ const VideoInfo* videoInfo = trackInfo.Ref()->GetAsVideoInfo();
+ ASSERT_TRUE(!!videoInfo);
+ EXPECT_TRUE(videoInfo->IsValid()) << tests[test].mFilename;
+ EXPECT_TRUE(videoInfo->IsVideo()) << tests[test].mFilename;
+ if (std::isinf(tests[test].mVideoDuration)) {
+ ASSERT_TRUE(std::isinf(videoInfo->mDuration.ToSeconds()));
+ } else {
+ EXPECT_FLOAT_EQ(tests[test].mVideoDuration,
+ videoInfo->mDuration.ToSeconds())
+ << tests[test].mFilename;
+ }
+ EXPECT_EQ(tests[test].mWidth, videoInfo->mDisplay.width)
+ << tests[test].mFilename;
+ EXPECT_EQ(tests[test].mHeight, videoInfo->mDisplay.height)
+ << tests[test].mFilename;
+
+ MP4Metadata::ResultAndIndice indices =
+ metadata.GetTrackIndice(videoInfo->mTrackId);
+ EXPECT_EQ(!!indices.Ref(), tests[test].mHasVideoIndice)
+ << tests[test].mFilename;
+ if (tests[test].mHasVideoIndice) {
+ for (size_t i = 0; i < indices.Ref()->Length(); i++) {
+ MP4SampleIndex::Indice data;
+ EXPECT_TRUE(indices.Ref()->GetIndice(i, data))
+ << tests[test].mFilename;
+ EXPECT_TRUE(data.start_offset <= data.end_offset)
+ << tests[test].mFilename;
+ EXPECT_TRUE(data.start_composition <= data.end_composition)
+ << tests[test].mFilename;
+ }
+ }
+ }
+ trackInfo = metadata.GetTrackInfo(TrackInfo::kAudioTrack, 0);
+ if (tests[test].mNumberAudioTracks == 0 ||
+ tests[test].mNumberAudioTracks == E) {
+ EXPECT_TRUE(!trackInfo.Ref()) << tests[test].mFilename;
+ } else {
+ ASSERT_TRUE(!!trackInfo.Ref());
+ const AudioInfo* audioInfo = trackInfo.Ref()->GetAsAudioInfo();
+ ASSERT_TRUE(!!audioInfo);
+ EXPECT_TRUE(audioInfo->IsValid()) << tests[test].mFilename;
+ EXPECT_TRUE(audioInfo->IsAudio()) << tests[test].mFilename;
+ if (std::isinf(tests[test].mAudioDuration)) {
+ ASSERT_TRUE(std::isinf(audioInfo->mDuration.ToSeconds()))
+ << tests[test].mFilename;
+ } else {
+ EXPECT_FLOAT_EQ(tests[test].mAudioDuration,
+ audioInfo->mDuration.ToSeconds())
+ << tests[test].mFilename;
+ }
+ EXPECT_EQ(tests[test].mAudioProfile, audioInfo->mProfile)
+ << tests[test].mFilename;
+
+ MP4Metadata::ResultAndIndice indices =
+ metadata.GetTrackIndice(audioInfo->mTrackId);
+ EXPECT_TRUE(!!indices.Ref()) << tests[test].mFilename;
+ for (size_t i = 0; i < indices.Ref()->Length(); i++) {
+ MP4SampleIndex::Indice data;
+ EXPECT_TRUE(indices.Ref()->GetIndice(i, data)) << tests[test].mFilename;
+ EXPECT_TRUE(data.start_offset <= data.end_offset)
+ << tests[test].mFilename;
+ EXPECT_TRUE(int64_t(data.start_composition) <=
+ int64_t(data.end_composition))
+ << tests[test].mFilename;
+ }
+ }
+ EXPECT_FALSE(metadata.GetTrackInfo(TrackInfo::kTextTrack, 0).Ref())
+ << tests[test].mFilename;
+ // We can see anywhere in any MPEG4.
+ EXPECT_TRUE(metadata.CanSeek()) << tests[test].mFilename;
+ EXPECT_EQ(tests[test].mHasCrypto, metadata.Crypto().Ref()->valid)
+ << tests[test].mFilename;
+ }
+}
+
+// This test was disabled by Bug 1224019 for producing way too much output.
+// This test no longer produces such output, as we've moved away from
+// stagefright, but it does take a long time to run. It can be useful to enable
+// as a sanity check on changes to the parser, but is too taxing to run as part
+// of normal test execution.
+#if 0
+TEST(MP4Metadata, test_case_mp4_subsets) {
+ static const size_t step = 1u;
+ for (size_t test = 0; test < ArrayLength(testFiles); ++test) {
+ nsTArray<uint8_t> buffer = ReadTestFile(testFiles[test].mFilename);
+ ASSERT_FALSE(buffer.IsEmpty());
+ ASSERT_LE(step, buffer.Length());
+ // Just exercizing the parser starting at different points through the file,
+ // making sure it doesn't crash.
+ // No checks because results would differ for each position.
+ for (size_t offset = 0; offset < buffer.Length() - step; offset += step) {
+ size_t size = buffer.Length() - offset;
+ while (size > 0) {
+ RefPtr<TestStream> stream =
+ new TestStream(buffer.Elements() + offset, size);
+
+ MP4Metadata::ResultAndByteBuffer metadataBuffer =
+ MP4Metadata::Metadata(stream);
+ MP4Metadata metadata(stream);
+
+ if (stream->mHighestSuccessfulEndOffset <= 0) {
+ // No successful reads -> Cutting down the size won't change anything.
+ break;
+ }
+ if (stream->mHighestSuccessfulEndOffset < size) {
+ // Read up to a point before the end -> Resize down to that point.
+ size = stream->mHighestSuccessfulEndOffset;
+ } else {
+ // Read up to the end (or after?!) -> Just cut 1 byte.
+ size -= 1;
+ }
+ }
+ }
+ }
+}
+#endif
+
+#if !defined(XP_WIN) || !defined(MOZ_ASAN) // OOMs on Windows ASan
+TEST(MoofParser, test_case_mp4)
+{
+  // Run every known test file through a MoofParser configured to read all
+  // tracks, and check the parser's offsets, init ranges and moof indexing
+  // against the expected per-file data in testFiles.
+  const TestFileData* tests = nullptr;
+  size_t length = 0;
+
+  tests = testFiles;
+  length = ArrayLength(testFiles);
+
+  for (size_t test = 0; test < length; ++test) {
+    nsTArray<uint8_t> buffer = ReadTestFile(tests[test].mFilename);
+    ASSERT_FALSE(buffer.IsEmpty());
+    RefPtr<ByteStream> stream =
+        new TestStream(buffer.Elements(), buffer.Length());
+
+    MoofParser parser(stream, AsVariant(ParseAllTracks{}), false);
+    EXPECT_EQ(0u, parser.mOffset) << tests[test].mFilename;
+    EXPECT_FALSE(parser.ReachedEnd()) << tests[test].mFilename;
+    EXPECT_TRUE(parser.mInitRange.IsEmpty()) << tests[test].mFilename;
+
+    RefPtr<MediaByteBuffer> metadataBuffer = parser.Metadata();
+    EXPECT_TRUE(metadataBuffer) << tests[test].mFilename;
+
+    EXPECT_FALSE(parser.mInitRange.IsEmpty()) << tests[test].mFilename;
+    const MediaByteRangeSet byteRanges(
+        MediaByteRange(0, int64_t(buffer.Length())));
+    EXPECT_EQ(tests[test].mValidMoofForAllTracks,
+              parser.RebuildFragmentedIndex(byteRanges))
+        << tests[test].mFilename;
+    if (tests[test].mMoofReachedOffset == 0) {
+      EXPECT_EQ(buffer.Length(), parser.mOffset) << tests[test].mFilename;
+      EXPECT_TRUE(parser.ReachedEnd()) << tests[test].mFilename;
+    } else {
+      EXPECT_EQ(tests[test].mMoofReachedOffset, parser.mOffset)
+          << tests[test].mFilename;
+      EXPECT_FALSE(parser.ReachedEnd()) << tests[test].mFilename;
+    }
+
+    EXPECT_FALSE(parser.mInitRange.IsEmpty()) << tests[test].mFilename;
+    EXPECT_TRUE(parser.GetCompositionRange(byteRanges).IsNull())
+        << tests[test].mFilename;
+    EXPECT_TRUE(parser.FirstCompleteMediaSegment().IsEmpty())
+        << tests[test].mFilename;
+    // If we expect a valid moof we should have that moof's range stored.
+    EXPECT_EQ(tests[test].mValidMoofForAllTracks,
+              !parser.FirstCompleteMediaHeader().IsEmpty())
+        << tests[test].mFilename;
+  }
+}
+
+TEST(MoofParser, test_case_sample_description_entries)
+{
+  const TestFileData* tests = testFiles;
+  size_t length = ArrayLength(testFiles);
+
+  for (size_t test = 0; test < length; ++test) {
+    nsTArray<uint8_t> buffer = ReadTestFile(tests[test].mFilename);
+    ASSERT_FALSE(buffer.IsEmpty());
+    RefPtr<ByteStream> stream =
+        new TestStream(buffer.Elements(), buffer.Length());
+
+    // Parse the first track. Treating it as audio is hacky, but this doesn't
+    // affect how we read the sample description entries.
+    uint32_t trackNumber = 1;
+    MoofParser parser(stream, AsVariant(trackNumber), false);
+    EXPECT_EQ(0u, parser.mOffset) << tests[test].mFilename;
+    EXPECT_FALSE(parser.ReachedEnd()) << tests[test].mFilename;
+    EXPECT_TRUE(parser.mInitRange.IsEmpty()) << tests[test].mFilename;
+
+    // Explicitly don't call parser.Metadata() so that the parser itself will
+    // read the metadata as if we're in a fragmented case. Otherwise the parser
+    // won't read the sample description table.
+
+    const MediaByteRangeSet byteRanges(
+        MediaByteRange(0, int64_t(buffer.Length())));
+    EXPECT_EQ(tests[test].mValidMoofForTrack1,
+              parser.RebuildFragmentedIndex(byteRanges))
+        << tests[test].mFilename;
+
+    // We only care about crypto data from the samples descriptions right now.
+    // This test should be expanded should we read further information.
+    if (tests[test].mHasCrypto) {
+      uint32_t numEncryptedEntries = 0;
+      // It's possible to have multiple sample description entries. Bug
+      // 1714626 tracks more robust handling of multiple entries, for now just
+      // check that we have at least one.
+      for (SampleDescriptionEntry entry : parser.mSampleDescriptions) {
+        if (entry.mIsEncryptedEntry) {
+          numEncryptedEntries++;
+        }
+      }
+      EXPECT_GE(numEncryptedEntries, 1u) << tests[test].mFilename;
+    }
+  }
+}
+#endif // !defined(XP_WIN) || !defined(MOZ_ASAN)
+
+// We should gracefully handle track_id 0 since Bug 1519617. We'd previously
+// used id 0 to trigger special handling in the MoofParser to read multiple
+// track metadata, but since muxers use track id 0 in the wild, we want to
+// make sure they can't accidentally trigger such handling.
+TEST(MoofParser, test_case_track_id_0_does_not_read_multitracks)
+{
+  const char* zeroTrackIdFileName =
+      "test_case_1519617-video-has-track_id-0.mp4";
+  nsTArray<uint8_t> buffer = ReadTestFile(zeroTrackIdFileName);
+
+  ASSERT_FALSE(buffer.IsEmpty());
+  RefPtr<ByteStream> stream =
+      new TestStream(buffer.Elements(), buffer.Length());
+
+  // Parse track id 0. We expect to only get metadata from that track, not the
+  // other track with id 2.
+  const uint32_t videoTrackId = 0;
+  MoofParser parser(stream, AsVariant(videoTrackId), false);
+
+  // Explicitly don't call parser.Metadata() so that the parser itself will
+  // read the metadata as if we're in a fragmented case. Otherwise we won't
+  // read the trak data.
+
+  const MediaByteRangeSet byteRanges(
+      MediaByteRange(0, int64_t(buffer.Length())));
+  EXPECT_TRUE(parser.RebuildFragmentedIndex(byteRanges))
+      << "MoofParser should find a valid moof as the file contains one!";
+
+  // Verify we only have data from track 0, if we parsed multiple tracks we'd
+  // find some of the audio track metadata here. Only check for values that
+  // differ between tracks.
+  // NOTE(review): the expected constants below are the video track's values
+  // from this test file; the assertion messages spell out the corresponding
+  // audio-track values that would indicate a cross-track read.
+  const uint32_t videoTimescale = 90000;
+  const uint32_t videoSampleDuration = 3000;
+  const uint32_t videoSampleFlags = 0x10000;
+  const uint32_t videoNumSampleDescriptionEntries = 1;
+  EXPECT_EQ(videoTimescale, parser.mMdhd.mTimescale)
+      << "Wrong timescale for video track! If value is 22050, we've read from "
+         "the audio track!";
+  EXPECT_EQ(videoTrackId, parser.mTrex.mTrackId)
+      << "Wrong track id for video track! If value is 2, we've read from the "
+         "audio track!";
+  EXPECT_EQ(videoSampleDuration, parser.mTrex.mDefaultSampleDuration)
+      << "Wrong sample duration for video track! If value is 1024, we've read "
+         "from the audio track!";
+  EXPECT_EQ(videoSampleFlags, parser.mTrex.mDefaultSampleFlags)
+      << "Wrong sample flags for video track! If value is 0x2000000 (note "
+         "that's hex), we've read from the audio track!";
+  EXPECT_EQ(videoNumSampleDescriptionEntries,
+            parser.mSampleDescriptions.Length())
+      << "Wrong number of sample descriptions for video track! If value is 2, "
+         "then we've read sample description information from video and audio "
+         "tracks!";
+}
+
+// We should gracefully handle track_id 0 since Bug 1519617. This includes
+// handling crypto data from the sinf box in the MoofParser. Note, as of the
+// time of writing, MP4Metadata uses the presence of a pssh box to determine
+// if its crypto member is valid. However, even on files where the pssh isn't
+// in the init segment, the MoofParser should still read the sinf, as in this
+// testcase.
+TEST(MoofParser, test_case_track_id_0_reads_crypto_metadata)
+{
+  const char* zeroTrackIdFileName =
+      "test_case_1519617-cenc-init-with-track_id-0.mp4";
+  nsTArray<uint8_t> buffer = ReadTestFile(zeroTrackIdFileName);
+
+  ASSERT_FALSE(buffer.IsEmpty());
+  RefPtr<ByteStream> stream =
+      new TestStream(buffer.Elements(), buffer.Length());
+
+  // Parse track id 0. We expect to only get metadata from that track, not the
+  // other track with id 2.
+  const uint32_t videoTrackId = 0;
+  MoofParser parser(stream, AsVariant(videoTrackId), false);
+
+  // Explicitly don't call parser.Metadata() so that the parser itself will
+  // read the metadata as if we're in a fragmented case. Otherwise we won't
+  // read the trak data.
+
+  const MediaByteRangeSet byteRanges(
+      MediaByteRange(0, int64_t(buffer.Length())));
+  EXPECT_FALSE(parser.RebuildFragmentedIndex(byteRanges))
+      << "MoofParser should not find a valid moof, this is just an init "
+         "segment!";
+
+  // Verify we only have data from track 0, if we parsed multiple tracks we'd
+  // find some of the audio track metadata here. Only check for values that
+  // differ between tracks.
+  // NOTE(review): the expected crypto values below mirror the sinf box
+  // contents of this test file's track 0.
+  const size_t numSampleDescriptionEntries = 1;
+  const uint32_t defaultPerSampleIVSize = 8;
+  const size_t keyIdLength = 16;
+  const uint32_t defaultKeyId[keyIdLength] = {
+      0x43, 0xbe, 0x13, 0xd0, 0x26, 0xc9, 0x41, 0x54,
+      0x8f, 0xed, 0xf9, 0x54, 0x1a, 0xef, 0x6b, 0x0e};
+  EXPECT_TRUE(parser.mSinf.IsValid())
+      << "Should have a sinf that has crypto data!";
+  EXPECT_EQ(defaultPerSampleIVSize, parser.mSinf.mDefaultIVSize)
+      << "Wrong default per sample IV size for track! If 0 indicates we failed "
+         "to parse some crypto info!";
+  for (size_t i = 0; i < keyIdLength; i++) {
+    EXPECT_EQ(defaultKeyId[i], parser.mSinf.mDefaultKeyID[i])
+        << "Mismatched default key ID byte at index " << i
+        << " indicates we failed to parse some crypto info!";
+  }
+  ASSERT_EQ(numSampleDescriptionEntries, parser.mSampleDescriptions.Length())
+      << "Wrong number of sample descriptions for track! If 0, indicates we "
+         "failed to parse some expected crypto!";
+  EXPECT_TRUE(parser.mSampleDescriptions[0].mIsEncryptedEntry)
+      << "Sample description should be marked as encrypted!";
+}
+
+// The MoofParser may be asked to parse metadata for multiple tracks, but then
+// be presented with fragments/moofs that contain data for only a subset of
+// those tracks. I.e. metadata contains information for tracks with ids 1 and 2,
+// but then the moof parser only receives moofs with data for track id 1. We
+// should parse such fragmented media. In this test the metadata contains info
+// for track ids 1 and 2, but track 2's track fragment headers (traf) have been
+// overwritten with free space boxes (free).
+TEST(MoofParser, test_case_moofs_missing_trafs)
+{
+  const char* noTrafsForTrack2MoofsFileName =
+      "test_case_1519617-track2-trafs-removed.mp4";
+  nsTArray<uint8_t> buffer = ReadTestFile(noTrafsForTrack2MoofsFileName);
+
+  ASSERT_FALSE(buffer.IsEmpty());
+  RefPtr<ByteStream> stream =
+      new TestStream(buffer.Elements(), buffer.Length());
+
+  // Create parser that will read metadata from all tracks.
+  MoofParser parser(stream, AsVariant(ParseAllTracks{}), false);
+
+  // Explicitly don't call parser.Metadata() so that the parser itself will
+  // read the metadata as if we're in a fragmented case. Otherwise we won't
+  // read the trak data.
+
+  const MediaByteRangeSet byteRanges(
+      MediaByteRange(0, int64_t(buffer.Length())));
+  EXPECT_TRUE(parser.RebuildFragmentedIndex(byteRanges))
+      << "MoofParser should find a valid moof, there's 2 in the file!";
+
+  // Verify we've found 2 moofs and that the parser was able to parse them.
+  const size_t numMoofs = 2;
+  EXPECT_EQ(numMoofs, parser.Moofs().Length())
+      << "File has 2 moofs, we should have read both";
+  for (size_t i = 0; i < parser.Moofs().Length(); i++) {
+    EXPECT_TRUE(parser.Moofs()[i].IsValid()) << "All moofs should be valid";
+  }
+}
+
+// This test was disabled by Bug 1224019 for producing way too much output.
+// This test no longer produces such output, as we've moved away from
+// stagefright, but it does take a long time to run. It can be useful to enable
+// as a sanity check on changes to the parser, but is too taxing to run as part
+// of normal test execution.
+#if 0
+TEST(MoofParser, test_case_mp4_subsets) {
+  const size_t step = 1u;
+  for (size_t test = 0; test < ArrayLength(testFiles); ++test) {
+    nsTArray<uint8_t> buffer = ReadTestFile(testFiles[test].mFilename);
+    ASSERT_FALSE(buffer.IsEmpty());
+    ASSERT_LE(step, buffer.Length());
+    // Just exercising the parser starting at different points through the file,
+    // making sure it doesn't crash.
+    // No checks because results would differ for each position.
+    for (size_t offset = 0; offset < buffer.Length() - step; offset += step) {
+      size_t size = buffer.Length() - offset;
+      while (size > 0) {
+        RefPtr<TestStream> stream =
+            new TestStream(buffer.Elements() + offset, size);
+
+        MoofParser parser(stream, AsVariant(ParseAllTracks{}), false);
+        MediaByteRangeSet byteRanges;
+        EXPECT_FALSE(parser.RebuildFragmentedIndex(byteRanges));
+        parser.GetCompositionRange(byteRanges);
+        RefPtr<MediaByteBuffer> metadataBuffer = parser.Metadata();
+        parser.FirstCompleteMediaSegment();
+        parser.FirstCompleteMediaHeader();
+
+        if (stream->mHighestSuccessfulEndOffset <= 0) {
+          // No successful reads -> Cutting down the size won't change anything.
+          break;
+        }
+        if (stream->mHighestSuccessfulEndOffset < size) {
+          // Read up to a point before the end -> Resize down to that point.
+          size = stream->mHighestSuccessfulEndOffset;
+        } else {
+          // Read up to the end (or after?!) -> Just cut 1 byte.
+          size -= 1;
+        }
+      }
+    }
+  }
+}
+#endif
+
+// Hand-crafted fragmented-MP4 init segment (ftyp + moov, 745 bytes) whose
+// single video track contains a ctts box with zero entries; consumed by the
+// MP4Metadata.EmptyCTTS test below.
+uint8_t media_gtest_video_init_mp4[] = {
+    0x00, 0x00, 0x00, 0x18, 0x66, 0x74, 0x79, 0x70, 0x69, 0x73, 0x6f, 0x6d,
+    0x00, 0x00, 0x00, 0x01, 0x69, 0x73, 0x6f, 0x6d, 0x61, 0x76, 0x63, 0x31,
+    0x00, 0x00, 0x02, 0xd1, 0x6d, 0x6f, 0x6f, 0x76, 0x00, 0x00, 0x00, 0x6c,
+    0x6d, 0x76, 0x68, 0x64, 0x00, 0x00, 0x00, 0x00, 0xc8, 0x49, 0x73, 0xf8,
+    0xc8, 0x4a, 0xc5, 0x7a, 0x00, 0x00, 0x02, 0x58, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x18,
+    0x69, 0x6f, 0x64, 0x73, 0x00, 0x00, 0x00, 0x00, 0x10, 0x80, 0x80, 0x80,
+    0x07, 0x00, 0x4f, 0xff, 0xff, 0x29, 0x15, 0xff, 0x00, 0x00, 0x02, 0x0d,
+    0x74, 0x72, 0x61, 0x6b, 0x00, 0x00, 0x00, 0x5c, 0x74, 0x6b, 0x68, 0x64,
+    0x00, 0x00, 0x00, 0x01, 0xc8, 0x49, 0x73, 0xf8, 0xc8, 0x49, 0x73, 0xf9,
+    0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x40, 0x00, 0x00, 0x00, 0x02, 0x80, 0x00, 0x00, 0x01, 0x68, 0x00, 0x00,
+    0x00, 0x00, 0x01, 0xa9, 0x6d, 0x64, 0x69, 0x61, 0x00, 0x00, 0x00, 0x20,
+    0x6d, 0x64, 0x68, 0x64, 0x00, 0x00, 0x00, 0x00, 0xc8, 0x49, 0x73, 0xf8,
+    0xc8, 0x49, 0x73, 0xf9, 0x00, 0x00, 0x75, 0x30, 0x00, 0x00, 0x00, 0x00,
+    0x55, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x68, 0x64, 0x6c, 0x72,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x64, 0x65,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x47, 0x50, 0x41, 0x43, 0x20, 0x49, 0x53, 0x4f, 0x20, 0x56, 0x69, 0x64,
+    0x65, 0x6f, 0x20, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00, 0x00,
+    0x00, 0x00, 0x01, 0x49, 0x6d, 0x69, 0x6e, 0x66, 0x00, 0x00, 0x00, 0x14,
+    0x76, 0x6d, 0x68, 0x64, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x64, 0x69, 0x6e, 0x66,
+    0x00, 0x00, 0x00, 0x1c, 0x64, 0x72, 0x65, 0x66, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x75, 0x72, 0x6c, 0x20,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x09, 0x73, 0x74, 0x62, 0x6c,
+    0x00, 0x00, 0x00, 0xad, 0x73, 0x74, 0x73, 0x64, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x9d, 0x61, 0x76, 0x63, 0x31,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x02, 0x80, 0x01, 0x68, 0x00, 0x48, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x18, 0xff, 0xff, 0x00, 0x00, 0x00, 0x33, 0x61, 0x76,
+    0x63, 0x43, 0x01, 0x64, 0x00, 0x1f, 0xff, 0xe1, 0x00, 0x1b, 0x67, 0x64,
+    0x00, 0x1f, 0xac, 0x2c, 0xc5, 0x02, 0x80, 0xbf, 0xe5, 0xc0, 0x44, 0x00,
+    0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xf2, 0x3c, 0x60, 0xc6,
+    0x58, 0x01, 0x00, 0x05, 0x68, 0xe9, 0x2b, 0x2c, 0x8b, 0x00, 0x00, 0x00,
+    0x14, 0x62, 0x74, 0x72, 0x74, 0x00, 0x01, 0x5a, 0xc2, 0x00, 0x24, 0x74,
+    0x38, 0x00, 0x09, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10, 0x73, 0x74, 0x74,
+    0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x10, 0x63, 0x74, 0x74, 0x73, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x10, 0x73, 0x74, 0x73, 0x63, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x73, 0x74, 0x73,
+    0x7a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x10, 0x73, 0x74, 0x63, 0x6f, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6d, 0x76, 0x65,
+    0x78, 0x00, 0x00, 0x00, 0x10, 0x6d, 0x65, 0x68, 0x64, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x05, 0x76, 0x18, 0x00, 0x00, 0x00, 0x20, 0x74, 0x72, 0x65,
+    0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+    0x01, 0x00, 0x00, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+    0x00};
+
+// Byte length of media_gtest_video_init_mp4 above.
+const uint32_t media_gtest_video_init_mp4_len = 745;
+
+TEST(MP4Metadata, EmptyCTTS)
+{
+  // Feed the hand-crafted init segment above (whose video track carries a
+  // ctts box with zero entries) through MP4Metadata and check that the track
+  // is still parsed and exposed.
+  RefPtr<MediaByteBuffer> buffer =
+      new MediaByteBuffer(media_gtest_video_init_mp4_len);
+  buffer->AppendElements(media_gtest_video_init_mp4,
+                         media_gtest_video_init_mp4_len);
+  RefPtr<BufferStream> stream = new BufferStream(buffer);
+
+  MP4Metadata::ResultAndByteBuffer metadataBuffer =
+      MP4Metadata::Metadata(stream);
+  EXPECT_EQ(NS_OK, metadataBuffer.Result());
+  EXPECT_TRUE(metadataBuffer.Ref());
+
+  MP4Metadata metadata(stream);
+  EXPECT_EQ(metadata.Parse(), NS_OK);
+  EXPECT_EQ(1u, metadata.GetNumberTracks(TrackInfo::kVideoTrack).Ref());
+  MP4Metadata::ResultAndTrackInfo track =
+      metadata.GetTrackInfo(TrackInfo::kVideoTrack, 0);
+  EXPECT_TRUE(track.Ref() != nullptr);
+  // We can seek anywhere in any MPEG4.
+  EXPECT_TRUE(metadata.CanSeek());
+  EXPECT_FALSE(metadata.Crypto().Ref()->valid);
+}
+
+// Fixture so we can test telemetry probes. TelemetryTestFixture supplies the
+// mCleanGlobal JS global and mTelemetry service used by the test below.
+class MP4MetadataTelemetryFixture : public TelemetryTestFixture {};
+
+TEST_F(MP4MetadataTelemetryFixture, Telemetry) {
+  // Helper to fetch the metadata from a file and send telemetry in the process.
+  auto UpdateMetadataAndHistograms = [](const char* testFileName) {
+    nsTArray<uint8_t> buffer = ReadTestFile(testFileName);
+    ASSERT_FALSE(buffer.IsEmpty());
+    RefPtr<ByteStream> stream =
+        new TestStream(buffer.Elements(), buffer.Length());
+
+    MP4Metadata::ResultAndByteBuffer metadataBuffer =
+        MP4Metadata::Metadata(stream);
+    EXPECT_EQ(NS_OK, metadataBuffer.Result());
+    EXPECT_TRUE(metadataBuffer.Ref());
+
+    MP4Metadata metadata(stream);
+    nsresult res = metadata.Parse();
+    EXPECT_NS_SUCCEEDED(res);
+    auto audioTrackCount = metadata.GetNumberTracks(TrackInfo::kAudioTrack);
+    ASSERT_NE(audioTrackCount.Ref(), MP4Metadata::NumberTracksError());
+    auto videoTrackCount = metadata.GetNumberTracks(TrackInfo::kVideoTrack);
+    ASSERT_NE(videoTrackCount.Ref(), MP4Metadata::NumberTracksError());
+
+    // Need to read the track data to get telemetry to fire.
+    for (uint32_t i = 0; i < audioTrackCount.Ref(); i++) {
+      metadata.GetTrackInfo(TrackInfo::kAudioTrack, i);
+    }
+    for (uint32_t i = 0; i < videoTrackCount.Ref(); i++) {
+      metadata.GetTrackInfo(TrackInfo::kVideoTrack, i);
+    }
+  };
+
+  AutoJSContextWithGlobal cx(mCleanGlobal);
+
+  // Checks the current state of the histograms relating to sample description
+  // entries and verifies they're in an expected state.
+  // aExpectedMultipleCodecCounts is a tuple where the first value represents
+  // the number of expected 'false' count, and the second the expected 'true'
+  // count for the sample description entries have multiple codecs histogram.
+  // aExpectedMultipleCryptoCounts is the same, but for the sample description
+  // entries have multiple crypto histogram.
+  // aExpectedSampleDescriptionEntryCounts is a tuple with 6 values, each is
+  // the expected number of sample description seen. I.e, the first value in the
+  // tuple is the number of tracks we've seen with 0 sample descriptions, the
+  // second value with 1 sample description, and so on up to 5 sample
+  // descriptions. aFileName is the name of the most recent file we've parsed,
+  // and is used to log if our telem counts are not in an expected state.
+  auto CheckHistograms =
+      [this, &cx](
+          const std::tuple<uint32_t, uint32_t>& aExpectedMultipleCodecCounts,
+          const std::tuple<uint32_t, uint32_t>& aExpectedMultipleCryptoCounts,
+          const std::tuple<uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
+                           uint32_t>& aExpectedSampleDescriptionEntryCounts,
+          const char* aFileName) {
+        // Get a snapshot of the current histograms
+        JS::Rooted<JS::Value> snapshot(cx.GetJSContext());
+        TelemetryTestHelpers::GetSnapshots(cx.GetJSContext(), mTelemetry,
+                                           "" /* this string is unused */,
+                                           &snapshot, false /* is_keyed */);
+
+        // We'll use these to pull values out of the histograms.
+        JS::Rooted<JS::Value> values(cx.GetJSContext());
+        JS::Rooted<JS::Value> value(cx.GetJSContext());
+
+        // Verify our multiple codecs count histogram.
+        JS::Rooted<JS::Value> multipleCodecsHistogram(cx.GetJSContext());
+        TelemetryTestHelpers::GetProperty(
+            cx.GetJSContext(),
+            "MEDIA_MP4_PARSE_SAMPLE_DESCRIPTION_ENTRIES_HAVE_MULTIPLE_CODECS",
+            snapshot, &multipleCodecsHistogram);
+        ASSERT_TRUE(multipleCodecsHistogram.isObject())
+        << "Multiple codecs histogram should exist!";
+
+        TelemetryTestHelpers::GetProperty(cx.GetJSContext(), "values",
+                                          multipleCodecsHistogram, &values);
+        // False count.
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 0, values, &value);
+        uint32_t uValue = 0;
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<0>(aExpectedMultipleCodecCounts), uValue)
+            << "Unexpected number of false multiple codecs after parsing "
+            << aFileName;
+        // True count.
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 1, values, &value);
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<1>(aExpectedMultipleCodecCounts), uValue)
+            << "Unexpected number of true multiple codecs after parsing "
+            << aFileName;
+
+        // Verify our multiple crypto count histogram.
+        JS::Rooted<JS::Value> multipleCryptoHistogram(cx.GetJSContext());
+        TelemetryTestHelpers::GetProperty(
+            cx.GetJSContext(),
+            "MEDIA_MP4_PARSE_SAMPLE_DESCRIPTION_ENTRIES_HAVE_MULTIPLE_CRYPTO",
+            snapshot, &multipleCryptoHistogram);
+        ASSERT_TRUE(multipleCryptoHistogram.isObject())
+        << "Multiple crypto histogram should exist!";
+
+        TelemetryTestHelpers::GetProperty(cx.GetJSContext(), "values",
+                                          multipleCryptoHistogram, &values);
+        // False count.
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 0, values, &value);
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<0>(aExpectedMultipleCryptoCounts), uValue)
+            << "Unexpected number of false multiple cryptos after parsing "
+            << aFileName;
+        // True count.
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 1, values, &value);
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<1>(aExpectedMultipleCryptoCounts), uValue)
+            << "Unexpected number of true multiple cryptos after parsing "
+            << aFileName;
+
+        // Verify our sample description entry count histogram.
+        JS::Rooted<JS::Value> numSamplesHistogram(cx.GetJSContext());
+        TelemetryTestHelpers::GetProperty(
+            cx.GetJSContext(), "MEDIA_MP4_PARSE_NUM_SAMPLE_DESCRIPTION_ENTRIES",
+            snapshot, &numSamplesHistogram);
+        ASSERT_TRUE(numSamplesHistogram.isObject())
+        << "Num sample description entries histogram should exist!";
+
+        TelemetryTestHelpers::GetProperty(cx.GetJSContext(), "values",
+                                          numSamplesHistogram, &values);
+
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 0, values, &value);
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<0>(aExpectedSampleDescriptionEntryCounts), uValue)
+            << "Unexpected number of 0 sample entry descriptions after parsing "
+            << aFileName;
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 1, values, &value);
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<1>(aExpectedSampleDescriptionEntryCounts), uValue)
+            << "Unexpected number of 1 sample entry descriptions after parsing "
+            << aFileName;
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 2, values, &value);
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<2>(aExpectedSampleDescriptionEntryCounts), uValue)
+            << "Unexpected number of 2 sample entry descriptions after parsing "
+            << aFileName;
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 3, values, &value);
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<3>(aExpectedSampleDescriptionEntryCounts), uValue)
+            << "Unexpected number of 3 sample entry descriptions after parsing "
+            << aFileName;
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 4, values, &value);
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<4>(aExpectedSampleDescriptionEntryCounts), uValue)
+            << "Unexpected number of 4 sample entry descriptions after parsing "
+            << aFileName;
+        TelemetryTestHelpers::GetElement(cx.GetJSContext(), 5, values, &value);
+        JS::ToUint32(cx.GetJSContext(), value, &uValue);
+        EXPECT_EQ(std::get<5>(aExpectedSampleDescriptionEntryCounts), uValue)
+            << "Unexpected number of 5 sample entry descriptions after parsing "
+            << aFileName;
+      };
+
+  // Clear histograms
+  TelemetryTestHelpers::GetAndClearHistogram(
+      cx.GetJSContext(), mTelemetry,
+      nsLiteralCString(
+          "MEDIA_MP4_PARSE_SAMPLE_DESCRIPTION_ENTRIES_HAVE_MULTIPLE_CODECS"),
+      false /* is_keyed */);
+
+  TelemetryTestHelpers::GetAndClearHistogram(
+      cx.GetJSContext(), mTelemetry,
+      nsLiteralCString(
+          "MEDIA_MP4_PARSE_SAMPLE_DESCRIPTION_ENTRIES_HAVE_MULTIPLE_CRYPTO"),
+      false /* is_keyed */);
+
+  TelemetryTestHelpers::GetAndClearHistogram(
+      cx.GetJSContext(), mTelemetry,
+      "MEDIA_MP4_PARSE_NUM_SAMPLE_DESCRIPTION_ENTRIES"_ns,
+      false /* is_keyed */);
+
+  // The snapshot won't have any data in it until we populate our histograms, so
+  // we don't check for a baseline here. Just read out first MP4 metadata.
+
+  // Grab one of the test cases we know should parse and parse it, this should
+  // trigger telemetry gathering.
+
+  // This file contains 2 moovs, each with a video and audio track with one
+  // sample description entry. So we should see 4 tracks, each with a single
+  // codec, no crypto, and a single sample description entry.
+  UpdateMetadataAndHistograms("test_case_1185230.mp4");
+
+  // Verify our histograms are updated.
+  CheckHistograms(std::make_tuple<uint32_t, uint32_t>(4, 0),
+                  std::make_tuple<uint32_t, uint32_t>(4, 0),
+                  std::make_tuple<uint32_t, uint32_t, uint32_t, uint32_t,
+                                  uint32_t, uint32_t>(0, 4, 0, 0, 0, 0),
+                  "test_case_1185230.mp4");
+
+  // Parse another test case. This one has a single moov with a single video
+  // track. However, the track has two sample description entries, and our
+  // updated telemetry should reflect that.
+  UpdateMetadataAndHistograms(
+      "test_case_1513651-2-sample-description-entries.mp4");
+
+  // Verify our histograms are updated.
+  CheckHistograms(std::make_tuple<uint32_t, uint32_t>(5, 0),
+                  std::make_tuple<uint32_t, uint32_t>(5, 0),
+                  std::make_tuple<uint32_t, uint32_t, uint32_t, uint32_t,
+                                  uint32_t, uint32_t>(0, 4, 1, 0, 0, 0),
+                  "test_case_1513651-2-sample-description-entries.mp4");
+
+  // Parse another test case. This one has 2 sample description entries, both
+  // with crypto information, which should be reflected in our telemetry.
+  UpdateMetadataAndHistograms(
+      "test_case_1714125-2-sample-description-entires-with-identical-crypto."
+      "mp4");
+
+  // Verify our histograms are updated.
+  CheckHistograms(
+      std::make_tuple<uint32_t, uint32_t>(6, 0),
+      std::make_tuple<uint32_t, uint32_t>(5, 1),
+      std::make_tuple<uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
+                      uint32_t>(0, 4, 2, 0, 0, 0),
+      "test_case_1714125-2-sample-description-entires-with-identical-crypto."
+      "mp4");
+}
diff --git a/dom/media/gtest/mp4_demuxer/moz.build b/dom/media/gtest/mp4_demuxer/moz.build
new file mode 100644
index 0000000000..dc0946b7a0
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/moz.build
@@ -0,0 +1,66 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+Library("mp4_demuxer_gtest")
+
+if CONFIG["OS_TARGET"] != "Android":
+ SOURCES += [
+ "TestParser.cpp",
+ ]
+
+SOURCES += [
+ "TestInterval.cpp",
+]
+
+TEST_HARNESS_FILES.gtest += [
+ "test_case_1156505.mp4",
+ "test_case_1181213.mp4",
+ "test_case_1181215.mp4",
+ "test_case_1181223.mp4",
+ "test_case_1181719.mp4",
+ "test_case_1185230.mp4",
+ "test_case_1187067.mp4",
+ "test_case_1200326.mp4",
+ "test_case_1204580.mp4",
+ "test_case_1216748.mp4",
+ "test_case_1296473.mp4",
+ "test_case_1296532.mp4",
+ "test_case_1301065-harder.mp4",
+ "test_case_1301065-i64max.mp4",
+ "test_case_1301065-i64min.mp4",
+ "test_case_1301065-max-ez.mp4",
+ "test_case_1301065-max-ok.mp4",
+ "test_case_1301065-overfl.mp4",
+ "test_case_1301065-u32max.mp4",
+ "test_case_1301065-u64max.mp4",
+ "test_case_1301065.mp4",
+ "test_case_1329061.mov",
+ "test_case_1351094.mp4",
+ "test_case_1388991.mp4",
+ "test_case_1389299.mp4",
+ "test_case_1389527.mp4",
+ "test_case_1395244.mp4",
+ "test_case_1410565.mp4",
+ "test_case_1513651-2-sample-description-entries.mp4",
+ "test_case_1519617-cenc-init-with-track_id-0.mp4",
+ "test_case_1519617-track2-trafs-removed.mp4",
+ "test_case_1519617-video-has-track_id-0.mp4",
+ "test_case_1714125-2-sample-description-entires-with-identical-crypto.mp4",
+]
+
+UNIFIED_SOURCES += [
+ "TestMP4.cpp",
+]
+
+TEST_HARNESS_FILES.gtest += [
+ "../../test/street.mp4",
+]
+LOCAL_INCLUDES += [
+ "../../mp4",
+ "/toolkit/components/telemetry/tests/gtest",
+]
+
+FINAL_LIBRARY = "xul-gtest"
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1156505.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1156505.mp4
new file mode 100644
index 0000000000..687b06ee1f
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1156505.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1181213.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1181213.mp4
new file mode 100644
index 0000000000..e2326edb4e
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1181213.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1181215.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1181215.mp4
new file mode 100644
index 0000000000..7adba3836f
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1181215.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1181223.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1181223.mp4
new file mode 100644
index 0000000000..2aa2d5abfd
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1181223.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1181719.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1181719.mp4
new file mode 100644
index 0000000000..6846edd6ed
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1181719.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1185230.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1185230.mp4
new file mode 100644
index 0000000000..ac5cbdbe85
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1185230.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1187067.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1187067.mp4
new file mode 100644
index 0000000000..fdb396eeb3
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1187067.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1200326.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1200326.mp4
new file mode 100644
index 0000000000..5b8b27d508
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1200326.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1204580.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1204580.mp4
new file mode 100644
index 0000000000..4e55b05719
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1204580.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1216748.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1216748.mp4
new file mode 100644
index 0000000000..7072f53bec
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1216748.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1296473.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1296473.mp4
new file mode 100644
index 0000000000..109eb51064
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1296473.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1296532.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1296532.mp4
new file mode 100644
index 0000000000..5a5669bb89
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1296532.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1301065-harder.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1301065-harder.mp4
new file mode 100644
index 0000000000..7d678b7c66
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1301065-harder.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1301065-i64max.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1301065-i64max.mp4
new file mode 100644
index 0000000000..5a3572f88c
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1301065-i64max.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1301065-i64min.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1301065-i64min.mp4
new file mode 100644
index 0000000000..4d3eb366e1
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1301065-i64min.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1301065-max-ez.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1301065-max-ez.mp4
new file mode 100644
index 0000000000..17fbf411ed
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1301065-max-ez.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1301065-max-ok.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1301065-max-ok.mp4
new file mode 100644
index 0000000000..a5e1e4610d
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1301065-max-ok.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1301065-overfl.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1301065-overfl.mp4
new file mode 100644
index 0000000000..1ef24e932b
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1301065-overfl.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1301065-u32max.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1301065-u32max.mp4
new file mode 100644
index 0000000000..b1d8b6ce7e
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1301065-u32max.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1301065-u64max.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1301065-u64max.mp4
new file mode 100644
index 0000000000..419dcba2c1
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1301065-u64max.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1301065.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1301065.mp4
new file mode 100644
index 0000000000..543a4fba3e
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1301065.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1329061.mov b/dom/media/gtest/mp4_demuxer/test_case_1329061.mov
new file mode 100644
index 0000000000..4246b8f716
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1329061.mov
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1351094.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1351094.mp4
new file mode 100644
index 0000000000..2dfd4c35ce
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1351094.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1388991.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1388991.mp4
new file mode 100644
index 0000000000..deb7aae33a
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1388991.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1389299.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1389299.mp4
new file mode 100644
index 0000000000..78dc390a3d
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1389299.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1389527.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1389527.mp4
new file mode 100644
index 0000000000..6406fcb8f8
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1389527.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1395244.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1395244.mp4
new file mode 100644
index 0000000000..da43d017ed
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1395244.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1410565.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1410565.mp4
new file mode 100644
index 0000000000..ebeaa08354
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1410565.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1513651-2-sample-description-entries.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1513651-2-sample-description-entries.mp4
new file mode 100644
index 0000000000..2f8f235a9b
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1513651-2-sample-description-entries.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1519617-cenc-init-with-track_id-0.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1519617-cenc-init-with-track_id-0.mp4
new file mode 100644
index 0000000000..e76e9f0894
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1519617-cenc-init-with-track_id-0.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1519617-track2-trafs-removed.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1519617-track2-trafs-removed.mp4
new file mode 100644
index 0000000000..55bd57c7db
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1519617-track2-trafs-removed.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1519617-video-has-track_id-0.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1519617-video-has-track_id-0.mp4
new file mode 100644
index 0000000000..8cb4dcc212
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1519617-video-has-track_id-0.mp4
Binary files differ
diff --git a/dom/media/gtest/mp4_demuxer/test_case_1714125-2-sample-description-entires-with-identical-crypto.mp4 b/dom/media/gtest/mp4_demuxer/test_case_1714125-2-sample-description-entires-with-identical-crypto.mp4
new file mode 100644
index 0000000000..4356259e68
--- /dev/null
+++ b/dom/media/gtest/mp4_demuxer/test_case_1714125-2-sample-description-entires-with-identical-crypto.mp4
Binary files differ
diff --git a/dom/media/gtest/negative_duration.mp4 b/dom/media/gtest/negative_duration.mp4
new file mode 100644
index 0000000000..de86bf497c
--- /dev/null
+++ b/dom/media/gtest/negative_duration.mp4
Binary files differ
diff --git a/dom/media/gtest/noise.mp3 b/dom/media/gtest/noise.mp3
new file mode 100644
index 0000000000..e76b503502
--- /dev/null
+++ b/dom/media/gtest/noise.mp3
Binary files differ
diff --git a/dom/media/gtest/noise_vbr.mp3 b/dom/media/gtest/noise_vbr.mp3
new file mode 100644
index 0000000000..284ebe40bf
--- /dev/null
+++ b/dom/media/gtest/noise_vbr.mp3
Binary files differ
diff --git a/dom/media/gtest/short-zero-in-moov.mp4 b/dom/media/gtest/short-zero-in-moov.mp4
new file mode 100644
index 0000000000..577318c8fa
--- /dev/null
+++ b/dom/media/gtest/short-zero-in-moov.mp4
Binary files differ
diff --git a/dom/media/gtest/short-zero-inband.mov b/dom/media/gtest/short-zero-inband.mov
new file mode 100644
index 0000000000..9c18642865
--- /dev/null
+++ b/dom/media/gtest/short-zero-inband.mov
Binary files differ
diff --git a/dom/media/gtest/small-shot-false-positive.mp3 b/dom/media/gtest/small-shot-false-positive.mp3
new file mode 100644
index 0000000000..2f1e794051
--- /dev/null
+++ b/dom/media/gtest/small-shot-false-positive.mp3
Binary files differ
diff --git a/dom/media/gtest/small-shot-partial-xing.mp3 b/dom/media/gtest/small-shot-partial-xing.mp3
new file mode 100644
index 0000000000..99d68e3cbe
--- /dev/null
+++ b/dom/media/gtest/small-shot-partial-xing.mp3
Binary files differ
diff --git a/dom/media/gtest/small-shot.mp3 b/dom/media/gtest/small-shot.mp3
new file mode 100644
index 0000000000..f9397a5106
--- /dev/null
+++ b/dom/media/gtest/small-shot.mp3
Binary files differ
diff --git a/dom/media/gtest/test.webm b/dom/media/gtest/test.webm
new file mode 100644
index 0000000000..fc9e991270
--- /dev/null
+++ b/dom/media/gtest/test.webm
Binary files differ
diff --git a/dom/media/gtest/test_InvalidElementId.webm b/dom/media/gtest/test_InvalidElementId.webm
new file mode 100644
index 0000000000..74e24d2093
--- /dev/null
+++ b/dom/media/gtest/test_InvalidElementId.webm
Binary files differ
diff --git a/dom/media/gtest/test_InvalidElementSize.webm b/dom/media/gtest/test_InvalidElementSize.webm
new file mode 100644
index 0000000000..420a1452ce
--- /dev/null
+++ b/dom/media/gtest/test_InvalidElementSize.webm
Binary files differ
diff --git a/dom/media/gtest/test_InvalidLargeEBMLMaxIdLength.webm b/dom/media/gtest/test_InvalidLargeEBMLMaxIdLength.webm
new file mode 100644
index 0000000000..fc2d9ce88e
--- /dev/null
+++ b/dom/media/gtest/test_InvalidLargeEBMLMaxIdLength.webm
Binary files differ
diff --git a/dom/media/gtest/test_InvalidLargeElementId.webm b/dom/media/gtest/test_InvalidLargeElementId.webm
new file mode 100644
index 0000000000..ceac160d9d
--- /dev/null
+++ b/dom/media/gtest/test_InvalidLargeElementId.webm
Binary files differ
diff --git a/dom/media/gtest/test_InvalidSmallEBMLMaxIdLength.webm b/dom/media/gtest/test_InvalidSmallEBMLMaxIdLength.webm
new file mode 100644
index 0000000000..ca38a258c8
--- /dev/null
+++ b/dom/media/gtest/test_InvalidSmallEBMLMaxIdLength.webm
Binary files differ
diff --git a/dom/media/gtest/test_ValidLargeEBMLMaxIdLength.webm b/dom/media/gtest/test_ValidLargeEBMLMaxIdLength.webm
new file mode 100644
index 0000000000..44bca6101e
--- /dev/null
+++ b/dom/media/gtest/test_ValidLargeEBMLMaxIdLength.webm
Binary files differ
diff --git a/dom/media/gtest/test_ValidSmallEBMLMaxSizeLength.webm b/dom/media/gtest/test_ValidSmallEBMLMaxSizeLength.webm
new file mode 100644
index 0000000000..23fd2b36a6
--- /dev/null
+++ b/dom/media/gtest/test_ValidSmallEBMLMaxSizeLength.webm
Binary files differ
diff --git a/dom/media/gtest/test_case_1224361.vp8.ivf b/dom/media/gtest/test_case_1224361.vp8.ivf
new file mode 100644
index 0000000000..e2fe942f0e
--- /dev/null
+++ b/dom/media/gtest/test_case_1224361.vp8.ivf
Binary files differ
diff --git a/dom/media/gtest/test_case_1224363.vp8.ivf b/dom/media/gtest/test_case_1224363.vp8.ivf
new file mode 100644
index 0000000000..6d2e4e0206
--- /dev/null
+++ b/dom/media/gtest/test_case_1224363.vp8.ivf
Binary files differ
diff --git a/dom/media/gtest/test_case_1224369.vp8.ivf b/dom/media/gtest/test_case_1224369.vp8.ivf
new file mode 100644
index 0000000000..2f8deb1148
--- /dev/null
+++ b/dom/media/gtest/test_case_1224369.vp8.ivf
Binary files differ
diff --git a/dom/media/gtest/test_vbri.mp3 b/dom/media/gtest/test_vbri.mp3
new file mode 100644
index 0000000000..efd7450338
--- /dev/null
+++ b/dom/media/gtest/test_vbri.mp3
Binary files differ