Diffstat (limited to 'dom/media/platforms/ffmpeg')
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp        |  12
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp          |   7
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegLibWrapper.h            |  11
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegRDFTTypes.h             |  34
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp        | 255
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h          |  13
-rw-r--r--  dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp  |  14
-rw-r--r--  dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h    |   8
8 files changed, 181 insertions(+), 173 deletions(-)
diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
index 43041f81ea..1e8e488e25 100644
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -164,7 +164,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
int16_t* data = reinterpret_cast<int16_t**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
- *tmp++ = AudioSampleToFloat(*data++);
+ *tmp++ = ConvertAudioSample<float>(*data++);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S16P) {
@@ -174,7 +174,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
int16_t** data = reinterpret_cast<int16_t**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
- *tmp++ = AudioSampleToFloat(data[channel][frame]);
+ *tmp++ = ConvertAudioSample<float>(data[channel][frame]);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S32) {
@@ -183,7 +183,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
int32_t* data = reinterpret_cast<int32_t**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
- *tmp++ = AudioSampleToFloat(*data++);
+ *tmp++ = ConvertAudioSample<float>(*data++);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S32P) {
@@ -193,7 +193,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
int32_t** data = reinterpret_cast<int32_t**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
- *tmp++ = AudioSampleToFloat(data[channel][frame]);
+ *tmp++ = ConvertAudioSample<float>(data[channel][frame]);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_U8) {
@@ -202,7 +202,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
uint8_t* data = reinterpret_cast<uint8_t**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
- *tmp++ = UInt8bitToAudioSample<AudioDataValue>(*data++);
+ *tmp++ = ConvertAudioSample<float>(*data++);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_U8P) {
@@ -212,7 +212,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
uint8_t** data = reinterpret_cast<uint8_t**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
- *tmp++ = UInt8bitToAudioSample<AudioDataValue>(data[channel][frame]);
+ *tmp++ = ConvertAudioSample<float>(data[channel][frame]);
}
}
}
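
The FFmpegAudioDecoder.cpp hunks above fold the per-format helpers (AudioSampleToFloat, UInt8bitToAudioSample<AudioDataValue>) into a single ConvertAudioSample<float> call. Purely as an illustrative sketch -- not the actual Gecko helper, whose definition lives elsewhere in the tree -- such a conversion normalizes integer PCM into the [-1.0, 1.0] float range:

// Sketch only: hypothetical ConvertSampleSketch overloads showing the usual
// integer-PCM-to-float normalization that a ConvertAudioSample<float>-style
// helper performs. Not the real Gecko implementation.
#include <cstdint>

inline float ConvertSampleSketch(int16_t aSample) {
  return static_cast<float>(aSample) / 32768.0f;           // S16 -> [-1, 1)
}
inline float ConvertSampleSketch(int32_t aSample) {
  return static_cast<float>(aSample) / 2147483648.0f;      // S32 -> [-1, 1)
}
inline float ConvertSampleSketch(uint8_t aSample) {
  return (static_cast<float>(aSample) - 128.0f) / 128.0f;  // U8  -> [-1, 1]
}

One overload per source sample format keeps the packing loops above identical whether the frame is interleaved or planar; only the sample type changes.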
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
index 4a30f2dd2d..bfb3105a57 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
@@ -177,9 +177,6 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC(av_packet_alloc, (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60))
AV_FUNC(av_packet_unref, (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60))
AV_FUNC(av_packet_free, (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60))
- AV_FUNC_OPTION(av_rdft_init, AV_FUNC_AVCODEC_ALL)
- AV_FUNC_OPTION(av_rdft_calc, AV_FUNC_AVCODEC_ALL)
- AV_FUNC_OPTION(av_rdft_end, AV_FUNC_AVCODEC_ALL)
AV_FUNC(avcodec_descriptor_get, AV_FUNC_AVCODEC_ALL)
AV_FUNC(av_log_set_level, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_malloc, AV_FUNC_AVUTIL_ALL)
@@ -254,6 +251,10 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC_AVUTIL_59 |
AV_FUNC_AVUTIL_60)
#endif
+
+ AV_FUNC_OPTION(av_tx_init, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC_OPTION(av_tx_uninit, AV_FUNC_AVUTIL_ALL)
+
#undef AV_FUNC
#undef AV_FUNC_OPTION
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
index 98ab2f7930..eacbba286a 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
@@ -5,9 +5,9 @@
#ifndef __FFmpegLibWrapper_h__
#define __FFmpegLibWrapper_h__
-#include "FFmpegRDFTTypes.h" // for AvRdftInitFn, etc.
#include "mozilla/Attributes.h"
#include "mozilla/Types.h"
+#include "ffvpx/tx.h"
struct AVCodec;
struct AVCodecContext;
@@ -148,11 +148,6 @@ struct MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS FFmpegLibWrapper {
int (*avcodec_send_frame)(AVCodecContext* avctx, const AVFrame* frame);
int (*avcodec_receive_frame)(AVCodecContext* avctx, AVFrame* frame);
- // libavcodec optional
- AvRdftInitFn av_rdft_init;
- AvRdftCalcFn av_rdft_calc;
- AvRdftEndFn av_rdft_end;
-
// libavutil
void (*av_log_set_level)(int level);
void* (*av_malloc)(size_t size);
@@ -216,6 +211,10 @@ struct MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS FFmpegLibWrapper {
void* (*vaGetDisplayDRM)(int fd);
#endif
+ // Only ever used with ffvpx
+ decltype(::av_tx_init)* av_tx_init;
+ decltype(::av_tx_uninit)* av_tx_uninit;
+
PRLibrary* mAVCodecLib;
PRLibrary* mAVUtilLib;
#ifdef MOZ_WIDGET_GTK
diff --git a/dom/media/platforms/ffmpeg/FFmpegRDFTTypes.h b/dom/media/platforms/ffmpeg/FFmpegRDFTTypes.h
deleted file mode 100644
index cb3e2476fb..0000000000
--- a/dom/media/platforms/ffmpeg/FFmpegRDFTTypes.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
-
-#ifndef FFmpegRDFTTypes_h
-#define FFmpegRDFTTypes_h
-
-struct RDFTContext;
-
-typedef float FFTSample;
-
-enum RDFTransformType {
- DFT_R2C,
- IDFT_C2R,
- IDFT_R2C,
- DFT_C2R,
-};
-
-extern "C" {
-
-typedef RDFTContext* (*AvRdftInitFn)(int nbits, enum RDFTransformType trans);
-typedef void (*AvRdftCalcFn)(RDFTContext* s, FFTSample* data);
-typedef void (*AvRdftEndFn)(RDFTContext* s);
-}
-
-struct FFmpegRDFTFuncs {
- AvRdftInitFn init;
- AvRdftCalcFn calc;
- AvRdftEndFn end;
-};
-
-#endif // FFmpegRDFTTypes_h
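
FFmpegRDFTTypes.h existed only to declare the av_rdft_* function-pointer types; it is deleted because the wrapper now binds FFmpeg's av_tx API instead (see the av_tx_init/av_tx_uninit members added to FFmpegLibWrapper above). As a hedged sketch of how the replacement API is typically driven -- the transform length and data here are placeholders, and this is not the Gecko call site, which goes through the FFmpegFFTFuncs table and the in-tree ffvpx/tx.h header:

// av_tx usage sketch: forward real-to-complex transform of len float samples.
// Assumes an FFmpeg build exposing AV_TX_FLOAT_RDFT (libavutil/tx.h); ffvpx
// consumers include "ffvpx/tx.h" instead.
extern "C" {
#include <libavutil/tx.h>
}
#include <vector>

bool ForwardRdftSketch(const std::vector<float>& aSamples) {
  const int len = static_cast<int>(aSamples.size());
  AVTXContext* ctx = nullptr;
  av_tx_fn txFn = nullptr;
  float scale = 1.0f;
  if (av_tx_init(&ctx, &txFn, AV_TX_FLOAT_RDFT, /* inv */ 0, len, &scale, 0) <
      0) {
    return false;
  }
  // A forward RDFT of len real samples yields len / 2 + 1 complex bins.
  std::vector<AVComplexFloat> bins(len / 2 + 1);
  txFn(ctx, bins.data(), const_cast<float*>(aSamples.data()), sizeof(float));
  av_tx_uninit(&ctx);
  return true;
}

Unlike the old av_rdft_calc, the transform function itself is handed back by av_tx_init as an av_tx_fn, which is why the wrapper only needs to export the init/uninit pair.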
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
index dcc3d9a88d..a3cfdf1b1d 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
@@ -504,128 +504,64 @@ MediaResult FFmpegVideoEncoder<LIBAV_VER>::InitInternal() {
mCodecContext->flags |= AV_CODEC_FLAG_FRAME_DURATION;
#endif
mCodecContext->gop_size = static_cast<int>(mConfig.mKeyframeInterval);
- // TODO (bug 1872871): Move the following extra settings to some helpers
- // instead.
+
if (mConfig.mUsage == MediaDataEncoder::Usage::Realtime) {
mLib->av_opt_set(mCodecContext->priv_data, "deadline", "realtime", 0);
// Explicitly ask encoder do not keep in flight at any one time for
// lookahead purposes.
mLib->av_opt_set(mCodecContext->priv_data, "lag-in-frames", "0", 0);
}
- // Apply SVC settings.
- if (Maybe<VPXSVCSetting> svc =
- GetVPXSVCSetting(mConfig.mScalabilityMode, mConfig.mBitrate)) {
- // For libvpx.
- if (mCodecName == "libvpx" || mCodecName == "libvpx-vp9") {
- // Show a warning if mScalabilityMode mismatches mNumTemporalLayers
- if (mConfig.mCodecSpecific) {
- if (mConfig.mCodecSpecific->is<VP8Specific>() ||
- mConfig.mCodecSpecific->is<VP9Specific>()) {
- const uint8_t numTemporalLayers =
- mConfig.mCodecSpecific->is<VP8Specific>()
- ? mConfig.mCodecSpecific->as<VP8Specific>().mNumTemporalLayers
- : mConfig.mCodecSpecific->as<VP9Specific>()
- .mNumTemporalLayers;
- if (numTemporalLayers != svc->mNumberLayers) {
- FFMPEGV_LOG(
- "Force using %zu layers defined in scalability mode instead of "
- "the %u layers defined in VP8/9Specific",
- svc->mNumberLayers, numTemporalLayers);
- }
- }
- }
- // Set ts_layering_mode.
- nsPrintfCString parameters("ts_layering_mode=%u", svc->mLayeringMode);
- // Set ts_target_bitrate.
- parameters.Append(":ts_target_bitrate=");
- for (size_t i = 0; i < svc->mTargetBitrates.Length(); ++i) {
- if (i > 0) {
- parameters.Append(",");
- }
- parameters.AppendPrintf("%d", svc->mTargetBitrates[i]);
- }
- // TODO: Set ts_number_layers, ts_periodicity, ts_layer_id and
- // ts_rate_decimator if they are different from the preset values in
- // ts_layering_mode.
-
- // Set parameters into ts-parameters.
- mLib->av_opt_set(mCodecContext->priv_data, "ts-parameters",
- parameters.get(), 0);
-
- // FFmpegVideoEncoder would be reset after Drain(), so mSVCInfo should be
- // reset() before emplace().
- mSVCInfo.reset();
- mSVCInfo.emplace(std::move(svc->mLayerIds));
-
- // TODO: layer settings should be changed dynamically when the frame's
- // color space changed.
- } else {
- FFMPEGV_LOG("SVC setting is not implemented for %s codec",
- mCodecName.get());
- }
+ if (Maybe<SVCSettings> settings = GetSVCSettings()) {
+ SVCSettings s = settings.extract();
+ mLib->av_opt_set(mCodecContext->priv_data, s.mSettingKeyValue.first.get(),
+ s.mSettingKeyValue.second.get(), 0);
+
+ // FFmpegVideoEncoder is reset after Drain(), so mSVCInfo should be reset()
+ // before emplace().
+ mSVCInfo.reset();
+ mSVCInfo.emplace(std::move(s.mTemporalLayerIds));
+
+ // TODO: layer settings should be changed dynamically when the frame's
+ // color space changed.
}
- // Apply codec specific settings.
- nsAutoCString codecSpecificLog;
- if (mConfig.mCodecSpecific) {
- if (mConfig.mCodecSpecific->is<H264Specific>()) {
- // For libx264.
- if (mCodecName == "libx264") {
- codecSpecificLog.Append(", H264:");
-
- const H264Specific& specific =
- mConfig.mCodecSpecific->as<H264Specific>();
-
- // Set profile.
- Maybe<H264Setting> profile = GetH264Profile(specific.mProfile);
- if (!profile) {
- FFMPEGV_LOG("failed to get h264 profile");
- return MediaResult(NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR,
- RESULT_DETAIL("H264 profile is unknown"));
- }
- codecSpecificLog.Append(
- nsPrintfCString(" profile - %d", profile->mValue));
- mCodecContext->profile = profile->mValue;
- if (!profile->mString.IsEmpty()) {
- codecSpecificLog.AppendPrintf(" (%s)", profile->mString.get());
- mLib->av_opt_set(mCodecContext->priv_data, "profile",
- profile->mString.get(), 0);
- }
-
- // Set level.
- Maybe<H264Setting> level = GetH264Level(specific.mLevel);
- if (!level) {
- FFMPEGV_LOG("failed to get h264 level");
- return MediaResult(NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR,
- RESULT_DETAIL("H264 level is unknown"));
- }
- codecSpecificLog.AppendPrintf(", level %d (%s)", level->mValue,
- level->mString.get());
- mCodecContext->level = level->mValue;
- MOZ_ASSERT(!level->mString.IsEmpty());
- mLib->av_opt_set(mCodecContext->priv_data, "level",
- level->mString.get(), 0);
-
- // Set format: libx264's default format is annexb
- if (specific.mFormat == H264BitStreamFormat::AVC) {
- codecSpecificLog.Append(", AVCC");
- mLib->av_opt_set(mCodecContext->priv_data, "x264-params", "annexb=0",
- 0);
- // mCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER
- // if we don't want to append SPS/PPS data in all keyframe
- // (LIBAVCODEC_VERSION_MAJOR >= 57 only).
- } else {
- codecSpecificLog.Append(", AnnexB");
- // Set annexb explicitly even if it's default format.
- mLib->av_opt_set(mCodecContext->priv_data, "x264-params", "annexb=1",
- 0);
- }
- } else {
- FFMPEGV_LOG("H264 settings is not implemented for codec %s ",
- mCodecName.get());
+
+ nsAutoCString h264Log;
+ if (mConfig.mCodecSpecific && mConfig.mCodecSpecific->is<H264Specific>()) {
+ // TODO: Set profile, level, avcc/annexb for openh264 and others.
+ if (mCodecName == "libx264") {
+ const H264Specific& h264Specific =
+ mConfig.mCodecSpecific->as<H264Specific>();
+ H264Settings s = GetH264Settings(h264Specific);
+ mCodecContext->profile = s.mProfile;
+ mCodecContext->level = s.mLevel;
+ for (const auto& pair : s.mSettingKeyValuePairs) {
+ mLib->av_opt_set(mCodecContext->priv_data, pair.first.get(),
+ pair.second.get(), 0);
}
+
+ // Log the settings.
+ // When using profile other than EXTENDED, the profile string is in the
+ // first element of mSettingKeyValuePairs, while EXTENDED profile has no
+ // profile string.
+
+ MOZ_ASSERT_IF(
+ s.mSettingKeyValuePairs.Length() != 3,
+ h264Specific.mProfile == H264_PROFILE::H264_PROFILE_EXTENDED);
+ const char* profileStr = s.mSettingKeyValuePairs.Length() == 3
+ ? s.mSettingKeyValuePairs[0].second.get()
+ : "extended";
+ const char* levelStr = s.mSettingKeyValuePairs.Length() == 3
+ ? s.mSettingKeyValuePairs[1].second.get()
+ : s.mSettingKeyValuePairs[0].second.get();
+ const char* formatStr =
+ h264Specific.mFormat == H264BitStreamFormat::AVC ? "AVCC" : "AnnexB";
+ h264Log.AppendPrintf(", H264: profile - %d (%s), level %d (%s), %s",
+ mCodecContext->profile, profileStr,
+ mCodecContext->level, levelStr, formatStr);
}
}
+
// TODO: keyint_min, max_b_frame?
// - if mConfig.mDenoising is set: av_opt_set_int(mCodecContext->priv_data,
// "noise_sensitivity", x, 0), where the x is from 0(disabled) to 6.
@@ -657,7 +593,7 @@ MediaResult FFmpegVideoEncoder<LIBAV_VER>::InitInternal() {
static_cast<int64_t>(mCodecContext->bit_rate),
mCodecContext->width, mCodecContext->height,
mCodecContext->time_base.num, mCodecContext->time_base.den,
- codecSpecificLog.IsEmpty() ? "" : codecSpecificLog.get());
+ h264Log.IsEmpty() ? "" : h264Log.get());
return MediaResult(NS_OK);
}
@@ -1152,4 +1088,99 @@ void FFmpegVideoEncoder<LIBAV_VER>::ForceEnablingFFmpegDebugLogs() {
#endif // DEBUG
}
+Maybe<FFmpegVideoEncoder<LIBAV_VER>::SVCSettings>
+FFmpegVideoEncoder<LIBAV_VER>::GetSVCSettings() {
+ MOZ_ASSERT(!mCodecName.IsEmpty());
+
+ // TODO: Add support for AV1 and H264.
+ if (mCodecName != "libvpx" && mCodecName != "libvpx-vp9") {
+ FFMPEGV_LOG("SVC setting is not implemented for %s codec",
+ mCodecName.get());
+ return Nothing();
+ }
+
+ Maybe<VPXSVCSetting> svc =
+ GetVPXSVCSetting(mConfig.mScalabilityMode, mConfig.mBitrate);
+ if (!svc) {
+ FFMPEGV_LOG("No SVC settings obtained. Skip");
+ return Nothing();
+ }
+
+ // Check if the number of temporal layers in codec specific settings matches
+ // the number of layers for the given scalability mode.
+
+ auto GetNumTemporalLayers = [&]() -> uint8_t {
+ uint8_t layers = 0;
+ if (mConfig.mCodecSpecific) {
+ if (mConfig.mCodecSpecific->is<VP8Specific>()) {
+ layers = mConfig.mCodecSpecific->as<VP8Specific>().mNumTemporalLayers;
+ MOZ_ASSERT(layers > 0);
+ } else if (mConfig.mCodecSpecific->is<VP9Specific>()) {
+ layers = mConfig.mCodecSpecific->as<VP9Specific>().mNumTemporalLayers;
+ MOZ_ASSERT(layers > 0);
+ }
+ }
+ return layers;
+ };
+
+ DebugOnly<uint8_t> numTemporalLayers = GetNumTemporalLayers();
+ MOZ_ASSERT_IF(numTemporalLayers > 0, numTemporalLayers == svc->mNumberLayers);
+
+ // Form an SVC setting string for libvpx.
+
+ nsPrintfCString parameters("ts_layering_mode=%u", svc->mLayeringMode);
+ parameters.Append(":ts_target_bitrate=");
+ for (size_t i = 0; i < svc->mTargetBitrates.Length(); ++i) {
+ if (i > 0) {
+ parameters.Append(",");
+ }
+ parameters.AppendPrintf("%d", svc->mTargetBitrates[i]);
+ }
+
+ // TODO: Set ts_number_layers, ts_periodicity, ts_layer_id and
+ // ts_rate_decimator if they are different from the preset values in
+ // ts_layering_mode.
+
+ return Some(
+ SVCSettings{std::move(svc->mLayerIds),
+ std::make_pair("ts-parameters"_ns, std::move(parameters))});
+}
+
+FFmpegVideoEncoder<LIBAV_VER>::H264Settings FFmpegVideoEncoder<
+ LIBAV_VER>::GetH264Settings(const H264Specific& aH264Specific) {
+ MOZ_ASSERT(mCodecName == "libx264",
+ "GetH264Settings is libx264-only for now");
+
+ nsTArray<std::pair<nsCString, nsCString>> keyValuePairs;
+
+ Maybe<H264Setting> profile = GetH264Profile(aH264Specific.mProfile);
+ MOZ_RELEASE_ASSERT(profile.isSome());
+ if (!profile->mString.IsEmpty()) {
+ keyValuePairs.AppendElement(std::make_pair("profile"_ns, profile->mString));
+ } else {
+ MOZ_RELEASE_ASSERT(aH264Specific.mProfile ==
+ H264_PROFILE::H264_PROFILE_EXTENDED);
+ }
+
+ Maybe<H264Setting> level = GetH264Level(aH264Specific.mLevel);
+ MOZ_RELEASE_ASSERT(level.isSome());
+ MOZ_RELEASE_ASSERT(!level->mString.IsEmpty());
+ keyValuePairs.AppendElement(std::make_pair("level"_ns, level->mString));
+
+ // Set format: libx264's default format is annexb.
+ if (aH264Specific.mFormat == H264BitStreamFormat::AVC) {
+ keyValuePairs.AppendElement(std::make_pair("x264-params"_ns, "annexb=0"));
+ // mCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER
+ // if we don't want to append SPS/PPS data in all keyframe
+ // (LIBAVCODEC_VERSION_MAJOR >= 57 only).
+ } else {
+ // Set annexb explicitly even if it's default format.
+ keyValuePairs.AppendElement(std::make_pair("x264-params"_ns, "annexb=1"));
+ }
+
+ return H264Settings{.mProfile = profile->mValue,
+ .mLevel = level->mValue,
+ .mSettingKeyValuePairs = std::move(keyValuePairs)};
+}
+
} // namespace mozilla
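
For reference, GetSVCSettings() above packages everything needed for a single av_opt_set call. With purely hypothetical numbers (three temporal layers at 100/200/400 kbps and a layering mode of 3 -- values invented for illustration, not taken from GetVPXSVCSetting), the returned pair would look like:

  key   = "ts-parameters"
  value = "ts_layering_mode=3:ts_target_bitrate=100,200,400"

while mTemporalLayerIds carries the per-frame layer pattern that ends up in mSVCInfo. GetH264Settings() works the same way for libx264, except that it returns a list of pairs ("profile", "level", "x264-params") plus the numeric profile and level written directly into the codec context.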
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h
index 1bcdd3eaf9..07c433ddd7 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h
@@ -75,6 +75,19 @@ class FFmpegVideoEncoder<LIBAV_VER> final : public MediaDataEncoder {
Result<already_AddRefed<MediaByteBuffer>, nsresult> GetExtraData(
AVPacket* aPacket);
void ForceEnablingFFmpegDebugLogs();
+ struct SVCSettings {
+ nsTArray<uint8_t> mTemporalLayerIds;
+ // A key-value pair for av_opt_set.
+ std::pair<nsCString, nsCString> mSettingKeyValue;
+ };
+ Maybe<SVCSettings> GetSVCSettings();
+ struct H264Settings {
+ int mProfile;
+ int mLevel;
+ // A list of key-value pairs for av_opt_set.
+ nsTArray<std::pair<nsCString, nsCString>> mSettingKeyValuePairs;
+ };
+ H264Settings GetH264Settings(const H264Specific& aH264Specific);
// This refers to a static FFmpegLibWrapper, so raw pointer is adequate.
const FFmpegLibWrapper* mLib;
diff --git a/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
index ba9ca4834e..dfc8244f1d 100644
--- a/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
+++ b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
@@ -145,19 +145,13 @@ already_AddRefed<PlatformEncoderModule> FFVPXRuntimeLinker::CreateEncoder() {
}
/* static */
-void FFVPXRuntimeLinker::GetRDFTFuncs(FFmpegRDFTFuncs* aOutFuncs) {
+void FFVPXRuntimeLinker::GetFFTFuncs(FFmpegFFTFuncs* aOutFuncs) {
[]() MOZ_NO_THREAD_SAFETY_ANALYSIS {
MOZ_ASSERT(sLinkStatus != LinkStatus_INIT);
}();
- if (sFFVPXLib.av_rdft_init && sFFVPXLib.av_rdft_calc &&
- sFFVPXLib.av_rdft_end) {
- aOutFuncs->init = sFFVPXLib.av_rdft_init;
- aOutFuncs->calc = sFFVPXLib.av_rdft_calc;
- aOutFuncs->end = sFFVPXLib.av_rdft_end;
- } else {
- NS_WARNING("RDFT functions expected but not found");
- *aOutFuncs = FFmpegRDFTFuncs(); // zero
- }
+ MOZ_ASSERT(sFFVPXLib.av_tx_init && sFFVPXLib.av_tx_uninit);
+ aOutFuncs->init = sFFVPXLib.av_tx_init;
+ aOutFuncs->uninit = sFFVPXLib.av_tx_uninit;
}
} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h
index e52f108272..dccd37c7da 100644
--- a/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h
+++ b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h
@@ -11,8 +11,12 @@
#include "PlatformEncoderModule.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/ThreadSafety.h"
+#include "ffvpx/tx.h"
-struct FFmpegRDFTFuncs;
+struct FFmpegFFTFuncs {
+ decltype(av_tx_init)* init;
+ decltype(av_tx_uninit)* uninit;
+};
namespace mozilla {
@@ -23,7 +27,7 @@ class FFVPXRuntimeLinker {
static already_AddRefed<PlatformEncoderModule> CreateEncoder();
// Call (on any thread) after Init().
- static void GetRDFTFuncs(FFmpegRDFTFuncs* aOutFuncs);
+ static void GetFFTFuncs(FFmpegFFTFuncs* aOutFuncs);
private:
// Provide critical-section for Init() and sLinkStatus.