author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:34:42 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:34:42 +0000
commit     da4c7e7ed675c3bf405668739c3012d140856109 (patch)
tree       cdd868dba063fecba609a1d819de271f0d51b23e /media/ffvpx/libavcodec
parent     Adding upstream version 125.0.3. (diff)
Adding upstream version 126.0. (upstream/126.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'media/ffvpx/libavcodec')
-rw-r--r--  media/ffvpx/libavcodec/audio_frame_queue.c  | 113
-rw-r--r--  media/ffvpx/libavcodec/audio_frame_queue.h  |  83
-rw-r--r--  media/ffvpx/libavcodec/codec_list.c         |   6
-rw-r--r--  media/ffvpx/libavcodec/libopusenc.c         | 610
-rw-r--r--  media/ffvpx/libavcodec/libvorbisenc.c       | 393
-rw-r--r--  media/ffvpx/libavcodec/moz.build            |   3
6 files changed, 1208 insertions(+), 0 deletions(-)
diff --git a/media/ffvpx/libavcodec/audio_frame_queue.c b/media/ffvpx/libavcodec/audio_frame_queue.c
new file mode 100644
index 0000000000..08b4b368c7
--- /dev/null
+++ b/media/ffvpx/libavcodec/audio_frame_queue.c
@@ -0,0 +1,113 @@
+/*
+ * Audio Frame Queue
+ * Copyright (c) 2012 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/common.h"
+#include "audio_frame_queue.h"
+#include "encode.h"
+#include "libavutil/avassert.h"
+
+av_cold void ff_af_queue_init(AVCodecContext *avctx, AudioFrameQueue *afq)
+{
+ afq->avctx = avctx;
+ afq->remaining_delay = avctx->initial_padding;
+ afq->remaining_samples = avctx->initial_padding;
+ afq->frame_count = 0;
+}
+
+void ff_af_queue_close(AudioFrameQueue *afq)
+{
+ if(afq->frame_count)
+ av_log(afq->avctx, AV_LOG_WARNING, "%d frames left in the queue on closing\n", afq->frame_count);
+ av_freep(&afq->frames);
+ memset(afq, 0, sizeof(*afq));
+}
+
+int ff_af_queue_add(AudioFrameQueue *afq, const AVFrame *f)
+{
+ AudioFrame *new = av_fast_realloc(afq->frames, &afq->frame_alloc, sizeof(*afq->frames)*(afq->frame_count+1));
+ if(!new)
+ return AVERROR(ENOMEM);
+ afq->frames = new;
+ new += afq->frame_count;
+
+ /* get frame parameters */
+ new->duration = f->nb_samples;
+ new->duration += afq->remaining_delay;
+ if (f->pts != AV_NOPTS_VALUE) {
+ new->pts = av_rescale_q(f->pts,
+ afq->avctx->time_base,
+ (AVRational){ 1, afq->avctx->sample_rate });
+ new->pts -= afq->remaining_delay;
+ if(afq->frame_count && new[-1].pts >= new->pts)
+ av_log(afq->avctx, AV_LOG_WARNING, "Queue input is backward in time\n");
+ } else {
+ new->pts = AV_NOPTS_VALUE;
+ }
+ afq->remaining_delay = 0;
+
+ /* add frame sample count */
+ afq->remaining_samples += f->nb_samples;
+
+ afq->frame_count++;
+
+ return 0;
+}
+
+void ff_af_queue_remove(AudioFrameQueue *afq, int nb_samples, int64_t *pts,
+ int64_t *duration)
+{
+ int64_t out_pts = AV_NOPTS_VALUE;
+ int removed_samples = 0;
+ int i;
+
+ if (afq->frame_count || afq->frame_alloc) {
+ if (afq->frames->pts != AV_NOPTS_VALUE)
+ out_pts = afq->frames->pts;
+ }
+ if(!afq->frame_count)
+ av_log(afq->avctx, AV_LOG_WARNING, "Trying to remove %d samples, but the queue is empty\n", nb_samples);
+ if (pts)
+ *pts = ff_samples_to_time_base(afq->avctx, out_pts);
+
+ for(i=0; nb_samples && i<afq->frame_count; i++){
+ int n= FFMIN(afq->frames[i].duration, nb_samples);
+ afq->frames[i].duration -= n;
+ nb_samples -= n;
+ removed_samples += n;
+ if(afq->frames[i].pts != AV_NOPTS_VALUE)
+ afq->frames[i].pts += n;
+ }
+ afq->remaining_samples -= removed_samples;
+ i -= i && afq->frames[i-1].duration;
+ memmove(afq->frames, afq->frames + i, sizeof(*afq->frames) * (afq->frame_count - i));
+ afq->frame_count -= i;
+
+ if(nb_samples){
+ av_assert0(!afq->frame_count);
+ av_assert0(afq->remaining_samples == afq->remaining_delay);
+ if(afq->frames && afq->frames[0].pts != AV_NOPTS_VALUE)
+ afq->frames[0].pts += nb_samples;
+ av_log(afq->avctx, AV_LOG_DEBUG, "Trying to remove %d more samples than there are in the queue\n", nb_samples);
+ }
+ if (duration)
+ *duration = ff_samples_to_time_base(afq->avctx, removed_samples);
+}
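
The queue stores pts in sample units: ff_af_queue_add() rescales the incoming pts from the encoder time base to 1/sample_rate, and ff_samples_to_time_base(), declared elsewhere in libavcodec, is assumed to perform the inverse rescale when the packet pts/duration are emitted. A minimal standalone sketch of that arithmetic (plain 64-bit division instead of av_rescale_q(), which additionally rounds and guards against overflow; the 90 kHz time base is just an example):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int num, den; } Rational;

/* encoder time base -> sample count at sample_rate */
static int64_t tb_to_samples(int64_t pts, Rational tb, int sample_rate)
{
    return pts * tb.num * sample_rate / tb.den;
}

/* sample count -> encoder time base (what ff_samples_to_time_base is
 * assumed to compute) */
static int64_t samples_to_tb(int64_t samples, Rational tb, int sample_rate)
{
    return samples * tb.den / ((int64_t)tb.num * sample_rate);
}

int main(void)
{
    Rational tb = { 1, 90000 };       /* assumed 90 kHz time base */
    int sample_rate = 48000;

    int64_t samples = tb_to_samples(180000, tb, sample_rate);  /* 2 s -> 96000 */
    printf("%" PRId64 " samples, back to tb: %" PRId64 "\n",
           samples, samples_to_tb(samples, tb, sample_rate));   /* 96000, 180000 */
    return 0;
}
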
diff --git a/media/ffvpx/libavcodec/audio_frame_queue.h b/media/ffvpx/libavcodec/audio_frame_queue.h
new file mode 100644
index 0000000000..d8076eae54
--- /dev/null
+++ b/media/ffvpx/libavcodec/audio_frame_queue.h
@@ -0,0 +1,83 @@
+/*
+ * Audio Frame Queue
+ * Copyright (c) 2012 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AUDIO_FRAME_QUEUE_H
+#define AVCODEC_AUDIO_FRAME_QUEUE_H
+
+#include "avcodec.h"
+
+typedef struct AudioFrame {
+ int64_t pts;
+ int duration;
+} AudioFrame;
+
+typedef struct AudioFrameQueue {
+ AVCodecContext *avctx;
+ int remaining_delay;
+ int remaining_samples;
+ AudioFrame *frames;
+ unsigned frame_count;
+ unsigned frame_alloc;
+} AudioFrameQueue;
+
+/**
+ * Initialize AudioFrameQueue.
+ *
+ * @param avctx context to use for time_base and av_log
+ * @param afq queue context
+ */
+void ff_af_queue_init(AVCodecContext *avctx, AudioFrameQueue *afq);
+
+/**
+ * Close AudioFrameQueue.
+ *
+ * Frees memory if needed.
+ *
+ * @param afq queue context
+ */
+void ff_af_queue_close(AudioFrameQueue *afq);
+
+/**
+ * Add a frame to the queue.
+ *
+ * @param afq queue context
+ * @param f frame to add to the queue
+ */
+int ff_af_queue_add(AudioFrameQueue *afq, const AVFrame *f);
+
+/**
+ * Remove frame(s) from the queue.
+ *
+ * Retrieves the pts of the next available frame, or a generated pts based on
+ * the last frame duration if there are no frames left in the queue. The number
+ * of requested samples should be the full number of samples represented by the
+ * packet that will be output by the encoder. If fewer samples are available
+ * in the queue, a smaller value will be used for the output duration.
+ *
+ * @param afq queue context
+ * @param nb_samples number of samples to remove from the queue
+ * @param[out] pts output packet pts
+ * @param[out] duration output packet duration
+ */
+void ff_af_queue_remove(AudioFrameQueue *afq, int nb_samples, int64_t *pts,
+ int64_t *duration);
+
+#endif /* AVCODEC_AUDIO_FRAME_QUEUE_H */
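
The intended call pattern, as the two encoder wrappers added below (libopusenc.c, libvorbisenc.c) use it, in a condensed sketch: it compiles only inside the libavcodec tree, the sketch_* names are illustrative, and all error handling is omitted.

#include "audio_frame_queue.h"

static AudioFrameQueue sketch_afq;

static void sketch_init(AVCodecContext *avctx)
{
    /* avctx->initial_padding already holds the codec's lookahead here */
    ff_af_queue_init(avctx, &sketch_afq);
}

static void sketch_encode_one(const AVFrame *frame, AVPacket *avpkt,
                              int packet_samples)
{
    if (frame)
        ff_af_queue_add(&sketch_afq, frame);  /* record input pts/duration */

    /* ... run the actual codec here and fill avpkt->data / avpkt->size ... */

    /* consume packet_samples from the queue; the packet pts/duration come
     * back rescaled to the encoder time base */
    ff_af_queue_remove(&sketch_afq, packet_samples,
                       &avpkt->pts, &avpkt->duration);
}

static void sketch_close(void)
{
    ff_af_queue_close(&sketch_afq);           /* warns if frames remain queued */
}
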
diff --git a/media/ffvpx/libavcodec/codec_list.c b/media/ffvpx/libavcodec/codec_list.c
index 04259e3cd7..7c6b0ceacd 100644
--- a/media/ffvpx/libavcodec/codec_list.c
+++ b/media/ffvpx/libavcodec/codec_list.c
@@ -20,6 +20,9 @@ static const FFCodec * const codec_list[] = {
#if CONFIG_LIBVORBIS_DECODER
&ff_libvorbis_decoder,
#endif
+#if CONFIG_LIBVORBIS_ENCODER
+ &ff_libvorbis_encoder,
+#endif
#if CONFIG_PCM_ALAW_DECODER
&ff_pcm_alaw_decoder,
#endif
@@ -44,6 +47,9 @@ static const FFCodec * const codec_list[] = {
#if CONFIG_LIBOPUS_DECODER
&ff_libopus_decoder,
#endif
+#if CONFIG_LIBOPUS_ENCODER
+ &ff_libopus_encoder,
+#endif
#if CONFIG_LIBVPX_VP8_DECODER
&ff_libvpx_vp8_decoder,
#endif
diff --git a/media/ffvpx/libavcodec/libopusenc.c b/media/ffvpx/libavcodec/libopusenc.c
new file mode 100644
index 0000000000..68667e3350
--- /dev/null
+++ b/media/ffvpx/libavcodec/libopusenc.c
@@ -0,0 +1,610 @@
+/*
+ * Opus encoder using libopus
+ * Copyright (c) 2012 Nathan Caldwell
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <opus.h>
+#include <opus_multistream.h>
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "bytestream.h"
+#include "codec_internal.h"
+#include "encode.h"
+#include "libopus.h"
+#include "audio_frame_queue.h"
+#include "vorbis_data.h"
+
+typedef struct LibopusEncOpts {
+ int vbr;
+ int application;
+ int packet_loss;
+ int fec;
+ int complexity;
+ float frame_duration;
+ int packet_size;
+ int max_bandwidth;
+ int mapping_family;
+ int dtx;
+#ifdef OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST
+ int apply_phase_inv;
+#endif
+} LibopusEncOpts;
+
+typedef struct LibopusEncContext {
+ AVClass *class;
+ OpusMSEncoder *enc;
+ int stream_count;
+ uint8_t *samples;
+ LibopusEncOpts opts;
+ AudioFrameQueue afq;
+ const uint8_t *encoder_channel_map;
+} LibopusEncContext;
+
+static const uint8_t opus_coupled_streams[8] = {
+ 0, 1, 1, 2, 2, 2, 2, 3
+};
+
+/* Opus internal to Vorbis channel order mapping written in the header */
+static const uint8_t opus_vorbis_channel_map[8][8] = {
+ { 0 },
+ { 0, 1 },
+ { 0, 2, 1 },
+ { 0, 1, 2, 3 },
+ { 0, 4, 1, 2, 3 },
+ { 0, 4, 1, 2, 3, 5 },
+ { 0, 4, 1, 2, 3, 5, 6 },
+ { 0, 6, 1, 2, 3, 4, 5, 7 },
+};
+
+/* libavcodec to libopus channel order mapping, passed to libopus */
+static const uint8_t libavcodec_libopus_channel_map[8][8] = {
+ { 0 },
+ { 0, 1 },
+ { 0, 1, 2 },
+ { 0, 1, 2, 3 },
+ { 0, 1, 3, 4, 2 },
+ { 0, 1, 4, 5, 2, 3 },
+ { 0, 1, 5, 6, 2, 4, 3 },
+ { 0, 1, 6, 7, 4, 5, 2, 3 },
+};
+
+static void libopus_write_header(AVCodecContext *avctx, int stream_count,
+ int coupled_stream_count,
+ int mapping_family,
+ const uint8_t *channel_mapping)
+{
+ uint8_t *p = avctx->extradata;
+ int channels = avctx->ch_layout.nb_channels;
+
+ bytestream_put_buffer(&p, "OpusHead", 8);
+ bytestream_put_byte(&p, 1); /* Version */
+ bytestream_put_byte(&p, channels);
+ bytestream_put_le16(&p, avctx->initial_padding * 48000 / avctx->sample_rate); /* Lookahead samples at 48kHz */
+ bytestream_put_le32(&p, avctx->sample_rate); /* Original sample rate */
+ bytestream_put_le16(&p, 0); /* Gain of 0dB is recommended. */
+
+ /* Channel mapping */
+ bytestream_put_byte(&p, mapping_family);
+ if (mapping_family != 0) {
+ bytestream_put_byte(&p, stream_count);
+ bytestream_put_byte(&p, coupled_stream_count);
+ bytestream_put_buffer(&p, channel_mapping, channels);
+ }
+}
+
+static int libopus_configure_encoder(AVCodecContext *avctx, OpusMSEncoder *enc,
+ LibopusEncOpts *opts)
+{
+ int ret;
+
+ if (avctx->global_quality) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Quality-based encoding not supported, "
+ "please specify a bitrate and VBR setting.\n");
+ return AVERROR(EINVAL);
+ }
+
+ ret = opus_multistream_encoder_ctl(enc, OPUS_SET_BITRATE(avctx->bit_rate));
+ if (ret != OPUS_OK) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to set bitrate: %s\n", opus_strerror(ret));
+ return ret;
+ }
+
+ ret = opus_multistream_encoder_ctl(enc,
+ OPUS_SET_COMPLEXITY(opts->complexity));
+ if (ret != OPUS_OK)
+ av_log(avctx, AV_LOG_WARNING,
+ "Unable to set complexity: %s\n", opus_strerror(ret));
+
+ ret = opus_multistream_encoder_ctl(enc, OPUS_SET_VBR(!!opts->vbr));
+ if (ret != OPUS_OK)
+ av_log(avctx, AV_LOG_WARNING,
+ "Unable to set VBR: %s\n", opus_strerror(ret));
+
+ ret = opus_multistream_encoder_ctl(enc,
+ OPUS_SET_VBR_CONSTRAINT(opts->vbr == 2));
+ if (ret != OPUS_OK)
+ av_log(avctx, AV_LOG_WARNING,
+ "Unable to set constrained VBR: %s\n", opus_strerror(ret));
+
+ ret = opus_multistream_encoder_ctl(enc,
+ OPUS_SET_PACKET_LOSS_PERC(opts->packet_loss));
+ if (ret != OPUS_OK)
+ av_log(avctx, AV_LOG_WARNING,
+ "Unable to set expected packet loss percentage: %s\n",
+ opus_strerror(ret));
+
+ ret = opus_multistream_encoder_ctl(enc,
+ OPUS_SET_INBAND_FEC(opts->fec));
+ if (ret != OPUS_OK)
+ av_log(avctx, AV_LOG_WARNING,
+ "Unable to set inband FEC: %s\n",
+ opus_strerror(ret));
+
+ ret = opus_multistream_encoder_ctl(enc,
+ OPUS_SET_DTX(opts->dtx));
+ if (ret != OPUS_OK)
+ av_log(avctx, AV_LOG_WARNING,
+ "Unable to set DTX: %s\n",
+ opus_strerror(ret));
+
+ if (avctx->cutoff) {
+ ret = opus_multistream_encoder_ctl(enc,
+ OPUS_SET_MAX_BANDWIDTH(opts->max_bandwidth));
+ if (ret != OPUS_OK)
+ av_log(avctx, AV_LOG_WARNING,
+ "Unable to set maximum bandwidth: %s\n", opus_strerror(ret));
+ }
+
+#ifdef OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST
+ ret = opus_multistream_encoder_ctl(enc,
+ OPUS_SET_PHASE_INVERSION_DISABLED(!opts->apply_phase_inv));
+ if (ret != OPUS_OK)
+ av_log(avctx, AV_LOG_WARNING,
+ "Unable to set phase inversion: %s\n",
+ opus_strerror(ret));
+#endif
+ return OPUS_OK;
+}
+
+static int libopus_check_max_channels(AVCodecContext *avctx,
+ int max_channels) {
+ if (avctx->ch_layout.nb_channels > max_channels) {
+ av_log(avctx, AV_LOG_ERROR, "Opus mapping family undefined for %d channels.\n",
+ avctx->ch_layout.nb_channels);
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static int libopus_check_vorbis_layout(AVCodecContext *avctx, int mapping_family) {
+ av_assert2(avctx->ch_layout.nb_channels < FF_ARRAY_ELEMS(ff_vorbis_ch_layouts));
+
+ if (avctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
+ av_log(avctx, AV_LOG_WARNING,
+ "No channel layout specified. Opus encoder will use Vorbis "
+ "channel layout for %d channels.\n", avctx->ch_layout.nb_channels);
+ } else if (av_channel_layout_compare(&avctx->ch_layout, &ff_vorbis_ch_layouts[avctx->ch_layout.nb_channels - 1])) {
+ char name[32];
+
+ av_channel_layout_describe(&avctx->ch_layout, name, sizeof(name));
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid channel layout %s for specified mapping family %d.\n",
+ name, mapping_family);
+
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static int libopus_validate_layout_and_get_channel_map(
+ AVCodecContext *avctx,
+ int mapping_family,
+ const uint8_t ** channel_map_result)
+{
+ const uint8_t * channel_map = NULL;
+ int ret;
+
+ switch (mapping_family) {
+ case -1:
+ ret = libopus_check_max_channels(avctx, 8);
+ if (ret == 0) {
+ ret = libopus_check_vorbis_layout(avctx, mapping_family);
+ /* Channels do not need to be reordered. */
+ }
+
+ break;
+ case 0:
+ ret = libopus_check_max_channels(avctx, 2);
+ if (ret == 0) {
+ ret = libopus_check_vorbis_layout(avctx, mapping_family);
+ }
+ break;
+ case 1:
+ /* Opus expects channels to be in Vorbis order. */
+ ret = libopus_check_max_channels(avctx, 8);
+ if (ret == 0) {
+ ret = libopus_check_vorbis_layout(avctx, mapping_family);
+ channel_map = ff_vorbis_channel_layout_offsets[avctx->ch_layout.nb_channels - 1];
+ }
+ break;
+ case 255:
+ ret = libopus_check_max_channels(avctx, 254);
+ break;
+ default:
+ av_log(avctx, AV_LOG_WARNING,
+ "Unknown channel mapping family %d. Output channel layout may be invalid.\n",
+ mapping_family);
+ ret = 0;
+ }
+
+ *channel_map_result = channel_map;
+ return ret;
+}
+
+static av_cold int libopus_encode_init(AVCodecContext *avctx)
+{
+ LibopusEncContext *opus = avctx->priv_data;
+ OpusMSEncoder *enc;
+ uint8_t libopus_channel_mapping[255];
+ int ret = OPUS_OK;
+ int channels = avctx->ch_layout.nb_channels;
+ int av_ret;
+ int coupled_stream_count, header_size, frame_size;
+ int mapping_family;
+
+ frame_size = opus->opts.frame_duration * 48000 / 1000;
+ switch (frame_size) {
+ case 120:
+ case 240:
+ if (opus->opts.application != OPUS_APPLICATION_RESTRICTED_LOWDELAY)
+ av_log(avctx, AV_LOG_WARNING,
+ "LPC mode cannot be used with a frame duration of less "
+ "than 10ms. Enabling restricted low-delay mode.\n"
+ "Use a longer frame duration if this is not what you want.\n");
+ /* Frame sizes less than 10 ms can only use MDCT mode, so switching to
+ * RESTRICTED_LOWDELAY avoids an unnecessary extra 2.5ms lookahead. */
+ opus->opts.application = OPUS_APPLICATION_RESTRICTED_LOWDELAY;
+ case 480:
+ case 960:
+ case 1920:
+ case 2880:
+#ifdef OPUS_FRAMESIZE_120_MS
+ case 3840:
+ case 4800:
+ case 5760:
+#endif
+ opus->opts.packet_size =
+ avctx->frame_size = frame_size * avctx->sample_rate / 48000;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Invalid frame duration: %g.\n"
+ "Frame duration must be exactly one of: 2.5, 5, 10, 20, 40"
+#ifdef OPUS_FRAMESIZE_120_MS
+ ", 60, 80, 100 or 120.\n",
+#else
+ " or 60.\n",
+#endif
+ opus->opts.frame_duration);
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->compression_level < 0 || avctx->compression_level > 10) {
+ av_log(avctx, AV_LOG_WARNING,
+ "Compression level must be in the range 0 to 10. "
+ "Defaulting to 10.\n");
+ opus->opts.complexity = 10;
+ } else {
+ opus->opts.complexity = avctx->compression_level;
+ }
+
+ if (avctx->cutoff) {
+ switch (avctx->cutoff) {
+ case 4000:
+ opus->opts.max_bandwidth = OPUS_BANDWIDTH_NARROWBAND;
+ break;
+ case 6000:
+ opus->opts.max_bandwidth = OPUS_BANDWIDTH_MEDIUMBAND;
+ break;
+ case 8000:
+ opus->opts.max_bandwidth = OPUS_BANDWIDTH_WIDEBAND;
+ break;
+ case 12000:
+ opus->opts.max_bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND;
+ break;
+ case 20000:
+ opus->opts.max_bandwidth = OPUS_BANDWIDTH_FULLBAND;
+ break;
+ default:
+ av_log(avctx, AV_LOG_WARNING,
+ "Invalid frequency cutoff: %d. Using default maximum bandwidth.\n"
+ "Cutoff frequency must be exactly one of: 4000, 6000, 8000, 12000 or 20000.\n",
+ avctx->cutoff);
+ avctx->cutoff = 0;
+ }
+ }
+
+ /* Channels may need to be reordered to match opus mapping. */
+ av_ret = libopus_validate_layout_and_get_channel_map(avctx, opus->opts.mapping_family,
+ &opus->encoder_channel_map);
+ if (av_ret) {
+ return av_ret;
+ }
+
+ if (opus->opts.mapping_family == -1) {
+ /* By default, use mapping family 1 for the header but use the older
+ * libopus multistream API to avoid surround masking. */
+
+ /* Set the mapping family so that the value is correct in the header */
+ mapping_family = channels > 2 ? 1 : 0;
+ coupled_stream_count = opus_coupled_streams[channels - 1];
+ opus->stream_count = channels - coupled_stream_count;
+ memcpy(libopus_channel_mapping,
+ opus_vorbis_channel_map[channels - 1],
+ channels * sizeof(*libopus_channel_mapping));
+
+ enc = opus_multistream_encoder_create(
+ avctx->sample_rate, channels, opus->stream_count,
+ coupled_stream_count,
+ libavcodec_libopus_channel_map[channels - 1],
+ opus->opts.application, &ret);
+ } else {
+ /* Use the newer multistream API. The encoder will set the channel
+ * mapping and coupled stream counts to its internal defaults and will
+ * use surround masking analysis to save bits. */
+ mapping_family = opus->opts.mapping_family;
+ enc = opus_multistream_surround_encoder_create(
+ avctx->sample_rate, channels, mapping_family,
+ &opus->stream_count, &coupled_stream_count, libopus_channel_mapping,
+ opus->opts.application, &ret);
+ }
+
+ if (ret != OPUS_OK) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to create encoder: %s\n", opus_strerror(ret));
+ return ff_opus_error_to_averror(ret);
+ }
+
+ if (!avctx->bit_rate) {
+ /* Sane default copied from opusenc */
+ avctx->bit_rate = 64000 * opus->stream_count +
+ 32000 * coupled_stream_count;
+ av_log(avctx, AV_LOG_WARNING,
+ "No bit rate set. Defaulting to %"PRId64" bps.\n", avctx->bit_rate);
+ }
+
+ if (avctx->bit_rate < 500 || avctx->bit_rate > 256000 * channels) {
+ av_log(avctx, AV_LOG_ERROR, "The bit rate %"PRId64" bps is unsupported. "
+ "Please choose a value between 500 and %d.\n", avctx->bit_rate,
+ 256000 * channels);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ ret = libopus_configure_encoder(avctx, enc, &opus->opts);
+ if (ret != OPUS_OK) {
+ ret = ff_opus_error_to_averror(ret);
+ goto fail;
+ }
+
+ /* Header includes channel mapping table if and only if mapping family is NOT 0 */
+ header_size = 19 + (mapping_family == 0 ? 0 : 2 + channels);
+ avctx->extradata = av_malloc(header_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!avctx->extradata) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate extradata.\n");
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ avctx->extradata_size = header_size;
+
+ opus->samples = av_calloc(frame_size, channels *
+ av_get_bytes_per_sample(avctx->sample_fmt));
+ if (!opus->samples) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate samples buffer.\n");
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ret = opus_multistream_encoder_ctl(enc, OPUS_GET_LOOKAHEAD(&avctx->initial_padding));
+ if (ret != OPUS_OK)
+ av_log(avctx, AV_LOG_WARNING,
+ "Unable to get number of lookahead samples: %s\n",
+ opus_strerror(ret));
+
+ libopus_write_header(avctx, opus->stream_count, coupled_stream_count,
+ mapping_family, libopus_channel_mapping);
+
+ ff_af_queue_init(avctx, &opus->afq);
+
+ opus->enc = enc;
+
+ return 0;
+
+fail:
+ opus_multistream_encoder_destroy(enc);
+ return ret;
+}
+
+static void libopus_copy_samples_with_channel_map(
+ uint8_t *dst, const uint8_t *src, const uint8_t *channel_map,
+ int nb_channels, int nb_samples, int bytes_per_sample) {
+ int sample, channel;
+ for (sample = 0; sample < nb_samples; ++sample) {
+ for (channel = 0; channel < nb_channels; ++channel) {
+ const size_t src_pos = bytes_per_sample * (nb_channels * sample + channel);
+ const size_t dst_pos = bytes_per_sample * (nb_channels * sample + channel_map[channel]);
+
+ memcpy(&dst[dst_pos], &src[src_pos], bytes_per_sample);
+ }
+ }
+}
+
+static int libopus_encode(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
+{
+ LibopusEncContext *opus = avctx->priv_data;
+ const int bytes_per_sample = av_get_bytes_per_sample(avctx->sample_fmt);
+ const int channels = avctx->ch_layout.nb_channels;
+ const int sample_size = channels * bytes_per_sample;
+ const uint8_t *audio;
+ int ret;
+ int discard_padding;
+
+ if (frame) {
+ ret = ff_af_queue_add(&opus->afq, frame);
+ if (ret < 0)
+ return ret;
+ if (opus->encoder_channel_map != NULL) {
+ audio = opus->samples;
+ libopus_copy_samples_with_channel_map(
+ opus->samples, frame->data[0], opus->encoder_channel_map,
+ channels, frame->nb_samples, bytes_per_sample);
+ } else if (frame->nb_samples < opus->opts.packet_size) {
+ audio = opus->samples;
+ memcpy(opus->samples, frame->data[0], frame->nb_samples * sample_size);
+ } else
+ audio = frame->data[0];
+ } else {
+ if (!opus->afq.remaining_samples || (!opus->afq.frame_alloc && !opus->afq.frame_count))
+ return 0;
+ audio = opus->samples;
+ memset(opus->samples, 0, opus->opts.packet_size * sample_size);
+ }
+
+ /* Maximum packet size taken from opusenc in opus-tools. 120ms packets
+ * consist of 6 frames in one packet. The maximum frame size is 1275
+ * bytes along with the largest possible packet header of 7 bytes. */
+ if ((ret = ff_alloc_packet(avctx, avpkt, (1275 * 6 + 7) * opus->stream_count)) < 0)
+ return ret;
+
+ if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
+ ret = opus_multistream_encode_float(opus->enc, (const float *)audio,
+ opus->opts.packet_size,
+ avpkt->data, avpkt->size);
+ else
+ ret = opus_multistream_encode(opus->enc, (const opus_int16 *)audio,
+ opus->opts.packet_size,
+ avpkt->data, avpkt->size);
+
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Error encoding frame: %s\n", opus_strerror(ret));
+ return ff_opus_error_to_averror(ret);
+ }
+
+ av_shrink_packet(avpkt, ret);
+
+ ff_af_queue_remove(&opus->afq, opus->opts.packet_size,
+ &avpkt->pts, &avpkt->duration);
+
+ discard_padding = opus->opts.packet_size - avpkt->duration;
+ // Check if subtraction resulted in an overflow
+ if ((discard_padding < opus->opts.packet_size) != (avpkt->duration > 0))
+ return AVERROR(EINVAL);
+ if (discard_padding > 0) {
+ uint8_t* side_data = av_packet_new_side_data(avpkt,
+ AV_PKT_DATA_SKIP_SAMPLES,
+ 10);
+ if (!side_data)
+ return AVERROR(ENOMEM);
+ AV_WL32(side_data + 4, discard_padding);
+ }
+
+ *got_packet_ptr = 1;
+
+ return 0;
+}
+
+static av_cold int libopus_encode_close(AVCodecContext *avctx)
+{
+ LibopusEncContext *opus = avctx->priv_data;
+
+ opus_multistream_encoder_destroy(opus->enc);
+
+ ff_af_queue_close(&opus->afq);
+
+ av_freep(&opus->samples);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(LibopusEncContext, opts.x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption libopus_options[] = {
+ { "application", "Intended application type", OFFSET(application), AV_OPT_TYPE_INT, { .i64 = OPUS_APPLICATION_AUDIO }, OPUS_APPLICATION_VOIP, OPUS_APPLICATION_RESTRICTED_LOWDELAY, FLAGS, "application" },
+ { "voip", "Favor improved speech intelligibility", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_VOIP }, 0, 0, FLAGS, "application" },
+ { "audio", "Favor faithfulness to the input", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_AUDIO }, 0, 0, FLAGS, "application" },
+ { "lowdelay", "Restrict to only the lowest delay modes", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_RESTRICTED_LOWDELAY }, 0, 0, FLAGS, "application" },
+ { "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 20.0 }, 2.5, 120.0, FLAGS },
+ { "packet_loss", "Expected packet loss percentage", OFFSET(packet_loss), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, FLAGS },
+ { "fec", "Enable inband FEC. Expected packet loss must be non-zero", OFFSET(fec), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "vbr", "Variable bit rate mode", OFFSET(vbr), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 2, FLAGS, "vbr" },
+ { "off", "Use constant bit rate", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "vbr" },
+ { "on", "Use variable bit rate", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "vbr" },
+ { "constrained", "Use constrained VBR", 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, 0, 0, FLAGS, "vbr" },
+ { "mapping_family", "Channel Mapping Family", OFFSET(mapping_family), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, FLAGS, "mapping_family" },
+ { "dtx", "Enable DTX", OFFSET(dtx), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+#ifdef OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST
+ { "apply_phase_inv", "Apply intensity stereo phase inversion", OFFSET(apply_phase_inv), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
+#endif
+ { NULL },
+};
+
+static const AVClass libopus_class = {
+ .class_name = "libopus",
+ .item_name = av_default_item_name,
+ .option = libopus_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static const FFCodecDefault libopus_defaults[] = {
+ { "b", "0" },
+ { "compression_level", "10" },
+ { NULL },
+};
+
+static const int libopus_sample_rates[] = {
+ 48000, 24000, 16000, 12000, 8000, 0,
+};
+
+const FFCodec ff_libopus_encoder = {
+ .p.name = "libopus",
+ CODEC_LONG_NAME("libopus Opus"),
+ .p.type = AVMEDIA_TYPE_AUDIO,
+ .p.id = AV_CODEC_ID_OPUS,
+ .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
+ AV_CODEC_CAP_SMALL_LAST_FRAME,
+ .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
+ .priv_data_size = sizeof(LibopusEncContext),
+ .init = libopus_encode_init,
+ FF_CODEC_ENCODE_CB(libopus_encode),
+ .close = libopus_encode_close,
+ .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_NONE },
+ .p.supported_samplerates = libopus_sample_rates,
+ .p.priv_class = &libopus_class,
+ .defaults = libopus_defaults,
+ .p.wrapper_name = "libopus",
+};
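
For reference, the OpusHead identification header written by libopus_write_header() above is 19 bytes when mapping family 0 is used (mono or stereo); only the other families append the stream counts and the channel mapping table. A standalone sketch that builds the same layout for a hypothetical stereo stream with a 312-sample lookahead and dumps it as hex (the values are illustrative, not taken from the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_le16(uint8_t *p, uint16_t v) { p[0] = v & 0xff; p[1] = v >> 8; }
static void put_le32(uint8_t *p, uint32_t v) { put_le16(p, v & 0xffff); put_le16(p + 2, v >> 16); }

int main(void)
{
    uint8_t head[19];

    memcpy(head, "OpusHead", 8);   /* magic */
    head[8] = 1;                   /* version */
    head[9] = 2;                   /* channel count */
    put_le16(head + 10, 312);      /* pre-skip (lookahead) in 48 kHz samples */
    put_le32(head + 12, 48000);    /* original input sample rate */
    put_le16(head + 16, 0);        /* output gain, 0 dB */
    head[18] = 0;                  /* mapping family 0: no mapping table follows */

    for (int i = 0; i < 19; i++)
        printf("%02x%c", head[i], i == 18 ? '\n' : ' ');
    return 0;
}
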
diff --git a/media/ffvpx/libavcodec/libvorbisenc.c b/media/ffvpx/libavcodec/libvorbisenc.c
new file mode 100644
index 0000000000..6331cf0d79
--- /dev/null
+++ b/media/ffvpx/libavcodec/libvorbisenc.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2002 Mark Hills <mark@pogo.org.uk>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <vorbis/vorbisenc.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/fifo.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "audio_frame_queue.h"
+#include "codec_internal.h"
+#include "encode.h"
+#include "version.h"
+#include "vorbis_parser.h"
+
+
+/* Number of samples the user should send in each call.
+ * This value is used because it is the LCD of all possible frame sizes, so
+ * an output packet will always start at the same point as one of the input
+ * packets.
+ */
+#define LIBVORBIS_FRAME_SIZE 64
+
+#define BUFFER_SIZE (1024 * 64)
+
+typedef struct LibvorbisEncContext {
+ AVClass *av_class; /**< class for AVOptions */
+ vorbis_info vi; /**< vorbis_info used during init */
+ vorbis_dsp_state vd; /**< DSP state used for analysis */
+ vorbis_block vb; /**< vorbis_block used for analysis */
+ AVFifo *pkt_fifo; /**< output packet buffer */
+ int eof; /**< end-of-file flag */
+ int dsp_initialized; /**< vd has been initialized */
+ vorbis_comment vc; /**< VorbisComment info */
+ double iblock; /**< impulse block bias option */
+ AVVorbisParseContext *vp; /**< parse context to get durations */
+ AudioFrameQueue afq; /**< frame queue for timestamps */
+} LibvorbisEncContext;
+
+static const AVOption options[] = {
+ { "iblock", "Sets the impulse block bias", offsetof(LibvorbisEncContext, iblock), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -15, 0, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
+ { NULL }
+};
+
+static const FFCodecDefault defaults[] = {
+ { "b", "0" },
+ { NULL },
+};
+
+static const AVClass vorbis_class = {
+ .class_name = "libvorbis",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static const uint8_t vorbis_encoding_channel_layout_offsets[8][8] = {
+ { 0 },
+ { 0, 1 },
+ { 0, 2, 1 },
+ { 0, 1, 2, 3 },
+ { 0, 2, 1, 3, 4 },
+ { 0, 2, 1, 4, 5, 3 },
+ { 0, 2, 1, 5, 6, 4, 3 },
+ { 0, 2, 1, 6, 7, 4, 5, 3 },
+};
+
+static int vorbis_error_to_averror(int ov_err)
+{
+ switch (ov_err) {
+ case OV_EFAULT: return AVERROR_BUG;
+ case OV_EINVAL: return AVERROR(EINVAL);
+ case OV_EIMPL: return AVERROR(EINVAL);
+ default: return AVERROR_UNKNOWN;
+ }
+}
+
+static av_cold int libvorbis_setup(vorbis_info *vi, AVCodecContext *avctx)
+{
+ LibvorbisEncContext *s = avctx->priv_data;
+ int channels = avctx->ch_layout.nb_channels;
+ double cfreq;
+ int ret;
+
+ if (avctx->flags & AV_CODEC_FLAG_QSCALE || !avctx->bit_rate) {
+ /* variable bitrate
+ * NOTE: we use the oggenc range of -1 to 10 for global_quality for
+ * user convenience, but libvorbis uses -0.1 to 1.0.
+ */
+ float q = avctx->global_quality / (float)FF_QP2LAMBDA;
+ /* default to 3 if the user did not set quality or bitrate */
+ if (!(avctx->flags & AV_CODEC_FLAG_QSCALE))
+ q = 3.0;
+ if ((ret = vorbis_encode_setup_vbr(vi, channels,
+ avctx->sample_rate,
+ q / 10.0)))
+ goto error;
+ } else {
+ int minrate = avctx->rc_min_rate > 0 ? avctx->rc_min_rate : -1;
+ int maxrate = avctx->rc_max_rate > 0 ? avctx->rc_max_rate : -1;
+
+ /* average bitrate */
+ if ((ret = vorbis_encode_setup_managed(vi, channels,
+ avctx->sample_rate, maxrate,
+ avctx->bit_rate, minrate)))
+ goto error;
+
+ /* variable bitrate by estimate, disable slow rate management */
+ if (minrate == -1 && maxrate == -1)
+ if ((ret = vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL)))
+ goto error; /* should not happen */
+ }
+
+ /* cutoff frequency */
+ if (avctx->cutoff > 0) {
+ cfreq = avctx->cutoff / 1000.0;
+ if ((ret = vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq)))
+ goto error; /* should not happen */
+ }
+
+ /* impulse block bias */
+ if (s->iblock) {
+ if ((ret = vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &s->iblock)))
+ goto error;
+ }
+
+ if ((channels == 3 &&
+ av_channel_layout_compare(&avctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_SURROUND)) ||
+ (channels == 4 &&
+ av_channel_layout_compare(&avctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_2_2) &&
+ av_channel_layout_compare(&avctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_QUAD)) ||
+ (channels == 5 &&
+ av_channel_layout_compare(&avctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0) &&
+ av_channel_layout_compare(&avctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0_BACK)) ||
+ (channels == 6 &&
+ av_channel_layout_compare(&avctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT1) &&
+ av_channel_layout_compare(&avctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT1_BACK)) ||
+ (channels == 7 &&
+ av_channel_layout_compare(&avctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_6POINT1)) ||
+ (channels == 8 &&
+ av_channel_layout_compare(&avctx->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_7POINT1))) {
+ if (avctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
+ char name[32];
+ av_channel_layout_describe(&avctx->ch_layout, name, sizeof(name));
+ av_log(avctx, AV_LOG_ERROR, "%s not supported by Vorbis: "
+ "output stream will have incorrect "
+ "channel layout.\n", name);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "No channel layout specified. The encoder "
+ "will use Vorbis channel layout for "
+ "%d channels.\n", channels);
+ }
+ }
+
+ if ((ret = vorbis_encode_setup_init(vi)))
+ goto error;
+
+ return 0;
+error:
+ return vorbis_error_to_averror(ret);
+}
+
+/* How many bytes are needed for a buffer of length 'l' */
+static int xiph_len(int l)
+{
+ return 1 + l / 255 + l;
+}
+
+static av_cold int libvorbis_encode_close(AVCodecContext *avctx)
+{
+ LibvorbisEncContext *s = avctx->priv_data;
+
+ /* notify vorbisenc this is EOF */
+ if (s->dsp_initialized)
+ vorbis_analysis_wrote(&s->vd, 0);
+
+ vorbis_block_clear(&s->vb);
+ vorbis_dsp_clear(&s->vd);
+ vorbis_info_clear(&s->vi);
+
+ av_fifo_freep2(&s->pkt_fifo);
+ ff_af_queue_close(&s->afq);
+
+ av_vorbis_parse_free(&s->vp);
+
+ return 0;
+}
+
+static av_cold int libvorbis_encode_init(AVCodecContext *avctx)
+{
+ LibvorbisEncContext *s = avctx->priv_data;
+ ogg_packet header, header_comm, header_code;
+ uint8_t *p;
+ unsigned int offset;
+ int ret;
+
+ vorbis_info_init(&s->vi);
+ if ((ret = libvorbis_setup(&s->vi, avctx))) {
+ av_log(avctx, AV_LOG_ERROR, "encoder setup failed\n");
+ goto error;
+ }
+ if ((ret = vorbis_analysis_init(&s->vd, &s->vi))) {
+ av_log(avctx, AV_LOG_ERROR, "analysis init failed\n");
+ ret = vorbis_error_to_averror(ret);
+ goto error;
+ }
+ s->dsp_initialized = 1;
+ if ((ret = vorbis_block_init(&s->vd, &s->vb))) {
+ av_log(avctx, AV_LOG_ERROR, "dsp init failed\n");
+ ret = vorbis_error_to_averror(ret);
+ goto error;
+ }
+
+ vorbis_comment_init(&s->vc);
+ if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT))
+ vorbis_comment_add_tag(&s->vc, "encoder", LIBAVCODEC_IDENT);
+
+ if ((ret = vorbis_analysis_headerout(&s->vd, &s->vc, &header, &header_comm,
+ &header_code))) {
+ ret = vorbis_error_to_averror(ret);
+ goto error;
+ }
+
+ avctx->extradata_size = 1 + xiph_len(header.bytes) +
+ xiph_len(header_comm.bytes) +
+ header_code.bytes;
+ p = avctx->extradata = av_malloc(avctx->extradata_size +
+ AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!p) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ p[0] = 2;
+ offset = 1;
+ offset += av_xiphlacing(&p[offset], header.bytes);
+ offset += av_xiphlacing(&p[offset], header_comm.bytes);
+ memcpy(&p[offset], header.packet, header.bytes);
+ offset += header.bytes;
+ memcpy(&p[offset], header_comm.packet, header_comm.bytes);
+ offset += header_comm.bytes;
+ memcpy(&p[offset], header_code.packet, header_code.bytes);
+ offset += header_code.bytes;
+ av_assert0(offset == avctx->extradata_size);
+
+ s->vp = av_vorbis_parse_init(avctx->extradata, avctx->extradata_size);
+ if (!s->vp) {
+ av_log(avctx, AV_LOG_ERROR, "invalid extradata\n");
+ return ret;
+ }
+
+ vorbis_comment_clear(&s->vc);
+
+ avctx->frame_size = LIBVORBIS_FRAME_SIZE;
+ ff_af_queue_init(avctx, &s->afq);
+
+ s->pkt_fifo = av_fifo_alloc2(BUFFER_SIZE, 1, 0);
+ if (!s->pkt_fifo) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+
+ return 0;
+error:
+ libvorbis_encode_close(avctx);
+ return ret;
+}
+
+static int libvorbis_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
+{
+ LibvorbisEncContext *s = avctx->priv_data;
+ ogg_packet op;
+ int ret, duration;
+
+ /* send samples to libvorbis */
+ if (frame) {
+ const int samples = frame->nb_samples;
+ float **buffer;
+ int c, channels = s->vi.channels;
+
+ buffer = vorbis_analysis_buffer(&s->vd, samples);
+ for (c = 0; c < channels; c++) {
+ int co = (channels > 8) ? c :
+ vorbis_encoding_channel_layout_offsets[channels - 1][c];
+ memcpy(buffer[c], frame->extended_data[co],
+ samples * sizeof(*buffer[c]));
+ }
+ if ((ret = vorbis_analysis_wrote(&s->vd, samples)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "error in vorbis_analysis_wrote()\n");
+ return vorbis_error_to_averror(ret);
+ }
+ if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
+ return ret;
+ } else {
+ if (!s->eof && s->afq.frame_alloc)
+ if ((ret = vorbis_analysis_wrote(&s->vd, 0)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "error in vorbis_analysis_wrote()\n");
+ return vorbis_error_to_averror(ret);
+ }
+ s->eof = 1;
+ }
+
+ /* retrieve available packets from libvorbis */
+ while ((ret = vorbis_analysis_blockout(&s->vd, &s->vb)) == 1) {
+ if ((ret = vorbis_analysis(&s->vb, NULL)) < 0)
+ break;
+ if ((ret = vorbis_bitrate_addblock(&s->vb)) < 0)
+ break;
+
+ /* add any available packets to the output packet buffer */
+ while ((ret = vorbis_bitrate_flushpacket(&s->vd, &op)) == 1) {
+ if (av_fifo_can_write(s->pkt_fifo) < sizeof(ogg_packet) + op.bytes) {
+ av_log(avctx, AV_LOG_ERROR, "packet buffer is too small\n");
+ return AVERROR_BUG;
+ }
+ av_fifo_write(s->pkt_fifo, &op, sizeof(ogg_packet));
+ av_fifo_write(s->pkt_fifo, op.packet, op.bytes);
+ }
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "error getting available packets\n");
+ break;
+ }
+ }
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "error getting available packets\n");
+ return vorbis_error_to_averror(ret);
+ }
+
+ /* Read an available packet if possible */
+ if (av_fifo_read(s->pkt_fifo, &op, sizeof(ogg_packet)) < 0)
+ return 0;
+
+ if ((ret = ff_get_encode_buffer(avctx, avpkt, op.bytes, 0)) < 0)
+ return ret;
+ av_fifo_read(s->pkt_fifo, avpkt->data, op.bytes);
+
+ avpkt->pts = ff_samples_to_time_base(avctx, op.granulepos);
+
+ duration = av_vorbis_parse_frame(s->vp, avpkt->data, avpkt->size);
+ if (duration > 0) {
+ /* we do not know encoder delay until we get the first packet from
+ * libvorbis, so we have to update the AudioFrameQueue counts */
+ if (!avctx->initial_padding && s->afq.frames) {
+ avctx->initial_padding = duration;
+ av_assert0(!s->afq.remaining_delay);
+ s->afq.frames->duration += duration;
+ if (s->afq.frames->pts != AV_NOPTS_VALUE)
+ s->afq.frames->pts -= duration;
+ s->afq.remaining_samples += duration;
+ }
+ ff_af_queue_remove(&s->afq, duration, &avpkt->pts, &avpkt->duration);
+ }
+
+ *got_packet_ptr = 1;
+ return 0;
+}
+
+const FFCodec ff_libvorbis_encoder = {
+ .p.name = "libvorbis",
+ CODEC_LONG_NAME("libvorbis"),
+ .p.type = AVMEDIA_TYPE_AUDIO,
+ .p.id = AV_CODEC_ID_VORBIS,
+ .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
+ AV_CODEC_CAP_SMALL_LAST_FRAME,
+ .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
+ .priv_data_size = sizeof(LibvorbisEncContext),
+ .init = libvorbis_encode_init,
+ FF_CODEC_ENCODE_CB(libvorbis_encode_frame),
+ .close = libvorbis_encode_close,
+ .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_NONE },
+ .p.priv_class = &vorbis_class,
+ .defaults = defaults,
+ .p.wrapper_name = "libvorbis",
+};
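
The extradata built in libvorbis_encode_init() packs the three Vorbis headers Xiph-style: one count byte (2), the laced lengths of the first two headers, then the header packets back to back, with the last length implied. A laced length L costs L/255 bytes of 255 plus one remainder byte, which is where xiph_len(L) = 1 + L/255 + L comes from. A standalone sketch of the lacing itself (the 612-byte length is just an example):

#include <stdint.h>
#include <stdio.h>

/* Write the Xiph lacing for one length; returns the bytes written,
 * i.e. 1 + len/255, mirroring what av_xiphlacing() is used for above. */
static unsigned xiph_lacing(uint8_t *p, unsigned len)
{
    unsigned n = 0;
    while (len >= 255) {
        p[n++] = 255;
        len -= 255;
    }
    p[n++] = len;
    return n;
}

int main(void)
{
    uint8_t buf[16];
    unsigned n = xiph_lacing(buf, 612);   /* e.g. a 612-byte setup header */

    for (unsigned i = 0; i < n; i++)
        printf("%u ", buf[i]);            /* prints: 255 255 102 */
    printf("\n");
    return 0;
}
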
diff --git a/media/ffvpx/libavcodec/moz.build b/media/ffvpx/libavcodec/moz.build
index 0ba603d172..886fa7a2cb 100644
--- a/media/ffvpx/libavcodec/moz.build
+++ b/media/ffvpx/libavcodec/moz.build
@@ -20,6 +20,7 @@ LOCAL_INCLUDES += ['/modules/fdlibm/inexact-math-override']
SharedLibrary('mozavcodec')
SOURCES += [
'allcodecs.c',
+ 'audio_frame_queue.c',
'avcodec.c',
'avdct.c',
'avfft.c',
@@ -47,7 +48,9 @@ SOURCES += [
'jrevdct.c',
'libopus.c',
'libopusdec.c',
+ 'libopusenc.c',
'libvorbisdec.c',
+ 'libvorbisenc.c',
'log2_tab.c',
'mpegaudio.c',
'mpegaudiodata.c',