Diffstat (limited to 'mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio')
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Ac3Util.java  584
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Ac4Util.java  250
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioAttributes.java  162
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioCapabilities.java  161
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioCapabilitiesReceiver.java  166
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioDecoderException.java  35
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioListener.java  41
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioProcessor.java  148
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioRendererEventListener.java  174
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioSink.java  329
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioTimestampPoller.java  309
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioTrackPositionTracker.java  545
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AuxEffectInfo.java  85
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/BaseAudioProcessor.java  143
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ChannelMappingAudioProcessor.java  99
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/DefaultAudioSink.java  1474
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/DtsUtil.java  217
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/FloatResamplingAudioProcessor.java  109
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ForwardingAudioSink.java  151
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/MediaCodecAudioRenderer.java  1036
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ResamplingAudioProcessor.java  134
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SilenceSkippingAudioProcessor.java  352
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SimpleDecoderAudioRenderer.java  758
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Sonic.java  506
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SonicAudioProcessor.java  277
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/TeeAudioProcessor.java  235
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/TrimmingAudioProcessor.java  178
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/WavUtil.java  91
-rw-r--r--  mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/package-info.java  19
29 files changed, 8768 insertions, 0 deletions
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Ac3Util.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Ac3Util.java
new file mode 100644
index 0000000000..c68e49dea1
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Ac3Util.java
@@ -0,0 +1,584 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import androidx.annotation.IntDef;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.audio.Ac3Util.SyncFrameInfo.StreamType;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.drm.DrmInitData;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.MimeTypes;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.ParsableBitArray;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.ParsableByteArray;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.nio.ByteBuffer;
+
+/**
+ * Utility methods for parsing Dolby TrueHD and (E-)AC-3 syncframes. (E-)AC-3 parsing follows the
+ * definition in ETSI TS 102 366 V1.4.1.
+ */
+public final class Ac3Util {
+
+ /** Holds sample format information as presented by a syncframe header. */
+ public static final class SyncFrameInfo {
+
+ /**
+ * AC3 stream types. See also E.1.3.1.1. One of {@link #STREAM_TYPE_UNDEFINED}, {@link
+ * #STREAM_TYPE_TYPE0}, {@link #STREAM_TYPE_TYPE1} or {@link #STREAM_TYPE_TYPE2}.
+ */
+ @Documented
+ @Retention(RetentionPolicy.SOURCE)
+ @IntDef({STREAM_TYPE_UNDEFINED, STREAM_TYPE_TYPE0, STREAM_TYPE_TYPE1, STREAM_TYPE_TYPE2})
+ public @interface StreamType {}
+ /** Undefined AC3 stream type. */
+ public static final int STREAM_TYPE_UNDEFINED = -1;
+ /** Type 0 AC3 stream type. */
+ public static final int STREAM_TYPE_TYPE0 = 0;
+ /** Type 1 AC3 stream type. */
+ public static final int STREAM_TYPE_TYPE1 = 1;
+ /** Type 2 AC3 stream type. */
+ public static final int STREAM_TYPE_TYPE2 = 2;
+
+ /**
+ * The sample mime type of the bitstream. One of {@link MimeTypes#AUDIO_AC3}, {@link
+ * MimeTypes#AUDIO_E_AC3} and {@link MimeTypes#AUDIO_E_AC3_JOC}, or null if the syncframe
+ * indicates that the audio should not be decoded.
+ */
+ @Nullable public final String mimeType;
+ /**
+ * The type of the stream if {@link #mimeType} is {@link MimeTypes#AUDIO_E_AC3}, or {@link
+ * #STREAM_TYPE_UNDEFINED} otherwise.
+ */
+ public final @StreamType int streamType;
+ /**
+ * The audio sampling rate in Hz.
+ */
+ public final int sampleRate;
+ /**
+ * The number of audio channels.
+ */
+ public final int channelCount;
+ /**
+ * The size of the frame.
+ */
+ public final int frameSize;
+ /**
+ * Number of audio samples in the frame.
+ */
+ public final int sampleCount;
+
+ private SyncFrameInfo(
+ @Nullable String mimeType,
+ @StreamType int streamType,
+ int channelCount,
+ int sampleRate,
+ int frameSize,
+ int sampleCount) {
+ this.mimeType = mimeType;
+ this.streamType = streamType;
+ this.channelCount = channelCount;
+ this.sampleRate = sampleRate;
+ this.frameSize = frameSize;
+ this.sampleCount = sampleCount;
+ }
+
+ }
+
+ /**
+ * The number of samples to store in each output chunk when rechunking TrueHD streams. The number
+ * of samples extracted from the container corresponding to one syncframe must be an integer
+ * multiple of this value.
+ */
+ public static final int TRUEHD_RECHUNK_SAMPLE_COUNT = 16;
+ /**
+ * The number of bytes that must be parsed from a TrueHD syncframe to calculate the sample count.
+ */
+ public static final int TRUEHD_SYNCFRAME_PREFIX_LENGTH = 10;
+
+ /**
+ * The number of new samples per (E-)AC-3 audio block.
+ */
+ private static final int AUDIO_SAMPLES_PER_AUDIO_BLOCK = 256;
+ /** Each syncframe has 6 blocks that provide 256 new audio samples. See subsection 4.1. */
+ private static final int AC3_SYNCFRAME_AUDIO_SAMPLE_COUNT = 6 * AUDIO_SAMPLES_PER_AUDIO_BLOCK;
+ /**
+ * Number of audio blocks per E-AC-3 syncframe, indexed by numblkscod.
+ */
+ private static final int[] BLOCKS_PER_SYNCFRAME_BY_NUMBLKSCOD = new int[] {1, 2, 3, 6};
+ /**
+ * Sample rates, indexed by fscod.
+ */
+ private static final int[] SAMPLE_RATE_BY_FSCOD = new int[] {48000, 44100, 32000};
+ /**
+ * Sample rates, indexed by fscod2 (E-AC-3).
+ */
+ private static final int[] SAMPLE_RATE_BY_FSCOD2 = new int[] {24000, 22050, 16000};
+ /**
+ * Channel counts, indexed by acmod.
+ */
+ private static final int[] CHANNEL_COUNT_BY_ACMOD = new int[] {2, 1, 2, 3, 3, 4, 4, 5};
+ /** Nominal bitrates in kbps, indexed by frmsizecod / 2. (See table 4.13.) */
+ private static final int[] BITRATE_BY_HALF_FRMSIZECOD =
+ new int[] {
+ 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 576, 640
+ };
+ /** 16-bit words per syncframe, indexed by frmsizecod / 2. (See table 4.13.) */
+ private static final int[] SYNCFRAME_SIZE_WORDS_BY_HALF_FRMSIZECOD_44_1 =
+ new int[] {
+ 69, 87, 104, 121, 139, 174, 208, 243, 278, 348, 417, 487, 557, 696, 835, 975, 1114, 1253,
+ 1393
+ };
+
+ /**
+ * Returns the AC-3 format given {@code data} containing the AC3SpecificBox according to Annex F.
+ * The reading position of {@code data} will be modified.
+ *
+ * @param data The AC3SpecificBox to parse.
+ * @param trackId The track identifier to set on the format.
+ * @param language The language to set on the format.
+ * @param drmInitData {@link DrmInitData} to be included in the format.
+ * @return The AC-3 format parsed from data in the header.
+ */
+ public static Format parseAc3AnnexFFormat(
+ ParsableByteArray data, String trackId, String language, @Nullable DrmInitData drmInitData) {
+ int fscod = (data.readUnsignedByte() & 0xC0) >> 6;
+ int sampleRate = SAMPLE_RATE_BY_FSCOD[fscod];
+ int nextByte = data.readUnsignedByte();
+ int channelCount = CHANNEL_COUNT_BY_ACMOD[(nextByte & 0x38) >> 3];
+ if ((nextByte & 0x04) != 0) { // lfeon
+ channelCount++;
+ }
+ return Format.createAudioSampleFormat(
+ trackId,
+ MimeTypes.AUDIO_AC3,
+ /* codecs= */ null,
+ Format.NO_VALUE,
+ Format.NO_VALUE,
+ channelCount,
+ sampleRate,
+ /* initializationData= */ null,
+ drmInitData,
+ /* selectionFlags= */ 0,
+ language);
+ }
+
+ /**
+ * Returns the E-AC-3 format given {@code data} containing the EC3SpecificBox according to Annex
+ * F. The reading position of {@code data} will be modified.
+ *
+ * @param data The EC3SpecificBox to parse.
+ * @param trackId The track identifier to set on the format.
+ * @param language The language to set on the format.
+ * @param drmInitData {@link DrmInitData} to be included in the format.
+ * @return The E-AC-3 format parsed from data in the header.
+ */
+ public static Format parseEAc3AnnexFFormat(
+ ParsableByteArray data, String trackId, String language, @Nullable DrmInitData drmInitData) {
+ data.skipBytes(2); // data_rate, num_ind_sub
+
+ // Read the first independent substream.
+ int fscod = (data.readUnsignedByte() & 0xC0) >> 6;
+ int sampleRate = SAMPLE_RATE_BY_FSCOD[fscod];
+ int nextByte = data.readUnsignedByte();
+ int channelCount = CHANNEL_COUNT_BY_ACMOD[(nextByte & 0x0E) >> 1];
+ if ((nextByte & 0x01) != 0) { // lfeon
+ channelCount++;
+ }
+
+ // Read the first dependent substream.
+ nextByte = data.readUnsignedByte();
+ int numDepSub = ((nextByte & 0x1E) >> 1);
+ if (numDepSub > 0) {
+ int lowByteChanLoc = data.readUnsignedByte();
+ // Read Lrs/Rrs pair
+ // TODO: Read other channel configuration
+ if ((lowByteChanLoc & 0x02) != 0) {
+ channelCount += 2;
+ }
+ }
+ String mimeType = MimeTypes.AUDIO_E_AC3;
+ if (data.bytesLeft() > 0) {
+ nextByte = data.readUnsignedByte();
+ if ((nextByte & 0x01) != 0) { // flag_ec3_extension_type_a
+ mimeType = MimeTypes.AUDIO_E_AC3_JOC;
+ }
+ }
+ return Format.createAudioSampleFormat(
+ trackId,
+ mimeType,
+ /* codecs= */ null,
+ Format.NO_VALUE,
+ Format.NO_VALUE,
+ channelCount,
+ sampleRate,
+ /* initializationData= */ null,
+ drmInitData,
+ /* selectionFlags= */ 0,
+ language);
+ }
+
+ /**
+ * Returns (E-)AC-3 format information given {@code data} containing a syncframe. The reading
+ * position of {@code data} will be modified.
+ *
+ * @param data The data to parse, positioned at the start of the syncframe.
+ * @return The (E-)AC-3 format data parsed from the header.
+ */
+ public static SyncFrameInfo parseAc3SyncframeInfo(ParsableBitArray data) {
+ int initialPosition = data.getPosition();
+ data.skipBits(40);
+ // Parse the bitstream ID for AC-3 and E-AC-3 (see subsections 4.3, E.1.2 and E.1.3.1.6).
+ boolean isEac3 = data.readBits(5) > 10;
+ data.setPosition(initialPosition);
+ @Nullable String mimeType;
+ @StreamType int streamType = SyncFrameInfo.STREAM_TYPE_UNDEFINED;
+ int sampleRate;
+ int acmod;
+ int frameSize;
+ int sampleCount;
+ boolean lfeon;
+ int channelCount;
+ if (isEac3) {
+ // Subsection E.1.2.
+ data.skipBits(16); // syncword
+ switch (data.readBits(2)) { // strmtyp
+ case 0:
+ streamType = SyncFrameInfo.STREAM_TYPE_TYPE0;
+ break;
+ case 1:
+ streamType = SyncFrameInfo.STREAM_TYPE_TYPE1;
+ break;
+ case 2:
+ streamType = SyncFrameInfo.STREAM_TYPE_TYPE2;
+ break;
+ default:
+ streamType = SyncFrameInfo.STREAM_TYPE_UNDEFINED;
+ break;
+ }
+ data.skipBits(3); // substreamid
+ frameSize = (data.readBits(11) + 1) * 2; // See frmsiz in subsection E.1.3.1.3.
+ int fscod = data.readBits(2);
+ int audioBlocks;
+ int numblkscod;
+ if (fscod == 3) {
+ numblkscod = 3;
+ sampleRate = SAMPLE_RATE_BY_FSCOD2[data.readBits(2)];
+ audioBlocks = 6;
+ } else {
+ numblkscod = data.readBits(2);
+ audioBlocks = BLOCKS_PER_SYNCFRAME_BY_NUMBLKSCOD[numblkscod];
+ sampleRate = SAMPLE_RATE_BY_FSCOD[fscod];
+ }
+ sampleCount = AUDIO_SAMPLES_PER_AUDIO_BLOCK * audioBlocks;
+ acmod = data.readBits(3);
+ lfeon = data.readBit();
+ channelCount = CHANNEL_COUNT_BY_ACMOD[acmod] + (lfeon ? 1 : 0);
+ data.skipBits(5 + 5); // bsid, dialnorm
+ if (data.readBit()) { // compre
+ data.skipBits(8); // compr
+ }
+ if (acmod == 0) {
+ data.skipBits(5); // dialnorm2
+ if (data.readBit()) { // compr2e
+ data.skipBits(8); // compr2
+ }
+ }
+ if (streamType == SyncFrameInfo.STREAM_TYPE_TYPE1 && data.readBit()) { // chanmape
+ data.skipBits(16); // chanmap
+ }
+ if (data.readBit()) { // mixmdate
+ if (acmod > 2) {
+ data.skipBits(2); // dmixmod
+ }
+ if ((acmod & 0x01) != 0 && acmod > 2) {
+ data.skipBits(3 + 3); // ltrtcmixlev, lorocmixlev
+ }
+ if ((acmod & 0x04) != 0) {
+ data.skipBits(6); // ltrtsurmixlev, lorosurmixlev
+ }
+ if (lfeon && data.readBit()) { // lfemixlevcode
+ data.skipBits(5); // lfemixlevcod
+ }
+ if (streamType == SyncFrameInfo.STREAM_TYPE_TYPE0) {
+ if (data.readBit()) { // pgmscle
+ data.skipBits(6); //pgmscl
+ }
+ if (acmod == 0 && data.readBit()) { // pgmscl2e
+ data.skipBits(6); // pgmscl2
+ }
+ if (data.readBit()) { // extpgmscle
+ data.skipBits(6); // extpgmscl
+ }
+ int mixdef = data.readBits(2);
+ if (mixdef == 1) {
+ data.skipBits(1 + 1 + 3); // premixcmpsel, drcsrc, premixcmpscl
+ } else if (mixdef == 2) {
+ data.skipBits(12); // mixdata
+ } else if (mixdef == 3) {
+ int mixdeflen = data.readBits(5);
+ if (data.readBit()) { // mixdata2e
+ data.skipBits(1 + 1 + 3); // premixcmpsel, drcsrc, premixcmpscl
+ if (data.readBit()) { // extpgmlscle
+ data.skipBits(4); // extpgmlscl
+ }
+ if (data.readBit()) { // extpgmcscle
+ data.skipBits(4); // extpgmcscl
+ }
+ if (data.readBit()) { // extpgmrscle
+ data.skipBits(4); // extpgmrscl
+ }
+ if (data.readBit()) { // extpgmlsscle
+ data.skipBits(4); // extpgmlsscl
+ }
+ if (data.readBit()) { // extpgmrsscle
+ data.skipBits(4); // extpgmrsscl
+ }
+ if (data.readBit()) { // extpgmlfescle
+ data.skipBits(4); // extpgmlfescl
+ }
+ if (data.readBit()) { // dmixscle
+ data.skipBits(4); // dmixscl
+ }
+ if (data.readBit()) { // addche
+ if (data.readBit()) { // extpgmaux1scle
+ data.skipBits(4); // extpgmaux1scl
+ }
+ if (data.readBit()) { // extpgmaux2scle
+ data.skipBits(4); // extpgmaux2scl
+ }
+ }
+ }
+ if (data.readBit()) { // mixdata3e
+ data.skipBits(5); // spchdat
+ if (data.readBit()) { // addspchdate
+ data.skipBits(5 + 2); // spchdat1, spchan1att
+ if (data.readBit()) { // addspdat1e
+ data.skipBits(5 + 3); // spchdat2, spchan2att
+ }
+ }
+ }
+ data.skipBits(8 * (mixdeflen + 2)); // mixdata
+ data.byteAlign(); // mixdatafill
+ }
+ if (acmod < 2) {
+ if (data.readBit()) { // paninfoe
+ data.skipBits(8 + 6); // panmean, paninfo
+ }
+ if (acmod == 0) {
+ if (data.readBit()) { // paninfo2e
+ data.skipBits(8 + 6); // panmean2, paninfo2
+ }
+ }
+ }
+ if (data.readBit()) { // frmmixcfginfoe
+ if (numblkscod == 0) {
+ data.skipBits(5); // blkmixcfginfo[0]
+ } else {
+ for (int blk = 0; blk < audioBlocks; blk++) {
+ if (data.readBit()) { // blkmixcfginfoe
+ data.skipBits(5); // blkmixcfginfo[blk]
+ }
+ }
+ }
+ }
+ }
+ }
+ if (data.readBit()) { // infomdate
+ data.skipBits(3 + 1 + 1); // bsmod, copyrightb, origbs
+ if (acmod == 2) {
+ data.skipBits(2 + 2); // dsurmod, dheadphonmod
+ }
+ if (acmod >= 6) {
+ data.skipBits(2); // dsurexmod
+ }
+ if (data.readBit()) { // audioprodie
+ data.skipBits(5 + 2 + 1); // mixlevel, roomtyp, adconvtyp
+ }
+ if (acmod == 0 && data.readBit()) { // audioprodi2e
+ data.skipBits(5 + 2 + 1); // mixlevel2, roomtyp2, adconvtyp2
+ }
+ if (fscod < 3) {
+ data.skipBit(); // sourcefscod
+ }
+ }
+ if (streamType == SyncFrameInfo.STREAM_TYPE_TYPE0 && numblkscod != 3) {
+ data.skipBit(); // convsync
+ }
+ if (streamType == SyncFrameInfo.STREAM_TYPE_TYPE2
+ && (numblkscod == 3 || data.readBit())) { // blkid
+ data.skipBits(6); // frmsizecod
+ }
+ mimeType = MimeTypes.AUDIO_E_AC3;
+ if (data.readBit()) { // addbsie
+ int addbsil = data.readBits(6);
+ if (addbsil == 1 && data.readBits(8) == 1) { // addbsi
+ mimeType = MimeTypes.AUDIO_E_AC3_JOC;
+ }
+ }
+ } else /* is AC-3 */ {
+ mimeType = MimeTypes.AUDIO_AC3;
+ data.skipBits(16 + 16); // syncword, crc1
+ int fscod = data.readBits(2);
+ if (fscod == 3) {
+ // fscod '11' indicates that the decoder should not attempt to decode audio. We invalidate
+ // the mime type to prevent association with a renderer.
+ mimeType = null;
+ }
+ int frmsizecod = data.readBits(6);
+ frameSize = getAc3SyncframeSize(fscod, frmsizecod);
+ data.skipBits(5 + 3); // bsid, bsmod
+ acmod = data.readBits(3);
+ if ((acmod & 0x01) != 0 && acmod != 1) {
+ data.skipBits(2); // cmixlev
+ }
+ if ((acmod & 0x04) != 0) {
+ data.skipBits(2); // surmixlev
+ }
+ if (acmod == 2) {
+ data.skipBits(2); // dsurmod
+ }
+ sampleRate =
+ fscod < SAMPLE_RATE_BY_FSCOD.length ? SAMPLE_RATE_BY_FSCOD[fscod] : Format.NO_VALUE;
+ sampleCount = AC3_SYNCFRAME_AUDIO_SAMPLE_COUNT;
+ lfeon = data.readBit();
+ channelCount = CHANNEL_COUNT_BY_ACMOD[acmod] + (lfeon ? 1 : 0);
+ }
+ return new SyncFrameInfo(
+ mimeType, streamType, channelCount, sampleRate, frameSize, sampleCount);
+ }
+
+ /**
+ * Returns the size in bytes of the given (E-)AC-3 syncframe.
+ *
+ * @param data The syncframe to parse.
+ * @return The syncframe size in bytes, or {@link C#LENGTH_UNSET} if the input is invalid.
+ */
+ public static int parseAc3SyncframeSize(byte[] data) {
+ if (data.length < 6) {
+ return C.LENGTH_UNSET;
+ }
+ // Parse the bitstream ID for AC-3 and E-AC-3 (see subsections 4.3, E.1.2 and E.1.3.1.6).
+ boolean isEac3 = ((data[5] & 0xF8) >> 3) > 10;
+ if (isEac3) {
+ int frmsiz = (data[2] & 0x07) << 8; // Most significant 3 bits.
+ frmsiz |= data[3] & 0xFF; // Least significant 8 bits.
+ return (frmsiz + 1) * 2; // See frmsiz in subsection E.1.3.1.3.
+ } else {
+ int fscod = (data[4] & 0xC0) >> 6;
+ int frmsizecod = data[4] & 0x3F;
+ return getAc3SyncframeSize(fscod, frmsizecod);
+ }
+ }
+
+ /**
+ * Reads the number of audio samples represented by the given (E-)AC-3 syncframe. The buffer's
+ * position is not modified.
+ *
+ * @param buffer The {@link ByteBuffer} from which to read the syncframe.
+ * @return The number of audio samples represented by the syncframe.
+ */
+ public static int parseAc3SyncframeAudioSampleCount(ByteBuffer buffer) {
+ // Parse the bitstream ID for AC-3 and E-AC-3 (see subsections 4.3, E.1.2 and E.1.3.1.6).
+ boolean isEac3 = ((buffer.get(buffer.position() + 5) & 0xF8) >> 3) > 10;
+ if (isEac3) {
+ int fscod = (buffer.get(buffer.position() + 4) & 0xC0) >> 6;
+ int numblkscod = fscod == 0x03 ? 3 : (buffer.get(buffer.position() + 4) & 0x30) >> 4;
+ return BLOCKS_PER_SYNCFRAME_BY_NUMBLKSCOD[numblkscod] * AUDIO_SAMPLES_PER_AUDIO_BLOCK;
+ } else {
+ return AC3_SYNCFRAME_AUDIO_SAMPLE_COUNT;
+ }
+ }
+
+ /**
+ * Returns the offset relative to the buffer's position of the start of a TrueHD syncframe, or
+ * {@link C#INDEX_UNSET} if no syncframe was found. The buffer's position is not modified.
+ *
+ * @param buffer The {@link ByteBuffer} within which to find a syncframe.
+ * @return The offset relative to the buffer's position of the start of a TrueHD syncframe, or
+ * {@link C#INDEX_UNSET} if no syncframe was found.
+ */
+ public static int findTrueHdSyncframeOffset(ByteBuffer buffer) {
+ int startIndex = buffer.position();
+ int endIndex = buffer.limit() - TRUEHD_SYNCFRAME_PREFIX_LENGTH;
+ for (int i = startIndex; i <= endIndex; i++) {
+ // The syncword ends with 0xBA for TrueHD or 0xBB for MLP.
+ if ((buffer.getInt(i + 4) & 0xFEFFFFFF) == 0xBA6F72F8) {
+ return i - startIndex;
+ }
+ }
+ return C.INDEX_UNSET;
+ }
+
+ /**
+ * Returns the number of audio samples represented by the given TrueHD syncframe, or 0 if the
+ * buffer is not the start of a syncframe.
+ *
+ * @param syncframe The bytes from which to read the syncframe. Must be at least {@link
+ * #TRUEHD_SYNCFRAME_PREFIX_LENGTH} bytes long.
+ * @return The number of audio samples represented by the syncframe, or 0 if the buffer doesn't
+ * contain the start of a syncframe.
+ */
+ public static int parseTrueHdSyncframeAudioSampleCount(byte[] syncframe) {
+ // See "Dolby TrueHD (MLP) high-level bitstream description" on the Dolby developer site,
+ // subsections 2.2 and 4.2.1. The syncword ends with 0xBA for TrueHD or 0xBB for MLP.
+ if (syncframe[4] != (byte) 0xF8
+ || syncframe[5] != (byte) 0x72
+ || syncframe[6] != (byte) 0x6F
+ || (syncframe[7] & 0xFE) != 0xBA) {
+ return 0;
+ }
+ boolean isMlp = (syncframe[7] & 0xFF) == 0xBB;
+ return 40 << ((syncframe[isMlp ? 9 : 8] >> 4) & 0x07);
+ }
+
+ /**
+ * Reads the number of audio samples represented by a TrueHD syncframe. The buffer's position is
+ * not modified.
+ *
+ * @param buffer The {@link ByteBuffer} from which to read the syncframe.
+ * @param offset The offset of the start of the syncframe relative to the buffer's position.
+ * @return The number of audio samples represented by the syncframe.
+ */
+ public static int parseTrueHdSyncframeAudioSampleCount(ByteBuffer buffer, int offset) {
+ // TODO: Link to specification if available.
+ boolean isMlp = (buffer.get(buffer.position() + offset + 7) & 0xFF) == 0xBB;
+ return 40 << ((buffer.get(buffer.position() + offset + (isMlp ? 9 : 8)) >> 4) & 0x07);
+ }
+
+ private static int getAc3SyncframeSize(int fscod, int frmsizecod) {
+ int halfFrmsizecod = frmsizecod / 2;
+ if (fscod < 0 || fscod >= SAMPLE_RATE_BY_FSCOD.length || frmsizecod < 0
+ || halfFrmsizecod >= SYNCFRAME_SIZE_WORDS_BY_HALF_FRMSIZECOD_44_1.length) {
+ // Invalid values provided.
+ return C.LENGTH_UNSET;
+ }
+ int sampleRate = SAMPLE_RATE_BY_FSCOD[fscod];
+ if (sampleRate == 44100) {
+ return 2 * (SYNCFRAME_SIZE_WORDS_BY_HALF_FRMSIZECOD_44_1[halfFrmsizecod] + (frmsizecod % 2));
+ }
+ int bitrate = BITRATE_BY_HALF_FRMSIZECOD[halfFrmsizecod];
+ if (sampleRate == 32000) {
+ return 6 * bitrate;
+ } else { // sampleRate == 48000
+ return 4 * bitrate;
+ }
+ }
+
+ private Ac3Util() {}
+
+}
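
For context, a minimal sketch (not part of this patch) of how extractor code might drive these Ac3Util entry points; readNextSyncframe() is a hypothetical helper and error handling is elided:

byte[] syncframe = readNextSyncframe(); // hypothetical: one complete (E-)AC-3 syncframe
int frameSize = Ac3Util.parseAc3SyncframeSize(syncframe);
if (frameSize != C.LENGTH_UNSET) {
  Ac3Util.SyncFrameInfo info =
      Ac3Util.parseAc3SyncframeInfo(new ParsableBitArray(syncframe));
  // info.mimeType distinguishes AC-3 from E-AC-3 (and is null when fscod == 3 marks
  // the audio undecodable); sampleRate, channelCount and sampleCount describe the frame.
}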
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Ac4Util.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Ac4Util.java
new file mode 100644
index 0000000000..a921346e90
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Ac4Util.java
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.drm.DrmInitData;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.MimeTypes;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.ParsableBitArray;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.ParsableByteArray;
+import java.nio.ByteBuffer;
+
+/** Utility methods for parsing AC-4 frames, which are access units in AC-4 bitstreams. */
+public final class Ac4Util {
+
+ /** Holds sample format information as presented by a syncframe header. */
+ public static final class SyncFrameInfo {
+
+ /** The bitstream version. */
+ public final int bitstreamVersion;
+ /** The audio sampling rate in Hz. */
+ public final int sampleRate;
+ /** The number of audio channels */
+ public final int channelCount;
+ /** The size of the frame. */
+ public final int frameSize;
+ /** Number of audio samples in the frame. */
+ public final int sampleCount;
+
+ private SyncFrameInfo(
+ int bitstreamVersion, int channelCount, int sampleRate, int frameSize, int sampleCount) {
+ this.bitstreamVersion = bitstreamVersion;
+ this.channelCount = channelCount;
+ this.sampleRate = sampleRate;
+ this.frameSize = frameSize;
+ this.sampleCount = sampleCount;
+ }
+ }
+
+ public static final int AC40_SYNCWORD = 0xAC40;
+ public static final int AC41_SYNCWORD = 0xAC41;
+
+ /** The channel count of an AC-4 stream. */
+ // TODO: Parse AC-4 stream channel count.
+ private static final int CHANNEL_COUNT_2 = 2;
+ /**
+ * The AC-4 sync frame header size used by the extractor. The seven bytes are 0xAC, 0x40, 0xFF,
+ * 0xFF, sizeByte1, sizeByte2, sizeByte3. See ETSI TS 103 190-1 V1.3.1, Annex G.
+ */
+ public static final int SAMPLE_HEADER_SIZE = 7;
+ /**
+ * The header size for the AC-4 parser. Only needs to be as big as we need to read, not the full
+ * header size.
+ */
+ public static final int HEADER_SIZE_FOR_PARSER = 16;
+ /**
+ * Number of audio samples in the frame. Defined in IEC61937-14:2017 table 5 and 6. This table
+ * provides the number of samples per frame at the playback sampling frequency of 48 kHz. For 44.1
+ * kHz, only frame_rate_index(13) is valid and the corresponding sample count is 2048.
+ */
+ private static final int[] SAMPLE_COUNT =
+ new int[] {
+ /* [ 0] 23.976 fps */ 2002,
+ /* [ 1] 24 fps */ 2000,
+ /* [ 2] 25 fps */ 1920,
+ /* [ 3] 29.97 fps */ 1601, // 1601 | 1602 | 1601 | 1602 | 1602
+ /* [ 4] 30 fps */ 1600,
+ /* [ 5] 47.95 fps */ 1001,
+ /* [ 6] 48 fps */ 1000,
+ /* [ 7] 50 fps */ 960,
+ /* [ 8] 59.94 fps */ 800, // 800 | 801 | 801 | 801 | 801
+ /* [ 9] 60 fps */ 800,
+ /* [10] 100 fps */ 480,
+ /* [11] 119.88 fps */ 400, // 400 | 400 | 401 | 400 | 401
+ /* [12] 120 fps */ 400,
+ /* [13] 23.438 fps */ 2048
+ };
+
+ /**
+ * Returns the AC-4 format given {@code data} containing the AC4SpecificBox according to ETSI TS
+ * 103 190-1 Annex E. The reading position of {@code data} will be modified.
+ *
+ * @param data The AC4SpecificBox to parse.
+ * @param trackId The track identifier to set on the format.
+ * @param language The language to set on the format.
+ * @param drmInitData {@link DrmInitData} to be included in the format.
+ * @return The AC-4 format parsed from data in the header.
+ */
+ public static Format parseAc4AnnexEFormat(
+ ParsableByteArray data, String trackId, String language, @Nullable DrmInitData drmInitData) {
+ data.skipBytes(1); // ac4_dsi_version, bitstream_version[0:5]
+ int sampleRate = ((data.readUnsignedByte() & 0x20) >> 5 == 1) ? 48000 : 44100;
+ return Format.createAudioSampleFormat(
+ trackId,
+ MimeTypes.AUDIO_AC4,
+ /* codecs= */ null,
+ /* bitrate= */ Format.NO_VALUE,
+ /* maxInputSize= */ Format.NO_VALUE,
+ CHANNEL_COUNT_2,
+ sampleRate,
+ /* initializationData= */ null,
+ drmInitData,
+ /* selectionFlags= */ 0,
+ language);
+ }
+
+ /**
+ * Returns AC-4 format information given {@code data} containing a syncframe. The reading position
+ * of {@code data} will be modified.
+ *
+ * @param data The data to parse, positioned at the start of the syncframe.
+ * @return The AC-4 format data parsed from the header.
+ */
+ public static SyncFrameInfo parseAc4SyncframeInfo(ParsableBitArray data) {
+ int headerSize = 0;
+ int syncWord = data.readBits(16);
+ headerSize += 2;
+ int frameSize = data.readBits(16);
+ headerSize += 2;
+ if (frameSize == 0xFFFF) {
+ frameSize = data.readBits(24);
+ headerSize += 3; // Extended frame_size
+ }
+ frameSize += headerSize;
+ if (syncWord == AC41_SYNCWORD) {
+ frameSize += 2; // crc_word
+ }
+ int bitstreamVersion = data.readBits(2);
+ if (bitstreamVersion == 3) {
+ bitstreamVersion += readVariableBits(data, /* bitsPerRead= */ 2);
+ }
+ int sequenceCounter = data.readBits(10);
+ if (data.readBit()) { // b_wait_frames
+ if (data.readBits(3) > 0) { // wait_frames
+ data.skipBits(2); // reserved
+ }
+ }
+ int sampleRate = data.readBit() ? 48000 : 44100;
+ int frameRateIndex = data.readBits(4);
+ int sampleCount = 0;
+ if (sampleRate == 44100 && frameRateIndex == 13) {
+ sampleCount = SAMPLE_COUNT[frameRateIndex];
+ } else if (sampleRate == 48000 && frameRateIndex < SAMPLE_COUNT.length) {
+ sampleCount = SAMPLE_COUNT[frameRateIndex];
+ switch (sequenceCounter % 5) {
+ case 1: // fall through
+ case 3:
+ if (frameRateIndex == 3 || frameRateIndex == 8) {
+ sampleCount++;
+ }
+ break;
+ case 2:
+ if (frameRateIndex == 8 || frameRateIndex == 11) {
+ sampleCount++;
+ }
+ break;
+ case 4:
+ if (frameRateIndex == 3 || frameRateIndex == 8 || frameRateIndex == 11) {
+ sampleCount++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return new SyncFrameInfo(bitstreamVersion, CHANNEL_COUNT_2, sampleRate, frameSize, sampleCount);
+ }
+
+ /**
+ * Returns the size in bytes of the given AC-4 syncframe.
+ *
+ * @param data The syncframe to parse.
+ * @param syncword The syncword value for the syncframe.
+ * @return The syncframe size in bytes, or {@link C#LENGTH_UNSET} if the input is invalid.
+ */
+ public static int parseAc4SyncframeSize(byte[] data, int syncword) {
+ if (data.length < 7) {
+ return C.LENGTH_UNSET;
+ }
+ int headerSize = 2; // syncword
+ int frameSize = ((data[2] & 0xFF) << 8) | (data[3] & 0xFF);
+ headerSize += 2;
+ if (frameSize == 0xFFFF) {
+ frameSize = ((data[4] & 0xFF) << 16) | ((data[5] & 0xFF) << 8) | (data[6] & 0xFF);
+ headerSize += 3;
+ }
+ if (syncword == AC41_SYNCWORD) {
+ headerSize += 2;
+ }
+ frameSize += headerSize;
+ return frameSize;
+ }
+
+ /**
+ * Reads the number of audio samples represented by the given AC-4 syncframe. The buffer's
+ * position is not modified.
+ *
+ * @param buffer The {@link ByteBuffer} from which to read the syncframe.
+ * @return The number of audio samples represented by the syncframe.
+ */
+ public static int parseAc4SyncframeAudioSampleCount(ByteBuffer buffer) {
+ byte[] bufferBytes = new byte[HEADER_SIZE_FOR_PARSER];
+ int position = buffer.position();
+ buffer.get(bufferBytes);
+ buffer.position(position);
+ return parseAc4SyncframeInfo(new ParsableBitArray(bufferBytes)).sampleCount;
+ }
+
+ /** Populates {@code buffer} with an AC-4 sample header for a sample of the specified size. */
+ public static void getAc4SampleHeader(int size, ParsableByteArray buffer) {
+ // See ETSI TS 103 190-1 V1.3.1, Annex G.
+ buffer.reset(SAMPLE_HEADER_SIZE);
+ buffer.data[0] = (byte) 0xAC;
+ buffer.data[1] = 0x40;
+ buffer.data[2] = (byte) 0xFF;
+ buffer.data[3] = (byte) 0xFF;
+ buffer.data[4] = (byte) ((size >> 16) & 0xFF);
+ buffer.data[5] = (byte) ((size >> 8) & 0xFF);
+ buffer.data[6] = (byte) (size & 0xFF);
+ }
+
+ private static int readVariableBits(ParsableBitArray data, int bitsPerRead) {
+ int value = 0;
+ while (true) {
+ value += data.readBits(bitsPerRead);
+ if (!data.readBit()) {
+ break;
+ }
+ value++;
+ value <<= bitsPerRead;
+ }
+ return value;
+ }
+
+ private Ac4Util() {}
+}
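
A sketch of the Annex G header that getAc4SampleHeader() emits; the sample size below is an assumed value for illustration, not from this patch:

int sampleSize = 1024; // assumed AC-4 access-unit size, for illustration
ParsableByteArray header = new ParsableByteArray(Ac4Util.SAMPLE_HEADER_SIZE);
Ac4Util.getAc4SampleHeader(sampleSize, header);
// header.data now holds 0xAC 0x40 0xFF 0xFF followed by sampleSize as a
// 24-bit big-endian value, matching the layout documented above.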
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioAttributes.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioAttributes.java
new file mode 100644
index 0000000000..d0f3fcb438
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioAttributes.java
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import android.annotation.TargetApi;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+
+/**
+ * Attributes for audio playback, which configure the underlying platform
+ * {@link android.media.AudioTrack}.
+ * <p>
+ * To set the audio attributes, create an instance using the {@link Builder} and either pass it to
+ * {@link org.mozilla.thirdparty.com.google.android.exoplayer2.SimpleExoPlayer#setAudioAttributes(AudioAttributes)} or
+ * send a message of type {@link C#MSG_SET_AUDIO_ATTRIBUTES} to the audio renderers.
+ * <p>
+ * This class is based on {@link android.media.AudioAttributes}, but can be used on all supported
+ * API versions.
+ */
+public final class AudioAttributes {
+
+ public static final AudioAttributes DEFAULT = new Builder().build();
+
+ /**
+ * Builder for {@link AudioAttributes}.
+ */
+ public static final class Builder {
+
+ private @C.AudioContentType int contentType;
+ private @C.AudioFlags int flags;
+ private @C.AudioUsage int usage;
+ private @C.AudioAllowedCapturePolicy int allowedCapturePolicy;
+
+ /**
+ * Creates a new builder for {@link AudioAttributes}.
+ *
+ * <p>By default the content type is {@link C#CONTENT_TYPE_UNKNOWN}, usage is {@link
+ * C#USAGE_MEDIA}, capture policy is {@link C#ALLOW_CAPTURE_BY_ALL} and no flags are set.
+ */
+ public Builder() {
+ contentType = C.CONTENT_TYPE_UNKNOWN;
+ flags = 0;
+ usage = C.USAGE_MEDIA;
+ allowedCapturePolicy = C.ALLOW_CAPTURE_BY_ALL;
+ }
+
+ /**
+ * @see android.media.AudioAttributes.Builder#setContentType(int)
+ */
+ public Builder setContentType(@C.AudioContentType int contentType) {
+ this.contentType = contentType;
+ return this;
+ }
+
+ /**
+ * @see android.media.AudioAttributes.Builder#setFlags(int)
+ */
+ public Builder setFlags(@C.AudioFlags int flags) {
+ this.flags = flags;
+ return this;
+ }
+
+ /**
+ * @see android.media.AudioAttributes.Builder#setUsage(int)
+ */
+ public Builder setUsage(@C.AudioUsage int usage) {
+ this.usage = usage;
+ return this;
+ }
+
+ /** See {@link android.media.AudioAttributes.Builder#setAllowedCapturePolicy(int)}. */
+ public Builder setAllowedCapturePolicy(@C.AudioAllowedCapturePolicy int allowedCapturePolicy) {
+ this.allowedCapturePolicy = allowedCapturePolicy;
+ return this;
+ }
+
+ /** Creates an {@link AudioAttributes} instance from this builder. */
+ public AudioAttributes build() {
+ return new AudioAttributes(contentType, flags, usage, allowedCapturePolicy);
+ }
+
+ }
+
+ public final @C.AudioContentType int contentType;
+ public final @C.AudioFlags int flags;
+ public final @C.AudioUsage int usage;
+ public final @C.AudioAllowedCapturePolicy int allowedCapturePolicy;
+
+ @Nullable private android.media.AudioAttributes audioAttributesV21;
+
+ private AudioAttributes(
+ @C.AudioContentType int contentType,
+ @C.AudioFlags int flags,
+ @C.AudioUsage int usage,
+ @C.AudioAllowedCapturePolicy int allowedCapturePolicy) {
+ this.contentType = contentType;
+ this.flags = flags;
+ this.usage = usage;
+ this.allowedCapturePolicy = allowedCapturePolicy;
+ }
+
+ /**
+ * Returns a {@link android.media.AudioAttributes} from this instance.
+ *
+ * <p>Field {@link AudioAttributes#allowedCapturePolicy} is ignored for API levels prior to 29.
+ */
+ @TargetApi(21)
+ public android.media.AudioAttributes getAudioAttributesV21() {
+ if (audioAttributesV21 == null) {
+ android.media.AudioAttributes.Builder builder =
+ new android.media.AudioAttributes.Builder()
+ .setContentType(contentType)
+ .setFlags(flags)
+ .setUsage(usage);
+ if (Util.SDK_INT >= 29) {
+ builder.setAllowedCapturePolicy(allowedCapturePolicy);
+ }
+ audioAttributesV21 = builder.build();
+ }
+ return audioAttributesV21;
+ }
+
+ @Override
+ public boolean equals(@Nullable Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ AudioAttributes other = (AudioAttributes) obj;
+ return this.contentType == other.contentType
+ && this.flags == other.flags
+ && this.usage == other.usage
+ && this.allowedCapturePolicy == other.allowedCapturePolicy;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = 17;
+ result = 31 * result + contentType;
+ result = 31 * result + flags;
+ result = 31 * result + usage;
+ result = 31 * result + allowedCapturePolicy;
+ return result;
+ }
+
+}
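
A sketch of the Builder flow the class Javadoc describes, assuming a SimpleExoPlayer instance named player:

AudioAttributes audioAttributes =
    new AudioAttributes.Builder()
        .setContentType(C.CONTENT_TYPE_MUSIC)
        .setUsage(C.USAGE_MEDIA)
        .build();
player.setAudioAttributes(audioAttributes); // player: assumed SimpleExoPlayer instance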
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioCapabilities.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioCapabilities.java
new file mode 100644
index 0000000000..f985891465
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioCapabilities.java
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import android.annotation.SuppressLint;
+import android.annotation.TargetApi;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.net.Uri;
+import android.provider.Settings.Global;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.util.Arrays;
+
+/** Represents the set of audio formats that a device is capable of playing. */
+@TargetApi(21)
+public final class AudioCapabilities {
+
+ private static final int DEFAULT_MAX_CHANNEL_COUNT = 8;
+
+ /** The minimum audio capabilities supported by all devices. */
+ public static final AudioCapabilities DEFAULT_AUDIO_CAPABILITIES =
+ new AudioCapabilities(new int[] {AudioFormat.ENCODING_PCM_16BIT}, DEFAULT_MAX_CHANNEL_COUNT);
+
+ /** Audio capabilities when the device specifies external surround sound. */
+ private static final AudioCapabilities EXTERNAL_SURROUND_SOUND_CAPABILITIES =
+ new AudioCapabilities(
+ new int[] {
+ AudioFormat.ENCODING_PCM_16BIT, AudioFormat.ENCODING_AC3, AudioFormat.ENCODING_E_AC3
+ },
+ DEFAULT_MAX_CHANNEL_COUNT);
+
+ /** Global settings key for devices that can specify external surround sound. */
+ private static final String EXTERNAL_SURROUND_SOUND_KEY = "external_surround_sound_enabled";
+
+ /**
+ * Returns the current audio capabilities for the device.
+ *
+ * @param context A context for obtaining the current audio capabilities.
+ * @return The current audio capabilities for the device.
+ */
+ @SuppressWarnings("InlinedApi")
+ public static AudioCapabilities getCapabilities(Context context) {
+ Intent intent =
+ context.registerReceiver(
+ /* receiver= */ null, new IntentFilter(AudioManager.ACTION_HDMI_AUDIO_PLUG));
+ return getCapabilities(context, intent);
+ }
+
+ @SuppressLint("InlinedApi")
+ /* package */ static AudioCapabilities getCapabilities(Context context, @Nullable Intent intent) {
+ if (deviceMaySetExternalSurroundSoundGlobalSetting()
+ && Global.getInt(context.getContentResolver(), EXTERNAL_SURROUND_SOUND_KEY, 0) == 1) {
+ return EXTERNAL_SURROUND_SOUND_CAPABILITIES;
+ }
+ if (intent == null || intent.getIntExtra(AudioManager.EXTRA_AUDIO_PLUG_STATE, 0) == 0) {
+ return DEFAULT_AUDIO_CAPABILITIES;
+ }
+ return new AudioCapabilities(
+ intent.getIntArrayExtra(AudioManager.EXTRA_ENCODINGS),
+ intent.getIntExtra(
+ AudioManager.EXTRA_MAX_CHANNEL_COUNT, /* defaultValue= */ DEFAULT_MAX_CHANNEL_COUNT));
+ }
+
+ /**
+ * Returns the global settings {@link Uri} used by the device to specify external surround sound,
+ * or null if the device does not support this functionality.
+ */
+ @Nullable
+ /* package */ static Uri getExternalSurroundSoundGlobalSettingUri() {
+ return deviceMaySetExternalSurroundSoundGlobalSetting()
+ ? Global.getUriFor(EXTERNAL_SURROUND_SOUND_KEY)
+ : null;
+ }
+
+ private final int[] supportedEncodings;
+ private final int maxChannelCount;
+
+ /**
+ * Constructs new audio capabilities based on a set of supported encodings and a maximum channel
+ * count.
+ *
+ * <p>Applications should generally call {@link #getCapabilities(Context)} to obtain an instance
+ * based on the capabilities advertised by the platform, rather than calling this constructor.
+ *
+ * @param supportedEncodings Supported audio encodings from {@link android.media.AudioFormat}'s
+ * {@code ENCODING_*} constants. Passing {@code null} indicates that no encodings are
+ * supported.
+ * @param maxChannelCount The maximum number of audio channels that can be played simultaneously.
+ */
+ public AudioCapabilities(@Nullable int[] supportedEncodings, int maxChannelCount) {
+ if (supportedEncodings != null) {
+ this.supportedEncodings = Arrays.copyOf(supportedEncodings, supportedEncodings.length);
+ Arrays.sort(this.supportedEncodings);
+ } else {
+ this.supportedEncodings = new int[0];
+ }
+ this.maxChannelCount = maxChannelCount;
+ }
+
+ /**
+ * Returns whether this device supports playback of the specified audio {@code encoding}.
+ *
+ * @param encoding One of {@link android.media.AudioFormat}'s {@code ENCODING_*} constants.
+ * @return Whether this device supports playback of the specified audio {@code encoding}.
+ */
+ public boolean supportsEncoding(int encoding) {
+ return Arrays.binarySearch(supportedEncodings, encoding) >= 0;
+ }
+
+ /**
+ * Returns the maximum number of channels the device can play at the same time.
+ */
+ public int getMaxChannelCount() {
+ return maxChannelCount;
+ }
+
+ @Override
+ public boolean equals(@Nullable Object other) {
+ if (this == other) {
+ return true;
+ }
+ if (!(other instanceof AudioCapabilities)) {
+ return false;
+ }
+ AudioCapabilities audioCapabilities = (AudioCapabilities) other;
+ return Arrays.equals(supportedEncodings, audioCapabilities.supportedEncodings)
+ && maxChannelCount == audioCapabilities.maxChannelCount;
+ }
+
+ @Override
+ public int hashCode() {
+ return maxChannelCount + 31 * Arrays.hashCode(supportedEncodings);
+ }
+
+ @Override
+ public String toString() {
+ return "AudioCapabilities[maxChannelCount=" + maxChannelCount
+ + ", supportedEncodings=" + Arrays.toString(supportedEncodings) + "]";
+ }
+
+ private static boolean deviceMaySetExternalSurroundSoundGlobalSetting() {
+ return Util.SDK_INT >= 17 && "Amazon".equals(Util.MANUFACTURER);
+ }
+}
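
A sketch of how a sink might consult these capabilities before choosing a passthrough encoding; context is an assumed android.content.Context and AudioFormat is android.media.AudioFormat:

AudioCapabilities capabilities = AudioCapabilities.getCapabilities(context);
boolean canPassthroughEac3 = capabilities.supportsEncoding(AudioFormat.ENCODING_E_AC3);
int maxChannels = capabilities.getMaxChannelCount();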
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioCapabilitiesReceiver.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioCapabilitiesReceiver.java
new file mode 100644
index 0000000000..d96fd32f53
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioCapabilitiesReceiver.java
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import android.content.BroadcastReceiver;
+import android.content.ContentResolver;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.database.ContentObserver;
+import android.media.AudioManager;
+import android.net.Uri;
+import android.os.Handler;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Assertions;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+
+/**
+ * Receives broadcast events indicating changes to the device's audio capabilities, notifying a
+ * {@link Listener} when audio capability changes occur.
+ */
+public final class AudioCapabilitiesReceiver {
+
+ /**
+ * Listener notified when audio capabilities change.
+ */
+ public interface Listener {
+
+ /**
+ * Called when the audio capabilities change.
+ *
+ * @param audioCapabilities The current audio capabilities for the device.
+ */
+ void onAudioCapabilitiesChanged(AudioCapabilities audioCapabilities);
+
+ }
+
+ private final Context context;
+ private final Listener listener;
+ private final Handler handler;
+ @Nullable private final BroadcastReceiver receiver;
+ @Nullable private final ExternalSurroundSoundSettingObserver externalSurroundSoundSettingObserver;
+
+ /* package */ @Nullable AudioCapabilities audioCapabilities;
+ private boolean registered;
+
+ /**
+ * @param context A context for registering the receiver.
+ * @param listener The listener to notify when audio capabilities change.
+ */
+ public AudioCapabilitiesReceiver(Context context, Listener listener) {
+ context = context.getApplicationContext();
+ this.context = context;
+ this.listener = Assertions.checkNotNull(listener);
+ handler = new Handler(Util.getLooper());
+ receiver = Util.SDK_INT >= 21 ? new HdmiAudioPlugBroadcastReceiver() : null;
+ Uri externalSurroundSoundUri = AudioCapabilities.getExternalSurroundSoundGlobalSettingUri();
+ externalSurroundSoundSettingObserver =
+ externalSurroundSoundUri != null
+ ? new ExternalSurroundSoundSettingObserver(
+ handler, context.getContentResolver(), externalSurroundSoundUri)
+ : null;
+ }
+
+ /**
+ * Registers the receiver, meaning it will notify the listener when audio capability changes
+ * occur. The current audio capabilities will be returned. It is important to call
+ * {@link #unregister} when the receiver is no longer required.
+ *
+ * @return The current audio capabilities for the device.
+ */
+ @SuppressWarnings("InlinedApi")
+ public AudioCapabilities register() {
+ if (registered) {
+ return Assertions.checkNotNull(audioCapabilities);
+ }
+ registered = true;
+ if (externalSurroundSoundSettingObserver != null) {
+ externalSurroundSoundSettingObserver.register();
+ }
+ Intent stickyIntent = null;
+ if (receiver != null) {
+ IntentFilter intentFilter = new IntentFilter(AudioManager.ACTION_HDMI_AUDIO_PLUG);
+ stickyIntent =
+ context.registerReceiver(
+ receiver, intentFilter, /* broadcastPermission= */ null, handler);
+ }
+ audioCapabilities = AudioCapabilities.getCapabilities(context, stickyIntent);
+ return audioCapabilities;
+ }
+
+ /**
+ * Unregisters the receiver, meaning it will no longer notify the listener when audio capability
+ * changes occur.
+ */
+ public void unregister() {
+ if (!registered) {
+ return;
+ }
+ audioCapabilities = null;
+ if (receiver != null) {
+ context.unregisterReceiver(receiver);
+ }
+ if (externalSurroundSoundSettingObserver != null) {
+ externalSurroundSoundSettingObserver.unregister();
+ }
+ registered = false;
+ }
+
+ private void onNewAudioCapabilities(AudioCapabilities newAudioCapabilities) {
+ if (registered && !newAudioCapabilities.equals(audioCapabilities)) {
+ audioCapabilities = newAudioCapabilities;
+ listener.onAudioCapabilitiesChanged(newAudioCapabilities);
+ }
+ }
+
+ private final class HdmiAudioPlugBroadcastReceiver extends BroadcastReceiver {
+
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ if (!isInitialStickyBroadcast()) {
+ onNewAudioCapabilities(AudioCapabilities.getCapabilities(context, intent));
+ }
+ }
+ }
+
+ private final class ExternalSurroundSoundSettingObserver extends ContentObserver {
+
+ private final ContentResolver resolver;
+ private final Uri settingUri;
+
+ public ExternalSurroundSoundSettingObserver(
+ Handler handler, ContentResolver resolver, Uri settingUri) {
+ super(handler);
+ this.resolver = resolver;
+ this.settingUri = settingUri;
+ }
+
+ public void register() {
+ resolver.registerContentObserver(settingUri, /* notifyForDescendants= */ false, this);
+ }
+
+ public void unregister() {
+ resolver.unregisterContentObserver(this);
+ }
+
+ @Override
+ public void onChange(boolean selfChange) {
+ onNewAudioCapabilities(AudioCapabilities.getCapabilities(context));
+ }
+ }
+
+}
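
A sketch of the register/unregister lifecycle with a listener that just logs the latest capabilities; the calls are kept symmetric so the BroadcastReceiver and ContentObserver are not leaked:

AudioCapabilitiesReceiver.Listener listener =
    audioCapabilities -> android.util.Log.d("AudioCaps", audioCapabilities.toString());
AudioCapabilitiesReceiver receiver = new AudioCapabilitiesReceiver(context, listener);
AudioCapabilities initial = receiver.register(); // capabilities as of registration
// ... playback ...
receiver.unregister();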
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioDecoderException.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioDecoderException.java
new file mode 100644
index 0000000000..0f4ac159b9
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioDecoderException.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+/** Thrown when an audio decoder error occurs. */
+public class AudioDecoderException extends Exception {
+
+ /** @param message The detail message for this exception. */
+ public AudioDecoderException(String message) {
+ super(message);
+ }
+
+ /**
+ * @param message The detail message for this exception.
+ * @param cause the cause (which is saved for later retrieval by the {@link #getCause()} method).
+ * A <tt>null</tt> value is permitted, and indicates that the cause is nonexistent or unknown.
+ */
+ public AudioDecoderException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+}
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioListener.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioListener.java
new file mode 100644
index 0000000000..457f52b887
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioListener.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+/** A listener for changes in audio configuration. */
+public interface AudioListener {
+
+ /**
+ * Called when the audio session is set.
+ *
+ * @param audioSessionId The audio session id.
+ */
+ default void onAudioSessionId(int audioSessionId) {}
+
+ /**
+ * Called when the audio attributes change.
+ *
+ * @param audioAttributes The audio attributes.
+ */
+ default void onAudioAttributesChanged(AudioAttributes audioAttributes) {}
+
+ /**
+ * Called when the volume changes.
+ *
+ * @param volume The new volume, with 0 being silence and 1 being unity gain.
+ */
+ default void onVolumeChanged(float volume) {}
+}
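
Because every method has a default no-op body, implementations override only the callbacks they need. A hypothetical volume logger:

AudioListener volumeLogger = new AudioListener() {
  @Override
  public void onVolumeChanged(float volume) {
    android.util.Log.d("AudioListener", "volume=" + volume);
  }
};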
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioProcessor.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioProcessor.java
new file mode 100644
index 0000000000..e0814314ca
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioProcessor.java
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+/**
+ * Interface for audio processors, which take audio data as input and transform it, potentially
+ * modifying its channel count, encoding and/or sample rate.
+ *
+ * <p>In addition to being able to modify the format of audio, implementations may allow parameters
+ * to be set that affect the output audio and whether the processor is active/inactive.
+ */
+public interface AudioProcessor {
+
+ /** PCM audio format that may be handled by an audio processor. */
+ final class AudioFormat {
+ public static final AudioFormat NOT_SET =
+ new AudioFormat(
+ /* sampleRate= */ Format.NO_VALUE,
+ /* channelCount= */ Format.NO_VALUE,
+ /* encoding= */ Format.NO_VALUE);
+
+ /** The sample rate in Hertz. */
+ public final int sampleRate;
+ /** The number of interleaved channels. */
+ public final int channelCount;
+ /** The type of linear PCM encoding. */
+ @C.PcmEncoding public final int encoding;
+ /** The number of bytes used to represent one audio frame. */
+ public final int bytesPerFrame;
+
+ public AudioFormat(int sampleRate, int channelCount, @C.PcmEncoding int encoding) {
+ this.sampleRate = sampleRate;
+ this.channelCount = channelCount;
+ this.encoding = encoding;
+ bytesPerFrame =
+ Util.isEncodingLinearPcm(encoding)
+ ? Util.getPcmFrameSize(encoding, channelCount)
+ : Format.NO_VALUE;
+ }
+
+ @Override
+ public String toString() {
+ return "AudioFormat["
+ + "sampleRate="
+ + sampleRate
+ + ", channelCount="
+ + channelCount
+ + ", encoding="
+ + encoding
+ + ']';
+ }
+ }
+
+ /** Exception thrown when a processor can't be configured for a given input audio format. */
+ final class UnhandledAudioFormatException extends Exception {
+
+ public UnhandledAudioFormatException(AudioFormat inputAudioFormat) {
+ super("Unhandled format: " + inputAudioFormat);
+ }
+
+ }
+
+ /** An empty, direct {@link ByteBuffer}. */
+ ByteBuffer EMPTY_BUFFER = ByteBuffer.allocateDirect(0).order(ByteOrder.nativeOrder());
+
+ /**
+ * Configures the processor to process input audio with the specified format. After calling this
+ * method, call {@link #isActive()} to determine whether the audio processor is active. Returns
+ * the configured output audio format if this instance is active.
+ *
+ * <p>After calling this method, it is necessary to {@link #flush()} the processor to apply the
+ * new configuration. Before applying the new configuration, it is safe to queue input and get
+ * output in the old input/output formats. Call {@link #queueEndOfStream()} when no more input
+ * will be supplied in the old input format.
+ *
+ * @param inputAudioFormat The format of audio that will be queued after the next call to {@link
+ * #flush()}.
+ * @return The configured output audio format if this instance is {@link #isActive() active}.
+ * @throws UnhandledAudioFormatException Thrown if the specified format can't be handled as input.
+ */
+ AudioFormat configure(AudioFormat inputAudioFormat) throws UnhandledAudioFormatException;
+
+ /** Returns whether the processor is configured and will process input buffers. */
+ boolean isActive();
+
+ /**
+ * Queues audio data between the position and limit of the input {@code buffer} for processing.
+ * {@code buffer} must be a direct byte buffer with native byte order. Its contents are treated as
+ * read-only. Its position will be advanced by the number of bytes consumed (which may be zero).
+ * The caller retains ownership of the provided buffer. Calling this method invalidates any
+ * previous buffer returned by {@link #getOutput()}.
+ *
+ * @param buffer The input buffer to process.
+ */
+ void queueInput(ByteBuffer buffer);
+
+ /**
+ * Queues an end of stream signal. After this method has been called,
+ * {@link #queueInput(ByteBuffer)} may not be called until after the next call to
+ * {@link #flush()}. Calling {@link #getOutput()} will return any remaining output data. Multiple
+ * calls may be required to read all of the remaining output data. {@link #isEnded()} will return
+ * {@code true} once all remaining output data has been read.
+ */
+ void queueEndOfStream();
+
+ /**
+ * Returns a buffer containing processed output data between its position and limit. The buffer
+ * will always be a direct byte buffer with native byte order. Calling this method invalidates any
+ * previously returned buffer. The buffer will be empty if no output is available.
+ *
+ * @return A buffer containing processed output data between its position and limit.
+ */
+ ByteBuffer getOutput();
+
+ /**
+ * Returns whether this processor will return no more output from {@link #getOutput()} until it
+ * has been {@link #flush()}ed and more input has been queued.
+ */
+ boolean isEnded();
+
+ /**
+ * Clears any buffered data and pending output. If the audio processor is active, also prepares
+ * the audio processor to receive a new stream of input in the last configured (pending) format.
+ */
+ void flush();
+
+ /** Resets the processor to its unconfigured state, releasing any resources. */
+ void reset();
+}
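
The javadoc above implies a strict lifecycle: configure, flush to apply the pending format, alternate queueInput/getOutput, then queueEndOfStream and drain until isEnded. A sketch of a driver loop under those rules; consume is a hypothetical sink for processed data, the 44.1 kHz stereo 16-bit format is an arbitrary example, and input must be a direct buffer in native byte order:

    static void runThrough(AudioProcessor processor, ByteBuffer input)
        throws AudioProcessor.UnhandledAudioFormatException {
      processor.configure(
          new AudioProcessor.AudioFormat(
              /* sampleRate= */ 44_100, /* channelCount= */ 2, C.ENCODING_PCM_16BIT));
      if (!processor.isActive()) {
        return; // Inactive: the caller should bypass the processor entirely.
      }
      processor.flush(); // Apply the pending configuration before queueing input.
      while (input.hasRemaining()) {
        processor.queueInput(input); // Consumes some (possibly zero) bytes; position advances.
        consume(processor.getOutput()); // May be empty if no output is ready yet.
      }
      processor.queueEndOfStream();
      while (!processor.isEnded()) {
        consume(processor.getOutput()); // Drain any remaining output.
      }
      processor.reset();
    }

Note that queueInput may consume zero bytes, so a production loop would back off and drain output rather than spinning on a full processor.
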
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioRendererEventListener.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioRendererEventListener.java
new file mode 100644
index 0000000000..bb1ae72855
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioRendererEventListener.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import static org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util.castNonNull;
+
+import android.os.Handler;
+import android.os.SystemClock;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Renderer;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.decoder.DecoderCounters;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Assertions;
+
+/**
+ * Listener of audio {@link Renderer} events. All methods have no-op default implementations to
+ * allow selective overrides.
+ */
+public interface AudioRendererEventListener {
+
+ /**
+ * Called when the renderer is enabled.
+ *
+ * @param counters {@link DecoderCounters} that will be updated by the renderer for as long as it
+ * remains enabled.
+ */
+ default void onAudioEnabled(DecoderCounters counters) {}
+
+ /**
+ * Called when the audio session is set.
+ *
+ * @param audioSessionId The audio session id.
+ */
+ default void onAudioSessionId(int audioSessionId) {}
+
+ /**
+ * Called when a decoder is created.
+ *
+ * @param decoderName The decoder that was created.
+ * @param initializedTimestampMs {@link SystemClock#elapsedRealtime()} when initialization
+ * finished.
+ * @param initializationDurationMs The time taken to initialize the decoder in milliseconds.
+ */
+ default void onAudioDecoderInitialized(
+ String decoderName, long initializedTimestampMs, long initializationDurationMs) {}
+
+ /**
+ * Called when the format of the media being consumed by the renderer changes.
+ *
+ * @param format The new format.
+ */
+ default void onAudioInputFormatChanged(Format format) {}
+
+ /**
+ * Called when an {@link AudioSink} underrun occurs.
+ *
+ * @param bufferSize The size of the {@link AudioSink}'s buffer, in bytes.
+ * @param bufferSizeMs The size of the {@link AudioSink}'s buffer, in milliseconds, if it is
+ * configured for PCM output. {@link C#TIME_UNSET} if it is configured for passthrough output,
+ * as the buffered media can have a variable bitrate so the duration may be unknown.
+ * @param elapsedSinceLastFeedMs The time since the {@link AudioSink} was last fed data.
+ */
+ default void onAudioSinkUnderrun(
+ int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs) {}
+
+ /**
+ * Called when the renderer is disabled.
+ *
+ * @param counters {@link DecoderCounters} that were updated by the renderer.
+ */
+ default void onAudioDisabled(DecoderCounters counters) {}
+
+ /**
+   * Dispatches events to an {@link AudioRendererEventListener}.
+ */
+ final class EventDispatcher {
+
+ @Nullable private final Handler handler;
+ @Nullable private final AudioRendererEventListener listener;
+
+ /**
+     * @param handler A handler for dispatching events, or {@code null} if creating a dummy instance.
+     * @param listener The listener to which events should be dispatched, or {@code null} if creating
+     *     a dummy instance.
+ */
+ public EventDispatcher(@Nullable Handler handler,
+ @Nullable AudioRendererEventListener listener) {
+ this.handler = listener != null ? Assertions.checkNotNull(handler) : null;
+ this.listener = listener;
+ }
+
+ /**
+ * Invokes {@link AudioRendererEventListener#onAudioEnabled(DecoderCounters)}.
+ */
+ public void enabled(final DecoderCounters decoderCounters) {
+ if (handler != null) {
+ handler.post(() -> castNonNull(listener).onAudioEnabled(decoderCounters));
+ }
+ }
+
+ /**
+ * Invokes {@link AudioRendererEventListener#onAudioDecoderInitialized(String, long, long)}.
+ */
+ public void decoderInitialized(final String decoderName,
+ final long initializedTimestampMs, final long initializationDurationMs) {
+ if (handler != null) {
+ handler.post(
+ () ->
+ castNonNull(listener)
+ .onAudioDecoderInitialized(
+ decoderName, initializedTimestampMs, initializationDurationMs));
+ }
+ }
+
+ /**
+ * Invokes {@link AudioRendererEventListener#onAudioInputFormatChanged(Format)}.
+ */
+ public void inputFormatChanged(final Format format) {
+ if (handler != null) {
+ handler.post(() -> castNonNull(listener).onAudioInputFormatChanged(format));
+ }
+ }
+
+ /**
+ * Invokes {@link AudioRendererEventListener#onAudioSinkUnderrun(int, long, long)}.
+ */
+ public void audioTrackUnderrun(final int bufferSize, final long bufferSizeMs,
+ final long elapsedSinceLastFeedMs) {
+ if (handler != null) {
+ handler.post(
+ () ->
+ castNonNull(listener)
+ .onAudioSinkUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs));
+ }
+ }
+
+ /**
+ * Invokes {@link AudioRendererEventListener#onAudioDisabled(DecoderCounters)}.
+ */
+ public void disabled(final DecoderCounters counters) {
+ counters.ensureUpdated();
+ if (handler != null) {
+ handler.post(
+ () -> {
+ counters.ensureUpdated();
+ castNonNull(listener).onAudioDisabled(counters);
+ });
+ }
+ }
+
+ /**
+ * Invokes {@link AudioRendererEventListener#onAudioSessionId(int)}.
+ */
+ public void audioSessionId(final int audioSessionId) {
+ if (handler != null) {
+ handler.post(() -> castNonNull(listener).onAudioSessionId(audioSessionId));
+ }
+ }
+ }
+}
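
A sketch of how a renderer might wire the dispatcher: events are posted to the supplied Handler's looper, and passing null for both constructor arguments yields a dummy dispatcher whose methods are no-ops. The session id value here is arbitrary, and android.os.Handler/Looper imports are assumed.

    Handler eventHandler = new Handler(Looper.getMainLooper());
    AudioRendererEventListener listener =
        new AudioRendererEventListener() {
          @Override
          public void onAudioSessionId(int audioSessionId) {
            // Runs on the main looper, not on the renderer's thread.
          }
        };
    AudioRendererEventListener.EventDispatcher dispatcher =
        new AudioRendererEventListener.EventDispatcher(eventHandler, listener);
    dispatcher.audioSessionId(17); // Posts onAudioSessionId(17) asynchronously.
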
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioSink.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioSink.java
new file mode 100644
index 0000000000..db87e28e7f
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioSink.java
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import android.media.AudioTrack;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.PlaybackParameters;
+import java.nio.ByteBuffer;
+
+/**
+ * A sink that consumes audio data.
+ *
+ * <p>Before starting playback, specify the input audio format by calling {@link #configure(int,
+ * int, int, int, int[], int, int)}.
+ *
+ * <p>Call {@link #handleBuffer(ByteBuffer, long)} to write data, and {@link #handleDiscontinuity()}
+ * when the data being fed is discontinuous. Call {@link #play()} to start playing the written data.
+ *
+ * <p>Call {@link #configure(int, int, int, int, int[], int, int)} whenever the input format
+ * changes. The sink will be reinitialized on the next call to {@link #handleBuffer(ByteBuffer,
+ * long)}.
+ *
+ * <p>Call {@link #flush()} to prepare the sink to receive audio data from a new playback position.
+ *
+ * <p>Call {@link #playToEndOfStream()} repeatedly to play out all data when no more input buffers
+ * will be provided via {@link #handleBuffer(ByteBuffer, long)} until the next {@link #flush()}.
+ * Call {@link #reset()} when the instance is no longer required.
+ *
+ * <p>The implementation may be backed by a platform {@link AudioTrack}. In this case, {@link
+ * #setAudioSessionId(int)}, {@link #setAudioAttributes(AudioAttributes)}, {@link
+ * #enableTunnelingV21(int)} and/or {@link #disableTunneling()} may be called before writing data to
+ * the sink. These methods may also be called after writing data to the sink, in which case it will
+ * be reinitialized as required. For implementations that are not based on platform {@link
+ * AudioTrack}s, calling methods relating to audio sessions, audio attributes, and tunneling may
+ * have no effect.
+ */
+public interface AudioSink {
+
+ /**
+ * Listener for audio sink events.
+ */
+ interface Listener {
+
+ /**
+ * Called if the audio sink has started rendering audio to a new platform audio session.
+ *
+ * @param audioSessionId The newly generated audio session's identifier.
+ */
+ void onAudioSessionId(int audioSessionId);
+
+ /**
+ * Called when the audio sink handles a buffer whose timestamp is discontinuous with the last
+ * buffer handled since it was reset.
+ */
+ void onPositionDiscontinuity();
+
+ /**
+ * Called when the audio sink runs out of data.
+ * <p>
+ * An audio sink implementation may never call this method (for example, if audio data is
+ * consumed in batches rather than based on the sink's own clock).
+ *
+ * @param bufferSize The size of the sink's buffer, in bytes.
+ * @param bufferSizeMs The size of the sink's buffer, in milliseconds, if it is configured for
+ * PCM output. {@link C#TIME_UNSET} if it is configured for encoded audio output, as the
+ * buffered media can have a variable bitrate so the duration may be unknown.
+ * @param elapsedSinceLastFeedMs The time since the sink was last fed data, in milliseconds.
+ */
+ void onUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs);
+
+ }
+
+ /**
+ * Thrown when a failure occurs configuring the sink.
+ */
+ final class ConfigurationException extends Exception {
+
+ /**
+ * Creates a new configuration exception with the specified {@code cause} and no message.
+ */
+ public ConfigurationException(Throwable cause) {
+ super(cause);
+ }
+
+ /**
+ * Creates a new configuration exception with the specified {@code message} and no cause.
+ */
+ public ConfigurationException(String message) {
+ super(message);
+ }
+
+ }
+
+ /**
+ * Thrown when a failure occurs initializing the sink.
+ */
+ final class InitializationException extends Exception {
+
+ /**
+ * The underlying {@link AudioTrack}'s state, if applicable.
+ */
+ public final int audioTrackState;
+
+ /**
+ * @param audioTrackState The underlying {@link AudioTrack}'s state, if applicable.
+ * @param sampleRate The requested sample rate in Hz.
+ * @param channelConfig The requested channel configuration.
+ * @param bufferSize The requested buffer size in bytes.
+ */
+ public InitializationException(int audioTrackState, int sampleRate, int channelConfig,
+ int bufferSize) {
+ super("AudioTrack init failed: " + audioTrackState + ", Config(" + sampleRate + ", "
+ + channelConfig + ", " + bufferSize + ")");
+ this.audioTrackState = audioTrackState;
+ }
+
+ }
+
+ /**
+ * Thrown when a failure occurs writing to the sink.
+ */
+ final class WriteException extends Exception {
+
+ /**
+ * The error value returned from the sink implementation. If the sink writes to a platform
+ * {@link AudioTrack}, this will be the error value returned from
+ * {@link AudioTrack#write(byte[], int, int)} or {@link AudioTrack#write(ByteBuffer, int, int)}.
+ * Otherwise, the meaning of the error code depends on the sink implementation.
+ */
+ public final int errorCode;
+
+ /**
+ * @param errorCode The error value returned from the sink implementation.
+ */
+ public WriteException(int errorCode) {
+ super("AudioTrack write failed: " + errorCode);
+ this.errorCode = errorCode;
+ }
+
+ }
+
+ /**
+ * Returned by {@link #getCurrentPositionUs(boolean)} when the position is not set.
+ */
+ long CURRENT_POSITION_NOT_SET = Long.MIN_VALUE;
+
+ /**
+ * Sets the listener for sink events, which should be the audio renderer.
+ *
+ * @param listener The listener for sink events, which should be the audio renderer.
+ */
+ void setListener(Listener listener);
+
+ /**
+ * Returns whether the sink supports the audio format.
+ *
+ * @param channelCount The number of channels, or {@link Format#NO_VALUE} if not known.
+ * @param encoding The audio encoding, or {@link Format#NO_VALUE} if not known.
+ * @return Whether the sink supports the audio format.
+ */
+ boolean supportsOutput(int channelCount, @C.Encoding int encoding);
+
+ /**
+ * Returns the playback position in the stream starting at zero, in microseconds, or
+ * {@link #CURRENT_POSITION_NOT_SET} if it is not yet available.
+ *
+ * @param sourceEnded Specify {@code true} if no more input buffers will be provided.
+ * @return The playback position relative to the start of playback, in microseconds.
+ */
+ long getCurrentPositionUs(boolean sourceEnded);
+
+ /**
+ * Configures (or reconfigures) the sink.
+ *
+ * @param inputEncoding The encoding of audio data provided in the input buffers.
+ * @param inputChannelCount The number of channels.
+ * @param inputSampleRate The sample rate in Hz.
+ * @param specifiedBufferSize A specific size for the playback buffer in bytes, or 0 to infer a
+ * suitable buffer size.
+ * @param outputChannels A mapping from input to output channels that is applied to this sink's
+ * input as a preprocessing step, if handling PCM input. Specify {@code null} to leave the
+   *     input unchanged. Otherwise, the element at index {@code i} specifies the index of the input
+ * channel to map to output channel {@code i} when preprocessing input buffers. After the map
+ * is applied the audio data will have {@code outputChannels.length} channels.
+ * @param trimStartFrames The number of audio frames to trim from the start of data written to the
+ * sink after this call.
+ * @param trimEndFrames The number of audio frames to trim from data written to the sink
+ * immediately preceding the next call to {@link #flush()} or this method.
+ * @throws ConfigurationException If an error occurs configuring the sink.
+ */
+ void configure(
+ @C.Encoding int inputEncoding,
+ int inputChannelCount,
+ int inputSampleRate,
+ int specifiedBufferSize,
+ @Nullable int[] outputChannels,
+ int trimStartFrames,
+ int trimEndFrames)
+ throws ConfigurationException;
+
+ /**
+ * Starts or resumes consuming audio if initialized.
+ */
+ void play();
+
+ /** Signals to the sink that the next buffer may be discontinuous with the previous buffer. */
+ void handleDiscontinuity();
+
+ /**
+ * Attempts to process data from a {@link ByteBuffer}, starting from its current position and
+ * ending at its limit (exclusive). The position of the {@link ByteBuffer} is advanced by the
+ * number of bytes that were handled. {@link Listener#onPositionDiscontinuity()} will be called if
+ * {@code presentationTimeUs} is discontinuous with the last buffer handled since the last reset.
+ *
+ * <p>Returns whether the data was handled in full. If the data was not handled in full then the
+ * same {@link ByteBuffer} must be provided to subsequent calls until it has been fully consumed,
+ * except in the case of an intervening call to {@link #flush()} (or to {@link #configure(int,
+ * int, int, int, int[], int, int)} that causes the sink to be flushed).
+ *
+ * @param buffer The buffer containing audio data.
+ * @param presentationTimeUs The presentation timestamp of the buffer in microseconds.
+ * @return Whether the buffer was handled fully.
+ * @throws InitializationException If an error occurs initializing the sink.
+ * @throws WriteException If an error occurs writing the audio data.
+ */
+ boolean handleBuffer(ByteBuffer buffer, long presentationTimeUs)
+ throws InitializationException, WriteException;
+
+ /**
+ * Processes any remaining data. {@link #isEnded()} will return {@code true} when no data remains.
+ *
+ * @throws WriteException If an error occurs draining data to the sink.
+ */
+ void playToEndOfStream() throws WriteException;
+
+ /**
+ * Returns whether {@link #playToEndOfStream} has been called and all buffers have been processed.
+ */
+ boolean isEnded();
+
+ /**
+ * Returns whether the sink has data pending that has not been consumed yet.
+ */
+ boolean hasPendingData();
+
+ /**
+ * Attempts to set the playback parameters. The audio sink may override these parameters if they
+ * are not supported.
+ *
+ * @param playbackParameters The new playback parameters to attempt to set.
+ */
+ void setPlaybackParameters(PlaybackParameters playbackParameters);
+
+ /**
+ * Gets the active {@link PlaybackParameters}.
+ */
+ PlaybackParameters getPlaybackParameters();
+
+ /**
+ * Sets attributes for audio playback. If the attributes have changed and if the sink is not
+ * configured for use with tunneling, then it is reset and the audio session id is cleared.
+ * <p>
+ * If the sink is configured for use with tunneling then the audio attributes are ignored. The
+ * sink is not reset and the audio session id is not cleared. The passed attributes will be used
+ * if the sink is later re-configured into non-tunneled mode.
+ *
+ * @param audioAttributes The attributes for audio playback.
+ */
+ void setAudioAttributes(AudioAttributes audioAttributes);
+
+ /** Sets the audio session id. */
+ void setAudioSessionId(int audioSessionId);
+
+ /** Sets the auxiliary effect. */
+ void setAuxEffectInfo(AuxEffectInfo auxEffectInfo);
+
+ /**
+ * Enables tunneling, if possible. The sink is reset if tunneling was previously disabled or if
+ * the audio session id has changed. Enabling tunneling is only possible if the sink is based on a
+ * platform {@link AudioTrack}, and requires platform API version 21 onwards.
+ *
+ * @param tunnelingAudioSessionId The audio session id to use.
+ * @throws IllegalStateException Thrown if enabling tunneling on platform API version &lt; 21.
+ */
+ void enableTunnelingV21(int tunnelingAudioSessionId);
+
+ /**
+ * Disables tunneling. If tunneling was previously enabled then the sink is reset and any audio
+ * session id is cleared.
+ */
+ void disableTunneling();
+
+ /**
+ * Sets the playback volume.
+ *
+ * @param volume A volume in the range [0.0, 1.0].
+ */
+ void setVolume(float volume);
+
+ /**
+ * Pauses playback.
+ */
+ void pause();
+
+ /**
+ * Flushes the sink, after which it is ready to receive buffers from a new playback position.
+ *
+ * <p>The audio session may remain active until {@link #reset()} is called.
+ */
+ void flush();
+
+ /** Resets the renderer, releasing any resources that it currently holds. */
+ void reset();
+}
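
Pulling the interface's contract together, a sketch of one full write cycle against a hypothetical sink. Stereo 48 kHz 16-bit PCM is an arbitrary choice, and the checked exceptions are propagated rather than handled:

    static void writeAll(AudioSink sink, ByteBuffer buffer, long presentationTimeUs)
        throws AudioSink.ConfigurationException,
            AudioSink.InitializationException,
            AudioSink.WriteException {
      sink.configure(
          C.ENCODING_PCM_16BIT,
          /* inputChannelCount= */ 2,
          /* inputSampleRate= */ 48_000,
          /* specifiedBufferSize= */ 0, // Let the sink infer a suitable buffer size.
          /* outputChannels= */ null,
          /* trimStartFrames= */ 0,
          /* trimEndFrames= */ 0);
      sink.play();
      while (!sink.handleBuffer(buffer, presentationTimeUs)) {
        // Not handled in full: the contract requires offering the same buffer again.
      }
      while (!sink.isEnded()) {
        sink.playToEndOfStream(); // In a real renderer this is interleaved with other work.
      }
      sink.flush(); // Ready for a new playback position; reset() would release the track.
    }
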
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioTimestampPoller.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioTimestampPoller.java
new file mode 100644
index 0000000000..153947fec0
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioTimestampPoller.java
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import android.annotation.TargetApi;
+import android.media.AudioTimestamp;
+import android.media.AudioTrack;
+import androidx.annotation.IntDef;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+/**
+ * Polls the {@link AudioTrack} timestamp, if the platform supports it, taking care of polling at
+ * the appropriate rate to detect when the timestamp starts to advance.
+ *
+ * <p>When the audio track isn't paused, call {@link #maybePollTimestamp(long)} regularly to check
+ * for timestamp updates. If it returns {@code true}, call {@link #getTimestampPositionFrames()} and
+ * {@link #getTimestampSystemTimeUs()} to access the updated timestamp, then call {@link
+ * #acceptTimestamp()} or {@link #rejectTimestamp()} to accept or reject it.
+ *
+ * <p>If {@link #hasTimestamp()} returns {@code true}, call {@link #getTimestampSystemTimeUs()} to
+ * get the system time at which the latest timestamp was sampled and {@link
+ * #getTimestampPositionFrames()} to get its position in frames. If {@link #isTimestampAdvancing()}
+ * returns {@code true}, the caller should assume that the timestamp has been increasing in real
+ * time since it was sampled. Otherwise, it may be stationary.
+ *
+ * <p>Call {@link #reset()} when pausing or resuming the track.
+ */
+/* package */ final class AudioTimestampPoller {
+
+ /** Timestamp polling states. */
+ @Documented
+ @Retention(RetentionPolicy.SOURCE)
+ @IntDef({
+ STATE_INITIALIZING,
+ STATE_TIMESTAMP,
+ STATE_TIMESTAMP_ADVANCING,
+ STATE_NO_TIMESTAMP,
+ STATE_ERROR
+ })
+ private @interface State {}
+ /** State when first initializing. */
+ private static final int STATE_INITIALIZING = 0;
+ /** State when we have a timestamp and we don't know if it's advancing. */
+ private static final int STATE_TIMESTAMP = 1;
+ /** State when we have a timestamp and we know it is advancing. */
+ private static final int STATE_TIMESTAMP_ADVANCING = 2;
+  /** State when no timestamp is available. */
+ private static final int STATE_NO_TIMESTAMP = 3;
+ /** State when the last timestamp was rejected as invalid. */
+ private static final int STATE_ERROR = 4;
+
+ /** The polling interval for {@link #STATE_INITIALIZING} and {@link #STATE_TIMESTAMP}. */
+ private static final int FAST_POLL_INTERVAL_US = 5_000;
+ /**
+ * The polling interval for {@link #STATE_TIMESTAMP_ADVANCING} and {@link #STATE_NO_TIMESTAMP}.
+ */
+ private static final int SLOW_POLL_INTERVAL_US = 10_000_000;
+ /** The polling interval for {@link #STATE_ERROR}. */
+ private static final int ERROR_POLL_INTERVAL_US = 500_000;
+
+ /**
+ * The minimum duration to remain in {@link #STATE_INITIALIZING} if no timestamps are being
+ * returned before transitioning to {@link #STATE_NO_TIMESTAMP}.
+ */
+ private static final int INITIALIZING_DURATION_US = 500_000;
+
+ @Nullable private final AudioTimestampV19 audioTimestamp;
+
+ private @State int state;
+ private long initializeSystemTimeUs;
+ private long sampleIntervalUs;
+ private long lastTimestampSampleTimeUs;
+ private long initialTimestampPositionFrames;
+
+ /**
+ * Creates a new audio timestamp poller.
+ *
+ * @param audioTrack The audio track that will provide timestamps, if the platform supports it.
+ */
+ public AudioTimestampPoller(AudioTrack audioTrack) {
+ if (Util.SDK_INT >= 19) {
+ audioTimestamp = new AudioTimestampV19(audioTrack);
+ reset();
+ } else {
+ audioTimestamp = null;
+ updateState(STATE_NO_TIMESTAMP);
+ }
+ }
+
+ /**
+ * Polls the timestamp if required and returns whether it was updated. If {@code true}, the latest
+ * timestamp is available via {@link #getTimestampSystemTimeUs()} and {@link
+ * #getTimestampPositionFrames()}, and the caller should call {@link #acceptTimestamp()} if the
+ * timestamp was valid, or {@link #rejectTimestamp()} otherwise. The values returned by {@link
+ * #hasTimestamp()} and {@link #isTimestampAdvancing()} may be updated.
+ *
+ * @param systemTimeUs The current system time, in microseconds.
+ * @return Whether the timestamp was updated.
+ */
+ public boolean maybePollTimestamp(long systemTimeUs) {
+ if (audioTimestamp == null || (systemTimeUs - lastTimestampSampleTimeUs) < sampleIntervalUs) {
+ return false;
+ }
+ lastTimestampSampleTimeUs = systemTimeUs;
+ boolean updatedTimestamp = audioTimestamp.maybeUpdateTimestamp();
+ switch (state) {
+ case STATE_INITIALIZING:
+ if (updatedTimestamp) {
+ if (audioTimestamp.getTimestampSystemTimeUs() >= initializeSystemTimeUs) {
+ // We have an initial timestamp, but don't know if it's advancing yet.
+ initialTimestampPositionFrames = audioTimestamp.getTimestampPositionFrames();
+ updateState(STATE_TIMESTAMP);
+ } else {
+ // Drop the timestamp, as it was sampled before the last reset.
+ updatedTimestamp = false;
+ }
+ } else if (systemTimeUs - initializeSystemTimeUs > INITIALIZING_DURATION_US) {
+ // We haven't received a timestamp for a while, so they probably aren't available for the
+ // current audio route. Poll infrequently in case the route changes later.
+ // TODO: Ideally we should listen for audio route changes in order to detect when a
+ // timestamp becomes available again.
+ updateState(STATE_NO_TIMESTAMP);
+ }
+ break;
+ case STATE_TIMESTAMP:
+ if (updatedTimestamp) {
+ long timestampPositionFrames = audioTimestamp.getTimestampPositionFrames();
+ if (timestampPositionFrames > initialTimestampPositionFrames) {
+ updateState(STATE_TIMESTAMP_ADVANCING);
+ }
+ } else {
+ reset();
+ }
+ break;
+ case STATE_TIMESTAMP_ADVANCING:
+ if (!updatedTimestamp) {
+ // The audio route may have changed, so reset polling.
+ reset();
+ }
+ break;
+ case STATE_NO_TIMESTAMP:
+ if (updatedTimestamp) {
+ // The audio route may have changed, so reset polling.
+ reset();
+ }
+ break;
+ case STATE_ERROR:
+ // Do nothing. If the caller accepts any new timestamp we'll reset polling.
+ break;
+ default:
+ throw new IllegalStateException();
+ }
+ return updatedTimestamp;
+ }
+
+ /**
+ * Rejects the timestamp last polled in {@link #maybePollTimestamp(long)}. The instance will enter
+ * the error state and poll timestamps infrequently until the next call to {@link
+ * #acceptTimestamp()}.
+ */
+ public void rejectTimestamp() {
+ updateState(STATE_ERROR);
+ }
+
+ /**
+ * Accepts the timestamp last polled in {@link #maybePollTimestamp(long)}. If the instance is in
+ * the error state, it will begin to poll timestamps frequently again.
+ */
+ public void acceptTimestamp() {
+ if (state == STATE_ERROR) {
+ reset();
+ }
+ }
+
+ /**
+ * Returns whether this instance has a timestamp that can be used to calculate the audio track
+ * position. If {@code true}, call {@link #getTimestampSystemTimeUs()} and {@link
+   * #getTimestampPositionFrames()} to access the timestamp.
+ */
+ public boolean hasTimestamp() {
+ return state == STATE_TIMESTAMP || state == STATE_TIMESTAMP_ADVANCING;
+ }
+
+ /**
+ * Returns whether the timestamp appears to be advancing. If {@code true}, call {@link
+   * #getTimestampSystemTimeUs()} and {@link #getTimestampPositionFrames()} to access the timestamp. A
+ * current position for the track can be extrapolated based on elapsed real time since the system
+ * time at which the timestamp was sampled.
+ */
+ public boolean isTimestampAdvancing() {
+ return state == STATE_TIMESTAMP_ADVANCING;
+ }
+
+ /** Resets polling. Should be called whenever the audio track is paused or resumed. */
+ public void reset() {
+ if (audioTimestamp != null) {
+ updateState(STATE_INITIALIZING);
+ }
+ }
+
+ /**
+ * If {@link #maybePollTimestamp(long)} or {@link #hasTimestamp()} returned {@code true}, returns
+ * the system time at which the latest timestamp was sampled, in microseconds.
+ */
+ public long getTimestampSystemTimeUs() {
+ return audioTimestamp != null ? audioTimestamp.getTimestampSystemTimeUs() : C.TIME_UNSET;
+ }
+
+ /**
+ * If {@link #maybePollTimestamp(long)} or {@link #hasTimestamp()} returned {@code true}, returns
+ * the latest timestamp's position in frames.
+ */
+ public long getTimestampPositionFrames() {
+ return audioTimestamp != null ? audioTimestamp.getTimestampPositionFrames() : C.POSITION_UNSET;
+ }
+
+ private void updateState(@State int state) {
+ this.state = state;
+ switch (state) {
+ case STATE_INITIALIZING:
+ // Force polling a timestamp immediately, and poll quickly.
+ lastTimestampSampleTimeUs = 0;
+ initialTimestampPositionFrames = C.POSITION_UNSET;
+ initializeSystemTimeUs = System.nanoTime() / 1000;
+ sampleIntervalUs = FAST_POLL_INTERVAL_US;
+ break;
+ case STATE_TIMESTAMP:
+ sampleIntervalUs = FAST_POLL_INTERVAL_US;
+ break;
+ case STATE_TIMESTAMP_ADVANCING:
+ case STATE_NO_TIMESTAMP:
+ sampleIntervalUs = SLOW_POLL_INTERVAL_US;
+ break;
+ case STATE_ERROR:
+ sampleIntervalUs = ERROR_POLL_INTERVAL_US;
+ break;
+ default:
+ throw new IllegalStateException();
+ }
+ }
+
+ @TargetApi(19)
+ private static final class AudioTimestampV19 {
+
+ private final AudioTrack audioTrack;
+ private final AudioTimestamp audioTimestamp;
+
+ private long rawTimestampFramePositionWrapCount;
+ private long lastTimestampRawPositionFrames;
+ private long lastTimestampPositionFrames;
+
+ /**
+ * Creates a new {@link AudioTimestamp} wrapper.
+ *
+ * @param audioTrack The audio track that will provide timestamps.
+ */
+ public AudioTimestampV19(AudioTrack audioTrack) {
+ this.audioTrack = audioTrack;
+ audioTimestamp = new AudioTimestamp();
+ }
+
+ /**
+ * Attempts to update the audio track timestamp. Returns {@code true} if the timestamp was
+ * updated, in which case the updated timestamp system time and position can be accessed with
+ * {@link #getTimestampSystemTimeUs()} and {@link #getTimestampPositionFrames()}. Returns {@code
+ * false} if no timestamp is available, in which case those methods should not be called.
+ */
+ public boolean maybeUpdateTimestamp() {
+ boolean updated = audioTrack.getTimestamp(audioTimestamp);
+ if (updated) {
+ long rawPositionFrames = audioTimestamp.framePosition;
+ if (lastTimestampRawPositionFrames > rawPositionFrames) {
+ // The value must have wrapped around.
+ rawTimestampFramePositionWrapCount++;
+ }
+ lastTimestampRawPositionFrames = rawPositionFrames;
+ lastTimestampPositionFrames =
+ rawPositionFrames + (rawTimestampFramePositionWrapCount << 32);
+ }
+ return updated;
+ }
+
+ public long getTimestampSystemTimeUs() {
+ return audioTimestamp.nanoTime / 1000;
+ }
+
+ public long getTimestampPositionFrames() {
+ return lastTimestampPositionFrames;
+ }
+ }
+}
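
The poll/accept/reject cycle described above is driven by AudioTrackPositionTracker in the next file; a condensed sketch of that usage, where MAX_OFFSET_US is a hypothetical sanity threshold and framesToUs a hypothetical frame-to-microsecond conversion:

    long systemTimeUs = System.nanoTime() / 1000;
    if (poller.maybePollTimestamp(systemTimeUs)) {
      if (Math.abs(poller.getTimestampSystemTimeUs() - systemTimeUs) > MAX_OFFSET_US) {
        poller.rejectTimestamp(); // Enter the error state; poll infrequently until accepted.
      } else {
        poller.acceptTimestamp();
      }
    }
    if (poller.hasTimestamp()) {
      long positionUs = framesToUs(poller.getTimestampPositionFrames());
      if (poller.isTimestampAdvancing()) {
        // Extrapolate forward by the real time elapsed since the timestamp was sampled.
        positionUs += systemTimeUs - poller.getTimestampSystemTimeUs();
      }
    }
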
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioTrackPositionTracker.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioTrackPositionTracker.java
new file mode 100644
index 0000000000..e62e8cf2c5
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AudioTrackPositionTracker.java
@@ -0,0 +1,545 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import static org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util.castNonNull;
+
+import android.media.AudioTimestamp;
+import android.media.AudioTrack;
+import android.os.SystemClock;
+import androidx.annotation.IntDef;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Assertions;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.reflect.Method;
+
+/**
+ * Wraps an {@link AudioTrack}, exposing a position based on {@link
+ * AudioTrack#getPlaybackHeadPosition()} and {@link AudioTrack#getTimestamp(AudioTimestamp)}.
+ *
+ * <p>Call {@link #setAudioTrack(AudioTrack, int, int, int)} to set the audio track to wrap. Call
+ * {@link #mayHandleBuffer(long)} if there is input data to write to the track. If it returns false,
+ * the audio track position is stabilizing and no data may be written. Call {@link #start()}
+ * immediately before calling {@link AudioTrack#play()}. Call {@link #pause()} when pausing the
+ * track. Call {@link #handleEndOfStream(long)} when no more data will be written to the track. When
+ * the audio track will no longer be used, call {@link #reset()}.
+ */
+/* package */ final class AudioTrackPositionTracker {
+
+ /** Listener for position tracker events. */
+ public interface Listener {
+
+ /**
+ * Called when the frame position is too far from the expected frame position.
+ *
+ * @param audioTimestampPositionFrames The frame position of the last known audio track
+ * timestamp.
+ * @param audioTimestampSystemTimeUs The system time associated with the last known audio track
+ * timestamp, in microseconds.
+     * @param systemTimeUs The current system time, in microseconds.
+ * @param playbackPositionUs The current playback head position in microseconds.
+ */
+ void onPositionFramesMismatch(
+ long audioTimestampPositionFrames,
+ long audioTimestampSystemTimeUs,
+ long systemTimeUs,
+ long playbackPositionUs);
+
+ /**
+ * Called when the system time associated with the last known audio track timestamp is
+ * unexpectedly far from the current time.
+ *
+ * @param audioTimestampPositionFrames The frame position of the last known audio track
+ * timestamp.
+ * @param audioTimestampSystemTimeUs The system time associated with the last known audio track
+ * timestamp, in microseconds.
+     * @param systemTimeUs The current system time, in microseconds.
+ * @param playbackPositionUs The current playback head position in microseconds.
+ */
+ void onSystemTimeUsMismatch(
+ long audioTimestampPositionFrames,
+ long audioTimestampSystemTimeUs,
+ long systemTimeUs,
+ long playbackPositionUs);
+
+ /**
+ * Called when the audio track has provided an invalid latency.
+ *
+ * @param latencyUs The reported latency in microseconds.
+ */
+ void onInvalidLatency(long latencyUs);
+
+ /**
+ * Called when the audio track runs out of data to play.
+ *
+ * @param bufferSize The size of the sink's buffer, in bytes.
+ * @param bufferSizeMs The size of the sink's buffer, in milliseconds, if it is configured for
+ * PCM output. {@link C#TIME_UNSET} if it is configured for encoded audio output, as the
+ * buffered media can have a variable bitrate so the duration may be unknown.
+ */
+ void onUnderrun(int bufferSize, long bufferSizeMs);
+ }
+
+ /** {@link AudioTrack} playback states. */
+ @Documented
+ @Retention(RetentionPolicy.SOURCE)
+ @IntDef({PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, PLAYSTATE_PLAYING})
+ private @interface PlayState {}
+ /** @see AudioTrack#PLAYSTATE_STOPPED */
+ private static final int PLAYSTATE_STOPPED = AudioTrack.PLAYSTATE_STOPPED;
+ /** @see AudioTrack#PLAYSTATE_PAUSED */
+ private static final int PLAYSTATE_PAUSED = AudioTrack.PLAYSTATE_PAUSED;
+ /** @see AudioTrack#PLAYSTATE_PLAYING */
+ private static final int PLAYSTATE_PLAYING = AudioTrack.PLAYSTATE_PLAYING;
+
+ /**
+ * AudioTrack timestamps are deemed spurious if they are offset from the system clock by more than
+ * this amount.
+ *
+   * <p>This is a fail-safe that should not be required on correctly functioning devices.
+ */
+ private static final long MAX_AUDIO_TIMESTAMP_OFFSET_US = 5 * C.MICROS_PER_SECOND;
+
+ /**
+ * AudioTrack latencies are deemed impossibly large if they are greater than this amount.
+ *
+   * <p>This is a fail-safe that should not be required on correctly functioning devices.
+ */
+ private static final long MAX_LATENCY_US = 5 * C.MICROS_PER_SECOND;
+
+ private static final long FORCE_RESET_WORKAROUND_TIMEOUT_MS = 200;
+
+ private static final int MAX_PLAYHEAD_OFFSET_COUNT = 10;
+ private static final int MIN_PLAYHEAD_OFFSET_SAMPLE_INTERVAL_US = 30000;
+ private static final int MIN_LATENCY_SAMPLE_INTERVAL_US = 500000;
+
+ private final Listener listener;
+ private final long[] playheadOffsets;
+
+ @Nullable private AudioTrack audioTrack;
+ private int outputPcmFrameSize;
+ private int bufferSize;
+ @Nullable private AudioTimestampPoller audioTimestampPoller;
+ private int outputSampleRate;
+ private boolean needsPassthroughWorkarounds;
+ private long bufferSizeUs;
+
+ private long smoothedPlayheadOffsetUs;
+ private long lastPlayheadSampleTimeUs;
+
+ @Nullable private Method getLatencyMethod;
+ private long latencyUs;
+ private boolean hasData;
+
+ private boolean isOutputPcm;
+ private long lastLatencySampleTimeUs;
+ private long lastRawPlaybackHeadPosition;
+ private long rawPlaybackHeadWrapCount;
+ private long passthroughWorkaroundPauseOffset;
+ private int nextPlayheadOffsetIndex;
+ private int playheadOffsetCount;
+ private long stopTimestampUs;
+ private long forceResetWorkaroundTimeMs;
+ private long stopPlaybackHeadPosition;
+ private long endPlaybackHeadPosition;
+
+ /**
+ * Creates a new audio track position tracker.
+ *
+ * @param listener A listener for position tracking events.
+ */
+ public AudioTrackPositionTracker(Listener listener) {
+ this.listener = Assertions.checkNotNull(listener);
+ if (Util.SDK_INT >= 18) {
+ try {
+ getLatencyMethod = AudioTrack.class.getMethod("getLatency", (Class<?>[]) null);
+ } catch (NoSuchMethodException e) {
+ // There's no guarantee this method exists. Do nothing.
+ }
+ }
+ playheadOffsets = new long[MAX_PLAYHEAD_OFFSET_COUNT];
+ }
+
+ /**
+ * Sets the {@link AudioTrack} to wrap. Subsequent method calls on this instance relate to this
+ * track's position, until the next call to {@link #reset()}.
+ *
+ * @param audioTrack The audio track to wrap.
+ * @param outputEncoding The encoding of the audio track.
+ * @param outputPcmFrameSize For PCM output encodings, the frame size. The value is ignored
+ * otherwise.
+ * @param bufferSize The audio track buffer size in bytes.
+ */
+ public void setAudioTrack(
+ AudioTrack audioTrack,
+ @C.Encoding int outputEncoding,
+ int outputPcmFrameSize,
+ int bufferSize) {
+ this.audioTrack = audioTrack;
+ this.outputPcmFrameSize = outputPcmFrameSize;
+ this.bufferSize = bufferSize;
+ audioTimestampPoller = new AudioTimestampPoller(audioTrack);
+ outputSampleRate = audioTrack.getSampleRate();
+ needsPassthroughWorkarounds = needsPassthroughWorkarounds(outputEncoding);
+ isOutputPcm = Util.isEncodingLinearPcm(outputEncoding);
+ bufferSizeUs = isOutputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
+ lastRawPlaybackHeadPosition = 0;
+ rawPlaybackHeadWrapCount = 0;
+ passthroughWorkaroundPauseOffset = 0;
+ hasData = false;
+ stopTimestampUs = C.TIME_UNSET;
+ forceResetWorkaroundTimeMs = C.TIME_UNSET;
+ latencyUs = 0;
+ }
+
+ public long getCurrentPositionUs(boolean sourceEnded) {
+ if (Assertions.checkNotNull(this.audioTrack).getPlayState() == PLAYSTATE_PLAYING) {
+ maybeSampleSyncParams();
+ }
+
+ // If the device supports it, use the playback timestamp from AudioTrack.getTimestamp.
+ // Otherwise, derive a smoothed position by sampling the track's frame position.
+ long systemTimeUs = System.nanoTime() / 1000;
+ AudioTimestampPoller audioTimestampPoller = Assertions.checkNotNull(this.audioTimestampPoller);
+ if (audioTimestampPoller.hasTimestamp()) {
+ // Calculate the speed-adjusted position using the timestamp (which may be in the future).
+ long timestampPositionFrames = audioTimestampPoller.getTimestampPositionFrames();
+ long timestampPositionUs = framesToDurationUs(timestampPositionFrames);
+ if (!audioTimestampPoller.isTimestampAdvancing()) {
+ return timestampPositionUs;
+ }
+ long elapsedSinceTimestampUs = systemTimeUs - audioTimestampPoller.getTimestampSystemTimeUs();
+ return timestampPositionUs + elapsedSinceTimestampUs;
+ } else {
+ long positionUs;
+ if (playheadOffsetCount == 0) {
+ // The AudioTrack has started, but we don't have any samples to compute a smoothed position.
+ positionUs = getPlaybackHeadPositionUs();
+ } else {
+ // getPlaybackHeadPositionUs() only has a granularity of ~20 ms, so we base the position off
+ // the system clock (and a smoothed offset between it and the playhead position) so as to
+ // prevent jitter in the reported positions.
+ positionUs = systemTimeUs + smoothedPlayheadOffsetUs;
+ }
+ if (!sourceEnded) {
+ positionUs -= latencyUs;
+ }
+ return positionUs;
+ }
+ }
+
+ /** Starts position tracking. Must be called immediately before {@link AudioTrack#play()}. */
+ public void start() {
+ Assertions.checkNotNull(audioTimestampPoller).reset();
+ }
+
+ /** Returns whether the audio track is in the playing state. */
+ public boolean isPlaying() {
+ return Assertions.checkNotNull(audioTrack).getPlayState() == PLAYSTATE_PLAYING;
+ }
+
+ /**
+ * Checks the state of the audio track and returns whether the caller can write data to the track.
+ * Notifies {@link Listener#onUnderrun(int, long)} if the track has underrun.
+ *
+ * @param writtenFrames The number of frames that have been written.
+ * @return Whether the caller can write data to the track.
+ */
+ public boolean mayHandleBuffer(long writtenFrames) {
+ @PlayState int playState = Assertions.checkNotNull(audioTrack).getPlayState();
+ if (needsPassthroughWorkarounds) {
+ // An AC-3 audio track continues to play data written while it is paused. Stop writing so its
+ // buffer empties. See [Internal: b/18899620].
+ if (playState == PLAYSTATE_PAUSED) {
+ // We force an underrun to pause the track, so don't notify the listener in this case.
+ hasData = false;
+ return false;
+ }
+
+ // A new AC-3 audio track's playback position continues to increase from the old track's
+      // position for a short time after it has been released. Avoid writing data until the playback
+ // head position actually returns to zero.
+ if (playState == PLAYSTATE_STOPPED && getPlaybackHeadPosition() == 0) {
+ return false;
+ }
+ }
+
+ boolean hadData = hasData;
+ hasData = hasPendingData(writtenFrames);
+ if (hadData && !hasData && playState != PLAYSTATE_STOPPED && listener != null) {
+ listener.onUnderrun(bufferSize, C.usToMs(bufferSizeUs));
+ }
+
+ return true;
+ }
+
+ /**
+ * Returns an estimate of the number of additional bytes that can be written to the audio track's
+ * buffer without running out of space.
+ *
+ * <p>May only be called if the output encoding is one of the PCM encodings.
+ *
+ * @param writtenBytes The number of bytes written to the audio track so far.
+ * @return An estimate of the number of bytes that can be written.
+ */
+ public int getAvailableBufferSize(long writtenBytes) {
+ int bytesPending = (int) (writtenBytes - (getPlaybackHeadPosition() * outputPcmFrameSize));
+ return bufferSize - bytesPending;
+ }
+
+ /** Returns whether the track is in an invalid state and must be recreated. */
+ public boolean isStalled(long writtenFrames) {
+ return forceResetWorkaroundTimeMs != C.TIME_UNSET
+ && writtenFrames > 0
+ && SystemClock.elapsedRealtime() - forceResetWorkaroundTimeMs
+ >= FORCE_RESET_WORKAROUND_TIMEOUT_MS;
+ }
+
+ /**
+ * Records the writing position at which the stream ended, so that the reported position can
+ * continue to increment while remaining data is played out.
+ *
+ * @param writtenFrames The number of frames that have been written.
+ */
+ public void handleEndOfStream(long writtenFrames) {
+ stopPlaybackHeadPosition = getPlaybackHeadPosition();
+ stopTimestampUs = SystemClock.elapsedRealtime() * 1000;
+ endPlaybackHeadPosition = writtenFrames;
+ }
+
+ /**
+ * Returns whether the audio track has any pending data to play out at its current position.
+ *
+ * @param writtenFrames The number of frames written to the audio track.
+ * @return Whether the audio track has any pending data to play out.
+ */
+ public boolean hasPendingData(long writtenFrames) {
+ return writtenFrames > getPlaybackHeadPosition()
+ || forceHasPendingData();
+ }
+
+ /**
+ * Pauses the audio track position tracker, returning whether the audio track needs to be paused
+ * to cause playback to pause. If {@code false} is returned the audio track will pause without
+ * further interaction, as the end of stream has been handled.
+ */
+ public boolean pause() {
+ resetSyncParams();
+ if (stopTimestampUs == C.TIME_UNSET) {
+ // The audio track is going to be paused, so reset the timestamp poller to ensure it doesn't
+ // supply an advancing position.
+ Assertions.checkNotNull(audioTimestampPoller).reset();
+ return true;
+ }
+ // We've handled the end of the stream already, so there's no need to pause the track.
+ return false;
+ }
+
+ /**
+   * Resets the position tracker. Should be called when the audio track previously passed to {@link
+ * #setAudioTrack(AudioTrack, int, int, int)} is no longer in use.
+ */
+ public void reset() {
+ resetSyncParams();
+ audioTrack = null;
+ audioTimestampPoller = null;
+ }
+
+ private void maybeSampleSyncParams() {
+ long playbackPositionUs = getPlaybackHeadPositionUs();
+ if (playbackPositionUs == 0) {
+ // The AudioTrack hasn't output anything yet.
+ return;
+ }
+ long systemTimeUs = System.nanoTime() / 1000;
+ if (systemTimeUs - lastPlayheadSampleTimeUs >= MIN_PLAYHEAD_OFFSET_SAMPLE_INTERVAL_US) {
+ // Take a new sample and update the smoothed offset between the system clock and the playhead.
+ playheadOffsets[nextPlayheadOffsetIndex] = playbackPositionUs - systemTimeUs;
+ nextPlayheadOffsetIndex = (nextPlayheadOffsetIndex + 1) % MAX_PLAYHEAD_OFFSET_COUNT;
+ if (playheadOffsetCount < MAX_PLAYHEAD_OFFSET_COUNT) {
+ playheadOffsetCount++;
+ }
+ lastPlayheadSampleTimeUs = systemTimeUs;
+ smoothedPlayheadOffsetUs = 0;
+ for (int i = 0; i < playheadOffsetCount; i++) {
+ smoothedPlayheadOffsetUs += playheadOffsets[i] / playheadOffsetCount;
+ }
+ }
+
+ if (needsPassthroughWorkarounds) {
+ // Don't sample the timestamp and latency if this is an AC-3 passthrough AudioTrack on
+ // platform API versions 21/22, as incorrect values are returned. See [Internal: b/21145353].
+ return;
+ }
+
+ maybePollAndCheckTimestamp(systemTimeUs, playbackPositionUs);
+ maybeUpdateLatency(systemTimeUs);
+ }
+
+ private void maybePollAndCheckTimestamp(long systemTimeUs, long playbackPositionUs) {
+ AudioTimestampPoller audioTimestampPoller = Assertions.checkNotNull(this.audioTimestampPoller);
+ if (!audioTimestampPoller.maybePollTimestamp(systemTimeUs)) {
+ return;
+ }
+
+ // Perform sanity checks on the timestamp and accept/reject it.
+ long audioTimestampSystemTimeUs = audioTimestampPoller.getTimestampSystemTimeUs();
+ long audioTimestampPositionFrames = audioTimestampPoller.getTimestampPositionFrames();
+ if (Math.abs(audioTimestampSystemTimeUs - systemTimeUs) > MAX_AUDIO_TIMESTAMP_OFFSET_US) {
+ listener.onSystemTimeUsMismatch(
+ audioTimestampPositionFrames,
+ audioTimestampSystemTimeUs,
+ systemTimeUs,
+ playbackPositionUs);
+ audioTimestampPoller.rejectTimestamp();
+ } else if (Math.abs(framesToDurationUs(audioTimestampPositionFrames) - playbackPositionUs)
+ > MAX_AUDIO_TIMESTAMP_OFFSET_US) {
+ listener.onPositionFramesMismatch(
+ audioTimestampPositionFrames,
+ audioTimestampSystemTimeUs,
+ systemTimeUs,
+ playbackPositionUs);
+ audioTimestampPoller.rejectTimestamp();
+ } else {
+ audioTimestampPoller.acceptTimestamp();
+ }
+ }
+
+ private void maybeUpdateLatency(long systemTimeUs) {
+ if (isOutputPcm
+ && getLatencyMethod != null
+ && systemTimeUs - lastLatencySampleTimeUs >= MIN_LATENCY_SAMPLE_INTERVAL_US) {
+ try {
+ // Compute the audio track latency, excluding the latency due to the buffer (leaving
+ // latency due to the mixer and audio hardware driver).
+ latencyUs =
+ castNonNull((Integer) getLatencyMethod.invoke(Assertions.checkNotNull(audioTrack)))
+ * 1000L
+ - bufferSizeUs;
+ // Sanity check that the latency is non-negative.
+ latencyUs = Math.max(latencyUs, 0);
+ // Sanity check that the latency isn't too large.
+ if (latencyUs > MAX_LATENCY_US) {
+ listener.onInvalidLatency(latencyUs);
+ latencyUs = 0;
+ }
+ } catch (Exception e) {
+ // The method existed, but doesn't work. Don't try again.
+ getLatencyMethod = null;
+ }
+ lastLatencySampleTimeUs = systemTimeUs;
+ }
+ }
+
+ private long framesToDurationUs(long frameCount) {
+ return (frameCount * C.MICROS_PER_SECOND) / outputSampleRate;
+ }
+
+ private void resetSyncParams() {
+ smoothedPlayheadOffsetUs = 0;
+ playheadOffsetCount = 0;
+ nextPlayheadOffsetIndex = 0;
+ lastPlayheadSampleTimeUs = 0;
+ }
+
+ /**
+ * If passthrough workarounds are enabled, pausing is implemented by forcing the AudioTrack to
+   * underrun. In this case, still behave as if we have pending data; otherwise writing won't
+ * resume.
+ */
+ private boolean forceHasPendingData() {
+ return needsPassthroughWorkarounds
+ && Assertions.checkNotNull(audioTrack).getPlayState() == AudioTrack.PLAYSTATE_PAUSED
+ && getPlaybackHeadPosition() == 0;
+ }
+
+ /**
+ * Returns whether to work around problems with passthrough audio tracks. See [Internal:
+ * b/18899620, b/19187573, b/21145353].
+ */
+ private static boolean needsPassthroughWorkarounds(@C.Encoding int outputEncoding) {
+ return Util.SDK_INT < 23
+ && (outputEncoding == C.ENCODING_AC3 || outputEncoding == C.ENCODING_E_AC3);
+ }
+
+ private long getPlaybackHeadPositionUs() {
+ return framesToDurationUs(getPlaybackHeadPosition());
+ }
+
+ /**
+ * {@link AudioTrack#getPlaybackHeadPosition()} returns a value intended to be interpreted as an
+ * unsigned 32 bit integer, which also wraps around periodically. This method returns the playback
+ * head position as a long that will only wrap around if the value exceeds {@link Long#MAX_VALUE}
+ * (which in practice will never happen).
+ *
+ * @return The playback head position, in frames.
+ */
+ private long getPlaybackHeadPosition() {
+ AudioTrack audioTrack = Assertions.checkNotNull(this.audioTrack);
+ if (stopTimestampUs != C.TIME_UNSET) {
+ // Simulate the playback head position up to the total number of frames submitted.
+ long elapsedTimeSinceStopUs = (SystemClock.elapsedRealtime() * 1000) - stopTimestampUs;
+ long framesSinceStop = (elapsedTimeSinceStopUs * outputSampleRate) / C.MICROS_PER_SECOND;
+ return Math.min(endPlaybackHeadPosition, stopPlaybackHeadPosition + framesSinceStop);
+ }
+
+ int state = audioTrack.getPlayState();
+ if (state == PLAYSTATE_STOPPED) {
+ // The audio track hasn't been started.
+ return 0;
+ }
+
+ long rawPlaybackHeadPosition = 0xFFFFFFFFL & audioTrack.getPlaybackHeadPosition();
+ if (needsPassthroughWorkarounds) {
+ // Work around an issue with passthrough/direct AudioTracks on platform API versions 21/22
+ // where the playback head position jumps back to zero on paused passthrough/direct audio
+ // tracks. See [Internal: b/19187573].
+ if (state == PLAYSTATE_PAUSED && rawPlaybackHeadPosition == 0) {
+ passthroughWorkaroundPauseOffset = lastRawPlaybackHeadPosition;
+ }
+ rawPlaybackHeadPosition += passthroughWorkaroundPauseOffset;
+ }
+
+ if (Util.SDK_INT <= 29) {
+ if (rawPlaybackHeadPosition == 0
+ && lastRawPlaybackHeadPosition > 0
+ && state == PLAYSTATE_PLAYING) {
+ // If connecting a Bluetooth audio device fails, the AudioTrack may be left in a state
+ // where its Java API is in the playing state, but the native track is stopped. When this
+ // happens the playback head position gets stuck at zero. In this case, return the old
+ // playback head position and force the track to be reset after
+ // {@link #FORCE_RESET_WORKAROUND_TIMEOUT_MS} has elapsed.
+ if (forceResetWorkaroundTimeMs == C.TIME_UNSET) {
+ forceResetWorkaroundTimeMs = SystemClock.elapsedRealtime();
+ }
+ return lastRawPlaybackHeadPosition;
+ } else {
+ forceResetWorkaroundTimeMs = C.TIME_UNSET;
+ }
+ }
+
+ if (lastRawPlaybackHeadPosition > rawPlaybackHeadPosition) {
+ // The value must have wrapped around.
+ rawPlaybackHeadWrapCount++;
+ }
+ lastRawPlaybackHeadPosition = rawPlaybackHeadPosition;
+ return rawPlaybackHeadPosition + (rawPlaybackHeadWrapCount << 32);
+ }
+}
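[Editorial note] The wraparound handling above is worth reading in isolation. Below is a minimal
standalone sketch of the same technique (not part of the patch; the class name WrapExtender is
invented for illustration). Like the tracker, it assumes the counter is sampled more often than
once per 2^32 frames, so at most one wrap can occur between samples:

    /** Extends an unsigned 32-bit counter into a monotonically increasing 64-bit value. */
    final class WrapExtender {
      private long lastRaw;
      private long wrapCount;

      long extend(int raw32) {
        long raw = raw32 & 0xFFFFFFFFL; // Reinterpret the signed int as unsigned.
        if (lastRaw > raw) {
          wrapCount++; // The 32-bit counter must have wrapped since the previous sample.
        }
        lastRaw = raw;
        return raw + (wrapCount << 32);
      }
    }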
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AuxEffectInfo.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AuxEffectInfo.java
new file mode 100644
index 0000000000..6039a8c1a8
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/AuxEffectInfo.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import android.media.AudioTrack;
+import android.media.audiofx.AudioEffect;
+import androidx.annotation.Nullable;
+
+/**
+ * Represents auxiliary effect information, which can be used to attach an auxiliary effect to an
+ * underlying {@link AudioTrack}.
+ *
+ * <p>Auxiliary effects can only be applied if the application has the {@code
+ * android.permission.MODIFY_AUDIO_SETTINGS} permission. Apps are responsible for retaining the
+ * associated audio effect instance and releasing it when it's no longer needed. See the
+ * documentation of {@link AudioEffect} for more information.
+ */
+public final class AuxEffectInfo {
+
+ /** Value for {@link #effectId} representing no auxiliary effect. */
+ public static final int NO_AUX_EFFECT_ID = 0;
+
+ /**
+ * The identifier of the effect, or {@link #NO_AUX_EFFECT_ID} if there is no effect.
+ *
+ * @see android.media.AudioTrack#attachAuxEffect(int)
+ */
+ public final int effectId;
+ /**
+ * The send level for the effect.
+ *
+ * @see android.media.AudioTrack#setAuxEffectSendLevel(float)
+ */
+ public final float sendLevel;
+
+ /**
+ * Creates an instance with the given effect identifier and send level.
+ *
+ * @param effectId The effect identifier. This is the value returned by {@link
+   *     AudioEffect#getId()} on the effect, or {@value #NO_AUX_EFFECT_ID}, which represents no
+ * effect. This value is passed to {@link AudioTrack#attachAuxEffect(int)} on the underlying
+ * audio track.
+ * @param sendLevel The send level for the effect, where 0 represents no effect and a value of 1
+ * is full send. If {@code effectId} is not {@value #NO_AUX_EFFECT_ID}, this value is passed
+ * to {@link AudioTrack#setAuxEffectSendLevel(float)} on the underlying audio track.
+ */
+ public AuxEffectInfo(int effectId, float sendLevel) {
+ this.effectId = effectId;
+ this.sendLevel = sendLevel;
+ }
+
+ @Override
+ public boolean equals(@Nullable Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ AuxEffectInfo auxEffectInfo = (AuxEffectInfo) o;
+ return effectId == auxEffectInfo.effectId
+ && Float.compare(auxEffectInfo.sendLevel, sendLevel) == 0;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = 17;
+ result = 31 * result + effectId;
+ result = 31 * result + Float.floatToIntBits(sendLevel);
+ return result;
+ }
+}
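[Editorial note] A hedged usage sketch for AuxEffectInfo (not part of the patch). EnvironmentalReverb
is one platform effect that can be attached this way; the `sink` variable stands for whichever
AudioSink instance the app holds, and the app is assumed to hold the MODIFY_AUDIO_SETTINGS
permission:

    import android.media.audiofx.EnvironmentalReverb;

    // Create a reverb on the output mix (session 0) and route audio to it at full send level.
    EnvironmentalReverb reverb = new EnvironmentalReverb(/* priority= */ 0, /* audioSession= */ 0);
    reverb.setEnabled(true);
    sink.setAuxEffectInfo(new AuxEffectInfo(reverb.getId(), /* sendLevel= */ 1f));

    // Later, when the effect is no longer needed: detach it, then release the instance.
    sink.setAuxEffectInfo(new AuxEffectInfo(AuxEffectInfo.NO_AUX_EFFECT_ID, /* sendLevel= */ 0f));
    reverb.release();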
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/BaseAudioProcessor.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/BaseAudioProcessor.java
new file mode 100644
index 0000000000..189d8f0265
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/BaseAudioProcessor.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import androidx.annotation.CallSuper;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+/**
+ * Base class for audio processors that keep an output buffer and an internal buffer that is reused
+ * whenever input is queued. Subclasses should override {@link #onConfigure(AudioFormat)} to return
+ * the output audio format for the processor if it's active.
+ */
+public abstract class BaseAudioProcessor implements AudioProcessor {
+
+ /** The current input audio format. */
+ protected AudioFormat inputAudioFormat;
+ /** The current output audio format. */
+ protected AudioFormat outputAudioFormat;
+
+ private AudioFormat pendingInputAudioFormat;
+ private AudioFormat pendingOutputAudioFormat;
+ private ByteBuffer buffer;
+ private ByteBuffer outputBuffer;
+ private boolean inputEnded;
+
+ public BaseAudioProcessor() {
+ buffer = EMPTY_BUFFER;
+ outputBuffer = EMPTY_BUFFER;
+ pendingInputAudioFormat = AudioFormat.NOT_SET;
+ pendingOutputAudioFormat = AudioFormat.NOT_SET;
+ inputAudioFormat = AudioFormat.NOT_SET;
+ outputAudioFormat = AudioFormat.NOT_SET;
+ }
+
+ @Override
+ public final AudioFormat configure(AudioFormat inputAudioFormat)
+ throws UnhandledAudioFormatException {
+ pendingInputAudioFormat = inputAudioFormat;
+ pendingOutputAudioFormat = onConfigure(inputAudioFormat);
+ return isActive() ? pendingOutputAudioFormat : AudioFormat.NOT_SET;
+ }
+
+ @Override
+ public boolean isActive() {
+ return pendingOutputAudioFormat != AudioFormat.NOT_SET;
+ }
+
+ @Override
+ public final void queueEndOfStream() {
+ inputEnded = true;
+ onQueueEndOfStream();
+ }
+
+ @CallSuper
+ @Override
+ public ByteBuffer getOutput() {
+ ByteBuffer outputBuffer = this.outputBuffer;
+ this.outputBuffer = EMPTY_BUFFER;
+ return outputBuffer;
+ }
+
+ @CallSuper
+ @SuppressWarnings("ReferenceEquality")
+ @Override
+ public boolean isEnded() {
+ return inputEnded && outputBuffer == EMPTY_BUFFER;
+ }
+
+ @Override
+ public final void flush() {
+ outputBuffer = EMPTY_BUFFER;
+ inputEnded = false;
+ inputAudioFormat = pendingInputAudioFormat;
+ outputAudioFormat = pendingOutputAudioFormat;
+ onFlush();
+ }
+
+ @Override
+ public final void reset() {
+ flush();
+ buffer = EMPTY_BUFFER;
+ pendingInputAudioFormat = AudioFormat.NOT_SET;
+ pendingOutputAudioFormat = AudioFormat.NOT_SET;
+ inputAudioFormat = AudioFormat.NOT_SET;
+ outputAudioFormat = AudioFormat.NOT_SET;
+ onReset();
+ }
+
+ /**
+ * Replaces the current output buffer with a buffer of at least {@code count} bytes and returns
+ * it. Callers should write to the returned buffer then {@link ByteBuffer#flip()} it so it can be
+ * read via {@link #getOutput()}.
+ */
+ protected final ByteBuffer replaceOutputBuffer(int count) {
+ if (buffer.capacity() < count) {
+ buffer = ByteBuffer.allocateDirect(count).order(ByteOrder.nativeOrder());
+ } else {
+ buffer.clear();
+ }
+ outputBuffer = buffer;
+ return buffer;
+ }
+
+ /** Returns whether the current output buffer has any data remaining. */
+ protected final boolean hasPendingOutput() {
+ return outputBuffer.hasRemaining();
+ }
+
+ /** Called when the processor is configured for a new input format. */
+ protected AudioFormat onConfigure(AudioFormat inputAudioFormat)
+ throws UnhandledAudioFormatException {
+ return AudioFormat.NOT_SET;
+ }
+
+ /** Called when the end-of-stream is queued to the processor. */
+ protected void onQueueEndOfStream() {
+ // Do nothing.
+ }
+
+ /** Called when the processor is flushed, directly or as part of resetting. */
+ protected void onFlush() {
+ // Do nothing.
+ }
+
+ /** Called when the processor is reset. */
+ protected void onReset() {
+ // Do nothing.
+ }
+}
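[Editorial note] To make the onConfigure/queueInput contract concrete, here is a minimal subclass
sketch (not part of the patch; HalfGainAudioProcessor is invented). It is assumed to live in the
same package as BaseAudioProcessor, so that AudioFormat and UnhandledAudioFormatException resolve:

    import java.nio.ByteBuffer;
    import org.mozilla.thirdparty.com.google.android.exoplayer2.C;

    /** Halves the volume of 16-bit PCM audio. */
    final class HalfGainAudioProcessor extends BaseAudioProcessor {

      @Override
      protected AudioFormat onConfigure(AudioFormat inputAudioFormat)
          throws UnhandledAudioFormatException {
        if (inputAudioFormat.encoding != C.ENCODING_PCM_16BIT) {
          throw new UnhandledAudioFormatException(inputAudioFormat);
        }
        // Returning a format other than NOT_SET makes the processor active.
        return inputAudioFormat;
      }

      @Override
      public void queueInput(ByteBuffer inputBuffer) {
        ByteBuffer buffer = replaceOutputBuffer(inputBuffer.remaining());
        while (inputBuffer.hasRemaining()) {
          buffer.putShort((short) (inputBuffer.getShort() / 2));
        }
        buffer.flip(); // Make the output readable via getOutput().
      }
    }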
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ChannelMappingAudioProcessor.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ChannelMappingAudioProcessor.java
new file mode 100644
index 0000000000..e8496d4608
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ChannelMappingAudioProcessor.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Assertions;
+import java.nio.ByteBuffer;
+
+/**
+ * An {@link AudioProcessor} that applies a mapping from input channels onto specified output
+ * channels. This can be used to reorder, duplicate or discard channels.
+ */
+@SuppressWarnings("nullness:initialization.fields.uninitialized")
+/* package */ final class ChannelMappingAudioProcessor extends BaseAudioProcessor {
+
+ @Nullable private int[] pendingOutputChannels;
+ @Nullable private int[] outputChannels;
+
+ /**
+ * Resets the channel mapping. After calling this method, call {@link #configure(AudioFormat)} to
+ * start using the new channel map.
+ *
+ * @param outputChannels The mapping from input to output channel indices, or {@code null} to
+ * leave the input unchanged.
+ * @see AudioSink#configure(int, int, int, int, int[], int, int)
+ */
+ public void setChannelMap(@Nullable int[] outputChannels) {
+ pendingOutputChannels = outputChannels;
+ }
+
+ @Override
+ public AudioFormat onConfigure(AudioFormat inputAudioFormat)
+ throws UnhandledAudioFormatException {
+ @Nullable int[] outputChannels = pendingOutputChannels;
+ if (outputChannels == null) {
+ return AudioFormat.NOT_SET;
+ }
+
+ if (inputAudioFormat.encoding != C.ENCODING_PCM_16BIT) {
+ throw new UnhandledAudioFormatException(inputAudioFormat);
+ }
+
+ boolean active = inputAudioFormat.channelCount != outputChannels.length;
+ for (int i = 0; i < outputChannels.length; i++) {
+ int channelIndex = outputChannels[i];
+ if (channelIndex >= inputAudioFormat.channelCount) {
+ throw new UnhandledAudioFormatException(inputAudioFormat);
+ }
+ active |= (channelIndex != i);
+ }
+ return active
+ ? new AudioFormat(inputAudioFormat.sampleRate, outputChannels.length, C.ENCODING_PCM_16BIT)
+ : AudioFormat.NOT_SET;
+ }
+
+ @Override
+ public void queueInput(ByteBuffer inputBuffer) {
+ int[] outputChannels = Assertions.checkNotNull(this.outputChannels);
+ int position = inputBuffer.position();
+ int limit = inputBuffer.limit();
+ int frameCount = (limit - position) / inputAudioFormat.bytesPerFrame;
+ int outputSize = frameCount * outputAudioFormat.bytesPerFrame;
+ ByteBuffer buffer = replaceOutputBuffer(outputSize);
+ while (position < limit) {
+ for (int channelIndex : outputChannels) {
+ buffer.putShort(inputBuffer.getShort(position + 2 * channelIndex));
+ }
+ position += inputAudioFormat.bytesPerFrame;
+ }
+ inputBuffer.position(limit);
+ buffer.flip();
+ }
+
+ @Override
+ protected void onFlush() {
+ outputChannels = pendingOutputChannels;
+ }
+
+ @Override
+ protected void onReset() {
+ outputChannels = null;
+ pendingOutputChannels = null;
+ }
+
+}
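[Editorial note] A few illustrative channel maps for a stereo (L, R) input, where `channelMapper`
is an assumed ChannelMappingAudioProcessor instance. As the setChannelMap javadoc notes, a new map
only takes effect after the processor is reconfigured and flushed:

    channelMapper.setChannelMap(new int[] {1, 0}); // Swap the channels: output is (R, L).
    channelMapper.setChannelMap(new int[] {0, 0}); // Duplicate: both output channels carry L.
    channelMapper.setChannelMap(new int[] {0});    // Discard R: mono output carrying only L.
    channelMapper.setChannelMap(null);             // Leave the input unchanged (processor inactive).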
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/DefaultAudioSink.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/DefaultAudioSink.java
new file mode 100644
index 0000000000..9fc3fbbfd8
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/DefaultAudioSink.java
@@ -0,0 +1,1474 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import android.annotation.SuppressLint;
+import android.annotation.TargetApi;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioTrack;
+import android.os.ConditionVariable;
+import android.os.SystemClock;
+import androidx.annotation.IntDef;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.PlaybackParameters;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.audio.AudioProcessor.UnhandledAudioFormatException;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.extractor.MpegAudioHeader;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Assertions;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Log;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collections;
+
+/**
+ * Plays audio data. The implementation delegates to an {@link AudioTrack} and handles playback
+ * position smoothing, non-blocking writes and reconfiguration.
+ * <p>
+ * If tunneling mode is enabled, care must be taken that audio processors do not output buffers
+ * with a different duration than their input, and that they produce output corresponding to their
+ * last input immediately after that input is queued. This means that, for example, speed
+ * adjustment is not possible while using tunneling.
+ */
+public final class DefaultAudioSink implements AudioSink {
+
+ /**
+ * Thrown when the audio track has provided a spurious timestamp, if {@link
+ * #failOnSpuriousAudioTimestamp} is set.
+ */
+ public static final class InvalidAudioTrackTimestampException extends RuntimeException {
+
+ /**
+ * Creates a new invalid timestamp exception with the specified message.
+ *
+ * @param message The detail message for this exception.
+ */
+ private InvalidAudioTrackTimestampException(String message) {
+ super(message);
+ }
+
+ }
+
+ /**
+ * Provides a chain of audio processors, which are used for any user-defined processing and
+ * applying playback parameters (if supported). Because applying playback parameters can skip and
+ * stretch/compress audio, the sink will query the chain for information on how to transform its
+ * output position to map it onto a media position, via {@link #getMediaDuration(long)} and {@link
+ * #getSkippedOutputFrameCount()}.
+ */
+ public interface AudioProcessorChain {
+
+ /**
+ * Returns the fixed chain of audio processors that will process audio. This method is called
+ * once during initialization, but audio processors may change state to become active/inactive
+ * during playback.
+ */
+ AudioProcessor[] getAudioProcessors();
+
+ /**
+ * Configures audio processors to apply the specified playback parameters immediately, returning
+ * the new parameters, which may differ from those passed in. Only called when processors have
+ * no input pending.
+ *
+ * @param playbackParameters The playback parameters to try to apply.
+ * @return The playback parameters that were actually applied.
+ */
+ PlaybackParameters applyPlaybackParameters(PlaybackParameters playbackParameters);
+
+ /**
+ * Scales the specified playout duration to take into account speedup due to audio processing,
+ * returning an input media duration, in arbitrary units.
+ */
+ long getMediaDuration(long playoutDuration);
+
+ /**
+ * Returns the number of output audio frames skipped since the audio processors were last
+ * flushed.
+ */
+ long getSkippedOutputFrameCount();
+ }
+
+ /**
+ * The default audio processor chain, which applies a (possibly empty) chain of user-defined audio
+ * processors followed by {@link SilenceSkippingAudioProcessor} and {@link SonicAudioProcessor}.
+ */
+ public static class DefaultAudioProcessorChain implements AudioProcessorChain {
+
+ private final AudioProcessor[] audioProcessors;
+ private final SilenceSkippingAudioProcessor silenceSkippingAudioProcessor;
+ private final SonicAudioProcessor sonicAudioProcessor;
+
+ /**
+ * Creates a new default chain of audio processors, with the user-defined {@code
+ * audioProcessors} applied before silence skipping and playback parameters.
+ */
+ public DefaultAudioProcessorChain(AudioProcessor... audioProcessors) {
+ // The passed-in type may be more specialized than AudioProcessor[], so allocate a new array
+ // rather than using Arrays.copyOf.
+ this.audioProcessors = new AudioProcessor[audioProcessors.length + 2];
+ System.arraycopy(
+ /* src= */ audioProcessors,
+ /* srcPos= */ 0,
+ /* dest= */ this.audioProcessors,
+ /* destPos= */ 0,
+ /* length= */ audioProcessors.length);
+ silenceSkippingAudioProcessor = new SilenceSkippingAudioProcessor();
+ sonicAudioProcessor = new SonicAudioProcessor();
+ this.audioProcessors[audioProcessors.length] = silenceSkippingAudioProcessor;
+ this.audioProcessors[audioProcessors.length + 1] = sonicAudioProcessor;
+ }
+
+ @Override
+ public AudioProcessor[] getAudioProcessors() {
+ return audioProcessors;
+ }
+
+ @Override
+ public PlaybackParameters applyPlaybackParameters(PlaybackParameters playbackParameters) {
+ silenceSkippingAudioProcessor.setEnabled(playbackParameters.skipSilence);
+ return new PlaybackParameters(
+ sonicAudioProcessor.setSpeed(playbackParameters.speed),
+ sonicAudioProcessor.setPitch(playbackParameters.pitch),
+ playbackParameters.skipSilence);
+ }
+
+ @Override
+ public long getMediaDuration(long playoutDuration) {
+ return sonicAudioProcessor.scaleDurationForSpeedup(playoutDuration);
+ }
+
+ @Override
+ public long getSkippedOutputFrameCount() {
+ return silenceSkippingAudioProcessor.getSkippedFrames();
+ }
+ }
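+  // Editorial example (not part of the upstream source): wiring a custom processor into the
+  // default chain above, so that it runs before silence skipping and speed adjustment:
+  //
+  //   AudioSink sink =
+  //       new DefaultAudioSink(
+  //           AudioCapabilities.getCapabilities(context), // assumes an android.content.Context
+  //           new DefaultAudioProcessorChain(new HalfGainAudioProcessor()),
+  //           /* enableFloatOutput= */ false);
+  //
+  // HalfGainAudioProcessor is the illustrative subclass sketched after BaseAudioProcessor above.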
+
+ /**
+ * A minimum length for the {@link AudioTrack} buffer, in microseconds.
+ */
+ private static final long MIN_BUFFER_DURATION_US = 250000;
+ /**
+ * A maximum length for the {@link AudioTrack} buffer, in microseconds.
+ */
+ private static final long MAX_BUFFER_DURATION_US = 750000;
+ /**
+ * The length for passthrough {@link AudioTrack} buffers, in microseconds.
+ */
+ private static final long PASSTHROUGH_BUFFER_DURATION_US = 250000;
+ /**
+ * A multiplication factor to apply to the minimum buffer size requested by the underlying
+ * {@link AudioTrack}.
+ */
+ private static final int BUFFER_MULTIPLICATION_FACTOR = 4;
+
+ /** To avoid underruns on some devices (e.g., Broadcom 7271), scale up the AC3 buffer duration. */
+ private static final int AC3_BUFFER_MULTIPLICATION_FACTOR = 2;
+
+ /**
+ * @see AudioTrack#ERROR_BAD_VALUE
+ */
+ private static final int ERROR_BAD_VALUE = AudioTrack.ERROR_BAD_VALUE;
+ /**
+ * @see AudioTrack#MODE_STATIC
+ */
+ private static final int MODE_STATIC = AudioTrack.MODE_STATIC;
+ /**
+ * @see AudioTrack#MODE_STREAM
+ */
+ private static final int MODE_STREAM = AudioTrack.MODE_STREAM;
+ /**
+ * @see AudioTrack#STATE_INITIALIZED
+ */
+ private static final int STATE_INITIALIZED = AudioTrack.STATE_INITIALIZED;
+ /**
+ * @see AudioTrack#WRITE_NON_BLOCKING
+ */
+ @SuppressLint("InlinedApi")
+ private static final int WRITE_NON_BLOCKING = AudioTrack.WRITE_NON_BLOCKING;
+
+ private static final String TAG = "AudioTrack";
+
+ /** Represents states of the {@link #startMediaTimeUs} value. */
+ @Documented
+ @Retention(RetentionPolicy.SOURCE)
+ @IntDef({START_NOT_SET, START_IN_SYNC, START_NEED_SYNC})
+ private @interface StartMediaTimeState {}
+
+ private static final int START_NOT_SET = 0;
+ private static final int START_IN_SYNC = 1;
+ private static final int START_NEED_SYNC = 2;
+
+ /**
+ * Whether to enable a workaround for an issue where an audio effect does not keep its session
+ * active across releasing/initializing a new audio track, on platform builds where
+ * {@link Util#SDK_INT} &lt; 21.
+ * <p>
+ * The flag must be set before creating a player.
+ */
+ public static boolean enablePreV21AudioSessionWorkaround = false;
+
+ /**
+ * Whether to throw an {@link InvalidAudioTrackTimestampException} when a spurious timestamp is
+ * reported from {@link AudioTrack#getTimestamp}.
+ * <p>
+ * The flag must be set before creating a player. Should be set to {@code true} for testing and
+ * debugging purposes only.
+ */
+ public static boolean failOnSpuriousAudioTimestamp = false;
+
+ @Nullable private final AudioCapabilities audioCapabilities;
+ private final AudioProcessorChain audioProcessorChain;
+ private final boolean enableFloatOutput;
+ private final ChannelMappingAudioProcessor channelMappingAudioProcessor;
+ private final TrimmingAudioProcessor trimmingAudioProcessor;
+ private final AudioProcessor[] toIntPcmAvailableAudioProcessors;
+ private final AudioProcessor[] toFloatPcmAvailableAudioProcessors;
+ private final ConditionVariable releasingConditionVariable;
+ private final AudioTrackPositionTracker audioTrackPositionTracker;
+ private final ArrayDeque<PlaybackParametersCheckpoint> playbackParametersCheckpoints;
+
+ @Nullable private Listener listener;
+ /** Used to keep the audio session active on pre-V21 builds (see {@link #initialize(long)}). */
+ @Nullable private AudioTrack keepSessionIdAudioTrack;
+
+ @Nullable private Configuration pendingConfiguration;
+ private Configuration configuration;
+ private AudioTrack audioTrack;
+
+ private AudioAttributes audioAttributes;
+ @Nullable private PlaybackParameters afterDrainPlaybackParameters;
+ private PlaybackParameters playbackParameters;
+ private long playbackParametersOffsetUs;
+ private long playbackParametersPositionUs;
+
+ @Nullable private ByteBuffer avSyncHeader;
+ private int bytesUntilNextAvSync;
+
+ private long submittedPcmBytes;
+ private long submittedEncodedFrames;
+ private long writtenPcmBytes;
+ private long writtenEncodedFrames;
+ private int framesPerEncodedSample;
+ private @StartMediaTimeState int startMediaTimeState;
+ private long startMediaTimeUs;
+ private float volume;
+
+ private AudioProcessor[] activeAudioProcessors;
+ private ByteBuffer[] outputBuffers;
+ @Nullable private ByteBuffer inputBuffer;
+ @Nullable private ByteBuffer outputBuffer;
+ private byte[] preV21OutputBuffer;
+ private int preV21OutputBufferOffset;
+ private int drainingAudioProcessorIndex;
+ private boolean handledEndOfStream;
+ private boolean stoppedAudioTrack;
+
+ private boolean playing;
+ private int audioSessionId;
+ private AuxEffectInfo auxEffectInfo;
+ private boolean tunneling;
+ private long lastFeedElapsedRealtimeMs;
+
+ /**
+ * Creates a new default audio sink.
+ *
+ * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
+ * default capabilities (no encoded audio passthrough support) should be assumed.
+ * @param audioProcessors An array of {@link AudioProcessor}s that will process PCM audio before
+ * output. May be empty.
+ */
+ public DefaultAudioSink(
+ @Nullable AudioCapabilities audioCapabilities, AudioProcessor[] audioProcessors) {
+ this(audioCapabilities, audioProcessors, /* enableFloatOutput= */ false);
+ }
+
+ /**
+ * Creates a new default audio sink, optionally using float output for high resolution PCM.
+ *
+ * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
+ * default capabilities (no encoded audio passthrough support) should be assumed.
+ * @param audioProcessors An array of {@link AudioProcessor}s that will process PCM audio before
+ * output. May be empty.
+ * @param enableFloatOutput Whether to enable 32-bit float output. Where possible, 32-bit float
+ * output will be used if the input is 32-bit float, and also if the input is high resolution
+ * (24-bit or 32-bit) integer PCM. Audio processing (for example, speed adjustment) will not
+ * be available when float output is in use.
+ */
+ public DefaultAudioSink(
+ @Nullable AudioCapabilities audioCapabilities,
+ AudioProcessor[] audioProcessors,
+ boolean enableFloatOutput) {
+ this(audioCapabilities, new DefaultAudioProcessorChain(audioProcessors), enableFloatOutput);
+ }
+
+ /**
+ * Creates a new default audio sink, optionally using float output for high resolution PCM and
+ * with the specified {@code audioProcessorChain}.
+ *
+ * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
+ * default capabilities (no encoded audio passthrough support) should be assumed.
+ * @param audioProcessorChain An {@link AudioProcessorChain} which is used to apply playback
+   *     parameter adjustments. The instance passed in must not be reused in other sinks.
+ * @param enableFloatOutput Whether to enable 32-bit float output. Where possible, 32-bit float
+ * output will be used if the input is 32-bit float, and also if the input is high resolution
+ * (24-bit or 32-bit) integer PCM. Audio processing (for example, speed adjustment) will not
+ * be available when float output is in use.
+ */
+ public DefaultAudioSink(
+ @Nullable AudioCapabilities audioCapabilities,
+ AudioProcessorChain audioProcessorChain,
+ boolean enableFloatOutput) {
+ this.audioCapabilities = audioCapabilities;
+ this.audioProcessorChain = Assertions.checkNotNull(audioProcessorChain);
+ this.enableFloatOutput = enableFloatOutput;
+ releasingConditionVariable = new ConditionVariable(true);
+ audioTrackPositionTracker = new AudioTrackPositionTracker(new PositionTrackerListener());
+ channelMappingAudioProcessor = new ChannelMappingAudioProcessor();
+ trimmingAudioProcessor = new TrimmingAudioProcessor();
+ ArrayList<AudioProcessor> toIntPcmAudioProcessors = new ArrayList<>();
+ Collections.addAll(
+ toIntPcmAudioProcessors,
+ new ResamplingAudioProcessor(),
+ channelMappingAudioProcessor,
+ trimmingAudioProcessor);
+ Collections.addAll(toIntPcmAudioProcessors, audioProcessorChain.getAudioProcessors());
+ toIntPcmAvailableAudioProcessors = toIntPcmAudioProcessors.toArray(new AudioProcessor[0]);
+ toFloatPcmAvailableAudioProcessors = new AudioProcessor[] {new FloatResamplingAudioProcessor()};
+ volume = 1.0f;
+ startMediaTimeState = START_NOT_SET;
+ audioAttributes = AudioAttributes.DEFAULT;
+ audioSessionId = C.AUDIO_SESSION_ID_UNSET;
+ auxEffectInfo = new AuxEffectInfo(AuxEffectInfo.NO_AUX_EFFECT_ID, 0f);
+ playbackParameters = PlaybackParameters.DEFAULT;
+ drainingAudioProcessorIndex = C.INDEX_UNSET;
+ activeAudioProcessors = new AudioProcessor[0];
+ outputBuffers = new ByteBuffer[0];
+ playbackParametersCheckpoints = new ArrayDeque<>();
+ }
+
+ // AudioSink implementation.
+
+ @Override
+ public void setListener(Listener listener) {
+ this.listener = listener;
+ }
+
+ @Override
+ public boolean supportsOutput(int channelCount, @C.Encoding int encoding) {
+ if (Util.isEncodingLinearPcm(encoding)) {
+      // AudioTrack supports 16-bit integer PCM output on all platform API versions, and float
+      // output only from platform API version 21 onwards. Other integer PCM encodings are resampled by this
+ // sink to 16-bit PCM. We assume that the audio framework will downsample any number of
+ // channels to the output device's required number of channels.
+ return encoding != C.ENCODING_PCM_FLOAT || Util.SDK_INT >= 21;
+ } else {
+ return audioCapabilities != null
+ && audioCapabilities.supportsEncoding(encoding)
+ && (channelCount == Format.NO_VALUE
+ || channelCount <= audioCapabilities.getMaxChannelCount());
+ }
+ }
+
+ @Override
+ public long getCurrentPositionUs(boolean sourceEnded) {
+ if (!isInitialized() || startMediaTimeState == START_NOT_SET) {
+ return CURRENT_POSITION_NOT_SET;
+ }
+ long positionUs = audioTrackPositionTracker.getCurrentPositionUs(sourceEnded);
+ positionUs = Math.min(positionUs, configuration.framesToDurationUs(getWrittenFrames()));
+ return startMediaTimeUs + applySkipping(applySpeedup(positionUs));
+ }
+
+ @Override
+ public void configure(
+ @C.Encoding int inputEncoding,
+ int inputChannelCount,
+ int inputSampleRate,
+ int specifiedBufferSize,
+ @Nullable int[] outputChannels,
+ int trimStartFrames,
+ int trimEndFrames)
+ throws ConfigurationException {
+ if (Util.SDK_INT < 21 && inputChannelCount == 8 && outputChannels == null) {
+ // AudioTrack doesn't support 8 channel output before Android L. Discard the last two (side)
+ // channels to give a 6 channel stream that is supported.
+ outputChannels = new int[6];
+ for (int i = 0; i < outputChannels.length; i++) {
+ outputChannels[i] = i;
+ }
+ }
+
+ boolean isInputPcm = Util.isEncodingLinearPcm(inputEncoding);
+ boolean processingEnabled = isInputPcm;
+ int sampleRate = inputSampleRate;
+ int channelCount = inputChannelCount;
+ @C.Encoding int encoding = inputEncoding;
+ boolean useFloatOutput =
+ enableFloatOutput
+ && supportsOutput(inputChannelCount, C.ENCODING_PCM_FLOAT)
+ && Util.isEncodingHighResolutionPcm(inputEncoding);
+ AudioProcessor[] availableAudioProcessors =
+ useFloatOutput ? toFloatPcmAvailableAudioProcessors : toIntPcmAvailableAudioProcessors;
+ if (processingEnabled) {
+ trimmingAudioProcessor.setTrimFrameCount(trimStartFrames, trimEndFrames);
+ channelMappingAudioProcessor.setChannelMap(outputChannels);
+ AudioProcessor.AudioFormat outputFormat =
+ new AudioProcessor.AudioFormat(sampleRate, channelCount, encoding);
+ for (AudioProcessor audioProcessor : availableAudioProcessors) {
+ try {
+ AudioProcessor.AudioFormat nextFormat = audioProcessor.configure(outputFormat);
+ if (audioProcessor.isActive()) {
+ outputFormat = nextFormat;
+ }
+ } catch (UnhandledAudioFormatException e) {
+ throw new ConfigurationException(e);
+ }
+ }
+ sampleRate = outputFormat.sampleRate;
+ channelCount = outputFormat.channelCount;
+ encoding = outputFormat.encoding;
+ }
+
+ int outputChannelConfig = getChannelConfig(channelCount, isInputPcm);
+ if (outputChannelConfig == AudioFormat.CHANNEL_INVALID) {
+ throw new ConfigurationException("Unsupported channel count: " + channelCount);
+ }
+
+ int inputPcmFrameSize =
+ isInputPcm ? Util.getPcmFrameSize(inputEncoding, inputChannelCount) : C.LENGTH_UNSET;
+ int outputPcmFrameSize =
+ isInputPcm ? Util.getPcmFrameSize(encoding, channelCount) : C.LENGTH_UNSET;
+ boolean canApplyPlaybackParameters = processingEnabled && !useFloatOutput;
+ Configuration pendingConfiguration =
+ new Configuration(
+ isInputPcm,
+ inputPcmFrameSize,
+ inputSampleRate,
+ outputPcmFrameSize,
+ sampleRate,
+ outputChannelConfig,
+ encoding,
+ specifiedBufferSize,
+ processingEnabled,
+ canApplyPlaybackParameters,
+ availableAudioProcessors);
+ if (isInitialized()) {
+ this.pendingConfiguration = pendingConfiguration;
+ } else {
+ configuration = pendingConfiguration;
+ }
+ }
+
+ private void setupAudioProcessors() {
+ AudioProcessor[] audioProcessors = configuration.availableAudioProcessors;
+ ArrayList<AudioProcessor> newAudioProcessors = new ArrayList<>();
+ for (AudioProcessor audioProcessor : audioProcessors) {
+ if (audioProcessor.isActive()) {
+ newAudioProcessors.add(audioProcessor);
+ } else {
+ audioProcessor.flush();
+ }
+ }
+ int count = newAudioProcessors.size();
+ activeAudioProcessors = newAudioProcessors.toArray(new AudioProcessor[count]);
+ outputBuffers = new ByteBuffer[count];
+ flushAudioProcessors();
+ }
+
+ private void flushAudioProcessors() {
+ for (int i = 0; i < activeAudioProcessors.length; i++) {
+ AudioProcessor audioProcessor = activeAudioProcessors[i];
+ audioProcessor.flush();
+ outputBuffers[i] = audioProcessor.getOutput();
+ }
+ }
+
+ private void initialize(long presentationTimeUs) throws InitializationException {
+ // If we're asynchronously releasing a previous audio track then we block until it has been
+ // released. This guarantees that we cannot end up in a state where we have multiple audio
+ // track instances. Without this guarantee it would be possible, in extreme cases, to exhaust
+ // the shared memory that's available for audio track buffers. This would in turn cause the
+ // initialization of the audio track to fail.
+ releasingConditionVariable.block();
+
+ audioTrack =
+ Assertions.checkNotNull(configuration)
+ .buildAudioTrack(tunneling, audioAttributes, audioSessionId);
+ int audioSessionId = audioTrack.getAudioSessionId();
+ if (enablePreV21AudioSessionWorkaround) {
+ if (Util.SDK_INT < 21) {
+ // The workaround creates an audio track with a two byte buffer on the same session, and
+ // does not release it until this object is released, which keeps the session active.
+ if (keepSessionIdAudioTrack != null
+ && audioSessionId != keepSessionIdAudioTrack.getAudioSessionId()) {
+ releaseKeepSessionIdAudioTrack();
+ }
+ if (keepSessionIdAudioTrack == null) {
+ keepSessionIdAudioTrack = initializeKeepSessionIdAudioTrack(audioSessionId);
+ }
+ }
+ }
+ if (this.audioSessionId != audioSessionId) {
+ this.audioSessionId = audioSessionId;
+ if (listener != null) {
+ listener.onAudioSessionId(audioSessionId);
+ }
+ }
+
+ applyPlaybackParameters(playbackParameters, presentationTimeUs);
+
+ audioTrackPositionTracker.setAudioTrack(
+ audioTrack,
+ configuration.outputEncoding,
+ configuration.outputPcmFrameSize,
+ configuration.bufferSize);
+ setVolumeInternal();
+
+ if (auxEffectInfo.effectId != AuxEffectInfo.NO_AUX_EFFECT_ID) {
+ audioTrack.attachAuxEffect(auxEffectInfo.effectId);
+ audioTrack.setAuxEffectSendLevel(auxEffectInfo.sendLevel);
+ }
+ }
+
+ @Override
+ public void play() {
+ playing = true;
+ if (isInitialized()) {
+ audioTrackPositionTracker.start();
+ audioTrack.play();
+ }
+ }
+
+ @Override
+ public void handleDiscontinuity() {
+ // Force resynchronization after a skipped buffer.
+ if (startMediaTimeState == START_IN_SYNC) {
+ startMediaTimeState = START_NEED_SYNC;
+ }
+ }
+
+ @Override
+ @SuppressWarnings("ReferenceEquality")
+ public boolean handleBuffer(ByteBuffer buffer, long presentationTimeUs)
+ throws InitializationException, WriteException {
+ Assertions.checkArgument(inputBuffer == null || buffer == inputBuffer);
+
+ if (pendingConfiguration != null) {
+ if (!drainAudioProcessorsToEndOfStream()) {
+ // There's still pending data in audio processors to write to the track.
+ return false;
+ } else if (!pendingConfiguration.canReuseAudioTrack(configuration)) {
+ playPendingData();
+ if (hasPendingData()) {
+ // We're waiting for playout on the current audio track to finish.
+ return false;
+ }
+ flush();
+ } else {
+ // The current audio track can be reused for the new configuration.
+ configuration = pendingConfiguration;
+ pendingConfiguration = null;
+ }
+ // Re-apply playback parameters.
+ applyPlaybackParameters(playbackParameters, presentationTimeUs);
+ }
+
+ if (!isInitialized()) {
+ initialize(presentationTimeUs);
+ if (playing) {
+ play();
+ }
+ }
+
+ if (!audioTrackPositionTracker.mayHandleBuffer(getWrittenFrames())) {
+ return false;
+ }
+
+ if (inputBuffer == null) {
+ // We are seeing this buffer for the first time.
+ if (!buffer.hasRemaining()) {
+ // The buffer is empty.
+ return true;
+ }
+
+ if (!configuration.isInputPcm && framesPerEncodedSample == 0) {
+ // If this is the first encoded sample, calculate the sample size in frames.
+ framesPerEncodedSample = getFramesPerEncodedSample(configuration.outputEncoding, buffer);
+ if (framesPerEncodedSample == 0) {
+ // We still don't know the number of frames per sample, so drop the buffer.
+ // For TrueHD this can occur after some seek operations, as not every sample starts with
+ // a syncframe header. If we chunked samples together so the extracted samples always
+ // started with a syncframe header, the chunks would be too large.
+ return true;
+ }
+ }
+
+ if (afterDrainPlaybackParameters != null) {
+ if (!drainAudioProcessorsToEndOfStream()) {
+ // Don't process any more input until draining completes.
+ return false;
+ }
+ PlaybackParameters newPlaybackParameters = afterDrainPlaybackParameters;
+ afterDrainPlaybackParameters = null;
+ applyPlaybackParameters(newPlaybackParameters, presentationTimeUs);
+ }
+
+ if (startMediaTimeState == START_NOT_SET) {
+ startMediaTimeUs = Math.max(0, presentationTimeUs);
+ startMediaTimeState = START_IN_SYNC;
+ } else {
+ // Sanity check that presentationTimeUs is consistent with the expected value.
+ long expectedPresentationTimeUs =
+ startMediaTimeUs
+ + configuration.inputFramesToDurationUs(
+ getSubmittedFrames() - trimmingAudioProcessor.getTrimmedFrameCount());
+ if (startMediaTimeState == START_IN_SYNC
+ && Math.abs(expectedPresentationTimeUs - presentationTimeUs) > 200000) {
+ Log.e(TAG, "Discontinuity detected [expected " + expectedPresentationTimeUs + ", got "
+ + presentationTimeUs + "]");
+ startMediaTimeState = START_NEED_SYNC;
+ }
+ if (startMediaTimeState == START_NEED_SYNC) {
+ // Adjust startMediaTimeUs to be consistent with the current buffer's start time and the
+ // number of bytes submitted.
+ long adjustmentUs = presentationTimeUs - expectedPresentationTimeUs;
+ startMediaTimeUs += adjustmentUs;
+ startMediaTimeState = START_IN_SYNC;
+ if (listener != null && adjustmentUs != 0) {
+ listener.onPositionDiscontinuity();
+ }
+ }
+ }
+
+ if (configuration.isInputPcm) {
+ submittedPcmBytes += buffer.remaining();
+ } else {
+ submittedEncodedFrames += framesPerEncodedSample;
+ }
+
+ inputBuffer = buffer;
+ }
+
+ if (configuration.processingEnabled) {
+ processBuffers(presentationTimeUs);
+ } else {
+ writeBuffer(inputBuffer, presentationTimeUs);
+ }
+
+ if (!inputBuffer.hasRemaining()) {
+ inputBuffer = null;
+ return true;
+ }
+
+ if (audioTrackPositionTracker.isStalled(getWrittenFrames())) {
+ Log.w(TAG, "Resetting stalled audio track");
+ flush();
+ return true;
+ }
+
+ return false;
+ }
+
+ private void processBuffers(long avSyncPresentationTimeUs) throws WriteException {
+ int count = activeAudioProcessors.length;
+ int index = count;
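+    // Editorial note: index == count stands for the AudioTrack itself. The loop walks backwards
+    // through the chain to pull more input from upstream (index--) and, whenever a processor
+    // produces output, walks forwards again (index++) to push that output toward the track.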
+ while (index >= 0) {
+ ByteBuffer input = index > 0 ? outputBuffers[index - 1]
+ : (inputBuffer != null ? inputBuffer : AudioProcessor.EMPTY_BUFFER);
+ if (index == count) {
+ writeBuffer(input, avSyncPresentationTimeUs);
+ } else {
+ AudioProcessor audioProcessor = activeAudioProcessors[index];
+ audioProcessor.queueInput(input);
+ ByteBuffer output = audioProcessor.getOutput();
+ outputBuffers[index] = output;
+ if (output.hasRemaining()) {
+ // Handle the output as input to the next audio processor or the AudioTrack.
+ index++;
+ continue;
+ }
+ }
+
+ if (input.hasRemaining()) {
+ // The input wasn't consumed and no output was produced, so give up for now.
+ return;
+ }
+
+ // Get more input from upstream.
+ index--;
+ }
+ }
+
+ @SuppressWarnings("ReferenceEquality")
+ private void writeBuffer(ByteBuffer buffer, long avSyncPresentationTimeUs) throws WriteException {
+ if (!buffer.hasRemaining()) {
+ return;
+ }
+ if (outputBuffer != null) {
+ Assertions.checkArgument(outputBuffer == buffer);
+ } else {
+ outputBuffer = buffer;
+ if (Util.SDK_INT < 21) {
+ int bytesRemaining = buffer.remaining();
+ if (preV21OutputBuffer == null || preV21OutputBuffer.length < bytesRemaining) {
+ preV21OutputBuffer = new byte[bytesRemaining];
+ }
+ int originalPosition = buffer.position();
+ buffer.get(preV21OutputBuffer, 0, bytesRemaining);
+ buffer.position(originalPosition);
+ preV21OutputBufferOffset = 0;
+ }
+ }
+ int bytesRemaining = buffer.remaining();
+ int bytesWritten = 0;
+ if (Util.SDK_INT < 21) { // isInputPcm == true
+ // Work out how many bytes we can write without the risk of blocking.
+ int bytesToWrite = audioTrackPositionTracker.getAvailableBufferSize(writtenPcmBytes);
+ if (bytesToWrite > 0) {
+ bytesToWrite = Math.min(bytesRemaining, bytesToWrite);
+ bytesWritten = audioTrack.write(preV21OutputBuffer, preV21OutputBufferOffset, bytesToWrite);
+ if (bytesWritten > 0) {
+ preV21OutputBufferOffset += bytesWritten;
+ buffer.position(buffer.position() + bytesWritten);
+ }
+ }
+ } else if (tunneling) {
+ Assertions.checkState(avSyncPresentationTimeUs != C.TIME_UNSET);
+ bytesWritten = writeNonBlockingWithAvSyncV21(audioTrack, buffer, bytesRemaining,
+ avSyncPresentationTimeUs);
+ } else {
+ bytesWritten = writeNonBlockingV21(audioTrack, buffer, bytesRemaining);
+ }
+
+ lastFeedElapsedRealtimeMs = SystemClock.elapsedRealtime();
+
+ if (bytesWritten < 0) {
+ throw new WriteException(bytesWritten);
+ }
+
+ if (configuration.isInputPcm) {
+ writtenPcmBytes += bytesWritten;
+ }
+ if (bytesWritten == bytesRemaining) {
+ if (!configuration.isInputPcm) {
+ writtenEncodedFrames += framesPerEncodedSample;
+ }
+ outputBuffer = null;
+ }
+ }
+
+ @Override
+ public void playToEndOfStream() throws WriteException {
+ if (!handledEndOfStream && isInitialized() && drainAudioProcessorsToEndOfStream()) {
+ playPendingData();
+ handledEndOfStream = true;
+ }
+ }
+
+ private boolean drainAudioProcessorsToEndOfStream() throws WriteException {
+ boolean audioProcessorNeedsEndOfStream = false;
+ if (drainingAudioProcessorIndex == C.INDEX_UNSET) {
+ drainingAudioProcessorIndex =
+ configuration.processingEnabled ? 0 : activeAudioProcessors.length;
+ audioProcessorNeedsEndOfStream = true;
+ }
+ while (drainingAudioProcessorIndex < activeAudioProcessors.length) {
+ AudioProcessor audioProcessor = activeAudioProcessors[drainingAudioProcessorIndex];
+ if (audioProcessorNeedsEndOfStream) {
+ audioProcessor.queueEndOfStream();
+ }
+ processBuffers(C.TIME_UNSET);
+ if (!audioProcessor.isEnded()) {
+ return false;
+ }
+ audioProcessorNeedsEndOfStream = true;
+ drainingAudioProcessorIndex++;
+ }
+
+ // Finish writing any remaining output to the track.
+ if (outputBuffer != null) {
+ writeBuffer(outputBuffer, C.TIME_UNSET);
+ if (outputBuffer != null) {
+ return false;
+ }
+ }
+ drainingAudioProcessorIndex = C.INDEX_UNSET;
+ return true;
+ }
+
+ @Override
+ public boolean isEnded() {
+ return !isInitialized() || (handledEndOfStream && !hasPendingData());
+ }
+
+ @Override
+ public boolean hasPendingData() {
+ return isInitialized() && audioTrackPositionTracker.hasPendingData(getWrittenFrames());
+ }
+
+ @Override
+ public void setPlaybackParameters(PlaybackParameters playbackParameters) {
+ if (configuration != null && !configuration.canApplyPlaybackParameters) {
+ this.playbackParameters = PlaybackParameters.DEFAULT;
+ return;
+ }
+ PlaybackParameters lastSetPlaybackParameters = getPlaybackParameters();
+ if (!playbackParameters.equals(lastSetPlaybackParameters)) {
+ if (isInitialized()) {
+ // Drain the audio processors so we can determine the frame position at which the new
+ // parameters apply.
+ afterDrainPlaybackParameters = playbackParameters;
+ } else {
+ // Update the playback parameters now. They will be applied to the audio processors during
+ // initialization.
+ this.playbackParameters = playbackParameters;
+ }
+ }
+ }
+
+ @Override
+ public PlaybackParameters getPlaybackParameters() {
+ // Mask the already set parameters.
+ return afterDrainPlaybackParameters != null
+ ? afterDrainPlaybackParameters
+ : !playbackParametersCheckpoints.isEmpty()
+ ? playbackParametersCheckpoints.getLast().playbackParameters
+ : playbackParameters;
+ }
+
+ @Override
+ public void setAudioAttributes(AudioAttributes audioAttributes) {
+ if (this.audioAttributes.equals(audioAttributes)) {
+ return;
+ }
+ this.audioAttributes = audioAttributes;
+ if (tunneling) {
+ // The audio attributes are ignored in tunneling mode, so no need to reset.
+ return;
+ }
+ flush();
+ audioSessionId = C.AUDIO_SESSION_ID_UNSET;
+ }
+
+ @Override
+ public void setAudioSessionId(int audioSessionId) {
+ if (this.audioSessionId != audioSessionId) {
+ this.audioSessionId = audioSessionId;
+ flush();
+ }
+ }
+
+ @Override
+ public void setAuxEffectInfo(AuxEffectInfo auxEffectInfo) {
+ if (this.auxEffectInfo.equals(auxEffectInfo)) {
+ return;
+ }
+ int effectId = auxEffectInfo.effectId;
+ float sendLevel = auxEffectInfo.sendLevel;
+ if (audioTrack != null) {
+ if (this.auxEffectInfo.effectId != effectId) {
+ audioTrack.attachAuxEffect(effectId);
+ }
+ if (effectId != AuxEffectInfo.NO_AUX_EFFECT_ID) {
+ audioTrack.setAuxEffectSendLevel(sendLevel);
+ }
+ }
+ this.auxEffectInfo = auxEffectInfo;
+ }
+
+ @Override
+ public void enableTunnelingV21(int tunnelingAudioSessionId) {
+ Assertions.checkState(Util.SDK_INT >= 21);
+ if (!tunneling || audioSessionId != tunnelingAudioSessionId) {
+ tunneling = true;
+ audioSessionId = tunnelingAudioSessionId;
+ flush();
+ }
+ }
+
+ @Override
+ public void disableTunneling() {
+ if (tunneling) {
+ tunneling = false;
+ audioSessionId = C.AUDIO_SESSION_ID_UNSET;
+ flush();
+ }
+ }
+
+ @Override
+ public void setVolume(float volume) {
+ if (this.volume != volume) {
+ this.volume = volume;
+ setVolumeInternal();
+ }
+ }
+
+ private void setVolumeInternal() {
+ if (!isInitialized()) {
+ // Do nothing.
+ } else if (Util.SDK_INT >= 21) {
+ setVolumeInternalV21(audioTrack, volume);
+ } else {
+ setVolumeInternalV3(audioTrack, volume);
+ }
+ }
+
+ @Override
+ public void pause() {
+ playing = false;
+ if (isInitialized() && audioTrackPositionTracker.pause()) {
+ audioTrack.pause();
+ }
+ }
+
+ @Override
+ public void flush() {
+ if (isInitialized()) {
+ submittedPcmBytes = 0;
+ submittedEncodedFrames = 0;
+ writtenPcmBytes = 0;
+ writtenEncodedFrames = 0;
+ framesPerEncodedSample = 0;
+ if (afterDrainPlaybackParameters != null) {
+ playbackParameters = afterDrainPlaybackParameters;
+ afterDrainPlaybackParameters = null;
+ } else if (!playbackParametersCheckpoints.isEmpty()) {
+ playbackParameters = playbackParametersCheckpoints.getLast().playbackParameters;
+ }
+ playbackParametersCheckpoints.clear();
+ playbackParametersOffsetUs = 0;
+ playbackParametersPositionUs = 0;
+ trimmingAudioProcessor.resetTrimmedFrameCount();
+ flushAudioProcessors();
+ inputBuffer = null;
+ outputBuffer = null;
+ stoppedAudioTrack = false;
+ handledEndOfStream = false;
+ drainingAudioProcessorIndex = C.INDEX_UNSET;
+ avSyncHeader = null;
+ bytesUntilNextAvSync = 0;
+ startMediaTimeState = START_NOT_SET;
+ if (audioTrackPositionTracker.isPlaying()) {
+ audioTrack.pause();
+ }
+ // AudioTrack.release can take some time, so we call it on a background thread.
+ final AudioTrack toRelease = audioTrack;
+ audioTrack = null;
+ if (pendingConfiguration != null) {
+ configuration = pendingConfiguration;
+ pendingConfiguration = null;
+ }
+ audioTrackPositionTracker.reset();
+ releasingConditionVariable.close();
+ new Thread() {
+ @Override
+ public void run() {
+ try {
+ toRelease.flush();
+ toRelease.release();
+ } finally {
+ releasingConditionVariable.open();
+ }
+ }
+ }.start();
+ }
+ }
+
+ @Override
+ public void reset() {
+ flush();
+ releaseKeepSessionIdAudioTrack();
+ for (AudioProcessor audioProcessor : toIntPcmAvailableAudioProcessors) {
+ audioProcessor.reset();
+ }
+ for (AudioProcessor audioProcessor : toFloatPcmAvailableAudioProcessors) {
+ audioProcessor.reset();
+ }
+ audioSessionId = C.AUDIO_SESSION_ID_UNSET;
+ playing = false;
+ }
+
+ /**
+ * Releases {@link #keepSessionIdAudioTrack} asynchronously, if it is non-{@code null}.
+ */
+ private void releaseKeepSessionIdAudioTrack() {
+ if (keepSessionIdAudioTrack == null) {
+ return;
+ }
+
+ // AudioTrack.release can take some time, so we call it on a background thread.
+ final AudioTrack toRelease = keepSessionIdAudioTrack;
+ keepSessionIdAudioTrack = null;
+ new Thread() {
+ @Override
+ public void run() {
+ toRelease.release();
+ }
+ }.start();
+ }
+
+ private void applyPlaybackParameters(
+ PlaybackParameters playbackParameters, long presentationTimeUs) {
+ PlaybackParameters newPlaybackParameters =
+ configuration.canApplyPlaybackParameters
+ ? audioProcessorChain.applyPlaybackParameters(playbackParameters)
+ : PlaybackParameters.DEFAULT;
+ // Store the position and corresponding media time from which the parameters will apply.
+ playbackParametersCheckpoints.add(
+ new PlaybackParametersCheckpoint(
+ newPlaybackParameters,
+ /* mediaTimeUs= */ Math.max(0, presentationTimeUs),
+ /* positionUs= */ configuration.framesToDurationUs(getWrittenFrames())));
+ setupAudioProcessors();
+ }
+
+ private long applySpeedup(long positionUs) {
+ @Nullable PlaybackParametersCheckpoint checkpoint = null;
+ while (!playbackParametersCheckpoints.isEmpty()
+ && positionUs >= playbackParametersCheckpoints.getFirst().positionUs) {
+ checkpoint = playbackParametersCheckpoints.remove();
+ }
+ if (checkpoint != null) {
+ // We are playing (or about to play) media with the new playback parameters, so update them.
+ playbackParameters = checkpoint.playbackParameters;
+ playbackParametersPositionUs = checkpoint.positionUs;
+ playbackParametersOffsetUs = checkpoint.mediaTimeUs - startMediaTimeUs;
+ }
+
+ if (playbackParameters.speed == 1f) {
+ return positionUs + playbackParametersOffsetUs - playbackParametersPositionUs;
+ }
+
+ if (playbackParametersCheckpoints.isEmpty()) {
+ return playbackParametersOffsetUs
+ + audioProcessorChain.getMediaDuration(positionUs - playbackParametersPositionUs);
+ }
+
+ // We are playing data at a previous playback speed, so fall back to multiplying by the speed.
+ return playbackParametersOffsetUs
+ + Util.getMediaDurationForPlayoutDuration(
+ positionUs - playbackParametersPositionUs, playbackParameters.speed);
+ }
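+
+  // Worked example (editorial): with startMediaTimeUs = 0, a checkpoint storing mediaTimeUs = 10s
+  // and positionUs = 5s yields playbackParametersOffsetUs = 10s and playbackParametersPositionUs
+  // = 5s. At speed 2, a track position of 5.5s then maps to 10s + getMediaDuration(0.5s), which is
+  // approximately 10s + 1s = 11s of media time.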
+
+ private long applySkipping(long positionUs) {
+ return positionUs
+ + configuration.framesToDurationUs(audioProcessorChain.getSkippedOutputFrameCount());
+ }
+
+ private boolean isInitialized() {
+ return audioTrack != null;
+ }
+
+ private long getSubmittedFrames() {
+ return configuration.isInputPcm
+ ? (submittedPcmBytes / configuration.inputPcmFrameSize)
+ : submittedEncodedFrames;
+ }
+
+ private long getWrittenFrames() {
+ return configuration.isInputPcm
+ ? (writtenPcmBytes / configuration.outputPcmFrameSize)
+ : writtenEncodedFrames;
+ }
+
+ private static AudioTrack initializeKeepSessionIdAudioTrack(int audioSessionId) {
+ int sampleRate = 4000; // Equal to private AudioTrack.MIN_SAMPLE_RATE.
+ int channelConfig = AudioFormat.CHANNEL_OUT_MONO;
+ @C.PcmEncoding int encoding = C.ENCODING_PCM_16BIT;
+ int bufferSize = 2; // Use a two byte buffer, as it is not actually used for playback.
+ return new AudioTrack(C.STREAM_TYPE_DEFAULT, sampleRate, channelConfig, encoding, bufferSize,
+ MODE_STATIC, audioSessionId);
+ }
+
+ private static int getChannelConfig(int channelCount, boolean isInputPcm) {
+ if (Util.SDK_INT <= 28 && !isInputPcm) {
+ // In passthrough mode the channel count used to configure the audio track doesn't affect how
+      // the stream is handled, except that some devices perform overly strict channel configuration
+      // checks. Therefore we override the channel count so that a known-working channel
+ // configuration is chosen in all cases. See [Internal: b/29116190].
+ if (channelCount == 7) {
+ channelCount = 8;
+ } else if (channelCount == 3 || channelCount == 4 || channelCount == 5) {
+ channelCount = 6;
+ }
+ }
+
+ // Workaround for Nexus Player not reporting support for mono passthrough.
+ // (See [Internal: b/34268671].)
+ if (Util.SDK_INT <= 26 && "fugu".equals(Util.DEVICE) && !isInputPcm && channelCount == 1) {
+ channelCount = 2;
+ }
+
+ return Util.getAudioTrackChannelConfig(channelCount);
+ }
+
+ private static int getMaximumEncodedRateBytesPerSecond(@C.Encoding int encoding) {
+ switch (encoding) {
+ case C.ENCODING_AC3:
+ return 640 * 1000 / 8;
+ case C.ENCODING_E_AC3:
+ case C.ENCODING_E_AC3_JOC:
+ return 6144 * 1000 / 8;
+ case C.ENCODING_AC4:
+ return 2688 * 1000 / 8;
+ case C.ENCODING_DTS:
+ // DTS allows an 'open' bitrate, but we assume the maximum listed value: 1536 kbit/s.
+ return 1536 * 1000 / 8;
+ case C.ENCODING_DTS_HD:
+ return 18000 * 1000 / 8;
+ case C.ENCODING_DOLBY_TRUEHD:
+ return 24500 * 1000 / 8;
+ case C.ENCODING_INVALID:
+ case C.ENCODING_PCM_16BIT:
+ case C.ENCODING_PCM_24BIT:
+ case C.ENCODING_PCM_32BIT:
+ case C.ENCODING_PCM_8BIT:
+ case C.ENCODING_PCM_FLOAT:
+ case Format.NO_VALUE:
+ default:
+ throw new IllegalArgumentException();
+ }
+ }
+
+ private static int getFramesPerEncodedSample(@C.Encoding int encoding, ByteBuffer buffer) {
+ switch (encoding) {
+ case C.ENCODING_MP3:
+ return MpegAudioHeader.getFrameSampleCount(buffer.get(buffer.position()));
+ case C.ENCODING_DTS:
+ case C.ENCODING_DTS_HD:
+ return DtsUtil.parseDtsAudioSampleCount(buffer);
+ case C.ENCODING_AC3:
+ case C.ENCODING_E_AC3:
+ case C.ENCODING_E_AC3_JOC:
+ return Ac3Util.parseAc3SyncframeAudioSampleCount(buffer);
+ case C.ENCODING_AC4:
+ return Ac4Util.parseAc4SyncframeAudioSampleCount(buffer);
+ case C.ENCODING_DOLBY_TRUEHD:
+ int syncframeOffset = Ac3Util.findTrueHdSyncframeOffset(buffer);
+ return syncframeOffset == C.INDEX_UNSET
+ ? 0
+ : (Ac3Util.parseTrueHdSyncframeAudioSampleCount(buffer, syncframeOffset)
+ * Ac3Util.TRUEHD_RECHUNK_SAMPLE_COUNT);
+ default:
+ throw new IllegalStateException("Unexpected audio encoding: " + encoding);
+ }
+ }
+
+ @TargetApi(21)
+ private static int writeNonBlockingV21(AudioTrack audioTrack, ByteBuffer buffer, int size) {
+ return audioTrack.write(buffer, size, WRITE_NON_BLOCKING);
+ }
+
+ @TargetApi(21)
+ private int writeNonBlockingWithAvSyncV21(AudioTrack audioTrack, ByteBuffer buffer, int size,
+ long presentationTimeUs) {
+ if (Util.SDK_INT >= 26) {
+ // The underlying platform AudioTrack writes AV sync headers directly.
+ return audioTrack.write(buffer, size, WRITE_NON_BLOCKING, presentationTimeUs * 1000);
+ }
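+    // Editorial note: below API 26 the AV sync header must be written manually. It is 16 bytes:
+    // a 4-byte magic (0x55550001), a 4-byte payload size, and an 8-byte presentation timestamp in
+    // nanoseconds, followed by the payload itself.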
+ if (avSyncHeader == null) {
+ avSyncHeader = ByteBuffer.allocate(16);
+ avSyncHeader.order(ByteOrder.BIG_ENDIAN);
+ avSyncHeader.putInt(0x55550001);
+ }
+ if (bytesUntilNextAvSync == 0) {
+ avSyncHeader.putInt(4, size);
+ avSyncHeader.putLong(8, presentationTimeUs * 1000);
+ avSyncHeader.position(0);
+ bytesUntilNextAvSync = size;
+ }
+ int avSyncHeaderBytesRemaining = avSyncHeader.remaining();
+ if (avSyncHeaderBytesRemaining > 0) {
+ int result = audioTrack.write(avSyncHeader, avSyncHeaderBytesRemaining, WRITE_NON_BLOCKING);
+ if (result < 0) {
+ bytesUntilNextAvSync = 0;
+ return result;
+ }
+ if (result < avSyncHeaderBytesRemaining) {
+ return 0;
+ }
+ }
+ int result = writeNonBlockingV21(audioTrack, buffer, size);
+ if (result < 0) {
+ bytesUntilNextAvSync = 0;
+ return result;
+ }
+ bytesUntilNextAvSync -= result;
+ return result;
+ }
+
+ @TargetApi(21)
+ private static void setVolumeInternalV21(AudioTrack audioTrack, float volume) {
+ audioTrack.setVolume(volume);
+ }
+
+ private static void setVolumeInternalV3(AudioTrack audioTrack, float volume) {
+ audioTrack.setStereoVolume(volume, volume);
+ }
+
+ private void playPendingData() {
+ if (!stoppedAudioTrack) {
+ stoppedAudioTrack = true;
+ audioTrackPositionTracker.handleEndOfStream(getWrittenFrames());
+ audioTrack.stop();
+ bytesUntilNextAvSync = 0;
+ }
+ }
+
+ /** Stores playback parameters with the position and media time at which they apply. */
+ private static final class PlaybackParametersCheckpoint {
+
+ private final PlaybackParameters playbackParameters;
+ private final long mediaTimeUs;
+ private final long positionUs;
+
+ private PlaybackParametersCheckpoint(PlaybackParameters playbackParameters, long mediaTimeUs,
+ long positionUs) {
+ this.playbackParameters = playbackParameters;
+ this.mediaTimeUs = mediaTimeUs;
+ this.positionUs = positionUs;
+ }
+
+ }
+
+ private final class PositionTrackerListener implements AudioTrackPositionTracker.Listener {
+
+ @Override
+ public void onPositionFramesMismatch(
+ long audioTimestampPositionFrames,
+ long audioTimestampSystemTimeUs,
+ long systemTimeUs,
+ long playbackPositionUs) {
+ String message =
+ "Spurious audio timestamp (frame position mismatch): "
+ + audioTimestampPositionFrames
+ + ", "
+ + audioTimestampSystemTimeUs
+ + ", "
+ + systemTimeUs
+ + ", "
+ + playbackPositionUs
+ + ", "
+ + getSubmittedFrames()
+ + ", "
+ + getWrittenFrames();
+ if (failOnSpuriousAudioTimestamp) {
+ throw new InvalidAudioTrackTimestampException(message);
+ }
+ Log.w(TAG, message);
+ }
+
+ @Override
+ public void onSystemTimeUsMismatch(
+ long audioTimestampPositionFrames,
+ long audioTimestampSystemTimeUs,
+ long systemTimeUs,
+ long playbackPositionUs) {
+ String message =
+ "Spurious audio timestamp (system clock mismatch): "
+ + audioTimestampPositionFrames
+ + ", "
+ + audioTimestampSystemTimeUs
+ + ", "
+ + systemTimeUs
+ + ", "
+ + playbackPositionUs
+ + ", "
+ + getSubmittedFrames()
+ + ", "
+ + getWrittenFrames();
+ if (failOnSpuriousAudioTimestamp) {
+ throw new InvalidAudioTrackTimestampException(message);
+ }
+ Log.w(TAG, message);
+ }
+
+ @Override
+ public void onInvalidLatency(long latencyUs) {
+ Log.w(TAG, "Ignoring impossibly large audio latency: " + latencyUs);
+ }
+
+ @Override
+ public void onUnderrun(int bufferSize, long bufferSizeMs) {
+ if (listener != null) {
+ long elapsedSinceLastFeedMs = SystemClock.elapsedRealtime() - lastFeedElapsedRealtimeMs;
+ listener.onUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs);
+ }
+ }
+ }
+
+ /** Stores configuration relating to the audio format. */
+ private static final class Configuration {
+
+ public final boolean isInputPcm;
+ public final int inputPcmFrameSize;
+ public final int inputSampleRate;
+ public final int outputPcmFrameSize;
+ public final int outputSampleRate;
+ public final int outputChannelConfig;
+ @C.Encoding public final int outputEncoding;
+ public final int bufferSize;
+ public final boolean processingEnabled;
+ public final boolean canApplyPlaybackParameters;
+ public final AudioProcessor[] availableAudioProcessors;
+
+ public Configuration(
+ boolean isInputPcm,
+ int inputPcmFrameSize,
+ int inputSampleRate,
+ int outputPcmFrameSize,
+ int outputSampleRate,
+ int outputChannelConfig,
+ int outputEncoding,
+ int specifiedBufferSize,
+ boolean processingEnabled,
+ boolean canApplyPlaybackParameters,
+ AudioProcessor[] availableAudioProcessors) {
+ this.isInputPcm = isInputPcm;
+ this.inputPcmFrameSize = inputPcmFrameSize;
+ this.inputSampleRate = inputSampleRate;
+ this.outputPcmFrameSize = outputPcmFrameSize;
+ this.outputSampleRate = outputSampleRate;
+ this.outputChannelConfig = outputChannelConfig;
+ this.outputEncoding = outputEncoding;
+ this.bufferSize = specifiedBufferSize != 0 ? specifiedBufferSize : getDefaultBufferSize();
+ this.processingEnabled = processingEnabled;
+ this.canApplyPlaybackParameters = canApplyPlaybackParameters;
+ this.availableAudioProcessors = availableAudioProcessors;
+ }
+
+ public boolean canReuseAudioTrack(Configuration audioTrackConfiguration) {
+ return audioTrackConfiguration.outputEncoding == outputEncoding
+ && audioTrackConfiguration.outputSampleRate == outputSampleRate
+ && audioTrackConfiguration.outputChannelConfig == outputChannelConfig;
+ }
+
+ public long inputFramesToDurationUs(long frameCount) {
+ return (frameCount * C.MICROS_PER_SECOND) / inputSampleRate;
+ }
+
+ public long framesToDurationUs(long frameCount) {
+ return (frameCount * C.MICROS_PER_SECOND) / outputSampleRate;
+ }
+
+ public long durationUsToFrames(long durationUs) {
+ return (durationUs * outputSampleRate) / C.MICROS_PER_SECOND;
+ }
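+
+    /*
+     * Worked example: with an output sample rate of 48000 Hz, framesToDurationUs(48000) is
+     * 1000000 us (one second) and durationUsToFrames(250000) is 12000 frames.
+     */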
+
+ public AudioTrack buildAudioTrack(
+ boolean tunneling, AudioAttributes audioAttributes, int audioSessionId)
+ throws InitializationException {
+ AudioTrack audioTrack;
+ if (Util.SDK_INT >= 21) {
+ audioTrack = createAudioTrackV21(tunneling, audioAttributes, audioSessionId);
+ } else {
+ int streamType = Util.getStreamTypeForAudioUsage(audioAttributes.usage);
+ if (audioSessionId == C.AUDIO_SESSION_ID_UNSET) {
+ audioTrack =
+ new AudioTrack(
+ streamType,
+ outputSampleRate,
+ outputChannelConfig,
+ outputEncoding,
+ bufferSize,
+ MODE_STREAM);
+ } else {
+ // Re-attach to the same audio session.
+ audioTrack =
+ new AudioTrack(
+ streamType,
+ outputSampleRate,
+ outputChannelConfig,
+ outputEncoding,
+ bufferSize,
+ MODE_STREAM,
+ audioSessionId);
+ }
+ }
+
+ int state = audioTrack.getState();
+ if (state != STATE_INITIALIZED) {
+ try {
+ audioTrack.release();
+ } catch (Exception e) {
+ // The track has already failed to initialize, so it wouldn't be that surprising if
+ // release were to fail too. Swallow the exception.
+ }
+ throw new InitializationException(state, outputSampleRate, outputChannelConfig, bufferSize);
+ }
+ return audioTrack;
+ }
+
+ @TargetApi(21)
+ private AudioTrack createAudioTrackV21(
+ boolean tunneling, AudioAttributes audioAttributes, int audioSessionId) {
+ android.media.AudioAttributes attributes;
+ if (tunneling) {
+ attributes =
+ new android.media.AudioAttributes.Builder()
+ .setContentType(android.media.AudioAttributes.CONTENT_TYPE_MOVIE)
+ .setFlags(android.media.AudioAttributes.FLAG_HW_AV_SYNC)
+ .setUsage(android.media.AudioAttributes.USAGE_MEDIA)
+ .build();
+ } else {
+ attributes = audioAttributes.getAudioAttributesV21();
+ }
+ AudioFormat format =
+ new AudioFormat.Builder()
+ .setChannelMask(outputChannelConfig)
+ .setEncoding(outputEncoding)
+ .setSampleRate(outputSampleRate)
+ .build();
+ return new AudioTrack(
+ attributes,
+ format,
+ bufferSize,
+ MODE_STREAM,
+ audioSessionId != C.AUDIO_SESSION_ID_UNSET
+ ? audioSessionId
+ : AudioManager.AUDIO_SESSION_ID_GENERATE);
+ }
+
+ private int getDefaultBufferSize() {
+ if (isInputPcm) {
+ int minBufferSize =
+ AudioTrack.getMinBufferSize(outputSampleRate, outputChannelConfig, outputEncoding);
+ Assertions.checkState(minBufferSize != ERROR_BAD_VALUE);
+ int multipliedBufferSize = minBufferSize * BUFFER_MULTIPLICATION_FACTOR;
+ int minAppBufferSize =
+ (int) durationUsToFrames(MIN_BUFFER_DURATION_US) * outputPcmFrameSize;
+ int maxAppBufferSize =
+ (int)
+ Math.max(
+ minBufferSize, durationUsToFrames(MAX_BUFFER_DURATION_US) * outputPcmFrameSize);
+ return Util.constrainValue(multipliedBufferSize, minAppBufferSize, maxAppBufferSize);
+ } else {
+ int rate = getMaximumEncodedRateBytesPerSecond(outputEncoding);
+ if (outputEncoding == C.ENCODING_AC3) {
+ rate *= AC3_BUFFER_MULTIPLICATION_FACTOR;
+ }
+ return (int) (PASSTHROUGH_BUFFER_DURATION_US * rate / C.MICROS_PER_SECOND);
+ }
+ }
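+
+    /*
+     * Worked example for the passthrough branch above: AC-3 has a maximum encoded rate of
+     * 640 * 1000 / 8 = 80000 bytes/s; with the AC-3 multiplication factor and the passthrough
+     * buffer duration defined earlier in this file (values not shown in this hunk, assumed here
+     * to be 2 and 250000 us), the buffer would be 250000 * 160000 / 1000000 = 40000 bytes.
+     */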
+ }
+}
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/DtsUtil.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/DtsUtil.java
new file mode 100644
index 0000000000..6e5d749fdf
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/DtsUtil.java
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.drm.DrmInitData;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.MimeTypes;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.ParsableBitArray;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Utility methods for parsing DTS frames.
+ */
+public final class DtsUtil {
+
+ private static final int SYNC_VALUE_BE = 0x7FFE8001;
+ private static final int SYNC_VALUE_14B_BE = 0x1FFFE800;
+ private static final int SYNC_VALUE_LE = 0xFE7F0180;
+ private static final int SYNC_VALUE_14B_LE = 0xFF1F00E8;
+ private static final byte FIRST_BYTE_BE = (byte) (SYNC_VALUE_BE >>> 24);
+ private static final byte FIRST_BYTE_14B_BE = (byte) (SYNC_VALUE_14B_BE >>> 24);
+ private static final byte FIRST_BYTE_LE = (byte) (SYNC_VALUE_LE >>> 24);
+ private static final byte FIRST_BYTE_14B_LE = (byte) (SYNC_VALUE_14B_LE >>> 24);
+
+ /**
+ * Maps AMODE to the number of channels. See ETSI TS 102 114 table 5.4.
+ */
+ private static final int[] CHANNELS_BY_AMODE = new int[] {1, 2, 2, 2, 2, 3, 3, 4, 4, 5, 6, 6, 6,
+ 7, 8, 8};
+
+ /**
+   * Maps SFREQ to the sampling frequency in Hz. See ETSI TS 102 114 table 5.5.
+ */
+ private static final int[] SAMPLE_RATE_BY_SFREQ = new int[] {-1, 8000, 16000, 32000, -1, -1,
+ 11025, 22050, 44100, -1, -1, 12000, 24000, 48000, -1, -1};
+
+ /**
+   * Maps RATE to 2 * bitrate in kbit/s. See ETSI TS 102 114 table 5.7.
+ */
+ private static final int[] TWICE_BITRATE_KBPS_BY_RATE = new int[] {64, 112, 128, 192, 224, 256,
+ 384, 448, 512, 640, 768, 896, 1024, 1152, 1280, 1536, 1920, 2048, 2304, 2560, 2688, 2816,
+ 2823, 2944, 3072, 3840, 4096, 6144, 7680};
+
+ /**
+ * Returns whether a given integer matches a DTS sync word. Synchronization and storage modes are
+ * defined in ETSI TS 102 114 V1.1.1 (2002-08), Section 5.3.
+ *
+ * @param word An integer.
+ * @return Whether a given integer matches a DTS sync word.
+ */
+ public static boolean isSyncWord(int word) {
+ return word == SYNC_VALUE_BE
+ || word == SYNC_VALUE_LE
+ || word == SYNC_VALUE_14B_BE
+ || word == SYNC_VALUE_14B_LE;
+ }
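+
+  /*
+   * Illustrative usage, where data is a hypothetical byte array holding the frame and the four
+   * sync bytes are read in big-endian order:
+   *
+   *   int word = ((data[0] & 0xFF) << 24) | ((data[1] & 0xFF) << 16)
+   *       | ((data[2] & 0xFF) << 8) | (data[3] & 0xFF);
+   *   if (DtsUtil.isSyncWord(word)) {
+   *     int frameSize = DtsUtil.getDtsFrameSize(data);
+   *   }
+   */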
+
+ /**
+   * Returns the DTS format given {@code frame} containing the DTS frame according to ETSI TS 102 114
+ * subsections 5.3/5.4.
+ *
+ * @param frame The DTS frame to parse.
+ * @param trackId The track identifier to set on the format.
+ * @param language The language to set on the format.
+ * @param drmInitData {@link DrmInitData} to be included in the format.
+ * @return The DTS format parsed from data in the header.
+ */
+ public static Format parseDtsFormat(
+ byte[] frame, String trackId, @Nullable String language, @Nullable DrmInitData drmInitData) {
+ ParsableBitArray frameBits = getNormalizedFrameHeader(frame);
+ frameBits.skipBits(32 + 1 + 5 + 1 + 7 + 14); // SYNC, FTYPE, SHORT, CPF, NBLKS, FSIZE
+ int amode = frameBits.readBits(6);
+ int channelCount = CHANNELS_BY_AMODE[amode];
+ int sfreq = frameBits.readBits(4);
+ int sampleRate = SAMPLE_RATE_BY_SFREQ[sfreq];
+ int rate = frameBits.readBits(5);
+ int bitrate = rate >= TWICE_BITRATE_KBPS_BY_RATE.length ? Format.NO_VALUE
+ : TWICE_BITRATE_KBPS_BY_RATE[rate] * 1000 / 2;
+ frameBits.skipBits(10); // MIX, DYNF, TIMEF, AUXF, HDCD, EXT_AUDIO_ID, EXT_AUDIO, ASPF
+ channelCount += frameBits.readBits(2) > 0 ? 1 : 0; // LFF
+ return Format.createAudioSampleFormat(trackId, MimeTypes.AUDIO_DTS, null, bitrate,
+ Format.NO_VALUE, channelCount, sampleRate, null, drmInitData, 0, language);
+ }
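+
+  /*
+   * Worked example: AMODE 9 maps to 5 channels in the table above, and a non-zero LFF field adds
+   * one LFE channel, giving a six-channel (5.1) format.
+   */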
+
+ /**
+ * Returns the number of audio samples represented by the given DTS frame.
+ *
+ * @param data The frame to parse.
+ * @return The number of audio samples represented by the frame.
+ */
+ public static int parseDtsAudioSampleCount(byte[] data) {
+ int nblks;
+ switch (data[0]) {
+ case FIRST_BYTE_LE:
+ nblks = ((data[5] & 0x01) << 6) | ((data[4] & 0xFC) >> 2);
+ break;
+ case FIRST_BYTE_14B_LE:
+ nblks = ((data[4] & 0x07) << 4) | ((data[7] & 0x3C) >> 2);
+ break;
+ case FIRST_BYTE_14B_BE:
+ nblks = ((data[5] & 0x07) << 4) | ((data[6] & 0x3C) >> 2);
+ break;
+ default:
+ // We blindly assume FIRST_BYTE_BE if none of the others match.
+ nblks = ((data[4] & 0x01) << 6) | ((data[5] & 0xFC) >> 2);
+ }
+ return (nblks + 1) * 32;
+ }
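+
+  /*
+   * Worked example: a frame whose NBLKS field decodes to 15 represents (15 + 1) * 32 = 512
+   * audio samples.
+   */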
+
+ /**
+ * Like {@link #parseDtsAudioSampleCount(byte[])} but reads from a {@link ByteBuffer}. The
+ * buffer's position is not modified.
+ *
+ * @param buffer The {@link ByteBuffer} from which to read.
+ * @return The number of audio samples represented by the syncframe.
+ */
+ public static int parseDtsAudioSampleCount(ByteBuffer buffer) {
+ // See ETSI TS 102 114 subsection 5.4.1.
+ int position = buffer.position();
+ int nblks;
+ switch (buffer.get(position)) {
+ case FIRST_BYTE_LE:
+ nblks = ((buffer.get(position + 5) & 0x01) << 6) | ((buffer.get(position + 4) & 0xFC) >> 2);
+ break;
+ case FIRST_BYTE_14B_LE:
+ nblks = ((buffer.get(position + 4) & 0x07) << 4) | ((buffer.get(position + 7) & 0x3C) >> 2);
+ break;
+ case FIRST_BYTE_14B_BE:
+ nblks = ((buffer.get(position + 5) & 0x07) << 4) | ((buffer.get(position + 6) & 0x3C) >> 2);
+ break;
+ default:
+ // We blindly assume FIRST_BYTE_BE if none of the others match.
+ nblks = ((buffer.get(position + 4) & 0x01) << 6) | ((buffer.get(position + 5) & 0xFC) >> 2);
+ }
+ return (nblks + 1) * 32;
+ }
+
+ /**
+ * Returns the size in bytes of the given DTS frame.
+ *
+ * @param data The frame to parse.
+ * @return The frame's size in bytes.
+ */
+ public static int getDtsFrameSize(byte[] data) {
+ int fsize;
+ boolean uses14BitPerWord = false;
+ switch (data[0]) {
+ case FIRST_BYTE_14B_BE:
+ fsize = (((data[6] & 0x03) << 12) | ((data[7] & 0xFF) << 4) | ((data[8] & 0x3C) >> 2)) + 1;
+ uses14BitPerWord = true;
+ break;
+ case FIRST_BYTE_LE:
+ fsize = (((data[4] & 0x03) << 12) | ((data[7] & 0xFF) << 4) | ((data[6] & 0xF0) >> 4)) + 1;
+ break;
+ case FIRST_BYTE_14B_LE:
+ fsize = (((data[7] & 0x03) << 12) | ((data[6] & 0xFF) << 4) | ((data[9] & 0x3C) >> 2)) + 1;
+ uses14BitPerWord = true;
+ break;
+ default:
+ // We blindly assume FIRST_BYTE_BE if none of the others match.
+ fsize = (((data[5] & 0x03) << 12) | ((data[6] & 0xFF) << 4) | ((data[7] & 0xF0) >> 4)) + 1;
+ }
+
+ // If the frame is stored in 14-bit mode, adjust the frame size to reflect the actual byte size.
+ return uses14BitPerWord ? fsize * 16 / 14 : fsize;
+ }
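+
+  /*
+   * Worked example of the 14-bit adjustment above: an FSIZE of 1400 in 14-bit mode corresponds
+   * to 1400 * 16 / 14 = 1600 actual bytes.
+   */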
+
+ private static ParsableBitArray getNormalizedFrameHeader(byte[] frameHeader) {
+ if (frameHeader[0] == FIRST_BYTE_BE) {
+ // The frame is already 16-bit mode, big endian.
+ return new ParsableBitArray(frameHeader);
+ }
+ // Data is not normalized, but we don't want to modify frameHeader.
+ frameHeader = Arrays.copyOf(frameHeader, frameHeader.length);
+ if (isLittleEndianFrameHeader(frameHeader)) {
+ // Change endianness.
+ for (int i = 0; i < frameHeader.length - 1; i += 2) {
+ byte temp = frameHeader[i];
+ frameHeader[i] = frameHeader[i + 1];
+ frameHeader[i + 1] = temp;
+ }
+ }
+ ParsableBitArray frameBits = new ParsableBitArray(frameHeader);
+  if (frameHeader[0] == FIRST_BYTE_14B_BE) {
+ // Discard the 2 most significant bits of each 16 bit word.
+ ParsableBitArray scratchBits = new ParsableBitArray(frameHeader);
+ while (scratchBits.bitsLeft() >= 16) {
+ scratchBits.skipBits(2);
+ frameBits.putInt(scratchBits.readBits(14), 14);
+ }
+ }
+ frameBits.reset(frameHeader);
+ return frameBits;
+ }
+
+ private static boolean isLittleEndianFrameHeader(byte[] frameHeader) {
+ return frameHeader[0] == FIRST_BYTE_LE || frameHeader[0] == FIRST_BYTE_14B_LE;
+ }
+
+ private DtsUtil() {}
+
+}
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/FloatResamplingAudioProcessor.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/FloatResamplingAudioProcessor.java
new file mode 100644
index 0000000000..c2eb62a0ad
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/FloatResamplingAudioProcessor.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.nio.ByteBuffer;
+
+/**
+ * An {@link AudioProcessor} that converts high resolution PCM audio to 32-bit float. The following
+ * encodings are supported as input:
+ *
+ * <ul>
+ * <li>{@link C#ENCODING_PCM_24BIT}
+ * <li>{@link C#ENCODING_PCM_32BIT}
+ * <li>{@link C#ENCODING_PCM_FLOAT} ({@link #isActive()} will return {@code false})
+ * </ul>
+ */
+/* package */ final class FloatResamplingAudioProcessor extends BaseAudioProcessor {
+
+ private static final int FLOAT_NAN_AS_INT = Float.floatToIntBits(Float.NaN);
+ private static final double PCM_32_BIT_INT_TO_PCM_32_BIT_FLOAT_FACTOR = 1.0 / 0x7FFFFFFF;
+
+ @Override
+ public AudioFormat onConfigure(AudioFormat inputAudioFormat)
+ throws UnhandledAudioFormatException {
+ @C.PcmEncoding int encoding = inputAudioFormat.encoding;
+ if (!Util.isEncodingHighResolutionPcm(encoding)) {
+ throw new UnhandledAudioFormatException(inputAudioFormat);
+ }
+ return encoding != C.ENCODING_PCM_FLOAT
+ ? new AudioFormat(
+ inputAudioFormat.sampleRate, inputAudioFormat.channelCount, C.ENCODING_PCM_FLOAT)
+ : AudioFormat.NOT_SET;
+ }
+
+ @Override
+ public void queueInput(ByteBuffer inputBuffer) {
+ int position = inputBuffer.position();
+ int limit = inputBuffer.limit();
+ int size = limit - position;
+
+ ByteBuffer buffer;
+ switch (inputAudioFormat.encoding) {
+ case C.ENCODING_PCM_24BIT:
+ buffer = replaceOutputBuffer((size / 3) * 4);
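+        // Each 3-byte little-endian sample below fills the top 24 bits of a 32-bit integer, so
+        // 24-bit and 32-bit inputs share the same scale factor.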
+ for (int i = position; i < limit; i += 3) {
+ int pcm32BitInteger =
+ ((inputBuffer.get(i) & 0xFF) << 8)
+ | ((inputBuffer.get(i + 1) & 0xFF) << 16)
+ | ((inputBuffer.get(i + 2) & 0xFF) << 24);
+ writePcm32BitFloat(pcm32BitInteger, buffer);
+ }
+ break;
+ case C.ENCODING_PCM_32BIT:
+ buffer = replaceOutputBuffer(size);
+ for (int i = position; i < limit; i += 4) {
+ int pcm32BitInteger =
+ (inputBuffer.get(i) & 0xFF)
+ | ((inputBuffer.get(i + 1) & 0xFF) << 8)
+ | ((inputBuffer.get(i + 2) & 0xFF) << 16)
+ | ((inputBuffer.get(i + 3) & 0xFF) << 24);
+ writePcm32BitFloat(pcm32BitInteger, buffer);
+ }
+ break;
+ case C.ENCODING_PCM_8BIT:
+ case C.ENCODING_PCM_16BIT:
+ case C.ENCODING_PCM_16BIT_BIG_ENDIAN:
+ case C.ENCODING_PCM_FLOAT:
+ case C.ENCODING_INVALID:
+ case Format.NO_VALUE:
+ default:
+ // Never happens.
+ throw new IllegalStateException();
+ }
+
+ inputBuffer.position(inputBuffer.limit());
+ buffer.flip();
+ }
+
+ /**
+ * Converts the provided 32-bit integer to a 32-bit float value and writes it to {@code buffer}.
+ *
+ * @param pcm32BitInt The 32-bit integer value to convert to 32-bit float in [-1.0, 1.0].
+ * @param buffer The output buffer.
+ */
+ private static void writePcm32BitFloat(int pcm32BitInt, ByteBuffer buffer) {
+ float pcm32BitFloat = (float) (PCM_32_BIT_INT_TO_PCM_32_BIT_FLOAT_FACTOR * pcm32BitInt);
+ int floatBits = Float.floatToIntBits(pcm32BitFloat);
+ if (floatBits == FLOAT_NAN_AS_INT) {
+ floatBits = Float.floatToIntBits((float) 0.0);
+ }
+ buffer.putInt(floatBits);
+ }
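+
+  /*
+   * Worked example: an input of 0x7FFFFFFF maps to exactly 1.0f and 0x40000000 to roughly 0.5f,
+   * so converted samples lie in [-1.0, 1.0]; the NaN bit-pattern check above is purely
+   * defensive, since scaling a finite integer this way cannot produce NaN.
+   */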
+}
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ForwardingAudioSink.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ForwardingAudioSink.java
new file mode 100644
index 0000000000..4e7f9d69f9
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ForwardingAudioSink.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.PlaybackParameters;
+import java.nio.ByteBuffer;
+
+/** An overridable {@link AudioSink} implementation forwarding all methods to another sink. */
+public class ForwardingAudioSink implements AudioSink {
+
+ private final AudioSink sink;
+
+ public ForwardingAudioSink(AudioSink sink) {
+ this.sink = sink;
+ }
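+
+  /*
+   * Illustrative usage (a hypothetical subclass, not part of this file): override only the
+   * methods of interest and let everything else forward to the wrapped sink.
+   *
+   *   final class LoggingAudioSink extends ForwardingAudioSink {
+   *     LoggingAudioSink(AudioSink sink) {
+   *       super(sink);
+   *     }
+   *
+   *     @Override
+   *     public void play() {
+   *       android.util.Log.d("LoggingAudioSink", "play()");
+   *       super.play();
+   *     }
+   *   }
+   */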
+
+ @Override
+ public void setListener(Listener listener) {
+ sink.setListener(listener);
+ }
+
+ @Override
+ public boolean supportsOutput(int channelCount, int encoding) {
+ return sink.supportsOutput(channelCount, encoding);
+ }
+
+ @Override
+ public long getCurrentPositionUs(boolean sourceEnded) {
+ return sink.getCurrentPositionUs(sourceEnded);
+ }
+
+ @Override
+ public void configure(
+ int inputEncoding,
+ int inputChannelCount,
+ int inputSampleRate,
+ int specifiedBufferSize,
+ @Nullable int[] outputChannels,
+ int trimStartFrames,
+ int trimEndFrames)
+ throws ConfigurationException {
+ sink.configure(
+ inputEncoding,
+ inputChannelCount,
+ inputSampleRate,
+ specifiedBufferSize,
+ outputChannels,
+ trimStartFrames,
+ trimEndFrames);
+ }
+
+ @Override
+ public void play() {
+ sink.play();
+ }
+
+ @Override
+ public void handleDiscontinuity() {
+ sink.handleDiscontinuity();
+ }
+
+ @Override
+ public boolean handleBuffer(ByteBuffer buffer, long presentationTimeUs)
+ throws InitializationException, WriteException {
+ return sink.handleBuffer(buffer, presentationTimeUs);
+ }
+
+ @Override
+ public void playToEndOfStream() throws WriteException {
+ sink.playToEndOfStream();
+ }
+
+ @Override
+ public boolean isEnded() {
+ return sink.isEnded();
+ }
+
+ @Override
+ public boolean hasPendingData() {
+ return sink.hasPendingData();
+ }
+
+ @Override
+ public void setPlaybackParameters(PlaybackParameters playbackParameters) {
+ sink.setPlaybackParameters(playbackParameters);
+ }
+
+ @Override
+ public PlaybackParameters getPlaybackParameters() {
+ return sink.getPlaybackParameters();
+ }
+
+ @Override
+ public void setAudioAttributes(AudioAttributes audioAttributes) {
+ sink.setAudioAttributes(audioAttributes);
+ }
+
+ @Override
+ public void setAudioSessionId(int audioSessionId) {
+ sink.setAudioSessionId(audioSessionId);
+ }
+
+ @Override
+ public void setAuxEffectInfo(AuxEffectInfo auxEffectInfo) {
+ sink.setAuxEffectInfo(auxEffectInfo);
+ }
+
+ @Override
+ public void enableTunnelingV21(int tunnelingAudioSessionId) {
+ sink.enableTunnelingV21(tunnelingAudioSessionId);
+ }
+
+ @Override
+ public void disableTunneling() {
+ sink.disableTunneling();
+ }
+
+ @Override
+ public void setVolume(float volume) {
+ sink.setVolume(volume);
+ }
+
+ @Override
+ public void pause() {
+ sink.pause();
+ }
+
+ @Override
+ public void flush() {
+ sink.flush();
+ }
+
+ @Override
+ public void reset() {
+ sink.reset();
+ }
+}
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/MediaCodecAudioRenderer.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/MediaCodecAudioRenderer.java
new file mode 100644
index 0000000000..42f7e99b78
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/MediaCodecAudioRenderer.java
@@ -0,0 +1,1036 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import android.annotation.SuppressLint;
+import android.content.Context;
+import android.media.MediaCodec;
+import android.media.MediaCrypto;
+import android.media.MediaFormat;
+import android.media.audiofx.Virtualizer;
+import android.os.Handler;
+import androidx.annotation.CallSuper;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.ExoPlaybackException;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.ExoPlayer;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.FormatHolder;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.PlaybackParameters;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.PlayerMessage.Target;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.RendererCapabilities;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.audio.AudioRendererEventListener.EventDispatcher;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.decoder.DecoderInputBuffer;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.drm.DrmSessionManager;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.drm.FrameworkMediaCrypto;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.mediacodec.MediaCodecInfo;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.mediacodec.MediaCodecRenderer;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.mediacodec.MediaCodecSelector;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.mediacodec.MediaCodecUtil;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.mediacodec.MediaCodecUtil.DecoderQueryException;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.mediacodec.MediaFormatUtil;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.source.MediaSource;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Log;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.MediaClock;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.MimeTypes;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Decodes and renders audio using {@link MediaCodec} and an {@link AudioSink}.
+ *
+ * <p>This renderer accepts the following messages sent via {@link ExoPlayer#createMessage(Target)}
+ * on the playback thread:
+ *
+ * <ul>
+ * <li>Message with type {@link C#MSG_SET_VOLUME} to set the volume. The message payload should be
+ *       a {@link Float} with 0 being silence and 1 being unity gain (see the example below).
+ * <li>Message with type {@link C#MSG_SET_AUDIO_ATTRIBUTES} to set the audio attributes. The
+ *       message payload should be an {@link org.mozilla.thirdparty.com.google.android.exoplayer2.audio.AudioAttributes}
+ * instance that will configure the underlying audio track.
+ * <li>Message with type {@link C#MSG_SET_AUX_EFFECT_INFO} to set the auxiliary effect. The
+ * message payload should be an {@link AuxEffectInfo} instance that will configure the
+ * underlying audio track.
+ * </ul>
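+ *
+ * <p>For example, the volume message might be sent as follows (an illustrative sketch; {@code
+ * player} and {@code renderer} are assumed references to the player and to this renderer):
+ *
+ * <pre>{@code
+ * player.createMessage(renderer).setType(C.MSG_SET_VOLUME).setPayload(0.5f).send();
+ * }</pre>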
+ */
+public class MediaCodecAudioRenderer extends MediaCodecRenderer implements MediaClock {
+
+ /**
+ * Maximum number of tracked pending stream change times. Generally there is zero or one pending
+ * stream change. We track more to allow for pending changes that have fewer samples than the
+ * codec latency.
+ */
+ private static final int MAX_PENDING_STREAM_CHANGE_COUNT = 10;
+
+ private static final String TAG = "MediaCodecAudioRenderer";
+ /**
+   * Custom key used to indicate bits per sample by some decoders on Vivo devices. For example,
+   * OMX.vivo.alac.decoder on the Vivo Z1 Pro.
+ */
+ private static final String VIVO_BITS_PER_SAMPLE_KEY = "v-bits-per-sample";
+
+ private final Context context;
+ private final EventDispatcher eventDispatcher;
+ private final AudioSink audioSink;
+ private final long[] pendingStreamChangeTimesUs;
+
+ private int codecMaxInputSize;
+ private boolean passthroughEnabled;
+ private boolean codecNeedsDiscardChannelsWorkaround;
+ private boolean codecNeedsEosBufferTimestampWorkaround;
+ private android.media.MediaFormat passthroughMediaFormat;
+ @Nullable private Format inputFormat;
+ private long currentPositionUs;
+ private boolean allowFirstBufferPositionDiscontinuity;
+ private boolean allowPositionDiscontinuity;
+ private long lastInputTimeUs;
+ private int pendingStreamChangeCount;
+
+ /**
+ * @param context A context.
+ * @param mediaCodecSelector A decoder selector.
+ */
+ @SuppressWarnings("deprecation")
+ public MediaCodecAudioRenderer(Context context, MediaCodecSelector mediaCodecSelector) {
+ this(
+ context,
+ mediaCodecSelector,
+ /* drmSessionManager= */ null,
+ /* playClearSamplesWithoutKeys= */ false);
+ }
+
+ /**
+ * @param context A context.
+ * @param mediaCodecSelector A decoder selector.
+ * @param drmSessionManager For use with encrypted content. May be null if support for encrypted
+ * content is not required.
+ * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
+ * For example a media file may start with a short clear region so as to allow playback to
+ * begin in parallel with key acquisition. This parameter specifies whether the renderer is
+ * permitted to play clear regions of encrypted media files before {@code drmSessionManager}
+ * has obtained the keys necessary to decrypt encrypted regions of the media.
+ * @deprecated Use {@link #MediaCodecAudioRenderer(Context, MediaCodecSelector, boolean, Handler,
+ * AudioRendererEventListener, AudioSink)} instead, and pass DRM-related parameters to the
+ * {@link MediaSource} factories.
+ */
+ @Deprecated
+ @SuppressWarnings("deprecation")
+ public MediaCodecAudioRenderer(
+ Context context,
+ MediaCodecSelector mediaCodecSelector,
+ @Nullable DrmSessionManager<FrameworkMediaCrypto> drmSessionManager,
+ boolean playClearSamplesWithoutKeys) {
+ this(
+ context,
+ mediaCodecSelector,
+ drmSessionManager,
+ playClearSamplesWithoutKeys,
+ /* eventHandler= */ null,
+ /* eventListener= */ null);
+ }
+
+ /**
+ * @param context A context.
+ * @param mediaCodecSelector A decoder selector.
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ */
+ @SuppressWarnings("deprecation")
+ public MediaCodecAudioRenderer(
+ Context context,
+ MediaCodecSelector mediaCodecSelector,
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener) {
+ this(
+ context,
+ mediaCodecSelector,
+ /* drmSessionManager= */ null,
+ /* playClearSamplesWithoutKeys= */ false,
+ eventHandler,
+ eventListener);
+ }
+
+ /**
+ * @param context A context.
+ * @param mediaCodecSelector A decoder selector.
+ * @param drmSessionManager For use with encrypted content. May be null if support for encrypted
+ * content is not required.
+ * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
+ * For example a media file may start with a short clear region so as to allow playback to
+ * begin in parallel with key acquisition. This parameter specifies whether the renderer is
+ * permitted to play clear regions of encrypted media files before {@code drmSessionManager}
+ * has obtained the keys necessary to decrypt encrypted regions of the media.
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ * @deprecated Use {@link #MediaCodecAudioRenderer(Context, MediaCodecSelector, boolean, Handler,
+ * AudioRendererEventListener, AudioSink)} instead, and pass DRM-related parameters to the
+ * {@link MediaSource} factories.
+ */
+ @Deprecated
+ @SuppressWarnings("deprecation")
+ public MediaCodecAudioRenderer(
+ Context context,
+ MediaCodecSelector mediaCodecSelector,
+ @Nullable DrmSessionManager<FrameworkMediaCrypto> drmSessionManager,
+ boolean playClearSamplesWithoutKeys,
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener) {
+ this(
+ context,
+ mediaCodecSelector,
+ drmSessionManager,
+ playClearSamplesWithoutKeys,
+ eventHandler,
+ eventListener,
+ (AudioCapabilities) null);
+ }
+
+ /**
+ * @param context A context.
+ * @param mediaCodecSelector A decoder selector.
+ * @param drmSessionManager For use with encrypted content. May be null if support for encrypted
+ * content is not required.
+ * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
+ * For example a media file may start with a short clear region so as to allow playback to
+ * begin in parallel with key acquisition. This parameter specifies whether the renderer is
+ * permitted to play clear regions of encrypted media files before {@code drmSessionManager}
+ * has obtained the keys necessary to decrypt encrypted regions of the media.
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
+ * default capabilities (no encoded audio passthrough support) should be assumed.
+ * @param audioProcessors Optional {@link AudioProcessor}s that will process PCM audio before
+ * output.
+ * @deprecated Use {@link #MediaCodecAudioRenderer(Context, MediaCodecSelector, boolean, Handler,
+ * AudioRendererEventListener, AudioSink)} instead, and pass DRM-related parameters to the
+ * {@link MediaSource} factories.
+ */
+ @Deprecated
+ @SuppressWarnings("deprecation")
+ public MediaCodecAudioRenderer(
+ Context context,
+ MediaCodecSelector mediaCodecSelector,
+ @Nullable DrmSessionManager<FrameworkMediaCrypto> drmSessionManager,
+ boolean playClearSamplesWithoutKeys,
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener,
+ @Nullable AudioCapabilities audioCapabilities,
+ AudioProcessor... audioProcessors) {
+ this(
+ context,
+ mediaCodecSelector,
+ drmSessionManager,
+ playClearSamplesWithoutKeys,
+ eventHandler,
+ eventListener,
+ new DefaultAudioSink(audioCapabilities, audioProcessors));
+ }
+
+ /**
+ * @param context A context.
+ * @param mediaCodecSelector A decoder selector.
+ * @param drmSessionManager For use with encrypted content. May be null if support for encrypted
+ * content is not required.
+ * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
+ * For example a media file may start with a short clear region so as to allow playback to
+ * begin in parallel with key acquisition. This parameter specifies whether the renderer is
+ * permitted to play clear regions of encrypted media files before {@code drmSessionManager}
+ * has obtained the keys necessary to decrypt encrypted regions of the media.
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ * @param audioSink The sink to which audio will be output.
+ * @deprecated Use {@link #MediaCodecAudioRenderer(Context, MediaCodecSelector, boolean, Handler,
+ * AudioRendererEventListener, AudioSink)} instead, and pass DRM-related parameters to the
+ * {@link MediaSource} factories.
+ */
+ @Deprecated
+ @SuppressWarnings("deprecation")
+ public MediaCodecAudioRenderer(
+ Context context,
+ MediaCodecSelector mediaCodecSelector,
+ @Nullable DrmSessionManager<FrameworkMediaCrypto> drmSessionManager,
+ boolean playClearSamplesWithoutKeys,
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener,
+ AudioSink audioSink) {
+ this(
+ context,
+ mediaCodecSelector,
+ drmSessionManager,
+ playClearSamplesWithoutKeys,
+ /* enableDecoderFallback= */ false,
+ eventHandler,
+ eventListener,
+ audioSink);
+ }
+
+ /**
+ * @param context A context.
+ * @param mediaCodecSelector A decoder selector.
+ * @param enableDecoderFallback Whether to enable fallback to lower-priority decoders if decoder
+ * initialization fails. This may result in using a decoder that is slower/less efficient than
+ * the primary decoder.
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ * @param audioSink The sink to which audio will be output.
+ */
+ @SuppressWarnings("deprecation")
+ public MediaCodecAudioRenderer(
+ Context context,
+ MediaCodecSelector mediaCodecSelector,
+ boolean enableDecoderFallback,
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener,
+ AudioSink audioSink) {
+ this(
+ context,
+ mediaCodecSelector,
+ /* drmSessionManager= */ null,
+ /* playClearSamplesWithoutKeys= */ false,
+ enableDecoderFallback,
+ eventHandler,
+ eventListener,
+ audioSink);
+ }
+
+ /**
+ * @param context A context.
+ * @param mediaCodecSelector A decoder selector.
+ * @param drmSessionManager For use with encrypted content. May be null if support for encrypted
+ * content is not required.
+ * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
+ * For example a media file may start with a short clear region so as to allow playback to
+ * begin in parallel with key acquisition. This parameter specifies whether the renderer is
+ * permitted to play clear regions of encrypted media files before {@code drmSessionManager}
+ * has obtained the keys necessary to decrypt encrypted regions of the media.
+ * @param enableDecoderFallback Whether to enable fallback to lower-priority decoders if decoder
+ * initialization fails. This may result in using a decoder that is slower/less efficient than
+ * the primary decoder.
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ * @param audioSink The sink to which audio will be output.
+ * @deprecated Use {@link #MediaCodecAudioRenderer(Context, MediaCodecSelector, boolean, Handler,
+ * AudioRendererEventListener, AudioSink)} instead, and pass DRM-related parameters to the
+ * {@link MediaSource} factories.
+ */
+ @Deprecated
+ public MediaCodecAudioRenderer(
+ Context context,
+ MediaCodecSelector mediaCodecSelector,
+ @Nullable DrmSessionManager<FrameworkMediaCrypto> drmSessionManager,
+ boolean playClearSamplesWithoutKeys,
+ boolean enableDecoderFallback,
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener,
+ AudioSink audioSink) {
+ super(
+ C.TRACK_TYPE_AUDIO,
+ mediaCodecSelector,
+ drmSessionManager,
+ playClearSamplesWithoutKeys,
+ enableDecoderFallback,
+ /* assumedMinimumCodecOperatingRate= */ 44100);
+ this.context = context.getApplicationContext();
+ this.audioSink = audioSink;
+ lastInputTimeUs = C.TIME_UNSET;
+ pendingStreamChangeTimesUs = new long[MAX_PENDING_STREAM_CHANGE_COUNT];
+ eventDispatcher = new EventDispatcher(eventHandler, eventListener);
+ audioSink.setListener(new AudioSinkListener());
+ }
+
+ @Override
+ @Capabilities
+ protected int supportsFormat(
+ MediaCodecSelector mediaCodecSelector,
+ @Nullable DrmSessionManager<FrameworkMediaCrypto> drmSessionManager,
+ Format format)
+ throws DecoderQueryException {
+ String mimeType = format.sampleMimeType;
+ if (!MimeTypes.isAudio(mimeType)) {
+ return RendererCapabilities.create(FORMAT_UNSUPPORTED_TYPE);
+ }
+ @TunnelingSupport
+ int tunnelingSupport = Util.SDK_INT >= 21 ? TUNNELING_SUPPORTED : TUNNELING_NOT_SUPPORTED;
+ boolean supportsFormatDrm =
+ format.drmInitData == null
+ || FrameworkMediaCrypto.class.equals(format.exoMediaCryptoType)
+ || (format.exoMediaCryptoType == null
+ && supportsFormatDrm(drmSessionManager, format.drmInitData));
+ if (supportsFormatDrm
+ && allowPassthrough(format.channelCount, mimeType)
+ && mediaCodecSelector.getPassthroughDecoderInfo() != null) {
+ return RendererCapabilities.create(FORMAT_HANDLED, ADAPTIVE_NOT_SEAMLESS, tunnelingSupport);
+ }
+ if ((MimeTypes.AUDIO_RAW.equals(mimeType)
+ && !audioSink.supportsOutput(format.channelCount, format.pcmEncoding))
+ || !audioSink.supportsOutput(format.channelCount, C.ENCODING_PCM_16BIT)) {
+ // Assume the decoder outputs 16-bit PCM, unless the input is raw.
+ return RendererCapabilities.create(FORMAT_UNSUPPORTED_SUBTYPE);
+ }
+ List<MediaCodecInfo> decoderInfos =
+ getDecoderInfos(mediaCodecSelector, format, /* requiresSecureDecoder= */ false);
+ if (decoderInfos.isEmpty()) {
+ return RendererCapabilities.create(FORMAT_UNSUPPORTED_SUBTYPE);
+ }
+ if (!supportsFormatDrm) {
+ return RendererCapabilities.create(FORMAT_UNSUPPORTED_DRM);
+ }
+ // Check capabilities for the first decoder in the list, which takes priority.
+ MediaCodecInfo decoderInfo = decoderInfos.get(0);
+ boolean isFormatSupported = decoderInfo.isFormatSupported(format);
+ @AdaptiveSupport
+ int adaptiveSupport =
+ isFormatSupported && decoderInfo.isSeamlessAdaptationSupported(format)
+ ? ADAPTIVE_SEAMLESS
+ : ADAPTIVE_NOT_SEAMLESS;
+ @FormatSupport
+ int formatSupport = isFormatSupported ? FORMAT_HANDLED : FORMAT_EXCEEDS_CAPABILITIES;
+ return RendererCapabilities.create(formatSupport, adaptiveSupport, tunnelingSupport);
+ }
+
+ @Override
+ protected List<MediaCodecInfo> getDecoderInfos(
+ MediaCodecSelector mediaCodecSelector, Format format, boolean requiresSecureDecoder)
+ throws DecoderQueryException {
+ @Nullable String mimeType = format.sampleMimeType;
+ if (mimeType == null) {
+ return Collections.emptyList();
+ }
+ if (allowPassthrough(format.channelCount, mimeType)) {
+ @Nullable
+ MediaCodecInfo passthroughDecoderInfo = mediaCodecSelector.getPassthroughDecoderInfo();
+ if (passthroughDecoderInfo != null) {
+ return Collections.singletonList(passthroughDecoderInfo);
+ }
+ }
+ List<MediaCodecInfo> decoderInfos =
+ mediaCodecSelector.getDecoderInfos(
+ mimeType, requiresSecureDecoder, /* requiresTunnelingDecoder= */ false);
+ decoderInfos = MediaCodecUtil.getDecoderInfosSortedByFormatSupport(decoderInfos, format);
+ if (MimeTypes.AUDIO_E_AC3_JOC.equals(mimeType)) {
+ // E-AC3 decoders can decode JOC streams, but in 2-D rather than 3-D.
+ List<MediaCodecInfo> decoderInfosWithEac3 = new ArrayList<>(decoderInfos);
+ decoderInfosWithEac3.addAll(
+ mediaCodecSelector.getDecoderInfos(
+ MimeTypes.AUDIO_E_AC3, requiresSecureDecoder, /* requiresTunnelingDecoder= */ false));
+ decoderInfos = decoderInfosWithEac3;
+ }
+ return Collections.unmodifiableList(decoderInfos);
+ }
+
+ /**
+ * Returns whether encoded audio passthrough should be used for playing back the input format.
+ * This implementation returns true if the {@link AudioSink} indicates that encoded audio output
+ * is supported.
+ *
+ * @param channelCount The number of channels in the input media, or {@link Format#NO_VALUE} if
+ * not known.
+ * @param mimeType The type of input media.
+ * @return Whether passthrough playback is supported.
+ */
+ protected boolean allowPassthrough(int channelCount, String mimeType) {
+ return getPassthroughEncoding(channelCount, mimeType) != C.ENCODING_INVALID;
+ }
+
+ @Override
+ protected void configureCodec(
+ MediaCodecInfo codecInfo,
+ MediaCodec codec,
+ Format format,
+ @Nullable MediaCrypto crypto,
+ float codecOperatingRate) {
+ codecMaxInputSize = getCodecMaxInputSize(codecInfo, format, getStreamFormats());
+ codecNeedsDiscardChannelsWorkaround = codecNeedsDiscardChannelsWorkaround(codecInfo.name);
+ codecNeedsEosBufferTimestampWorkaround = codecNeedsEosBufferTimestampWorkaround(codecInfo.name);
+ passthroughEnabled = codecInfo.passthrough;
+ String codecMimeType = passthroughEnabled ? MimeTypes.AUDIO_RAW : codecInfo.codecMimeType;
+ MediaFormat mediaFormat =
+ getMediaFormat(format, codecMimeType, codecMaxInputSize, codecOperatingRate);
+ codec.configure(mediaFormat, /* surface= */ null, crypto, /* flags= */ 0);
+ if (passthroughEnabled) {
+ // Store the input MIME type if we're using the passthrough codec.
+ passthroughMediaFormat = mediaFormat;
+ passthroughMediaFormat.setString(MediaFormat.KEY_MIME, format.sampleMimeType);
+ } else {
+ passthroughMediaFormat = null;
+ }
+ }
+
+ @Override
+ protected @KeepCodecResult int canKeepCodec(
+ MediaCodec codec, MediaCodecInfo codecInfo, Format oldFormat, Format newFormat) {
+ // TODO: We currently rely on recreating the codec when encoder delay or padding is non-zero.
+ // Re-creating the codec is necessary to guarantee that onOutputFormatChanged is called, which
+ // is where encoder delay and padding are propagated to the sink. We should find a better way to
+ // propagate these values, and then allow the codec to be re-used in cases where this would
+ // otherwise be possible.
+ if (getCodecMaxInputSize(codecInfo, newFormat) > codecMaxInputSize
+ || oldFormat.encoderDelay != 0
+ || oldFormat.encoderPadding != 0
+ || newFormat.encoderDelay != 0
+ || newFormat.encoderPadding != 0) {
+ return KEEP_CODEC_RESULT_NO;
+ } else if (codecInfo.isSeamlessAdaptationSupported(
+ oldFormat, newFormat, /* isNewFormatComplete= */ true)) {
+ return KEEP_CODEC_RESULT_YES_WITHOUT_RECONFIGURATION;
+ } else if (canKeepCodecWithFlush(oldFormat, newFormat)) {
+ return KEEP_CODEC_RESULT_YES_WITH_FLUSH;
+ } else {
+ return KEEP_CODEC_RESULT_NO;
+ }
+ }
+
+ /**
+ * Returns whether the codec can be flushed and reused when switching to a new format. Reuse is
+ * generally possible when the codec would be configured in an identical way after the format
+ * change (excluding {@link MediaFormat#KEY_MAX_INPUT_SIZE} and configuration that does not come
+ * from the {@link Format}).
+ *
+ * @param oldFormat The first format.
+ * @param newFormat The second format.
+ * @return Whether the codec can be flushed and reused when switching to a new format.
+ */
+ protected boolean canKeepCodecWithFlush(Format oldFormat, Format newFormat) {
+ // Flush and reuse the codec if the audio format and initialization data matches. For Opus, we
+ // don't flush and reuse the codec because the decoder may discard samples after flushing, which
+ // would result in audio being dropped just after a stream change (see [Internal: b/143450854]).
+ return Util.areEqual(oldFormat.sampleMimeType, newFormat.sampleMimeType)
+ && oldFormat.channelCount == newFormat.channelCount
+ && oldFormat.sampleRate == newFormat.sampleRate
+ && oldFormat.pcmEncoding == newFormat.pcmEncoding
+ && oldFormat.initializationDataEquals(newFormat)
+ && !MimeTypes.AUDIO_OPUS.equals(oldFormat.sampleMimeType);
+ }
+
+ @Override
+ @Nullable
+ public MediaClock getMediaClock() {
+ return this;
+ }
+
+ @Override
+ protected float getCodecOperatingRateV23(
+ float operatingRate, Format format, Format[] streamFormats) {
+ // Use the highest known stream sample-rate up front, to avoid having to reconfigure the codec
+ // should an adaptive switch to that stream occur.
+ int maxSampleRate = -1;
+ for (Format streamFormat : streamFormats) {
+ int streamSampleRate = streamFormat.sampleRate;
+ if (streamSampleRate != Format.NO_VALUE) {
+ maxSampleRate = Math.max(maxSampleRate, streamSampleRate);
+ }
+ }
+ return maxSampleRate == -1 ? CODEC_OPERATING_RATE_UNSET : (maxSampleRate * operatingRate);
+ }
+
+ @Override
+ protected void onCodecInitialized(String name, long initializedTimestampMs,
+ long initializationDurationMs) {
+ eventDispatcher.decoderInitialized(name, initializedTimestampMs, initializationDurationMs);
+ }
+
+ @Override
+ protected void onInputFormatChanged(FormatHolder formatHolder) throws ExoPlaybackException {
+ super.onInputFormatChanged(formatHolder);
+ inputFormat = formatHolder.format;
+ eventDispatcher.inputFormatChanged(inputFormat);
+ }
+
+ @Override
+ protected void onOutputFormatChanged(MediaCodec codec, MediaFormat outputMediaFormat)
+ throws ExoPlaybackException {
+ @C.Encoding int encoding;
+ MediaFormat mediaFormat;
+ if (passthroughMediaFormat != null) {
+ mediaFormat = passthroughMediaFormat;
+ encoding =
+ getPassthroughEncoding(
+ mediaFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT),
+ mediaFormat.getString(MediaFormat.KEY_MIME));
+ } else {
+ mediaFormat = outputMediaFormat;
+ if (outputMediaFormat.containsKey(VIVO_BITS_PER_SAMPLE_KEY)) {
+ encoding = Util.getPcmEncoding(outputMediaFormat.getInteger(VIVO_BITS_PER_SAMPLE_KEY));
+ } else {
+ encoding = getPcmEncoding(inputFormat);
+ }
+ }
+ int channelCount = mediaFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
+ int sampleRate = mediaFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE);
+ int[] channelMap;
+ if (codecNeedsDiscardChannelsWorkaround && channelCount == 6 && inputFormat.channelCount < 6) {
+ channelMap = new int[inputFormat.channelCount];
+ for (int i = 0; i < inputFormat.channelCount; i++) {
+ channelMap[i] = i;
+ }
+ } else {
+ channelMap = null;
+ }
+
+ try {
+ audioSink.configure(
+ encoding,
+ channelCount,
+ sampleRate,
+          /* specifiedBufferSize= */ 0,
+ channelMap,
+ inputFormat.encoderDelay,
+ inputFormat.encoderPadding);
+ } catch (AudioSink.ConfigurationException e) {
+ // TODO(internal: b/145658993) Use outputFormat instead.
+ throw createRendererException(e, inputFormat);
+ }
+ }
+
+ /**
+ * Returns the {@link C.Encoding} constant to use for passthrough of the given format, or {@link
+ * C#ENCODING_INVALID} if passthrough is not possible.
+ */
+ @C.Encoding
+ protected int getPassthroughEncoding(int channelCount, String mimeType) {
+ if (MimeTypes.AUDIO_E_AC3_JOC.equals(mimeType)) {
+ // E-AC3 JOC is object-based so the output channel count is arbitrary.
+ if (audioSink.supportsOutput(/* channelCount= */ Format.NO_VALUE, C.ENCODING_E_AC3_JOC)) {
+ return MimeTypes.getEncoding(MimeTypes.AUDIO_E_AC3_JOC);
+ }
+ // E-AC3 receivers can decode JOC streams, but in 2-D rather than 3-D, so try to fall back.
+ mimeType = MimeTypes.AUDIO_E_AC3;
+ }
+
+ @C.Encoding int encoding = MimeTypes.getEncoding(mimeType);
+ if (audioSink.supportsOutput(channelCount, encoding)) {
+ return encoding;
+ } else {
+ return C.ENCODING_INVALID;
+ }
+ }
+
+ /**
+ * Called when the audio session id becomes known. The default implementation is a no-op. One
+ * reason for overriding this method would be to instantiate and enable a {@link Virtualizer} in
+ * order to spatialize the audio channels. For this use case, any {@link Virtualizer} instances
+ * should be released in {@link #onDisabled()} (if not before).
+ *
+ * @see AudioSink.Listener#onAudioSessionId(int)
+ */
+ protected void onAudioSessionId(int audioSessionId) {
+ // Do nothing.
+ }
+
+ /**
+ * @see AudioSink.Listener#onPositionDiscontinuity()
+ */
+ protected void onAudioTrackPositionDiscontinuity() {
+ // Do nothing.
+ }
+
+ /**
+ * @see AudioSink.Listener#onUnderrun(int, long, long)
+ */
+ protected void onAudioTrackUnderrun(int bufferSize, long bufferSizeMs,
+ long elapsedSinceLastFeedMs) {
+ // Do nothing.
+ }
+
+ @Override
+ protected void onEnabled(boolean joining) throws ExoPlaybackException {
+ super.onEnabled(joining);
+ eventDispatcher.enabled(decoderCounters);
+ int tunnelingAudioSessionId = getConfiguration().tunnelingAudioSessionId;
+ if (tunnelingAudioSessionId != C.AUDIO_SESSION_ID_UNSET) {
+ audioSink.enableTunnelingV21(tunnelingAudioSessionId);
+ } else {
+ audioSink.disableTunneling();
+ }
+ }
+
+ @Override
+ protected void onStreamChanged(Format[] formats, long offsetUs) throws ExoPlaybackException {
+ super.onStreamChanged(formats, offsetUs);
+ if (lastInputTimeUs != C.TIME_UNSET) {
+ if (pendingStreamChangeCount == pendingStreamChangeTimesUs.length) {
+ Log.w(
+ TAG,
+ "Too many stream changes, so dropping change at "
+ + pendingStreamChangeTimesUs[pendingStreamChangeCount - 1]);
+ } else {
+ pendingStreamChangeCount++;
+ }
+ pendingStreamChangeTimesUs[pendingStreamChangeCount - 1] = lastInputTimeUs;
+ }
+ }
+
+ @Override
+ protected void onPositionReset(long positionUs, boolean joining) throws ExoPlaybackException {
+ super.onPositionReset(positionUs, joining);
+ audioSink.flush();
+ currentPositionUs = positionUs;
+ allowFirstBufferPositionDiscontinuity = true;
+ allowPositionDiscontinuity = true;
+ lastInputTimeUs = C.TIME_UNSET;
+ pendingStreamChangeCount = 0;
+ }
+
+ @Override
+ protected void onStarted() {
+ super.onStarted();
+ audioSink.play();
+ }
+
+ @Override
+ protected void onStopped() {
+ updateCurrentPosition();
+ audioSink.pause();
+ super.onStopped();
+ }
+
+ @Override
+ protected void onDisabled() {
+ try {
+ lastInputTimeUs = C.TIME_UNSET;
+ pendingStreamChangeCount = 0;
+ audioSink.flush();
+ } finally {
+ try {
+ super.onDisabled();
+ } finally {
+ eventDispatcher.disabled(decoderCounters);
+ }
+ }
+ }
+
+ @Override
+ protected void onReset() {
+ try {
+ super.onReset();
+ } finally {
+ audioSink.reset();
+ }
+ }
+
+ @Override
+ public boolean isEnded() {
+ return super.isEnded() && audioSink.isEnded();
+ }
+
+ @Override
+ public boolean isReady() {
+ return audioSink.hasPendingData() || super.isReady();
+ }
+
+ @Override
+ public long getPositionUs() {
+ if (getState() == STATE_STARTED) {
+ updateCurrentPosition();
+ }
+ return currentPositionUs;
+ }
+
+ @Override
+ public void setPlaybackParameters(PlaybackParameters playbackParameters) {
+ audioSink.setPlaybackParameters(playbackParameters);
+ }
+
+ @Override
+ public PlaybackParameters getPlaybackParameters() {
+ return audioSink.getPlaybackParameters();
+ }
+
+ @Override
+ protected void onQueueInputBuffer(DecoderInputBuffer buffer) {
+ if (allowFirstBufferPositionDiscontinuity && !buffer.isDecodeOnly()) {
+ // TODO: Remove this hack once we have a proper fix for [Internal: b/71876314].
+ // Allow the position to jump if the first presentable input buffer has a timestamp that
+ // differs significantly from what was expected.
+ if (Math.abs(buffer.timeUs - currentPositionUs) > 500000) {
+ currentPositionUs = buffer.timeUs;
+ }
+ allowFirstBufferPositionDiscontinuity = false;
+ }
+ lastInputTimeUs = Math.max(buffer.timeUs, lastInputTimeUs);
+ }
+
+ @CallSuper
+ @Override
+ protected void onProcessedOutputBuffer(long presentationTimeUs) {
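+    // Flush through any pending stream changes whose last input time has now been processed,
+    // oldest first, signalling a discontinuity to the sink for each.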
+ while (pendingStreamChangeCount != 0 && presentationTimeUs >= pendingStreamChangeTimesUs[0]) {
+ audioSink.handleDiscontinuity();
+ pendingStreamChangeCount--;
+ System.arraycopy(
+ pendingStreamChangeTimesUs,
+ /* srcPos= */ 1,
+ pendingStreamChangeTimesUs,
+ /* destPos= */ 0,
+ pendingStreamChangeCount);
+ }
+ }
+
+ @Override
+ protected boolean processOutputBuffer(
+ long positionUs,
+ long elapsedRealtimeUs,
+ MediaCodec codec,
+ ByteBuffer buffer,
+ int bufferIndex,
+ int bufferFlags,
+ long bufferPresentationTimeUs,
+ boolean isDecodeOnlyBuffer,
+ boolean isLastBuffer,
+ Format format)
+ throws ExoPlaybackException {
+ if (codecNeedsEosBufferTimestampWorkaround
+ && bufferPresentationTimeUs == 0
+ && (bufferFlags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0
+ && lastInputTimeUs != C.TIME_UNSET) {
+ bufferPresentationTimeUs = lastInputTimeUs;
+ }
+
+ if (passthroughEnabled && (bufferFlags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
+ // Discard output buffers from the passthrough (raw) decoder containing codec specific data.
+ codec.releaseOutputBuffer(bufferIndex, false);
+ return true;
+ }
+
+ if (isDecodeOnlyBuffer) {
+ codec.releaseOutputBuffer(bufferIndex, false);
+ decoderCounters.skippedOutputBufferCount++;
+ audioSink.handleDiscontinuity();
+ return true;
+ }
+
+ try {
+ if (audioSink.handleBuffer(buffer, bufferPresentationTimeUs)) {
+ codec.releaseOutputBuffer(bufferIndex, false);
+ decoderCounters.renderedOutputBufferCount++;
+ return true;
+ }
+ } catch (AudioSink.InitializationException | AudioSink.WriteException e) {
+ // TODO(internal: b/145658993) Use outputFormat instead.
+ throw createRendererException(e, inputFormat);
+ }
+ return false;
+ }
+
+ @Override
+ protected void renderToEndOfStream() throws ExoPlaybackException {
+ try {
+ audioSink.playToEndOfStream();
+ } catch (AudioSink.WriteException e) {
+ // TODO(internal: b/145658993) Use outputFormat instead.
+ throw createRendererException(e, inputFormat);
+ }
+ }
+
+ @Override
+ public void handleMessage(int messageType, @Nullable Object message) throws ExoPlaybackException {
+ switch (messageType) {
+ case C.MSG_SET_VOLUME:
+ audioSink.setVolume((Float) message);
+ break;
+ case C.MSG_SET_AUDIO_ATTRIBUTES:
+ AudioAttributes audioAttributes = (AudioAttributes) message;
+ audioSink.setAudioAttributes(audioAttributes);
+ break;
+ case C.MSG_SET_AUX_EFFECT_INFO:
+ AuxEffectInfo auxEffectInfo = (AuxEffectInfo) message;
+ audioSink.setAuxEffectInfo(auxEffectInfo);
+ break;
+ default:
+ super.handleMessage(messageType, message);
+ break;
+ }
+ }
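
These messages are normally delivered on the playback thread via ExoPlayer's PlayerMessage mechanism rather than by calling handleMessage directly. A minimal sketch, assuming an ExoPlayer instance and this renderer are already in scope:

    // Route C.MSG_SET_VOLUME to the audio renderer; the Float payload uses
    // 0 for silence and 1 for unity gain.
    player
        .createMessage(audioRenderer)
        .setType(C.MSG_SET_VOLUME)
        .setPayload(0.5f)
        .send();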
+
+ /**
+ * Returns a maximum input size suitable for configuring a codec for {@code format} in a way that
+ * will allow possible adaptation to other compatible formats in {@code streamFormats}.
+ *
+ * @param codecInfo A {@link MediaCodecInfo} describing the decoder.
+ * @param format The {@link Format} for which the codec is being configured.
+ * @param streamFormats The possible stream formats.
+ * @return A suitable maximum input size.
+ */
+ protected int getCodecMaxInputSize(
+ MediaCodecInfo codecInfo, Format format, Format[] streamFormats) {
+ int maxInputSize = getCodecMaxInputSize(codecInfo, format);
+ if (streamFormats.length == 1) {
+ // The single entry in streamFormats must correspond to the format for which the codec is
+ // being configured.
+ return maxInputSize;
+ }
+ for (Format streamFormat : streamFormats) {
+ if (codecInfo.isSeamlessAdaptationSupported(
+ format, streamFormat, /* isNewFormatComplete= */ false)) {
+ maxInputSize = Math.max(maxInputSize, getCodecMaxInputSize(codecInfo, streamFormat));
+ }
+ }
+ return maxInputSize;
+ }
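
As a worked example (sizes illustrative): if streamFormats holds three AAC variants whose per-format sizes from getCodecMaxInputSize(codecInfo, format) are 8192, 12288, and 16384 bytes, and all three support seamless adaptation to format, the method returns 16384, so the codec's input buffers never need to grow when the player switches variants mid-stream.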
+
+ /**
+ * Returns a maximum input buffer size for a given {@link Format}.
+ *
+ * @param codecInfo A {@link MediaCodecInfo} describing the decoder.
+ * @param format The {@link Format}.
+ * @return A maximum input buffer size in bytes, or {@link Format#NO_VALUE} if a maximum could not
+ * be determined.
+ */
+ private int getCodecMaxInputSize(MediaCodecInfo codecInfo, Format format) {
+ if ("OMX.google.raw.decoder".equals(codecInfo.name)) {
+ // OMX.google.raw.decoder didn't resize its output buffers correctly prior to N, except on
+ // Android TV running M, so there's no point requesting a non-default input size. Doing so may
+ // cause a native crash, whereas not doing so will cause a more controlled failure when
+ // attempting to fill an input buffer. See: https://github.com/google/ExoPlayer/issues/4057.
+ if (Util.SDK_INT < 24 && !(Util.SDK_INT == 23 && Util.isTv(context))) {
+ return Format.NO_VALUE;
+ }
+ }
+ return format.maxInputSize;
+ }
+
+ /**
+ * Returns the framework {@link MediaFormat} that can be used to configure a {@link MediaCodec}
+ * for decoding the given {@link Format} for playback.
+ *
+ * @param format The {@link Format} of the media.
+ * @param codecMimeType The MIME type handled by the codec.
+ * @param codecMaxInputSize The maximum input size supported by the codec.
+ * @param codecOperatingRate The codec operating rate, or {@link #CODEC_OPERATING_RATE_UNSET} if
+ * no codec operating rate should be set.
+ * @return The framework {@link MediaFormat}.
+ */
+ @SuppressLint("InlinedApi")
+ protected MediaFormat getMediaFormat(
+ Format format, String codecMimeType, int codecMaxInputSize, float codecOperatingRate) {
+ MediaFormat mediaFormat = new MediaFormat();
+ // Set format parameters that should always be set.
+ mediaFormat.setString(MediaFormat.KEY_MIME, codecMimeType);
+ mediaFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, format.channelCount);
+ mediaFormat.setInteger(MediaFormat.KEY_SAMPLE_RATE, format.sampleRate);
+ MediaFormatUtil.setCsdBuffers(mediaFormat, format.initializationData);
+ // Set codec max values.
+ MediaFormatUtil.maybeSetInteger(mediaFormat, MediaFormat.KEY_MAX_INPUT_SIZE, codecMaxInputSize);
+ // Set codec configuration values.
+ if (Util.SDK_INT >= 23) {
+ mediaFormat.setInteger(MediaFormat.KEY_PRIORITY, 0 /* realtime priority */);
+ if (codecOperatingRate != CODEC_OPERATING_RATE_UNSET && !deviceDoesntSupportOperatingRate()) {
+ mediaFormat.setFloat(MediaFormat.KEY_OPERATING_RATE, codecOperatingRate);
+ }
+ }
+ if (Util.SDK_INT <= 28 && MimeTypes.AUDIO_AC4.equals(format.sampleMimeType)) {
+ // On some older builds, the AC-4 decoder expects to receive samples formatted as raw frames,
+ // not sync frames. Set a format key to override this.
+ mediaFormat.setInteger("ac4-is-sync", 1);
+ }
+ return mediaFormat;
+ }
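
For a hypothetical 44.1 kHz stereo AAC stream, the map built above is roughly equivalent to the following framework-side construction (values illustrative; the renderer additionally sets CSD buffers and, on API 23+, KEY_PRIORITY and possibly KEY_OPERATING_RATE):

    MediaFormat mediaFormat =
        MediaFormat.createAudioFormat(MediaFormat.MIMETYPE_AUDIO_AAC, 44100, 2);
    mediaFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 8192);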
+
+ private void updateCurrentPosition() {
+ long newCurrentPositionUs = audioSink.getCurrentPositionUs(isEnded());
+ if (newCurrentPositionUs != AudioSink.CURRENT_POSITION_NOT_SET) {
+ currentPositionUs =
+ allowPositionDiscontinuity
+ ? newCurrentPositionUs
+ : Math.max(currentPositionUs, newCurrentPositionUs);
+ allowPositionDiscontinuity = false;
+ }
+ }
+
+ /**
+ * Returns whether the device's decoders are known to not support setting the codec operating
+ * rate.
+ *
+ * <p>See <a href="https://github.com/google/ExoPlayer/issues/5821">GitHub issue #5821</a>.
+ */
+ private static boolean deviceDoesntSupportOperatingRate() {
+ return Util.SDK_INT == 23
+ && ("ZTE B2017G".equals(Util.MODEL) || "AXON 7 mini".equals(Util.MODEL));
+ }
+
+ /**
+ * Returns whether the decoder is known to output six audio channels when provided with input with
+ * fewer than six channels.
+ * <p>
+ * See [Internal: b/35655036].
+ */
+ private static boolean codecNeedsDiscardChannelsWorkaround(String codecName) {
+ // The workaround applies to Samsung Galaxy S6 and Samsung Galaxy S7.
+ return Util.SDK_INT < 24 && "OMX.SEC.aac.dec".equals(codecName)
+ && "samsung".equals(Util.MANUFACTURER)
+ && (Util.DEVICE.startsWith("zeroflte") || Util.DEVICE.startsWith("herolte")
+ || Util.DEVICE.startsWith("heroqlte"));
+ }
+
+ /**
+ * Returns whether the decoder may output a non-empty buffer with timestamp 0 as the end of stream
+ * buffer.
+ *
+ * <p>See <a href="https://github.com/google/ExoPlayer/issues/5045">GitHub issue #5045</a>.
+ */
+ private static boolean codecNeedsEosBufferTimestampWorkaround(String codecName) {
+ return Util.SDK_INT < 21
+ && "OMX.SEC.mp3.dec".equals(codecName)
+ && "samsung".equals(Util.MANUFACTURER)
+ && (Util.DEVICE.startsWith("baffin")
+ || Util.DEVICE.startsWith("grand")
+ || Util.DEVICE.startsWith("fortuna")
+ || Util.DEVICE.startsWith("gprimelte")
+ || Util.DEVICE.startsWith("j2y18lte")
+ || Util.DEVICE.startsWith("ms01"));
+ }
+
+ @C.Encoding
+ private static int getPcmEncoding(Format format) {
+ // If the format is anything other than PCM then we assume that the audio decoder will output
+ // 16-bit PCM.
+ return MimeTypes.AUDIO_RAW.equals(format.sampleMimeType)
+ ? format.pcmEncoding
+ : C.ENCODING_PCM_16BIT;
+ }
+
+ private final class AudioSinkListener implements AudioSink.Listener {
+
+ @Override
+ public void onAudioSessionId(int audioSessionId) {
+ eventDispatcher.audioSessionId(audioSessionId);
+ MediaCodecAudioRenderer.this.onAudioSessionId(audioSessionId);
+ }
+
+ @Override
+ public void onPositionDiscontinuity() {
+ onAudioTrackPositionDiscontinuity();
+ // We are out of sync so allow currentPositionUs to jump backwards.
+ MediaCodecAudioRenderer.this.allowPositionDiscontinuity = true;
+ }
+
+ @Override
+ public void onUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs) {
+ eventDispatcher.audioTrackUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs);
+ onAudioTrackUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs);
+ }
+
+ }
+
+}
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ResamplingAudioProcessor.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ResamplingAudioProcessor.java
new file mode 100644
index 0000000000..efd8a30d61
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/ResamplingAudioProcessor.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import java.nio.ByteBuffer;
+
+/**
+ * An {@link AudioProcessor} that converts different PCM audio encodings to 16-bit integer PCM. The
+ * following encodings are supported as input:
+ *
+ * <ul>
+ * <li>{@link C#ENCODING_PCM_8BIT}
+ * <li>{@link C#ENCODING_PCM_16BIT} ({@link #isActive()} will return {@code false})
+ * <li>{@link C#ENCODING_PCM_16BIT_BIG_ENDIAN}
+ * <li>{@link C#ENCODING_PCM_24BIT}
+ * <li>{@link C#ENCODING_PCM_32BIT}
+ * <li>{@link C#ENCODING_PCM_FLOAT}
+ * </ul>
+ */
+/* package */ final class ResamplingAudioProcessor extends BaseAudioProcessor {
+
+ @Override
+ public AudioFormat onConfigure(AudioFormat inputAudioFormat)
+ throws UnhandledAudioFormatException {
+ @C.PcmEncoding int encoding = inputAudioFormat.encoding;
+ if (encoding != C.ENCODING_PCM_8BIT
+ && encoding != C.ENCODING_PCM_16BIT
+ && encoding != C.ENCODING_PCM_16BIT_BIG_ENDIAN
+ && encoding != C.ENCODING_PCM_24BIT
+ && encoding != C.ENCODING_PCM_32BIT
+ && encoding != C.ENCODING_PCM_FLOAT) {
+ throw new UnhandledAudioFormatException(inputAudioFormat);
+ }
+ return encoding != C.ENCODING_PCM_16BIT
+ ? new AudioFormat(
+ inputAudioFormat.sampleRate, inputAudioFormat.channelCount, C.ENCODING_PCM_16BIT)
+ : AudioFormat.NOT_SET;
+ }
+
+ @Override
+ public void queueInput(ByteBuffer inputBuffer) {
+ // Prepare the output buffer.
+ int position = inputBuffer.position();
+ int limit = inputBuffer.limit();
+ int size = limit - position;
+ int resampledSize;
+ switch (inputAudioFormat.encoding) {
+ case C.ENCODING_PCM_8BIT:
+ resampledSize = size * 2;
+ break;
+ case C.ENCODING_PCM_16BIT_BIG_ENDIAN:
+ resampledSize = size;
+ break;
+ case C.ENCODING_PCM_24BIT:
+ resampledSize = (size / 3) * 2;
+ break;
+ case C.ENCODING_PCM_32BIT:
+ case C.ENCODING_PCM_FLOAT:
+ resampledSize = size / 2;
+ break;
+ case C.ENCODING_PCM_16BIT:
+ case C.ENCODING_INVALID:
+ case Format.NO_VALUE:
+ default:
+ throw new IllegalStateException();
+ }
+
+ // Resample the little endian input and update the input/output buffers.
+ ByteBuffer buffer = replaceOutputBuffer(resampledSize);
+ switch (inputAudioFormat.encoding) {
+ case C.ENCODING_PCM_8BIT:
+ // 8 -> 16 bit resampling. Shift each byte from [0, 256) to [-128, 128) and scale up.
+ for (int i = position; i < limit; i++) {
+ buffer.put((byte) 0);
+ buffer.put((byte) ((inputBuffer.get(i) & 0xFF) - 128));
+ }
+ break;
+ case C.ENCODING_PCM_16BIT_BIG_ENDIAN:
+ // Big endian to little endian resampling. Swap the byte order.
+ for (int i = position; i < limit; i += 2) {
+ buffer.put(inputBuffer.get(i + 1));
+ buffer.put(inputBuffer.get(i));
+ }
+ break;
+ case C.ENCODING_PCM_24BIT:
+ // 24 -> 16 bit resampling. Drop the least significant byte.
+ for (int i = position; i < limit; i += 3) {
+ buffer.put(inputBuffer.get(i + 1));
+ buffer.put(inputBuffer.get(i + 2));
+ }
+ break;
+ case C.ENCODING_PCM_32BIT:
+ // 32 -> 16 bit resampling. Drop the two least significant bytes.
+ for (int i = position; i < limit; i += 4) {
+ buffer.put(inputBuffer.get(i + 2));
+ buffer.put(inputBuffer.get(i + 3));
+ }
+ break;
+ case C.ENCODING_PCM_FLOAT:
+ // 32 bit floating point -> 16 bit resampling. Floating point values are in the range
+ // [-1.0, 1.0], so they are scaled by Short.MAX_VALUE.
+ for (int i = position; i < limit; i += 4) {
+ short value = (short) (inputBuffer.getFloat(i) * Short.MAX_VALUE);
+ buffer.put((byte) (value & 0xFF));
+ buffer.put((byte) ((value >> 8) & 0xFF));
+ }
+ break;
+ case C.ENCODING_PCM_16BIT:
+ case C.ENCODING_INVALID:
+ case Format.NO_VALUE:
+ default:
+ // Never happens.
+ throw new IllegalStateException();
+ }
+ inputBuffer.position(inputBuffer.limit());
+ buffer.flip();
+ }
+
+}
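
The ENCODING_PCM_FLOAT branch above scales samples in [-1.0, 1.0] by Short.MAX_VALUE and writes them in little-endian byte order. A self-contained sketch of the same conversion, usable outside the processor (input values illustrative):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public final class FloatTo16BitDemo {
      public static void main(String[] args) {
        float[] samples = {0f, 0.5f, -1.0f, 1.0f};
        ByteBuffer out =
            ByteBuffer.allocate(samples.length * 2).order(ByteOrder.LITTLE_ENDIAN);
        for (float sample : samples) {
          // Matches the processor: scale by Short.MAX_VALUE and truncate.
          out.putShort((short) (sample * Short.MAX_VALUE));
        }
        out.flip();
        while (out.hasRemaining()) {
          System.out.println(out.getShort()); // 0, 16383, -32767, 32767
        }
      }
    }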
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SilenceSkippingAudioProcessor.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SilenceSkippingAudioProcessor.java
new file mode 100644
index 0000000000..6a2c5ae9a6
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SilenceSkippingAudioProcessor.java
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import androidx.annotation.IntDef;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.nio.ByteBuffer;
+
+/**
+ * An {@link AudioProcessor} that skips silence in the input stream. Input and output are 16-bit
+ * PCM.
+ */
+public final class SilenceSkippingAudioProcessor extends BaseAudioProcessor {
+
+ /**
+ * The minimum duration of audio that must be below {@link #SILENCE_THRESHOLD_LEVEL} to classify
+ * that part of audio as silent, in microseconds.
+ */
+ private static final long MINIMUM_SILENCE_DURATION_US = 150_000;
+ /**
+ * The duration of silence by which to extend non-silent sections, in microseconds. The value must
+ * not exceed {@link #MINIMUM_SILENCE_DURATION_US}.
+ */
+ private static final long PADDING_SILENCE_US = 20_000;
+ /**
+ * The absolute level below which an individual PCM sample is classified as silent. Note: the
+ * specified value will be rounded so that the threshold check only depends on the more
+ * significant byte, for efficiency.
+ */
+ private static final short SILENCE_THRESHOLD_LEVEL = 1024;
+
+ /**
+ * Threshold for classifying an individual PCM sample as silent based on its more significant
+ * byte. This is {@link #SILENCE_THRESHOLD_LEVEL} divided by 256 with rounding.
+ */
+ private static final byte SILENCE_THRESHOLD_LEVEL_MSB = (SILENCE_THRESHOLD_LEVEL + 128) >> 8;
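
Worked through: (1024 + 128) >> 8 == 4, so a 16-bit sample counts as silent when the absolute value of its more significant byte is at most 4; in sample terms the effective cutoff sits near 1280 rather than exactly 1024, since adding 128 before the shift rounds the division by 256 to the nearest integer.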
+
+ /** Trimming states. */
+ @Documented
+ @Retention(RetentionPolicy.SOURCE)
+ @IntDef({
+ STATE_NOISY,
+ STATE_MAYBE_SILENT,
+ STATE_SILENT,
+ })
+ private @interface State {}
+ /** State when the input is not silent. */
+ private static final int STATE_NOISY = 0;
+ /** State when the input may be silent but we haven't read enough yet to know. */
+ private static final int STATE_MAYBE_SILENT = 1;
+ /** State when the input is silent. */
+ private static final int STATE_SILENT = 2;
+
+ private int bytesPerFrame;
+
+ private boolean enabled;
+
+ /**
+ * Buffers audio data that may be classified as silence while in {@link #STATE_MAYBE_SILENT}. If
+ * the input becomes noisy before the buffer has filled, it will be output. Otherwise, the buffer
+ * contents will be dropped and the state will transition to {@link #STATE_SILENT}.
+ */
+ private byte[] maybeSilenceBuffer;
+
+ /**
+ * Stores the latest part of the input while silent. It will be output as padding if the next
+ * input is noisy.
+ */
+ private byte[] paddingBuffer;
+
+ @State private int state;
+ private int maybeSilenceBufferSize;
+ private int paddingSize;
+ private boolean hasOutputNoise;
+ private long skippedFrames;
+
+ /** Creates a new silence trimming audio processor. */
+ public SilenceSkippingAudioProcessor() {
+ maybeSilenceBuffer = Util.EMPTY_BYTE_ARRAY;
+ paddingBuffer = Util.EMPTY_BYTE_ARRAY;
+ }
+
+ /**
+ * Sets whether to skip silence in the input. This method may only be called after draining data
+ * through the processor. The value returned by {@link #isActive()} may change, and the processor
+ * must be {@link #flush() flushed} before queueing more data.
+ *
+ * @param enabled Whether to skip silence in the input.
+ */
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ /**
+ * Returns the total number of frames of input audio that were skipped due to being classified as
+ * silence since the last call to {@link #flush()}.
+ */
+ public long getSkippedFrames() {
+ return skippedFrames;
+ }
+
+ // AudioProcessor implementation.
+
+ @Override
+ public AudioFormat onConfigure(AudioFormat inputAudioFormat)
+ throws UnhandledAudioFormatException {
+ if (inputAudioFormat.encoding != C.ENCODING_PCM_16BIT) {
+ throw new UnhandledAudioFormatException(inputAudioFormat);
+ }
+ return enabled ? inputAudioFormat : AudioFormat.NOT_SET;
+ }
+
+ @Override
+ public boolean isActive() {
+ return enabled;
+ }
+
+ @Override
+ public void queueInput(ByteBuffer inputBuffer) {
+ while (inputBuffer.hasRemaining() && !hasPendingOutput()) {
+ switch (state) {
+ case STATE_NOISY:
+ processNoisy(inputBuffer);
+ break;
+ case STATE_MAYBE_SILENT:
+ processMaybeSilence(inputBuffer);
+ break;
+ case STATE_SILENT:
+ processSilence(inputBuffer);
+ break;
+ default:
+ throw new IllegalStateException();
+ }
+ }
+ }
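
A hedged sketch of the drive loop a caller (in practice DefaultAudioSink) runs against this processor; readMorePcm() is a hypothetical source of little-endian 16-bit PCM buffers, and UnhandledAudioFormatException handling is elided:

    SilenceSkippingAudioProcessor processor = new SilenceSkippingAudioProcessor();
    processor.setEnabled(true); // Must precede configure() for isActive() to be true.
    processor.configure(
        new AudioProcessor.AudioFormat(
            /* sampleRate= */ 44100, /* channelCount= */ 2, C.ENCODING_PCM_16BIT));
    processor.flush();
    ByteBuffer input;
    while ((input = readMorePcm()) != null) {
      while (input.hasRemaining()) {
        processor.queueInput(input);
        ByteBuffer output = processor.getOutput(); // May be empty.
        // ... forward `output` downstream ...
      }
    }
    processor.queueEndOfStream();
    // Then drain any final output via getOutput() until isEnded().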
+
+ @Override
+ protected void onQueueEndOfStream() {
+ if (maybeSilenceBufferSize > 0) {
+ // We haven't received enough silence to transition to the silent state, so output the buffer.
+ output(maybeSilenceBuffer, maybeSilenceBufferSize);
+ }
+ if (!hasOutputNoise) {
+ skippedFrames += paddingSize / bytesPerFrame;
+ }
+ }
+
+ @Override
+ protected void onFlush() {
+ if (enabled) {
+ bytesPerFrame = inputAudioFormat.bytesPerFrame;
+ int maybeSilenceBufferSize = durationUsToFrames(MINIMUM_SILENCE_DURATION_US) * bytesPerFrame;
+ if (maybeSilenceBuffer.length != maybeSilenceBufferSize) {
+ maybeSilenceBuffer = new byte[maybeSilenceBufferSize];
+ }
+ paddingSize = durationUsToFrames(PADDING_SILENCE_US) * bytesPerFrame;
+ if (paddingBuffer.length != paddingSize) {
+ paddingBuffer = new byte[paddingSize];
+ }
+ }
+ state = STATE_NOISY;
+ skippedFrames = 0;
+ maybeSilenceBufferSize = 0;
+ hasOutputNoise = false;
+ }
+
+ @Override
+ protected void onReset() {
+ enabled = false;
+ paddingSize = 0;
+ maybeSilenceBuffer = Util.EMPTY_BYTE_ARRAY;
+ paddingBuffer = Util.EMPTY_BYTE_ARRAY;
+ }
+
+ // Internal methods.
+
+ /**
+ * Incrementally processes new input from {@code inputBuffer} while in {@link #STATE_NOISY},
+ * updating the state if needed.
+ */
+ private void processNoisy(ByteBuffer inputBuffer) {
+ int limit = inputBuffer.limit();
+
+ // Check if there's any noise within the maybe silence buffer duration.
+ inputBuffer.limit(Math.min(limit, inputBuffer.position() + maybeSilenceBuffer.length));
+ int noiseLimit = findNoiseLimit(inputBuffer);
+ if (noiseLimit == inputBuffer.position()) {
+ // The buffer contains the start of possible silence.
+ state = STATE_MAYBE_SILENT;
+ } else {
+ inputBuffer.limit(noiseLimit);
+ output(inputBuffer);
+ }
+
+ // Restore the limit.
+ inputBuffer.limit(limit);
+ }
+
+ /**
+ * Incrementally processes new input from {@code inputBuffer} while in {@link
+ * #STATE_MAYBE_SILENT}, updating the state if needed.
+ */
+ private void processMaybeSilence(ByteBuffer inputBuffer) {
+ int limit = inputBuffer.limit();
+ int noisePosition = findNoisePosition(inputBuffer);
+ int maybeSilenceInputSize = noisePosition - inputBuffer.position();
+ int maybeSilenceBufferRemaining = maybeSilenceBuffer.length - maybeSilenceBufferSize;
+ if (noisePosition < limit && maybeSilenceInputSize < maybeSilenceBufferRemaining) {
+ // The maybe silence buffer isn't full, so output it and switch back to the noisy state.
+ output(maybeSilenceBuffer, maybeSilenceBufferSize);
+ maybeSilenceBufferSize = 0;
+ state = STATE_NOISY;
+ } else {
+ // Fill as much of the maybe silence buffer as possible.
+ int bytesToWrite = Math.min(maybeSilenceInputSize, maybeSilenceBufferRemaining);
+ inputBuffer.limit(inputBuffer.position() + bytesToWrite);
+ inputBuffer.get(maybeSilenceBuffer, maybeSilenceBufferSize, bytesToWrite);
+ maybeSilenceBufferSize += bytesToWrite;
+ if (maybeSilenceBufferSize == maybeSilenceBuffer.length) {
+ // We've reached a period of silence, so skip it, taking into account padding for both
+ // the noisy to silent transition and any future silent to noisy transition.
+ if (hasOutputNoise) {
+ output(maybeSilenceBuffer, paddingSize);
+ skippedFrames += (maybeSilenceBufferSize - paddingSize * 2) / bytesPerFrame;
+ } else {
+ skippedFrames += (maybeSilenceBufferSize - paddingSize) / bytesPerFrame;
+ }
+ updatePaddingBuffer(inputBuffer, maybeSilenceBuffer, maybeSilenceBufferSize);
+ maybeSilenceBufferSize = 0;
+ state = STATE_SILENT;
+ }
+
+ // Restore the limit.
+ inputBuffer.limit(limit);
+ }
+ }
+
+ /**
+ * Incrementally processes new input from {@code inputBuffer} while in {@link #STATE_SILENT},
+ * updating the state if needed.
+ */
+ private void processSilence(ByteBuffer inputBuffer) {
+ int limit = inputBuffer.limit();
+ int noisyPosition = findNoisePosition(inputBuffer);
+ inputBuffer.limit(noisyPosition);
+ skippedFrames += inputBuffer.remaining() / bytesPerFrame;
+ updatePaddingBuffer(inputBuffer, paddingBuffer, paddingSize);
+ if (noisyPosition < limit) {
+ // Output the padding, which may include previous input as well as new input, then transition
+ // back to the noisy state.
+ output(paddingBuffer, paddingSize);
+ state = STATE_NOISY;
+
+ // Restore the limit.
+ inputBuffer.limit(limit);
+ }
+ }
+
+ /**
+ * Copies {@code length} elements from {@code data} to populate a new output buffer from the
+ * processor.
+ */
+ private void output(byte[] data, int length) {
+ replaceOutputBuffer(length).put(data, 0, length).flip();
+ if (length > 0) {
+ hasOutputNoise = true;
+ }
+ }
+
+ /**
+ * Copies remaining bytes from {@code data} to populate a new output buffer from the processor.
+ */
+ private void output(ByteBuffer data) {
+ int length = data.remaining();
+ replaceOutputBuffer(length).put(data).flip();
+ if (length > 0) {
+ hasOutputNoise = true;
+ }
+ }
+
+ /**
+ * Fills {@link #paddingBuffer} using data from {@code input}, plus any additional buffered data
+ * at the end of {@code buffer} (up to its {@code size}) required to fill it, advancing the input
+ * position.
+ */
+ private void updatePaddingBuffer(ByteBuffer input, byte[] buffer, int size) {
+ int fromInputSize = Math.min(input.remaining(), paddingSize);
+ int fromBufferSize = paddingSize - fromInputSize;
+ System.arraycopy(
+ /* src= */ buffer,
+ /* srcPos= */ size - fromBufferSize,
+ /* dest= */ paddingBuffer,
+ /* destPos= */ 0,
+ /* length= */ fromBufferSize);
+ input.position(input.limit() - fromInputSize);
+ input.get(paddingBuffer, fromBufferSize, fromInputSize);
+ }
+
+ /**
+ * Returns the number of input frames corresponding to {@code durationUs} microseconds of audio.
+ */
+ private int durationUsToFrames(long durationUs) {
+ return (int) ((durationUs * inputAudioFormat.sampleRate) / C.MICROS_PER_SECOND);
+ }
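
Worked through for 44100 Hz input: MINIMUM_SILENCE_DURATION_US maps to (150_000 * 44100) / 1_000_000 = 6615 frames and PADDING_SILENCE_US to 882 frames, so with 16-bit stereo (4 bytes per frame) onFlush() sizes maybeSilenceBuffer at 26460 bytes and paddingBuffer at 3528 bytes.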
+
+ /**
+ * Returns the earliest byte position in [position, limit) of {@code buffer} that contains a frame
+ * classified as a noisy frame, or the limit of the buffer if no such frame exists.
+ */
+ private int findNoisePosition(ByteBuffer buffer) {
+ // The input is in ByteOrder.nativeOrder(), which is little endian on Android.
+ for (int i = buffer.position() + 1; i < buffer.limit(); i += 2) {
+ if (Math.abs(buffer.get(i)) > SILENCE_THRESHOLD_LEVEL_MSB) {
+ // Round to the start of the frame.
+ return bytesPerFrame * (i / bytesPerFrame);
+ }
+ }
+ return buffer.limit();
+ }
+
+ /**
+ * Returns the earliest byte position in [position, limit) of {@code buffer} such that all frames
+ * from the byte position to the limit are classified as silent.
+ */
+ private int findNoiseLimit(ByteBuffer buffer) {
+ // The input is in ByteOrder.nativeOrder(), which is little endian on Android.
+ for (int i = buffer.limit() - 1; i >= buffer.position(); i -= 2) {
+ if (Math.abs(buffer.get(i)) > SILENCE_THRESHOLD_LEVEL_MSB) {
+ // Return the start of the next frame.
+ return bytesPerFrame * (i / bytesPerFrame) + bytesPerFrame;
+ }
+ }
+ return buffer.position();
+ }
+}
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SimpleDecoderAudioRenderer.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SimpleDecoderAudioRenderer.java
new file mode 100644
index 0000000000..5e86e0ad78
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SimpleDecoderAudioRenderer.java
@@ -0,0 +1,758 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import android.media.audiofx.Virtualizer;
+import android.os.Handler;
+import android.os.SystemClock;
+import androidx.annotation.IntDef;
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.BaseRenderer;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.ExoPlaybackException;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.ExoPlayer;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.FormatHolder;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.PlaybackParameters;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.PlayerMessage.Target;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.RendererCapabilities;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.audio.AudioRendererEventListener.EventDispatcher;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.decoder.DecoderCounters;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.decoder.DecoderInputBuffer;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.decoder.SimpleDecoder;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.decoder.SimpleOutputBuffer;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.drm.DrmSession;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.drm.DrmSession.DrmSessionException;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.drm.DrmSessionManager;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.drm.ExoMediaCrypto;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Assertions;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.MediaClock;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.MimeTypes;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.TraceUtil;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+/**
+ * Decodes and renders audio using a {@link SimpleDecoder}.
+ *
+ * <p>This renderer accepts the following messages sent via {@link ExoPlayer#createMessage(Target)}
+ * on the playback thread:
+ *
+ * <ul>
+ * <li>Message with type {@link C#MSG_SET_VOLUME} to set the volume. The message payload should be
+ * a {@link Float} with 0 being silence and 1 being unity gain.
+ * <li>Message with type {@link C#MSG_SET_AUDIO_ATTRIBUTES} to set the audio attributes. The
+ * message payload should be an {@link org.mozilla.thirdparty.com.google.android.exoplayer2.audio.AudioAttributes}
+ * instance that will configure the underlying audio track.
+ * <li>Message with type {@link C#MSG_SET_AUX_EFFECT_INFO} to set the auxiliary effect. The
+ * message payload should be an {@link AuxEffectInfo} instance that will configure the
+ * underlying audio track.
+ * </ul>
+ */
+public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements MediaClock {
+
+ @Documented
+ @Retention(RetentionPolicy.SOURCE)
+ @IntDef({
+ REINITIALIZATION_STATE_NONE,
+ REINITIALIZATION_STATE_SIGNAL_END_OF_STREAM,
+ REINITIALIZATION_STATE_WAIT_END_OF_STREAM
+ })
+ private @interface ReinitializationState {}
+ /**
+ * The decoder does not need to be re-initialized.
+ */
+ private static final int REINITIALIZATION_STATE_NONE = 0;
+ /**
+ * The input format has changed in a way that requires the decoder to be re-initialized, but we
+ * haven't yet signaled an end of stream to the existing decoder. We need to do so in order to
+ * ensure that it outputs any remaining buffers before we release it.
+ */
+ private static final int REINITIALIZATION_STATE_SIGNAL_END_OF_STREAM = 1;
+ /**
+ * The input format has changed in a way that requires the decoder to be re-initialized, and we've
+ * signaled an end of stream to the existing decoder. We're waiting for the decoder to output an
+ * end of stream signal to indicate that it has output any remaining buffers before we release it.
+ */
+ private static final int REINITIALIZATION_STATE_WAIT_END_OF_STREAM = 2;
+
+ private final DrmSessionManager<ExoMediaCrypto> drmSessionManager;
+ private final boolean playClearSamplesWithoutKeys;
+ private final EventDispatcher eventDispatcher;
+ private final AudioSink audioSink;
+ private final DecoderInputBuffer flagsOnlyBuffer;
+
+ private boolean drmResourcesAcquired;
+ private DecoderCounters decoderCounters;
+ private Format inputFormat;
+ private int encoderDelay;
+ private int encoderPadding;
+ private SimpleDecoder<DecoderInputBuffer, ? extends SimpleOutputBuffer,
+ ? extends AudioDecoderException> decoder;
+ private DecoderInputBuffer inputBuffer;
+ private SimpleOutputBuffer outputBuffer;
+ @Nullable private DrmSession<ExoMediaCrypto> decoderDrmSession;
+ @Nullable private DrmSession<ExoMediaCrypto> sourceDrmSession;
+
+ @ReinitializationState private int decoderReinitializationState;
+ private boolean decoderReceivedBuffers;
+ private boolean audioTrackNeedsConfigure;
+
+ private long currentPositionUs;
+ private boolean allowFirstBufferPositionDiscontinuity;
+ private boolean allowPositionDiscontinuity;
+ private boolean inputStreamEnded;
+ private boolean outputStreamEnded;
+ private boolean waitingForKeys;
+
+ public SimpleDecoderAudioRenderer() {
+ this(/* eventHandler= */ null, /* eventListener= */ null);
+ }
+
+ /**
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ * @param audioProcessors Optional {@link AudioProcessor}s that will process audio before output.
+ */
+ public SimpleDecoderAudioRenderer(
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener,
+ AudioProcessor... audioProcessors) {
+ this(
+ eventHandler,
+ eventListener,
+ /* audioCapabilities= */ null,
+ /* drmSessionManager= */ null,
+ /* playClearSamplesWithoutKeys= */ false,
+ audioProcessors);
+ }
+
+ /**
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
+ * default capabilities (no encoded audio passthrough support) should be assumed.
+ */
+ public SimpleDecoderAudioRenderer(
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener,
+ @Nullable AudioCapabilities audioCapabilities) {
+ this(
+ eventHandler,
+ eventListener,
+ audioCapabilities,
+ /* drmSessionManager= */ null,
+ /* playClearSamplesWithoutKeys= */ false);
+ }
+
+ /**
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
+ * default capabilities (no encoded audio passthrough support) should be assumed.
+ * @param drmSessionManager For use with encrypted media. May be null if support for encrypted
+ * media is not required.
+ * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
+ * For example, a media file may start with a short clear region so as to allow playback to
+ * begin in parallel with key acquisition. This parameter specifies whether the renderer is
+ * permitted to play clear regions of encrypted media files before {@code drmSessionManager}
+ * has obtained the keys necessary to decrypt encrypted regions of the media.
+ * @param audioProcessors Optional {@link AudioProcessor}s that will process audio before output.
+ */
+ public SimpleDecoderAudioRenderer(
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener,
+ @Nullable AudioCapabilities audioCapabilities,
+ @Nullable DrmSessionManager<ExoMediaCrypto> drmSessionManager,
+ boolean playClearSamplesWithoutKeys,
+ AudioProcessor... audioProcessors) {
+ this(eventHandler, eventListener, drmSessionManager,
+ playClearSamplesWithoutKeys, new DefaultAudioSink(audioCapabilities, audioProcessors));
+ }
+
+ /**
+ * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+ * null if delivery of events is not required.
+ * @param eventListener A listener of events. May be null if delivery of events is not required.
+ * @param drmSessionManager For use with encrypted media. May be null if support for encrypted
+ * media is not required.
+ * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
+ * For example, a media file may start with a short clear region so as to allow playback to
+ * begin in parallel with key acquisition. This parameter specifies whether the renderer is
+ * permitted to play clear regions of encrypted media files before {@code drmSessionManager}
+ * has obtained the keys necessary to decrypt encrypted regions of the media.
+ * @param audioSink The sink to which audio will be output.
+ */
+ public SimpleDecoderAudioRenderer(
+ @Nullable Handler eventHandler,
+ @Nullable AudioRendererEventListener eventListener,
+ @Nullable DrmSessionManager<ExoMediaCrypto> drmSessionManager,
+ boolean playClearSamplesWithoutKeys,
+ AudioSink audioSink) {
+ super(C.TRACK_TYPE_AUDIO);
+ this.drmSessionManager = drmSessionManager;
+ this.playClearSamplesWithoutKeys = playClearSamplesWithoutKeys;
+ eventDispatcher = new EventDispatcher(eventHandler, eventListener);
+ this.audioSink = audioSink;
+ audioSink.setListener(new AudioSinkListener());
+ flagsOnlyBuffer = DecoderInputBuffer.newFlagsOnlyInstance();
+ decoderReinitializationState = REINITIALIZATION_STATE_NONE;
+ audioTrackNeedsConfigure = true;
+ }
+
+ @Override
+ @Nullable
+ public MediaClock getMediaClock() {
+ return this;
+ }
+
+ @Override
+ @Capabilities
+ public final int supportsFormat(Format format) {
+ if (!MimeTypes.isAudio(format.sampleMimeType)) {
+ return RendererCapabilities.create(FORMAT_UNSUPPORTED_TYPE);
+ }
+ @FormatSupport int formatSupport = supportsFormatInternal(drmSessionManager, format);
+ if (formatSupport <= FORMAT_UNSUPPORTED_DRM) {
+ return RendererCapabilities.create(formatSupport);
+ }
+ @TunnelingSupport
+ int tunnelingSupport = Util.SDK_INT >= 21 ? TUNNELING_SUPPORTED : TUNNELING_NOT_SUPPORTED;
+ return RendererCapabilities.create(formatSupport, ADAPTIVE_NOT_SEAMLESS, tunnelingSupport);
+ }
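
The packed @Capabilities int can be unpacked with the static helpers added alongside RendererCapabilities.create. A hedged sketch, assuming renderer and format are in scope (supportsFormat may throw ExoPlaybackException; handling elided):

    @Capabilities int capabilities = renderer.supportsFormat(format);
    @FormatSupport int formatSupport = RendererCapabilities.getFormatSupport(capabilities);
    if (formatSupport == RendererCapabilities.FORMAT_HANDLED) {
      // Tunneling support rides in the same int.
      boolean tunneling =
          RendererCapabilities.getTunnelingSupport(capabilities)
              == RendererCapabilities.TUNNELING_SUPPORTED;
    }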
+
+ /**
+ * Returns the {@link FormatSupport} for the given {@link Format}.
+ *
+ * @param drmSessionManager The renderer's {@link DrmSessionManager}.
+ * @param format The format, which has an audio {@link Format#sampleMimeType}.
+ * @return The {@link FormatSupport} for this {@link Format}.
+ */
+ @FormatSupport
+ protected abstract int supportsFormatInternal(
+ @Nullable DrmSessionManager<ExoMediaCrypto> drmSessionManager, Format format);
+
+ /**
+ * Returns whether the sink supports the audio format.
+ *
+ * @see AudioSink#supportsOutput(int, int)
+ */
+ protected final boolean supportsOutput(int channelCount, @C.Encoding int encoding) {
+ return audioSink.supportsOutput(channelCount, encoding);
+ }
+
+ @Override
+ public void render(long positionUs, long elapsedRealtimeUs) throws ExoPlaybackException {
+ if (outputStreamEnded) {
+ try {
+ audioSink.playToEndOfStream();
+ } catch (AudioSink.WriteException e) {
+ throw createRendererException(e, inputFormat);
+ }
+ return;
+ }
+
+ // Try and read a format if we don't have one already.
+ if (inputFormat == null) {
+ // We don't have a format yet, so try and read one.
+ FormatHolder formatHolder = getFormatHolder();
+ flagsOnlyBuffer.clear();
+ int result = readSource(formatHolder, flagsOnlyBuffer, true);
+ if (result == C.RESULT_FORMAT_READ) {
+ onInputFormatChanged(formatHolder);
+ } else if (result == C.RESULT_BUFFER_READ) {
+ // The end of stream was read before a format was read.
+ Assertions.checkState(flagsOnlyBuffer.isEndOfStream());
+ inputStreamEnded = true;
+ processEndOfStream();
+ return;
+ } else {
+ // We still don't have a format and can't make progress without one.
+ return;
+ }
+ }
+
+ // If we don't have a decoder yet, we need to instantiate one.
+ maybeInitDecoder();
+
+ if (decoder != null) {
+ try {
+ // Rendering loop.
+ TraceUtil.beginSection("drainAndFeed");
+ while (drainOutputBuffer()) {}
+ while (feedInputBuffer()) {}
+ TraceUtil.endSection();
+ } catch (AudioDecoderException | AudioSink.ConfigurationException
+ | AudioSink.InitializationException | AudioSink.WriteException e) {
+ throw createRendererException(e, inputFormat);
+ }
+ decoderCounters.ensureUpdated();
+ }
+ }
+
+ /**
+ * Called when the audio session id becomes known. The default implementation is a no-op. One
+ * reason for overriding this method would be to instantiate and enable a {@link Virtualizer} in
+ * order to spatialize the audio channels. For this use case, any {@link Virtualizer} instances
+ * should be released in {@link #onDisabled()} (if not before).
+ *
+ * @see AudioSink.Listener#onAudioSessionId(int)
+ */
+ protected void onAudioSessionId(int audioSessionId) {
+ // Do nothing.
+ }
+
+ /**
+ * @see AudioSink.Listener#onPositionDiscontinuity()
+ */
+ protected void onAudioTrackPositionDiscontinuity() {
+ // Do nothing.
+ }
+
+ /**
+ * @see AudioSink.Listener#onUnderrun(int, long, long)
+ */
+ protected void onAudioTrackUnderrun(int bufferSize, long bufferSizeMs,
+ long elapsedSinceLastFeedMs) {
+ // Do nothing.
+ }
+
+ /**
+ * Creates a decoder for the given format.
+ *
+ * @param format The format for which a decoder is required.
+ * @param mediaCrypto The {@link ExoMediaCrypto} object required for decoding encrypted content.
+ * May be null, and can be ignored if the decoder does not handle encrypted content.
+ * @return The decoder.
+ * @throws AudioDecoderException If an error occurred creating a suitable decoder.
+ */
+ protected abstract SimpleDecoder<
+ DecoderInputBuffer, ? extends SimpleOutputBuffer, ? extends AudioDecoderException>
+ createDecoder(Format format, @Nullable ExoMediaCrypto mediaCrypto)
+ throws AudioDecoderException;
+
+ /**
+ * Returns the format of audio buffers output by the decoder. Will not be called until the first
+ * output buffer has been dequeued, so the decoder may use input data to determine the format.
+ */
+ protected abstract Format getOutputFormat();
+
+ /**
+ * Returns whether the existing decoder can be kept for a new format.
+ *
+ * @param oldFormat The previous format.
+ * @param newFormat The new format.
+ * @return True if the existing decoder can be kept.
+ */
+ protected boolean canKeepCodec(Format oldFormat, Format newFormat) {
+ return false;
+ }
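
Taken together, supportsFormatInternal, createDecoder, and getOutputFormat are all a concrete renderer must supply. A hedged sketch of a minimal subclass; FlacDecoder and its members are hypothetical stand-ins for any SimpleDecoder implementation (annotations and imports elided):

    final class FlacAudioRenderer extends SimpleDecoderAudioRenderer {

      private FlacDecoder decoder; // Hypothetical SimpleDecoder subclass.

      @Override
      @FormatSupport
      protected int supportsFormatInternal(
          DrmSessionManager<ExoMediaCrypto> drmSessionManager, Format format) {
        if (!MimeTypes.AUDIO_FLAC.equals(format.sampleMimeType)) {
          return FORMAT_UNSUPPORTED_SUBTYPE;
        }
        // Assume the decoder outputs 16-bit PCM and check the sink accepts it.
        return supportsOutput(format.channelCount, C.ENCODING_PCM_16BIT)
            ? FORMAT_HANDLED
            : FORMAT_EXCEEDS_CAPABILITIES;
      }

      @Override
      protected FlacDecoder createDecoder(Format format, ExoMediaCrypto mediaCrypto)
          throws AudioDecoderException {
        decoder = new FlacDecoder(format.initializationData); // Hypothetical ctor.
        return decoder;
      }

      @Override
      protected Format getOutputFormat() {
        // Safe to call once the first output buffer has been dequeued.
        return decoder.getDecodedOutputFormat(); // Hypothetical accessor.
      }
    }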
+
+ private boolean drainOutputBuffer() throws ExoPlaybackException, AudioDecoderException,
+ AudioSink.ConfigurationException, AudioSink.InitializationException,
+ AudioSink.WriteException {
+ if (outputBuffer == null) {
+ outputBuffer = decoder.dequeueOutputBuffer();
+ if (outputBuffer == null) {
+ return false;
+ }
+ if (outputBuffer.skippedOutputBufferCount > 0) {
+ decoderCounters.skippedOutputBufferCount += outputBuffer.skippedOutputBufferCount;
+ audioSink.handleDiscontinuity();
+ }
+ }
+
+ if (outputBuffer.isEndOfStream()) {
+ if (decoderReinitializationState == REINITIALIZATION_STATE_WAIT_END_OF_STREAM) {
+ // We're waiting to re-initialize the decoder, and have now processed all final buffers.
+ releaseDecoder();
+ maybeInitDecoder();
+ // The audio track may need to be recreated once the new output format is known.
+ audioTrackNeedsConfigure = true;
+ } else {
+ outputBuffer.release();
+ outputBuffer = null;
+ processEndOfStream();
+ }
+ return false;
+ }
+
+ if (audioTrackNeedsConfigure) {
+ Format outputFormat = getOutputFormat();
+ audioSink.configure(outputFormat.pcmEncoding, outputFormat.channelCount,
+ outputFormat.sampleRate, 0, null, encoderDelay, encoderPadding);
+ audioTrackNeedsConfigure = false;
+ }
+
+ if (audioSink.handleBuffer(outputBuffer.data, outputBuffer.timeUs)) {
+ decoderCounters.renderedOutputBufferCount++;
+ outputBuffer.release();
+ outputBuffer = null;
+ return true;
+ }
+
+ return false;
+ }
+
+ private boolean feedInputBuffer() throws AudioDecoderException, ExoPlaybackException {
+ if (decoder == null || decoderReinitializationState == REINITIALIZATION_STATE_WAIT_END_OF_STREAM
+ || inputStreamEnded) {
+ // We need to reinitialize the decoder or the input stream has ended.
+ return false;
+ }
+
+ if (inputBuffer == null) {
+ inputBuffer = decoder.dequeueInputBuffer();
+ if (inputBuffer == null) {
+ return false;
+ }
+ }
+
+ if (decoderReinitializationState == REINITIALIZATION_STATE_SIGNAL_END_OF_STREAM) {
+ inputBuffer.setFlags(C.BUFFER_FLAG_END_OF_STREAM);
+ decoder.queueInputBuffer(inputBuffer);
+ inputBuffer = null;
+ decoderReinitializationState = REINITIALIZATION_STATE_WAIT_END_OF_STREAM;
+ return false;
+ }
+
+ int result;
+ FormatHolder formatHolder = getFormatHolder();
+ if (waitingForKeys) {
+ // We've already read an encrypted sample into buffer, and are waiting for keys.
+ result = C.RESULT_BUFFER_READ;
+ } else {
+ result = readSource(formatHolder, inputBuffer, false);
+ }
+
+ if (result == C.RESULT_NOTHING_READ) {
+ return false;
+ }
+ if (result == C.RESULT_FORMAT_READ) {
+ onInputFormatChanged(formatHolder);
+ return true;
+ }
+ if (inputBuffer.isEndOfStream()) {
+ inputStreamEnded = true;
+ decoder.queueInputBuffer(inputBuffer);
+ inputBuffer = null;
+ return false;
+ }
+ boolean bufferEncrypted = inputBuffer.isEncrypted();
+ waitingForKeys = shouldWaitForKeys(bufferEncrypted);
+ if (waitingForKeys) {
+ return false;
+ }
+ inputBuffer.flip();
+ onQueueInputBuffer(inputBuffer);
+ decoder.queueInputBuffer(inputBuffer);
+ decoderReceivedBuffers = true;
+ decoderCounters.inputBufferCount++;
+ inputBuffer = null;
+ return true;
+ }
+
+ private boolean shouldWaitForKeys(boolean bufferEncrypted) throws ExoPlaybackException {
+ if (decoderDrmSession == null
+ || (!bufferEncrypted
+ && (playClearSamplesWithoutKeys || decoderDrmSession.playClearSamplesWithoutKeys()))) {
+ return false;
+ }
+ @DrmSession.State int drmSessionState = decoderDrmSession.getState();
+ if (drmSessionState == DrmSession.STATE_ERROR) {
+ throw createRendererException(decoderDrmSession.getError(), inputFormat);
+ }
+ return drmSessionState != DrmSession.STATE_OPENED_WITH_KEYS;
+ }
+
+ private void processEndOfStream() throws ExoPlaybackException {
+ outputStreamEnded = true;
+ try {
+ audioSink.playToEndOfStream();
+ } catch (AudioSink.WriteException e) {
+ // TODO(internal: b/145658993) Use outputFormat for the call from drainOutputBuffer.
+ throw createRendererException(e, inputFormat);
+ }
+ }
+
+ private void flushDecoder() throws ExoPlaybackException {
+ waitingForKeys = false;
+ if (decoderReinitializationState != REINITIALIZATION_STATE_NONE) {
+ releaseDecoder();
+ maybeInitDecoder();
+ } else {
+ inputBuffer = null;
+ if (outputBuffer != null) {
+ outputBuffer.release();
+ outputBuffer = null;
+ }
+ decoder.flush();
+ decoderReceivedBuffers = false;
+ }
+ }
+
+ @Override
+ public boolean isEnded() {
+ return outputStreamEnded && audioSink.isEnded();
+ }
+
+ @Override
+ public boolean isReady() {
+ return audioSink.hasPendingData()
+ || (inputFormat != null && !waitingForKeys && (isSourceReady() || outputBuffer != null));
+ }
+
+ @Override
+ public long getPositionUs() {
+ if (getState() == STATE_STARTED) {
+ updateCurrentPosition();
+ }
+ return currentPositionUs;
+ }
+
+ @Override
+ public void setPlaybackParameters(PlaybackParameters playbackParameters) {
+ audioSink.setPlaybackParameters(playbackParameters);
+ }
+
+ @Override
+ public PlaybackParameters getPlaybackParameters() {
+ return audioSink.getPlaybackParameters();
+ }
+
+ @Override
+ protected void onEnabled(boolean joining) throws ExoPlaybackException {
+ if (drmSessionManager != null && !drmResourcesAcquired) {
+ drmResourcesAcquired = true;
+ drmSessionManager.prepare();
+ }
+ decoderCounters = new DecoderCounters();
+ eventDispatcher.enabled(decoderCounters);
+ int tunnelingAudioSessionId = getConfiguration().tunnelingAudioSessionId;
+ if (tunnelingAudioSessionId != C.AUDIO_SESSION_ID_UNSET) {
+ audioSink.enableTunnelingV21(tunnelingAudioSessionId);
+ } else {
+ audioSink.disableTunneling();
+ }
+ }
+
+ @Override
+ protected void onPositionReset(long positionUs, boolean joining) throws ExoPlaybackException {
+ audioSink.flush();
+ currentPositionUs = positionUs;
+ allowFirstBufferPositionDiscontinuity = true;
+ allowPositionDiscontinuity = true;
+ inputStreamEnded = false;
+ outputStreamEnded = false;
+ if (decoder != null) {
+ flushDecoder();
+ }
+ }
+
+ @Override
+ protected void onStarted() {
+ audioSink.play();
+ }
+
+ @Override
+ protected void onStopped() {
+ updateCurrentPosition();
+ audioSink.pause();
+ }
+
+ @Override
+ protected void onDisabled() {
+ inputFormat = null;
+ audioTrackNeedsConfigure = true;
+ waitingForKeys = false;
+ try {
+ setSourceDrmSession(null);
+ releaseDecoder();
+ audioSink.reset();
+ } finally {
+ eventDispatcher.disabled(decoderCounters);
+ }
+ }
+
+ @Override
+ protected void onReset() {
+ if (drmSessionManager != null && drmResourcesAcquired) {
+ drmResourcesAcquired = false;
+ drmSessionManager.release();
+ }
+ }
+
+ @Override
+ public void handleMessage(int messageType, @Nullable Object message) throws ExoPlaybackException {
+ switch (messageType) {
+ case C.MSG_SET_VOLUME:
+ audioSink.setVolume((Float) message);
+ break;
+ case C.MSG_SET_AUDIO_ATTRIBUTES:
+ AudioAttributes audioAttributes = (AudioAttributes) message;
+ audioSink.setAudioAttributes(audioAttributes);
+ break;
+ case C.MSG_SET_AUX_EFFECT_INFO:
+ AuxEffectInfo auxEffectInfo = (AuxEffectInfo) message;
+ audioSink.setAuxEffectInfo(auxEffectInfo);
+ break;
+ default:
+ super.handleMessage(messageType, message);
+ break;
+ }
+ }
+
+ private void maybeInitDecoder() throws ExoPlaybackException {
+ if (decoder != null) {
+ return;
+ }
+
+ setDecoderDrmSession(sourceDrmSession);
+
+ ExoMediaCrypto mediaCrypto = null;
+ if (decoderDrmSession != null) {
+ mediaCrypto = decoderDrmSession.getMediaCrypto();
+ if (mediaCrypto == null) {
+ DrmSessionException drmError = decoderDrmSession.getError();
+ if (drmError != null) {
+ // Continue for now. We may be able to avoid failure if the session recovers, or if a new
+ // input format causes the session to be replaced before it's used.
+ } else {
+ // The drm session isn't open yet.
+ return;
+ }
+ }
+ }
+
+ try {
+ long codecInitializingTimestamp = SystemClock.elapsedRealtime();
+ TraceUtil.beginSection("createAudioDecoder");
+ decoder = createDecoder(inputFormat, mediaCrypto);
+ TraceUtil.endSection();
+ long codecInitializedTimestamp = SystemClock.elapsedRealtime();
+ eventDispatcher.decoderInitialized(decoder.getName(), codecInitializedTimestamp,
+ codecInitializedTimestamp - codecInitializingTimestamp);
+ decoderCounters.decoderInitCount++;
+ } catch (AudioDecoderException e) {
+ throw createRendererException(e, inputFormat);
+ }
+ }
+
+ private void releaseDecoder() {
+ inputBuffer = null;
+ outputBuffer = null;
+ decoderReinitializationState = REINITIALIZATION_STATE_NONE;
+ decoderReceivedBuffers = false;
+ if (decoder != null) {
+ decoder.release();
+ decoder = null;
+ decoderCounters.decoderReleaseCount++;
+ }
+ setDecoderDrmSession(null);
+ }
+
+ private void setSourceDrmSession(@Nullable DrmSession<ExoMediaCrypto> session) {
+ DrmSession.replaceSession(sourceDrmSession, session);
+ sourceDrmSession = session;
+ }
+
+ private void setDecoderDrmSession(@Nullable DrmSession<ExoMediaCrypto> session) {
+ DrmSession.replaceSession(decoderDrmSession, session);
+ decoderDrmSession = session;
+ }
+
+ @SuppressWarnings("unchecked")
+ private void onInputFormatChanged(FormatHolder formatHolder) throws ExoPlaybackException {
+ Format newFormat = Assertions.checkNotNull(formatHolder.format);
+ if (formatHolder.includesDrmSession) {
+ setSourceDrmSession((DrmSession<ExoMediaCrypto>) formatHolder.drmSession);
+ } else {
+ sourceDrmSession =
+ getUpdatedSourceDrmSession(inputFormat, newFormat, drmSessionManager, sourceDrmSession);
+ }
+ Format oldFormat = inputFormat;
+ inputFormat = newFormat;
+
+ if (!canKeepCodec(oldFormat, inputFormat)) {
+ if (decoderReceivedBuffers) {
+ // Signal end of stream and wait for any final output buffers before re-initialization.
+ decoderReinitializationState = REINITIALIZATION_STATE_SIGNAL_END_OF_STREAM;
+ } else {
+ // There aren't any final output buffers, so release the decoder immediately.
+ releaseDecoder();
+ maybeInitDecoder();
+ audioTrackNeedsConfigure = true;
+ }
+ }
+
+ encoderDelay = inputFormat.encoderDelay;
+ encoderPadding = inputFormat.encoderPadding;
+
+ eventDispatcher.inputFormatChanged(inputFormat);
+ }
+
+ private void onQueueInputBuffer(DecoderInputBuffer buffer) {
+ if (allowFirstBufferPositionDiscontinuity && !buffer.isDecodeOnly()) {
+ // TODO: Remove this hack once we have a proper fix for [Internal: b/71876314].
+ // Allow the position to jump if the first presentable input buffer has a timestamp that
+ // differs significantly from what was expected.
+ if (Math.abs(buffer.timeUs - currentPositionUs) > 500000) {
+ currentPositionUs = buffer.timeUs;
+ }
+ allowFirstBufferPositionDiscontinuity = false;
+ }
+ }
+
+ private void updateCurrentPosition() {
+ long newCurrentPositionUs = audioSink.getCurrentPositionUs(isEnded());
+ if (newCurrentPositionUs != AudioSink.CURRENT_POSITION_NOT_SET) {
+ currentPositionUs =
+ allowPositionDiscontinuity
+ ? newCurrentPositionUs
+ : Math.max(currentPositionUs, newCurrentPositionUs);
+ allowPositionDiscontinuity = false;
+ }
+ }
+
+ private final class AudioSinkListener implements AudioSink.Listener {
+
+ @Override
+ public void onAudioSessionId(int audioSessionId) {
+ eventDispatcher.audioSessionId(audioSessionId);
+ SimpleDecoderAudioRenderer.this.onAudioSessionId(audioSessionId);
+ }
+
+ @Override
+ public void onPositionDiscontinuity() {
+ onAudioTrackPositionDiscontinuity();
+ // We are out of sync so allow currentPositionUs to jump backwards.
+ SimpleDecoderAudioRenderer.this.allowPositionDiscontinuity = true;
+ }
+
+ @Override
+ public void onUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs) {
+ eventDispatcher.audioTrackUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs);
+ onAudioTrackUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs);
+ }
+
+ }
+
+}
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Sonic.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Sonic.java
new file mode 100644
index 0000000000..1a0dad4b45
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/Sonic.java
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ * Copyright (C) 2010 Bill Cox, Sonic Library
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Assertions;
+import java.nio.ShortBuffer;
+import java.util.Arrays;
+
+/**
+ * Sonic audio stream processor for time/pitch stretching.
+ * <p>
+ * Based on https://github.com/waywardgeek/sonic.
+ */
+/* package */ final class Sonic {
+
+ private static final int MINIMUM_PITCH = 65;
+ private static final int MAXIMUM_PITCH = 400;
+ private static final int AMDF_FREQUENCY = 4000;
+ private static final int BYTES_PER_SAMPLE = 2;
+
+ private final int inputSampleRateHz;
+ private final int channelCount;
+ private final float speed;
+ private final float pitch;
+ private final float rate;
+ private final int minPeriod;
+ private final int maxPeriod;
+ private final int maxRequiredFrameCount;
+ private final short[] downSampleBuffer;
+
+ private short[] inputBuffer;
+ private int inputFrameCount;
+ private short[] outputBuffer;
+ private int outputFrameCount;
+ private short[] pitchBuffer;
+ private int pitchFrameCount;
+ private int oldRatePosition;
+ private int newRatePosition;
+ private int remainingInputToCopyFrameCount;
+ private int prevPeriod;
+ private int prevMinDiff;
+ private int minDiff;
+ private int maxDiff;
+
+ /**
+ * Creates a new Sonic audio stream processor.
+ *
+ * @param inputSampleRateHz The sample rate of input audio, in hertz.
+ * @param channelCount The number of channels in the input audio.
+ * @param speed The speedup factor for output audio.
+ * @param pitch The pitch factor for output audio.
+ * @param outputSampleRateHz The sample rate for output audio, in hertz.
+ */
+ public Sonic(
+ int inputSampleRateHz, int channelCount, float speed, float pitch, int outputSampleRateHz) {
+ this.inputSampleRateHz = inputSampleRateHz;
+ this.channelCount = channelCount;
+ this.speed = speed;
+ this.pitch = pitch;
+ rate = (float) inputSampleRateHz / outputSampleRateHz;
+ minPeriod = inputSampleRateHz / MAXIMUM_PITCH;
+ maxPeriod = inputSampleRateHz / MINIMUM_PITCH;
+ maxRequiredFrameCount = 2 * maxPeriod;
+ downSampleBuffer = new short[maxRequiredFrameCount];
+ inputBuffer = new short[maxRequiredFrameCount * channelCount];
+ outputBuffer = new short[maxRequiredFrameCount * channelCount];
+ pitchBuffer = new short[maxRequiredFrameCount * channelCount];
+ }
+
+ /**
+ * Queues remaining data from {@code buffer}, and advances its position by the number of shorts
+ * consumed.
+ *
+ * @param buffer A {@link ShortBuffer} containing input data between its position and limit.
+ */
+ public void queueInput(ShortBuffer buffer) {
+ int framesToWrite = buffer.remaining() / channelCount;
+ int shortsToWrite = framesToWrite * channelCount;
+ inputBuffer = ensureSpaceForAdditionalFrames(inputBuffer, inputFrameCount, framesToWrite);
+ buffer.get(inputBuffer, inputFrameCount * channelCount, shortsToWrite);
+ inputFrameCount += framesToWrite;
+ processStreamInput();
+ }
+
+ /**
+ * Gets available output, outputting to the start of {@code buffer}. The buffer's position will be
+ * advanced by the number of shorts written.
+ *
+ * @param buffer A {@link ShortBuffer} into which output will be written.
+ */
+ public void getOutput(ShortBuffer buffer) {
+ int framesToRead = Math.min(buffer.remaining() / channelCount, outputFrameCount);
+ buffer.put(outputBuffer, 0, framesToRead * channelCount);
+ outputFrameCount -= framesToRead;
+ System.arraycopy(
+ outputBuffer,
+ framesToRead * channelCount,
+ outputBuffer,
+ 0,
+ outputFrameCount * channelCount);
+ }
+
+ /**
+ * Forces generating output using whatever data has been queued already. No extra delay will be
+ * added to the output, but flushing in the middle of words could introduce distortion.
+ */
+ public void queueEndOfStream() {
+ int remainingFrameCount = inputFrameCount;
+ float s = speed / pitch;
+ float r = rate * pitch;
+ int expectedOutputFrames =
+ outputFrameCount + (int) ((remainingFrameCount / s + pitchFrameCount) / r + 0.5f);
+
+ // Add enough silence to flush both input and pitch buffers.
+ inputBuffer =
+ ensureSpaceForAdditionalFrames(
+ inputBuffer, inputFrameCount, remainingFrameCount + 2 * maxRequiredFrameCount);
+ for (int xSample = 0; xSample < 2 * maxRequiredFrameCount * channelCount; xSample++) {
+ inputBuffer[remainingFrameCount * channelCount + xSample] = 0;
+ }
+ inputFrameCount += 2 * maxRequiredFrameCount;
+ processStreamInput();
+ // Throw away any extra frames we generated due to the silence we added.
+ if (outputFrameCount > expectedOutputFrames) {
+ outputFrameCount = expectedOutputFrames;
+ }
+ // Empty input and pitch buffers.
+ inputFrameCount = 0;
+ remainingInputToCopyFrameCount = 0;
+ pitchFrameCount = 0;
+ }
+
+ /** Clears state in preparation for receiving a new stream of input buffers. */
+ public void flush() {
+ inputFrameCount = 0;
+ outputFrameCount = 0;
+ pitchFrameCount = 0;
+ oldRatePosition = 0;
+ newRatePosition = 0;
+ remainingInputToCopyFrameCount = 0;
+ prevPeriod = 0;
+ prevMinDiff = 0;
+ minDiff = 0;
+ maxDiff = 0;
+ }
+
+ /** Returns the size of output that can be read with {@link #getOutput(ShortBuffer)}, in bytes. */
+ public int getOutputSize() {
+ return outputFrameCount * channelCount * BYTES_PER_SAMPLE;
+ }
+
+ // Internal methods.
+
+ /**
+ * Returns {@code buffer} or a copy of it, such that there is enough space in the returned buffer
+ * to store {@code additionalFrameCount} additional frames.
+ *
+ * @param buffer The buffer.
+ * @param frameCount The number of frames already in the buffer.
+ * @param additionalFrameCount The number of additional frames that need to be stored in the
+ * buffer.
+ * @return A buffer with enough space for the additional frames.
+ */
+ private short[] ensureSpaceForAdditionalFrames(
+ short[] buffer, int frameCount, int additionalFrameCount) {
+ int currentCapacityFrames = buffer.length / channelCount;
+ if (frameCount + additionalFrameCount <= currentCapacityFrames) {
+ return buffer;
+ } else {
+ int newCapacityFrames = 3 * currentCapacityFrames / 2 + additionalFrameCount;
+ return Arrays.copyOf(buffer, newCapacityFrames * channelCount);
+ }
+ }
+
+ private void removeProcessedInputFrames(int positionFrames) {
+ int remainingFrames = inputFrameCount - positionFrames;
+ System.arraycopy(
+ inputBuffer, positionFrames * channelCount, inputBuffer, 0, remainingFrames * channelCount);
+ inputFrameCount = remainingFrames;
+ }
+
+ private void copyToOutput(short[] samples, int positionFrames, int frameCount) {
+ outputBuffer = ensureSpaceForAdditionalFrames(outputBuffer, outputFrameCount, frameCount);
+ System.arraycopy(
+ samples,
+ positionFrames * channelCount,
+ outputBuffer,
+ outputFrameCount * channelCount,
+ frameCount * channelCount);
+ outputFrameCount += frameCount;
+ }
+
+ private int copyInputToOutput(int positionFrames) {
+ int frameCount = Math.min(maxRequiredFrameCount, remainingInputToCopyFrameCount);
+ copyToOutput(inputBuffer, positionFrames, frameCount);
+ remainingInputToCopyFrameCount -= frameCount;
+ return frameCount;
+ }
+
+ private void downSampleInput(short[] samples, int position, int skip) {
+ // If skip is greater than one, average skip samples together and write them to the down-sample
+ // buffer. If channelCount is greater than one, mix the channels together as we down sample.
+ int frameCount = maxRequiredFrameCount / skip;
+ int samplesPerValue = channelCount * skip;
+ position *= channelCount;
+ for (int i = 0; i < frameCount; i++) {
+ int value = 0;
+ for (int j = 0; j < samplesPerValue; j++) {
+ value += samples[position + i * samplesPerValue + j];
+ }
+ value /= samplesPerValue;
+ downSampleBuffer[i] = (short) value;
+ }
+ }
+
+ private int findPitchPeriodInRange(short[] samples, int position, int minPeriod, int maxPeriod) {
+ // Find the best frequency match in the range, given a sample skip multiple. For now, just
+ // find the pitch of the first channel.
+ int bestPeriod = 0;
+ int worstPeriod = 255;
+ int minDiff = 1;
+ int maxDiff = 0;
+ position *= channelCount;
+ for (int period = minPeriod; period <= maxPeriod; period++) {
+ int diff = 0;
+ for (int i = 0; i < period; i++) {
+ short sVal = samples[position + i];
+ short pVal = samples[position + period + i];
+ diff += Math.abs(sVal - pVal);
+ }
+ // Note that the highest number of samples we add into diff will be less than 256, since we
+ // skip samples. Thus, diff is a 24 bit number, and we can safely multiply it by the period
+ // without overflow.
+ if (diff * bestPeriod < minDiff * period) {
+ minDiff = diff;
+ bestPeriod = period;
+ }
+ if (diff * worstPeriod > maxDiff * period) {
+ maxDiff = diff;
+ worstPeriod = period;
+ }
+ }
+ this.minDiff = minDiff / bestPeriod;
+ this.maxDiff = maxDiff / worstPeriod;
+ return bestPeriod;
+ }
+
+ /**
+ * Returns whether the previous pitch period estimate is a better approximation, which can occur
+ * at the abrupt end of voiced words.
+ */
+ private boolean previousPeriodBetter(int minDiff, int maxDiff) {
+ if (minDiff == 0 || prevPeriod == 0) {
+ return false;
+ }
+ if (maxDiff > minDiff * 3) {
+ // Got a reasonable match this period.
+ return false;
+ }
+ if (minDiff * 2 <= prevMinDiff * 3) {
+ // Mismatch is not that much greater this period.
+ return false;
+ }
+ return true;
+ }
+
+ private int findPitchPeriod(short[] samples, int position) {
+ // Find the pitch period. This is a critical step, and we may have to try multiple ways to get a
+ // good answer. This version uses AMDF. To improve speed, we down sample by an integer factor to
+ // get close to AMDF_FREQUENCY, and then search again over a narrower period range without down
+ // sampling.
+ int period;
+ int retPeriod;
+ int skip = inputSampleRateHz > AMDF_FREQUENCY ? inputSampleRateHz / AMDF_FREQUENCY : 1;
+ if (channelCount == 1 && skip == 1) {
+ period = findPitchPeriodInRange(samples, position, minPeriod, maxPeriod);
+ } else {
+ downSampleInput(samples, position, skip);
+ period = findPitchPeriodInRange(downSampleBuffer, 0, minPeriod / skip, maxPeriod / skip);
+ if (skip != 1) {
+ period *= skip;
+ int minP = period - (skip * 4);
+ int maxP = period + (skip * 4);
+ if (minP < minPeriod) {
+ minP = minPeriod;
+ }
+ if (maxP > maxPeriod) {
+ maxP = maxPeriod;
+ }
+ if (channelCount == 1) {
+ period = findPitchPeriodInRange(samples, position, minP, maxP);
+ } else {
+ downSampleInput(samples, position, 1);
+ period = findPitchPeriodInRange(downSampleBuffer, 0, minP, maxP);
+ }
+ }
+ }
+ if (previousPeriodBetter(minDiff, maxDiff)) {
+ retPeriod = prevPeriod;
+ } else {
+ retPeriod = period;
+ }
+ prevMinDiff = minDiff;
+ prevPeriod = period;
+ return retPeriod;
+ }
+
+ private void moveNewSamplesToPitchBuffer(int originalOutputFrameCount) {
+ int frameCount = outputFrameCount - originalOutputFrameCount;
+ pitchBuffer = ensureSpaceForAdditionalFrames(pitchBuffer, pitchFrameCount, frameCount);
+ System.arraycopy(
+ outputBuffer,
+ originalOutputFrameCount * channelCount,
+ pitchBuffer,
+ pitchFrameCount * channelCount,
+ frameCount * channelCount);
+ outputFrameCount = originalOutputFrameCount;
+ pitchFrameCount += frameCount;
+ }
+
+ private void removePitchFrames(int frameCount) {
+ if (frameCount == 0) {
+ return;
+ }
+ System.arraycopy(
+ pitchBuffer,
+ frameCount * channelCount,
+ pitchBuffer,
+ 0,
+ (pitchFrameCount - frameCount) * channelCount);
+ pitchFrameCount -= frameCount;
+ }
+
+ private short interpolate(short[] in, int inPos, int oldSampleRate, int newSampleRate) {
+ short left = in[inPos];
+ short right = in[inPos + channelCount];
+ int position = newRatePosition * oldSampleRate;
+ int leftPosition = oldRatePosition * newSampleRate;
+ int rightPosition = (oldRatePosition + 1) * newSampleRate;
+ int ratio = rightPosition - position;
+ int width = rightPosition - leftPosition;
+ return (short) ((ratio * left + (width - ratio) * right) / width);
+ }
+
+ private void adjustRate(float rate, int originalOutputFrameCount) {
+ if (outputFrameCount == originalOutputFrameCount) {
+ return;
+ }
+ int newSampleRate = (int) (inputSampleRateHz / rate);
+ int oldSampleRate = inputSampleRateHz;
+ // Scale the rates down so the integer position arithmetic below cannot overflow.
+ while (newSampleRate > (1 << 14) || oldSampleRate > (1 << 14)) {
+ newSampleRate /= 2;
+ oldSampleRate /= 2;
+ }
+ moveNewSamplesToPitchBuffer(originalOutputFrameCount);
+ // Leave at least one pitch sample in the buffer.
+ for (int position = 0; position < pitchFrameCount - 1; position++) {
+ while ((oldRatePosition + 1) * newSampleRate > newRatePosition * oldSampleRate) {
+ outputBuffer =
+ ensureSpaceForAdditionalFrames(
+ outputBuffer, outputFrameCount, /* additionalFrameCount= */ 1);
+ for (int i = 0; i < channelCount; i++) {
+ outputBuffer[outputFrameCount * channelCount + i] =
+ interpolate(pitchBuffer, position * channelCount + i, oldSampleRate, newSampleRate);
+ }
+ newRatePosition++;
+ outputFrameCount++;
+ }
+ oldRatePosition++;
+ if (oldRatePosition == oldSampleRate) {
+ oldRatePosition = 0;
+ Assertions.checkState(newRatePosition == newSampleRate);
+ newRatePosition = 0;
+ }
+ }
+ removePitchFrames(pitchFrameCount - 1);
+ }
+
+ private int skipPitchPeriod(short[] samples, int position, float speed, int period) {
+ // Skip over a pitch period, and copy period/speed samples to the output.
+ int newFrameCount;
+ if (speed >= 2.0f) {
+ newFrameCount = (int) (period / (speed - 1.0f));
+ } else {
+ newFrameCount = period;
+ remainingInputToCopyFrameCount = (int) (period * (2.0f - speed) / (speed - 1.0f));
+ }
+ outputBuffer = ensureSpaceForAdditionalFrames(outputBuffer, outputFrameCount, newFrameCount);
+ overlapAdd(
+ newFrameCount,
+ channelCount,
+ outputBuffer,
+ outputFrameCount,
+ samples,
+ position,
+ samples,
+ position + period);
+ outputFrameCount += newFrameCount;
+ return newFrameCount;
+ }
+
+ private int insertPitchPeriod(short[] samples, int position, float speed, int period) {
+ // Insert a pitch period, and determine how much input to copy directly.
+ int newFrameCount;
+ if (speed < 0.5f) {
+ newFrameCount = (int) (period * speed / (1.0f - speed));
+ } else {
+ newFrameCount = period;
+ remainingInputToCopyFrameCount = (int) (period * (2.0f * speed - 1.0f) / (1.0f - speed));
+ }
+ outputBuffer =
+ ensureSpaceForAdditionalFrames(outputBuffer, outputFrameCount, period + newFrameCount);
+ System.arraycopy(
+ samples,
+ position * channelCount,
+ outputBuffer,
+ outputFrameCount * channelCount,
+ period * channelCount);
+ overlapAdd(
+ newFrameCount,
+ channelCount,
+ outputBuffer,
+ outputFrameCount + period,
+ samples,
+ position + period,
+ samples,
+ position);
+ outputFrameCount += period + newFrameCount;
+ return newFrameCount;
+ }
+
+ private void changeSpeed(float speed) {
+ if (inputFrameCount < maxRequiredFrameCount) {
+ return;
+ }
+ int frameCount = inputFrameCount;
+ int positionFrames = 0;
+ do {
+ if (remainingInputToCopyFrameCount > 0) {
+ positionFrames += copyInputToOutput(positionFrames);
+ } else {
+ int period = findPitchPeriod(inputBuffer, positionFrames);
+ if (speed > 1.0) {
+ positionFrames += period + skipPitchPeriod(inputBuffer, positionFrames, speed, period);
+ } else {
+ positionFrames += insertPitchPeriod(inputBuffer, positionFrames, speed, period);
+ }
+ }
+ } while (positionFrames + maxRequiredFrameCount <= frameCount);
+ removeProcessedInputFrames(positionFrames);
+ }
+
+ private void processStreamInput() {
+ // Resample as many pitch periods as we have buffered on the input.
+ int originalOutputFrameCount = outputFrameCount;
+ float s = speed / pitch;
+ float r = rate * pitch;
+ if (s > 1.00001 || s < 0.99999) {
+ changeSpeed(s);
+ } else {
+ copyToOutput(inputBuffer, 0, inputFrameCount);
+ inputFrameCount = 0;
+ }
+ if (r != 1.0f) {
+ adjustRate(r, originalOutputFrameCount);
+ }
+ }
+
+ private static void overlapAdd(
+ int frameCount,
+ int channelCount,
+ short[] out,
+ int outPosition,
+ short[] rampDown,
+ int rampDownPosition,
+ short[] rampUp,
+ int rampUpPosition) {
+ for (int i = 0; i < channelCount; i++) {
+ int o = outPosition * channelCount + i;
+ int u = rampUpPosition * channelCount + i;
+ int d = rampDownPosition * channelCount + i;
+ for (int t = 0; t < frameCount; t++) {
+ out[o] = (short) ((rampDown[d] * (frameCount - t) + rampUp[u] * t) / frameCount);
+ o += channelCount;
+ d += channelCount;
+ u += channelCount;
+ }
+ }
+ }
+
+}
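
For orientation, here is a sketch of driving Sonic offline. Sonic is package-private, so this assumes a caller in the same package; timeStretch is a hypothetical helper. Note that getOutputSize() reports bytes while the buffers carry 16-bit shorts, and that the output length is roughly input / (speed / pitch) / (rate * pitch) frames.

    import java.nio.ShortBuffer;

    final class SonicDriverSketch {
      // Hypothetical helper: time-stretches 16-bit interleaved PCM by the given speed.
      static short[] timeStretch(short[] pcm, int sampleRateHz, int channelCount, float speed) {
        Sonic sonic = new Sonic(sampleRateHz, channelCount, speed, /* pitch= */ 1f, sampleRateHz);
        sonic.queueInput(ShortBuffer.wrap(pcm));
        sonic.queueEndOfStream(); // Pads with silence to flush the input and pitch buffers.
        short[] out = new short[sonic.getOutputSize() / 2]; // getOutputSize() is in bytes.
        sonic.getOutput(ShortBuffer.wrap(out));
        return out;
      }
    }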
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SonicAudioProcessor.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SonicAudioProcessor.java
new file mode 100644
index 0000000000..88a4d884bf
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/SonicAudioProcessor.java
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Assertions;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.ShortBuffer;
+
+/**
+ * An {@link AudioProcessor} that uses the Sonic library to modify audio speed/pitch/sample rate.
+ */
+public final class SonicAudioProcessor implements AudioProcessor {
+
+ /**
+ * The maximum allowed playback speed in {@link #setSpeed(float)}.
+ */
+ public static final float MAXIMUM_SPEED = 8.0f;
+ /**
+ * The minimum allowed playback speed in {@link #setSpeed(float)}.
+ */
+ public static final float MINIMUM_SPEED = 0.1f;
+ /**
+ * The maximum allowed pitch in {@link #setPitch(float)}.
+ */
+ public static final float MAXIMUM_PITCH = 8.0f;
+ /**
+ * The minimum allowed pitch in {@link #setPitch(float)}.
+ */
+ public static final float MINIMUM_PITCH = 0.1f;
+ /**
+ * Indicates that the output sample rate should be the same as the input.
+ */
+ public static final int SAMPLE_RATE_NO_CHANGE = -1;
+
+ /**
+ * The threshold below which the difference between two pitch/speed factors is negligible.
+ */
+ private static final float CLOSE_THRESHOLD = 0.01f;
+
+ /**
+ * The minimum number of output bytes required before the speedup is calculated using the
+ * input/output byte counts, rather than from the current playback speed.
+ */
+ private static final int MIN_BYTES_FOR_SPEEDUP_CALCULATION = 1024;
+
+ private int pendingOutputSampleRate;
+ private float speed;
+ private float pitch;
+
+ private AudioFormat pendingInputAudioFormat;
+ private AudioFormat pendingOutputAudioFormat;
+ private AudioFormat inputAudioFormat;
+ private AudioFormat outputAudioFormat;
+
+ private boolean pendingSonicRecreation;
+ @Nullable private Sonic sonic;
+ private ByteBuffer buffer;
+ private ShortBuffer shortBuffer;
+ private ByteBuffer outputBuffer;
+ private long inputBytes;
+ private long outputBytes;
+ private boolean inputEnded;
+
+ /**
+ * Creates a new Sonic audio processor.
+ */
+ public SonicAudioProcessor() {
+ speed = 1f;
+ pitch = 1f;
+ pendingInputAudioFormat = AudioFormat.NOT_SET;
+ pendingOutputAudioFormat = AudioFormat.NOT_SET;
+ inputAudioFormat = AudioFormat.NOT_SET;
+ outputAudioFormat = AudioFormat.NOT_SET;
+ buffer = EMPTY_BUFFER;
+ shortBuffer = buffer.asShortBuffer();
+ outputBuffer = EMPTY_BUFFER;
+ pendingOutputSampleRate = SAMPLE_RATE_NO_CHANGE;
+ }
+
+ /**
+ * Sets the playback speed. This method may only be called after draining data through the
+ * processor. The value returned by {@link #isActive()} may change, and the processor must be
+ * {@link #flush() flushed} before queueing more data.
+ *
+ * @param speed The requested new playback speed.
+ * @return The actual new playback speed.
+ */
+ public float setSpeed(float speed) {
+ speed = Util.constrainValue(speed, MINIMUM_SPEED, MAXIMUM_SPEED);
+ if (this.speed != speed) {
+ this.speed = speed;
+ pendingSonicRecreation = true;
+ }
+ return speed;
+ }
+
+ /**
+ * Sets the playback pitch. This method may only be called after draining data through the
+ * processor. The value returned by {@link #isActive()} may change, and the processor must be
+ * {@link #flush() flushed} before queueing more data.
+ *
+ * @param pitch The requested new pitch.
+ * @return The actual new pitch.
+ */
+ public float setPitch(float pitch) {
+ pitch = Util.constrainValue(pitch, MINIMUM_PITCH, MAXIMUM_PITCH);
+ if (this.pitch != pitch) {
+ this.pitch = pitch;
+ pendingSonicRecreation = true;
+ }
+ return pitch;
+ }
+
+ /**
+ * Sets the sample rate for output audio, in hertz. Pass {@link #SAMPLE_RATE_NO_CHANGE} to output
+ * audio at the same sample rate as the input. After calling this method, call {@link
+ * #configure(AudioFormat)} to configure the processor with the new sample rate.
+ *
+ * @param sampleRateHz The sample rate for output audio, in hertz.
+ * @see #configure(AudioFormat)
+ */
+ public void setOutputSampleRateHz(int sampleRateHz) {
+ pendingOutputSampleRate = sampleRateHz;
+ }
+
+ /**
+ * Returns the specified duration scaled to take into account the speedup factor of this instance,
+ * in the same units as {@code duration}.
+ *
+ * @param duration The duration to scale taking into account speedup.
+ * @return The specified duration scaled to take into account speedup, in the same units as
+ * {@code duration}.
+ */
+ public long scaleDurationForSpeedup(long duration) {
+ if (outputBytes >= MIN_BYTES_FOR_SPEEDUP_CALCULATION) {
+ return outputAudioFormat.sampleRate == inputAudioFormat.sampleRate
+ ? Util.scaleLargeTimestamp(duration, inputBytes, outputBytes)
+ : Util.scaleLargeTimestamp(
+ duration,
+ inputBytes * outputAudioFormat.sampleRate,
+ outputBytes * inputAudioFormat.sampleRate);
+ } else {
+ return (long) ((double) speed * duration);
+ }
+ }
+
+ @Override
+ public AudioFormat configure(AudioFormat inputAudioFormat) throws UnhandledAudioFormatException {
+ if (inputAudioFormat.encoding != C.ENCODING_PCM_16BIT) {
+ throw new UnhandledAudioFormatException(inputAudioFormat);
+ }
+ int outputSampleRateHz =
+ pendingOutputSampleRate == SAMPLE_RATE_NO_CHANGE
+ ? inputAudioFormat.sampleRate
+ : pendingOutputSampleRate;
+ pendingInputAudioFormat = inputAudioFormat;
+ pendingOutputAudioFormat =
+ new AudioFormat(outputSampleRateHz, inputAudioFormat.channelCount, C.ENCODING_PCM_16BIT);
+ pendingSonicRecreation = true;
+ return pendingOutputAudioFormat;
+ }
+
+ @Override
+ public boolean isActive() {
+ return pendingOutputAudioFormat.sampleRate != Format.NO_VALUE
+ && (Math.abs(speed - 1f) >= CLOSE_THRESHOLD
+ || Math.abs(pitch - 1f) >= CLOSE_THRESHOLD
+ || pendingOutputAudioFormat.sampleRate != pendingInputAudioFormat.sampleRate);
+ }
+
+ @Override
+ public void queueInput(ByteBuffer inputBuffer) {
+ Sonic sonic = Assertions.checkNotNull(this.sonic);
+ if (inputBuffer.hasRemaining()) {
+ ShortBuffer shortBuffer = inputBuffer.asShortBuffer();
+ int inputSize = inputBuffer.remaining();
+ inputBytes += inputSize;
+ sonic.queueInput(shortBuffer);
+ inputBuffer.position(inputBuffer.position() + inputSize);
+ }
+ int outputSize = sonic.getOutputSize();
+ if (outputSize > 0) {
+ if (buffer.capacity() < outputSize) {
+ buffer = ByteBuffer.allocateDirect(outputSize).order(ByteOrder.nativeOrder());
+ shortBuffer = buffer.asShortBuffer();
+ } else {
+ buffer.clear();
+ shortBuffer.clear();
+ }
+ sonic.getOutput(shortBuffer);
+ outputBytes += outputSize;
+ buffer.limit(outputSize);
+ outputBuffer = buffer;
+ }
+ }
+
+ @Override
+ public void queueEndOfStream() {
+ if (sonic != null) {
+ sonic.queueEndOfStream();
+ }
+ inputEnded = true;
+ }
+
+ @Override
+ public ByteBuffer getOutput() {
+ ByteBuffer outputBuffer = this.outputBuffer;
+ this.outputBuffer = EMPTY_BUFFER;
+ return outputBuffer;
+ }
+
+ @Override
+ public boolean isEnded() {
+ return inputEnded && (sonic == null || sonic.getOutputSize() == 0);
+ }
+
+ @Override
+ public void flush() {
+ if (isActive()) {
+ inputAudioFormat = pendingInputAudioFormat;
+ outputAudioFormat = pendingOutputAudioFormat;
+ if (pendingSonicRecreation) {
+ sonic =
+ new Sonic(
+ inputAudioFormat.sampleRate,
+ inputAudioFormat.channelCount,
+ speed,
+ pitch,
+ outputAudioFormat.sampleRate);
+ } else if (sonic != null) {
+ sonic.flush();
+ }
+ }
+ outputBuffer = EMPTY_BUFFER;
+ inputBytes = 0;
+ outputBytes = 0;
+ inputEnded = false;
+ }
+
+ @Override
+ public void reset() {
+ speed = 1f;
+ pitch = 1f;
+ pendingInputAudioFormat = AudioFormat.NOT_SET;
+ pendingOutputAudioFormat = AudioFormat.NOT_SET;
+ inputAudioFormat = AudioFormat.NOT_SET;
+ outputAudioFormat = AudioFormat.NOT_SET;
+ buffer = EMPTY_BUFFER;
+ shortBuffer = buffer.asShortBuffer();
+ outputBuffer = EMPTY_BUFFER;
+ pendingOutputSampleRate = SAMPLE_RATE_NO_CHANGE;
+ pendingSonicRecreation = false;
+ sonic = null;
+ inputBytes = 0;
+ outputBytes = 0;
+ inputEnded = false;
+ }
+
+}
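
A hedged lifecycle sketch for the processor above. AudioFormat and UnhandledAudioFormatException are assumed to be the nested types declared on AudioProcessor (as their unqualified use above suggests), and the 44.1 kHz stereo format is an arbitrary example.

    import java.nio.ByteBuffer;
    import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
    import org.mozilla.thirdparty.com.google.android.exoplayer2.audio.AudioProcessor;
    import org.mozilla.thirdparty.com.google.android.exoplayer2.audio.SonicAudioProcessor;

    final class SonicProcessorSketch {
      // Hypothetical driver: pushes one 16-bit PCM buffer through at 1.5x speed.
      static ByteBuffer speedUp(ByteBuffer pcm) throws AudioProcessor.UnhandledAudioFormatException {
        SonicAudioProcessor processor = new SonicAudioProcessor();
        processor.setSpeed(1.5f); // Clamped to [MINIMUM_SPEED, MAXIMUM_SPEED].
        processor.configure(
            new AudioProcessor.AudioFormat(
                /* sampleRate= */ 44100, /* channelCount= */ 2, C.ENCODING_PCM_16BIT));
        processor.flush(); // Applies the pending format and creates the Sonic instance.
        processor.queueInput(pcm); // Consumes pcm, buffering processed output internally.
        return processor.getOutput(); // Empty if no output is ready yet.
      }
    }

A real caller alternates queueInput() and getOutput(), then signals queueEndOfStream() and keeps draining until isEnded() returns true.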
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/TeeAudioProcessor.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/TeeAudioProcessor.java
new file mode 100644
index 0000000000..42f151c5be
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/TeeAudioProcessor.java
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import androidx.annotation.Nullable;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Assertions;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Log;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+/**
+ * Audio processor that outputs its input unmodified and also outputs its input to a given sink.
+ * This is intended to be used for diagnostics and debugging.
+ *
+ * <p>This audio processor can be inserted into the audio processor chain to access audio data
+ * before/after particular processing steps have been applied. For example, to get audio output
+ * after playback speed adjustment and silence skipping have been applied, it is necessary to pass
+ * a custom {@link org.mozilla.thirdparty.com.google.android.exoplayer2.audio.DefaultAudioSink.AudioProcessorChain} when
+ * creating the audio sink, and include this audio processor after all other audio processors.
+ */
+public final class TeeAudioProcessor extends BaseAudioProcessor {
+
+ /** A sink for audio buffers handled by the audio processor. */
+ public interface AudioBufferSink {
+
+ /** Called when the audio processor is flushed with a format of subsequent input. */
+ void flush(int sampleRateHz, int channelCount, @C.PcmEncoding int encoding);
+
+ /**
+ * Called when data is written to the audio processor.
+ *
+ * @param buffer A read-only buffer containing input which the audio processor will handle.
+ */
+ void handleBuffer(ByteBuffer buffer);
+ }
+
+ private final AudioBufferSink audioBufferSink;
+
+ /**
+ * Creates a new tee audio processor, sending incoming data to the given {@link AudioBufferSink}.
+ *
+ * @param audioBufferSink The audio buffer sink that will receive input queued to this audio
+ * processor.
+ */
+ public TeeAudioProcessor(AudioBufferSink audioBufferSink) {
+ this.audioBufferSink = Assertions.checkNotNull(audioBufferSink);
+ }
+
+ @Override
+ public AudioFormat onConfigure(AudioFormat inputAudioFormat) {
+ // This processor is always active (if passed to the sink) and outputs its input.
+ return inputAudioFormat;
+ }
+
+ @Override
+ public void queueInput(ByteBuffer inputBuffer) {
+ int remaining = inputBuffer.remaining();
+ if (remaining == 0) {
+ return;
+ }
+ audioBufferSink.handleBuffer(inputBuffer.asReadOnlyBuffer());
+ replaceOutputBuffer(remaining).put(inputBuffer).flip();
+ }
+
+ @Override
+ protected void onQueueEndOfStream() {
+ flushSinkIfActive();
+ }
+
+ @Override
+ protected void onReset() {
+ flushSinkIfActive();
+ }
+
+ private void flushSinkIfActive() {
+ if (isActive()) {
+ audioBufferSink.flush(
+ inputAudioFormat.sampleRate, inputAudioFormat.channelCount, inputAudioFormat.encoding);
+ }
+ }
+
+ /**
+ * A sink for audio buffers that writes output audio as .wav files with a given path prefix. When
+ * new audio data is handled after flushing the audio processor, a counter is incremented and its
+ * value is appended to the output file name.
+ *
+ * <p>Note: if writing to external storage it's necessary to grant the {@code
+ * WRITE_EXTERNAL_STORAGE} permission.
+ */
+ public static final class WavFileAudioBufferSink implements AudioBufferSink {
+
+ private static final String TAG = "WavFileAudioBufferSink";
+
+ private static final int FILE_SIZE_MINUS_8_OFFSET = 4;
+ private static final int FILE_SIZE_MINUS_44_OFFSET = 40;
+ private static final int HEADER_LENGTH = 44;
+
+ private final String outputFileNamePrefix;
+ private final byte[] scratchBuffer;
+ private final ByteBuffer scratchByteBuffer;
+
+ private int sampleRateHz;
+ private int channelCount;
+ @C.PcmEncoding private int encoding;
+ @Nullable private RandomAccessFile randomAccessFile;
+ private int counter;
+ private int bytesWritten;
+
+ /**
+ * Creates a new audio buffer sink that writes to .wav files with the given prefix.
+ *
+ * @param outputFileNamePrefix The prefix for output files.
+ */
+ public WavFileAudioBufferSink(String outputFileNamePrefix) {
+ this.outputFileNamePrefix = outputFileNamePrefix;
+ scratchBuffer = new byte[1024];
+ scratchByteBuffer = ByteBuffer.wrap(scratchBuffer).order(ByteOrder.LITTLE_ENDIAN);
+ }
+
+ @Override
+ public void flush(int sampleRateHz, int channelCount, @C.PcmEncoding int encoding) {
+ try {
+ reset();
+ } catch (IOException e) {
+ Log.e(TAG, "Error resetting", e);
+ }
+ this.sampleRateHz = sampleRateHz;
+ this.channelCount = channelCount;
+ this.encoding = encoding;
+ }
+
+ @Override
+ public void handleBuffer(ByteBuffer buffer) {
+ try {
+ maybePrepareFile();
+ writeBuffer(buffer);
+ } catch (IOException e) {
+ Log.e(TAG, "Error writing data", e);
+ }
+ }
+
+ private void maybePrepareFile() throws IOException {
+ if (randomAccessFile != null) {
+ return;
+ }
+ RandomAccessFile randomAccessFile = new RandomAccessFile(getNextOutputFileName(), "rw");
+ writeFileHeader(randomAccessFile);
+ this.randomAccessFile = randomAccessFile;
+ bytesWritten = HEADER_LENGTH;
+ }
+
+ private void writeFileHeader(RandomAccessFile randomAccessFile) throws IOException {
+ // Write the start of the header as big endian data.
+ randomAccessFile.writeInt(WavUtil.RIFF_FOURCC);
+ randomAccessFile.writeInt(-1);
+ randomAccessFile.writeInt(WavUtil.WAVE_FOURCC);
+ randomAccessFile.writeInt(WavUtil.FMT_FOURCC);
+
+ // Write the rest of the header as little endian data.
+ scratchByteBuffer.clear();
+ scratchByteBuffer.putInt(16);
+ scratchByteBuffer.putShort((short) WavUtil.getTypeForPcmEncoding(encoding));
+ scratchByteBuffer.putShort((short) channelCount);
+ scratchByteBuffer.putInt(sampleRateHz);
+ int bytesPerFrame = Util.getPcmFrameSize(encoding, channelCount);
+ scratchByteBuffer.putInt(bytesPerFrame * sampleRateHz); // Average bytes per second.
+ scratchByteBuffer.putShort((short) bytesPerFrame); // Block alignment.
+ scratchByteBuffer.putShort((short) (8 * bytesPerFrame / channelCount)); // Bits per sample.
+ randomAccessFile.write(scratchBuffer, 0, scratchByteBuffer.position());
+
+ // Write the start of the data chunk as big endian data.
+ randomAccessFile.writeInt(WavUtil.DATA_FOURCC);
+ randomAccessFile.writeInt(-1);
+ }
+
+ private void writeBuffer(ByteBuffer buffer) throws IOException {
+ RandomAccessFile randomAccessFile = Assertions.checkNotNull(this.randomAccessFile);
+ while (buffer.hasRemaining()) {
+ int bytesToWrite = Math.min(buffer.remaining(), scratchBuffer.length);
+ buffer.get(scratchBuffer, 0, bytesToWrite);
+ randomAccessFile.write(scratchBuffer, 0, bytesToWrite);
+ bytesWritten += bytesToWrite;
+ }
+ }
+
+ private void reset() throws IOException {
+ RandomAccessFile randomAccessFile = this.randomAccessFile;
+ if (randomAccessFile == null) {
+ return;
+ }
+
+ try {
+ scratchByteBuffer.clear();
+ scratchByteBuffer.putInt(bytesWritten - 8);
+ randomAccessFile.seek(FILE_SIZE_MINUS_8_OFFSET);
+ randomAccessFile.write(scratchBuffer, 0, 4);
+
+ scratchByteBuffer.clear();
+ scratchByteBuffer.putInt(bytesWritten - 44);
+ randomAccessFile.seek(FILE_SIZE_MINUS_44_OFFSET);
+ randomAccessFile.write(scratchBuffer, 0, 4);
+ } catch (IOException e) {
+ // The file may still be playable, so just log a warning.
+ Log.w(TAG, "Error updating file size", e);
+ }
+
+ try {
+ randomAccessFile.close();
+ } finally {
+ this.randomAccessFile = null;
+ }
+ }
+
+ private String getNextOutputFileName() {
+ return Util.formatInvariant("%s-%04d.wav", outputFileNamePrefix, counter++);
+ }
+ }
+}
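
The AudioBufferSink interface is small enough to implement directly when WAV output is not wanted. A minimal sketch (hypothetical class) that just counts the bytes seen per stream:

    import java.nio.ByteBuffer;
    import org.mozilla.thirdparty.com.google.android.exoplayer2.audio.TeeAudioProcessor;

    final class CountingBufferSink implements TeeAudioProcessor.AudioBufferSink {
      private long bytesSinceFlush;

      @Override
      public void flush(int sampleRateHz, int channelCount, int encoding) {
        bytesSinceFlush = 0; // A new stream is starting.
      }

      @Override
      public void handleBuffer(ByteBuffer buffer) {
        // The processor hands over a read-only duplicate, so consuming it is safe.
        bytesSinceFlush += buffer.remaining();
      }
    }

Such a sink would be installed with new TeeAudioProcessor(new CountingBufferSink()) in a custom processor chain.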
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/TrimmingAudioProcessor.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/TrimmingAudioProcessor.java
new file mode 100644
index 0000000000..1326cf63ee
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/TrimmingAudioProcessor.java
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+import java.nio.ByteBuffer;
+
+/** Audio processor for trimming samples from the start/end of data. */
+/* package */ final class TrimmingAudioProcessor extends BaseAudioProcessor {
+
+ @C.PcmEncoding private static final int OUTPUT_ENCODING = C.ENCODING_PCM_16BIT;
+
+ private int trimStartFrames;
+ private int trimEndFrames;
+ private boolean reconfigurationPending;
+
+ private int pendingTrimStartBytes;
+ private byte[] endBuffer;
+ private int endBufferSize;
+ private long trimmedFrameCount;
+
+ /** Creates a new audio processor for trimming samples from the start/end of data. */
+ public TrimmingAudioProcessor() {
+ endBuffer = Util.EMPTY_BYTE_ARRAY;
+ }
+
+ /**
+ * Sets the number of audio frames to trim from the start and end of audio passed to this
+ * processor. After calling this method, call {@link #configure(AudioFormat)} to apply the new
+ * trimming frame counts.
+ *
+ * @param trimStartFrames The number of audio frames to trim from the start of audio.
+ * @param trimEndFrames The number of audio frames to trim from the end of audio.
+ * @see AudioSink#configure(int, int, int, int, int[], int, int)
+ */
+ public void setTrimFrameCount(int trimStartFrames, int trimEndFrames) {
+ this.trimStartFrames = trimStartFrames;
+ this.trimEndFrames = trimEndFrames;
+ }
+
+ /** Sets the trimmed frame count returned by {@link #getTrimmedFrameCount()} to zero. */
+ public void resetTrimmedFrameCount() {
+ trimmedFrameCount = 0;
+ }
+
+ /**
+ * Returns the number of audio frames trimmed since the last call to {@link
+ * #resetTrimmedFrameCount()}.
+ */
+ public long getTrimmedFrameCount() {
+ return trimmedFrameCount;
+ }
+
+ @Override
+ public AudioFormat onConfigure(AudioFormat inputAudioFormat)
+ throws UnhandledAudioFormatException {
+ if (inputAudioFormat.encoding != OUTPUT_ENCODING) {
+ throw new UnhandledAudioFormatException(inputAudioFormat);
+ }
+ reconfigurationPending = true;
+ return trimStartFrames != 0 || trimEndFrames != 0 ? inputAudioFormat : AudioFormat.NOT_SET;
+ }
+
+ @Override
+ public void queueInput(ByteBuffer inputBuffer) {
+ int position = inputBuffer.position();
+ int limit = inputBuffer.limit();
+ int remaining = limit - position;
+
+ if (remaining == 0) {
+ return;
+ }
+
+ // Trim any pending start bytes from the input buffer.
+ int trimBytes = Math.min(remaining, pendingTrimStartBytes);
+ trimmedFrameCount += trimBytes / inputAudioFormat.bytesPerFrame;
+ pendingTrimStartBytes -= trimBytes;
+ inputBuffer.position(position + trimBytes);
+ if (pendingTrimStartBytes > 0) {
+ // Nothing to output yet.
+ return;
+ }
+ remaining -= trimBytes;
+
+ // endBuffer must be kept as full as possible, so that we trim the right amount of media if we
+ // don't receive any more input. After taking into account the number of bytes needed to keep
+ // endBuffer as full as possible, the output should be any surplus bytes currently in endBuffer
+ // followed by any surplus bytes in the new inputBuffer.
+ int remainingBytesToOutput = endBufferSize + remaining - endBuffer.length;
+ ByteBuffer buffer = replaceOutputBuffer(remainingBytesToOutput);
+
+ // Output from endBuffer.
+ int endBufferBytesToOutput = Util.constrainValue(remainingBytesToOutput, 0, endBufferSize);
+ buffer.put(endBuffer, 0, endBufferBytesToOutput);
+ remainingBytesToOutput -= endBufferBytesToOutput;
+
+ // Output from inputBuffer, restoring its limit afterwards.
+ int inputBufferBytesToOutput = Util.constrainValue(remainingBytesToOutput, 0, remaining);
+ inputBuffer.limit(inputBuffer.position() + inputBufferBytesToOutput);
+ buffer.put(inputBuffer);
+ inputBuffer.limit(limit);
+ remaining -= inputBufferBytesToOutput;
+
+ // Compact endBuffer, then repopulate it using the new input.
+ endBufferSize -= endBufferBytesToOutput;
+ System.arraycopy(endBuffer, endBufferBytesToOutput, endBuffer, 0, endBufferSize);
+ inputBuffer.get(endBuffer, endBufferSize, remaining);
+ endBufferSize += remaining;
+
+ buffer.flip();
+ }
+
+ @Override
+ public ByteBuffer getOutput() {
+ if (super.isEnded() && endBufferSize > 0) {
+ // Because audio processors may be drained in the middle of the stream we assume that the
+ // contents of the end buffer need to be output. For gapless transitions, configure will
+ // always be called, so the end buffer is cleared in onQueueEndOfStream.
+ replaceOutputBuffer(endBufferSize).put(endBuffer, 0, endBufferSize).flip();
+ endBufferSize = 0;
+ }
+ return super.getOutput();
+ }
+
+ @Override
+ public boolean isEnded() {
+ return super.isEnded() && endBufferSize == 0;
+ }
+
+ @Override
+ protected void onQueueEndOfStream() {
+ if (reconfigurationPending) {
+ // Trim audio in the end buffer.
+ if (endBufferSize > 0) {
+ trimmedFrameCount += endBufferSize / inputAudioFormat.bytesPerFrame;
+ }
+ endBufferSize = 0;
+ }
+ }
+
+ @Override
+ protected void onFlush() {
+ if (reconfigurationPending) {
+ // This is the initial flush after reconfiguration. Prepare to trim bytes from the start/end.
+ reconfigurationPending = false;
+ endBuffer = new byte[trimEndFrames * inputAudioFormat.bytesPerFrame];
+ pendingTrimStartBytes = trimStartFrames * inputAudioFormat.bytesPerFrame;
+ } else {
+ // This is a flush during playback (after the initial flush). We assume this was caused by a
+ // seek to a non-zero position and clear pending start bytes. This assumption may be wrong (we
+ // may be seeking to zero), but playing data that should have been trimmed shouldn't be
+ // noticeable after a seek. Ideally we would check the timestamp of the first input buffer
+ // queued after flushing to decide whether to trim (see also [Internal: b/77292509]).
+ pendingTrimStartBytes = 0;
+ }
+ endBufferSize = 0;
+ }
+
+ @Override
+ protected void onReset() {
+ endBuffer = Util.EMPTY_BYTE_ARRAY;
+ }
+
+}
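
The byte accounting in onFlush() is easy to sanity-check by hand. A worked example, assuming 16-bit stereo PCM, so bytesPerFrame = 2 bytes per sample x 2 channels = 4:

    final class TrimMathSketch {
      public static void main(String[] args) {
        int bytesPerFrame = 2 * 2; // 16-bit samples, 2 channels.
        int trimStartFrames = 1024;
        int trimEndFrames = 512;
        // Bytes silently dropped from the start of the stream.
        System.out.println(trimStartFrames * bytesPerFrame); // 4096
        // Length of endBuffer: bytes held back so the final frames can be discarded.
        System.out.println(trimEndFrames * bytesPerFrame); // 2048
      }
    }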
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/WavUtil.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/WavUtil.java
new file mode 100644
index 0000000000..d1245761aa
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/WavUtil.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.Format;
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.Util;
+
+/** Utilities for handling WAVE files. */
+public final class WavUtil {
+
+ /** Four character code for "RIFF". */
+ public static final int RIFF_FOURCC = 0x52494646;
+ /** Four character code for "WAVE". */
+ public static final int WAVE_FOURCC = 0x57415645;
+ /** Four character code for "fmt ". */
+ public static final int FMT_FOURCC = 0x666d7420;
+ /** Four character code for "data". */
+ public static final int DATA_FOURCC = 0x64617461;
+
+ /** WAVE type value for integer PCM audio data. */
+ public static final int TYPE_PCM = 0x0001;
+ /** WAVE type value for float PCM audio data. */
+ public static final int TYPE_FLOAT = 0x0003;
+ /** WAVE type value for 8-bit ITU-T G.711 A-law audio data. */
+ public static final int TYPE_ALAW = 0x0006;
+ /** WAVE type value for 8-bit ITU-T G.711 mu-law audio data. */
+ public static final int TYPE_MLAW = 0x0007;
+ /** WAVE type value for IMA ADPCM audio data. */
+ public static final int TYPE_IMA_ADPCM = 0x0011;
+ /** WAVE type value for extended WAVE format. */
+ public static final int TYPE_WAVE_FORMAT_EXTENSIBLE = 0xFFFE;
+
+ /**
+ * Returns the WAVE format type value for the given {@link C.PcmEncoding}.
+ *
+ * @param pcmEncoding The {@link C.PcmEncoding} value.
+ * @return The corresponding WAVE format type.
+ * @throws IllegalArgumentException If {@code pcmEncoding} is not a {@link C.PcmEncoding}, or if
+ * it's {@link C#ENCODING_INVALID} or {@link Format#NO_VALUE}.
+ */
+ public static int getTypeForPcmEncoding(@C.PcmEncoding int pcmEncoding) {
+ switch (pcmEncoding) {
+ case C.ENCODING_PCM_8BIT:
+ case C.ENCODING_PCM_16BIT:
+ case C.ENCODING_PCM_24BIT:
+ case C.ENCODING_PCM_32BIT:
+ return TYPE_PCM;
+ case C.ENCODING_PCM_FLOAT:
+ return TYPE_FLOAT;
+ case C.ENCODING_PCM_16BIT_BIG_ENDIAN: // Not TYPE_PCM, because TYPE_PCM is little endian.
+ case C.ENCODING_INVALID:
+ case Format.NO_VALUE:
+ default:
+ throw new IllegalArgumentException();
+ }
+ }
+
+ /**
+ * Returns the {@link C.PcmEncoding} for the given WAVE format type value, or {@link
+ * C#ENCODING_INVALID} if the type is not a known PCM type.
+ */
+ public static @C.PcmEncoding int getPcmEncodingForType(int type, int bitsPerSample) {
+ switch (type) {
+ case TYPE_PCM:
+ case TYPE_WAVE_FORMAT_EXTENSIBLE:
+ return Util.getPcmEncoding(bitsPerSample);
+ case TYPE_FLOAT:
+ return bitsPerSample == 32 ? C.ENCODING_PCM_FLOAT : C.ENCODING_INVALID;
+ default:
+ return C.ENCODING_INVALID;
+ }
+ }
+
+ private WavUtil() {
+ // Prevent instantiation.
+ }
+}
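
A round-trip sketch of the two mappings above; the specific encoding and bit depth are arbitrary examples:

    import org.mozilla.thirdparty.com.google.android.exoplayer2.C;
    import org.mozilla.thirdparty.com.google.android.exoplayer2.audio.WavUtil;

    final class WavUtilSketch {
      public static void main(String[] args) {
        // 16-bit PCM maps to the integer PCM WAVE type...
        int type = WavUtil.getTypeForPcmEncoding(C.ENCODING_PCM_16BIT); // TYPE_PCM
        // ...and back, given the bit depth. Unknown types yield C.ENCODING_INVALID
        // rather than throwing.
        int encoding = WavUtil.getPcmEncodingForType(type, /* bitsPerSample= */ 16);
        System.out.println(encoding == C.ENCODING_PCM_16BIT); // true
      }
    }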
diff --git a/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/package-info.java b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/package-info.java
new file mode 100644
index 0000000000..95c29d7333
--- /dev/null
+++ b/mobile/android/exoplayer2/src/main/java/org/mozilla/thirdparty/com/google/android/exoplayer2/audio/package-info.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@NonNullApi
+package org.mozilla.thirdparty.com.google.android.exoplayer2.audio;
+
+import org.mozilla.thirdparty.com.google.android.exoplayer2.util.NonNullApi;