Diffstat (limited to 'third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio')
-rw-r--r--  third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/LowLatencyAudioBufferManager.java   81
-rw-r--r--  third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/VolumeLogger.java                    83
-rw-r--r--  third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java             227
-rw-r--r--  third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java             122
-rw-r--r--  third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java              743
-rw-r--r--  third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java               585
-rw-r--r--  third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java               308
7 files changed, 2149 insertions, 0 deletions
diff --git a/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/LowLatencyAudioBufferManager.java b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/LowLatencyAudioBufferManager.java
new file mode 100644
index 0000000000..70c625ab4f
--- /dev/null
+++ b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/LowLatencyAudioBufferManager.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.media.AudioTrack;
+import android.os.Build;
+import org.webrtc.Logging;
+
+// Lowers the buffer size if no underruns are detected for 100 ms. Once an
+// underrun is detected, the buffer size is increased by 10 ms and it will not
+// be lowered further. The buffer size will never be increased more than
+// 5 times, to avoid the possibility of the buffer size increasing without
+// bounds.
+class LowLatencyAudioBufferManager {
+ private static final String TAG = "LowLatencyAudioBufferManager";
+ // The underrun count that was valid during the previous call to maybeAdjustBufferSize(). Used to
+ // detect increases in the value.
+ private int prevUnderrunCount;
+ // The number of ticks to wait without an underrun before decreasing the buffer size.
+ private int ticksUntilNextDecrease;
+ // Indicates whether we should continue to decrease the buffer size.
+ private boolean keepLoweringBufferSize;
+ // How many times the buffer size has been increased.
+ private int bufferIncreaseCounter;
+
+ public LowLatencyAudioBufferManager() {
+ this.prevUnderrunCount = 0;
+ this.ticksUntilNextDecrease = 10;
+ this.keepLoweringBufferSize = true;
+ this.bufferIncreaseCounter = 0;
+ }
+
+ public void maybeAdjustBufferSize(AudioTrack audioTrack) {
+ if (Build.VERSION.SDK_INT >= 26) {
+ final int underrunCount = audioTrack.getUnderrunCount();
+ if (underrunCount > prevUnderrunCount) {
+ // Don't increase buffer more than 5 times. Continuing to increase the buffer size
+ // could be harmful on low-power devices that regularly experience underruns under
+ // normal conditions.
+ if (bufferIncreaseCounter < 5) {
+ // Underrun detected, increase buffer size by 10ms.
+ final int currentBufferSize = audioTrack.getBufferSizeInFrames();
+ final int newBufferSize = currentBufferSize + audioTrack.getPlaybackRate() / 100;
+ Logging.d(TAG,
+ "Underrun detected! Increasing AudioTrack buffer size from " + currentBufferSize
+ + " to " + newBufferSize);
+ audioTrack.setBufferSizeInFrames(newBufferSize);
+ bufferIncreaseCounter++;
+ }
+ // Stop trying to lower the buffer size.
+ keepLoweringBufferSize = false;
+ prevUnderrunCount = underrunCount;
+ ticksUntilNextDecrease = 10;
+ } else if (keepLoweringBufferSize) {
+ ticksUntilNextDecrease--;
+ if (ticksUntilNextDecrease <= 0) {
+ // No underrun seen for 100 ms, try to lower the buffer size by 10ms.
+ final int bufferSize10ms = audioTrack.getPlaybackRate() / 100;
+ // Never go below a buffer size of 10ms.
+ final int currentBufferSize = audioTrack.getBufferSizeInFrames();
+ final int newBufferSize = Math.max(bufferSize10ms, currentBufferSize - bufferSize10ms);
+ if (newBufferSize != currentBufferSize) {
+ Logging.d(TAG,
+ "Lowering AudioTrack buffer size from " + currentBufferSize + " to "
+ + newBufferSize);
+ audioTrack.setBufferSizeInFrames(newBufferSize);
+ }
+ ticksUntilNextDecrease = 10;
+ }
+ }
+ }
+ }
+}
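
The 10 ms step used by this manager comes from AudioTrack.getPlaybackRate(), which returns frames per second, so playbackRate / 100 is one 10 ms block of frames. Below is a minimal sketch of how a playout loop might drive the manager; the wrapper class, the PlayoutSource interface, and the helper names are hypothetical and not part of this patch.

package org.webrtc.audio;

import android.media.AudioTrack;

// Hypothetical illustration: one tick per 10 ms block, so ten consecutive
// ticks without an underrun (~100 ms) let the manager shrink the buffer again.
final class BufferManagerUsageSketch {
  interface PlayoutSource { // hypothetical provider of 10 ms PCM blocks
    boolean hasMoreAudio();
    byte[] next10msBlock();
  }

  static void playoutLoop(AudioTrack track, PlayoutSource source) {
    LowLatencyAudioBufferManager manager = new LowLatencyAudioBufferManager();
    while (source.hasMoreAudio()) {
      byte[] block = source.next10msBlock();
      track.write(block, 0, block.length); // blocking write of one 10 ms block
      manager.maybeAdjustBufferSize(track); // no-op below API 26
    }
  }
}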
diff --git a/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/VolumeLogger.java b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/VolumeLogger.java
new file mode 100644
index 0000000000..06d5cd3a8e
--- /dev/null
+++ b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/VolumeLogger.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.media.AudioManager;
+import androidx.annotation.Nullable;
+import java.util.Timer;
+import java.util.TimerTask;
+import org.webrtc.Logging;
+
+// TODO(magjed): Do we really need to spawn a new thread just to log volume? Can we re-use the
+// AudioTrackThread instead?
+/**
+ * Private utility class that periodically checks and logs the volume level of the audio stream that
+ * is currently controlled by the volume control. A timer triggers logs once every 30 seconds and
+ * the timer's associated thread is named "WebRtcVolumeLevelLoggerThread".
+ */
+class VolumeLogger {
+ private static final String TAG = "VolumeLogger";
+ private static final String THREAD_NAME = "WebRtcVolumeLevelLoggerThread";
+ private static final int TIMER_PERIOD_IN_SECONDS = 30;
+
+ private final AudioManager audioManager;
+ private @Nullable Timer timer;
+
+ public VolumeLogger(AudioManager audioManager) {
+ this.audioManager = audioManager;
+ }
+
+ public void start() {
+ Logging.d(TAG, "start" + WebRtcAudioUtils.getThreadInfo());
+ if (timer != null) {
+ return;
+ }
+ Logging.d(TAG, "audio mode is: " + WebRtcAudioUtils.modeToString(audioManager.getMode()));
+
+ timer = new Timer(THREAD_NAME);
+ timer.schedule(new LogVolumeTask(audioManager.getStreamMaxVolume(AudioManager.STREAM_RING),
+ audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL)),
+ 0, TIMER_PERIOD_IN_SECONDS * 1000);
+ }
+
+ private class LogVolumeTask extends TimerTask {
+ private final int maxRingVolume;
+ private final int maxVoiceCallVolume;
+
+ LogVolumeTask(int maxRingVolume, int maxVoiceCallVolume) {
+ this.maxRingVolume = maxRingVolume;
+ this.maxVoiceCallVolume = maxVoiceCallVolume;
+ }
+
+ @Override
+ public void run() {
+ final int mode = audioManager.getMode();
+ if (mode == AudioManager.MODE_RINGTONE) {
+ Logging.d(TAG,
+ "STREAM_RING stream volume: " + audioManager.getStreamVolume(AudioManager.STREAM_RING)
+ + " (max=" + maxRingVolume + ")");
+ } else if (mode == AudioManager.MODE_IN_COMMUNICATION) {
+ Logging.d(TAG,
+ "VOICE_CALL stream volume: "
+ + audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL)
+ + " (max=" + maxVoiceCallVolume + ")");
+ }
+ }
+ }
+
+ public void stop() {
+ Logging.d(TAG, "stop" + WebRtcAudioUtils.getThreadInfo());
+ if (timer != null) {
+ timer.cancel();
+ timer = null;
+ }
+ }
+}
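
WebRtcAudioTrack, added later in this diff, owns an optional VolumeLogger instance. A minimal lifecycle sketch, assuming the logger is started when playout begins and stopped when it ends; the wrapper class below is hypothetical.

package org.webrtc.audio;

import android.content.Context;
import android.media.AudioManager;

// Hypothetical lifecycle sketch: keep the 30-second timer thread alive only
// while audio playout is active.
final class VolumeLoggerLifecycleSketch {
  static VolumeLogger startLogging(Context context) {
    AudioManager audioManager =
        (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
    VolumeLogger logger = new VolumeLogger(audioManager);
    logger.start(); // schedules LogVolumeTask every 30 seconds
    return logger;
  }

  static void stopLogging(VolumeLogger logger) {
    logger.stop(); // cancels the timer; calling stop() twice is harmless
  }
}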
diff --git a/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java
new file mode 100644
index 0000000000..a9ff1011b6
--- /dev/null
+++ b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.media.audiofx.AcousticEchoCanceler;
+import android.media.audiofx.AudioEffect;
+import android.media.audiofx.AudioEffect.Descriptor;
+import android.media.audiofx.NoiseSuppressor;
+import android.os.Build;
+import androidx.annotation.Nullable;
+import java.util.UUID;
+import org.webrtc.Logging;
+
+// This class wraps control of two different platform effects. Supported
+// effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
+// Calling enable() will activate all effects that are
+// supported by the device if the corresponding `shouldEnableXXX` member is set.
+class WebRtcAudioEffects {
+ private static final boolean DEBUG = false;
+
+ private static final String TAG = "WebRtcAudioEffectsExternal";
+
+ // UUIDs for Software Audio Effects that we want to avoid using.
+ // The implementor field will be set to "The Android Open Source Project".
+ private static final UUID AOSP_ACOUSTIC_ECHO_CANCELER =
+ UUID.fromString("bb392ec0-8d4d-11e0-a896-0002a5d5c51b");
+ private static final UUID AOSP_NOISE_SUPPRESSOR =
+ UUID.fromString("c06c8400-8e06-11e0-9cb6-0002a5d5c51b");
+
+ // Contains the available effect descriptors returned from the
+ // AudioEffect.getEffects() call. This result is cached to avoid doing the
+ // slow OS call multiple times.
+ private static @Nullable Descriptor[] cachedEffects;
+
+ // Contains the audio effect objects. Created in enable() and destroyed
+ // in release().
+ private @Nullable AcousticEchoCanceler aec;
+ private @Nullable NoiseSuppressor ns;
+
+ // Affects the final state given to the setEnabled() method on each effect.
+ // The default state is set to "disabled" but each effect can also be enabled
+ // by calling setAEC() and setNS().
+ private boolean shouldEnableAec;
+ private boolean shouldEnableNs;
+
+ // Returns true if all conditions for supporting HW Acoustic Echo Cancellation (AEC) are
+ // fulfilled.
+ public static boolean isAcousticEchoCancelerSupported() {
+ return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_AEC, AOSP_ACOUSTIC_ECHO_CANCELER);
+ }
+
+ // Returns true if all conditions for supporting HW Noise Suppression (NS) are fulfilled.
+ public static boolean isNoiseSuppressorSupported() {
+ return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_NS, AOSP_NOISE_SUPPRESSOR);
+ }
+
+ public WebRtcAudioEffects() {
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ }
+
+ // Call this method to enable or disable the platform AEC. It modifies
+ // `shouldEnableAec` which is used in enable() where the actual state
+ // of the AEC effect is modified. Returns true if HW AEC is supported and
+ // false otherwise.
+ public boolean setAEC(boolean enable) {
+ Logging.d(TAG, "setAEC(" + enable + ")");
+ if (!isAcousticEchoCancelerSupported()) {
+ Logging.w(TAG, "Platform AEC is not supported");
+ shouldEnableAec = false;
+ return false;
+ }
+ if (aec != null && (enable != shouldEnableAec)) {
+ Logging.e(TAG, "Platform AEC state can't be modified while recording");
+ return false;
+ }
+ shouldEnableAec = enable;
+ return true;
+ }
+
+ // Call this method to enable or disable the platform NS. It modifies
+ // `shouldEnableNs` which is used in enable() where the actual state
+ // of the NS effect is modified. Returns true if HW NS is supported and
+ // false otherwise.
+ public boolean setNS(boolean enable) {
+ Logging.d(TAG, "setNS(" + enable + ")");
+ if (!isNoiseSuppressorSupported()) {
+ Logging.w(TAG, "Platform NS is not supported");
+ shouldEnableNs = false;
+ return false;
+ }
+ if (ns != null && (enable != shouldEnableNs)) {
+ Logging.e(TAG, "Platform NS state can't be modified while recording");
+ return false;
+ }
+ shouldEnableNs = enable;
+ return true;
+ }
+
+ public void enable(int audioSession) {
+ Logging.d(TAG, "enable(audioSession=" + audioSession + ")");
+ assertTrue(aec == null);
+ assertTrue(ns == null);
+
+ if (DEBUG) {
+ // Add logging of supported effects but filter out "VoIP effects", i.e.,
+ // AEC, AGC and NS. Avoid calling AudioEffect.queryEffects() unless the
+ // DEBUG flag is set since we have seen crashes in this API.
+ for (Descriptor d : AudioEffect.queryEffects()) {
+ if (effectTypeIsVoIP(d.type)) {
+ Logging.d(TAG,
+ "name: " + d.name + ", "
+ + "mode: " + d.connectMode + ", "
+ + "implementor: " + d.implementor + ", "
+ + "UUID: " + d.uuid);
+ }
+ }
+ }
+
+ if (isAcousticEchoCancelerSupported()) {
+ // Create an AcousticEchoCanceler and attach it to the AudioRecord on
+ // the specified audio session.
+ aec = AcousticEchoCanceler.create(audioSession);
+ if (aec != null) {
+ boolean enabled = aec.getEnabled();
+ boolean enable = shouldEnableAec && isAcousticEchoCancelerSupported();
+ if (aec.setEnabled(enable) != AudioEffect.SUCCESS) {
+ Logging.e(TAG, "Failed to set the AcousticEchoCanceler state");
+ }
+ Logging.d(TAG,
+ "AcousticEchoCanceler: was " + (enabled ? "enabled" : "disabled") + ", enable: "
+ + enable + ", is now: " + (aec.getEnabled() ? "enabled" : "disabled"));
+ } else {
+ Logging.e(TAG, "Failed to create the AcousticEchoCanceler instance");
+ }
+ }
+
+ if (isNoiseSuppressorSupported()) {
+ // Create a NoiseSuppressor and attach it to the AudioRecord on the
+ // specified audio session.
+ ns = NoiseSuppressor.create(audioSession);
+ if (ns != null) {
+ boolean enabled = ns.getEnabled();
+ boolean enable = shouldEnableNs && isNoiseSuppressorSupported();
+ if (ns.setEnabled(enable) != AudioEffect.SUCCESS) {
+ Logging.e(TAG, "Failed to set the NoiseSuppressor state");
+ }
+ Logging.d(TAG,
+ "NoiseSuppressor: was " + (enabled ? "enabled" : "disabled") + ", enable: " + enable
+ + ", is now: " + (ns.getEnabled() ? "enabled" : "disabled"));
+ } else {
+ Logging.e(TAG, "Failed to create the NoiseSuppressor instance");
+ }
+ }
+ }
+
+ // Releases all native audio effect resources. It is a good practice to
+ // release the effect engine when not in use as control can be returned
+ // to other applications or the native resources released.
+ public void release() {
+ Logging.d(TAG, "release");
+ if (aec != null) {
+ aec.release();
+ aec = null;
+ }
+ if (ns != null) {
+ ns.release();
+ ns = null;
+ }
+ }
+
+ // Returns true for effect types in `type` that are of "VoIP" types:
+ // Acoustic Echo Canceler (AEC) or Automatic Gain Control (AGC) or
+ // Noise Suppressor (NS). Note that an extra check for support is needed
+ // in each comparison since some devices include effects in the
+ // AudioEffect.Descriptor array that are actually not available on the device.
+ // As an example, the Samsung Galaxy S6 includes an AGC in the descriptor but
+ // AutomaticGainControl.isAvailable() returns false.
+ private boolean effectTypeIsVoIP(UUID type) {
+ return (AudioEffect.EFFECT_TYPE_AEC.equals(type) && isAcousticEchoCancelerSupported())
+ || (AudioEffect.EFFECT_TYPE_NS.equals(type) && isNoiseSuppressorSupported());
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ // Returns the cached copy of the audio effects array, if available, or
+ // queries the operating system for the list of effects.
+ private static @Nullable Descriptor[] getAvailableEffects() {
+ if (cachedEffects != null) {
+ return cachedEffects;
+ }
+ // The caching is best effort only - if this method is called from several
+ // threads in parallel, they may end up doing the underlying OS call
+ // multiple times. It's normally only called on one thread so there's no
+ // real need to optimize for the multiple threads case.
+ cachedEffects = AudioEffect.queryEffects();
+ return cachedEffects;
+ }
+
+ // Returns true if an effect of the specified type is available. Functionally
+ // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
+ // faster as it avoids the expensive OS call to enumerate effects.
+ private static boolean isEffectTypeAvailable(UUID effectType, UUID blockListedUuid) {
+ Descriptor[] effects = getAvailableEffects();
+ if (effects == null) {
+ return false;
+ }
+ for (Descriptor d : effects) {
+ if (d.type.equals(effectType)) {
+ return !d.uuid.equals(blockListedUuid);
+ }
+ }
+ return false;
+ }
+}
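
The intended call order is: request the effects with setAEC()/setNS(), attach them to a recording session with enable(), and free them with release(). A short sketch of that sequence, mirroring how WebRtcAudioRecord (added below) uses the class; the wrapper class and its method names are hypothetical.

package org.webrtc.audio;

import android.media.AudioRecord;

// Hypothetical sketch of the setAEC/setNS -> enable -> release sequence.
final class AudioEffectsUsageSketch {
  static WebRtcAudioEffects attachEffects(AudioRecord audioRecord) {
    WebRtcAudioEffects effects = new WebRtcAudioEffects();
    // Both setters return false when the platform effect is unsupported or
    // only the block-listed AOSP software implementation is available.
    effects.setAEC(true);
    effects.setNS(true);
    // Attach AEC/NS to the record session before recording starts.
    effects.enable(audioRecord.getAudioSessionId());
    return effects;
  }

  static void detachEffects(WebRtcAudioEffects effects) {
    effects.release(); // release native effect resources when recording stops
  }
}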
diff --git a/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
new file mode 100644
index 0000000000..f398602a28
--- /dev/null
+++ b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioRecord;
+import android.media.AudioTrack;
+import android.os.Build;
+import org.webrtc.Logging;
+import org.webrtc.CalledByNative;
+
+/**
+ * This class contains static functions to query sample rate and input/output audio buffer sizes.
+ */
+class WebRtcAudioManager {
+ private static final String TAG = "WebRtcAudioManagerExternal";
+
+ private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
+
+ // Default audio data format is PCM 16 bit per sample.
+ // Guaranteed to be supported by all devices.
+ private static final int BITS_PER_SAMPLE = 16;
+
+ private static final int DEFAULT_FRAME_PER_BUFFER = 256;
+
+ @CalledByNative
+ static AudioManager getAudioManager(Context context) {
+ return (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
+ }
+
+ @CalledByNative
+ static int getOutputBufferSize(
+ Context context, AudioManager audioManager, int sampleRate, int numberOfOutputChannels) {
+ return isLowLatencyOutputSupported(context)
+ ? getLowLatencyFramesPerBuffer(audioManager)
+ : getMinOutputFrameSize(sampleRate, numberOfOutputChannels);
+ }
+
+ @CalledByNative
+ static int getInputBufferSize(
+ Context context, AudioManager audioManager, int sampleRate, int numberOfInputChannels) {
+ return isLowLatencyInputSupported(context)
+ ? getLowLatencyFramesPerBuffer(audioManager)
+ : getMinInputFrameSize(sampleRate, numberOfInputChannels);
+ }
+
+ private static boolean isLowLatencyOutputSupported(Context context) {
+ return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_AUDIO_LOW_LATENCY);
+ }
+
+ private static boolean isLowLatencyInputSupported(Context context) {
+ // TODO(henrika): investigate if some sort of device list is needed here
+ // as well. The NDK doc states that: "As of API level 21, lower latency
+ // audio input is supported on select devices. To take advantage of this
+ // feature, first confirm that lower latency output is available".
+ return isLowLatencyOutputSupported(context);
+ }
+
+ /**
+ * Returns the native input/output sample rate for this device's output stream.
+ */
+ @CalledByNative
+ static int getSampleRate(AudioManager audioManager) {
+ // Override this if we're running on an old emulator image which only
+ // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
+ if (WebRtcAudioUtils.runningOnEmulator()) {
+ Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
+ return 8000;
+ }
+ // Deliver best possible estimate based on default Android AudioManager APIs.
+ final int sampleRateHz = getSampleRateForApiLevel(audioManager);
+ Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
+ return sampleRateHz;
+ }
+
+ private static int getSampleRateForApiLevel(AudioManager audioManager) {
+ String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+ return (sampleRateString == null) ? DEFAULT_SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
+ }
+
+ // Returns the native output buffer size for low-latency output streams.
+ private static int getLowLatencyFramesPerBuffer(AudioManager audioManager) {
+ String framesPerBuffer =
+ audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
+ return framesPerBuffer == null ? DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
+ }
+
+ // Returns the minimum output buffer size for Java based audio (AudioTrack).
+ // This size can also be used for OpenSL ES implementations on devices that
+ // lack support for low-latency output.
+ private static int getMinOutputFrameSize(int sampleRateInHz, int numChannels) {
+ final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+ final int channelConfig =
+ (numChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
+ return AudioTrack.getMinBufferSize(
+ sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
+ / bytesPerFrame;
+ }
+
+ // Returns the minimum input buffer size for Java based audio (AudioRecord).
+ // This size can also be used for OpenSL ES implementations on devices that
+ // lack support for low-latency input.
+ private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
+ final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+ final int channelConfig =
+ (numChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
+ return AudioRecord.getMinBufferSize(
+ sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
+ / bytesPerFrame;
+ }
+}
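
The helpers above return buffer sizes in frames rather than bytes, dividing the result of getMinBufferSize() by the frame size in bytes. A small worked sketch of that arithmetic; the class name and the numeric values are illustrative only.

// Illustration of the frame/byte conversion used by getMinOutputFrameSize()
// and getMinInputFrameSize(); with PCM 16-bit audio, one frame is
// numChannels * 2 bytes.
final class BufferSizeArithmeticSketch {
  static int framesFromBytes(int bufferSizeInBytes, int numChannels) {
    int bytesPerFrame = numChannels * (16 / 8); // 2 bytes per sample
    return bufferSizeInBytes / bytesPerFrame;
  }

  public static void main(String[] args) {
    // Example: a device reports a 3840-byte minimum AudioTrack buffer for
    // 48 kHz stereo. That is 3840 / (2 * 2) = 960 frames, i.e. 20 ms of audio.
    System.out.println(framesFromBytes(3840, 2)); // prints 960
  }
}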
diff --git a/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
new file mode 100644
index 0000000000..6647e5fcbb
--- /dev/null
+++ b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
@@ -0,0 +1,743 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.annotation.TargetApi;
+import android.content.Context;
+import android.media.AudioDeviceInfo;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioRecord;
+import android.media.AudioRecordingConfiguration;
+import android.media.AudioTimestamp;
+import android.media.MediaRecorder.AudioSource;
+import android.os.Build;
+import android.os.Process;
+import androidx.annotation.Nullable;
+import androidx.annotation.RequiresApi;
+import java.lang.System;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import org.webrtc.CalledByNative;
+import org.webrtc.Logging;
+import org.webrtc.ThreadUtils;
+import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordErrorCallback;
+import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordStartErrorCode;
+import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordStateCallback;
+import org.webrtc.audio.JavaAudioDeviceModule.SamplesReadyCallback;
+
+class WebRtcAudioRecord {
+ private static final String TAG = "WebRtcAudioRecordExternal";
+
+ // Requested size of each recorded buffer provided to the client.
+ private static final int CALLBACK_BUFFER_SIZE_MS = 10;
+
+ // Average number of callbacks per second.
+ private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+ // We ask for a native buffer size of BUFFER_SIZE_FACTOR * (minimum required
+ // buffer size). The extra space is allocated to guard against glitches under
+ // high load.
+ private static final int BUFFER_SIZE_FACTOR = 2;
+
+ // The AudioRecordJavaThread is allowed to wait for a successful call to join(),
+ // but the wait times out after this amount of time.
+ private static final long AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS = 2000;
+
+ public static final int DEFAULT_AUDIO_SOURCE = AudioSource.VOICE_COMMUNICATION;
+
+ // Default audio data format is PCM 16 bit per sample.
+ // Guaranteed to be supported by all devices.
+ public static final int DEFAULT_AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
+
+ // Indicates AudioRecord has started recording audio.
+ private static final int AUDIO_RECORD_START = 0;
+
+ // Indicates AudioRecord has stopped recording audio.
+ private static final int AUDIO_RECORD_STOP = 1;
+
+ // Time to wait before checking recording status after start has been called. Tests have
+ // shown that the result can sometimes be invalid (our own status might be missing) if we check
+ // directly after start.
+ private static final int CHECK_REC_STATUS_DELAY_MS = 100;
+
+ private final Context context;
+ private final AudioManager audioManager;
+ private final int audioSource;
+ private final int audioFormat;
+
+ private long nativeAudioRecord;
+
+ private final WebRtcAudioEffects effects = new WebRtcAudioEffects();
+
+ private @Nullable ByteBuffer byteBuffer;
+
+ private @Nullable AudioRecord audioRecord;
+ private @Nullable AudioRecordThread audioThread;
+ private @Nullable AudioDeviceInfo preferredDevice;
+
+ private final ScheduledExecutorService executor;
+ private @Nullable ScheduledFuture<String> future;
+
+ private volatile boolean microphoneMute;
+ private final AtomicReference<Boolean> audioSourceMatchesRecordingSessionRef =
+ new AtomicReference<>();
+ private byte[] emptyBytes;
+
+ private final @Nullable AudioRecordErrorCallback errorCallback;
+ private final @Nullable AudioRecordStateCallback stateCallback;
+ private final @Nullable SamplesReadyCallback audioSamplesReadyCallback;
+ private final boolean isAcousticEchoCancelerSupported;
+ private final boolean isNoiseSuppressorSupported;
+
+ /**
+ * Audio thread which keeps calling AudioRecord.read() waiting for audio
+ * to be recorded. Feeds recorded data to the native counterpart as a
+ * periodic sequence of callbacks using DataIsRecorded().
+ * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+ */
+ private class AudioRecordThread extends Thread {
+ private volatile boolean keepAlive = true;
+
+ public AudioRecordThread(String name) {
+ super(name);
+ }
+
+ @Override
+ public void run() {
+ Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+ Logging.d(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
+ assertTrue(audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING);
+
+ // Audio recording has started and the client is informed about it.
+ doAudioRecordStateCallback(AUDIO_RECORD_START);
+
+ long lastTime = System.nanoTime();
+ AudioTimestamp audioTimestamp = null;
+ if (Build.VERSION.SDK_INT >= 24) {
+ audioTimestamp = new AudioTimestamp();
+ }
+ while (keepAlive) {
+ int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
+ if (bytesRead == byteBuffer.capacity()) {
+ if (microphoneMute) {
+ byteBuffer.clear();
+ byteBuffer.put(emptyBytes);
+ }
+ // It's possible we've been shut down during the read, and stopRecording() tried and
+ // failed to join this thread. To be a bit safer, try to avoid calling any native methods
+ // in case they've been unregistered after stopRecording() returned.
+ if (keepAlive) {
+ long captureTimeNs = 0;
+ if (Build.VERSION.SDK_INT >= 24) {
+ if (audioRecord.getTimestamp(audioTimestamp, AudioTimestamp.TIMEBASE_MONOTONIC)
+ == AudioRecord.SUCCESS) {
+ captureTimeNs = audioTimestamp.nanoTime;
+ }
+ }
+ nativeDataIsRecorded(nativeAudioRecord, bytesRead, captureTimeNs);
+ }
+ if (audioSamplesReadyCallback != null) {
+ // Copy the entire byte buffer array. The start of the byteBuffer is not necessarily
+ // at index 0.
+ byte[] data = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.arrayOffset(),
+ byteBuffer.capacity() + byteBuffer.arrayOffset());
+ audioSamplesReadyCallback.onWebRtcAudioRecordSamplesReady(
+ new JavaAudioDeviceModule.AudioSamples(audioRecord.getAudioFormat(),
+ audioRecord.getChannelCount(), audioRecord.getSampleRate(), data));
+ }
+ } else {
+ String errorMessage = "AudioRecord.read failed: " + bytesRead;
+ Logging.e(TAG, errorMessage);
+ if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
+ keepAlive = false;
+ reportWebRtcAudioRecordError(errorMessage);
+ }
+ }
+ }
+
+ try {
+ if (audioRecord != null) {
+ audioRecord.stop();
+ doAudioRecordStateCallback(AUDIO_RECORD_STOP);
+ }
+ } catch (IllegalStateException e) {
+ Logging.e(TAG, "AudioRecord.stop failed: " + e.getMessage());
+ }
+ }
+
+ // Stops the inner thread loop and also calls AudioRecord.stop().
+ // Does not block the calling thread.
+ public void stopThread() {
+ Logging.d(TAG, "stopThread");
+ keepAlive = false;
+ }
+ }
+
+ @CalledByNative
+ WebRtcAudioRecord(Context context, AudioManager audioManager) {
+ this(context, newDefaultScheduler() /* scheduler */, audioManager, DEFAULT_AUDIO_SOURCE,
+ DEFAULT_AUDIO_FORMAT, null /* errorCallback */, null /* stateCallback */,
+ null /* audioSamplesReadyCallback */, WebRtcAudioEffects.isAcousticEchoCancelerSupported(),
+ WebRtcAudioEffects.isNoiseSuppressorSupported());
+ }
+
+ public WebRtcAudioRecord(Context context, ScheduledExecutorService scheduler,
+ AudioManager audioManager, int audioSource, int audioFormat,
+ @Nullable AudioRecordErrorCallback errorCallback,
+ @Nullable AudioRecordStateCallback stateCallback,
+ @Nullable SamplesReadyCallback audioSamplesReadyCallback,
+ boolean isAcousticEchoCancelerSupported, boolean isNoiseSuppressorSupported) {
+ if (isAcousticEchoCancelerSupported && !WebRtcAudioEffects.isAcousticEchoCancelerSupported()) {
+ throw new IllegalArgumentException("HW AEC not supported");
+ }
+ if (isNoiseSuppressorSupported && !WebRtcAudioEffects.isNoiseSuppressorSupported()) {
+ throw new IllegalArgumentException("HW NS not supported");
+ }
+ this.context = context;
+ this.executor = scheduler;
+ this.audioManager = audioManager;
+ this.audioSource = audioSource;
+ this.audioFormat = audioFormat;
+ this.errorCallback = errorCallback;
+ this.stateCallback = stateCallback;
+ this.audioSamplesReadyCallback = audioSamplesReadyCallback;
+ this.isAcousticEchoCancelerSupported = isAcousticEchoCancelerSupported;
+ this.isNoiseSuppressorSupported = isNoiseSuppressorSupported;
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ }
+
+ @CalledByNative
+ public void setNativeAudioRecord(long nativeAudioRecord) {
+ this.nativeAudioRecord = nativeAudioRecord;
+ }
+
+ @CalledByNative
+ boolean isAcousticEchoCancelerSupported() {
+ return isAcousticEchoCancelerSupported;
+ }
+
+ @CalledByNative
+ boolean isNoiseSuppressorSupported() {
+ return isNoiseSuppressorSupported;
+ }
+
+ // Returns true if a valid call to verifyAudioConfig() has been done. Should always be
+ // checked before using the returned value of isAudioSourceMatchingRecordingSession().
+ @CalledByNative
+ boolean isAudioConfigVerified() {
+ return audioSourceMatchesRecordingSessionRef.get() != null;
+ }
+
+ // Returns true if verifyAudioConfig() succeeds. This value is set after a specific delay when
+ // startRecording() has been called. Hence, should preferably be called in combination with
+ // stopRecording() to ensure that it has been set properly. `isAudioConfigVerified` is
+ // enabled in WebRtcAudioRecord to ensure that the returned value is valid.
+ @CalledByNative
+ boolean isAudioSourceMatchingRecordingSession() {
+ Boolean audioSourceMatchesRecordingSession = audioSourceMatchesRecordingSessionRef.get();
+ if (audioSourceMatchesRecordingSession == null) {
+ Logging.w(TAG, "Audio configuration has not yet been verified");
+ return false;
+ }
+ return audioSourceMatchesRecordingSession;
+ }
+
+ @CalledByNative
+ private boolean enableBuiltInAEC(boolean enable) {
+ Logging.d(TAG, "enableBuiltInAEC(" + enable + ")");
+ return effects.setAEC(enable);
+ }
+
+ @CalledByNative
+ private boolean enableBuiltInNS(boolean enable) {
+ Logging.d(TAG, "enableBuiltInNS(" + enable + ")");
+ return effects.setNS(enable);
+ }
+
+ @CalledByNative
+ private int initRecording(int sampleRate, int channels) {
+ Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" + channels + ")");
+ if (audioRecord != null) {
+ reportWebRtcAudioRecordInitError("InitRecording called twice without StopRecording.");
+ return -1;
+ }
+ final int bytesPerFrame = channels * getBytesPerSample(audioFormat);
+ final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
+ byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
+ if (!(byteBuffer.hasArray())) {
+ reportWebRtcAudioRecordInitError("ByteBuffer does not have backing array.");
+ return -1;
+ }
+ Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
+ emptyBytes = new byte[byteBuffer.capacity()];
+ // Rather than passing the ByteBuffer with every callback (requiring
+ // the potentially expensive GetDirectBufferAddress) we simply have the
+ // native class cache the address to the memory once.
+ nativeCacheDirectBufferAddress(nativeAudioRecord, byteBuffer);
+
+ // Get the minimum buffer size required for the successful creation of
+ // an AudioRecord object, in byte units.
+ // Note that this size doesn't guarantee a smooth recording under load.
+ final int channelConfig = channelCountToConfiguration(channels);
+ int minBufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
+ if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
+ reportWebRtcAudioRecordInitError("AudioRecord.getMinBufferSize failed: " + minBufferSize);
+ return -1;
+ }
+ Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);
+
+ // Use a larger buffer size than the minimum required when creating the
+ // AudioRecord instance to ensure smooth recording under load. It has been
+ // verified that it does not increase the actual recording latency.
+ int bufferSizeInBytes = Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
+ Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
+ try {
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
+ // Use the AudioRecord.Builder class on Android M (23) and above.
+ // Throws IllegalArgumentException.
+ audioRecord = createAudioRecordOnMOrHigher(
+ audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes);
+ audioSourceMatchesRecordingSessionRef.set(null);
+ if (preferredDevice != null) {
+ setPreferredDevice(preferredDevice);
+ }
+ } else {
+ // Use the old AudioRecord constructor for API levels below 23.
+ // Throws UnsupportedOperationException.
+ audioRecord = createAudioRecordOnLowerThanM(
+ audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes);
+ audioSourceMatchesRecordingSessionRef.set(null);
+ }
+ } catch (IllegalArgumentException | UnsupportedOperationException e) {
+ // Report of exception message is sufficient. Example: "Cannot create AudioRecord".
+ reportWebRtcAudioRecordInitError(e.getMessage());
+ releaseAudioResources();
+ return -1;
+ }
+ if (audioRecord == null || audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
+ reportWebRtcAudioRecordInitError("Creation or initialization of audio recorder failed.");
+ releaseAudioResources();
+ return -1;
+ }
+ effects.enable(audioRecord.getAudioSessionId());
+ logMainParameters();
+ logMainParametersExtended();
+ // Check number of active recording sessions. Should be zero but we have seen conflict cases
+ // and adding a log for it can help us figure out details about conflicting sessions.
+ final int numActiveRecordingSessions =
+ logRecordingConfigurations(audioRecord, false /* verifyAudioConfig */);
+ if (numActiveRecordingSessions != 0) {
+ // Log the conflict as a warning since initialization did in fact succeed. Most likely, the
+ // upcoming call to startRecording() will fail under these conditions.
+ Logging.w(
+ TAG, "Potential microphone conflict. Active sessions: " + numActiveRecordingSessions);
+ }
+ return framesPerBuffer;
+ }
+
+ /**
+ * Prefer a specific {@link AudioDeviceInfo} device for recording. Calling after recording starts
+ * is valid but may cause a temporary interruption if the audio routing changes.
+ */
+ @RequiresApi(Build.VERSION_CODES.M)
+ @TargetApi(Build.VERSION_CODES.M)
+ void setPreferredDevice(@Nullable AudioDeviceInfo preferredDevice) {
+ Logging.d(
+ TAG, "setPreferredDevice " + (preferredDevice != null ? preferredDevice.getId() : null));
+ this.preferredDevice = preferredDevice;
+ if (audioRecord != null) {
+ if (!audioRecord.setPreferredDevice(preferredDevice)) {
+ Logging.e(TAG, "setPreferredDevice failed");
+ }
+ }
+ }
+
+ @CalledByNative
+ private boolean startRecording() {
+ Logging.d(TAG, "startRecording");
+ assertTrue(audioRecord != null);
+ assertTrue(audioThread == null);
+ try {
+ audioRecord.startRecording();
+ } catch (IllegalStateException e) {
+ reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_EXCEPTION,
+ "AudioRecord.startRecording failed: " + e.getMessage());
+ return false;
+ }
+ if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
+ reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_STATE_MISMATCH,
+ "AudioRecord.startRecording failed - incorrect state: "
+ + audioRecord.getRecordingState());
+ return false;
+ }
+ audioThread = new AudioRecordThread("AudioRecordJavaThread");
+ audioThread.start();
+ scheduleLogRecordingConfigurationsTask(audioRecord);
+ return true;
+ }
+
+ @CalledByNative
+ private boolean stopRecording() {
+ Logging.d(TAG, "stopRecording");
+ assertTrue(audioThread != null);
+ if (future != null) {
+ if (!future.isDone()) {
+ // Might be needed if the client calls startRecording(), stopRecording() back-to-back.
+ future.cancel(true /* mayInterruptIfRunning */);
+ }
+ future = null;
+ }
+ audioThread.stopThread();
+ if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
+ Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
+ WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
+ }
+ audioThread = null;
+ effects.release();
+ releaseAudioResources();
+ return true;
+ }
+
+ @TargetApi(Build.VERSION_CODES.M)
+ private static AudioRecord createAudioRecordOnMOrHigher(
+ int audioSource, int sampleRate, int channelConfig, int audioFormat, int bufferSizeInBytes) {
+ Logging.d(TAG, "createAudioRecordOnMOrHigher");
+ return new AudioRecord.Builder()
+ .setAudioSource(audioSource)
+ .setAudioFormat(new AudioFormat.Builder()
+ .setEncoding(audioFormat)
+ .setSampleRate(sampleRate)
+ .setChannelMask(channelConfig)
+ .build())
+ .setBufferSizeInBytes(bufferSizeInBytes)
+ .build();
+ }
+
+ private static AudioRecord createAudioRecordOnLowerThanM(
+ int audioSource, int sampleRate, int channelConfig, int audioFormat, int bufferSizeInBytes) {
+ Logging.d(TAG, "createAudioRecordOnLowerThanM");
+ return new AudioRecord(audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes);
+ }
+
+ private void logMainParameters() {
+ Logging.d(TAG,
+ "AudioRecord: "
+ + "session ID: " + audioRecord.getAudioSessionId() + ", "
+ + "channels: " + audioRecord.getChannelCount() + ", "
+ + "sample rate: " + audioRecord.getSampleRate());
+ }
+
+ @TargetApi(Build.VERSION_CODES.M)
+ private void logMainParametersExtended() {
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
+ Logging.d(TAG,
+ "AudioRecord: "
+ // The frame count of the native AudioRecord buffer.
+ + "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
+ }
+ }
+
+ @TargetApi(Build.VERSION_CODES.N)
+ // Checks the number of active recording sessions and logs the states of all active sessions.
+ // Returns the number of active sessions. Note that this can be called on an arbitrary thread.
+ private int logRecordingConfigurations(AudioRecord audioRecord, boolean verifyAudioConfig) {
+ if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
+ Logging.w(TAG, "AudioManager#getActiveRecordingConfigurations() requires N or higher");
+ return 0;
+ }
+ if (audioRecord == null) {
+ return 0;
+ }
+
+ // Get a list of the currently active audio recording configurations of the device (can be more
+ // than one). An empty list indicates there is no recording active when queried.
+ List<AudioRecordingConfiguration> configs = audioManager.getActiveRecordingConfigurations();
+ final int numActiveRecordingSessions = configs.size();
+ Logging.d(TAG, "Number of active recording sessions: " + numActiveRecordingSessions);
+ if (numActiveRecordingSessions > 0) {
+ logActiveRecordingConfigs(audioRecord.getAudioSessionId(), configs);
+ if (verifyAudioConfig) {
+ // Run an extra check to verify that the existing audio source doing the recording (tied
+ // to the AudioRecord instance) is matching what the audio recording configuration lists
+ // as its client parameters. If these do not match, recording might work but under invalid
+ // conditions.
+ audioSourceMatchesRecordingSessionRef.set(
+ verifyAudioConfig(audioRecord.getAudioSource(), audioRecord.getAudioSessionId(),
+ audioRecord.getFormat(), audioRecord.getRoutedDevice(), configs));
+ }
+ }
+ return numActiveRecordingSessions;
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ private int channelCountToConfiguration(int channels) {
+ return (channels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
+ }
+
+ private native void nativeCacheDirectBufferAddress(
+ long nativeAudioRecordJni, ByteBuffer byteBuffer);
+ private native void nativeDataIsRecorded(
+ long nativeAudioRecordJni, int bytes, long captureTimestampNs);
+
+ // Sets all recorded samples to zero if `mute` is true, i.e., ensures that
+ // the microphone is muted.
+ public void setMicrophoneMute(boolean mute) {
+ Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
+ microphoneMute = mute;
+ }
+
+ // Releases the native AudioRecord resources.
+ private void releaseAudioResources() {
+ Logging.d(TAG, "releaseAudioResources");
+ if (audioRecord != null) {
+ audioRecord.release();
+ audioRecord = null;
+ }
+ audioSourceMatchesRecordingSessionRef.set(null);
+ }
+
+ private void reportWebRtcAudioRecordInitError(String errorMessage) {
+ Logging.e(TAG, "Init recording error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
+ logRecordingConfigurations(audioRecord, false /* verifyAudioConfig */);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioRecordInitError(errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioRecordStartError(
+ AudioRecordStartErrorCode errorCode, String errorMessage) {
+ Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
+ logRecordingConfigurations(audioRecord, false /* verifyAudioConfig */);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioRecordError(String errorMessage) {
+ Logging.e(TAG, "Run-time recording error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioRecordError(errorMessage);
+ }
+ }
+
+ private void doAudioRecordStateCallback(int audioState) {
+ Logging.d(TAG, "doAudioRecordStateCallback: " + audioStateToString(audioState));
+ if (stateCallback != null) {
+ if (audioState == WebRtcAudioRecord.AUDIO_RECORD_START) {
+ stateCallback.onWebRtcAudioRecordStart();
+ } else if (audioState == WebRtcAudioRecord.AUDIO_RECORD_STOP) {
+ stateCallback.onWebRtcAudioRecordStop();
+ } else {
+ Logging.e(TAG, "Invalid audio state");
+ }
+ }
+ }
+
+ // Mirrors the Android framework's AudioFormat.getBytesPerSample(): bits per sample / 8.
+ // The default audio data format is PCM, 16 bits per sample.
+ // Guaranteed to be supported by all devices.
+ private static int getBytesPerSample(int audioFormat) {
+ switch (audioFormat) {
+ case AudioFormat.ENCODING_PCM_8BIT:
+ return 1;
+ case AudioFormat.ENCODING_PCM_16BIT:
+ case AudioFormat.ENCODING_IEC61937:
+ case AudioFormat.ENCODING_DEFAULT:
+ return 2;
+ case AudioFormat.ENCODING_PCM_FLOAT:
+ return 4;
+ case AudioFormat.ENCODING_INVALID:
+ default:
+ throw new IllegalArgumentException("Bad audio format " + audioFormat);
+ }
+ }
+
+ // Use an ExecutorService to schedule a task after a given delay where the task consists of
+ // checking (by logging) the current status of active recording sessions.
+ private void scheduleLogRecordingConfigurationsTask(AudioRecord audioRecord) {
+ Logging.d(TAG, "scheduleLogRecordingConfigurationsTask");
+ if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
+ return;
+ }
+
+ Callable<String> callable = () -> {
+ if (this.audioRecord == audioRecord) {
+ logRecordingConfigurations(audioRecord, true /* verifyAudioConfig */);
+ } else {
+ Logging.d(TAG, "audio record has changed");
+ }
+ return "Scheduled task is done";
+ };
+
+ if (future != null && !future.isDone()) {
+ future.cancel(true /* mayInterruptIfRunning */);
+ }
+ // Schedule call to logRecordingConfigurations() from executor thread after fixed delay.
+ future = executor.schedule(callable, CHECK_REC_STATUS_DELAY_MS, TimeUnit.MILLISECONDS);
+ };
+
+ @TargetApi(Build.VERSION_CODES.N)
+ private static boolean logActiveRecordingConfigs(
+ int session, List<AudioRecordingConfiguration> configs) {
+ assertTrue(!configs.isEmpty());
+ final Iterator<AudioRecordingConfiguration> it = configs.iterator();
+ Logging.d(TAG, "AudioRecordingConfigurations: ");
+ while (it.hasNext()) {
+ final AudioRecordingConfiguration config = it.next();
+ StringBuilder conf = new StringBuilder();
+ // The audio source selected by the client.
+ final int audioSource = config.getClientAudioSource();
+ conf.append(" client audio source=")
+ .append(WebRtcAudioUtils.audioSourceToString(audioSource))
+ .append(", client session id=")
+ .append(config.getClientAudioSessionId())
+ // Compare with our own id (based on AudioRecord#getAudioSessionId()).
+ .append(" (")
+ .append(session)
+ .append(")")
+ .append("\n");
+ // Audio format at which audio is recorded on this Android device. Note that it may differ
+ // from the client application recording format (see getClientFormat()).
+ AudioFormat format = config.getFormat();
+ conf.append(" Device AudioFormat: ")
+ .append("channel count=")
+ .append(format.getChannelCount())
+ .append(", channel index mask=")
+ .append(format.getChannelIndexMask())
+ // Only AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all devices.
+ .append(", channel mask=")
+ .append(WebRtcAudioUtils.channelMaskToString(format.getChannelMask()))
+ .append(", encoding=")
+ .append(WebRtcAudioUtils.audioEncodingToString(format.getEncoding()))
+ .append(", sample rate=")
+ .append(format.getSampleRate())
+ .append("\n");
+ // Audio format at which the client application is recording audio.
+ format = config.getClientFormat();
+ conf.append(" Client AudioFormat: ")
+ .append("channel count=")
+ .append(format.getChannelCount())
+ .append(", channel index mask=")
+ .append(format.getChannelIndexMask())
+ // Only AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all devices.
+ .append(", channel mask=")
+ .append(WebRtcAudioUtils.channelMaskToString(format.getChannelMask()))
+ .append(", encoding=")
+ .append(WebRtcAudioUtils.audioEncodingToString(format.getEncoding()))
+ .append(", sample rate=")
+ .append(format.getSampleRate())
+ .append("\n");
+ // Audio input device used for this recording session.
+ final AudioDeviceInfo device = config.getAudioDevice();
+ if (device != null) {
+ assertTrue(device.isSource());
+ conf.append(" AudioDevice: ")
+ .append("type=")
+ .append(WebRtcAudioUtils.deviceTypeToString(device.getType()))
+ .append(", id=")
+ .append(device.getId());
+ }
+ Logging.d(TAG, conf.toString());
+ }
+ return true;
+ }
+
+ // Verify that the client audio configuration (device and format) matches the requested
+ // configuration (same as AudioRecord's).
+ @TargetApi(Build.VERSION_CODES.N)
+ private static boolean verifyAudioConfig(int source, int session, AudioFormat format,
+ AudioDeviceInfo device, List<AudioRecordingConfiguration> configs) {
+ assertTrue(!configs.isEmpty());
+ final Iterator<AudioRecordingConfiguration> it = configs.iterator();
+ while (it.hasNext()) {
+ final AudioRecordingConfiguration config = it.next();
+ final AudioDeviceInfo configDevice = config.getAudioDevice();
+ if (configDevice == null) {
+ continue;
+ }
+ if ((config.getClientAudioSource() == source)
+ && (config.getClientAudioSessionId() == session)
+ // Check the client format (should match the format of the AudioRecord instance).
+ && (config.getClientFormat().getEncoding() == format.getEncoding())
+ && (config.getClientFormat().getSampleRate() == format.getSampleRate())
+ && (config.getClientFormat().getChannelMask() == format.getChannelMask())
+ && (config.getClientFormat().getChannelIndexMask() == format.getChannelIndexMask())
+ // Ensure that the device format is properly configured.
+ && (config.getFormat().getEncoding() != AudioFormat.ENCODING_INVALID)
+ && (config.getFormat().getSampleRate() > 0)
+ // For the channel mask, either the position or index-based value must be valid.
+ && ((config.getFormat().getChannelMask() != AudioFormat.CHANNEL_INVALID)
+ || (config.getFormat().getChannelIndexMask() != AudioFormat.CHANNEL_INVALID))
+ && checkDeviceMatch(configDevice, device)) {
+ Logging.d(TAG, "verifyAudioConfig: PASS");
+ return true;
+ }
+ }
+ Logging.e(TAG, "verifyAudioConfig: FAILED");
+ return false;
+ }
+
+ @TargetApi(Build.VERSION_CODES.N)
+ // Returns true if device A's parameters match those of device B.
+ // TODO(henrika): can be improved by adding AudioDeviceInfo#getAddress() but it requires API 29.
+ private static boolean checkDeviceMatch(AudioDeviceInfo devA, AudioDeviceInfo devB) {
+ return ((devA.getId() == devB.getId() && (devA.getType() == devB.getType())));
+ }
+
+ private static String audioStateToString(int state) {
+ switch (state) {
+ case WebRtcAudioRecord.AUDIO_RECORD_START:
+ return "START";
+ case WebRtcAudioRecord.AUDIO_RECORD_STOP:
+ return "STOP";
+ default:
+ return "INVALID";
+ }
+ }
+
+ private static final AtomicInteger nextSchedulerId = new AtomicInteger(0);
+
+ static ScheduledExecutorService newDefaultScheduler() {
+ AtomicInteger nextThreadId = new AtomicInteger(0);
+ return Executors.newScheduledThreadPool(0, new ThreadFactory() {
+ /**
+ * Constructs a new {@code Thread}
+ */
+ @Override
+ public Thread newThread(Runnable r) {
+ Thread thread = Executors.defaultThreadFactory().newThread(r);
+ thread.setName(String.format("WebRtcAudioRecordScheduler-%s-%s",
+ nextSchedulerId.getAndIncrement(), nextThreadId.getAndIncrement()));
+ return thread;
+ }
+ });
+ }
+}
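
initRecording() above sizes the shared direct ByteBuffer for exactly 10 ms of audio and asks AudioRecord for an internal buffer at least BUFFER_SIZE_FACTOR times the platform minimum. A worked sketch of that sizing; the numeric values (48 kHz, the minimum buffer size) are illustrative assumptions, not measurements.

// Illustration of the buffer sizing performed in initRecording().
final class RecordBufferSizingSketch {
  public static void main(String[] args) {
    int sampleRate = 48000;     // Hz, e.g. from WebRtcAudioManager.getSampleRate()
    int channels = 1;           // mono capture
    int bytesPerSample = 2;     // PCM 16-bit
    int buffersPerSecond = 100; // 1000 ms / CALLBACK_BUFFER_SIZE_MS (10 ms)

    int bytesPerFrame = channels * bytesPerSample;            // 2 bytes
    int framesPerBuffer = sampleRate / buffersPerSecond;      // 480 frames = 10 ms
    int byteBufferCapacity = bytesPerFrame * framesPerBuffer; // 960 bytes per callback

    int minBufferSize = 1920; // hypothetical AudioRecord.getMinBufferSize() result
    int bufferSizeInBytes = Math.max(2 * minBufferSize, byteBufferCapacity); // 3840

    System.out.println(byteBufferCapacity + " bytes per 10 ms callback, "
        + bufferSizeInBytes + " bytes requested from AudioRecord");
  }
}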
diff --git a/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java
new file mode 100644
index 0000000000..2b34e34013
--- /dev/null
+++ b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.annotation.TargetApi;
+import android.content.Context;
+import android.media.AudioAttributes;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioTrack;
+import android.os.Build;
+import android.os.Process;
+import androidx.annotation.Nullable;
+import java.nio.ByteBuffer;
+import org.webrtc.CalledByNative;
+import org.webrtc.Logging;
+import org.webrtc.ThreadUtils;
+import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackErrorCallback;
+import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackStartErrorCode;
+import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackStateCallback;
+import org.webrtc.audio.LowLatencyAudioBufferManager;
+
+class WebRtcAudioTrack {
+ private static final String TAG = "WebRtcAudioTrackExternal";
+
+ // Default audio data format is PCM 16 bit per sample.
+ // Guaranteed to be supported by all devices.
+ private static final int BITS_PER_SAMPLE = 16;
+
+ // Requested size of each recorded buffer provided to the client.
+ private static final int CALLBACK_BUFFER_SIZE_MS = 10;
+
+ // Average number of callbacks per second.
+ private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+ // The AudioTrackThread is allowed to wait for a successful call to join(),
+ // but the wait times out after this amount of time.
+ private static final long AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS = 2000;
+
+ // By default, WebRTC creates audio tracks with a usage attribute
+ // corresponding to voice communications, such as telephony or VoIP.
+ private static final int DEFAULT_USAGE = AudioAttributes.USAGE_VOICE_COMMUNICATION;
+
+ // Indicates the AudioTrack has started playing audio.
+ private static final int AUDIO_TRACK_START = 0;
+
+ // Indicates the AudioTrack has stopped playing audio.
+ private static final int AUDIO_TRACK_STOP = 1;
+
+ private long nativeAudioTrack;
+ private final Context context;
+ private final AudioManager audioManager;
+ private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();
+
+ private ByteBuffer byteBuffer;
+
+ private @Nullable final AudioAttributes audioAttributes;
+ private @Nullable AudioTrack audioTrack;
+ private @Nullable AudioTrackThread audioThread;
+ private final VolumeLogger volumeLogger;
+
+ // Samples to be played are replaced by zeros if `speakerMute` is set to true.
+ // Can be used to ensure that the speaker is fully muted.
+ private volatile boolean speakerMute;
+ private byte[] emptyBytes;
+ private boolean useLowLatency;
+ private int initialBufferSizeInFrames;
+
+ private final @Nullable AudioTrackErrorCallback errorCallback;
+ private final @Nullable AudioTrackStateCallback stateCallback;
+
+ /**
+ * Audio thread which keeps calling AudioTrack.write() to stream audio.
+ * Data is periodically acquired from the native WebRTC layer using the
+ * nativeGetPlayoutData callback function.
+ * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+ */
+ private class AudioTrackThread extends Thread {
+ private volatile boolean keepAlive = true;
+ private LowLatencyAudioBufferManager bufferManager;
+
+ public AudioTrackThread(String name) {
+ super(name);
+ bufferManager = new LowLatencyAudioBufferManager();
+ }
+
+ @Override
+ public void run() {
+ Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+ Logging.d(TAG, "AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
+ assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
+
+ // Audio playout has started and the client is informed about it.
+ doAudioTrackStateCallback(AUDIO_TRACK_START);
+
+ // Fixed size in bytes of each 10ms block of audio data that we ask for
+ // using callbacks to the native WebRTC client.
+ final int sizeInBytes = byteBuffer.capacity();
+
+ while (keepAlive) {
+ // Get 10ms of PCM data from the native WebRTC client. Audio data is
+ // written into the common ByteBuffer using the address that was
+ // cached at construction.
+ nativeGetPlayoutData(nativeAudioTrack, sizeInBytes);
+ // Write data until all data has been written to the audio sink.
+ // Upon return, the buffer position will have been advanced to reflect
+ // the amount of data that was successfully written to the AudioTrack.
+ assertTrue(sizeInBytes <= byteBuffer.remaining());
+ if (speakerMute) {
+ byteBuffer.clear();
+ byteBuffer.put(emptyBytes);
+ byteBuffer.position(0);
+ }
+ int bytesWritten = audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
+ if (bytesWritten != sizeInBytes) {
+ Logging.e(TAG, "AudioTrack.write played invalid number of bytes: " + bytesWritten);
+ // If a write() returns a negative value, an error has occurred.
+ // Stop playing and report an error in this case.
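+ // Negative return values correspond to AudioTrack error codes such as
+ // ERROR_BAD_VALUE, ERROR_DEAD_OBJECT or ERROR_INVALID_OPERATION.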
+ if (bytesWritten < 0) {
+ keepAlive = false;
+ reportWebRtcAudioTrackError("AudioTrack.write failed: " + bytesWritten);
+ }
+ }
+ if (useLowLatency) {
+ bufferManager.maybeAdjustBufferSize(audioTrack);
+ }
+ // The byte buffer must be rewound since byteBuffer.position() is
+ // increased at each call to AudioTrack.write(). If we don't do this,
+ // the next call to AudioTrack.write() will fail.
+ byteBuffer.rewind();
+
+ // TODO(henrika): it is possible to create a delay estimate here by
+ // counting number of written frames and subtracting the result from
+ // audioTrack.getPlaybackHeadPosition().
+ }
+ }
+
+ // Stops the inner thread loop; AudioTrack.stop() is then invoked from stopPlayout().
+ // Does not block the calling thread.
+ public void stopThread() {
+ Logging.d(TAG, "stopThread");
+ keepAlive = false;
+ }
+ }
+
+ @CalledByNative
+ WebRtcAudioTrack(Context context, AudioManager audioManager) {
+ this(context, audioManager, null /* audioAttributes */, null /* errorCallback */,
+ null /* stateCallback */, false /* useLowLatency */, true /* enableVolumeLogger */);
+ }
+
+ WebRtcAudioTrack(Context context, AudioManager audioManager,
+ @Nullable AudioAttributes audioAttributes, @Nullable AudioTrackErrorCallback errorCallback,
+ @Nullable AudioTrackStateCallback stateCallback, boolean useLowLatency,
+ boolean enableVolumeLogger) {
+ threadChecker.detachThread();
+ this.context = context;
+ this.audioManager = audioManager;
+ this.audioAttributes = audioAttributes;
+ this.errorCallback = errorCallback;
+ this.stateCallback = stateCallback;
+ this.volumeLogger = enableVolumeLogger ? new VolumeLogger(audioManager) : null;
+ this.useLowLatency = useLowLatency;
+ Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+ }
+
+ @CalledByNative
+ public void setNativeAudioTrack(long nativeAudioTrack) {
+ this.nativeAudioTrack = nativeAudioTrack;
+ }
+
+ @CalledByNative
+ private int initPlayout(int sampleRate, int channels, double bufferSizeFactor) {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG,
+ "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels
+ + ", bufferSizeFactor=" + bufferSizeFactor + ")");
+ final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
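+ // Example: for 48000 Hz stereo, the direct buffer below holds 480 frames,
+ // i.e. 2 * 2 * (48000 / 100) = 1920 bytes of PCM data per 10 ms callback.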
+ byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
+ Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
+ emptyBytes = new byte[byteBuffer.capacity()];
+ // Rather than passing the ByteBuffer with every callback (requiring
+ // the potentially expensive GetDirectBufferAddress) we simply have the
+ // native class cache the address to the memory once.
+ nativeCacheDirectBufferAddress(nativeAudioTrack, byteBuffer);
+
+ // Get the minimum buffer size required for successful creation of an
+ // AudioTrack object in MODE_STREAM mode.
+ // Note that this size doesn't guarantee a smooth playback under load.
+ final int channelConfig = channelCountToConfiguration(channels);
+ final int minBufferSizeInBytes = (int) (AudioTrack.getMinBufferSize(sampleRate, channelConfig,
+ AudioFormat.ENCODING_PCM_16BIT)
+ * bufferSizeFactor);
+ Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
+ // For the streaming mode, data must be written to the audio sink in
+ // chunks of size (given by byteBuffer.capacity()) less than or equal
+ // to the total buffer size `minBufferSizeInBytes`. But, we have seen
+ // reports of "getMinBufferSize(): error querying hardware". Hence, it
+ // can happen that `minBufferSizeInBytes` contains an invalid value.
+ if (minBufferSizeInBytes < byteBuffer.capacity()) {
+ reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
+ return -1;
+ }
+
+ // Don't use low-latency mode when a bufferSizeFactor > 1 is used. When bufferSizeFactor > 1
+ // we want to use a larger buffer to prevent underruns. However, low-latency mode would
+ // decrease the buffer size, which makes the bufferSizeFactor have no effect.
+ if (bufferSizeFactor > 1.0) {
+ useLowLatency = false;
+ }
+
+ // Ensure that the previous audio session was stopped correctly before trying
+ // to create a new AudioTrack.
+ if (audioTrack != null) {
+ reportWebRtcAudioTrackInitError("Conflict with existing AudioTrack.");
+ return -1;
+ }
+ try {
+ // Create an AudioTrack object and initialize its associated audio buffer.
+ // The size of this buffer determines how long an AudioTrack can play
+ // before running out of data.
+ if (useLowLatency && Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
+ // On API level 26 or higher, we can use a low latency mode.
+ audioTrack = createAudioTrackOnOreoOrHigher(
+ sampleRate, channelConfig, minBufferSizeInBytes, audioAttributes);
+ } else {
+ // As we are on API level 21 or higher, it is possible to use a special AudioTrack
+ // constructor that uses AudioAttributes and AudioFormat as input. It allows us to
+ // supersede the notion of stream types for defining the behavior of audio playback,
+ // and to allow certain platforms or routing policies to use this information for more
+ // refined volume or routing decisions.
+ audioTrack = createAudioTrackBeforeOreo(
+ sampleRate, channelConfig, minBufferSizeInBytes, audioAttributes);
+ }
+ } catch (IllegalArgumentException e) {
+ reportWebRtcAudioTrackInitError(e.getMessage());
+ releaseAudioResources();
+ return -1;
+ }
+
+ // It can happen that an AudioTrack is created but it was not successfully
+ // initialized upon creation. Seems to be the case e.g. when the maximum
+ // number of globally available audio tracks is exceeded.
+ if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
+ reportWebRtcAudioTrackInitError("Initialization of audio track failed.");
+ releaseAudioResources();
+ return -1;
+ }
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
+ initialBufferSizeInFrames = audioTrack.getBufferSizeInFrames();
+ } else {
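+ // getBufferSizeInFrames() requires API level 23; use -1 to indicate that the initial size is unknown.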
+ initialBufferSizeInFrames = -1;
+ }
+ logMainParameters();
+ logMainParametersExtended();
+ return minBufferSizeInBytes;
+ }
+
+ @CalledByNative
+ private boolean startPlayout() {
+ threadChecker.checkIsOnValidThread();
+ if (volumeLogger != null) {
+ volumeLogger.start();
+ }
+ Logging.d(TAG, "startPlayout");
+ assertTrue(audioTrack != null);
+ assertTrue(audioThread == null);
+
+ // Starts playing an audio track.
+ try {
+ audioTrack.play();
+ } catch (IllegalStateException e) {
+ reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_EXCEPTION,
+ "AudioTrack.play failed: " + e.getMessage());
+ releaseAudioResources();
+ return false;
+ }
+ if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
+ reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_STATE_MISMATCH,
+ "AudioTrack.play failed - incorrect state :" + audioTrack.getPlayState());
+ releaseAudioResources();
+ return false;
+ }
+
+ // Create and start new high-priority thread which calls AudioTrack.write()
+ // and where we also call the native nativeGetPlayoutData() callback to
+ // request decoded audio from WebRTC.
+ audioThread = new AudioTrackThread("AudioTrackJavaThread");
+ audioThread.start();
+ return true;
+ }
+
+ @CalledByNative
+ private boolean stopPlayout() {
+ threadChecker.checkIsOnValidThread();
+ if (volumeLogger != null) {
+ volumeLogger.stop();
+ }
+ Logging.d(TAG, "stopPlayout");
+ assertTrue(audioThread != null);
+ logUnderrunCount();
+ audioThread.stopThread();
+
+ Logging.d(TAG, "Stopping the AudioTrackThread...");
+ audioThread.interrupt();
+ if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
+ Logging.e(TAG, "Join of AudioTrackThread timed out.");
+ WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
+ }
+ Logging.d(TAG, "AudioTrackThread has now been stopped.");
+ audioThread = null;
+ if (audioTrack != null) {
+ Logging.d(TAG, "Calling AudioTrack.stop...");
+ try {
+ audioTrack.stop();
+ Logging.d(TAG, "AudioTrack.stop is done.");
+ doAudioTrackStateCallback(AUDIO_TRACK_STOP);
+ } catch (IllegalStateException e) {
+ Logging.e(TAG, "AudioTrack.stop failed: " + e.getMessage());
+ }
+ }
+ releaseAudioResources();
+ return true;
+ }
+
+ // Get max possible volume index for a phone call audio stream.
+ @CalledByNative
+ private int getStreamMaxVolume() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "getStreamMaxVolume");
+ return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
+ }
+
+ // Set current volume level for a phone call audio stream.
+ @CalledByNative
+ private boolean setStreamVolume(int volume) {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "setStreamVolume(" + volume + ")");
+ if (audioManager.isVolumeFixed()) {
+ Logging.e(TAG, "The device implements a fixed volume policy.");
+ return false;
+ }
+ audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
+ return true;
+ }
+
+ /** Get current volume level for a phone call audio stream. */
+ @CalledByNative
+ private int getStreamVolume() {
+ threadChecker.checkIsOnValidThread();
+ Logging.d(TAG, "getStreamVolume");
+ return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
+ }
+
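+ // Returns the underrun count reported by the AudioTrack, -1 if no track has been created, or
+ // -2 on API levels below 24 where AudioTrack.getUnderrunCount() is unavailable.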
+ @CalledByNative
+ private int GetPlayoutUnderrunCount() {
+ if (Build.VERSION.SDK_INT >= 24) {
+ if (audioTrack != null) {
+ return audioTrack.getUnderrunCount();
+ } else {
+ return -1;
+ }
+ } else {
+ return -2;
+ }
+ }
+
+ private void logMainParameters() {
+ Logging.d(TAG,
+ "AudioTrack: "
+ + "session ID: " + audioTrack.getAudioSessionId() + ", "
+ + "channels: " + audioTrack.getChannelCount() + ", "
+ + "sample rate: " + audioTrack.getSampleRate()
+ + ", "
+ // Gain (>=1.0) expressed as linear multiplier on sample values.
+ + "max gain: " + AudioTrack.getMaxVolume());
+ }
+
+ private static void logNativeOutputSampleRate(int requestedSampleRateInHz) {
+ final int nativeOutputSampleRate =
+ AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
+ Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
+ if (requestedSampleRateInHz != nativeOutputSampleRate) {
+ Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
+ }
+ }
+
+ private static AudioAttributes getAudioAttributes(@Nullable AudioAttributes overrideAttributes) {
+ AudioAttributes.Builder attributesBuilder =
+ new AudioAttributes.Builder()
+ .setUsage(DEFAULT_USAGE)
+ .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH);
+
+ if (overrideAttributes != null) {
+ if (overrideAttributes.getUsage() != AudioAttributes.USAGE_UNKNOWN) {
+ attributesBuilder.setUsage(overrideAttributes.getUsage());
+ }
+ if (overrideAttributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN) {
+ attributesBuilder.setContentType(overrideAttributes.getContentType());
+ }
+
+ attributesBuilder.setFlags(overrideAttributes.getFlags());
+
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
+ attributesBuilder = applyAttributesOnQOrHigher(attributesBuilder, overrideAttributes);
+ }
+ }
+ return attributesBuilder.build();
+ }
+
+ // Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
+ // It allows certain platforms or routing policies to use this information for more
+ // refined volume or routing decisions.
+ private static AudioTrack createAudioTrackBeforeOreo(int sampleRateInHz, int channelConfig,
+ int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) {
+ Logging.d(TAG, "createAudioTrackBeforeOreo");
+ logNativeOutputSampleRate(sampleRateInHz);
+
+ // Create an audio track where the audio usage is for VoIP and the content type is speech.
+ return new AudioTrack(getAudioAttributes(overrideAttributes),
+ new AudioFormat.Builder()
+ .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
+ .setSampleRate(sampleRateInHz)
+ .setChannelMask(channelConfig)
+ .build(),
+ bufferSizeInBytes, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
+ }
+
+ // Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
+ // Use the low-latency mode to improve audio latency. Note that the low-latency mode may
+ // prevent effects (such as AEC) from working. Even when AEC keeps working, the delay changes
+ // introduced by low-latency mode during a call will cause the AEC to perform worse.
+ // The behavior of the low-latency mode may be device dependent; use at your own risk.
+ @TargetApi(Build.VERSION_CODES.O)
+ private static AudioTrack createAudioTrackOnOreoOrHigher(int sampleRateInHz, int channelConfig,
+ int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) {
+ Logging.d(TAG, "createAudioTrackOnOreoOrHigher");
+ logNativeOutputSampleRate(sampleRateInHz);
+
+ // Create an audio track where the audio usage is for VoIP and the content type is speech.
+ return new AudioTrack.Builder()
+ .setAudioAttributes(getAudioAttributes(overrideAttributes))
+ .setAudioFormat(new AudioFormat.Builder()
+ .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
+ .setSampleRate(sampleRateInHz)
+ .setChannelMask(channelConfig)
+ .build())
+ .setBufferSizeInBytes(bufferSizeInBytes)
+ .setPerformanceMode(AudioTrack.PERFORMANCE_MODE_LOW_LATENCY)
+ .setTransferMode(AudioTrack.MODE_STREAM)
+ .setSessionId(AudioManager.AUDIO_SESSION_ID_GENERATE)
+ .build();
+ }
+
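+ // On API level 29 (Q) and higher, also forward the allowed capture policy (e.g.
+ // AudioAttributes.ALLOW_CAPTURE_BY_ALL or ALLOW_CAPTURE_BY_NONE) from the override attributes.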
+ @TargetApi(Build.VERSION_CODES.Q)
+ private static AudioAttributes.Builder applyAttributesOnQOrHigher(
+ AudioAttributes.Builder builder, AudioAttributes overrideAttributes) {
+ return builder.setAllowedCapturePolicy(overrideAttributes.getAllowedCapturePolicy());
+ }
+
+ private void logBufferSizeInFrames() {
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
+ Logging.d(TAG,
+ "AudioTrack: "
+ // The effective size of the AudioTrack buffer that the app writes to.
+ + "buffer size in frames: " + audioTrack.getBufferSizeInFrames());
+ }
+ }
+
+ @CalledByNative
+ private int getBufferSizeInFrames() {
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
+ return audioTrack.getBufferSizeInFrames();
+ }
+ return -1;
+ }
+
+ @CalledByNative
+ private int getInitialBufferSizeInFrames() {
+ return initialBufferSizeInFrames;
+ }
+
+ private void logBufferCapacityInFrames() {
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
+ Logging.d(TAG,
+ "AudioTrack: "
+ // Maximum size of the AudioTrack buffer in frames.
+ + "buffer capacity in frames: " + audioTrack.getBufferCapacityInFrames());
+ }
+ }
+
+ private void logMainParametersExtended() {
+ logBufferSizeInFrames();
+ logBufferCapacityInFrames();
+ }
+
+ // Prints the number of underrun occurrences in the application-level write
+ // buffer since the AudioTrack was created. An underrun occurs if the app does
+ // not write audio data quickly enough, causing the buffer to underflow and a
+ // potential audio glitch.
+ // TODO(henrika): keep track of this value in the field and possibly add new
+ // UMA stat if needed.
+ private void logUnderrunCount() {
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
+ Logging.d(TAG, "underrun count: " + audioTrack.getUnderrunCount());
+ }
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
+ private static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new AssertionError("Expected condition to be true");
+ }
+ }
+
+ private int channelCountToConfiguration(int channels) {
+ return (channels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
+ }
+
+ private static native void nativeCacheDirectBufferAddress(
+ long nativeAudioTrackJni, ByteBuffer byteBuffer);
+ private static native void nativeGetPlayoutData(long nativeAudioTrackJni, int bytes);
+
+ // Sets all samples to be played out to zero if `mute` is true, i.e.,
+ // ensures that the speaker is muted.
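+ // The volatile flag is read by AudioTrackThread before each 10 ms write. Applications are
+ // assumed to reach this via JavaAudioDeviceModule.setSpeakerMute().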
+ public void setSpeakerMute(boolean mute) {
+ Logging.w(TAG, "setSpeakerMute(" + mute + ")");
+ speakerMute = mute;
+ }
+
+ // Releases the native AudioTrack resources.
+ private void releaseAudioResources() {
+ Logging.d(TAG, "releaseAudioResources");
+ if (audioTrack != null) {
+ audioTrack.release();
+ audioTrack = null;
+ }
+ }
+
+ private void reportWebRtcAudioTrackInitError(String errorMessage) {
+ Logging.e(TAG, "Init playout error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioTrackInitError(errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioTrackStartError(
+ AudioTrackStartErrorCode errorCode, String errorMessage) {
+ Logging.e(TAG, "Start playout error: " + errorCode + ". " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioTrackStartError(errorCode, errorMessage);
+ }
+ }
+
+ private void reportWebRtcAudioTrackError(String errorMessage) {
+ Logging.e(TAG, "Run-time playback error: " + errorMessage);
+ WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
+ if (errorCallback != null) {
+ errorCallback.onWebRtcAudioTrackError(errorMessage);
+ }
+ }
+
+ private void doAudioTrackStateCallback(int audioState) {
+ Logging.d(TAG, "doAudioTrackStateCallback: " + audioState);
+ if (stateCallback != null) {
+ if (audioState == WebRtcAudioTrack.AUDIO_TRACK_START) {
+ stateCallback.onWebRtcAudioTrackStart();
+ } else if (audioState == WebRtcAudioTrack.AUDIO_TRACK_STOP) {
+ stateCallback.onWebRtcAudioTrackStop();
+ } else {
+ Logging.e(TAG, "Invalid audio state");
+ }
+ }
+ }
+}
diff --git a/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java
new file mode 100644
index 0000000000..7b4b809ab1
--- /dev/null
+++ b/third_party/libwebrtc/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import static android.media.AudioManager.MODE_IN_CALL;
+import static android.media.AudioManager.MODE_IN_COMMUNICATION;
+import static android.media.AudioManager.MODE_NORMAL;
+import static android.media.AudioManager.MODE_RINGTONE;
+
+import android.annotation.SuppressLint;
+import android.annotation.TargetApi;
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioDeviceInfo;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.MediaRecorder.AudioSource;
+import android.os.Build;
+import java.lang.Thread;
+import java.util.Arrays;
+import org.webrtc.Logging;
+
+final class WebRtcAudioUtils {
+ private static final String TAG = "WebRtcAudioUtilsExternal";
+
+ // Helper method for building a string of thread information.
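+ // Example output: "@[name=AudioTrackJavaThread, id=911]".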
+ public static String getThreadInfo() {
+ return "@[name=" + Thread.currentThread().getName() + ", id=" + Thread.currentThread().getId()
+ + "]";
+ }
+
+ // Returns true if we're running on emulator.
+ public static boolean runningOnEmulator() {
+ return Build.HARDWARE.equals("goldfish") && Build.BRAND.startsWith("generic_");
+ }
+
+ // Information about the current build, taken from system properties.
+ static void logDeviceInfo(String tag) {
+ Logging.d(tag,
+ "Android SDK: " + Build.VERSION.SDK_INT + ", "
+ + "Release: " + Build.VERSION.RELEASE + ", "
+ + "Brand: " + Build.BRAND + ", "
+ + "Device: " + Build.DEVICE + ", "
+ + "Id: " + Build.ID + ", "
+ + "Hardware: " + Build.HARDWARE + ", "
+ + "Manufacturer: " + Build.MANUFACTURER + ", "
+ + "Model: " + Build.MODEL + ", "
+ + "Product: " + Build.PRODUCT);
+ }
+
+ // Logs information about the current audio state. The idea is to call this
+ // method when errors are detected to log under what conditions the error
+ // occurred. Hopefully it will provide clues to what might be the root cause.
+ static void logAudioState(String tag, Context context, AudioManager audioManager) {
+ logDeviceInfo(tag);
+ logAudioStateBasic(tag, context, audioManager);
+ logAudioStateVolume(tag, audioManager);
+ logAudioDeviceInfo(tag, audioManager);
+ }
+
+ // Converts AudioDeviceInfo types to local string representation.
+ static String deviceTypeToString(int type) {
+ switch (type) {
+ case AudioDeviceInfo.TYPE_UNKNOWN:
+ return "TYPE_UNKNOWN";
+ case AudioDeviceInfo.TYPE_BUILTIN_EARPIECE:
+ return "TYPE_BUILTIN_EARPIECE";
+ case AudioDeviceInfo.TYPE_BUILTIN_SPEAKER:
+ return "TYPE_BUILTIN_SPEAKER";
+ case AudioDeviceInfo.TYPE_WIRED_HEADSET:
+ return "TYPE_WIRED_HEADSET";
+ case AudioDeviceInfo.TYPE_WIRED_HEADPHONES:
+ return "TYPE_WIRED_HEADPHONES";
+ case AudioDeviceInfo.TYPE_LINE_ANALOG:
+ return "TYPE_LINE_ANALOG";
+ case AudioDeviceInfo.TYPE_LINE_DIGITAL:
+ return "TYPE_LINE_DIGITAL";
+ case AudioDeviceInfo.TYPE_BLUETOOTH_SCO:
+ return "TYPE_BLUETOOTH_SCO";
+ case AudioDeviceInfo.TYPE_BLUETOOTH_A2DP:
+ return "TYPE_BLUETOOTH_A2DP";
+ case AudioDeviceInfo.TYPE_HDMI:
+ return "TYPE_HDMI";
+ case AudioDeviceInfo.TYPE_HDMI_ARC:
+ return "TYPE_HDMI_ARC";
+ case AudioDeviceInfo.TYPE_USB_DEVICE:
+ return "TYPE_USB_DEVICE";
+ case AudioDeviceInfo.TYPE_USB_ACCESSORY:
+ return "TYPE_USB_ACCESSORY";
+ case AudioDeviceInfo.TYPE_DOCK:
+ return "TYPE_DOCK";
+ case AudioDeviceInfo.TYPE_FM:
+ return "TYPE_FM";
+ case AudioDeviceInfo.TYPE_BUILTIN_MIC:
+ return "TYPE_BUILTIN_MIC";
+ case AudioDeviceInfo.TYPE_FM_TUNER:
+ return "TYPE_FM_TUNER";
+ case AudioDeviceInfo.TYPE_TV_TUNER:
+ return "TYPE_TV_TUNER";
+ case AudioDeviceInfo.TYPE_TELEPHONY:
+ return "TYPE_TELEPHONY";
+ case AudioDeviceInfo.TYPE_AUX_LINE:
+ return "TYPE_AUX_LINE";
+ case AudioDeviceInfo.TYPE_IP:
+ return "TYPE_IP";
+ case AudioDeviceInfo.TYPE_BUS:
+ return "TYPE_BUS";
+ case AudioDeviceInfo.TYPE_USB_HEADSET:
+ return "TYPE_USB_HEADSET";
+ default:
+ return "TYPE_UNKNOWN";
+ }
+ }
+
+ @TargetApi(Build.VERSION_CODES.N)
+ public static String audioSourceToString(int source) {
+ // AudioSource.VOICE_PERFORMANCE requires API level 29. Use a local define instead.
+ final int VOICE_PERFORMANCE = 10;
+ switch (source) {
+ case AudioSource.DEFAULT:
+ return "DEFAULT";
+ case AudioSource.MIC:
+ return "MIC";
+ case AudioSource.VOICE_UPLINK:
+ return "VOICE_UPLINK";
+ case AudioSource.VOICE_DOWNLINK:
+ return "VOICE_DOWNLINK";
+ case AudioSource.VOICE_CALL:
+ return "VOICE_CALL";
+ case AudioSource.CAMCORDER:
+ return "CAMCORDER";
+ case AudioSource.VOICE_RECOGNITION:
+ return "VOICE_RECOGNITION";
+ case AudioSource.VOICE_COMMUNICATION:
+ return "VOICE_COMMUNICATION";
+ case AudioSource.UNPROCESSED:
+ return "UNPROCESSED";
+ case VOICE_PERFORMANCE:
+ return "VOICE_PERFORMANCE";
+ default:
+ return "INVALID";
+ }
+ }
+
+ public static String channelMaskToString(int mask) {
+ // For input or AudioRecord, the mask should be AudioFormat#CHANNEL_IN_MONO or
+ // AudioFormat#CHANNEL_IN_STEREO. AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all
+ // devices.
+ switch (mask) {
+ case AudioFormat.CHANNEL_IN_STEREO:
+ return "IN_STEREO";
+ case AudioFormat.CHANNEL_IN_MONO:
+ return "IN_MONO";
+ default:
+ return "INVALID";
+ }
+ }
+
+ @TargetApi(Build.VERSION_CODES.N)
+ public static String audioEncodingToString(int enc) {
+ switch (enc) {
+ case AudioFormat.ENCODING_INVALID:
+ return "INVALID";
+ case AudioFormat.ENCODING_PCM_16BIT:
+ return "PCM_16BIT";
+ case AudioFormat.ENCODING_PCM_8BIT:
+ return "PCM_8BIT";
+ case AudioFormat.ENCODING_PCM_FLOAT:
+ return "PCM_FLOAT";
+ case AudioFormat.ENCODING_AC3:
+ return "AC3";
+ case AudioFormat.ENCODING_E_AC3:
+ return "AC3";
+ case AudioFormat.ENCODING_DTS:
+ return "DTS";
+ case AudioFormat.ENCODING_DTS_HD:
+ return "DTS_HD";
+ case AudioFormat.ENCODING_MP3:
+ return "MP3";
+ default:
+ return "Invalid encoding: " + enc;
+ }
+ }
+
+ // Reports basic audio statistics.
+ private static void logAudioStateBasic(String tag, Context context, AudioManager audioManager) {
+ Logging.d(tag,
+ "Audio State: "
+ + "audio mode: " + modeToString(audioManager.getMode()) + ", "
+ + "has mic: " + hasMicrophone(context) + ", "
+ + "mic muted: " + audioManager.isMicrophoneMute() + ", "
+ + "music active: " + audioManager.isMusicActive() + ", "
+ + "speakerphone: " + audioManager.isSpeakerphoneOn() + ", "
+ + "BT SCO: " + audioManager.isBluetoothScoOn());
+ }
+
+ // Adds volume information for all possible stream types.
+ private static void logAudioStateVolume(String tag, AudioManager audioManager) {
+ final int[] streams = {AudioManager.STREAM_VOICE_CALL, AudioManager.STREAM_MUSIC,
+ AudioManager.STREAM_RING, AudioManager.STREAM_ALARM, AudioManager.STREAM_NOTIFICATION,
+ AudioManager.STREAM_SYSTEM};
+ Logging.d(tag, "Audio State: ");
+ // Some devices may not have volume controls and might use a fixed volume.
+ boolean fixedVolume = audioManager.isVolumeFixed();
+ Logging.d(tag, " fixed volume=" + fixedVolume);
+ if (!fixedVolume) {
+ for (int stream : streams) {
+ StringBuilder info = new StringBuilder();
+ info.append(" " + streamTypeToString(stream) + ": ");
+ info.append("volume=").append(audioManager.getStreamVolume(stream));
+ info.append(", max=").append(audioManager.getStreamMaxVolume(stream));
+ logIsStreamMute(tag, audioManager, stream, info);
+ Logging.d(tag, info.toString());
+ }
+ }
+ }
+
+ private static void logIsStreamMute(
+ String tag, AudioManager audioManager, int stream, StringBuilder info) {
+ if (Build.VERSION.SDK_INT >= 23) {
+ info.append(", muted=").append(audioManager.isStreamMute(stream));
+ }
+ }
+
+ // Moz linting complains even though AudioManager.GET_DEVICES_ALL is
+ // listed in the docs here:
+ // https://developer.android.com/reference/android/media/AudioManager#GET_DEVICES_ALL
+ @SuppressLint("WrongConstant")
+ private static void logAudioDeviceInfo(String tag, AudioManager audioManager) {
+ if (Build.VERSION.SDK_INT < 23) {
+ return;
+ }
+ final AudioDeviceInfo[] devices = audioManager.getDevices(AudioManager.GET_DEVICES_ALL);
+ if (devices.length == 0) {
+ return;
+ }
+ Logging.d(tag, "Audio Devices: ");
+ for (AudioDeviceInfo device : devices) {
+ StringBuilder info = new StringBuilder();
+ info.append(" ").append(deviceTypeToString(device.getType()));
+ info.append(device.isSource() ? "(in): " : "(out): ");
+ // An empty array indicates that the device supports arbitrary channel counts.
+ if (device.getChannelCounts().length > 0) {
+ info.append("channels=").append(Arrays.toString(device.getChannelCounts()));
+ info.append(", ");
+ }
+ if (device.getEncodings().length > 0) {
+ // Examples: ENCODING_PCM_16BIT = 2, ENCODING_PCM_FLOAT = 4.
+ info.append("encodings=").append(Arrays.toString(device.getEncodings()));
+ info.append(", ");
+ }
+ if (device.getSampleRates().length > 0) {
+ info.append("sample rates=").append(Arrays.toString(device.getSampleRates()));
+ info.append(", ");
+ }
+ info.append("id=").append(device.getId());
+ Logging.d(tag, info.toString());
+ }
+ }
+
+ // Converts media.AudioManager modes into local string representation.
+ static String modeToString(int mode) {
+ switch (mode) {
+ case MODE_IN_CALL:
+ return "MODE_IN_CALL";
+ case MODE_IN_COMMUNICATION:
+ return "MODE_IN_COMMUNICATION";
+ case MODE_NORMAL:
+ return "MODE_NORMAL";
+ case MODE_RINGTONE:
+ return "MODE_RINGTONE";
+ default:
+ return "MODE_INVALID";
+ }
+ }
+
+ private static String streamTypeToString(int stream) {
+ switch (stream) {
+ case AudioManager.STREAM_VOICE_CALL:
+ return "STREAM_VOICE_CALL";
+ case AudioManager.STREAM_MUSIC:
+ return "STREAM_MUSIC";
+ case AudioManager.STREAM_RING:
+ return "STREAM_RING";
+ case AudioManager.STREAM_ALARM:
+ return "STREAM_ALARM";
+ case AudioManager.STREAM_NOTIFICATION:
+ return "STREAM_NOTIFICATION";
+ case AudioManager.STREAM_SYSTEM:
+ return "STREAM_SYSTEM";
+ default:
+ return "STREAM_INVALID";
+ }
+ }
+
+ // Returns true if the device can record audio via a microphone.
+ private static boolean hasMicrophone(Context context) {
+ return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_MICROPHONE);
+ }
+}