author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit    26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree      f435a8308119effd964b339f76abb83a57c29483 /third_party/rust/cubeb-coreaudio/src/backend
parent    Initial commit. (diff)
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/cubeb-coreaudio/src/backend')
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/aggregate_device.rs          691
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/auto_release.rs                77
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/buffer_manager.rs             355
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/device_property.rs            360
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/mixer.rs                       492
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/mod.rs                        4423
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/resampler.rs                    84
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/aggregate_device.rs     400
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/api.rs                  1663
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/backlog.rs                36
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/device_change.rs         885
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/device_property.rs       473
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/interfaces.rs           1215
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/manual.rs                614
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/mod.rs                    12
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/parallel.rs              572
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/tone.rs                  215
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/tests/utils.rs                1247
-rw-r--r--  third_party/rust/cubeb-coreaudio/src/backend/utils.rs                       107
19 files changed, 13921 insertions, 0 deletions
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/aggregate_device.rs b/third_party/rust/cubeb-coreaudio/src/backend/aggregate_device.rs
new file mode 100644
index 0000000000..2738631b87
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/aggregate_device.rs
@@ -0,0 +1,691 @@
+use super::*;
+use std::time::{SystemTime, UNIX_EPOCH};
+
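+// Value written to kAudioSubDevicePropertyDriftCompensation (see
+// activate_clock_drift_compensation below) to enable drift compensation on a
+// sub-device of the aggregate device.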
+pub const DRIFT_COMPENSATION: u32 = 1;
+
+#[derive(Debug)]
+pub struct AggregateDevice {
+ plugin_id: AudioObjectID,
+ device_id: AudioObjectID,
+ // For logging only
+ input_id: AudioObjectID,
+ output_id: AudioObjectID,
+}
+
+#[derive(Debug)]
+pub enum Error {
+ OS(OSStatus),
+ Timeout(std::time::Duration),
+ LessThan2Devices(usize),
+}
+
+impl From<OSStatus> for Error {
+ fn from(status: OSStatus) -> Self {
+ Error::OS(status)
+ }
+}
+
+impl From<std::time::Duration> for Error {
+ fn from(duration: std::time::Duration) -> Self {
+ Error::Timeout(duration)
+ }
+}
+
+impl From<usize> for Error {
+ fn from(number: usize) -> Self {
+ Error::LessThan2Devices(number)
+ }
+}
+
+impl std::fmt::Display for Error {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Error::OS(status) => write!(f, "OSStatus({})", status),
+ Error::Timeout(duration) => write!(f, "Timeout({:?})", duration),
+ Error::LessThan2Devices(number) => write!(f, "LessThan2Devices({} only)", number),
+ }
+ }
+}
+
+impl AggregateDevice {
+ // Aggregate Device is a virtual audio interface which utilizes inputs and outputs
+ // of one or more physical audio interfaces. It is possible to use the clock of
+ // one of the devices as a master clock for all the combined devices and enable
+ // drift compensation for the devices that are not designated clock master.
+ //
+ // Creating a new aggregate device programmatically requires [0][1]:
+ // 1. Locate the base plug-in ("com.apple.audio.CoreAudio")
+ // 2. Create a dictionary that describes the aggregate device
+ // (don't add the sub-devices at this step; doing so is prone to fail [0])
+ // 3. Ask the base plug-in to create the aggregate device (blank)
+ // 4. Add the array of sub-devices.
+ // 5. Set the master device (1st output device in our case)
+ // 6. Enable drift compensation for the non-master devices
+ //
+ // [0] https://lists.apple.com/archives/coreaudio-api/2006/Apr/msg00092.html
+ // [1] https://lists.apple.com/archives/coreaudio-api/2005/Jul/msg00150.html
+ // [2] CoreAudio.framework/Headers/AudioHardware.h
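+ //
+ // A minimal usage sketch of the steps above (illustrative only; assumes
+ // `input_id` and `output_id` are valid, distinct CoreAudio device ids):
+ //
+ //     let aggregate = AggregateDevice::new(input_id, output_id)?;
+ //     let id = aggregate.get_device_id();
+ //     // Dropping `aggregate` destroys the underlying aggregate device.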
+ pub fn new(
+ input_id: AudioObjectID,
+ output_id: AudioObjectID,
+ ) -> std::result::Result<Self, Error> {
+ let plugin_id = Self::get_system_plugin_id()?;
+ let device_id = Self::create_blank_device_sync(plugin_id)?;
+
+ let mut cleanup = finally(|| {
+ let r = Self::destroy_device(plugin_id, device_id);
+ assert!(r.is_ok());
+ });
+
+ Self::set_sub_devices_sync(device_id, input_id, output_id)?;
+ Self::set_master_device(device_id, output_id)?;
+ Self::activate_clock_drift_compensation(device_id)?;
+ Self::workaround_for_airpod(device_id, input_id, output_id)?;
+
+ cleanup.dismiss();
+
+ cubeb_log!(
+ "Add devices input {} and output {} into an aggregate device {}",
+ input_id,
+ output_id,
+ device_id
+ );
+ Ok(Self {
+ plugin_id,
+ device_id,
+ input_id,
+ output_id,
+ })
+ }
+
+ pub fn get_device_id(&self) -> AudioObjectID {
+ self.device_id
+ }
+
+ // The following APIs are set to `pub` for testing purposes.
+ pub fn get_system_plugin_id() -> std::result::Result<AudioObjectID, Error> {
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioHardwarePropertyPlugInForBundleID,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let mut size: usize = 0;
+ let status =
+ audio_object_get_property_data_size(kAudioObjectSystemObject, &address, &mut size);
+ if status != NO_ERR {
+ return Err(Error::from(status));
+ }
+ assert_ne!(size, 0);
+
+ let mut plugin_id = kAudioObjectUnknown;
+ let mut in_bundle_ref = cfstringref_from_static_string("com.apple.audio.CoreAudio");
+ let mut translation_value = AudioValueTranslation {
+ mInputData: &mut in_bundle_ref as *mut CFStringRef as *mut c_void,
+ mInputDataSize: mem::size_of::<CFStringRef>() as u32,
+ mOutputData: &mut plugin_id as *mut AudioObjectID as *mut c_void,
+ mOutputDataSize: mem::size_of::<AudioObjectID>() as u32,
+ };
+ assert_eq!(size, mem::size_of_val(&translation_value));
+
+ let status = audio_object_get_property_data(
+ kAudioObjectSystemObject,
+ &address,
+ &mut size,
+ &mut translation_value,
+ );
+ unsafe {
+ CFRelease(in_bundle_ref as *const c_void);
+ }
+ if status == NO_ERR {
+ assert_ne!(plugin_id, kAudioObjectUnknown);
+ Ok(plugin_id)
+ } else {
+ Err(Error::from(status))
+ }
+ }
+
+ pub fn create_blank_device_sync(
+ plugin_id: AudioObjectID,
+ ) -> std::result::Result<AudioObjectID, Error> {
+ let waiting_time = Duration::new(5, 0);
+
+ let condvar_pair = Arc::new((Mutex::new(Vec::<AudioObjectID>::new()), Condvar::new()));
+ let mut cloned_condvar_pair = condvar_pair.clone();
+ let data_ptr = &mut cloned_condvar_pair as *mut Arc<(Mutex<Vec<AudioObjectID>>, Condvar)>;
+
+ let address = get_property_address(
+ Property::HardwareDevices,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ );
+
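+ // Device creation completes asynchronously: register a listener on the system
+ // object's device list so the callback below can signal once the new aggregate
+ // device actually shows up, then wait on the condvar with a timeout.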
+ let status = audio_object_add_property_listener(
+ kAudioObjectSystemObject,
+ &address,
+ devices_changed_callback,
+ data_ptr as *mut c_void,
+ );
+ assert_eq!(status, NO_ERR);
+
+ let _teardown = finally(|| {
+ let status = audio_object_remove_property_listener(
+ kAudioObjectSystemObject,
+ &address,
+ devices_changed_callback,
+ data_ptr as *mut c_void,
+ );
+ assert_eq!(status, NO_ERR);
+ });
+
+ let device = Self::create_blank_device(plugin_id)?;
+
+ // Wait until the aggregate device appears in the device list.
+ let (lock, cvar) = &*condvar_pair;
+ let devices = lock.lock().unwrap();
+ if !devices.contains(&device) {
+ let (devs, timeout_res) = cvar.wait_timeout(devices, waiting_time).unwrap();
+ if timeout_res.timed_out() {
+ cubeb_log!(
+ "Timed out waiting for the creation of aggregate device {}!",
+ device
+ );
+ }
+ if !devs.contains(&device) {
+ return Err(Error::from(waiting_time));
+ }
+ }
+
+ extern "C" fn devices_changed_callback(
+ id: AudioObjectID,
+ _number_of_addresses: u32,
+ _addresses: *const AudioObjectPropertyAddress,
+ data: *mut c_void,
+ ) -> OSStatus {
+ assert_eq!(id, kAudioObjectSystemObject);
+ let pair = unsafe { &mut *(data as *mut Arc<(Mutex<Vec<AudioObjectID>>, Condvar)>) };
+ let (lock, cvar) = &**pair;
+ let mut devices = lock.lock().unwrap();
+ *devices = audiounit_get_devices();
+ cvar.notify_one();
+ NO_ERR
+ }
+
+ Ok(device)
+ }
+
+ pub fn create_blank_device(
+ plugin_id: AudioObjectID,
+ ) -> std::result::Result<AudioObjectID, Error> {
+ assert_ne!(plugin_id, kAudioObjectUnknown);
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioPlugInCreateAggregateDevice,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let mut size: usize = 0;
+ let status = audio_object_get_property_data_size(plugin_id, &address, &mut size);
+ if status != NO_ERR {
+ return Err(Error::from(status));
+ }
+ assert_ne!(size, 0);
+
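+ // Use a timestamp to make the device name and UID unique, so aggregate
+ // devices created at different times don't collide.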
+ let sys_time = SystemTime::now();
+ let time_id = sys_time.duration_since(UNIX_EPOCH).unwrap().as_nanos();
+ let device_name = format!("{}_{}", PRIVATE_AGGREGATE_DEVICE_NAME, time_id);
+ let device_uid = format!("org.mozilla.{}", device_name);
+
+ let mut device_id = kAudioObjectUnknown;
+ let status = unsafe {
+ let device_dict = CFMutableDictRef::default();
+
+ // Set the name of the device.
+ let device_name = cfstringref_from_string(&device_name);
+ device_dict.add_value(
+ cfstringref_from_static_string(AGGREGATE_DEVICE_NAME_KEY) as *const c_void,
+ device_name as *const c_void,
+ );
+ CFRelease(device_name as *const c_void);
+
+ // Set the uid of the device.
+ let device_uid = cfstringref_from_string(&device_uid);
+ device_dict.add_value(
+ cfstringref_from_static_string(AGGREGATE_DEVICE_UID_KEY) as *const c_void,
+ device_uid as *const c_void,
+ );
+ CFRelease(device_uid as *const c_void);
+
+ // Make the device private to the process creating it.
+ let private_value: i32 = 1;
+ let device_private_key = CFNumberCreate(
+ kCFAllocatorDefault,
+ i64::from(kCFNumberIntType),
+ &private_value as *const i32 as *const c_void,
+ );
+ device_dict.add_value(
+ cfstringref_from_static_string(AGGREGATE_DEVICE_PRIVATE_KEY) as *const c_void,
+ device_private_key as *const c_void,
+ );
+ CFRelease(device_private_key as *const c_void);
+
+ // Set the device to a stacked aggregate (i.e. multi-output device).
+ let stacked_value: i32 = 0; // 1 for normal aggregate device.
+ let device_stacked_key = CFNumberCreate(
+ kCFAllocatorDefault,
+ i64::from(kCFNumberIntType),
+ &stacked_value as *const i32 as *const c_void,
+ );
+ device_dict.add_value(
+ cfstringref_from_static_string(AGGREGATE_DEVICE_STACKED_KEY) as *const c_void,
+ device_stacked_key as *const c_void,
+ );
+ CFRelease(device_stacked_key as *const c_void);
+
+ // This call will fire `audiounit_collection_changed_callback` indirectly!
+ audio_object_get_property_data_with_qualifier(
+ plugin_id,
+ &address,
+ mem::size_of_val(&device_dict),
+ &device_dict,
+ &mut size,
+ &mut device_id,
+ )
+ };
+ if status == NO_ERR {
+ assert_ne!(device_id, kAudioObjectUnknown);
+ Ok(device_id)
+ } else {
+ Err(Error::from(status))
+ }
+ }
+
+ pub fn set_sub_devices_sync(
+ device_id: AudioDeviceID,
+ input_id: AudioDeviceID,
+ output_id: AudioDeviceID,
+ ) -> std::result::Result<(), Error> {
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioAggregateDevicePropertyFullSubDeviceList,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let waiting_time = Duration::new(5, 0);
+
+ let condvar_pair = Arc::new((Mutex::new(AudioObjectID::default()), Condvar::new()));
+ let mut cloned_condvar_pair = condvar_pair.clone();
+ let data_ptr = &mut cloned_condvar_pair as *mut Arc<(Mutex<AudioObjectID>, Condvar)>;
+
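+ // As in create_blank_device_sync, the change is applied asynchronously: listen
+ // for changes to the aggregate's full sub-device list and wait until the
+ // callback reports that `device_id` has been updated.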
+ let status = audio_object_add_property_listener(
+ device_id,
+ &address,
+ devices_changed_callback,
+ data_ptr as *mut c_void,
+ );
+ if status != NO_ERR {
+ return Err(Error::from(status));
+ }
+
+ let remove_listener = || -> OSStatus {
+ audio_object_remove_property_listener(
+ device_id,
+ &address,
+ devices_changed_callback,
+ data_ptr as *mut c_void,
+ )
+ };
+
+ Self::set_sub_devices(device_id, input_id, output_id)?;
+
+ // Wait until the sub devices are added.
+ let (lock, cvar) = &*condvar_pair;
+ let device = lock.lock().unwrap();
+ if *device != device_id {
+ let (dev, timeout_res) = cvar.wait_timeout(device, waiting_time).unwrap();
+ if timeout_res.timed_out() {
+ cubeb_log!(
+ "Timed out waiting to add devices ({}, {}) to aggregate device {}!",
+ input_id,
+ output_id,
+ device_id
+ );
+ }
+ if *dev != device_id {
+ let status = remove_listener();
+ // If the error is kAudioHardwareBadObjectError, it implies `device_id` is somehow
+ // dead, so its listener should receive nothing, and it is safe to return here.
+ assert!(status == NO_ERR || status == (kAudioHardwareBadObjectError as OSStatus));
+ // TODO: Destroy the aggregate device immediately if error is not
+ // kAudioHardwareBadObjectError. Otherwise the `devices_changed_callback` is able
+ // to touch the `cloned_condvar_pair` after it's freed.
+ return Err(Error::from(waiting_time));
+ }
+ }
+
+ extern "C" fn devices_changed_callback(
+ id: AudioObjectID,
+ _number_of_addresses: u32,
+ _addresses: *const AudioObjectPropertyAddress,
+ data: *mut c_void,
+ ) -> OSStatus {
+ let pair = unsafe { &mut *(data as *mut Arc<(Mutex<AudioObjectID>, Condvar)>) };
+ let (lock, cvar) = &**pair;
+ let mut device = lock.lock().unwrap();
+ *device = id;
+ cvar.notify_one();
+ NO_ERR
+ }
+
+ let status = remove_listener();
+ assert_eq!(status, NO_ERR);
+ Ok(())
+ }
+
+ pub fn set_sub_devices(
+ device_id: AudioDeviceID,
+ input_id: AudioDeviceID,
+ output_id: AudioDeviceID,
+ ) -> std::result::Result<(), Error> {
+ assert_ne!(device_id, kAudioObjectUnknown);
+ assert_ne!(input_id, kAudioObjectUnknown);
+ assert_ne!(output_id, kAudioObjectUnknown);
+ assert_ne!(input_id, output_id);
+
+ let output_sub_devices = Self::get_sub_devices(output_id)?;
+ let input_sub_devices = Self::get_sub_devices(input_id)?;
+
+ unsafe {
+ let sub_devices = CFArrayCreateMutable(ptr::null(), 0, &kCFTypeArrayCallBacks);
+ // The order of the items in the array is significant and is used to determine the order of the streams
+ // of the AudioAggregateDevice.
+ for device in output_sub_devices {
+ let uid = get_device_global_uid(device)?;
+ CFArrayAppendValue(sub_devices, uid.get_raw() as *const c_void);
+ }
+
+ for device in input_sub_devices {
+ let uid = get_device_global_uid(device)?;
+ CFArrayAppendValue(sub_devices, uid.get_raw() as *const c_void);
+ }
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioAggregateDevicePropertyFullSubDeviceList,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let size = mem::size_of::<CFMutableArrayRef>();
+ let status = audio_object_set_property_data(device_id, &address, size, &sub_devices);
+ CFRelease(sub_devices as *const c_void);
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ Err(Error::from(status))
+ }
+ }
+ }
+
+ pub fn get_sub_devices(
+ device_id: AudioDeviceID,
+ ) -> std::result::Result<Vec<AudioObjectID>, Error> {
+ assert_ne!(device_id, kAudioObjectUnknown);
+
+ let mut sub_devices = Vec::new();
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioAggregateDevicePropertyActiveSubDeviceList,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+ let mut size: usize = 0;
+ let status = audio_object_get_property_data_size(device_id, &address, &mut size);
+
+ if status == kAudioHardwareUnknownPropertyError as OSStatus {
+ // Return a vector containing the device itself if the device has no sub devices.
+ sub_devices.push(device_id);
+ return Ok(sub_devices);
+ } else if status != NO_ERR {
+ return Err(Error::from(status));
+ }
+
+ assert_ne!(size, 0);
+
+ let count = size / mem::size_of::<AudioObjectID>();
+ sub_devices = allocate_array(count);
+ let status = audio_object_get_property_data(
+ device_id,
+ &address,
+ &mut size,
+ sub_devices.as_mut_ptr(),
+ );
+
+ if status == NO_ERR {
+ Ok(sub_devices)
+ } else {
+ Err(Error::from(status))
+ }
+ }
+
+ pub fn set_master_device(
+ device_id: AudioDeviceID,
+ primary_id: AudioDeviceID,
+ ) -> std::result::Result<(), Error> {
+ assert_ne!(device_id, kAudioObjectUnknown);
+ assert_ne!(primary_id, kAudioObjectUnknown);
+
+ cubeb_log!(
+ "Set master device of the aggregate device {} to device {}",
+ device_id,
+ primary_id
+ );
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioAggregateDevicePropertyMasterSubDevice,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ // The master is set to the first sub-device of the primary device
+ let output_sub_devices = Self::get_sub_devices(primary_id)?;
+ assert!(!output_sub_devices.is_empty());
+ let master_sub_device_uid = get_device_global_uid(output_sub_devices[0]).unwrap();
+ let master_sub_device = master_sub_device_uid.get_raw();
+ let size = mem::size_of::<CFStringRef>();
+ let status = audio_object_set_property_data(device_id, &address, size, &master_sub_device);
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ Err(Error::from(status))
+ }
+ }
+
+ pub fn activate_clock_drift_compensation(
+ device_id: AudioObjectID,
+ ) -> std::result::Result<(), Error> {
+ assert_ne!(device_id, kAudioObjectUnknown);
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioObjectPropertyOwnedObjects,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let qualifier_data_size = mem::size_of::<AudioObjectID>();
+ let class_id: AudioClassID = kAudioSubDeviceClassID;
+ let qualifier_data = &class_id;
+
+ let mut size: usize = 0;
+ let status = audio_object_get_property_data_size_with_qualifier(
+ device_id,
+ &address,
+ qualifier_data_size,
+ qualifier_data,
+ &mut size,
+ );
+ if status != NO_ERR {
+ return Err(Error::from(status));
+ }
+ assert!(size > 0);
+ let subdevices_num = size / mem::size_of::<AudioObjectID>();
+ if subdevices_num < 2 {
+ cubeb_log!(
+ "Aggregate-device {} contains {} sub-devices only.\
+ We should have at least one input and one output device.",
+ device_id,
+ subdevices_num
+ );
+ return Err(Error::LessThan2Devices(subdevices_num));
+ }
+ let mut sub_devices: Vec<AudioObjectID> = allocate_array(subdevices_num);
+ let status = audio_object_get_property_data_with_qualifier(
+ device_id,
+ &address,
+ qualifier_data_size,
+ qualifier_data,
+ &mut size,
+ sub_devices.as_mut_ptr(),
+ );
+ if status != NO_ERR {
+ return Err(Error::from(status));
+ }
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioSubDevicePropertyDriftCompensation,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ // Start from the second device since the first is the master clock
+ for device in &sub_devices[1..] {
+ let status = audio_object_set_property_data(
+ *device,
+ &address,
+ mem::size_of::<u32>(),
+ &DRIFT_COMPENSATION,
+ );
+ if status != NO_ERR {
+ cubeb_log!(
+ "Failed to set drift compensation for {}. Ignore it.",
+ device
+ );
+ }
+ }
+
+ Ok(())
+ }
+
+ pub fn destroy_device(
+ plugin_id: AudioObjectID,
+ mut device_id: AudioDeviceID,
+ ) -> std::result::Result<(), Error> {
+ assert_ne!(plugin_id, kAudioObjectUnknown);
+ assert_ne!(device_id, kAudioObjectUnknown);
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioPlugInDestroyAggregateDevice,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let mut size: usize = 0;
+ let status = audio_object_get_property_data_size(plugin_id, &address, &mut size);
+ if status != NO_ERR {
+ return Err(Error::from(status));
+ }
+ assert!(size > 0);
+
+ let status = audio_object_get_property_data(plugin_id, &address, &mut size, &mut device_id);
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ Err(Error::from(status))
+ }
+ }
+
+ pub fn workaround_for_airpod(
+ device_id: AudioDeviceID,
+ input_id: AudioDeviceID,
+ output_id: AudioDeviceID,
+ ) -> std::result::Result<(), Error> {
+ assert_ne!(device_id, kAudioObjectUnknown);
+ assert_ne!(input_id, kAudioObjectUnknown);
+ assert_ne!(output_id, kAudioObjectUnknown);
+ assert_ne!(input_id, output_id);
+
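+ // AirPods-specific workaround: when both the input and the output device are
+ // AirPods, pin the aggregate device's nominal sample rate to the input
+ // device's rate (set below via kAudioDevicePropertyNominalSampleRate).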
+ let label = get_device_label(input_id, DeviceType::INPUT)?;
+ let input_label = label.into_string();
+
+ let label = get_device_label(output_id, DeviceType::OUTPUT)?;
+ let output_label = label.into_string();
+
+ if input_label.contains("AirPods") && output_label.contains("AirPods") {
+ let input_rate =
+ get_device_sample_rate(input_id, DeviceType::INPUT | DeviceType::OUTPUT)?;
+ cubeb_log!(
+ "The nominal rate of the input device {}: {}",
+ input_id,
+ input_rate
+ );
+
+ let output_rate =
+ match get_device_sample_rate(output_id, DeviceType::INPUT | DeviceType::OUTPUT) {
+ Ok(rate) => format!("{}", rate),
+ Err(e) => format!("Error {}", e),
+ };
+ cubeb_log!(
+ "The nominal rate of the output device {}: {}",
+ output_id,
+ output_rate
+ );
+
+ let addr = AudioObjectPropertyAddress {
+ mSelector: kAudioDevicePropertyNominalSampleRate,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let status = audio_object_set_property_data(
+ device_id,
+ &addr,
+ mem::size_of::<f64>(),
+ &input_rate,
+ );
+ if status != NO_ERR {
+ return Err(Error::from(status));
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Default for AggregateDevice {
+ fn default() -> Self {
+ Self {
+ plugin_id: kAudioObjectUnknown,
+ device_id: kAudioObjectUnknown,
+ input_id: kAudioObjectUnknown,
+ output_id: kAudioObjectUnknown,
+ }
+ }
+}
+
+impl Drop for AggregateDevice {
+ fn drop(&mut self) {
+ if self.plugin_id != kAudioObjectUnknown && self.device_id != kAudioObjectUnknown {
+ if let Err(r) = Self::destroy_device(self.plugin_id, self.device_id) {
+ cubeb_log!(
+ "Failed to destroy aggregate device {}. Error: {}",
+ self.device_id,
+ r
+ );
+ } else {
+ cubeb_log!(
+ "Destroyed aggregate device {} (input {}, output {})",
+ self.device_id,
+ self.input_id,
+ self.output_id
+ );
+ }
+ }
+ }
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/auto_release.rs b/third_party/rust/cubeb-coreaudio/src/backend/auto_release.rs
new file mode 100644
index 0000000000..97e091a497
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/auto_release.rs
@@ -0,0 +1,77 @@
+use std::fmt;
+
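+// RAII wrapper around a raw pointer: `release_func` is called on the pointer when
+// the wrapper is dropped, or when `reset` replaces it with a new pointer.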
+pub struct AutoRelease<T> {
+ ptr: *mut T,
+ release_func: unsafe extern "C" fn(*mut T),
+}
+
+impl<T> AutoRelease<T> {
+ pub fn new(ptr: *mut T, release_func: unsafe extern "C" fn(*mut T)) -> Self {
+ Self { ptr, release_func }
+ }
+
+ pub fn reset(&mut self, ptr: *mut T) {
+ self.release();
+ self.ptr = ptr;
+ }
+
+ pub fn as_ref(&self) -> &T {
+ assert!(!self.ptr.is_null());
+ unsafe { &*self.ptr }
+ }
+
+ pub fn as_mut(&mut self) -> &mut T {
+ assert!(!self.ptr.is_null());
+ unsafe { &mut *self.ptr }
+ }
+
+ pub fn as_ptr(&self) -> *const T {
+ self.ptr
+ }
+
+ fn release(&self) {
+ if !self.ptr.is_null() {
+ unsafe {
+ (self.release_func)(self.ptr);
+ }
+ }
+ }
+}
+
+impl<T> Drop for AutoRelease<T> {
+ fn drop(&mut self) {
+ self.release();
+ }
+}
+
+// Explicit Debug impl so this works for a type T
+// that doesn't implement the Debug trait.
+impl<T> fmt::Debug for AutoRelease<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("AutoRelease")
+ .field("ptr", &self.ptr)
+ .field("release_func", &self.release_func)
+ .finish()
+ }
+}
+
+#[test]
+fn test_auto_release() {
+ use std::mem;
+ use std::ptr;
+
+ unsafe extern "C" fn allocate() -> *mut libc::c_void {
+ // println!("Allocate!");
+ libc::calloc(1, mem::size_of::<u32>())
+ }
+
+ unsafe extern "C" fn deallocate(ptr: *mut libc::c_void) {
+ // println!("Deallocate!");
+ libc::free(ptr);
+ }
+
+ let mut auto_release = AutoRelease::new(ptr::null_mut(), deallocate);
+ let ptr = unsafe { allocate() };
+ auto_release.reset(ptr);
+ assert_eq!(auto_release.as_ptr(), ptr);
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/buffer_manager.rs b/third_party/rust/cubeb-coreaudio/src/backend/buffer_manager.rs
new file mode 100644
index 0000000000..6f1c299bfb
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/buffer_manager.rs
@@ -0,0 +1,355 @@
+use std::cmp::Ordering;
+use std::fmt;
+use std::os::raw::c_void;
+use std::slice;
+
+use cubeb_backend::SampleFormat;
+
+use super::ringbuf::RingBuffer;
+
+use self::LinearBuffer::*;
+use self::RingBufferConsumer::*;
+use self::RingBufferProducer::*;
+
+// Shuffles the data so that the first n channels of the interleaved buffer are overwritten by
+// the remaining channels.
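+// For example, with n = 1, channel_count = 3 and frame_count = 2, the interleaved
+// buffer [a0, b0, c0, a1, b1, c1] becomes [b0, c0, b1, c1] in its first four slots
+// (the tail of the buffer is left untouched).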
+fn drop_first_n_channels_in_place<T: Copy>(
+ n: usize,
+ data: &mut [T],
+ frame_count: usize,
+ channel_count: usize,
+) {
+ // This function also works when the numbers are equal, but that case isn't particularly
+ // useful, so we check with > rather than >= here in the hope of catching issues.
+ assert!(channel_count > n);
+ let mut read_idx: usize = 0;
+ let mut write_idx: usize = 0;
+
+ let channel_to_keep = channel_count - n;
+ for _ in 0..frame_count {
+ read_idx += n;
+ for _ in 0..channel_to_keep {
+ data[write_idx] = data[read_idx];
+ read_idx += 1;
+ write_idx += 1;
+ }
+ }
+}
+
+// It can be that a stereo microphone is in use, but the user asked for mono input. In this
+// particular case, downmix the stereo pair into a mono channel. In all other cases, simply drop
+// the remaining channels before appending to the ringbuffer, because there is no right or wrong
+// way to do this, unlike with the output side, where proper channel matrixing can be done.
+// Return the number of valid samples in the buffer.
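+// For example, a stereo input pulled down to mono turns [L0, R0, L1, R1] into
+// [L0 + R0, L1 + R1] in the first two slots (note: summed, not averaged).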
+fn remix_or_drop_channels<T: Copy + std::ops::Add<Output = T>>(
+ input_channels: usize,
+ output_channels: usize,
+ data: &mut [T],
+ frame_count: usize,
+) -> usize {
+ assert!(input_channels >= output_channels);
+ // Nothing to do, just return
+ if input_channels == output_channels {
+ return output_channels * frame_count;
+ }
+ // Simple stereo downmix
+ if input_channels == 2 && output_channels == 1 {
+ let mut read_idx = 0;
+ for (write_idx, _) in (0..frame_count).enumerate() {
+ data[write_idx] = data[read_idx] + data[read_idx + 1];
+ read_idx += 2;
+ }
+ return output_channels * frame_count;
+ }
+ // Drop excess channels
+ let mut read_idx = 0;
+ let mut write_idx = 0;
+ let channel_dropped_count = input_channels - output_channels;
+ for _ in 0..frame_count {
+ for _ in 0..output_channels {
+ data[write_idx] = data[read_idx];
+ write_idx += 1;
+ read_idx += 1;
+ }
+ read_idx += channel_dropped_count;
+ }
+ output_channels * frame_count
+}
+
+fn process_data<T: Copy + std::ops::Add<Output = T>>(
+ data: *mut c_void,
+ frame_count: usize,
+ input_channel_count: usize,
+ input_channels_to_ignore: usize,
+ output_channel_count: usize,
+) -> &'static [T] {
+ assert!(
+ input_channels_to_ignore == 0
+ || input_channel_count >= input_channels_to_ignore + output_channel_count
+ );
+ let input_slice = unsafe {
+ slice::from_raw_parts_mut::<T>(data as *mut T, frame_count * input_channel_count)
+ };
+ match input_channel_count.cmp(&output_channel_count) {
+ Ordering::Equal => {
+ assert_eq!(input_channels_to_ignore, 0);
+ input_slice
+ }
+ Ordering::Greater => {
+ if input_channels_to_ignore > 0 {
+ drop_first_n_channels_in_place(
+ input_channels_to_ignore,
+ input_slice,
+ frame_count,
+ input_channel_count,
+ );
+ }
+ let new_count_remixed = remix_or_drop_channels(
+ input_channel_count - input_channels_to_ignore,
+ output_channel_count,
+ input_slice,
+ frame_count,
+ );
+ unsafe { slice::from_raw_parts_mut::<T>(data as *mut T, new_count_remixed) }
+ }
+ Ordering::Less => {
+ assert!(input_channel_count < output_channel_count);
+ // Upmix happens on pull.
+ input_slice
+ }
+ }
+}
+
+pub enum RingBufferConsumer {
+ IntegerRingBufferConsumer(ringbuf::Consumer<i16>),
+ FloatRingBufferConsumer(ringbuf::Consumer<f32>),
+}
+
+pub enum RingBufferProducer {
+ IntegerRingBufferProducer(ringbuf::Producer<i16>),
+ FloatRingBufferProducer(ringbuf::Producer<f32>),
+}
+
+pub enum LinearBuffer {
+ IntegerLinearBuffer(Vec<i16>),
+ FloatLinearBuffer(Vec<f32>),
+}
+
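+// Buffers input samples in a ring buffer, converting the interleaved input data to
+// the requested channel count on the way: ignored or excess channels are dropped or
+// downmixed when pushing, and mono data is duplicated across the output channels
+// when pulling.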
+pub struct BufferManager {
+ consumer: RingBufferConsumer,
+ producer: RingBufferProducer,
+ linear_buffer: LinearBuffer,
+ // The number of channels in the interleaved data given to push_data
+ input_channel_count: usize,
+ // The number of channels that need to be skipped at the beginning of input_channel_count
+ input_channels_to_ignore: usize,
+ // The number of channels we actually need, which is also the channel count of the
+ // processed data stored in the internal ring buffer.
+ output_channel_count: usize,
+}
+
+impl BufferManager {
+ pub fn new(
+ format: SampleFormat,
+ buffer_size_frames: usize,
+ input_channel_count: usize,
+ input_channels_to_ignore: usize,
+ output_channel_count: usize,
+ ) -> Self {
+ assert!(
+ (input_channels_to_ignore == 0 && input_channel_count == 1)
+ || input_channel_count >= input_channels_to_ignore + output_channel_count
+ );
+ // 8 times the expected callback size, to handle the input callback being called multiple
+ // times in a row correctly.
+ let buffer_element_count = output_channel_count * buffer_size_frames * 8;
+ match format {
+ SampleFormat::S16LE | SampleFormat::S16BE | SampleFormat::S16NE => {
+ let ring = RingBuffer::<i16>::new(buffer_element_count);
+ let (prod, cons) = ring.split();
+ Self {
+ producer: IntegerRingBufferProducer(prod),
+ consumer: IntegerRingBufferConsumer(cons),
+ linear_buffer: IntegerLinearBuffer(Vec::<i16>::with_capacity(
+ buffer_element_count,
+ )),
+ input_channel_count,
+ input_channels_to_ignore,
+ output_channel_count,
+ }
+ }
+ SampleFormat::Float32LE | SampleFormat::Float32BE | SampleFormat::Float32NE => {
+ let ring = RingBuffer::<f32>::new(buffer_element_count);
+ let (prod, cons) = ring.split();
+ Self {
+ producer: FloatRingBufferProducer(prod),
+ consumer: FloatRingBufferConsumer(cons),
+ linear_buffer: FloatLinearBuffer(Vec::<f32>::with_capacity(
+ buffer_element_count,
+ )),
+ input_channel_count,
+ input_channels_to_ignore,
+ output_channel_count,
+ }
+ }
+ }
+ }
+ fn stored_channel_count(&self) -> usize {
+ if self.output_channel_count > self.input_channel_count {
+ // This case allows upmix from mono on pull.
+ self.input_channel_count
+ } else {
+ // Other cases only downmix on push.
+ self.output_channel_count
+ }
+ }
+ fn input_channel_count(&self) -> usize {
+ self.input_channel_count
+ }
+ fn input_channels_to_ignore(&self) -> usize {
+ self.input_channels_to_ignore
+ }
+ fn output_channel_count(&self) -> usize {
+ self.output_channel_count
+ }
+ pub fn push_data(&mut self, data: *mut c_void, frame_count: usize) {
+ let to_push = frame_count * self.stored_channel_count();
+ let input_channel_count = self.input_channel_count();
+ let input_channels_to_ignore = self.input_channels_to_ignore();
+ let output_channel_count = self.output_channel_count();
+ let pushed = match &mut self.producer {
+ RingBufferProducer::FloatRingBufferProducer(p) => {
+ let processed_input = process_data(
+ data,
+ frame_count,
+ input_channel_count,
+ input_channels_to_ignore,
+ output_channel_count,
+ );
+ p.push_slice(processed_input)
+ }
+ RingBufferProducer::IntegerRingBufferProducer(p) => {
+ let processed_input = process_data(
+ data,
+ frame_count,
+ input_channel_count,
+ input_channels_to_ignore,
+ output_channel_count,
+ );
+ p.push_slice(processed_input)
+ }
+ };
+ assert!(pushed <= to_push);
+ if pushed != to_push {
+ cubeb_alog!(
+ "Input ringbuffer full, could only push {} instead of {}",
+ pushed,
+ to_push
+ );
+ }
+ }
+ fn pull_data(&mut self, data: *mut c_void, needed_samples: usize) {
+ assert_eq!(needed_samples % self.output_channel_count(), 0);
+ let needed_frames = needed_samples / self.output_channel_count();
+ let to_pull = needed_frames * self.stored_channel_count();
+ match &mut self.consumer {
+ IntegerRingBufferConsumer(p) => {
+ let input: &mut [i16] =
+ unsafe { slice::from_raw_parts_mut::<i16>(data as *mut i16, needed_samples) };
+ let pulled = p.pop_slice(input);
+ if pulled < to_pull {
+ cubeb_alog!(
+ "Underrun during input data pull: (needed: {}, available: {})",
+ to_pull,
+ pulled
+ );
+ for i in 0..(to_pull - pulled) {
+ input[pulled + i] = 0;
+ }
+ }
+ if needed_samples > to_pull {
+ // Mono upmix. This can happen with voice processing.
+ let mut write_idx = needed_samples;
+ for read_idx in (0..to_pull).rev() {
+ write_idx -= self.output_channel_count();
+ for offset in 0..self.output_channel_count() {
+ input[write_idx + offset] = input[read_idx];
+ }
+ }
+ }
+ }
+ FloatRingBufferConsumer(p) => {
+ let input: &mut [f32] =
+ unsafe { slice::from_raw_parts_mut::<f32>(data as *mut f32, needed_samples) };
+ let pulled = p.pop_slice(input);
+ if pulled < to_pull {
+ cubeb_alog!(
+ "Underrun during input data pull: (needed: {}, available: {})",
+ to_pull,
+ pulled
+ );
+ for i in 0..(to_pull - pulled) {
+ input[pulled + i] = 0.0;
+ }
+ }
+ if needed_samples > to_pull {
+ // Mono upmix. This can happen with voice processing.
+ let mut write_idx = needed_samples;
+ for read_idx in (0..to_pull).rev() {
+ write_idx -= self.output_channel_count();
+ for offset in 0..self.output_channel_count() {
+ input[write_idx + offset] = input[read_idx];
+ }
+ }
+ }
+ }
+ }
+ }
+ pub fn get_linear_data(&mut self, frame_count: usize) -> *mut c_void {
+ let output_sample_count = frame_count * self.output_channel_count();
+ let p = match &mut self.linear_buffer {
+ LinearBuffer::IntegerLinearBuffer(b) => {
+ b.resize(output_sample_count, 0);
+ b.as_mut_ptr() as *mut c_void
+ }
+ LinearBuffer::FloatLinearBuffer(b) => {
+ b.resize(output_sample_count, 0.);
+ b.as_mut_ptr() as *mut c_void
+ }
+ };
+ self.pull_data(p, output_sample_count);
+
+ p
+ }
+ pub fn available_frames(&self) -> usize {
+ assert_ne!(self.stored_channel_count(), 0);
+ let stored_samples = match &self.consumer {
+ IntegerRingBufferConsumer(p) => p.len(),
+ FloatRingBufferConsumer(p) => p.len(),
+ };
+ stored_samples / self.stored_channel_count()
+ }
+ pub fn trim(&mut self, final_frame_count: usize) {
+ let final_sample_count = final_frame_count * self.stored_channel_count();
+ match &mut self.consumer {
+ IntegerRingBufferConsumer(c) => {
+ let available = c.len();
+ assert!(available >= final_sample_count);
+ let to_pop = available - final_sample_count;
+ c.discard(to_pop);
+ }
+ FloatRingBufferConsumer(c) => {
+ let available = c.len();
+ assert!(available >= final_sample_count);
+ let to_pop = available - final_sample_count;
+ c.discard(to_pop);
+ }
+ }
+ }
+}
+
+impl fmt::Debug for BufferManager {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Ok(())
+ }
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/device_property.rs b/third_party/rust/cubeb-coreaudio/src/backend/device_property.rs
new file mode 100644
index 0000000000..b9a50c6576
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/device_property.rs
@@ -0,0 +1,360 @@
+use super::*;
+
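+// Thin wrappers around audio_object_get_property_data for the device and stream
+// properties used by this backend. Each helper returns the raw OSStatus on failure.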
+pub fn get_device_uid(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<StringRef, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::DeviceUID, devtype);
+ let mut size = mem::size_of::<CFStringRef>();
+ let mut uid: CFStringRef = ptr::null();
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut uid);
+ if err == NO_ERR {
+ Ok(StringRef::new(uid as _))
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_model_uid(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<StringRef, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::ModelUID, devtype);
+ let mut size = mem::size_of::<CFStringRef>();
+ let mut uid: CFStringRef = ptr::null();
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut uid);
+ if err == NO_ERR {
+ Ok(StringRef::new(uid as _))
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_transport_type(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<u32, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::TransportType, devtype);
+ let mut size = mem::size_of::<u32>();
+ let mut transport: u32 = 0;
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut transport);
+ if err == NO_ERR {
+ Ok(transport)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_source(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<u32, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::DeviceSource, devtype);
+ let mut size = mem::size_of::<u32>();
+ let mut source: u32 = 0;
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut source);
+ if err == NO_ERR {
+ Ok(source)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_source_name(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<StringRef, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let mut source: u32 = get_device_source(id, devtype)?;
+ let address = get_property_address(Property::DeviceSourceName, devtype);
+ let mut size = mem::size_of::<AudioValueTranslation>();
+ let mut name: CFStringRef = ptr::null();
+ let mut trl = AudioValueTranslation {
+ mInputData: &mut source as *mut u32 as *mut c_void,
+ mInputDataSize: mem::size_of::<u32>() as u32,
+ mOutputData: &mut name as *mut CFStringRef as *mut c_void,
+ mOutputDataSize: mem::size_of::<CFStringRef>() as u32,
+ };
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut trl);
+ if err == NO_ERR {
+ Ok(StringRef::new(name as _))
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_name(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<StringRef, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::DeviceName, devtype);
+ let mut size = mem::size_of::<CFStringRef>();
+ let mut name: CFStringRef = ptr::null();
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut name);
+ if err == NO_ERR {
+ Ok(StringRef::new(name as _))
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_manufacturer(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<StringRef, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::DeviceManufacturer, devtype);
+ let mut size = mem::size_of::<CFStringRef>();
+ let mut manufacturer: CFStringRef = ptr::null();
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut manufacturer);
+ if err == NO_ERR {
+ Ok(StringRef::new(manufacturer as _))
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_buffer_frame_size_range(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<AudioValueRange, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::DeviceBufferFrameSizeRange, devtype);
+ let mut size = mem::size_of::<AudioValueRange>();
+ let mut range = AudioValueRange::default();
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut range);
+ if err == NO_ERR {
+ Ok(range)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_latency(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<u32, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::DeviceLatency, devtype);
+ let mut size = mem::size_of::<u32>();
+ let mut latency: u32 = 0;
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut latency);
+ if err == NO_ERR {
+ Ok(latency)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_streams(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<Vec<AudioStreamID>, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::DeviceStreams, devtype);
+
+ let mut size: usize = 0;
+ let err = audio_object_get_property_data_size(id, &address, &mut size);
+ if err != NO_ERR {
+ return Err(err);
+ }
+
+ let mut streams: Vec<AudioObjectID> = allocate_array_by_size(size);
+ let err = audio_object_get_property_data(id, &address, &mut size, streams.as_mut_ptr());
+ if err == NO_ERR {
+ Ok(streams)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_device_sample_rate(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<f64, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::DeviceSampleRate, devtype);
+ let mut size = mem::size_of::<f64>();
+ let mut rate: f64 = 0.0;
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut rate);
+ if err == NO_ERR {
+ Ok(rate)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_ranges_of_device_sample_rate(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<Vec<AudioValueRange>, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::DeviceSampleRates, devtype);
+
+ let mut size: usize = 0;
+ let err = audio_object_get_property_data_size(id, &address, &mut size);
+ if err != NO_ERR {
+ return Err(err);
+ }
+
+ let mut ranges: Vec<AudioValueRange> = allocate_array_by_size(size);
+ let err = audio_object_get_property_data(id, &address, &mut size, ranges.as_mut_ptr());
+ if err == NO_ERR {
+ Ok(ranges)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_stream_latency(id: AudioStreamID) -> std::result::Result<u32, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(
+ Property::StreamLatency,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ );
+ let mut size = mem::size_of::<u32>();
+ let mut latency: u32 = 0;
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut latency);
+ if err == NO_ERR {
+ Ok(latency)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_stream_terminal_type(id: AudioStreamID) -> std::result::Result<u32, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(
+ Property::StreamTerminalType,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ );
+ let mut size = mem::size_of::<u32>();
+ let mut terminal_type: u32 = 0;
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut terminal_type);
+ if err == NO_ERR {
+ Ok(terminal_type)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_stream_virtual_format(
+ id: AudioStreamID,
+) -> std::result::Result<AudioStreamBasicDescription, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(
+ Property::StreamVirtualFormat,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ );
+ let mut size = mem::size_of::<AudioStreamBasicDescription>();
+ let mut format = AudioStreamBasicDescription::default();
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut format);
+ if err == NO_ERR {
+ Ok(format)
+ } else {
+ Err(err)
+ }
+}
+
+pub fn get_clock_domain(
+ id: AudioStreamID,
+ devtype: DeviceType,
+) -> std::result::Result<u32, OSStatus> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = get_property_address(Property::ClockDomain, devtype);
+ let mut size = mem::size_of::<u32>();
+ let mut clock_domain: u32 = 0;
+ let err = audio_object_get_property_data(id, &address, &mut size, &mut clock_domain);
+ if err == NO_ERR {
+ Ok(clock_domain)
+ } else {
+ Err(err)
+ }
+}
+
+pub enum Property {
+ DeviceBufferFrameSizeRange,
+ DeviceIsAlive,
+ DeviceLatency,
+ DeviceManufacturer,
+ DeviceName,
+ DeviceSampleRate,
+ DeviceSampleRates,
+ DeviceSource,
+ DeviceSourceName,
+ DeviceStreams,
+ DeviceUID,
+ HardwareDefaultInputDevice,
+ HardwareDefaultOutputDevice,
+ HardwareDevices,
+ ModelUID,
+ StreamLatency,
+ StreamTerminalType,
+ StreamVirtualFormat,
+ TransportType,
+ ClockDomain,
+}
+
+impl From<Property> for AudioObjectPropertySelector {
+ fn from(p: Property) -> Self {
+ match p {
+ Property::DeviceBufferFrameSizeRange => kAudioDevicePropertyBufferFrameSizeRange,
+ Property::DeviceIsAlive => kAudioDevicePropertyDeviceIsAlive,
+ Property::DeviceLatency => kAudioDevicePropertyLatency,
+ Property::DeviceManufacturer => kAudioObjectPropertyManufacturer,
+ Property::DeviceName => kAudioObjectPropertyName,
+ Property::DeviceSampleRate => kAudioDevicePropertyNominalSampleRate,
+ Property::DeviceSampleRates => kAudioDevicePropertyAvailableNominalSampleRates,
+ Property::DeviceSource => kAudioDevicePropertyDataSource,
+ Property::DeviceSourceName => kAudioDevicePropertyDataSourceNameForIDCFString,
+ Property::DeviceStreams => kAudioDevicePropertyStreams,
+ Property::DeviceUID => kAudioDevicePropertyDeviceUID,
+ Property::HardwareDefaultInputDevice => kAudioHardwarePropertyDefaultInputDevice,
+ Property::HardwareDefaultOutputDevice => kAudioHardwarePropertyDefaultOutputDevice,
+ Property::HardwareDevices => kAudioHardwarePropertyDevices,
+ Property::ModelUID => kAudioDevicePropertyModelUID,
+ Property::StreamLatency => kAudioStreamPropertyLatency,
+ Property::StreamTerminalType => kAudioStreamPropertyTerminalType,
+ Property::StreamVirtualFormat => kAudioStreamPropertyVirtualFormat,
+ Property::TransportType => kAudioDevicePropertyTransportType,
+ Property::ClockDomain => kAudioDevicePropertyClockDomain,
+ }
+ }
+}
+
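+// Builds the property address used by the getters above: the selector comes from
+// `Property` and the scope is derived from the cubeb device type (input scope,
+// output scope, or the global scope when both input and output are requested).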
+pub fn get_property_address(property: Property, devtype: DeviceType) -> AudioObjectPropertyAddress {
+ const GLOBAL: ffi::cubeb_device_type =
+ ffi::CUBEB_DEVICE_TYPE_INPUT | ffi::CUBEB_DEVICE_TYPE_OUTPUT;
+ let scope = match devtype.bits() {
+ ffi::CUBEB_DEVICE_TYPE_INPUT => kAudioDevicePropertyScopeInput,
+ ffi::CUBEB_DEVICE_TYPE_OUTPUT => kAudioDevicePropertyScopeOutput,
+ GLOBAL => kAudioObjectPropertyScopeGlobal,
+ _ => panic!("Invalid type"),
+ };
+ AudioObjectPropertyAddress {
+ mSelector: property.into(),
+ mScope: scope,
+ mElement: kAudioObjectPropertyElementMaster,
+ }
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/mixer.rs b/third_party/rust/cubeb-coreaudio/src/backend/mixer.rs
new file mode 100644
index 0000000000..a4f63926b1
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/mixer.rs
@@ -0,0 +1,492 @@
+use cubeb_backend::{ChannelLayout, SampleFormat};
+use std::mem;
+use std::os::raw::{c_int, c_void};
+
+extern crate audio_mixer;
+pub use self::audio_mixer::Channel;
+
+const CHANNEL_OERDER: [audio_mixer::Channel; audio_mixer::Channel::count()] = [
+ audio_mixer::Channel::FrontLeft,
+ audio_mixer::Channel::FrontRight,
+ audio_mixer::Channel::FrontCenter,
+ audio_mixer::Channel::LowFrequency,
+ audio_mixer::Channel::BackLeft,
+ audio_mixer::Channel::BackRight,
+ audio_mixer::Channel::FrontLeftOfCenter,
+ audio_mixer::Channel::FrontRightOfCenter,
+ audio_mixer::Channel::BackCenter,
+ audio_mixer::Channel::SideLeft,
+ audio_mixer::Channel::SideRight,
+ audio_mixer::Channel::TopCenter,
+ audio_mixer::Channel::TopFrontLeft,
+ audio_mixer::Channel::TopFrontCenter,
+ audio_mixer::Channel::TopFrontRight,
+ audio_mixer::Channel::TopBackLeft,
+ audio_mixer::Channel::TopBackCenter,
+ audio_mixer::Channel::TopBackRight,
+ audio_mixer::Channel::Silence,
+];
+
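+// Walks the layout bitmap from the least significant bit up; each set bit selects
+// the channel at the same index in CHANNEL_OERDER. For example,
+// ChannelLayout::STEREO maps to [FrontLeft, FrontRight].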
+pub fn get_channel_order(channel_layout: ChannelLayout) -> Vec<audio_mixer::Channel> {
+ let mut map = channel_layout.bits();
+ let mut order = Vec::new();
+ let mut channel_index: usize = 0;
+ while map != 0 {
+ if map & 1 == 1 {
+ order.push(CHANNEL_OERDER[channel_index]);
+ }
+ map >>= 1;
+ channel_index += 1;
+ }
+ order
+}
+
+fn get_default_channel_order(channel_count: usize) -> Vec<audio_mixer::Channel> {
+ assert_ne!(channel_count, 0);
+ let mut channels = Vec::with_capacity(channel_count);
+ for channel in CHANNEL_OERDER.iter().take(channel_count) {
+ channels.push(*channel);
+ }
+
+ if channel_count > CHANNEL_OERDER.len() {
+ channels.extend(vec![
+ audio_mixer::Channel::Silence;
+ channel_count - CHANNEL_OERDER.len()
+ ]);
+ }
+
+ channels
+}
+
+#[derive(Debug)]
+enum MixerType {
+ IntegerMixer(audio_mixer::Mixer<i16>),
+ FloatMixer(audio_mixer::Mixer<f32>),
+}
+
+impl MixerType {
+ fn new(
+ format: SampleFormat,
+ input_channels: &[audio_mixer::Channel],
+ output_channels: &[audio_mixer::Channel],
+ ) -> Self {
+ match format {
+ SampleFormat::S16LE | SampleFormat::S16BE | SampleFormat::S16NE => {
+ cubeb_log!("Create an integer-type (i16) mixer");
+ Self::IntegerMixer(audio_mixer::Mixer::<i16>::new(
+ input_channels,
+ output_channels,
+ ))
+ }
+ SampleFormat::Float32LE | SampleFormat::Float32BE | SampleFormat::Float32NE => {
+ cubeb_log!("Create a floating-point (f32) mixer");
+ Self::FloatMixer(audio_mixer::Mixer::<f32>::new(
+ input_channels,
+ output_channels,
+ ))
+ }
+ }
+ }
+
+ fn sample_size(&self) -> usize {
+ match self {
+ MixerType::IntegerMixer(_) => mem::size_of::<i16>(),
+ MixerType::FloatMixer(_) => mem::size_of::<f32>(),
+ }
+ }
+
+ fn input_channels(&self) -> &[Channel] {
+ match self {
+ MixerType::IntegerMixer(m) => m.input_channels(),
+ MixerType::FloatMixer(m) => m.input_channels(),
+ }
+ }
+
+ fn output_channels(&self) -> &[Channel] {
+ match self {
+ MixerType::IntegerMixer(m) => m.output_channels(),
+ MixerType::FloatMixer(m) => m.output_channels(),
+ }
+ }
+
+ fn mix(
+ &self,
+ input_buffer_ptr: *const (),
+ input_buffer_size: usize,
+ output_buffer_ptr: *mut (),
+ output_buffer_size: usize,
+ frames: usize,
+ ) {
+ use std::slice;
+
+ // Check input buffer size.
+ let size_needed = frames * self.input_channels().len() * self.sample_size();
+ assert!(input_buffer_size >= size_needed);
+ // Check output buffer size.
+ let size_needed = frames * self.output_channels().len() * self.sample_size();
+ assert!(output_buffer_size >= size_needed);
+
+ match self {
+ MixerType::IntegerMixer(m) => {
+ let in_buf_ptr = input_buffer_ptr as *const i16;
+ let out_buf_ptr = output_buffer_ptr as *mut i16;
+ let input_buffer = unsafe {
+ slice::from_raw_parts(in_buf_ptr, frames * self.input_channels().len())
+ };
+ let output_buffer = unsafe {
+ slice::from_raw_parts_mut(out_buf_ptr, frames * self.output_channels().len())
+ };
+ let mut in_buf = input_buffer.chunks(self.input_channels().len());
+ let mut out_buf = output_buffer.chunks_mut(self.output_channels().len());
+ for _ in 0..frames {
+ m.mix(in_buf.next().unwrap(), out_buf.next().unwrap());
+ }
+ }
+ MixerType::FloatMixer(m) => {
+ let in_buf_ptr = input_buffer_ptr as *const f32;
+ let out_buf_ptr = output_buffer_ptr as *mut f32;
+ let input_buffer = unsafe {
+ slice::from_raw_parts(in_buf_ptr, frames * self.input_channels().len())
+ };
+ let output_buffer = unsafe {
+ slice::from_raw_parts_mut(out_buf_ptr, frames * self.output_channels().len())
+ };
+ let mut in_buf = input_buffer.chunks(self.input_channels().len());
+ let mut out_buf = output_buffer.chunks_mut(self.output_channels().len());
+ for _ in 0..frames {
+ m.mix(in_buf.next().unwrap(), out_buf.next().unwrap());
+ }
+ }
+ };
+ }
+}
+
+#[derive(Debug)]
+pub struct Mixer {
+ mixer: MixerType,
+ // Only accessed from callback thread.
+ buffer: Vec<u8>,
+}
+
+impl Mixer {
+ pub fn new(
+ format: SampleFormat,
+ in_channel_count: usize,
+ input_layout: ChannelLayout,
+ out_channel_count: usize,
+ mut output_channels: Vec<audio_mixer::Channel>,
+ ) -> Self {
+ assert!(in_channel_count > 0);
+ assert!(out_channel_count > 0);
+
+ cubeb_log!(
+ "Creating a mixer with input channel count: {}, input layout: {:?},\
+ out channel count: {}, output channels: {:?}",
+ in_channel_count,
+ input_layout,
+ out_channel_count,
+ output_channels
+ );
+
+ let input_channels = if in_channel_count as u32 != input_layout.bits().count_ones() {
+ cubeb_log!(
+ "Mismatch between input channels and layout. Applying default layout instead"
+ );
+ get_default_channel_order(in_channel_count)
+ } else {
+ get_channel_order(input_layout)
+ };
+
+ // When there are one or two channels, force mono or stereo. Some devices (namely the
+ // Bose QC35, mark 1 and 2) expose a single channel mapped to the right for
+ // some reason. Some devices (e.g., the builtin speaker on a MacBook Pro 2018) map
+ // the channel layout to undefined channels.
+ if out_channel_count == 1 {
+ output_channels = vec![audio_mixer::Channel::FrontCenter];
+ } else if out_channel_count == 2 {
+ output_channels = vec![
+ audio_mixer::Channel::FrontLeft,
+ audio_mixer::Channel::FrontRight,
+ ];
+ }
+
+ let all_silence = vec![audio_mixer::Channel::Silence; out_channel_count];
+ if output_channels.is_empty()
+ || out_channel_count != output_channels.len()
+ || all_silence == output_channels
+ || Self::non_silent_duplicate_channel_present(&output_channels)
+ {
+ cubeb_log!("Invalid layout. Applying default layout instead");
+ output_channels = get_default_channel_order(out_channel_count);
+ }
+
+ Self {
+ mixer: MixerType::new(format, &input_channels, &output_channels),
+ buffer: Vec::new(),
+ }
+ }
+
+ pub fn update_buffer_size(&mut self, frames: usize) -> bool {
+ let size_needed = frames * self.mixer.input_channels().len() * self.mixer.sample_size();
+ let elements_needed = size_needed / mem::size_of::<u8>();
+ if self.buffer.len() < elements_needed {
+ self.buffer.resize(elements_needed, 0);
+ true
+ } else {
+ false
+ }
+ }
+
+ pub fn get_buffer_mut_ptr(&mut self) -> *mut u8 {
+ self.buffer.as_mut_ptr()
+ }
+
+ // `update_buffer_size` must be called before this.
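+ // Typical flow (sketch): call `update_buffer_size(frames)`, write `frames` frames of
+ // interleaved input samples (as raw bytes) through `get_buffer_mut_ptr()`, then call
+ // `mix(...)` to matrix them into `dest_buffer`.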
+ pub fn mix(&self, frames: usize, dest_buffer: *mut c_void, dest_buffer_size: usize) -> c_int {
+ let (src_buffer_ptr, src_buffer_size) = self.get_buffer_info();
+ self.mixer.mix(
+ src_buffer_ptr as *const (),
+ src_buffer_size,
+ dest_buffer as *mut (),
+ dest_buffer_size,
+ frames,
+ );
+ 0
+ }
+
+ fn get_buffer_info(&self) -> (*const u8, usize) {
+ (
+ self.buffer.as_ptr(),
+ self.buffer.len() * mem::size_of::<u8>(),
+ )
+ }
+
+ fn non_silent_duplicate_channel_present(channels: &[audio_mixer::Channel]) -> bool {
+ let mut bitmap: u32 = 0;
+ for channel in channels {
+ if channel != &Channel::Silence {
+ if (bitmap & channel.bitmask()) != 0 {
+ return true;
+ }
+ bitmap |= channel.bitmask();
+ }
+ }
+ false
+ }
+}
+
+// This test documents the channel order produced for each ChannelLayout passed through the cubeb interface.
+#[test]
+fn test_get_channel_order() {
+ assert_eq!(
+ get_channel_order(ChannelLayout::MONO),
+ [Channel::FrontCenter]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::MONO_LFE),
+ [Channel::FrontCenter, Channel::LowFrequency]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::STEREO),
+ [Channel::FrontLeft, Channel::FrontRight]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::STEREO_LFE),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::LowFrequency
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F_LFE),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::LowFrequency
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_2F1),
+ [Channel::FrontLeft, Channel::FrontRight, Channel::BackCenter]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_2F1_LFE),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::LowFrequency,
+ Channel::BackCenter
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F1),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::BackCenter
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F1_LFE),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::LowFrequency,
+ Channel::BackCenter
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_2F2),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::SideLeft,
+ Channel::SideRight
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_2F2_LFE),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::LowFrequency,
+ Channel::SideLeft,
+ Channel::SideRight
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::QUAD),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::BackLeft,
+ Channel::BackRight
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::QUAD_LFE),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::LowFrequency,
+ Channel::BackLeft,
+ Channel::BackRight
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F2),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::SideLeft,
+ Channel::SideRight
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F2_LFE),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::LowFrequency,
+ Channel::SideLeft,
+ Channel::SideRight
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F2_BACK),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::BackLeft,
+ Channel::BackRight
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F2_LFE_BACK),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::LowFrequency,
+ Channel::BackLeft,
+ Channel::BackRight
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F3R_LFE),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::LowFrequency,
+ Channel::BackCenter,
+ Channel::SideLeft,
+ Channel::SideRight
+ ]
+ );
+ assert_eq!(
+ get_channel_order(ChannelLayout::_3F4_LFE),
+ [
+ Channel::FrontLeft,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::LowFrequency,
+ Channel::BackLeft,
+ Channel::BackRight,
+ Channel::SideLeft,
+ Channel::SideRight
+ ]
+ );
+}
+
+#[test]
+fn test_get_default_channel_order() {
+ for len in 1..CHANNEL_OERDER.len() + 10 {
+ let channels = get_default_channel_order(len);
+ if len <= CHANNEL_OERDER.len() {
+ assert_eq!(channels, &CHANNEL_OERDER[..len]);
+ } else {
+ let silences = vec![audio_mixer::Channel::Silence; len - CHANNEL_OERDER.len()];
+ assert_eq!(channels[..CHANNEL_OERDER.len()], CHANNEL_OERDER);
+ assert_eq!(&channels[CHANNEL_OERDER.len()..], silences.as_slice());
+ }
+ }
+}
+
+#[test]
+fn test_non_silent_duplicate_channels() {
+ let duplicate = [
+ Channel::FrontLeft,
+ Channel::Silence,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::Silence,
+ Channel::FrontRight,
+ ];
+ assert!(Mixer::non_silent_duplicate_channel_present(&duplicate));
+
+ let non_duplicate = [
+ Channel::FrontLeft,
+ Channel::Silence,
+ Channel::FrontRight,
+ Channel::FrontCenter,
+ Channel::Silence,
+ Channel::Silence,
+ ];
+ assert!(!Mixer::non_silent_duplicate_channel_present(&non_duplicate));
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/mod.rs b/third_party/rust/cubeb-coreaudio/src/backend/mod.rs
new file mode 100644
index 0000000000..61ae44fea1
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/mod.rs
@@ -0,0 +1,4423 @@
+// Copyright © 2018 Mozilla Foundation
+//
+// This program is made available under an ISC-style license. See the
+// accompanying file LICENSE for details.
+#![allow(unused_assignments)]
+#![allow(unused_must_use)]
+
+extern crate coreaudio_sys_utils;
+extern crate libc;
+extern crate ringbuf;
+
+mod aggregate_device;
+mod auto_release;
+mod buffer_manager;
+mod device_property;
+mod mixer;
+mod resampler;
+mod utils;
+
+use self::aggregate_device::*;
+use self::auto_release::*;
+use self::buffer_manager::*;
+use self::coreaudio_sys_utils::aggregate_device::*;
+use self::coreaudio_sys_utils::audio_device_extensions::*;
+use self::coreaudio_sys_utils::audio_object::*;
+use self::coreaudio_sys_utils::audio_unit::*;
+use self::coreaudio_sys_utils::cf_mutable_dict::*;
+use self::coreaudio_sys_utils::dispatch::*;
+use self::coreaudio_sys_utils::string::*;
+use self::coreaudio_sys_utils::sys::*;
+use self::device_property::*;
+use self::mixer::*;
+use self::resampler::*;
+use self::utils::*;
+use backend::ringbuf::RingBuffer;
+use cubeb_backend::{
+ ffi, ChannelLayout, Context, ContextOps, DeviceCollectionRef, DeviceId, DeviceRef, DeviceType,
+ Error, InputProcessingParams, Ops, Result, SampleFormat, State, Stream, StreamOps,
+ StreamParams, StreamParamsRef, StreamPrefs,
+};
+use mach::mach_time::{mach_absolute_time, mach_timebase_info};
+use std::cmp;
+use std::ffi::{CStr, CString};
+use std::fmt;
+use std::mem;
+use std::os::raw::{c_uint, c_void};
+use std::ptr;
+use std::slice;
+use std::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering};
+use std::sync::{Arc, Condvar, Mutex};
+use std::time::Duration;
+const NO_ERR: OSStatus = 0;
+
+const AU_OUT_BUS: AudioUnitElement = 0;
+const AU_IN_BUS: AudioUnitElement = 1;
+
+const DISPATCH_QUEUE_LABEL: &str = "org.mozilla.cubeb";
+const PRIVATE_AGGREGATE_DEVICE_NAME: &str = "CubebAggregateDevice";
+const VOICEPROCESSING_AGGREGATE_DEVICE_NAME: &str = "VPAUAggregateAudioDevice";
+
+const APPLE_STUDIO_DISPLAY_USB_ID: &str = "05AC:1114";
+
+// Empirically, some headsets report a minimal latency that is very low but does
+// not work in practice. Lie and say the minimum is 128 frames.
+const SAFE_MIN_LATENCY_FRAMES: u32 = 128;
+const SAFE_MAX_LATENCY_FRAMES: u32 = 512;
+
+bitflags! {
+ #[allow(non_camel_case_types)]
+ #[derive(Clone, Debug, PartialEq, Copy)]
+ struct device_flags: u32 {
+ const DEV_UNKNOWN = 0b0000_0000; // Unknown
+ const DEV_INPUT = 0b0000_0001; // Record device like mic
+ const DEV_OUTPUT = 0b0000_0010; // Playback device like speakers
+ const DEV_SELECTED_DEFAULT = 0b0000_0100; // User selected to use the system default device
+ }
+}
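Editor's note: a minimal, self-contained sketch (not part of the patch) of how these `device_flags` combine. The distinction between `intersects` and `contains` matters later in `create_audiounit`, which asserts that a device intersects DEV_INPUT | DEV_OUTPUT but does not contain both. The `DeviceFlags` type below is a stand-in defined only for this example, assuming the bitflags 2.x crate.

```rust
// Stand-in bitflags type, mirroring the pattern used by `device_flags` above.
bitflags::bitflags! {
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct DeviceFlags: u32 {
        const INPUT = 0b0000_0001;
        const OUTPUT = 0b0000_0010;
        const SELECTED_DEFAULT = 0b0000_0100;
    }
}

fn main() {
    let flags = DeviceFlags::INPUT | DeviceFlags::SELECTED_DEFAULT;
    // `intersects`: at least one of the given bits is set.
    assert!(flags.intersects(DeviceFlags::INPUT | DeviceFlags::OUTPUT));
    // `contains`: all of the given bits are set (false here, OUTPUT is missing).
    assert!(!flags.contains(DeviceFlags::INPUT | DeviceFlags::OUTPUT));
}
```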
+
+lazy_static! {
+ static ref HOST_TIME_TO_NS_RATIO: (u32, u32) = {
+ let mut timebase_info = mach_timebase_info { numer: 0, denom: 0 };
+ unsafe {
+ mach_timebase_info(&mut timebase_info);
+ }
+ (timebase_info.numer, timebase_info.denom)
+ };
+}
+
+fn make_sized_audio_channel_layout(sz: usize) -> AutoRelease<AudioChannelLayout> {
+ assert!(sz >= mem::size_of::<AudioChannelLayout>());
+ assert_eq!(
+ (sz - mem::size_of::<AudioChannelLayout>()) % mem::size_of::<AudioChannelDescription>(),
+ 0
+ );
+ let acl = unsafe { libc::calloc(1, sz) } as *mut AudioChannelLayout;
+
+ unsafe extern "C" fn free_acl(acl: *mut AudioChannelLayout) {
+ libc::free(acl as *mut libc::c_void);
+ }
+
+ AutoRelease::new(acl, free_acl)
+}
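Editor's note: `AudioChannelLayout` is a variable-length struct whose header already embeds one `AudioChannelDescription`, so callers size the allocation as the header plus `n - 1` extra descriptions. A small illustrative sketch (not part of the patch) with stand-in types, so it compiles without coreaudio-sys; the field layout is only an approximation for sizing purposes.

```rust
use std::mem;

#[repr(C)]
struct AudioChannelDescription {
    m_channel_label: u32,
    m_channel_flags: u32,
    m_coordinates: [f32; 3],
}

#[repr(C)]
struct AudioChannelLayout {
    m_channel_layout_tag: u32,
    m_channel_bitmap: u32,
    m_number_channel_descriptions: u32,
    m_channel_descriptions: [AudioChannelDescription; 1], // one description embedded inline
}

// Bytes to allocate for a layout holding `n_descriptions` descriptions. The result
// satisfies both assertions in make_sized_audio_channel_layout: it is at least the
// header size, and the excess is a whole multiple of the description size.
fn sized_layout_bytes(n_descriptions: usize) -> usize {
    assert!(n_descriptions >= 1);
    mem::size_of::<AudioChannelLayout>()
        + (n_descriptions - 1) * mem::size_of::<AudioChannelDescription>()
}

fn main() {
    println!("bytes for 6 descriptions: {}", sized_layout_bytes(6));
}
```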
+
+#[allow(non_camel_case_types)]
+#[derive(Clone, Debug)]
+struct device_info {
+ id: AudioDeviceID,
+ flags: device_flags,
+}
+
+impl Default for device_info {
+ fn default() -> Self {
+ Self {
+ id: kAudioObjectUnknown,
+ flags: device_flags::DEV_UNKNOWN,
+ }
+ }
+}
+
+#[allow(non_camel_case_types)]
+#[derive(Debug)]
+struct device_property_listener {
+ device: AudioDeviceID,
+ property: AudioObjectPropertyAddress,
+ listener: audio_object_property_listener_proc,
+}
+
+impl device_property_listener {
+ fn new(
+ device: AudioDeviceID,
+ property: AudioObjectPropertyAddress,
+ listener: audio_object_property_listener_proc,
+ ) -> Self {
+ Self {
+ device,
+ property,
+ listener,
+ }
+ }
+}
+
+#[derive(Debug, PartialEq)]
+struct CAChannelLabel(AudioChannelLabel);
+
+impl From<CAChannelLabel> for mixer::Channel {
+ fn from(label: CAChannelLabel) -> mixer::Channel {
+ use self::coreaudio_sys_utils::sys;
+ match label.0 {
+ sys::kAudioChannelLabel_Left => mixer::Channel::FrontLeft,
+ sys::kAudioChannelLabel_Right => mixer::Channel::FrontRight,
+ sys::kAudioChannelLabel_Center | sys::kAudioChannelLabel_Mono => {
+ mixer::Channel::FrontCenter
+ }
+ sys::kAudioChannelLabel_LFEScreen => mixer::Channel::LowFrequency,
+ sys::kAudioChannelLabel_LeftSurround => mixer::Channel::BackLeft,
+ sys::kAudioChannelLabel_RightSurround => mixer::Channel::BackRight,
+ sys::kAudioChannelLabel_LeftCenter => mixer::Channel::FrontLeftOfCenter,
+ sys::kAudioChannelLabel_RightCenter => mixer::Channel::FrontRightOfCenter,
+ sys::kAudioChannelLabel_CenterSurround => mixer::Channel::BackCenter,
+ sys::kAudioChannelLabel_LeftSurroundDirect => mixer::Channel::SideLeft,
+ sys::kAudioChannelLabel_RightSurroundDirect => mixer::Channel::SideRight,
+ sys::kAudioChannelLabel_TopCenterSurround => mixer::Channel::TopCenter,
+ sys::kAudioChannelLabel_VerticalHeightLeft => mixer::Channel::TopFrontLeft,
+ sys::kAudioChannelLabel_VerticalHeightCenter => mixer::Channel::TopFrontCenter,
+ sys::kAudioChannelLabel_VerticalHeightRight => mixer::Channel::TopFrontRight,
+ sys::kAudioChannelLabel_TopBackLeft => mixer::Channel::TopBackLeft,
+ sys::kAudioChannelLabel_TopBackCenter => mixer::Channel::TopBackCenter,
+ sys::kAudioChannelLabel_TopBackRight => mixer::Channel::TopBackRight,
+ _ => mixer::Channel::Silence,
+ }
+ }
+}
+
+fn set_notification_runloop() {
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioHardwarePropertyRunLoop,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ // Ask the HAL to manage its own thread for notifications by setting the run_loop to NULL.
+ // Otherwise the HAL may use the main thread to fire notifications.
+ let run_loop: CFRunLoopRef = ptr::null_mut();
+ let size = mem::size_of::<CFRunLoopRef>();
+ let status =
+ audio_object_set_property_data(kAudioObjectSystemObject, &address, size, &run_loop);
+ if status != NO_ERR {
+ cubeb_log!("Could not make global CoreAudio notifications use their own thread.");
+ }
+}
+
+fn create_device_info(devid: AudioDeviceID, devtype: DeviceType) -> Option<device_info> {
+ assert_ne!(devid, kAudioObjectSystemObject);
+
+ let mut flags = match devtype {
+ DeviceType::INPUT => device_flags::DEV_INPUT,
+ DeviceType::OUTPUT => device_flags::DEV_OUTPUT,
+ _ => panic!("Only accept input or output type"),
+ };
+
+ if devid == kAudioObjectUnknown {
+ cubeb_log!("Using the system default device");
+ flags |= device_flags::DEV_SELECTED_DEFAULT;
+ get_default_device(devtype).map(|id| device_info { id, flags })
+ } else {
+ Some(device_info { id: devid, flags })
+ }
+}
+
+fn create_stream_description(stream_params: &StreamParams) -> Result<AudioStreamBasicDescription> {
+ assert!(stream_params.rate() > 0);
+ assert!(stream_params.channels() > 0);
+
+ let mut desc = AudioStreamBasicDescription::default();
+
+ match stream_params.format() {
+ SampleFormat::S16LE => {
+ desc.mBitsPerChannel = 16;
+ desc.mFormatFlags = kAudioFormatFlagIsSignedInteger;
+ }
+ SampleFormat::S16BE => {
+ desc.mBitsPerChannel = 16;
+ desc.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsBigEndian;
+ }
+ SampleFormat::Float32LE => {
+ desc.mBitsPerChannel = 32;
+ desc.mFormatFlags = kAudioFormatFlagIsFloat;
+ }
+ SampleFormat::Float32BE => {
+ desc.mBitsPerChannel = 32;
+ desc.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsBigEndian;
+ }
+ _ => {
+ return Err(Error::invalid_format());
+ }
+ }
+
+ desc.mFormatID = kAudioFormatLinearPCM;
+ desc.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
+ desc.mSampleRate = f64::from(stream_params.rate());
+ desc.mChannelsPerFrame = stream_params.channels();
+
+ desc.mBytesPerFrame = (desc.mBitsPerChannel / 8) * desc.mChannelsPerFrame;
+ desc.mFramesPerPacket = 1;
+ desc.mBytesPerPacket = desc.mBytesPerFrame * desc.mFramesPerPacket;
+
+ desc.mReserved = 0;
+
+ Ok(desc)
+}
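Editor's note: a tiny worked example (not part of the patch) of the packed linear-PCM bookkeeping above, for a hypothetical 48 kHz stereo Float32LE stream: 32 bits per channel gives 4 bytes per sample, 8 bytes per frame, and, with one frame per packet, 8 bytes per packet.

```rust
fn main() {
    let bits_per_channel: u32 = 32; // SampleFormat::Float32LE
    let channels_per_frame: u32 = 2; // stereo
    let bytes_per_frame = (bits_per_channel / 8) * channels_per_frame;
    let frames_per_packet: u32 = 1; // linear PCM uses one frame per packet here
    let bytes_per_packet = bytes_per_frame * frames_per_packet;
    assert_eq!(bytes_per_frame, 8);
    assert_eq!(bytes_per_packet, 8);
}
```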
+
+fn set_volume(unit: AudioUnit, volume: f32) -> Result<()> {
+ assert!(!unit.is_null());
+ let r = audio_unit_set_parameter(
+ unit,
+ kHALOutputParam_Volume,
+ kAudioUnitScope_Global,
+ 0,
+ volume,
+ 0,
+ );
+ if r == NO_ERR {
+ Ok(())
+ } else {
+ cubeb_log!("AudioUnitSetParameter/kHALOutputParam_Volume rv={}", r);
+ Err(Error::error())
+ }
+}
+
+fn get_volume(unit: AudioUnit) -> Result<f32> {
+ assert!(!unit.is_null());
+ let mut volume: f32 = 0.0;
+ let r = audio_unit_get_parameter(
+ unit,
+ kHALOutputParam_Volume,
+ kAudioUnitScope_Global,
+ 0,
+ &mut volume,
+ );
+ if r == NO_ERR {
+ Ok(volume)
+ } else {
+ cubeb_log!("AudioUnitGetParameter/kHALOutputParam_Volume rv={}", r);
+ Err(Error::error())
+ }
+}
+
+fn set_input_mute(unit: AudioUnit, mute: bool) -> Result<()> {
+ assert!(!unit.is_null());
+ let mute: u32 = mute.into();
+ let mut old_mute: u32 = 0;
+ let r = audio_unit_get_property(
+ unit,
+ kAUVoiceIOProperty_MuteOutput,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &mut old_mute,
+ &mut mem::size_of::<u32>(),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitGetProperty/kAUVoiceIOProperty_MuteOutput rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+ if old_mute == mute {
+ return Ok(());
+ }
+ let r = audio_unit_set_property(
+ unit,
+ kAUVoiceIOProperty_MuteOutput,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &mute,
+ mem::size_of::<u32>(),
+ );
+ if r == NO_ERR {
+ Ok(())
+ } else {
+ cubeb_log!(
+ "AudioUnitSetProperty/kAUVoiceIOProperty_MuteOutput rv={}",
+ r
+ );
+ Err(Error::error())
+ }
+}
+
+fn set_input_processing_params(unit: AudioUnit, params: InputProcessingParams) -> Result<()> {
+ assert!(!unit.is_null());
+ let aec = params.contains(InputProcessingParams::ECHO_CANCELLATION);
+ let ns = params.contains(InputProcessingParams::NOISE_SUPPRESSION);
+ let agc = params.contains(InputProcessingParams::AUTOMATIC_GAIN_CONTROL);
+
+ // AEC and NS are active as soon as VPIO is not bypassed, therefore the only combinations
+ // of those we can explicitly support are {} and {aec, ns}.
+ if aec != ns {
+ // No control to turn on AEC without NS or vice versa.
+ return Err(Error::error());
+ }
+
+ let mut old_agc: u32 = 0;
+ let r = audio_unit_get_property(
+ unit,
+ kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &mut old_agc,
+ &mut mem::size_of::<u32>(),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitGetProperty/kAUVoiceIOProperty_VoiceProcessingEnableAGC rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+
+ if (old_agc == 1) != agc {
+ let agc = u32::from(agc);
+ let r = audio_unit_set_property(
+ unit,
+ kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &agc,
+ mem::size_of::<u32>(),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitSetProperty/kAUVoiceIOProperty_VoiceProcessingEnableAGC rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+ }
+
+ let mut old_bypass: u32 = 0;
+ let r = audio_unit_get_property(
+ unit,
+ kAUVoiceIOProperty_BypassVoiceProcessing,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &mut old_bypass,
+ &mut mem::size_of::<u32>(),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitGetProperty/kAUVoiceIOProperty_BypassVoiceProcessing rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+
+ let bypass = u32::from(!aec);
+ if old_bypass != bypass {
+ let r = audio_unit_set_property(
+ unit,
+ kAUVoiceIOProperty_BypassVoiceProcessing,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &bypass,
+ mem::size_of::<u32>(),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitSetProperty/kAUVoiceIOProperty_BypassVoiceProcessing rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+ }
+
+ Ok(())
+}
+
+fn minimum_resampling_input_frames(
+ input_rate: f64,
+ output_rate: f64,
+ output_frames: usize,
+) -> usize {
+ assert!(!approx_eq!(f64, input_rate, 0_f64));
+ assert!(!approx_eq!(f64, output_rate, 0_f64));
+ if approx_eq!(f64, input_rate, output_rate) {
+ return output_frames;
+ }
+ (input_rate * output_frames as f64 / output_rate).ceil() as usize
+}
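Editor's note: a worked example (not part of the patch) of the resampling math above. Producing 512 frames at 48 kHz from a 44.1 kHz input needs ceil(44100 × 512 / 48000) = ceil(470.4) = 471 input frames.

```rust
fn main() {
    let input_rate = 44_100.0_f64;
    let output_rate = 48_000.0_f64;
    let output_frames = 512_usize;
    let needed = (input_rate * output_frames as f64 / output_rate).ceil() as usize;
    assert_eq!(needed, 471);
}
```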
+
+fn audiounit_make_silent(io_data: &AudioBuffer) {
+ assert!(!io_data.mData.is_null());
+ let bytes = unsafe {
+ let ptr = io_data.mData as *mut u8;
+ let len = io_data.mDataByteSize as usize;
+ slice::from_raw_parts_mut(ptr, len)
+ };
+ for data in bytes.iter_mut() {
+ *data = 0;
+ }
+}
+
+extern "C" fn audiounit_input_callback(
+ user_ptr: *mut c_void,
+ flags: *mut AudioUnitRenderActionFlags,
+ tstamp: *const AudioTimeStamp,
+ bus: u32,
+ input_frames: u32,
+ _: *mut AudioBufferList,
+) -> OSStatus {
+ enum ErrorHandle {
+ Return(OSStatus),
+ Reinit,
+ }
+
+ assert!(input_frames > 0);
+ assert_eq!(bus, AU_IN_BUS);
+
+ assert!(!user_ptr.is_null());
+ let stm = unsafe { &mut *(user_ptr as *mut AudioUnitStream) };
+ let using_voice_processing_unit = stm.core_stream_data.using_voice_processing_unit();
+
+ if unsafe { (*tstamp).mFlags & kAudioTimeStampHostTimeValid } != 0 {
+ let now = unsafe { mach_absolute_time() };
+ let input_latency_frames = compute_input_latency(stm, unsafe { (*tstamp).mHostTime }, now);
+ stm.total_input_latency_frames
+ .store(input_latency_frames, Ordering::SeqCst);
+ }
+
+ if stm.stopped.load(Ordering::SeqCst) {
+ cubeb_log!("({:p}) input stopped", stm as *const AudioUnitStream);
+ return NO_ERR;
+ }
+
+ let handler = |stm: &mut AudioUnitStream,
+ flags: *mut AudioUnitRenderActionFlags,
+ tstamp: *const AudioTimeStamp,
+ bus: u32,
+ input_frames: u32|
+ -> ErrorHandle {
+ let input_buffer_manager = stm.core_stream_data.input_buffer_manager.as_mut().unwrap();
+ assert_eq!(
+ stm.core_stream_data.stm_ptr,
+ user_ptr as *const AudioUnitStream
+ );
+
+ // `flags` and `tstamp` must be non-null so they can be converted into references.
+ assert!(!flags.is_null());
+ let flags = unsafe { &mut (*flags) };
+ assert!(!tstamp.is_null());
+ let tstamp = unsafe { &(*tstamp) };
+
+ // Create the AudioBufferList to store input.
+ let mut input_buffer_list = AudioBufferList::default();
+ input_buffer_list.mBuffers[0].mDataByteSize =
+ stm.core_stream_data.input_dev_desc.mBytesPerFrame * input_frames;
+ input_buffer_list.mBuffers[0].mData = ptr::null_mut();
+ input_buffer_list.mBuffers[0].mNumberChannels =
+ stm.core_stream_data.input_dev_desc.mChannelsPerFrame;
+ input_buffer_list.mNumberBuffers = 1;
+
+ debug_assert!(!stm.core_stream_data.input_unit.is_null());
+ let status = audio_unit_render(
+ stm.core_stream_data.input_unit,
+ flags,
+ tstamp,
+ bus,
+ input_frames,
+ &mut input_buffer_list,
+ );
+ if (status != NO_ERR)
+ && (status != kAudioUnitErr_CannotDoInCurrentContext
+ || stm.core_stream_data.output_unit.is_null())
+ {
+ return ErrorHandle::Return(status);
+ }
+ let handle = if status == kAudioUnitErr_CannotDoInCurrentContext {
+ assert!(!stm.core_stream_data.output_unit.is_null());
+ // kAudioUnitErr_CannotDoInCurrentContext is returned when using a BT
+ // headset and the profile is changed from A2DP to HFP/HSP. The previous
+ // output device is no longer valid and must be reset.
+ // For now, report that no error occurred and feed silence; the stream will be
+ // resumed once the reinit has completed.
+ ErrorHandle::Reinit
+ } else {
+ assert_eq!(status, NO_ERR);
+ input_buffer_manager
+ .push_data(input_buffer_list.mBuffers[0].mData, input_frames as usize);
+ ErrorHandle::Return(status)
+ };
+
+ // Full Duplex. We'll call data_callback in the AudioUnit output callback. Record this
+ // callback for logging.
+ if !stm.core_stream_data.output_unit.is_null() {
+ let input_callback_data = InputCallbackData {
+ bytes: input_buffer_list.mBuffers[0].mDataByteSize,
+ rendered_frames: input_frames,
+ total_available: input_buffer_manager.available_frames(),
+ channels: input_buffer_list.mBuffers[0].mNumberChannels,
+ num_buf: input_buffer_list.mNumberBuffers,
+ };
+ stm.core_stream_data
+ .input_logging
+ .as_mut()
+ .unwrap()
+ .push(input_callback_data);
+ return handle;
+ }
+
+ cubeb_alogv!(
+ "({:p}) input: buffers {}, size {}, channels {}, rendered frames {}, total frames {}.",
+ stm.core_stream_data.stm_ptr,
+ input_buffer_list.mNumberBuffers,
+ input_buffer_list.mBuffers[0].mDataByteSize,
+ input_buffer_list.mBuffers[0].mNumberChannels,
+ input_frames,
+ input_buffer_manager.available_frames()
+ );
+
+ // Input only. Call the user callback through resampler.
+ // Resampler will deliver input buffer in the correct rate.
+ assert!(input_frames as usize <= input_buffer_manager.available_frames());
+ stm.frames_read.fetch_add(
+ input_buffer_manager.available_frames(),
+ atomic::Ordering::SeqCst,
+ );
+ let mut total_input_frames = input_buffer_manager.available_frames() as i64;
+ let input_buffer =
+ input_buffer_manager.get_linear_data(input_buffer_manager.available_frames());
+ let outframes = stm.core_stream_data.resampler.fill(
+ input_buffer,
+ &mut total_input_frames,
+ ptr::null_mut(),
+ 0,
+ );
+ if outframes < 0 {
+ stm.stopped.store(true, Ordering::SeqCst);
+ stm.notify_state_changed(State::Error);
+ let queue = stm.queue.clone();
+ // Use a new thread, through the queue, to avoid a deadlock when calling the
+ // AudioOutputUnitStop method from inside the render callback
+ queue.run_async(move || {
+ stm.core_stream_data.stop_audiounits();
+ });
+ return handle;
+ }
+ if outframes < total_input_frames {
+ stm.draining.store(true, Ordering::SeqCst);
+ }
+
+ handle
+ };
+
+ // If the stream is drained, do nothing.
+ let handle = if !stm.draining.load(Ordering::SeqCst) {
+ handler(stm, flags, tstamp, bus, input_frames)
+ } else {
+ ErrorHandle::Return(NO_ERR)
+ };
+
+ // If the input (input-only stream) or the output is drained (duplex stream),
+ // cancel this callback. Note that for voice processing cases (a single unit),
+ // the output callback handles stopping the unit and notifying of state.
+ if !using_voice_processing_unit && stm.draining.load(Ordering::SeqCst) {
+ let r = stop_audiounit(stm.core_stream_data.input_unit);
+ assert!(r.is_ok());
+ // Only fire state-changed callback for input-only stream.
+ // The state-changed callback for the duplex stream is fired in the output callback.
+ if stm.core_stream_data.output_unit.is_null() {
+ stm.notify_state_changed(State::Drained);
+ }
+ }
+
+ match handle {
+ ErrorHandle::Reinit => {
+ stm.reinit_async();
+ NO_ERR
+ }
+ ErrorHandle::Return(s) => s,
+ }
+}
+
+fn host_time_to_ns(host_time: u64) -> u64 {
+ let mut rv: f64 = host_time as f64;
+ rv *= HOST_TIME_TO_NS_RATIO.0 as f64;
+ rv /= HOST_TIME_TO_NS_RATIO.1 as f64;
+ rv as u64
+}
+
+fn compute_output_latency(stm: &AudioUnitStream, audio_output_time: u64, now: u64) -> u32 {
+ const NS2S: u64 = 1_000_000_000;
+ let output_hw_rate = stm.core_stream_data.output_dev_desc.mSampleRate as u64;
+ let fixed_latency_ns =
+ (stm.output_device_latency_frames.load(Ordering::SeqCst) as u64 * NS2S) / output_hw_rate;
+ // The total output latency is the timestamp difference + the stream latency + the hardware
+ // latency.
+ let total_output_latency_ns =
+ fixed_latency_ns + host_time_to_ns(audio_output_time.saturating_sub(now));
+
+ (total_output_latency_ns * output_hw_rate / NS2S) as u32
+}
+
+fn compute_input_latency(stm: &AudioUnitStream, audio_input_time: u64, now: u64) -> u32 {
+ const NS2S: u64 = 1_000_000_000;
+ let input_hw_rate = stm.core_stream_data.input_dev_desc.mSampleRate as u64;
+ let fixed_latency_ns =
+ (stm.input_device_latency_frames.load(Ordering::SeqCst) as u64 * NS2S) / input_hw_rate;
+ // The total input latency is the timestamp difference + the stream latency +
+ // the hardware latency.
+ let total_input_latency_ns =
+ host_time_to_ns(now.saturating_sub(audio_input_time)) + fixed_latency_ns;
+
+ (total_input_latency_ns * input_hw_rate / NS2S) as u32
+}
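Editor's note: a worked example (not part of the patch) of the latency arithmetic above, assuming a 1:1 mach timebase (host time already in nanoseconds, as on Intel Macs) and a 48 kHz device with a fixed device-plus-stream latency of 192 frames and a 2 ms timestamp difference.

```rust
fn main() {
    const NS2S: u64 = 1_000_000_000;
    let hw_rate: u64 = 48_000;
    let device_latency_frames: u64 = 192; // fixed device + stream latency
    let fixed_latency_ns = device_latency_frames * NS2S / hw_rate; // 4 ms
    let timestamp_diff_ns: u64 = 2_000_000; // now - audio_input_time, 2 ms
    let total_ns = fixed_latency_ns + timestamp_diff_ns;
    let total_frames = total_ns * hw_rate / NS2S;
    assert_eq!(total_frames, 288); // 6 ms expressed at 48 kHz
}
```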
+
+extern "C" fn audiounit_output_callback(
+ user_ptr: *mut c_void,
+ flags: *mut AudioUnitRenderActionFlags,
+ tstamp: *const AudioTimeStamp,
+ bus: u32,
+ output_frames: u32,
+ out_buffer_list: *mut AudioBufferList,
+) -> OSStatus {
+ assert_eq!(bus, AU_OUT_BUS);
+ assert!(!out_buffer_list.is_null());
+
+ assert!(!user_ptr.is_null());
+ let stm = unsafe { &mut *(user_ptr as *mut AudioUnitStream) };
+
+ let out_buffer_list_ref = unsafe { &mut (*out_buffer_list) };
+ assert_eq!(out_buffer_list_ref.mNumberBuffers, 1);
+ let buffers = unsafe {
+ let ptr = out_buffer_list_ref.mBuffers.as_mut_ptr();
+ let len = out_buffer_list_ref.mNumberBuffers as usize;
+ slice::from_raw_parts_mut(ptr, len)
+ };
+
+ if stm.stopped.load(Ordering::SeqCst) {
+ cubeb_alog!("({:p}) output stopped.", stm as *const AudioUnitStream);
+ audiounit_make_silent(&buffers[0]);
+ return NO_ERR;
+ }
+
+ if stm.draining.load(Ordering::SeqCst) {
+ // Cancel the output callback only. For duplex stream,
+ // the input callback will be cancelled in its own callback.
+ let r = stop_audiounit(stm.core_stream_data.output_unit);
+ assert!(r.is_ok());
+ stm.notify_state_changed(State::Drained);
+ audiounit_make_silent(&buffers[0]);
+ return NO_ERR;
+ }
+
+ let now = unsafe { mach_absolute_time() };
+
+ if unsafe { (*tstamp).mFlags & kAudioTimeStampHostTimeValid } != 0 {
+ let output_latency_frames =
+ compute_output_latency(stm, unsafe { (*tstamp).mHostTime }, now);
+ stm.total_output_latency_frames
+ .store(output_latency_frames, Ordering::SeqCst);
+ }
+ // Get output buffer
+ let output_buffer = match stm.core_stream_data.mixer.as_mut() {
+ None => buffers[0].mData,
+ Some(mixer) => {
+ // If remixing needs to occur, we can't work directly in our final destination
+ // buffer, as its data may be overwritten or it may be too small to start with.
+ mixer.update_buffer_size(output_frames as usize);
+ mixer.get_buffer_mut_ptr() as *mut c_void
+ }
+ };
+
+ let prev_frames_written = stm.frames_written.load(Ordering::SeqCst);
+
+ stm.frames_written
+ .fetch_add(output_frames as usize, Ordering::SeqCst);
+
+ // Also get the input buffer if the stream is duplex
+ let (input_buffer, mut input_frames) = if !stm.core_stream_data.input_unit.is_null() {
+ let input_logging = &mut stm.core_stream_data.input_logging.as_mut().unwrap();
+ if input_logging.is_empty() {
+ cubeb_alogv!("no audio input data in output callback");
+ } else {
+ while let Some(input_callback_data) = input_logging.pop() {
+ cubeb_alogv!(
+ "input: buffers {}, size {}, channels {}, rendered frames {}, total frames {}.",
+ input_callback_data.num_buf,
+ input_callback_data.bytes,
+ input_callback_data.channels,
+ input_callback_data.rendered_frames,
+ input_callback_data.total_available
+ );
+ }
+ }
+ let input_buffer_manager = stm.core_stream_data.input_buffer_manager.as_mut().unwrap();
+ assert_ne!(stm.core_stream_data.input_dev_desc.mChannelsPerFrame, 0);
+ // If the output callback came first and this is a duplex stream, we need to
+ // fill in some additional silence in the resampler.
+ // Otherwise, if we got more callbacks in a row than expected, or we're
+ // currently switching devices, we add some silence as well to compensate for
+ // the input data we're lacking.
+ let input_frames_needed = minimum_resampling_input_frames(
+ stm.core_stream_data.input_dev_desc.mSampleRate,
+ f64::from(stm.core_stream_data.output_stream_params.rate()),
+ output_frames as usize,
+ );
+ let buffered_input_frames = input_buffer_manager.available_frames();
+ // If instead the input has already buffered a lot because the output started
+ // late, we need to trim the input buffer.
+ if prev_frames_written == 0 && buffered_input_frames > input_frames_needed {
+ input_buffer_manager.trim(input_frames_needed);
+ let popped_frames = buffered_input_frames - input_frames_needed;
+ cubeb_alog!("Dropping {} frames in input buffer.", popped_frames);
+ }
+
+ let input_frames = if input_frames_needed > buffered_input_frames
+ && (stm.switching_device.load(Ordering::SeqCst)
+ || stm.reinit_pending.load(Ordering::SeqCst)
+ || stm.frames_read.load(Ordering::SeqCst) == 0)
+ {
+ // The silent frames will be inserted in `get_linear_data` below.
+ let silent_frames_to_push = input_frames_needed - buffered_input_frames;
+ cubeb_alog!(
+ "({:p}) Missing Frames: {} will append {} frames of input silence.",
+ stm.core_stream_data.stm_ptr,
+ if stm.frames_read.load(Ordering::SeqCst) == 0 {
+ "input hasn't started,"
+ } else if stm.switching_device.load(Ordering::SeqCst) {
+ "device switching,"
+ } else {
+ "reinit pending,"
+ },
+ silent_frames_to_push
+ );
+ input_frames_needed
+ } else {
+ buffered_input_frames
+ };
+
+ stm.frames_read.fetch_add(input_frames, Ordering::SeqCst);
+ (
+ input_buffer_manager.get_linear_data(input_frames),
+ input_frames as i64,
+ )
+ } else {
+ (ptr::null_mut::<c_void>(), 0)
+ };
+
+ cubeb_alogv!(
+ "({:p}) output: buffers {}, size {}, channels {}, frames {}.",
+ stm as *const AudioUnitStream,
+ buffers.len(),
+ buffers[0].mDataByteSize,
+ buffers[0].mNumberChannels,
+ output_frames
+ );
+
+ let outframes = stm.core_stream_data.resampler.fill(
+ input_buffer,
+ if input_buffer.is_null() {
+ ptr::null_mut()
+ } else {
+ &mut input_frames
+ },
+ output_buffer,
+ i64::from(output_frames),
+ );
+
+ if outframes < 0 || outframes > i64::from(output_frames) {
+ stm.stopped.store(true, Ordering::SeqCst);
+ stm.notify_state_changed(State::Error);
+ let queue = stm.queue.clone();
+ // Use a new thread, through the queue, to avoid a deadlock when calling the
+ // AudioOutputUnitStop method from inside the render callback
+ queue.run_async(move || {
+ stm.core_stream_data.stop_audiounits();
+ });
+ audiounit_make_silent(&buffers[0]);
+ return NO_ERR;
+ }
+
+ stm.draining
+ .store(outframes < i64::from(output_frames), Ordering::SeqCst);
+ stm.output_callback_timing_data_write
+ .write(OutputCallbackTimingData {
+ frames_queued: stm.frames_queued,
+ timestamp: now,
+ buffer_size: outframes as u64,
+ });
+
+ stm.frames_queued += outframes as u64;
+
+ // Post process output samples.
+ if stm.draining.load(Ordering::SeqCst) {
+ // Clear missing frames (silence)
+ let frames_to_bytes = |frames: usize| -> usize {
+ let sample_size = cubeb_sample_size(stm.core_stream_data.output_stream_params.format());
+ let channel_count = stm.core_stream_data.output_stream_params.channels() as usize;
+ frames * sample_size * channel_count
+ };
+ let out_bytes = unsafe {
+ slice::from_raw_parts_mut(
+ output_buffer as *mut u8,
+ frames_to_bytes(output_frames as usize),
+ )
+ };
+ let start = frames_to_bytes(outframes as usize);
+ for byte in out_bytes.iter_mut().skip(start) {
+ *byte = 0;
+ }
+ }
+
+ // Mixing
+ if stm.core_stream_data.mixer.is_some() {
+ assert!(
+ buffers[0].mDataByteSize
+ >= stm.core_stream_data.output_dev_desc.mBytesPerFrame * output_frames
+ );
+ stm.core_stream_data.mixer.as_mut().unwrap().mix(
+ output_frames as usize,
+ buffers[0].mData,
+ buffers[0].mDataByteSize as usize,
+ );
+ }
+ NO_ERR
+}
+
+#[allow(clippy::cognitive_complexity)]
+extern "C" fn audiounit_property_listener_callback(
+ id: AudioObjectID,
+ address_count: u32,
+ addresses: *const AudioObjectPropertyAddress,
+ user: *mut c_void,
+) -> OSStatus {
+ assert_ne!(address_count, 0);
+
+ let stm = unsafe { &mut *(user as *mut AudioUnitStream) };
+ let addrs = unsafe { slice::from_raw_parts(addresses, address_count as usize) };
+ if stm.switching_device.load(Ordering::SeqCst) {
+ cubeb_log!(
+ "Switching is already taking place. Skipping event for device {}",
+ id
+ );
+ return NO_ERR;
+ }
+ stm.switching_device.store(true, Ordering::SeqCst);
+
+ let mut explicit_device_dead = false;
+
+ cubeb_log!(
+ "({:p}) Handling {} device changed events for device {}",
+ stm as *const AudioUnitStream,
+ address_count,
+ id
+ );
+ for (i, addr) in addrs.iter().enumerate() {
+ let p = PropertySelector::from(addr.mSelector);
+ cubeb_log!("Event #{}: {}", i, p);
+ assert_ne!(p, PropertySelector::Unknown);
+ if p == PropertySelector::DeviceIsAlive {
+ explicit_device_dead = true;
+ }
+ }
+
+ // Handle the events
+ if explicit_device_dead {
+ cubeb_log!("The user-selected input or output device is dead, entering error state");
+ stm.stopped.store(true, Ordering::SeqCst);
+
+ // Use a different thread, through the queue, to avoid a deadlock when calling the
+ // Get/SetProperties methods from inside the notify callback
+ stm.queue.clone().run_async(move || {
+ stm.core_stream_data.stop_audiounits();
+ stm.close_on_error();
+ });
+ return NO_ERR;
+ }
+ {
+ let callback = stm.device_changed_callback.lock().unwrap();
+ if let Some(device_changed_callback) = *callback {
+ unsafe {
+ device_changed_callback(stm.user_ptr);
+ }
+ }
+ }
+ stm.reinit_async();
+
+ NO_ERR
+}
+
+fn get_default_device(devtype: DeviceType) -> Option<AudioObjectID> {
+ match get_default_device_id(devtype) {
+ Err(e) => {
+ cubeb_log!("Cannot get default {:?} device. Error: {}", devtype, e);
+ None
+ }
+ Ok(id) if id == kAudioObjectUnknown => {
+ cubeb_log!("Get an invalid default {:?} device: {}", devtype, id);
+ None
+ }
+ Ok(id) => Some(id),
+ }
+}
+
+fn get_default_device_id(devtype: DeviceType) -> std::result::Result<AudioObjectID, OSStatus> {
+ let address = get_property_address(
+ match devtype {
+ DeviceType::INPUT => Property::HardwareDefaultInputDevice,
+ DeviceType::OUTPUT => Property::HardwareDefaultOutputDevice,
+ _ => panic!("Unsupported type"),
+ },
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ );
+
+ let mut devid: AudioDeviceID = kAudioObjectUnknown;
+ let mut size = mem::size_of::<AudioDeviceID>();
+ let status =
+ audio_object_get_property_data(kAudioObjectSystemObject, &address, &mut size, &mut devid);
+ if status == NO_ERR {
+ Ok(devid)
+ } else {
+ Err(status)
+ }
+}
+
+fn audiounit_convert_channel_layout(layout: &AudioChannelLayout) -> Result<Vec<mixer::Channel>> {
+ if layout.mChannelLayoutTag != kAudioChannelLayoutTag_UseChannelDescriptions {
+ // kAudioChannelLayoutTag_UseChannelBitmap
+ // kAudioChannelLayoutTag_Mono
+ // kAudioChannelLayoutTag_Stereo
+ // ....
+ cubeb_log!("Only handling UseChannelDescriptions for now.\n");
+ return Err(Error::error());
+ }
+
+ let channel_descriptions = unsafe {
+ slice::from_raw_parts(
+ layout.mChannelDescriptions.as_ptr(),
+ layout.mNumberChannelDescriptions as usize,
+ )
+ };
+
+ let mut channels = Vec::with_capacity(layout.mNumberChannelDescriptions as usize);
+ for description in channel_descriptions {
+ let label = CAChannelLabel(description.mChannelLabel);
+ channels.push(label.into());
+ }
+
+ Ok(channels)
+}
+
+fn audiounit_get_preferred_channel_layout(output_unit: AudioUnit) -> Result<Vec<mixer::Channel>> {
+ let mut rv = NO_ERR;
+ let mut size: usize = 0;
+ rv = audio_unit_get_property_info(
+ output_unit,
+ kAudioDevicePropertyPreferredChannelLayout,
+ kAudioUnitScope_Output,
+ AU_OUT_BUS,
+ &mut size,
+ None,
+ );
+ if rv != NO_ERR {
+ cubeb_log!(
+ "AudioUnitGetPropertyInfo/kAudioDevicePropertyPreferredChannelLayout rv={}",
+ rv
+ );
+ return Err(Error::error());
+ }
+ debug_assert!(size > 0);
+
+ let mut layout = make_sized_audio_channel_layout(size);
+ rv = audio_unit_get_property(
+ output_unit,
+ kAudioDevicePropertyPreferredChannelLayout,
+ kAudioUnitScope_Output,
+ AU_OUT_BUS,
+ layout.as_mut(),
+ &mut size,
+ );
+ if rv != NO_ERR {
+ cubeb_log!(
+ "AudioUnitGetProperty/kAudioDevicePropertyPreferredChannelLayout rv={}",
+ rv
+ );
+ return Err(Error::error());
+ }
+
+ audiounit_convert_channel_layout(layout.as_ref())
+}
+
+// This is for the output AudioUnit only. Calling this on an input-only AudioUnit is
+// prone to intermittent crashes.
+fn audiounit_get_current_channel_layout(output_unit: AudioUnit) -> Result<Vec<mixer::Channel>> {
+ let mut rv = NO_ERR;
+ let mut size: usize = 0;
+ rv = audio_unit_get_property_info(
+ output_unit,
+ kAudioUnitProperty_AudioChannelLayout,
+ kAudioUnitScope_Output,
+ AU_OUT_BUS,
+ &mut size,
+ None,
+ );
+ if rv != NO_ERR {
+ cubeb_log!(
+ "AudioUnitGetPropertyInfo/kAudioUnitProperty_AudioChannelLayout rv={}",
+ rv
+ );
+ return Err(Error::error());
+ }
+ debug_assert!(size > 0);
+
+ let mut layout = make_sized_audio_channel_layout(size);
+ rv = audio_unit_get_property(
+ output_unit,
+ kAudioUnitProperty_AudioChannelLayout,
+ kAudioUnitScope_Output,
+ AU_OUT_BUS,
+ layout.as_mut(),
+ &mut size,
+ );
+ if rv != NO_ERR {
+ cubeb_log!(
+ "AudioUnitGetProperty/kAudioUnitProperty_AudioChannelLayout rv={}",
+ rv
+ );
+ return Err(Error::error());
+ }
+
+ audiounit_convert_channel_layout(layout.as_ref())
+}
+
+fn get_channel_layout(output_unit: AudioUnit) -> Result<Vec<mixer::Channel>> {
+ audiounit_get_current_channel_layout(output_unit)
+ .or_else(|_| {
+ // The kAudioUnitProperty_AudioChannelLayout property isn't known before
+ // macOS 10.12; attempt another method.
+ cubeb_log!(
+ "Cannot get current channel layout for audiounit @ {:p}. Trying preferred channel layout.",
+ output_unit
+ );
+ audiounit_get_preferred_channel_layout(output_unit)
+ })
+}
+
+fn start_audiounit(unit: AudioUnit) -> Result<()> {
+ let status = audio_output_unit_start(unit);
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ cubeb_log!("Cannot start audiounit @ {:p}. Error: {}", unit, status);
+ Err(Error::error())
+ }
+}
+
+fn stop_audiounit(unit: AudioUnit) -> Result<()> {
+ let status = audio_output_unit_stop(unit);
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ cubeb_log!("Cannot stop audiounit @ {:p}. Error: {}", unit, status);
+ Err(Error::error())
+ }
+}
+
+fn create_audiounit(device: &device_info) -> Result<AudioUnit> {
+ assert!(device
+ .flags
+ .intersects(device_flags::DEV_INPUT | device_flags::DEV_OUTPUT));
+ assert!(!device
+ .flags
+ .contains(device_flags::DEV_INPUT | device_flags::DEV_OUTPUT));
+
+ let unit = create_blank_audiounit()?;
+ let mut bus = AU_OUT_BUS;
+
+ if device.flags.contains(device_flags::DEV_INPUT) {
+ // Input only.
+ if let Err(e) = enable_audiounit_scope(unit, DeviceType::INPUT, true) {
+ cubeb_log!("Failed to enable audiounit input scope. Error: {}", e);
+ dispose_audio_unit(unit);
+ return Err(Error::error());
+ }
+ if let Err(e) = enable_audiounit_scope(unit, DeviceType::OUTPUT, false) {
+ cubeb_log!("Failed to disable audiounit output scope. Error: {}", e);
+ dispose_audio_unit(unit);
+ return Err(Error::error());
+ }
+ bus = AU_IN_BUS;
+ }
+
+ if device.flags.contains(device_flags::DEV_OUTPUT) {
+ // Output only.
+ if let Err(e) = enable_audiounit_scope(unit, DeviceType::OUTPUT, true) {
+ cubeb_log!("Failed to enable audiounit output scope. Error: {}", e);
+ dispose_audio_unit(unit);
+ return Err(Error::error());
+ }
+ if let Err(e) = enable_audiounit_scope(unit, DeviceType::INPUT, false) {
+ cubeb_log!("Failed to disable audiounit input scope. Error: {}", e);
+ dispose_audio_unit(unit);
+ return Err(Error::error());
+ }
+ bus = AU_OUT_BUS;
+ }
+
+ if let Err(e) = set_device_to_audiounit(unit, device.id, bus) {
+ cubeb_log!(
+ "Failed to set device {} to the created audiounit. Error: {}",
+ device.id,
+ e
+ );
+ dispose_audio_unit(unit);
+ return Err(Error::error());
+ }
+
+ Ok(unit)
+}
+
+fn create_voiceprocessing_audiounit(
+ in_device: &device_info,
+ out_device: &device_info,
+) -> Result<AudioUnit> {
+ assert!(in_device.flags.contains(device_flags::DEV_INPUT));
+ assert!(!in_device.flags.contains(device_flags::DEV_OUTPUT));
+ assert!(!out_device.flags.contains(device_flags::DEV_INPUT));
+ assert!(out_device.flags.contains(device_flags::DEV_OUTPUT));
+
+ let unit = create_typed_audiounit(kAudioUnitSubType_VoiceProcessingIO)?;
+
+ if let Err(e) = set_device_to_audiounit(unit, in_device.id, AU_IN_BUS) {
+ cubeb_log!(
+ "Failed to set in device {} to the created audiounit. Error: {}",
+ in_device.id,
+ e
+ );
+ dispose_audio_unit(unit);
+ return Err(Error::error());
+ }
+
+ if let Err(e) = set_device_to_audiounit(unit, out_device.id, AU_OUT_BUS) {
+ cubeb_log!(
+ "Failed to set out device {} to the created audiounit. Error: {}",
+ out_device.id,
+ e
+ );
+ dispose_audio_unit(unit);
+ return Err(Error::error());
+ }
+
+ Ok(unit)
+}
+
+fn enable_audiounit_scope(
+ unit: AudioUnit,
+ devtype: DeviceType,
+ enable_io: bool,
+) -> std::result::Result<(), OSStatus> {
+ assert!(!unit.is_null());
+
+ let enable = u32::from(enable_io);
+ let (scope, element) = match devtype {
+ DeviceType::INPUT => (kAudioUnitScope_Input, AU_IN_BUS),
+ DeviceType::OUTPUT => (kAudioUnitScope_Output, AU_OUT_BUS),
+ _ => panic!(
+ "Enable AudioUnit {:?} with unsupported type: {:?}",
+ unit, devtype
+ ),
+ };
+ let status = audio_unit_set_property(
+ unit,
+ kAudioOutputUnitProperty_EnableIO,
+ scope,
+ element,
+ &enable,
+ mem::size_of::<u32>(),
+ );
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ Err(status)
+ }
+}
+
+fn set_device_to_audiounit(
+ unit: AudioUnit,
+ device_id: AudioObjectID,
+ bus: AudioUnitElement,
+) -> std::result::Result<(), OSStatus> {
+ assert!(!unit.is_null());
+
+ let status = audio_unit_set_property(
+ unit,
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ bus,
+ &device_id,
+ mem::size_of::<AudioDeviceID>(),
+ );
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ Err(status)
+ }
+}
+
+fn create_typed_audiounit(sub_type: c_uint) -> Result<AudioUnit> {
+ let desc = AudioComponentDescription {
+ componentType: kAudioUnitType_Output,
+ componentSubType: sub_type,
+ componentManufacturer: kAudioUnitManufacturer_Apple,
+ componentFlags: 0,
+ componentFlagsMask: 0,
+ };
+
+ let comp = unsafe { AudioComponentFindNext(ptr::null_mut(), &desc) };
+ if comp.is_null() {
+ cubeb_log!("Could not find matching audio hardware.");
+ return Err(Error::error());
+ }
+ let mut unit: AudioUnit = ptr::null_mut();
+ let status = unsafe { AudioComponentInstanceNew(comp, &mut unit) };
+ if status == NO_ERR {
+ assert!(!unit.is_null());
+ Ok(unit)
+ } else {
+ cubeb_log!("Fail to get a new AudioUnit. Error: {}", status);
+ Err(Error::error())
+ }
+}
+
+fn create_blank_audiounit() -> Result<AudioUnit> {
+ #[cfg(not(target_os = "ios"))]
+ return create_typed_audiounit(kAudioUnitSubType_HALOutput);
+ #[cfg(target_os = "ios")]
+ return create_typed_audiounit(kAudioUnitSubType_RemoteIO);
+}
+
+fn get_buffer_size(unit: AudioUnit, devtype: DeviceType) -> std::result::Result<u32, OSStatus> {
+ assert!(!unit.is_null());
+ let (scope, element) = match devtype {
+ DeviceType::INPUT => (kAudioUnitScope_Output, AU_IN_BUS),
+ DeviceType::OUTPUT => (kAudioUnitScope_Input, AU_OUT_BUS),
+ _ => panic!(
+ "Get buffer size of AudioUnit {:?} with unsupported type: {:?}",
+ unit, devtype
+ ),
+ };
+ let mut frames: u32 = 0;
+ let mut size = mem::size_of::<u32>();
+ let status = audio_unit_get_property(
+ unit,
+ kAudioDevicePropertyBufferFrameSize,
+ scope,
+ element,
+ &mut frames,
+ &mut size,
+ );
+ if status == NO_ERR {
+ Ok(frames)
+ } else {
+ Err(status)
+ }
+}
+
+fn set_buffer_size(
+ unit: AudioUnit,
+ devtype: DeviceType,
+ frames: u32,
+) -> std::result::Result<(), OSStatus> {
+ assert!(!unit.is_null());
+ let (scope, element) = match devtype {
+ DeviceType::INPUT => (kAudioUnitScope_Output, AU_IN_BUS),
+ DeviceType::OUTPUT => (kAudioUnitScope_Input, AU_OUT_BUS),
+ _ => panic!(
+ "Set buffer size of AudioUnit {:?} with unsupported type: {:?}",
+ unit, devtype
+ ),
+ };
+ let status = audio_unit_set_property(
+ unit,
+ kAudioDevicePropertyBufferFrameSize,
+ scope,
+ element,
+ &frames,
+ mem::size_of_val(&frames),
+ );
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ Err(status)
+ }
+}
+
+#[allow(clippy::mutex_atomic)] // The mutex needs to be fed into Condvar::wait_timeout.
+fn set_buffer_size_sync(unit: AudioUnit, devtype: DeviceType, frames: u32) -> Result<()> {
+ let current_frames = get_buffer_size(unit, devtype).map_err(|e| {
+ cubeb_log!(
+ "Cannot get buffer size of AudioUnit {:?} for {:?}. Error: {}",
+ unit,
+ devtype,
+ e
+ );
+ Error::error()
+ })?;
+ if frames == current_frames {
+ cubeb_log!(
+ "The buffer frame size of AudioUnit {:?} for {:?} is already {}",
+ unit,
+ devtype,
+ frames
+ );
+ return Ok(());
+ }
+
+ let waiting_time = Duration::from_millis(100);
+ let pair = Arc::new((Mutex::new(false), Condvar::new()));
+ let mut pair2 = pair.clone();
+ let pair_ptr = &mut pair2;
+
+ assert_eq!(
+ audio_unit_add_property_listener(
+ unit,
+ kAudioDevicePropertyBufferFrameSize,
+ buffer_size_changed_callback,
+ pair_ptr,
+ ),
+ NO_ERR
+ );
+
+ let _teardown = finally(|| {
+ assert_eq!(
+ audio_unit_remove_property_listener_with_user_data(
+ unit,
+ kAudioDevicePropertyBufferFrameSize,
+ buffer_size_changed_callback,
+ pair_ptr,
+ ),
+ NO_ERR
+ );
+ });
+
+ set_buffer_size(unit, devtype, frames).map_err(|e| {
+ cubeb_log!(
+ "Failed to set buffer size for AudioUnit {:?} for {:?}. Error: {}",
+ unit,
+ devtype,
+ e
+ );
+ Error::error()
+ })?;
+
+ let (lock, cvar) = &*pair;
+ let changed = lock.lock().unwrap();
+ if !*changed {
+ let (chg, timeout_res) = cvar.wait_timeout(changed, waiting_time).unwrap();
+ if timeout_res.timed_out() {
+ cubeb_log!(
+ "Timed out for waiting the buffer frame size setting of AudioUnit {:?} for {:?}",
+ unit,
+ devtype
+ );
+ }
+ if !*chg {
+ return Err(Error::error());
+ }
+ }
+
+ let new_frames = get_buffer_size(unit, devtype).map_err(|e| {
+ cubeb_log!(
+ "Cannot get new buffer size of AudioUnit {:?} for {:?}. Error: {}",
+ unit,
+ devtype,
+ e
+ );
+ Error::error()
+ })?;
+ cubeb_log!(
+ "The new buffer frames size of AudioUnit {:?} for {:?} is {}",
+ unit,
+ devtype,
+ new_frames
+ );
+
+ extern "C" fn buffer_size_changed_callback(
+ in_client_data: *mut c_void,
+ _in_unit: AudioUnit,
+ in_property_id: AudioUnitPropertyID,
+ in_scope: AudioUnitScope,
+ in_element: AudioUnitElement,
+ ) {
+ if in_scope == 0 {
+ // filter out the callback for global scope.
+ return;
+ }
+ assert!(in_element == AU_IN_BUS || in_element == AU_OUT_BUS);
+ assert_eq!(in_property_id, kAudioDevicePropertyBufferFrameSize);
+ let pair = unsafe { &mut *(in_client_data as *mut Arc<(Mutex<bool>, Condvar)>) };
+ let (lock, cvar) = &**pair;
+ let mut changed = lock.lock().unwrap();
+ *changed = true;
+ cvar.notify_one();
+ }
+
+ Ok(())
+}
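Editor's note: the function above relies on a `Mutex<bool>` plus `Condvar` handshake with a timeout, so a missing kAudioDevicePropertyBufferFrameSize notification cannot block the caller forever. A self-contained sketch of that pattern (not part of the patch), with a spawned thread standing in for the property listener:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    // Stand-in for the buffer_size_changed_callback firing on another thread.
    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock().unwrap() = true;
        cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut changed = lock.lock().unwrap();
    while !*changed {
        let (guard, timeout) = cvar
            .wait_timeout(changed, Duration::from_millis(100))
            .unwrap();
        changed = guard;
        if timeout.timed_out() {
            break; // Give up, as set_buffer_size_sync does after logging a timeout.
        }
    }
    println!("buffer size change observed: {}", *changed);
}
```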
+
+fn convert_uint32_into_string(data: u32) -> CString {
+ let empty = CString::default();
+ if data == 0 {
+ return empty;
+ }
+
+ // Write out the u32's bytes from most significant to least, so a FourCC like 0x696D_6963 reads as "imic".
+ let mut buffer = vec![b'\x00'; 4]; // 4 bytes for uint32.
+ buffer[0] = (data >> 24) as u8;
+ buffer[1] = (data >> 16) as u8;
+ buffer[2] = (data >> 8) as u8;
+ buffer[3] = (data) as u8;
+
+ // CString::new() will consume the input byte vec and append a '\0' at the
+ // end. It fails if the input contains any interior 0 bytes; in that case we
+ // fall back to the empty string.
+ CString::new(buffer).unwrap_or(empty)
+}
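Editor's note: a small illustration (not part of the patch) of the FourCC conversion above: the bytes of the u32 are emitted most-significant first, so the source code 0x696D_6963 used by `get_custom_group_id` below renders as "imic".

```rust
fn main() {
    let data: u32 = 0x696D_6963; // "imic", the internal-microphone source
    let bytes = [
        (data >> 24) as u8,
        (data >> 16) as u8,
        (data >> 8) as u8,
        data as u8,
    ];
    assert_eq!(std::str::from_utf8(&bytes).unwrap(), "imic");
}
```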
+
+fn get_channel_count(
+ devid: AudioObjectID,
+ devtype: DeviceType,
+) -> std::result::Result<u32, OSStatus> {
+ assert_ne!(devid, kAudioObjectUnknown);
+
+ let mut streams = get_device_streams(devid, devtype)?;
+ let model_uid =
+ get_device_model_uid(devid, devtype).map_or_else(|_| String::new(), |s| s.into_string());
+
+ if devtype == DeviceType::INPUT {
+ // With VPIO, output devices may get a Tap that appears as extra input channels on the
+ // output device id. One could check whether the output device has a Tap enabled,
+ // but it is impossible to distinguish an output-only device from an input+output
+ // device. Corner cases have also been observed where the device does NOT have
+ // a Tap enabled but still exposes the extra input channels from the Tap.
+ // We check the terminal type of the input stream instead: the VPIO tap reports
+ // INPUT_UNDEFINED or an output type, so we explicitly ignore those and keep all other cases.
+ streams.retain(|stream| {
+ let terminal_type = get_stream_terminal_type(*stream);
+ if terminal_type.is_err() {
+ return true;
+ }
+
+ #[allow(non_upper_case_globals)]
+ match terminal_type.unwrap() {
+ kAudioStreamTerminalTypeMicrophone
+ | kAudioStreamTerminalTypeHeadsetMicrophone
+ | kAudioStreamTerminalTypeReceiverMicrophone => true,
+ kAudioStreamTerminalTypeUnknown => {
+ cubeb_log!("Unknown TerminalType for input stream. Ignoring its channels.");
+ false
+ }
+ t if [
+ kAudioStreamTerminalTypeSpeaker,
+ kAudioStreamTerminalTypeHeadphones,
+ kAudioStreamTerminalTypeLFESpeaker,
+ kAudioStreamTerminalTypeReceiverSpeaker,
+ ]
+ .contains(&t) =>
+ {
+ cubeb_log!(
+ "Output TerminalType {:#06X} for input stream. Ignoring its channels.",
+ t
+ );
+ false
+ }
+ INPUT_UNDEFINED => {
+ cubeb_log!(
+ "INPUT_UNDEFINED TerminalType for input stream. Ignoring its channels."
+ );
+ false
+ }
+ // The input tap stream on the Studio Display Speakers has a terminal type that
+ // is not clearly output-specific. We special-case it here.
+ EXTERNAL_DIGITAL_AUDIO_INTERFACE
+ if model_uid.contains(APPLE_STUDIO_DISPLAY_USB_ID) =>
+ {
+ false
+ }
+ // Note INPUT_UNDEFINED is 0x200 and INPUT_MICROPHONE is 0x201
+ t if (INPUT_MICROPHONE..OUTPUT_UNDEFINED).contains(&t) => true,
+ t if (OUTPUT_UNDEFINED..BIDIRECTIONAL_UNDEFINED).contains(&t) => false,
+ t if (BIDIRECTIONAL_UNDEFINED..TELEPHONY_UNDEFINED).contains(&t) => true,
+ t if (TELEPHONY_UNDEFINED..EXTERNAL_UNDEFINED).contains(&t) => true,
+ t => {
+ cubeb_log!("Unknown TerminalType {:#06X} for input stream.", t);
+ true
+ }
+ }
+ });
+ }
+
+ let mut count = 0;
+ for stream in streams {
+ if let Ok(format) = get_stream_virtual_format(stream) {
+ count += format.mChannelsPerFrame;
+ }
+ }
+ Ok(count)
+}
+
+fn get_range_of_sample_rates(
+ devid: AudioObjectID,
+ devtype: DeviceType,
+) -> std::result::Result<(f64, f64), String> {
+ let result = get_ranges_of_device_sample_rate(devid, devtype);
+ if let Err(e) = result {
+ return Err(format!("status {}", e));
+ }
+ let rates = result.unwrap();
+ if rates.is_empty() {
+ return Err(String::from("No data"));
+ }
+ let (mut min, mut max) = (std::f64::MAX, std::f64::MIN);
+ for rate in rates {
+ if rate.mMaximum > max {
+ max = rate.mMaximum;
+ }
+ if rate.mMinimum < min {
+ min = rate.mMinimum;
+ }
+ }
+ Ok((min, max))
+}
+
+fn get_fixed_latency(devid: AudioObjectID, devtype: DeviceType) -> u32 {
+ let device_latency = match get_device_latency(devid, devtype) {
+ Ok(latency) => latency,
+ Err(e) => {
+ cubeb_log!(
+ "Cannot get the device latency for device {} in {:?} scope. Error: {}",
+ devid,
+ devtype,
+ e
+ );
+ 0 // default device latency
+ }
+ };
+
+ let stream_latency = get_device_streams(devid, devtype).and_then(|streams| {
+ if streams.is_empty() {
+ cubeb_log!(
+ "No stream on device {} in {:?} scope!",
+ devid,
+ devtype
+ );
+ Ok(0) // default stream latency
+ } else {
+ get_stream_latency(streams[0])
+ }
+ }).map_err(|e| {
+ cubeb_log!(
+ "Cannot get the stream, or the latency of the first stream on device {} in {:?} scope. Error: {}",
+ devid,
+ devtype,
+ e
+ );
+ e
+ }).unwrap_or(0); // default stream latency
+
+ device_latency + stream_latency
+}
+
+#[allow(non_upper_case_globals)]
+fn get_device_group_id(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<CString, OSStatus> {
+ match get_device_transport_type(id, devtype) {
+ Ok(kAudioDeviceTransportTypeBuiltIn) => {
+ cubeb_log!(
+ "The transport type is {:?}",
+ convert_uint32_into_string(kAudioDeviceTransportTypeBuiltIn)
+ );
+ match get_custom_group_id(id, devtype) {
+ Some(id) => return Ok(id),
+ None => {
+ cubeb_log!("Getting model UID instead.");
+ }
+ };
+ }
+ Ok(trans_type) => {
+ cubeb_log!(
+ "The transport type is {:?}. Getting model UID instead.",
+ convert_uint32_into_string(trans_type)
+ );
+ }
+ Err(e) => {
+ cubeb_log!(
+ "Error: {} when getting transport type. Getting model UID instead.",
+ e
+ );
+ }
+ }
+
+ // Some devices (e.g. AirPods) might only set the model-uid in the global scope.
+ // The query might fail if the scope is input-only or output-only.
+ get_device_model_uid(id, devtype)
+ .or_else(|_| get_device_model_uid(id, DeviceType::INPUT | DeviceType::OUTPUT))
+ .map(|uid| uid.into_cstring())
+}
+
+fn get_custom_group_id(id: AudioDeviceID, devtype: DeviceType) -> Option<CString> {
+ const IMIC: u32 = 0x696D_6963; // "imic" (internal microphone)
+ const ISPK: u32 = 0x6973_706B; // "ispk" (internal speaker)
+ const EMIC: u32 = 0x656D_6963; // "emic" (external microphone)
+ const HDPN: u32 = 0x6864_706E; // "hdpn" (headphone)
+
+ match get_device_source(id, devtype) {
+ s @ Ok(IMIC) | s @ Ok(ISPK) => {
+ const GROUP_ID: &str = "builtin-internal-mic|spk";
+ cubeb_log!(
+ "Using hardcoded group id: {} when source is: {:?}.",
+ GROUP_ID,
+ convert_uint32_into_string(s.unwrap())
+ );
+ return Some(CString::new(GROUP_ID).unwrap());
+ }
+ s @ Ok(EMIC) | s @ Ok(HDPN) => {
+ const GROUP_ID: &str = "builtin-external-mic|hdpn";
+ cubeb_log!(
+ "Using hardcoded group id: {} when source is: {:?}.",
+ GROUP_ID,
+ convert_uint32_into_string(s.unwrap())
+ );
+ return Some(CString::new(GROUP_ID).unwrap());
+ }
+ Ok(s) => {
+ cubeb_log!(
+ "No custom group id when source is: {:?}.",
+ convert_uint32_into_string(s)
+ );
+ }
+ Err(e) => {
+ cubeb_log!("Error: {} when getting device source. ", e);
+ }
+ }
+ None
+}
+
+fn get_device_label(
+ id: AudioDeviceID,
+ devtype: DeviceType,
+) -> std::result::Result<StringRef, OSStatus> {
+ get_device_source_name(id, devtype).or_else(|_| get_device_name(id, devtype))
+}
+
+fn get_device_global_uid(id: AudioDeviceID) -> std::result::Result<StringRef, OSStatus> {
+ get_device_uid(id, DeviceType::INPUT | DeviceType::OUTPUT)
+}
+
+#[allow(clippy::cognitive_complexity)]
+fn create_cubeb_device_info(
+ devid: AudioObjectID,
+ devtype: DeviceType,
+) -> Result<ffi::cubeb_device_info> {
+ if devtype != DeviceType::INPUT && devtype != DeviceType::OUTPUT {
+ return Err(Error::error());
+ }
+ let channels = get_channel_count(devid, devtype).map_err(|e| {
+ cubeb_log!("Cannot get the channel count. Error: {}", e);
+ Error::error()
+ })?;
+ if channels == 0 {
+ // Invalid type for this device.
+ return Err(Error::error());
+ }
+
+ let mut dev_info = ffi::cubeb_device_info {
+ max_channels: channels,
+ ..Default::default()
+ };
+
+ assert!(
+ mem::size_of::<ffi::cubeb_devid>() >= mem::size_of_val(&devid),
+ "cubeb_devid can't represent devid"
+ );
+ dev_info.devid = devid as ffi::cubeb_devid;
+
+ match get_device_uid(devid, devtype) {
+ Ok(uid) => {
+ let c_string = uid.into_cstring();
+ dev_info.device_id = c_string.into_raw();
+ }
+ Err(e) => {
+ cubeb_log!(
+ "Cannot get the UID for device {} in {:?} scope. Error: {}",
+ devid,
+ devtype,
+ e
+ );
+ }
+ }
+
+ match get_device_group_id(devid, devtype) {
+ Ok(group_id) => {
+ dev_info.group_id = group_id.into_raw();
+ }
+ Err(e) => {
+ cubeb_log!(
+ "Cannot get the model UID for device {} in {:?} scope. Error: {}",
+ devid,
+ devtype,
+ e
+ );
+ }
+ }
+
+ let label = match get_device_label(devid, devtype) {
+ Ok(label) => label.into_cstring(),
+ Err(e) => {
+ cubeb_log!(
+ "Cannot get the label for device {} in {:?} scope. Error: {}",
+ devid,
+ devtype,
+ e
+ );
+ CString::default()
+ }
+ };
+ dev_info.friendly_name = label.into_raw();
+
+ match get_device_manufacturer(devid, devtype) {
+ Ok(vendor) => {
+ let vendor = vendor.into_cstring();
+ dev_info.vendor_name = vendor.into_raw();
+ }
+ Err(e) => {
+ cubeb_log!(
+ "Cannot get the manufacturer for device {} in {:?} scope. Error: {}",
+ devid,
+ devtype,
+ e
+ );
+ }
+ }
+
+ dev_info.device_type = match devtype {
+ DeviceType::INPUT => ffi::CUBEB_DEVICE_TYPE_INPUT,
+ DeviceType::OUTPUT => ffi::CUBEB_DEVICE_TYPE_OUTPUT,
+ _ => panic!("invalid type"),
+ };
+
+ dev_info.state = ffi::CUBEB_DEVICE_STATE_ENABLED;
+ dev_info.preferred = match get_default_device(devtype) {
+ Some(id) if id == devid => ffi::CUBEB_DEVICE_PREF_ALL,
+ _ => ffi::CUBEB_DEVICE_PREF_NONE,
+ };
+
+ dev_info.format = ffi::CUBEB_DEVICE_FMT_ALL;
+ dev_info.default_format = ffi::CUBEB_DEVICE_FMT_F32NE;
+
+ match get_device_sample_rate(devid, devtype) {
+ Ok(rate) => {
+ dev_info.default_rate = rate as u32;
+ }
+ Err(e) => {
+ cubeb_log!(
+ "Cannot get the sample rate for device {} in {:?} scope. Error: {}",
+ devid,
+ devtype,
+ e
+ );
+ }
+ }
+
+ match get_range_of_sample_rates(devid, devtype) {
+ Ok((min, max)) => {
+ dev_info.min_rate = min as u32;
+ dev_info.max_rate = max as u32;
+ }
+ Err(e) => {
+ cubeb_log!(
+ "Cannot get the range of sample rate for device {} in {:?} scope. Error: {}",
+ devid,
+ devtype,
+ e
+ );
+ }
+ }
+
+ let latency = get_fixed_latency(devid, devtype);
+
+ let (latency_low, latency_high) = match get_device_buffer_frame_size_range(devid, devtype) {
+ Ok(range) => (
+ latency + range.mMinimum as u32,
+ latency + range.mMaximum as u32,
+ ),
+ Err(e) => {
+ cubeb_log!("Cannot get the buffer frame size for device {} in {:?} scope. Using default value instead. Error: {}", devid, devtype, e);
+ (
+ 10 * dev_info.default_rate / 1000,
+ 100 * dev_info.default_rate / 1000,
+ )
+ }
+ };
+ dev_info.latency_lo = latency_low;
+ dev_info.latency_hi = latency_high;
+
+ Ok(dev_info)
+}
+
+fn destroy_cubeb_device_info(device: &mut ffi::cubeb_device_info) {
+ // This should be mapped to the memory allocation in `create_cubeb_device_info`.
+ // The `device_id`, `group_id`, and `vendor_name` fields can be null pointers if the
+ // queries failed, while `friendly_name` is always assigned, defaulting to an empty "" string.
+ // Set the pointers to null so they no longer point to released memory.
+ unsafe {
+ if !device.device_id.is_null() {
+ let _ = CString::from_raw(device.device_id as *mut _);
+ device.device_id = ptr::null();
+ }
+
+ if !device.group_id.is_null() {
+ let _ = CString::from_raw(device.group_id as *mut _);
+ device.group_id = ptr::null();
+ }
+
+ assert!(!device.friendly_name.is_null());
+ let _ = CString::from_raw(device.friendly_name as *mut _);
+ device.friendly_name = ptr::null();
+
+ if !device.vendor_name.is_null() {
+ let _ = CString::from_raw(device.vendor_name as *mut _);
+ device.vendor_name = ptr::null();
+ }
+ }
+}
+
+fn audiounit_get_devices() -> Vec<AudioObjectID> {
+ let mut size: usize = 0;
+ let address = get_property_address(
+ Property::HardwareDevices,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ );
+ let mut ret =
+ audio_object_get_property_data_size(kAudioObjectSystemObject, &address, &mut size);
+ if ret != NO_ERR {
+ return Vec::new();
+ }
+ // Total number of input and output devices.
+ let mut devices: Vec<AudioObjectID> = allocate_array_by_size(size);
+ ret = audio_object_get_property_data(
+ kAudioObjectSystemObject,
+ &address,
+ &mut size,
+ devices.as_mut_ptr(),
+ );
+ if ret != NO_ERR {
+ return Vec::new();
+ }
+ devices
+}
+
+fn audiounit_get_devices_of_type(devtype: DeviceType) -> Vec<AudioObjectID> {
+ assert!(devtype.intersects(DeviceType::INPUT | DeviceType::OUTPUT));
+
+ let mut devices = audiounit_get_devices();
+
+ // Remove the aggregate device from the list of devices (if any).
+ devices.retain(|&device| {
+ // TODO: (bug 1628411) Figure out when `device` is `kAudioObjectUnknown`.
+ if device == kAudioObjectUnknown {
+ false
+ } else if let Ok(uid) = get_device_global_uid(device) {
+ let uid = uid.into_string();
+ !uid.contains(PRIVATE_AGGREGATE_DEVICE_NAME)
+ && !uid.contains(VOICEPROCESSING_AGGREGATE_DEVICE_NAME)
+ } else {
+ // Failed to get the device uid; keep the device in the list.
+ true
+ }
+ });
+
+ // The list is expected to be sorted, but the docs do not guarantee it, so sort explicitly.
+ devices.sort_unstable();
+ if devtype.contains(DeviceType::INPUT | DeviceType::OUTPUT) {
+ return devices;
+ }
+
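+ // For a single scope, keep only the devices that expose at least one channel in that scope.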
+ let mut devices_in_scope = Vec::new();
+ for device in devices {
+ let label = match get_device_label(device, DeviceType::OUTPUT | DeviceType::INPUT) {
+ Ok(label) => label.into_string(),
+ Err(e) => format!("Unknown(error: {})", e),
+ };
+ let info = format!("{} ({})", device, label);
+
+ if let Ok(channels) = get_channel_count(device, devtype) {
+ cubeb_log!("Device {} has {} {:?}-channels", info, channels, devtype);
+ if channels > 0 {
+ devices_in_scope.push(device);
+ }
+ } else {
+ cubeb_log!("Cannot get the channel count for device {}. Ignored.", info);
+ }
+ }
+
+ devices_in_scope
+}
+
+extern "C" fn audiounit_collection_changed_callback(
+ _in_object_id: AudioObjectID,
+ _in_number_addresses: u32,
+ _in_addresses: *const AudioObjectPropertyAddress,
+ in_client_data: *mut c_void,
+) -> OSStatus {
+ let context = unsafe { &mut *(in_client_data as *mut AudioUnitContext) };
+
+ let queue = context.serial_queue.clone();
+
+ // This can be called from inside an AudioUnit function, dispatch to another queue.
+ queue.run_async(move || {
+ let ctx_ptr = context as *const AudioUnitContext;
+
+ let mut devices = context.devices.lock().unwrap();
+
+ if devices.input.changed_callback.is_none() && devices.output.changed_callback.is_none() {
+ return;
+ }
+ if devices.input.changed_callback.is_some() {
+ let input_devices = audiounit_get_devices_of_type(DeviceType::INPUT);
+ if devices.input.update_devices(input_devices) {
+ unsafe {
+ devices.input.changed_callback.unwrap()(
+ ctx_ptr as *mut ffi::cubeb,
+ devices.input.callback_user_ptr,
+ );
+ }
+ }
+ }
+ if devices.output.changed_callback.is_some() {
+ let output_devices = audiounit_get_devices_of_type(DeviceType::OUTPUT);
+ if devices.output.update_devices(output_devices) {
+ unsafe {
+ devices.output.changed_callback.unwrap()(
+ ctx_ptr as *mut ffi::cubeb,
+ devices.output.callback_user_ptr,
+ );
+ }
+ }
+ }
+ });
+
+ NO_ERR
+}
+
+#[derive(Debug)]
+struct DevicesData {
+ changed_callback: ffi::cubeb_device_collection_changed_callback,
+ callback_user_ptr: *mut c_void,
+ devices: Vec<AudioObjectID>,
+}
+
+impl DevicesData {
+ fn set(
+ &mut self,
+ changed_callback: ffi::cubeb_device_collection_changed_callback,
+ callback_user_ptr: *mut c_void,
+ devices: Vec<AudioObjectID>,
+ ) {
+ self.changed_callback = changed_callback;
+ self.callback_user_ptr = callback_user_ptr;
+ self.devices = devices;
+ }
+
+ fn update_devices(&mut self, devices: Vec<AudioObjectID>) -> bool {
+ // Elements in the vectors are expected to be sorted, so a plain equality check suffices.
+ if self.devices == devices {
+ return false;
+ }
+ self.devices = devices;
+ true
+ }
+
+ fn clear(&mut self) {
+ self.changed_callback = None;
+ self.callback_user_ptr = ptr::null_mut();
+ self.devices.clear();
+ }
+
+ fn is_empty(&self) -> bool {
+ self.changed_callback.is_none()
+ && self.callback_user_ptr.is_null()
+ && self.devices.is_empty()
+ }
+}
+
+impl Default for DevicesData {
+ fn default() -> Self {
+ Self {
+ changed_callback: None,
+ callback_user_ptr: ptr::null_mut(),
+ devices: Vec::new(),
+ }
+ }
+}
+
+#[derive(Debug, Default)]
+struct SharedDevices {
+ input: DevicesData,
+ output: DevicesData,
+}
+
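+ // All streams in a context share one latency value; the first stream added decides it.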
+#[derive(Debug, Default)]
+struct LatencyController {
+ streams: u32,
+ latency: Option<u32>,
+}
+
+impl LatencyController {
+ fn add_stream(&mut self, latency: u32) -> Option<u32> {
+ self.streams += 1;
+ // For the first stream, accept any latency within the safe min-max range.
+ if self.streams == 1 {
+ assert!(self.latency.is_none());
+ // Silently clamp the latency to the safe range, because we synthesize
+ // the clock from the callbacks, and we want the clock to update often.
+ self.latency = Some(latency.clamp(SAFE_MIN_LATENCY_FRAMES, SAFE_MAX_LATENCY_FRAMES));
+ }
+ self.latency
+ }
+
+ fn subtract_stream(&mut self) -> Option<u32> {
+ self.streams -= 1;
+ if self.streams == 0 {
+ assert!(self.latency.is_some());
+ self.latency = None;
+ }
+ self.latency
+ }
+}
+
+pub const OPS: Ops = capi_new!(AudioUnitContext, AudioUnitStream);
+
+ // The first member of the Cubeb context must be a pointer to an Ops struct. The Ops struct is
+ // an interface linking to all the Cubeb APIs, and the Cubeb interface uses this assumption to
+ // operate the Cubeb APIs on different implementations.
+ // #[repr(C)] is used to prevent any padding from being added at the beginning of the AudioUnitContext.
+#[repr(C)]
+#[derive(Debug)]
+pub struct AudioUnitContext {
+ _ops: *const Ops,
+ serial_queue: Queue,
+ latency_controller: Mutex<LatencyController>,
+ devices: Mutex<SharedDevices>,
+}
+
+impl AudioUnitContext {
+ fn new() -> Self {
+ Self {
+ _ops: &OPS as *const _,
+ serial_queue: Queue::new(DISPATCH_QUEUE_LABEL),
+ latency_controller: Mutex::new(LatencyController::default()),
+ devices: Mutex::new(SharedDevices::default()),
+ }
+ }
+
+ fn active_streams(&self) -> u32 {
+ let controller = self.latency_controller.lock().unwrap();
+ controller.streams
+ }
+
+ fn update_latency_by_adding_stream(&self, latency_frames: u32) -> Option<u32> {
+ let mut controller = self.latency_controller.lock().unwrap();
+ controller.add_stream(latency_frames)
+ }
+
+ fn update_latency_by_removing_stream(&self) -> Option<u32> {
+ let mut controller = self.latency_controller.lock().unwrap();
+ controller.subtract_stream()
+ }
+
+ fn add_devices_changed_listener(
+ &mut self,
+ devtype: DeviceType,
+ collection_changed_callback: ffi::cubeb_device_collection_changed_callback,
+ user_ptr: *mut c_void,
+ ) -> Result<()> {
+ assert!(devtype.intersects(DeviceType::INPUT | DeviceType::OUTPUT));
+ assert!(collection_changed_callback.is_some());
+
+ let context_ptr = self as *mut AudioUnitContext;
+ let mut devices = self.devices.lock().unwrap();
+
+ // Note: registering a second callback without unregistering the first one is an error.
+ // The current implementation requires unregistering before registering a new callback.
+ if devtype.contains(DeviceType::INPUT) && devices.input.changed_callback.is_some()
+ || devtype.contains(DeviceType::OUTPUT) && devices.output.changed_callback.is_some()
+ {
+ return Err(Error::invalid_parameter());
+ }
+
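+ // Install the system-wide device-collection listener only once, when the first callback is registered.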
+ if devices.input.changed_callback.is_none() && devices.output.changed_callback.is_none() {
+ let address = get_property_address(
+ Property::HardwareDevices,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ );
+ let ret = audio_object_add_property_listener(
+ kAudioObjectSystemObject,
+ &address,
+ audiounit_collection_changed_callback,
+ context_ptr,
+ );
+ if ret != NO_ERR {
+ cubeb_log!(
+ "Cannot add devices-changed listener for {:?}, Error: {}",
+ devtype,
+ ret
+ );
+ return Err(Error::error());
+ }
+ }
+
+ if devtype.contains(DeviceType::INPUT) {
+ // Expected empty after unregister.
+ assert!(devices.input.is_empty());
+ devices.input.set(
+ collection_changed_callback,
+ user_ptr,
+ audiounit_get_devices_of_type(DeviceType::INPUT),
+ );
+ }
+
+ if devtype.contains(DeviceType::OUTPUT) {
+ // Expected empty after unregister.
+ assert!(devices.output.is_empty());
+ devices.output.set(
+ collection_changed_callback,
+ user_ptr,
+ audiounit_get_devices_of_type(DeviceType::OUTPUT),
+ );
+ }
+
+ Ok(())
+ }
+
+ fn remove_devices_changed_listener(&mut self, devtype: DeviceType) -> Result<()> {
+ if !devtype.intersects(DeviceType::INPUT | DeviceType::OUTPUT) {
+ return Err(Error::invalid_parameter());
+ }
+
+ let context_ptr = self as *mut AudioUnitContext;
+ let mut devices = self.devices.lock().unwrap();
+
+ if devtype.contains(DeviceType::INPUT) {
+ devices.input.clear();
+ }
+
+ if devtype.contains(DeviceType::OUTPUT) {
+ devices.output.clear();
+ }
+
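+ // Keep the system-wide listener installed while either scope still has a registered callback.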
+ if devices.input.changed_callback.is_some() || devices.output.changed_callback.is_some() {
+ return Ok(());
+ }
+
+ let address = get_property_address(
+ Property::HardwareDevices,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ );
+ // Note: unregistering a callback that was never registered is not an error, so no check is done.
+ let r = audio_object_remove_property_listener(
+ kAudioObjectSystemObject,
+ &address,
+ audiounit_collection_changed_callback,
+ context_ptr,
+ );
+ if r == NO_ERR {
+ Ok(())
+ } else {
+ cubeb_log!(
+ "Cannot remove devices-changed listener for {:?}, Error: {}",
+ devtype,
+ r
+ );
+ Err(Error::error())
+ }
+ }
+}
+
+impl ContextOps for AudioUnitContext {
+ fn init(_context_name: Option<&CStr>) -> Result<Context> {
+ set_notification_runloop();
+ let ctx = Box::new(AudioUnitContext::new());
+ Ok(unsafe { Context::from_ptr(Box::into_raw(ctx) as *mut _) })
+ }
+
+ fn backend_id(&mut self) -> &'static CStr {
+ unsafe { CStr::from_ptr(b"audiounit-rust\0".as_ptr() as *const _) }
+ }
+ #[cfg(target_os = "ios")]
+ fn max_channel_count(&mut self) -> Result<u32> {
+ Ok(2u32)
+ }
+ #[cfg(not(target_os = "ios"))]
+ fn max_channel_count(&mut self) -> Result<u32> {
+ let device = match get_default_device(DeviceType::OUTPUT) {
+ None => {
+ cubeb_log!("Could not get default output device");
+ return Err(Error::error());
+ }
+ Some(id) => id,
+ };
+ get_channel_count(device, DeviceType::OUTPUT).map_err(|e| {
+ cubeb_log!("Cannot get the channel count. Error: {}", e);
+ Error::error()
+ })
+ }
+ #[cfg(target_os = "ios")]
+ fn min_latency(&mut self, _params: StreamParams) -> Result<u32> {
+ Err(not_supported());
+ }
+ #[cfg(not(target_os = "ios"))]
+ fn min_latency(&mut self, _params: StreamParams) -> Result<u32> {
+ let device = match get_default_device(DeviceType::OUTPUT) {
+ None => {
+ cubeb_log!("Could not get default output device");
+ return Err(Error::error());
+ }
+ Some(id) => id,
+ };
+
+ let range =
+ get_device_buffer_frame_size_range(device, DeviceType::OUTPUT).map_err(|e| {
+ cubeb_log!("Could not get acceptable latency range. Error: {}", e);
+ Error::error()
+ })?;
+
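+ // Never report less than the backend's safe minimum number of frames.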
+ Ok(cmp::max(range.mMinimum as u32, SAFE_MIN_LATENCY_FRAMES))
+ }
+ #[cfg(target_os = "ios")]
+ fn preferred_sample_rate(&mut self) -> Result<u32> {
+ Err(not_supported());
+ }
+ #[cfg(not(target_os = "ios"))]
+ fn preferred_sample_rate(&mut self) -> Result<u32> {
+ let device = match get_default_device(DeviceType::OUTPUT) {
+ None => {
+ cubeb_log!("Could not get default output device");
+ return Err(Error::error());
+ }
+ Some(id) => id,
+ };
+ let rate = get_device_sample_rate(device, DeviceType::OUTPUT).map_err(|e| {
+ cubeb_log!(
+ "Cannot get the sample rate of the default output device. Error: {}",
+ e
+ );
+ Error::error()
+ })?;
+ Ok(rate as u32)
+ }
+ fn supported_input_processing_params(&mut self) -> Result<InputProcessingParams> {
+ Ok(InputProcessingParams::ECHO_CANCELLATION
+ | InputProcessingParams::NOISE_SUPPRESSION
+ | InputProcessingParams::AUTOMATIC_GAIN_CONTROL)
+ }
+ fn enumerate_devices(
+ &mut self,
+ devtype: DeviceType,
+ collection: &DeviceCollectionRef,
+ ) -> Result<()> {
+ let mut device_infos = Vec::new();
+ let dev_types = [DeviceType::INPUT, DeviceType::OUTPUT];
+ for dev_type in dev_types.iter() {
+ if !devtype.contains(*dev_type) {
+ continue;
+ }
+ let devices = audiounit_get_devices_of_type(*dev_type);
+ for device in devices {
+ if let Ok(info) = create_cubeb_device_info(device, *dev_type) {
+ device_infos.push(info);
+ }
+ }
+ }
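+ // Leak the Vec into the C collection; ownership is reclaimed in device_collection_destroy.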
+ let (ptr, len) = if device_infos.is_empty() {
+ (ptr::null_mut(), 0)
+ } else {
+ forget_vec(device_infos)
+ };
+ let coll = unsafe { &mut *collection.as_ptr() };
+ coll.device = ptr;
+ coll.count = len;
+ Ok(())
+ }
+ fn device_collection_destroy(&mut self, collection: &mut DeviceCollectionRef) -> Result<()> {
+ assert!(!collection.as_ptr().is_null());
+ let coll = unsafe { &mut *collection.as_ptr() };
+ if coll.device.is_null() {
+ return Ok(());
+ }
+
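+ // Reclaim the Vec leaked by enumerate_devices, then free each entry's strings.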
+ let mut devices = retake_forgotten_vec(coll.device, coll.count);
+ for device in &mut devices {
+ destroy_cubeb_device_info(device);
+ }
+ drop(devices); // Release the memory.
+ coll.device = ptr::null_mut();
+ coll.count = 0;
+ Ok(())
+ }
+ fn stream_init(
+ &mut self,
+ _stream_name: Option<&CStr>,
+ input_device: DeviceId,
+ input_stream_params: Option<&StreamParamsRef>,
+ output_device: DeviceId,
+ output_stream_params: Option<&StreamParamsRef>,
+ latency_frames: u32,
+ data_callback: ffi::cubeb_data_callback,
+ state_callback: ffi::cubeb_state_callback,
+ user_ptr: *mut c_void,
+ ) -> Result<Stream> {
+ if !input_device.is_null() && input_stream_params.is_none() {
+ cubeb_log!("Cannot init an input device without input stream params");
+ return Err(Error::invalid_parameter());
+ }
+
+ if !output_device.is_null() && output_stream_params.is_none() {
+ cubeb_log!("Cannot init an output device without output stream params");
+ return Err(Error::invalid_parameter());
+ }
+
+ if input_stream_params.is_none() && output_stream_params.is_none() {
+ cubeb_log!("Cannot init a stream without any stream params");
+ return Err(Error::invalid_parameter());
+ }
+
+ if data_callback.is_none() {
+ cubeb_log!("Cannot init a stream without a data callback");
+ return Err(Error::invalid_parameter());
+ }
+
+ // Latency cannot change if another stream is operating in parallel. In this case
+ // the latency is set to the other stream's value.
+ let global_latency_frames = self
+ .update_latency_by_adding_stream(latency_frames)
+ .unwrap();
+ if global_latency_frames != latency_frames {
+ cubeb_log!(
+ "Use global latency {} instead of the requested latency {}.",
+ global_latency_frames,
+ latency_frames
+ );
+ }
+
+ let in_stm_settings = if let Some(params) = input_stream_params {
+ let in_device =
+ match create_device_info(input_device as AudioDeviceID, DeviceType::INPUT) {
+ None => {
+ cubeb_log!("Fail to create device info for input");
+ return Err(Error::error());
+ }
+ Some(d) => d,
+ };
+ let stm_params = StreamParams::from(unsafe { *params.as_ptr() });
+ Some((stm_params, in_device))
+ } else {
+ None
+ };
+
+ let out_stm_settings = if let Some(params) = output_stream_params {
+ let out_device =
+ match create_device_info(output_device as AudioDeviceID, DeviceType::OUTPUT) {
+ None => {
+ cubeb_log!("Fail to create device info for output");
+ return Err(Error::error());
+ }
+ Some(d) => d,
+ };
+ let stm_params = StreamParams::from(unsafe { *params.as_ptr() });
+ Some((stm_params, out_device))
+ } else {
+ None
+ };
+
+ let mut boxed_stream = Box::new(AudioUnitStream::new(
+ self,
+ user_ptr,
+ data_callback,
+ state_callback,
+ global_latency_frames,
+ ));
+
+ // Rename the task queue with a unique label.
+ let queue_label = format!("{}.{:p}", DISPATCH_QUEUE_LABEL, boxed_stream.as_ref());
+ boxed_stream.queue = Queue::new(queue_label.as_str());
+
+ boxed_stream.core_stream_data =
+ CoreStreamData::new(boxed_stream.as_ref(), in_stm_settings, out_stm_settings);
+
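+ // Run the AudioUnit setup synchronously on the stream's own task queue.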
+ let mut result = Ok(());
+ boxed_stream.queue.clone().run_sync(|| {
+ result = boxed_stream.core_stream_data.setup();
+ });
+ if let Err(r) = result {
+ cubeb_log!(
+ "({:p}) Could not setup the audiounit stream.",
+ boxed_stream.as_ref()
+ );
+ return Err(r);
+ }
+
+ let cubeb_stream = unsafe { Stream::from_ptr(Box::into_raw(boxed_stream) as *mut _) };
+ cubeb_log!(
+ "({:p}) Cubeb stream init successful.",
+ cubeb_stream.as_ref()
+ );
+ Ok(cubeb_stream)
+ }
+ fn register_device_collection_changed(
+ &mut self,
+ devtype: DeviceType,
+ collection_changed_callback: ffi::cubeb_device_collection_changed_callback,
+ user_ptr: *mut c_void,
+ ) -> Result<()> {
+ if devtype == DeviceType::UNKNOWN {
+ return Err(Error::invalid_parameter());
+ }
+ if collection_changed_callback.is_some() {
+ self.add_devices_changed_listener(devtype, collection_changed_callback, user_ptr)
+ } else {
+ self.remove_devices_changed_listener(devtype)
+ }
+ }
+}
+
+impl Drop for AudioUnitContext {
+ fn drop(&mut self) {
+ let devices = self.devices.lock().unwrap();
+ assert!(
+ devices.input.changed_callback.is_none() && devices.output.changed_callback.is_none()
+ );
+
+ {
+ let controller = self.latency_controller.lock().unwrap();
+ // Disabling this assert for bug 1083664 -- we seem to leak a stream
+ // assert(controller.streams == 0);
+ if controller.streams > 0 {
+ cubeb_log!(
+ "({:p}) API misuse, {} streams active when context destroyed!",
+ self as *const AudioUnitContext,
+ controller.streams
+ );
+ }
+ }
+ // Make sure all the pending (device-collection-changed-callback) tasks
+ // in queue are done, and cancel all the tasks appended after `drop` is executed.
+ let queue = self.serial_queue.clone();
+ queue.run_final(|| {});
+ }
+}
+
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl Send for AudioUnitContext {}
+unsafe impl Sync for AudioUnitContext {}
+
+// Holds the information for an audio input callback call, for debugging purposes.
+struct InputCallbackData {
+ bytes: u32,
+ rendered_frames: u32,
+ total_available: usize,
+ channels: u32,
+ num_buf: u32,
+}
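+ // Single-producer/single-consumer ring buffer used to pass per-callback stats from the
+ // real-time input thread to the output thread, where they are logged.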
+struct InputCallbackLogger {
+ prod: ringbuf::Producer<InputCallbackData>,
+ cons: ringbuf::Consumer<InputCallbackData>,
+}
+
+impl InputCallbackLogger {
+ fn new() -> Self {
+ let ring = RingBuffer::<InputCallbackData>::new(16);
+ let (prod, cons) = ring.split();
+ Self { prod, cons }
+ }
+
+ fn push(&mut self, data: InputCallbackData) {
+ self.prod.push(data);
+ }
+
+ fn pop(&mut self) -> Option<InputCallbackData> {
+ self.cons.pop()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.cons.is_empty()
+ }
+}
+
+impl fmt::Debug for InputCallbackLogger {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "InputCallbackLogger {{ prod: {}, cons: {} }}",
+ self.prod.len(),
+ self.cons.len()
+ )
+ }
+}
+
+#[derive(Debug)]
+struct CoreStreamData<'ctx> {
+ stm_ptr: *const AudioUnitStream<'ctx>,
+ aggregate_device: Option<AggregateDevice>,
+ mixer: Option<Mixer>,
+ resampler: Resampler,
+ // Stream creation parameters.
+ input_stream_params: StreamParams,
+ output_stream_params: StreamParams,
+ // Device settings for AudioUnits.
+ input_dev_desc: AudioStreamBasicDescription,
+ output_dev_desc: AudioStreamBasicDescription,
+ // I/O AudioUnits.
+ input_unit: AudioUnit,
+ output_unit: AudioUnit,
+ // Info of the I/O devices.
+ input_device: device_info,
+ output_device: device_info,
+ input_processing_params: InputProcessingParams,
+ input_mute: bool,
+ input_buffer_manager: Option<BufferManager>,
+ // Listeners indicating what system events are monitored.
+ default_input_listener: Option<device_property_listener>,
+ default_output_listener: Option<device_property_listener>,
+ input_alive_listener: Option<device_property_listener>,
+ input_source_listener: Option<device_property_listener>,
+ output_alive_listener: Option<device_property_listener>,
+ output_source_listener: Option<device_property_listener>,
+ input_logging: Option<InputCallbackLogger>,
+}
+
+impl<'ctx> Default for CoreStreamData<'ctx> {
+ fn default() -> Self {
+ Self {
+ stm_ptr: ptr::null(),
+ aggregate_device: None,
+ mixer: None,
+ resampler: Resampler::default(),
+ input_stream_params: StreamParams::from(ffi::cubeb_stream_params {
+ format: ffi::CUBEB_SAMPLE_FLOAT32NE,
+ rate: 0,
+ channels: 0,
+ layout: ffi::CUBEB_LAYOUT_UNDEFINED,
+ prefs: ffi::CUBEB_STREAM_PREF_NONE,
+ }),
+ output_stream_params: StreamParams::from(ffi::cubeb_stream_params {
+ format: ffi::CUBEB_SAMPLE_FLOAT32NE,
+ rate: 0,
+ channels: 0,
+ layout: ffi::CUBEB_LAYOUT_UNDEFINED,
+ prefs: ffi::CUBEB_STREAM_PREF_NONE,
+ }),
+ input_dev_desc: AudioStreamBasicDescription::default(),
+ output_dev_desc: AudioStreamBasicDescription::default(),
+ input_unit: ptr::null_mut(),
+ output_unit: ptr::null_mut(),
+ input_device: device_info::default(),
+ output_device: device_info::default(),
+ input_processing_params: InputProcessingParams::NONE,
+ input_mute: false,
+ input_buffer_manager: None,
+ default_input_listener: None,
+ default_output_listener: None,
+ input_alive_listener: None,
+ input_source_listener: None,
+ output_alive_listener: None,
+ output_source_listener: None,
+ input_logging: None,
+ }
+ }
+}
+
+impl<'ctx> CoreStreamData<'ctx> {
+ fn new(
+ stm: &AudioUnitStream<'ctx>,
+ input_stream_settings: Option<(StreamParams, device_info)>,
+ output_stream_settings: Option<(StreamParams, device_info)>,
+ ) -> Self {
+ fn get_default_stream_params() -> StreamParams {
+ StreamParams::from(ffi::cubeb_stream_params {
+ format: ffi::CUBEB_SAMPLE_FLOAT32NE,
+ rate: 0,
+ channels: 0,
+ layout: ffi::CUBEB_LAYOUT_UNDEFINED,
+ prefs: ffi::CUBEB_STREAM_PREF_NONE,
+ })
+ }
+ let (in_stm_params, in_dev) =
+ input_stream_settings.unwrap_or((get_default_stream_params(), device_info::default()));
+ let (out_stm_params, out_dev) = output_stream_settings
+ .unwrap_or((get_default_stream_params(), device_info::default()));
+ Self {
+ stm_ptr: stm,
+ aggregate_device: None,
+ mixer: None,
+ resampler: Resampler::default(),
+ input_stream_params: in_stm_params,
+ output_stream_params: out_stm_params,
+ input_dev_desc: AudioStreamBasicDescription::default(),
+ output_dev_desc: AudioStreamBasicDescription::default(),
+ input_unit: ptr::null_mut(),
+ output_unit: ptr::null_mut(),
+ input_device: in_dev,
+ output_device: out_dev,
+ input_processing_params: InputProcessingParams::NONE,
+ input_mute: false,
+ input_buffer_manager: None,
+ default_input_listener: None,
+ default_output_listener: None,
+ input_alive_listener: None,
+ input_source_listener: None,
+ output_alive_listener: None,
+ output_source_listener: None,
+ input_logging: None,
+ }
+ }
+
+ fn debug_assert_is_on_stream_queue(&self) {
+ if self.stm_ptr.is_null() {
+ return;
+ }
+ let stm = unsafe { &*self.stm_ptr };
+ stm.queue.debug_assert_is_current();
+ }
+
+ fn start_audiounits(&self) -> Result<()> {
+ self.debug_assert_is_on_stream_queue();
+ // Only allowed to be called after the stream is initialized
+ // and before the stream is destroyed.
+ debug_assert!(!self.input_unit.is_null() || !self.output_unit.is_null());
+
+ if !self.input_unit.is_null() {
+ start_audiounit(self.input_unit)?;
+ }
+ if self.using_voice_processing_unit() {
+ // Handle the VoiceProcessIO case where there is a single unit.
+ return Ok(());
+ }
+ if !self.output_unit.is_null() {
+ start_audiounit(self.output_unit)?;
+ }
+ Ok(())
+ }
+
+ fn stop_audiounits(&self) {
+ self.debug_assert_is_on_stream_queue();
+ if !self.input_unit.is_null() {
+ let r = stop_audiounit(self.input_unit);
+ assert!(r.is_ok());
+ }
+ if self.using_voice_processing_unit() {
+ // Handle the VoiceProcessIO case where there is a single unit.
+ return;
+ }
+ if !self.output_unit.is_null() {
+ let r = stop_audiounit(self.output_unit);
+ assert!(r.is_ok());
+ }
+ }
+
+ fn has_input(&self) -> bool {
+ self.input_stream_params.rate() > 0
+ }
+
+ fn has_output(&self) -> bool {
+ self.output_stream_params.rate() > 0
+ }
+
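+ // With VoiceProcessingIO, a single AudioUnit serves both input and output.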
+ fn using_voice_processing_unit(&self) -> bool {
+ !self.input_unit.is_null() && self.input_unit == self.output_unit
+ }
+
+ fn same_clock_domain(&self) -> bool {
+ self.debug_assert_is_on_stream_queue();
+ // If not setting up a duplex stream, there is only one device,
+ // no reclocking necessary.
+ if !(self.has_input() && self.has_output()) {
+ return true;
+ }
+ let input_domain = match get_clock_domain(self.input_device.id, DeviceType::INPUT) {
+ Ok(clock_domain) => clock_domain,
+ Err(_) => {
+ cubeb_log!("Coudn't determine clock domains for input.");
+ return false;
+ }
+ };
+
+ let output_domain = match get_clock_domain(self.output_device.id, DeviceType::OUTPUT) {
+ Ok(clock_domain) => clock_domain,
+ Err(_) => {
+ cubeb_log!("Coudn't determine clock domains for input.");
+ return false;
+ }
+ };
+ input_domain == output_domain
+ }
+
+ fn should_block_vpio_for_device_pair(
+ &self,
+ in_device: &device_info,
+ out_device: &device_info,
+ ) -> bool {
+ self.debug_assert_is_on_stream_queue();
+ cubeb_log!("Evaluating device pair against VPIO block list");
+ let log_device = |id, devtype| -> std::result::Result<(), OSStatus> {
+ cubeb_log!("{} uid=\"{}\", model_uid=\"{}\", transport_type={:?}, source={:?}, source_name=\"{}\", name=\"{}\", manufacturer=\"{}\"",
+ if devtype == DeviceType::INPUT {
+ "Input"
+ } else {
+ debug_assert_eq!(devtype, DeviceType::OUTPUT);
+ "Output"
+ },
+ get_device_uid(id, devtype).map(|s| s.into_string()).unwrap_or_default(),
+ get_device_model_uid(id, devtype).map(|s| s.into_string()).unwrap_or_default(),
+ convert_uint32_into_string(get_device_transport_type(id, devtype).unwrap_or(0)),
+ convert_uint32_into_string(get_device_source(id, devtype).unwrap_or(0)),
+ get_device_source_name(id, devtype).map(|s| s.into_string()).unwrap_or_default(),
+ get_device_name(id, devtype).map(|s| s.into_string()).unwrap_or_default(),
+ get_device_manufacturer(id, devtype).map(|s| s.into_string()).unwrap_or_default());
+ Ok(())
+ };
+ log_device(in_device.id, DeviceType::INPUT);
+ log_device(out_device.id, DeviceType::OUTPUT);
+ match (
+ get_device_model_uid(in_device.id, DeviceType::INPUT).map(|s| s.to_string()),
+ get_device_model_uid(out_device.id, DeviceType::OUTPUT).map(|s| s.to_string()),
+ ) {
+ (Ok(in_model_uid), Ok(out_model_uid))
+ if in_model_uid.contains(APPLE_STUDIO_DISPLAY_USB_ID)
+ && out_model_uid.contains(APPLE_STUDIO_DISPLAY_USB_ID) =>
+ {
+ cubeb_log!("Both input and output device is an Apple Studio Display. BLOCKED");
+ true
+ }
+ _ => {
+ cubeb_log!("Device pair is not blocked");
+ false
+ }
+ }
+ }
+
+ fn create_audiounits(&mut self) -> Result<(device_info, device_info)> {
+ self.debug_assert_is_on_stream_queue();
+ let should_use_voice_processing_unit = self.has_input()
+ && self.has_output()
+ && self
+ .input_stream_params
+ .prefs()
+ .contains(StreamPrefs::VOICE)
+ && !self.should_block_vpio_for_device_pair(&self.input_device, &self.output_device);
+
+ let should_use_aggregate_device = {
+ // It's impossible to create an aggregate device from an aggregate device, and it's
+ // unnecessary to create one when opening the same device for both input and output. In
+ // all other cases, use an aggregate device.
+ let mut either_already_aggregate = false;
+ if self.has_input() {
+ let input_is_aggregate =
+ get_device_transport_type(self.input_device.id, DeviceType::INPUT).unwrap_or(0)
+ == kAudioDeviceTransportTypeAggregate;
+ if input_is_aggregate {
+ either_already_aggregate = true;
+ }
+ cubeb_log!(
+ "Input device ID: {} (aggregate: {:?})",
+ self.input_device.id,
+ input_is_aggregate
+ );
+ }
+ if self.has_output() {
+ let output_is_aggregate =
+ get_device_transport_type(self.output_device.id, DeviceType::OUTPUT)
+ .unwrap_or(0)
+ == kAudioDeviceTransportTypeAggregate;
+ if output_is_aggregate {
+ either_already_aggregate = true;
+ }
+ cubeb_log!(
+ "Output device ID: {} (aggregate: {:?})",
+ self.output_device.id,
+ output_is_aggregate
+ );
+ }
+ // Only use an aggregate device when the devices are different.
+ self.has_input()
+ && self.has_output()
+ && self.input_device.id != self.output_device.id
+ && !either_already_aggregate
+ };
+
+ // Create an AudioUnit:
+ // - If we're eligible to use voice processing, try creating a VoiceProcessingIO AudioUnit.
+ // - If we should use an aggregate device, try creating one and input and output AudioUnits next.
+ // - As a last resort, create regular AudioUnits. This is also the normal non-duplex path.
+
+ if should_use_voice_processing_unit {
+ if let Ok(au) =
+ create_voiceprocessing_audiounit(&self.input_device, &self.output_device)
+ {
+ cubeb_log!("({:p}) Using VoiceProcessingIO AudioUnit", self.stm_ptr);
+ self.input_unit = au;
+ self.output_unit = au;
+ return Ok((self.input_device.clone(), self.output_device.clone()));
+ }
+ cubeb_log!(
+ "({:p}) Failed to create VoiceProcessingIO AudioUnit. Trying a regular one.",
+ self.stm_ptr
+ );
+ }
+
+ if should_use_aggregate_device {
+ if let Ok(device) = AggregateDevice::new(self.input_device.id, self.output_device.id) {
+ let in_dev_info = {
+ device_info {
+ id: device.get_device_id(),
+ ..self.input_device
+ }
+ };
+ let out_dev_info = {
+ device_info {
+ id: device.get_device_id(),
+ ..self.output_device
+ }
+ };
+
+ match (
+ create_audiounit(&in_dev_info),
+ create_audiounit(&out_dev_info),
+ ) {
+ (Ok(in_au), Ok(out_au)) => {
+ cubeb_log!(
+ "({:p}) Using an aggregate device {} for input and output.",
+ self.stm_ptr,
+ device.get_device_id()
+ );
+ self.aggregate_device = Some(device);
+ self.input_unit = in_au;
+ self.output_unit = out_au;
+ return Ok((in_dev_info, out_dev_info));
+ }
+ (Err(e), Ok(au)) => {
+ cubeb_log!(
+ "({:p}) Failed to create input AudioUnit for aggregate device. Error: {}.",
+ self.stm_ptr,
+ e
+ );
+ dispose_audio_unit(au);
+ }
+ (Ok(au), Err(e)) => {
+ cubeb_log!(
+ "({:p}) Failed to create output AudioUnit for aggregate device. Error: {}.",
+ self.stm_ptr,
+ e
+ );
+ dispose_audio_unit(au);
+ }
+ (Err(e), _) => {
+ cubeb_log!(
+ "({:p}) Failed to create AudioUnits for aggregate device. Error: {}.",
+ self.stm_ptr,
+ e
+ );
+ }
+ }
+ }
+ cubeb_log!(
+ "({:p}) Failed to set up aggregate device. Using regular AudioUnits.",
+ self.stm_ptr
+ );
+ }
+
+ if self.has_input() {
+ match create_audiounit(&self.input_device) {
+ Ok(in_au) => self.input_unit = in_au,
+ Err(e) => {
+ cubeb_log!(
+ "({:p}) Failed to create regular AudioUnit for input. Error: {}",
+ self.stm_ptr,
+ e
+ );
+ return Err(e);
+ }
+ }
+ }
+
+ if self.has_output() {
+ match create_audiounit(&self.output_device) {
+ Ok(out_au) => self.output_unit = out_au,
+ Err(e) => {
+ cubeb_log!(
+ "({:p}) Failed to create regular AudioUnit for output. Error: {}",
+ self.stm_ptr,
+ e
+ );
+ if !self.input_unit.is_null() {
+ dispose_audio_unit(self.input_unit);
+ self.input_unit = ptr::null_mut();
+ }
+ return Err(e);
+ }
+ }
+ }
+
+ Ok((self.input_device.clone(), self.output_device.clone()))
+ }
+
+ #[allow(clippy::cognitive_complexity)] // TODO: Refactoring.
+ fn setup(&mut self) -> Result<()> {
+ self.debug_assert_is_on_stream_queue();
+ if self
+ .input_stream_params
+ .prefs()
+ .contains(StreamPrefs::LOOPBACK)
+ || self
+ .output_stream_params
+ .prefs()
+ .contains(StreamPrefs::LOOPBACK)
+ {
+ cubeb_log!("({:p}) Loopback not supported for audiounit.", self.stm_ptr);
+ return Err(Error::not_supported());
+ }
+
+ let same_clock_domain = self.same_clock_domain();
+ let (in_dev_info, out_dev_info) = self.create_audiounits()?;
+ let using_voice_processing_unit = self.using_voice_processing_unit();
+
+ assert!(!self.stm_ptr.is_null());
+ let stream = unsafe { &(*self.stm_ptr) };
+
+ // Configure I/O stream
+ if self.has_input() {
+ assert!(!self.input_unit.is_null());
+
+ cubeb_log!(
+ "({:p}) Initializing input by device info: {:?}",
+ self.stm_ptr,
+ in_dev_info
+ );
+
+ let device_channel_count =
+ get_channel_count(self.input_device.id, DeviceType::INPUT).unwrap_or(0);
+ if device_channel_count < self.input_stream_params.channels() {
+ cubeb_log!(
+ "({:p}) Invalid input channel count; device={}, params={}",
+ self.stm_ptr,
+ device_channel_count,
+ self.input_stream_params.channels()
+ );
+ return Err(Error::invalid_parameter());
+ }
+
+ cubeb_log!(
+ "({:p}) Opening input side: rate {}, channels {}, format {:?}, layout {:?}, prefs {:?}, latency in frames {}, voice processing {}.",
+ self.stm_ptr,
+ self.input_stream_params.rate(),
+ self.input_stream_params.channels(),
+ self.input_stream_params.format(),
+ self.input_stream_params.layout(),
+ self.input_stream_params.prefs(),
+ stream.latency_frames,
+ using_voice_processing_unit
+ );
+
+ // Get input device hardware information.
+ let mut input_hw_desc = AudioStreamBasicDescription::default();
+ let mut size = mem::size_of::<AudioStreamBasicDescription>();
+ let r = audio_unit_get_property(
+ self.input_unit,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ AU_IN_BUS,
+ &mut input_hw_desc,
+ &mut size,
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitGetProperty/input/kAudioUnitProperty_StreamFormat rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+ cubeb_log!(
+ "({:p}) Input hardware description: {:?}",
+ self.stm_ptr,
+ input_hw_desc
+ );
+ // In some cases with VPIO the stream format's mChannelsPerFrame is higher than
+ // expected. Use get_channel_count as source of truth.
+ input_hw_desc.mChannelsPerFrame = device_channel_count;
+ // Notice: when we are using an aggregate device, input_hw_desc.mChannelsPerFrame is
+ // the total input channel count of all the devices added to the aggregate device.
+ // Due to our aggregate device settings, the data captured by the output device's input
+ // channels is put at the beginning of the raw data given by the input callback.
+
+ // Always request all the input channels of the device, and only pass the correct
+ // channels to the audio callback.
+ let params = unsafe {
+ let mut p = *self.input_stream_params.as_ptr();
+ p.channels = if using_voice_processing_unit {
+ // VPIO is always MONO.
+ 1
+ } else {
+ input_hw_desc.mChannelsPerFrame
+ };
+ // The input AudioUnit must be configured with the device's sample rate;
+ // we will resample inside the input callback.
+ p.rate = input_hw_desc.mSampleRate as _;
+ StreamParams::from(p)
+ };
+
+ self.input_dev_desc = create_stream_description(&params).map_err(|e| {
+ cubeb_log!(
+ "({:p}) Setting format description for input failed.",
+ self.stm_ptr
+ );
+ e
+ })?;
+
+ assert_eq!(self.input_dev_desc.mSampleRate, input_hw_desc.mSampleRate);
+
+ // Use latency to set buffer size
+ assert_ne!(stream.latency_frames, 0);
+ if let Err(r) =
+ set_buffer_size_sync(self.input_unit, DeviceType::INPUT, stream.latency_frames)
+ {
+ cubeb_log!("({:p}) Error in change input buffer size.", self.stm_ptr);
+ return Err(r);
+ }
+
+ let r = audio_unit_set_property(
+ self.input_unit,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ AU_IN_BUS,
+ &self.input_dev_desc,
+ mem::size_of::<AudioStreamBasicDescription>(),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitSetProperty/input/kAudioUnitProperty_StreamFormat rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+
+ // Frames per buffer in the input callback.
+ let r = audio_unit_set_property(
+ self.input_unit,
+ kAudioUnitProperty_MaximumFramesPerSlice,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &stream.latency_frames,
+ mem::size_of::<u32>(),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitSetProperty/input/kAudioUnitProperty_MaximumFramesPerSlice rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+
+ // When we use the aggregate device, self.input_dev_desc.mChannelsPerFrame is the
+ // total input channel count of all the devices added to the aggregate device. However,
+ // we only need the audio data captured by the requested input device, so we need to
+ // ignore some data captured by the audio input of the requested output device (e.g.,
+ // when the requested output device is a USB headset with a built-in mic), at the
+ // beginning of the raw data taken from the input callback.
+ self.input_buffer_manager = Some(BufferManager::new(
+ self.input_stream_params.format(),
+ SAFE_MAX_LATENCY_FRAMES as usize,
+ self.input_dev_desc.mChannelsPerFrame as usize,
+ self.input_dev_desc
+ .mChannelsPerFrame
+ .saturating_sub(device_channel_count) as usize,
+ self.input_stream_params.channels() as usize,
+ ));
+
+ let aurcbs_in = AURenderCallbackStruct {
+ inputProc: Some(audiounit_input_callback),
+ inputProcRefCon: self.stm_ptr as *mut c_void,
+ };
+
+ let r = audio_unit_set_property(
+ self.input_unit,
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global,
+ AU_OUT_BUS,
+ &aurcbs_in,
+ mem::size_of_val(&aurcbs_in),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitSetProperty/input/kAudioOutputUnitProperty_SetInputCallback rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+
+ stream.frames_read.store(0, Ordering::SeqCst);
+
+ cubeb_log!(
+ "({:p}) Input audiounit init with device {} successfully.",
+ self.stm_ptr,
+ in_dev_info.id
+ );
+ }
+
+ if self.has_output() {
+ assert!(!self.output_unit.is_null());
+
+ cubeb_log!(
+ "({:p}) Initialize output by device info: {:?}",
+ self.stm_ptr,
+ out_dev_info
+ );
+
+ cubeb_log!(
+ "({:p}) Opening output side: rate {}, channels {}, format {:?}, layout {:?}, prefs {:?}, latency in frames {}, voice processing {}.",
+ self.stm_ptr,
+ self.output_stream_params.rate(),
+ self.output_stream_params.channels(),
+ self.output_stream_params.format(),
+ self.output_stream_params.layout(),
+ self.output_stream_params.prefs(),
+ stream.latency_frames,
+ using_voice_processing_unit
+ );
+
+ // Get output device hardware information.
+ let mut output_hw_desc = AudioStreamBasicDescription::default();
+ let mut size = mem::size_of::<AudioStreamBasicDescription>();
+ let r = audio_unit_get_property(
+ self.output_unit,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ AU_OUT_BUS,
+ &mut output_hw_desc,
+ &mut size,
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitGetProperty/output/kAudioUnitProperty_StreamFormat rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+ cubeb_log!(
+ "({:p}) Output hardware description: {:?}",
+ self.stm_ptr,
+ output_hw_desc
+ );
+
+ // In some cases with (other streams using) VPIO the stream format's mChannelsPerFrame
+ // is higher than expected. Use get_channel_count as source of truth.
+ output_hw_desc.mChannelsPerFrame =
+ get_channel_count(self.output_device.id, DeviceType::OUTPUT).unwrap_or(0);
+
+ // This has been observed in the wild.
+ if output_hw_desc.mChannelsPerFrame == 0 {
+ cubeb_log!(
+ "({:p}) Output hardware description channel count is zero",
+ self.stm_ptr
+ );
+ return Err(Error::error());
+ }
+
+ // Notice: when we are using an aggregate device, output_hw_desc.mChannelsPerFrame is
+ // the total output channel count of all the devices added to the aggregate device.
+ // Due to our aggregate device settings, the data recorded by the input device's output
+ // channels is appended at the end of the raw data given by the output callback.
+ let params = unsafe {
+ let mut p = *self.output_stream_params.as_ptr();
+ p.channels = if using_voice_processing_unit {
+ // VPIO is always MONO.
+ 1
+ } else {
+ output_hw_desc.mChannelsPerFrame
+ };
+ if using_voice_processing_unit {
+ // VPIO will always use the sample rate of the input hw for both input and output,
+ // as reported to us. (We can override it but we cannot improve quality this way).
+ p.rate = self.input_dev_desc.mSampleRate as _;
+ }
+ StreamParams::from(p)
+ };
+
+ self.output_dev_desc = create_stream_description(&params).map_err(|e| {
+ cubeb_log!(
+ "({:p}) Could not initialize the audio stream description.",
+ self.stm_ptr
+ );
+ e
+ })?;
+
+ let device_layout = self
+ .get_output_channel_layout()
+ .map_err(|e| {
+ cubeb_log!(
+ "({:p}) Could not get any channel layout. Defaulting to no channels.",
+ self.stm_ptr
+ );
+ e
+ })
+ .unwrap_or_default();
+
+ cubeb_log!(
+ "({:p} Using output device channel layout {:?}",
+ self.stm_ptr,
+ device_layout
+ );
+
+ // The mixer will be set up when
+ // 1. using aggregate device whose input device has output channels
+ // 2. output device has more channels than we need
+ // 3. output device has different layout than the one we have
+ self.mixer = if self.output_dev_desc.mChannelsPerFrame
+ != self.output_stream_params.channels()
+ || device_layout != mixer::get_channel_order(self.output_stream_params.layout())
+ {
+ cubeb_log!("Incompatible channel layouts detected, setting up remixer");
+ // We will be remixing the data before it reaches the output device.
+ Some(Mixer::new(
+ self.output_stream_params.format(),
+ self.output_stream_params.channels() as usize,
+ self.output_stream_params.layout(),
+ self.output_dev_desc.mChannelsPerFrame as usize,
+ device_layout,
+ ))
+ } else {
+ None
+ };
+
+ let r = audio_unit_set_property(
+ self.output_unit,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ AU_OUT_BUS,
+ &self.output_dev_desc,
+ mem::size_of::<AudioStreamBasicDescription>(),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitSetProperty/output/kAudioUnitProperty_StreamFormat rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+
+ // Use latency to set buffer size
+ assert_ne!(stream.latency_frames, 0);
+ if let Err(r) =
+ set_buffer_size_sync(self.output_unit, DeviceType::OUTPUT, stream.latency_frames)
+ {
+ cubeb_log!("({:p}) Error in change output buffer size.", self.stm_ptr);
+ return Err(r);
+ }
+
+ // Frames per buffer in the output callback.
+ let r = audio_unit_set_property(
+ self.output_unit,
+ kAudioUnitProperty_MaximumFramesPerSlice,
+ kAudioUnitScope_Global,
+ AU_OUT_BUS,
+ &stream.latency_frames,
+ mem::size_of::<u32>(),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitSetProperty/output/kAudioUnitProperty_MaximumFramesPerSlice rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+
+ let aurcbs_out = AURenderCallbackStruct {
+ inputProc: Some(audiounit_output_callback),
+ inputProcRefCon: self.stm_ptr as *mut c_void,
+ };
+ let r = audio_unit_set_property(
+ self.output_unit,
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Global,
+ AU_OUT_BUS,
+ &aurcbs_out,
+ mem::size_of_val(&aurcbs_out),
+ );
+ if r != NO_ERR {
+ cubeb_log!(
+ "AudioUnitSetProperty/output/kAudioUnitProperty_SetRenderCallback rv={}",
+ r
+ );
+ return Err(Error::error());
+ }
+
+ stream.frames_written.store(0, Ordering::SeqCst);
+
+ cubeb_log!(
+ "({:p}) Output audiounit init with device {} successfully.",
+ self.stm_ptr,
+ out_dev_info.id
+ );
+ }
+
+ // We use a resampler because the input AudioUnit operates
+ // reliably only at the capture device's sample rate.
+ // The resampler will convert it to the user sample rate
+ // and deliver it to the callback.
+ let target_sample_rate = if self.has_input() {
+ self.input_stream_params.rate()
+ } else {
+ assert!(self.has_output());
+ self.output_stream_params.rate()
+ };
+
+ let resampler_input_params = if self.has_input() {
+ let mut p = unsafe { *(self.input_stream_params.as_ptr()) };
+ p.rate = self.input_dev_desc.mSampleRate as u32;
+ Some(p)
+ } else {
+ None
+ };
+ let resampler_output_params = if self.has_output() {
+ let mut p = unsafe { *(self.output_stream_params.as_ptr()) };
+ p.rate = self.output_dev_desc.mSampleRate as u32;
+ Some(p)
+ } else {
+ None
+ };
+
+ // Only reclock if there is an input, we could use neither an aggregate device nor the
+ // voice processing unit, and the devices are not part of the same clock domain.
+ let reclock_policy = if self.aggregate_device.is_none()
+ && !using_voice_processing_unit
+ && !same_clock_domain
+ {
+ cubeb_log!(
+ "Reclocking duplex steam using_aggregate_device={} same_clock_domain={}",
+ self.aggregate_device.is_some(),
+ same_clock_domain
+ );
+ ffi::CUBEB_RESAMPLER_RECLOCK_INPUT
+ } else {
+ ffi::CUBEB_RESAMPLER_RECLOCK_NONE
+ };
+
+ self.resampler = Resampler::new(
+ self.stm_ptr as *mut ffi::cubeb_stream,
+ resampler_input_params,
+ resampler_output_params,
+ target_sample_rate,
+ stream.data_callback,
+ stream.user_ptr,
+ ffi::CUBEB_RESAMPLER_QUALITY_DESKTOP,
+ reclock_policy,
+ );
+
+ // In duplex, the input thread might be different from the output thread, and we're logging
+ // everything from the output thread: relay the audio input callback information using a
+ // ring buffer to diagnose issues.
+ if self.has_input() && self.has_output() {
+ self.input_logging = Some(InputCallbackLogger::new());
+ }
+
+ if !self.input_unit.is_null() {
+ let r = audio_unit_initialize(self.input_unit);
+ if r != NO_ERR {
+ cubeb_log!("AudioUnitInitialize/input rv={}", r);
+ return Err(Error::error());
+ }
+
+ stream.input_device_latency_frames.store(
+ get_fixed_latency(self.input_device.id, DeviceType::INPUT),
+ Ordering::SeqCst,
+ );
+ }
+
+ if !self.output_unit.is_null() {
+ if self.input_unit != self.output_unit {
+ let r = audio_unit_initialize(self.output_unit);
+ if r != NO_ERR {
+ cubeb_log!("AudioUnitInitialize/output rv={}", r);
+ return Err(Error::error());
+ }
+ }
+
+ stream.output_device_latency_frames.store(
+ get_fixed_latency(self.output_device.id, DeviceType::OUTPUT),
+ Ordering::SeqCst,
+ );
+
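+ // Add the output AudioUnit's own latency, reported in seconds, converted to frames.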
+ let mut unit_s: f64 = 0.0;
+ let mut size = mem::size_of_val(&unit_s);
+ if audio_unit_get_property(
+ self.output_unit,
+ kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global,
+ 0,
+ &mut unit_s,
+ &mut size,
+ ) == NO_ERR
+ {
+ stream.output_device_latency_frames.fetch_add(
+ (unit_s * self.output_dev_desc.mSampleRate) as u32,
+ Ordering::SeqCst,
+ );
+ }
+ }
+
+ if using_voice_processing_unit {
+ // The VPIO AudioUnit automatically ducks other audio streams on the VPIO
+ // output device. Its ramp duration is 0.5s when ducking, so unduck similarly
+ // now.
+ // NOTE: On MacOS 14 the ducking happens on creation of the VPIO AudioUnit.
+ // On MacOS 10.15 it happens on both creation and initialization, which
+ // is why we defer the unducking until now.
+ let r = audio_device_duck(self.output_device.id, 1.0, ptr::null_mut(), 0.5);
+ if r != NO_ERR {
+ cubeb_log!(
+ "({:p}) Failed to undo ducking of voiceprocessing on output device {}. Proceeding... Error: {}",
+ self.stm_ptr,
+ self.output_device.id,
+ r
+ );
+ }
+
+ // Always try to remember the applied input mute state. If it cannot be applied
+ // to the new device pair, we notify the client of an error and it will have to
+ // open a new stream.
+ if let Err(r) = set_input_mute(self.input_unit, self.input_mute) {
+ cubeb_log!(
+ "({:p}) Failed to set mute state of voiceprocessing. Error: {}",
+ self.stm_ptr,
+ r
+ );
+ return Err(r);
+ }
+
+ // Always try to remember the applied input processing params. If they cannot
+ // be applied in the new device pair, we notify the client of an error and it
+ // will have to open a new stream.
+ if let Err(r) =
+ set_input_processing_params(self.input_unit, self.input_processing_params)
+ {
+ cubeb_log!(
+ "({:p}) Failed to set params of voiceprocessing. Error: {}",
+ self.stm_ptr,
+ r
+ );
+ return Err(r);
+ }
+ }
+
+ if let Err(r) = self.install_system_changed_callback() {
+ cubeb_log!(
+ "({:p}) Could not install the device change callback.",
+ self.stm_ptr
+ );
+ return Err(r);
+ }
+
+ if let Err(r) = self.install_device_changed_callback() {
+ cubeb_log!(
+ "({:p}) Could not install all device change callback.",
+ self.stm_ptr
+ );
+ return Err(r);
+ }
+
+ // We have either default_input_listener or input_alive_listener.
+ // We cannot have both of them at the same time.
+ assert!(
+ !self.has_input()
+ || ((self.default_input_listener.is_some() != self.input_alive_listener.is_some())
+ && (self.default_input_listener.is_some()
+ || self.input_alive_listener.is_some()))
+ );
+
+ // We have either default_output_listener or output_alive_listener.
+ // We cannot have both of them at the same time.
+ assert!(
+ !self.has_output()
+ || ((self.default_output_listener.is_some()
+ != self.output_alive_listener.is_some())
+ && (self.default_output_listener.is_some()
+ || self.output_alive_listener.is_some()))
+ );
+
+ Ok(())
+ }
+
+ fn close(&mut self) {
+ self.debug_assert_is_on_stream_queue();
+ if !self.input_unit.is_null() {
+ audio_unit_uninitialize(self.input_unit);
+ if self.using_voice_processing_unit() {
+ // Handle the VoiceProcessIO case where there is a single unit.
+ self.output_unit = ptr::null_mut();
+ }
+
+ // Cannot unset self.input_unit yet, since the output callback might be live
+ // and reading it.
+ }
+
+ if !self.output_unit.is_null() {
+ audio_unit_uninitialize(self.output_unit);
+ dispose_audio_unit(self.output_unit);
+ self.output_unit = ptr::null_mut();
+ }
+
+ if !self.input_unit.is_null() {
+ dispose_audio_unit(self.input_unit);
+ self.input_unit = ptr::null_mut();
+ }
+
+ self.resampler.destroy();
+ self.mixer = None;
+ self.aggregate_device = None;
+
+ if self.uninstall_system_changed_callback().is_err() {
+ cubeb_log!(
+ "({:p}) Could not uninstall the system changed callback",
+ self.stm_ptr
+ );
+ }
+
+ if self.uninstall_device_changed_callback().is_err() {
+ cubeb_log!(
+ "({:p}) Could not uninstall all device change listeners",
+ self.stm_ptr
+ );
+ }
+ }
+
+ fn install_device_changed_callback(&mut self) -> Result<()> {
+ self.debug_assert_is_on_stream_queue();
+ assert!(!self.stm_ptr.is_null());
+ let stm = unsafe { &(*self.stm_ptr) };
+
+ if !self.output_unit.is_null() {
+ assert_ne!(self.output_device.id, kAudioObjectUnknown);
+ assert_ne!(self.output_device.id, kAudioObjectSystemObject);
+ assert!(
+ self.output_source_listener.is_none(),
+ "register output_source_listener without unregistering the one in use"
+ );
+ assert!(
+ self.output_alive_listener.is_none(),
+ "register output_alive_listener without unregistering the one in use"
+ );
+
+ // Get the notification when the data source on the same device changes,
+ // e.g., when the user plugs in a TRRS headset into the headphone jack.
+ self.output_source_listener = Some(device_property_listener::new(
+ self.output_device.id,
+ get_property_address(Property::DeviceSource, DeviceType::OUTPUT),
+ audiounit_property_listener_callback,
+ ));
+ let rv = stm.add_device_listener(self.output_source_listener.as_ref().unwrap());
+ if rv != NO_ERR {
+ self.output_source_listener = None;
+ cubeb_log!("AudioObjectAddPropertyListener/output/kAudioDevicePropertyDataSource rv={}, device id={}", rv, self.output_device.id);
+ return Err(Error::error());
+ }
+
+ // Get the notification when the output device is going away
+ // if the output doesn't follow the system default.
+ if !self
+ .output_device
+ .flags
+ .contains(device_flags::DEV_SELECTED_DEFAULT)
+ {
+ self.output_alive_listener = Some(device_property_listener::new(
+ self.output_device.id,
+ get_property_address(
+ Property::DeviceIsAlive,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ),
+ audiounit_property_listener_callback,
+ ));
+ let rv = stm.add_device_listener(self.output_alive_listener.as_ref().unwrap());
+ if rv != NO_ERR {
+ self.output_alive_listener = None;
+ cubeb_log!("AudioObjectAddPropertyListener/output/kAudioDevicePropertyDeviceIsAlive rv={}, device id ={}", rv, self.output_device.id);
+ return Err(Error::error());
+ }
+ }
+ }
+
+ if !self.input_unit.is_null() {
+ assert_ne!(self.input_device.id, kAudioObjectUnknown);
+ assert_ne!(self.input_device.id, kAudioObjectSystemObject);
+ assert!(
+ self.input_source_listener.is_none(),
+ "register input_source_listener without unregistering the one in use"
+ );
+ assert!(
+ self.input_alive_listener.is_none(),
+ "register input_alive_listener without unregistering the one in use"
+ );
+
+ // Get the notification when the data source on the same device changes,
+ // e.g., when the user plugs in a TRRS mic into the headphone jack.
+ self.input_source_listener = Some(device_property_listener::new(
+ self.input_device.id,
+ get_property_address(Property::DeviceSource, DeviceType::INPUT),
+ audiounit_property_listener_callback,
+ ));
+ let rv = stm.add_device_listener(self.input_source_listener.as_ref().unwrap());
+ if rv != NO_ERR {
+ self.input_source_listener = None;
+ cubeb_log!("AudioObjectAddPropertyListener/input/kAudioDevicePropertyDataSource rv={}, device id={}", rv, self.input_device.id);
+ return Err(Error::error());
+ }
+
+ // Get the notification when the input device is going away
+ // if the input doesn't follow the system default.
+ if !self
+ .input_device
+ .flags
+ .contains(device_flags::DEV_SELECTED_DEFAULT)
+ {
+ self.input_alive_listener = Some(device_property_listener::new(
+ self.input_device.id,
+ get_property_address(
+ Property::DeviceIsAlive,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ),
+ audiounit_property_listener_callback,
+ ));
+ let rv = stm.add_device_listener(self.input_alive_listener.as_ref().unwrap());
+ if rv != NO_ERR {
+ self.input_alive_listener = None;
+ cubeb_log!("AudioObjectAddPropertyListener/input/kAudioDevicePropertyDeviceIsAlive rv={}, device id ={}", rv, self.input_device.id);
+ return Err(Error::error());
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn install_system_changed_callback(&mut self) -> Result<()> {
+ self.debug_assert_is_on_stream_queue();
+ assert!(!self.stm_ptr.is_null());
+ let stm = unsafe { &(*self.stm_ptr) };
+
+ if !self.output_unit.is_null()
+ && self
+ .output_device
+ .flags
+ .contains(device_flags::DEV_SELECTED_DEFAULT)
+ {
+ assert!(
+ self.default_output_listener.is_none(),
+ "register default_output_listener without unregistering the one in use"
+ );
+
+ // Get the notification when the default output audio device changes, e.g.,
+ // when the user plugs in a USB headset and the system chooses it automatically as the default,
+ // or when another device is chosen in the dropdown list.
+ self.default_output_listener = Some(device_property_listener::new(
+ kAudioObjectSystemObject,
+ get_property_address(
+ Property::HardwareDefaultOutputDevice,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ),
+ audiounit_property_listener_callback,
+ ));
+ let r = stm.add_device_listener(self.default_output_listener.as_ref().unwrap());
+ if r != NO_ERR {
+ self.default_output_listener = None;
+ cubeb_log!("AudioObjectAddPropertyListener/output/kAudioHardwarePropertyDefaultOutputDevice rv={}", r);
+ return Err(Error::error());
+ }
+ }
+
+ if !self.input_unit.is_null()
+ && self
+ .input_device
+ .flags
+ .contains(device_flags::DEV_SELECTED_DEFAULT)
+ {
+ assert!(
+ self.default_input_listener.is_none(),
+ "register default_input_listener without unregistering the one in use"
+ );
+
+ // Get the notification when the default input audio device changes, e.g.,
+ // when the user plugs in a USB mic and the system chooses it automatically as the default,
+ // or when another device is chosen in the system preferences.
+ self.default_input_listener = Some(device_property_listener::new(
+ kAudioObjectSystemObject,
+ get_property_address(
+ Property::HardwareDefaultInputDevice,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ),
+ audiounit_property_listener_callback,
+ ));
+ let r = stm.add_device_listener(self.default_input_listener.as_ref().unwrap());
+ if r != NO_ERR {
+ self.default_input_listener = None;
+ cubeb_log!("AudioObjectAddPropertyListener/input/kAudioHardwarePropertyDefaultInputDevice rv={}", r);
+ return Err(Error::error());
+ }
+ }
+
+ Ok(())
+ }
+
+ fn uninstall_device_changed_callback(&mut self) -> Result<()> {
+ self.debug_assert_is_on_stream_queue();
+ if self.stm_ptr.is_null() {
+ assert!(
+ self.output_source_listener.is_none()
+ && self.output_alive_listener.is_none()
+ && self.input_source_listener.is_none()
+ && self.input_alive_listener.is_none()
+ );
+ return Ok(());
+ }
+
+ let stm = unsafe { &(*self.stm_ptr) };
+
+ // Failing to uninstall listeners is not a fatal error.
+ let mut r = Ok(());
+
+ if self.output_source_listener.is_some() {
+ let rv = stm.remove_device_listener(self.output_source_listener.as_ref().unwrap());
+ if rv != NO_ERR {
+ cubeb_log!("AudioObjectRemovePropertyListener/output/kAudioDevicePropertyDataSource rv={}, device id={}", rv, self.output_device.id);
+ r = Err(Error::error());
+ }
+ self.output_source_listener = None;
+ }
+
+ if self.output_alive_listener.is_some() {
+ let rv = stm.remove_device_listener(self.output_alive_listener.as_ref().unwrap());
+ if rv != NO_ERR {
+ cubeb_log!("AudioObjectRemovePropertyListener/output/kAudioDevicePropertyDeviceIsAlive rv={}, device id={}", rv, self.output_device.id);
+ r = Err(Error::error());
+ }
+ self.output_alive_listener = None;
+ }
+
+ if self.input_source_listener.is_some() {
+ let rv = stm.remove_device_listener(self.input_source_listener.as_ref().unwrap());
+ if rv != NO_ERR {
+ cubeb_log!("AudioObjectRemovePropertyListener/input/kAudioDevicePropertyDataSource rv={}, device id={}", rv, self.input_device.id);
+ r = Err(Error::error());
+ }
+ self.input_source_listener = None;
+ }
+
+ if self.input_alive_listener.is_some() {
+ let rv = stm.remove_device_listener(self.input_alive_listener.as_ref().unwrap());
+ if rv != NO_ERR {
+ cubeb_log!("AudioObjectRemovePropertyListener/input/kAudioDevicePropertyDeviceIsAlive rv={}, device id={}", rv, self.input_device.id);
+ r = Err(Error::error());
+ }
+ self.input_alive_listener = None;
+ }
+
+ r
+ }
+
+ fn uninstall_system_changed_callback(&mut self) -> Result<()> {
+ self.debug_assert_is_on_stream_queue();
+ if self.stm_ptr.is_null() {
+ assert!(
+ self.default_output_listener.is_none() && self.default_input_listener.is_none()
+ );
+ return Ok(());
+ }
+
+ let stm = unsafe { &(*self.stm_ptr) };
+
+ if self.default_output_listener.is_some() {
+ let r = stm.remove_device_listener(self.default_output_listener.as_ref().unwrap());
+ if r != NO_ERR {
+ return Err(Error::error());
+ }
+ self.default_output_listener = None;
+ }
+
+ if self.default_input_listener.is_some() {
+ let r = stm.remove_device_listener(self.default_input_listener.as_ref().unwrap());
+ if r != NO_ERR {
+ return Err(Error::error());
+ }
+ self.default_input_listener = None;
+ }
+
+ Ok(())
+ }
+
+ fn get_output_channel_layout(&self) -> Result<Vec<mixer::Channel>> {
+ self.debug_assert_is_on_stream_queue();
+ assert!(!self.output_unit.is_null());
+ if self.using_voice_processing_unit() {
+ return Ok(get_channel_order(ChannelLayout::MONO));
+ }
+ get_channel_layout(self.output_unit)
+ }
+}
+
+impl<'ctx> Drop for CoreStreamData<'ctx> {
+ fn drop(&mut self) {
+ self.debug_assert_is_on_stream_queue();
+ self.stop_audiounits();
+ self.close();
+ }
+}
+
+#[derive(Debug, Clone)]
+struct OutputCallbackTimingData {
+ frames_queued: u64,
+ timestamp: u64,
+ buffer_size: u64,
+}
+
+// The first two members of the Cubeb stream must be a pointer to its Cubeb context and a void
+// user-defined pointer. The Cubeb interface relies on this assumption to operate the Cubeb APIs.
+// #[repr(C)] is used to prevent any padding from being added in the beginning of the AudioUnitStream.
+#[repr(C)]
+#[derive(Debug)]
+// Allow exposing this private struct in public interfaces when running tests.
+#[cfg_attr(test, allow(private_in_public))]
+struct AudioUnitStream<'ctx> {
+ context: &'ctx mut AudioUnitContext,
+ user_ptr: *mut c_void,
+ // Task queue for the stream.
+ queue: Queue,
+
+ data_callback: ffi::cubeb_data_callback,
+ state_callback: ffi::cubeb_state_callback,
+ device_changed_callback: Mutex<ffi::cubeb_device_changed_callback>,
+ // Frame counters
+ frames_queued: u64,
+ // How many frames got read from the input since the stream started (includes
+ // padded silence)
+ frames_read: AtomicUsize,
+ // How many frames got written to the output device since the stream started
+ frames_written: AtomicUsize,
+ stopped: AtomicBool,
+ draining: AtomicBool,
+ reinit_pending: AtomicBool,
+ destroy_pending: AtomicBool,
+ // Latency requested by the user.
+ latency_frames: u32,
+ // Fixed latency, characteristic of the device.
+ output_device_latency_frames: AtomicU32,
+ input_device_latency_frames: AtomicU32,
+ // Total latency: the latency of the device + the OS latency
+ total_output_latency_frames: AtomicU32,
+ total_input_latency_frames: AtomicU32,
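+ // Timing data snapshots published by the output callback (via the write end below) and
+ // consumed by position() (via the read end). The triple buffer provides a lock-free
+ // single-producer/single-consumer channel suitable for the real-time audio callback.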
+ output_callback_timing_data_read: triple_buffer::Output<OutputCallbackTimingData>,
+ output_callback_timing_data_write: triple_buffer::Input<OutputCallbackTimingData>,
+ prev_position: u64,
+ // This is true if a device change callback is currently running.
+ switching_device: AtomicBool,
+ core_stream_data: CoreStreamData<'ctx>,
+}
+
+impl<'ctx> AudioUnitStream<'ctx> {
+ fn new(
+ context: &'ctx mut AudioUnitContext,
+ user_ptr: *mut c_void,
+ data_callback: ffi::cubeb_data_callback,
+ state_callback: ffi::cubeb_state_callback,
+ latency_frames: u32,
+ ) -> Self {
+ let output_callback_timing_data =
+ triple_buffer::TripleBuffer::new(OutputCallbackTimingData {
+ frames_queued: 0,
+ timestamp: 0,
+ buffer_size: 0,
+ });
+ let (output_callback_timing_data_write, output_callback_timing_data_read) =
+ output_callback_timing_data.split();
+ AudioUnitStream {
+ context,
+ user_ptr,
+ queue: Queue::new(DISPATCH_QUEUE_LABEL),
+ data_callback,
+ state_callback,
+ device_changed_callback: Mutex::new(None),
+ frames_queued: 0,
+ frames_read: AtomicUsize::new(0),
+ frames_written: AtomicUsize::new(0),
+ stopped: AtomicBool::new(true),
+ draining: AtomicBool::new(false),
+ reinit_pending: AtomicBool::new(false),
+ destroy_pending: AtomicBool::new(false),
+ latency_frames,
+ output_device_latency_frames: AtomicU32::new(0),
+ input_device_latency_frames: AtomicU32::new(0),
+ total_output_latency_frames: AtomicU32::new(0),
+ total_input_latency_frames: AtomicU32::new(0),
+ output_callback_timing_data_write,
+ output_callback_timing_data_read,
+ prev_position: 0,
+ switching_device: AtomicBool::new(false),
+ core_stream_data: CoreStreamData::default(),
+ }
+ }
+
+ fn add_device_listener(&self, listener: &device_property_listener) -> OSStatus {
+ self.queue.debug_assert_is_current();
+ audio_object_add_property_listener(
+ listener.device,
+ &listener.property,
+ listener.listener,
+ self as *const Self as *mut c_void,
+ )
+ }
+
+ fn remove_device_listener(&self, listener: &device_property_listener) -> OSStatus {
+ self.queue.debug_assert_is_current();
+ audio_object_remove_property_listener(
+ listener.device,
+ &listener.property,
+ listener.listener,
+ self as *const Self as *mut c_void,
+ )
+ }
+
+ fn notify_state_changed(&self, state: State) {
+ if self.state_callback.is_none() {
+ return;
+ }
+ let callback = self.state_callback.unwrap();
+ unsafe {
+ callback(
+ self as *const AudioUnitStream as *mut ffi::cubeb_stream,
+ self.user_ptr,
+ state.into(),
+ );
+ }
+ }
+
+ fn reinit(&mut self) -> Result<()> {
+ self.queue.debug_assert_is_current();
+ // Call stop_audiounits to avoid potential data race. If there is a running data callback,
+ // which locks a mutex inside CoreAudio framework, then this call will block the current
+ // thread until the callback is finished since this call asks to lock a mutex inside
+ // CoreAudio framework that is used by the data callback.
+ if !self.stopped.load(Ordering::SeqCst) {
+ self.core_stream_data.stop_audiounits();
+ }
+
+ debug_assert!(
+ !self.core_stream_data.input_unit.is_null()
+ || !self.core_stream_data.output_unit.is_null()
+ );
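+ // Capture the current output volume before tearing the units down so it can be
+ // restored on the rebuilt output unit once setup() succeeds (see set_volume below).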
+ let vol_rv = if self.core_stream_data.output_unit.is_null() {
+ Err(Error::error())
+ } else {
+ get_volume(self.core_stream_data.output_unit)
+ };
+
+ self.core_stream_data.close();
+
+ // Use the new default device if this stream was set to follow the output device.
+ if self.core_stream_data.has_output()
+ && self
+ .core_stream_data
+ .output_device
+ .flags
+ .contains(device_flags::DEV_SELECTED_DEFAULT)
+ {
+ self.core_stream_data.output_device =
+ match create_device_info(kAudioObjectUnknown, DeviceType::OUTPUT) {
+ None => {
+ cubeb_log!("Fail to create device info for output");
+ return Err(Error::error());
+ }
+ Some(d) => d,
+ };
+ }
+
+ // Likewise, for the input side
+ if self.core_stream_data.has_input()
+ && self
+ .core_stream_data
+ .input_device
+ .flags
+ .contains(device_flags::DEV_SELECTED_DEFAULT)
+ {
+ self.core_stream_data.input_device =
+ match create_device_info(kAudioObjectUnknown, DeviceType::INPUT) {
+ None => {
+ cubeb_log!("Fail to create device info for input");
+ return Err(Error::error());
+ }
+ Some(d) => d,
+ }
+ }
+
+ self.core_stream_data.setup().map_err(|e| {
+ cubeb_log!("({:p}) Setup failed.", self.core_stream_data.stm_ptr);
+ e
+ })?;
+
+ if let Ok(volume) = vol_rv {
+ set_volume(self.core_stream_data.output_unit, volume);
+ }
+
+ // If the stream was running, start it again.
+ if !self.stopped.load(Ordering::SeqCst) {
+ self.core_stream_data.start_audiounits().map_err(|e| {
+ cubeb_log!(
+ "({:p}) Start audiounit failed.",
+ self.core_stream_data.stm_ptr
+ );
+ e
+ })?;
+ }
+
+ Ok(())
+ }
+
+ fn reinit_async(&mut self) {
+ if self.reinit_pending.swap(true, Ordering::SeqCst) {
+ // A reinit task is already pending, nothing more to do.
+ cubeb_log!(
+ "({:p}) re-init stream task already pending, cancelling request",
+ self as *const AudioUnitStream
+ );
+ return;
+ }
+
+ let queue = self.queue.clone();
+ // Use a new thread, through the queue, to avoid deadlocks when calling
+ // the Get/SetProperties methods from inside the notify callback.
+ queue.run_async(move || {
+ let stm_ptr = self as *const AudioUnitStream;
+ if self.destroy_pending.load(Ordering::SeqCst) {
+ cubeb_log!(
+ "({:p}) stream pending destroy, cancelling reinit task",
+ stm_ptr
+ );
+ return;
+ }
+
+ if self.reinit().is_err() {
+ self.core_stream_data.close();
+ self.notify_state_changed(State::Error);
+ cubeb_log!(
+ "({:p}) Could not reopen the stream after switching.",
+ stm_ptr
+ );
+ }
+ self.switching_device.store(false, Ordering::SeqCst);
+ self.reinit_pending.store(false, Ordering::SeqCst);
+ });
+ }
+
+ fn close_on_error(&mut self) {
+ self.queue.debug_assert_is_current();
+ let stm_ptr = self as *const AudioUnitStream;
+
+ self.core_stream_data.close();
+ self.notify_state_changed(State::Error);
+ cubeb_log!("({:p}) Close the stream due to an error.", stm_ptr);
+
+ self.switching_device.store(false, Ordering::SeqCst);
+ }
+
+ fn destroy_internal(&mut self) {
+ self.queue.debug_assert_is_current();
+ self.core_stream_data.close();
+ assert!(self.context.active_streams() >= 1);
+ self.context.update_latency_by_removing_stream();
+ }
+
+ fn destroy(&mut self) {
+ self.queue.debug_assert_is_current();
+ if self
+ .core_stream_data
+ .uninstall_system_changed_callback()
+ .is_err()
+ {
+ cubeb_log!(
+ "({:p}) Could not uninstall the system changed callback",
+ self as *const AudioUnitStream
+ );
+ }
+
+ if self
+ .core_stream_data
+ .uninstall_device_changed_callback()
+ .is_err()
+ {
+ cubeb_log!(
+ "({:p}) Could not uninstall all device change listeners",
+ self as *const AudioUnitStream
+ );
+ }
+
+ // Execute the stream destroy work.
+ self.destroy_pending.store(true, Ordering::SeqCst);
+
+ // Call stop_audiounits to avoid potential data race. If there is a running data callback,
+ // which locks a mutex inside CoreAudio framework, then this call will block the current
+ // thread until the callback is finished since this call asks to lock a mutex inside
+ // CoreAudio framework that is used by the data callback.
+ if !self.stopped.load(Ordering::SeqCst) {
+ self.core_stream_data.stop_audiounits();
+ self.stopped.store(true, Ordering::SeqCst);
+ }
+
+ self.destroy_internal();
+
+ cubeb_log!(
+ "Cubeb stream ({:p}) destroyed successful.",
+ self as *const AudioUnitStream
+ );
+ }
+}
+
+impl<'ctx> Drop for AudioUnitStream<'ctx> {
+ fn drop(&mut self) {
+ // Execute destroy in serial queue to avoid collision with reinit when un/plug devices
+ self.queue.clone().run_final(move || {
+ self.destroy();
+ self.core_stream_data = CoreStreamData::default();
+ });
+ }
+}
+
+impl<'ctx> StreamOps for AudioUnitStream<'ctx> {
+ fn start(&mut self) -> Result<()> {
+ self.stopped.store(false, Ordering::SeqCst);
+ self.draining.store(false, Ordering::SeqCst);
+
+ // Execute start in serial queue to avoid racing with destroy or reinit.
+ let mut result = Err(Error::error());
+ let started = &mut result;
+ let stream = &self;
+ self.queue.run_sync(move || {
+ *started = stream.core_stream_data.start_audiounits();
+ });
+
+ result?;
+
+ self.notify_state_changed(State::Started);
+
+ cubeb_log!(
+ "Cubeb stream ({:p}) started successfully.",
+ self as *const AudioUnitStream
+ );
+ Ok(())
+ }
+ fn stop(&mut self) -> Result<()> {
+ self.stopped.store(true, Ordering::SeqCst);
+
+ // Execute stop in serial queue to avoid racing with destroy or reinit.
+ let stream = &self;
+ self.queue.run_sync(move || {
+ stream.core_stream_data.stop_audiounits();
+ });
+
+ self.notify_state_changed(State::Stopped);
+
+ cubeb_log!(
+ "Cubeb stream ({:p}) stopped successfully.",
+ self as *const AudioUnitStream
+ );
+ Ok(())
+ }
+ fn position(&mut self) -> Result<u64> {
+ let OutputCallbackTimingData {
+ frames_queued,
+ timestamp,
+ buffer_size,
+ } = self.output_callback_timing_data_read.read().clone();
+ let total_output_latency_frames =
+ u64::from(self.total_output_latency_frames.load(Ordering::SeqCst));
+ // If output latency is available, take it into account. Otherwise, use the number of
+ // frames played.
+ let position = if total_output_latency_frames != 0 {
+ if total_output_latency_frames > frames_queued {
+ 0
+ } else {
+ // Interpolate here to match other cubeb backends. Only return an interpolated time
+ // if we've played enough frames. If the stream is paused, clamp the interpolated
+ // number of frames to the buffer size.
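+ // For example (hypothetical numbers): at a 48_000 Hz output rate, 5 ms
+ // (5_000_000 ns) elapsed since `timestamp` adds
+ // min(5_000_000 * 48_000 / 1_000_000_000, buffer_size) = min(240, buffer_size)
+ // frames on top of (frames_queued - total_output_latency_frames).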
+ const NS2S: u64 = 1_000_000_000;
+ let now = unsafe { mach_absolute_time() };
+ let diff = now - timestamp;
+ let interpolated_frames = cmp::min(
+ host_time_to_ns(diff)
+ * self.core_stream_data.output_stream_params.rate() as u64
+ / NS2S,
+ buffer_size,
+ );
+ (frames_queued - total_output_latency_frames) + interpolated_frames
+ }
+ } else {
+ frames_queued
+ };
+
+ // Ensure monotonicity of the clock even when changing the output device.
+ if position > self.prev_position {
+ self.prev_position = position;
+ }
+ Ok(self.prev_position)
+ }
+ #[cfg(target_os = "ios")]
+ fn latency(&mut self) -> Result<u32> {
+ Err(not_supported())
+ }
+ #[cfg(not(target_os = "ios"))]
+ fn latency(&mut self) -> Result<u32> {
+ Ok(self.total_output_latency_frames.load(Ordering::SeqCst))
+ }
+ #[cfg(target_os = "ios")]
+ fn input_latency(&mut self) -> Result<u32> {
+ Err(not_supported())
+ }
+ #[cfg(not(target_os = "ios"))]
+ fn input_latency(&mut self) -> Result<u32> {
+ let user_rate = self.core_stream_data.input_stream_params.rate();
+ let hw_rate = self.core_stream_data.input_dev_desc.mSampleRate as u32;
+ let frames = self.total_input_latency_frames.load(Ordering::SeqCst);
+ if frames != 0 {
+ if hw_rate == user_rate {
+ Ok(frames)
+ } else {
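+ // Convert the latency from hardware-rate frames to user-rate frames. For example
+ // (hypothetical numbers): 480 frames at a 48_000 Hz hardware rate scale to
+ // 480 * 44_100 / 48_000 = 441 frames at a 44_100 Hz user rate.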
+ Ok((frames * user_rate) / hw_rate)
+ }
+ } else {
+ Err(Error::error())
+ }
+ }
+ fn set_volume(&mut self, volume: f32) -> Result<()> {
+ // Execute set_volume in serial queue to avoid racing with destroy or reinit.
+ let mut result = Err(Error::error());
+ let set = &mut result;
+ let stream = &self;
+ self.queue.run_sync(move || {
+ *set = set_volume(stream.core_stream_data.output_unit, volume);
+ });
+
+ result?;
+
+ cubeb_log!(
+ "Cubeb stream ({:p}) set volume to {}.",
+ self as *const AudioUnitStream,
+ volume
+ );
+ Ok(())
+ }
+ fn set_name(&mut self, _: &CStr) -> Result<()> {
+ Err(Error::not_supported())
+ }
+ fn current_device(&mut self) -> Result<&DeviceRef> {
+ Err(Error::not_supported())
+ }
+ fn set_input_mute(&mut self, mute: bool) -> Result<()> {
+ if self.core_stream_data.input_unit.is_null() {
+ return Err(Error::invalid_parameter());
+ }
+
+ if !self.core_stream_data.using_voice_processing_unit() {
+ return Err(Error::error());
+ }
+
+ // Execute set_input_mute in serial queue to avoid racing with destroy or reinit.
+ let mut result = Err(Error::error());
+ let set = &mut result;
+ let stream = &self;
+ self.queue.run_sync(move || {
+ *set = set_input_mute(stream.core_stream_data.input_unit, mute);
+ });
+
+ result?;
+
+ cubeb_log!(
+ "Cubeb stream ({:p}) set input mute to {}.",
+ self as *const AudioUnitStream,
+ mute
+ );
+ self.core_stream_data.input_mute = mute;
+ Ok(())
+ }
+ fn set_input_processing_params(&mut self, params: InputProcessingParams) -> Result<()> {
+ // CUBEB_ERROR_INVALID_PARAMETER if a given param is not supported by
+ // this backend, or if this stream does not have an input device
+ if self.core_stream_data.input_unit.is_null() {
+ return Err(Error::invalid_parameter());
+ }
+
+ if self
+ .context
+ .supported_input_processing_params()
+ .unwrap()
+ .intersection(params)
+ != params
+ {
+ return Err(Error::invalid_parameter());
+ }
+
+ // CUBEB_ERROR if params could not be applied
+ // note: only works with VoiceProcessingIO
+ if !self.core_stream_data.using_voice_processing_unit() {
+ return Err(Error::error());
+ }
+
+ // Execute set_input_processing_params in serial queue to avoid racing with destroy or reinit.
+ let mut result = Err(Error::error());
+ let set = &mut result;
+ let stream = &self;
+ self.queue.run_sync(move || {
+ *set = set_input_processing_params(stream.core_stream_data.input_unit, params);
+ });
+
+ result?;
+
+ cubeb_log!(
+ "Cubeb stream ({:p}) set input processing params to {:?}.",
+ self as *const AudioUnitStream,
+ params
+ );
+ self.core_stream_data.input_processing_params = params;
+ Ok(())
+ }
+ #[cfg(target_os = "ios")]
+ fn device_destroy(&mut self, device: &DeviceRef) -> Result<()> {
+ Err(not_supported())
+ }
+ #[cfg(not(target_os = "ios"))]
+ fn device_destroy(&mut self, device: &DeviceRef) -> Result<()> {
+ if device.as_ptr().is_null() {
+ Err(Error::error())
+ } else {
+ unsafe {
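+ // Take back ownership of the cubeb_device and the C strings inside it so that
+ // everything allocated when this device descriptor was built is freed here.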
+ let mut dev: Box<ffi::cubeb_device> = Box::from_raw(device.as_ptr() as *mut _);
+ if !dev.output_name.is_null() {
+ let _ = CString::from_raw(dev.output_name as *mut _);
+ dev.output_name = ptr::null_mut();
+ }
+ if !dev.input_name.is_null() {
+ let _ = CString::from_raw(dev.input_name as *mut _);
+ dev.input_name = ptr::null_mut();
+ }
+ drop(dev);
+ }
+ Ok(())
+ }
+ }
+ fn register_device_changed_callback(
+ &mut self,
+ device_changed_callback: ffi::cubeb_device_changed_callback,
+ ) -> Result<()> {
+ let mut callback = self.device_changed_callback.lock().unwrap();
+ // Note: registering a second callback without unregistering the first one is rejected.
+ // The current implementation requires unregistering before registering a new callback.
+ if device_changed_callback.is_some() && callback.is_some() {
+ Err(Error::invalid_parameter())
+ } else {
+ *callback = device_changed_callback;
+ Ok(())
+ }
+ }
+}
+
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<'ctx> Send for AudioUnitStream<'ctx> {}
+unsafe impl<'ctx> Sync for AudioUnitStream<'ctx> {}
+
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/resampler.rs b/third_party/rust/cubeb-coreaudio/src/backend/resampler.rs
new file mode 100644
index 0000000000..b72fc6310b
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/resampler.rs
@@ -0,0 +1,84 @@
+use super::auto_release::*;
+use cubeb_backend::ffi;
+use std::os::raw::{c_long, c_uint, c_void};
+use std::ptr;
+
+#[derive(Debug)]
+pub struct Resampler(AutoRelease<ffi::cubeb_resampler>);
+
+impl Resampler {
+ #[allow(clippy::too_many_arguments)]
+ pub fn new(
+ stream: *mut ffi::cubeb_stream,
+ mut input_params: Option<ffi::cubeb_stream_params>,
+ mut output_params: Option<ffi::cubeb_stream_params>,
+ target_rate: c_uint,
+ data_callback: ffi::cubeb_data_callback,
+ user_ptr: *mut c_void,
+ quality: ffi::cubeb_resampler_quality,
+ reclock: ffi::cubeb_resampler_reclock,
+ ) -> Self {
+ let raw_resampler = unsafe {
+ let in_params = match &mut input_params {
+ Some(p) => p,
+ None => ptr::null_mut(),
+ };
+ let out_params = match &mut output_params {
+ Some(p) => p,
+ None => ptr::null_mut(),
+ };
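+ // A missing direction is passed as a null pointer so the resampler only
+ // processes the side(s) that actually exist.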
+ ffi::cubeb_resampler_create(
+ stream,
+ in_params,
+ out_params,
+ target_rate,
+ data_callback,
+ user_ptr,
+ quality,
+ reclock,
+ )
+ };
+ assert!(!raw_resampler.is_null(), "Failed to create resampler");
+ let resampler = AutoRelease::new(raw_resampler, ffi::cubeb_resampler_destroy);
+ Self(resampler)
+ }
+
+ pub fn fill(
+ &mut self,
+ input_buffer: *mut c_void,
+ input_frame_count: *mut c_long,
+ output_buffer: *mut c_void,
+ output_frames_needed: c_long,
+ ) -> c_long {
+ unsafe {
+ ffi::cubeb_resampler_fill(
+ self.0.as_mut(),
+ input_buffer,
+ input_frame_count,
+ output_buffer,
+ output_frames_needed,
+ )
+ }
+ }
+
+ pub fn destroy(&mut self) {
+ if !self.0.as_ptr().is_null() {
+ self.0.reset(ptr::null_mut());
+ }
+ }
+}
+
+impl Drop for Resampler {
+ fn drop(&mut self) {
+ self.destroy();
+ }
+}
+
+impl Default for Resampler {
+ fn default() -> Self {
+ Self(AutoRelease::new(
+ ptr::null_mut(),
+ ffi::cubeb_resampler_destroy,
+ ))
+ }
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/aggregate_device.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/aggregate_device.rs
new file mode 100644
index 0000000000..1d3c341ae8
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/aggregate_device.rs
@@ -0,0 +1,400 @@
+use super::utils::{
+ test_get_all_devices, test_get_all_onwed_devices, test_get_default_device,
+ test_get_drift_compensations, test_get_master_device, DeviceFilter, Scope,
+};
+use super::*;
+
+// AggregateDevice::set_sub_devices
+// ------------------------------------
+#[test]
+#[should_panic]
+fn test_aggregate_set_sub_devices_for_an_unknown_aggregate_device() {
+ // If aggregate device id is kAudioObjectUnknown, we are unable to set device list.
+ let default_input = test_get_default_device(Scope::Input);
+ let default_output = test_get_default_device(Scope::Output);
+ if default_input.is_none() || default_output.is_none() {
+ panic!("No input or output device.");
+ }
+
+ let default_input = default_input.unwrap();
+ let default_output = default_output.unwrap();
+ assert!(
+ AggregateDevice::set_sub_devices(kAudioObjectUnknown, default_input, default_output)
+ .is_err()
+ );
+}
+
+#[test]
+#[should_panic]
+fn test_aggregate_set_sub_devices_for_unknown_devices() {
+ // If aggregate device id is kAudioObjectUnknown, we are unable to set device list.
+ assert!(AggregateDevice::set_sub_devices(
+ kAudioObjectUnknown,
+ kAudioObjectUnknown,
+ kAudioObjectUnknown
+ )
+ .is_err());
+}
+
+// AggregateDevice::get_sub_devices
+// ------------------------------------
+// You can check this by creating an aggregate device in the `Audio MIDI Setup`
+// application and printing out its sub devices!
+#[test]
+fn test_aggregate_get_sub_devices() {
+ let devices = test_get_all_devices(DeviceFilter::ExcludeCubebAggregateAndVPIO);
+ for device in devices {
+ // `AggregateDevice::get_sub_devices(device)` will return a single-element vector
+ // containing `device` itself if it's not an aggregate device. This test assumes none of
+ // the devices is an empty aggregate device (the test will panic when calling
+ // get_sub_devices with an empty aggregate device).
+ let sub_devices = AggregateDevice::get_sub_devices(device).unwrap();
+ // TODO: If the device is a blank aggregate device, then the assertion fails!
+ assert!(!sub_devices.is_empty());
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_aggregate_get_sub_devices_for_a_unknown_device() {
+ let devices = AggregateDevice::get_sub_devices(kAudioObjectUnknown).unwrap();
+ assert!(devices.is_empty());
+}
+
+// AggregateDevice::set_master_device
+// ------------------------------------
+#[test]
+#[should_panic]
+fn test_aggregate_set_master_device_for_an_unknown_aggregate_device() {
+ assert!(AggregateDevice::set_master_device(kAudioObjectUnknown, kAudioObjectUnknown).is_err());
+}
+
+// AggregateDevice::activate_clock_drift_compensation
+// ------------------------------------
+#[test]
+#[should_panic]
+fn test_aggregate_activate_clock_drift_compensation_for_an_unknown_aggregate_device() {
+ assert!(AggregateDevice::activate_clock_drift_compensation(kAudioObjectUnknown).is_err());
+}
+
+// AggregateDevice::destroy_device
+// ------------------------------------
+#[test]
+#[should_panic]
+fn test_aggregate_destroy_device_for_unknown_plugin_and_aggregate_devices() {
+ assert!(AggregateDevice::destroy_device(kAudioObjectUnknown, kAudioObjectUnknown).is_err())
+}
+
+#[test]
+#[should_panic]
+fn test_aggregate_destroy_aggregate_device_for_a_unknown_aggregate_device() {
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ assert!(AggregateDevice::destroy_device(plugin, kAudioObjectUnknown).is_err());
+}
+
+// Default Ignored Tests
+// ================================================================================================
+// The following tests that call `AggregateDevice::create_blank_device` are marked `ignore` by
+// default since the device-collection-changed callbacks will be fired when
+// `AggregateDevice::create_blank_device` is called (it plugs a new device into the system!).
+// Some tests rely on the device-collection-changed callbacks in a certain way. The callbacks
+// fired from an unexpected `AggregateDevice::create_blank_device` would break those tests.
+
+// AggregateDevice::create_blank_device_sync
+// ------------------------------------
+#[test]
+#[ignore]
+fn test_aggregate_create_blank_device() {
+ // TODO: Test this when there is no available devices.
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+ let devices = test_get_all_devices(DeviceFilter::IncludeAll);
+ let device = devices.into_iter().find(|dev| dev == &device).unwrap();
+ let uid = get_device_global_uid(device).unwrap().into_string();
+ assert!(uid.contains(PRIVATE_AGGREGATE_DEVICE_NAME));
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+// AggregateDevice::get_sub_devices
+// ------------------------------------
+#[test]
+#[ignore]
+#[should_panic]
+fn test_aggregate_get_sub_devices_for_blank_aggregate_devices() {
+ // TODO: Test this when there is no available devices.
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+ // There is no sub device in a blank aggregate device!
+ // AggregateDevice::get_sub_devices guarantees a non-empty vector of devices, so
+ // the following call will panic!
+ let sub_devices = AggregateDevice::get_sub_devices(device).unwrap();
+ assert!(sub_devices.is_empty());
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+// AggregateDevice::set_sub_devices_sync
+// ------------------------------------
+#[test]
+#[ignore]
+fn test_aggregate_set_sub_devices() {
+ let input_device = test_get_default_device(Scope::Input);
+ let output_device = test_get_default_device(Scope::Output);
+ if input_device.is_none() || output_device.is_none() || input_device == output_device {
+ println!("No input or output device to create an aggregate device.");
+ return;
+ }
+
+ let input_device = input_device.unwrap();
+ let output_device = output_device.unwrap();
+
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+ assert!(AggregateDevice::set_sub_devices_sync(device, input_device, output_device).is_ok());
+
+ let sub_devices = AggregateDevice::get_sub_devices(device).unwrap();
+ let input_sub_devices = AggregateDevice::get_sub_devices(input_device).unwrap();
+ let output_sub_devices = AggregateDevice::get_sub_devices(output_device).unwrap();
+
+ // TODO: There may be overlapping devices between input_sub_devices and output_sub_devices,
+ // but for now AggregateDevice::set_sub_devices adds them directly.
+ assert_eq!(
+ sub_devices.len(),
+ input_sub_devices.len() + output_sub_devices.len()
+ );
+ for dev in &input_sub_devices {
+ assert!(sub_devices.contains(dev));
+ }
+ for dev in &output_sub_devices {
+ assert!(sub_devices.contains(dev));
+ }
+
+ let onwed_devices = test_get_all_onwed_devices(device);
+ let onwed_device_uids = get_device_uids(&onwed_devices);
+ let input_sub_device_uids = get_device_uids(&input_sub_devices);
+ let output_sub_device_uids = get_device_uids(&output_sub_devices);
+ for uid in &input_sub_device_uids {
+ assert!(onwed_device_uids.contains(uid));
+ }
+ for uid in &output_sub_device_uids {
+ assert!(onwed_device_uids.contains(uid));
+ }
+
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+#[test]
+#[ignore]
+#[should_panic]
+fn test_aggregate_set_sub_devices_for_unknown_input_devices() {
+ let output_device = test_get_default_device(Scope::Output);
+ if output_device.is_none() {
+ panic!("Need a output device for the test!");
+ }
+ let output_device = output_device.unwrap();
+
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+
+ assert!(AggregateDevice::set_sub_devices(device, kAudioObjectUnknown, output_device).is_err());
+
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+#[test]
+#[ignore]
+#[should_panic]
+fn test_aggregate_set_sub_devices_for_unknown_output_devices() {
+ let input_device = test_get_default_device(Scope::Input);
+ if input_device.is_none() {
+ panic!("Need a input device for the test!");
+ }
+ let input_device = input_device.unwrap();
+
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+
+ assert!(AggregateDevice::set_sub_devices(device, input_device, kAudioObjectUnknown).is_err());
+
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+fn get_device_uids(devices: &Vec<AudioObjectID>) -> Vec<String> {
+ devices
+ .iter()
+ .map(|device| get_device_global_uid(*device).unwrap().into_string())
+ .collect()
+}
+
+// AggregateDevice::set_master_device
+// ------------------------------------
+#[test]
+#[ignore]
+fn test_aggregate_set_master_device() {
+ let input_device = test_get_default_device(Scope::Input);
+ let output_device = test_get_default_device(Scope::Output);
+ if input_device.is_none() || output_device.is_none() || input_device == output_device {
+ println!("No input or output device to create an aggregate device.");
+ return;
+ }
+
+ let input_device = input_device.unwrap();
+ let output_device = output_device.unwrap();
+
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+ assert!(AggregateDevice::set_sub_devices_sync(device, input_device, output_device).is_ok());
+ assert!(AggregateDevice::set_master_device(device, output_device).is_ok());
+
+ // Check if master is set to the first sub device of the default output device.
+ let first_output_sub_device_uid =
+ get_device_uid(AggregateDevice::get_sub_devices(device).unwrap()[0]);
+ let master_device_uid = test_get_master_device(device);
+ assert_eq!(first_output_sub_device_uid, master_device_uid);
+
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+#[test]
+#[ignore]
+fn test_aggregate_set_master_device_for_a_blank_aggregate_device() {
+ let output_device = test_get_default_device(Scope::Output);
+ if output_device.is_none() {
+ println!("No output device to test.");
+ return;
+ }
+
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+ assert!(AggregateDevice::set_master_device(device, output_device.unwrap()).is_ok());
+
+ // TODO: It's really weird that the aggregate device actually owns nothing
+ // but its master device can still be set successfully!
+ // The sub devices of this blank aggregate device (by `AggregateDevice::get_sub_devices`)
+ // and the owned devices (by `test_get_all_onwed_devices`) are empty since the size returned
+ // from `audio_object_get_property_data_size` is 0.
+ // The CFStringRef of the master device returned from `test_get_master_device` is actually
+ // non-null.
+
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+fn get_device_uid(id: AudioObjectID) -> String {
+ get_device_global_uid(id).unwrap().into_string()
+}
+
+// AggregateDevice::activate_clock_drift_compensation
+// ------------------------------------
+#[test]
+#[ignore]
+fn test_aggregate_activate_clock_drift_compensation() {
+ let input_device = test_get_default_device(Scope::Input);
+ let output_device = test_get_default_device(Scope::Output);
+ if input_device.is_none() || output_device.is_none() || input_device == output_device {
+ println!("No input or output device to create an aggregate device.");
+ return;
+ }
+
+ let input_device = input_device.unwrap();
+ let output_device = output_device.unwrap();
+
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+ assert!(AggregateDevice::set_sub_devices_sync(device, input_device, output_device).is_ok());
+ assert!(AggregateDevice::set_master_device(device, output_device).is_ok());
+ assert!(AggregateDevice::activate_clock_drift_compensation(device).is_ok());
+
+ // Check the compensations.
+ let devices = test_get_all_onwed_devices(device);
+ let compensations = get_drift_compensations(&devices);
+ assert!(!compensations.is_empty());
+ assert_eq!(devices.len(), compensations.len());
+
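+ // The assertion below expects no compensation (0) on the first owned sub device,
+ // which acts as the master, and DRIFT_COMPENSATION on every other sub device.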
+ for (i, compensation) in compensations.iter().enumerate() {
+ assert_eq!(*compensation, if i == 0 { 0 } else { DRIFT_COMPENSATION });
+ }
+
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+#[test]
+#[ignore]
+fn test_aggregate_activate_clock_drift_compensation_for_an_aggregate_device_without_master_device()
+{
+ let input_device = test_get_default_device(Scope::Input);
+ let output_device = test_get_default_device(Scope::Output);
+ if input_device.is_none() || output_device.is_none() || input_device == output_device {
+ println!("No input or output device to create an aggregate device.");
+ return;
+ }
+
+ let input_device = input_device.unwrap();
+ let output_device = output_device.unwrap();
+
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+ assert!(AggregateDevice::set_sub_devices_sync(device, input_device, output_device).is_ok());
+
+ // TODO: Is the master device the first output sub device by default if we
+ // don't set one? Is it because we add the output sub device list
+ // before the input one? (See the implementation of
+ // AggregateDevice::set_sub_devices.)
+ let first_output_sub_device_uid =
+ get_device_uid(AggregateDevice::get_sub_devices(output_device).unwrap()[0]);
+ let master_device_uid = test_get_master_device(device);
+ assert_eq!(first_output_sub_device_uid, master_device_uid);
+
+ // Compensate the drift directly without setting master device.
+ assert!(AggregateDevice::activate_clock_drift_compensation(device).is_ok());
+
+ // Check the compensations.
+ let devices = test_get_all_onwed_devices(device);
+ let compensations = get_drift_compensations(&devices);
+ assert!(!compensations.is_empty());
+ assert_eq!(devices.len(), compensations.len());
+
+ for (i, compensation) in compensations.iter().enumerate() {
+ assert_eq!(*compensation, if i == 0 { 0 } else { DRIFT_COMPENSATION });
+ }
+
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+#[test]
+#[should_panic]
+#[ignore]
+fn test_aggregate_activate_clock_drift_compensation_for_a_blank_aggregate_device() {
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+
+ let sub_devices = AggregateDevice::get_sub_devices(device).unwrap();
+ assert!(sub_devices.is_empty());
+ let onwed_devices = test_get_all_onwed_devices(device);
+ assert!(onwed_devices.is_empty());
+
+ // This panics since there are no sub devices on which to set drift compensation.
+ assert!(AggregateDevice::activate_clock_drift_compensation(device).is_err());
+
+ assert!(AggregateDevice::destroy_device(plugin, device).is_ok());
+}
+
+fn get_drift_compensations(devices: &Vec<AudioObjectID>) -> Vec<u32> {
+ assert!(!devices.is_empty());
+ let mut compensations = Vec::new();
+ for device in devices {
+ let compensation = test_get_drift_compensations(*device).unwrap();
+ compensations.push(compensation);
+ }
+
+ compensations
+}
+
+// AggregateDevice::destroy_device
+// ------------------------------------
+#[test]
+#[ignore]
+#[should_panic]
+fn test_aggregate_destroy_aggregate_device_for_a_unknown_plugin_device() {
+ let plugin = AggregateDevice::get_system_plugin_id().unwrap();
+ let device = AggregateDevice::create_blank_device_sync(plugin).unwrap();
+ assert!(AggregateDevice::destroy_device(kAudioObjectUnknown, device).is_err());
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/api.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/api.rs
new file mode 100644
index 0000000000..4cd86c094e
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/api.rs
@@ -0,0 +1,1663 @@
+use super::utils::{
+ test_audiounit_get_buffer_frame_size, test_audiounit_scope_is_enabled, test_create_audiounit,
+ test_device_channels_in_scope, test_device_in_scope, test_get_all_devices,
+ test_get_default_audiounit, test_get_default_device, test_get_default_raw_stream,
+ test_get_devices_in_scope, test_get_raw_context, ComponentSubType, DeviceFilter, PropertyScope,
+ Scope,
+};
+use super::*;
+
+// make_sized_audio_channel_layout
+// ------------------------------------
+#[test]
+fn test_make_sized_audio_channel_layout() {
+ for channels in 1..10 {
+ let size = mem::size_of::<AudioChannelLayout>()
+ + (channels - 1) * mem::size_of::<AudioChannelDescription>();
+ let _ = make_sized_audio_channel_layout(size);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_make_sized_audio_channel_layout_with_wrong_size() {
+ // let _ = make_sized_audio_channel_layout(0);
+ let one_channel_size = mem::size_of::<AudioChannelLayout>();
+ let padding_size = 10;
+ assert_ne!(mem::size_of::<AudioChannelDescription>(), padding_size);
+ let wrong_size = one_channel_size + padding_size;
+ let _ = make_sized_audio_channel_layout(wrong_size);
+}
+
+// active_streams
+// update_latency_by_adding_stream
+// update_latency_by_removing_stream
+// ------------------------------------
+#[test]
+fn test_increase_and_decrease_context_streams() {
+ use std::thread;
+ const STREAMS: u32 = 10;
+
+ let context = AudioUnitContext::new();
+ let context_ptr_value = &context as *const AudioUnitContext as usize;
+
+ let mut join_handles = vec![];
+ for i in 0..STREAMS {
+ join_handles.push(thread::spawn(move || {
+ let context = unsafe { &*(context_ptr_value as *const AudioUnitContext) };
+
+ context.update_latency_by_adding_stream(i)
+ }));
+ }
+ let mut latencies = vec![];
+ for handle in join_handles {
+ latencies.push(handle.join().unwrap());
+ }
+ assert_eq!(context.active_streams(), STREAMS);
+ check_streams(&context, STREAMS);
+
+ check_latency(&context, latencies[0]);
+ for i in 0..latencies.len() - 1 {
+ assert_eq!(latencies[i], latencies[i + 1]);
+ }
+
+ let mut join_handles = vec![];
+ for _ in 0..STREAMS {
+ join_handles.push(thread::spawn(move || {
+ let context = unsafe { &*(context_ptr_value as *const AudioUnitContext) };
+ context.update_latency_by_removing_stream();
+ }));
+ }
+ for handle in join_handles {
+ let _ = handle.join();
+ }
+ check_streams(&context, 0);
+
+ check_latency(&context, None);
+}
+
+fn check_streams(context: &AudioUnitContext, number: u32) {
+ let guard = context.latency_controller.lock().unwrap();
+ assert_eq!(guard.streams, number);
+}
+
+fn check_latency(context: &AudioUnitContext, latency: Option<u32>) {
+ let guard = context.latency_controller.lock().unwrap();
+ assert_eq!(guard.latency, latency);
+}
+
+// make_silent
+// ------------------------------------
+#[test]
+fn test_make_silent() {
+ let mut array = allocate_array::<u32>(10);
+ for data in array.iter_mut() {
+ *data = 0xFFFF;
+ }
+
+ let mut buffer = AudioBuffer::default();
+ buffer.mData = array.as_mut_ptr() as *mut c_void;
+ buffer.mDataByteSize = (array.len() * mem::size_of::<u32>()) as u32;
+ buffer.mNumberChannels = 1;
+
+ audiounit_make_silent(&mut buffer);
+ for data in array {
+ assert_eq!(data, 0);
+ }
+}
+
+// minimum_resampling_input_frames
+// ------------------------------------
+#[test]
+fn test_minimum_resampling_input_frames() {
+ let input_rate = 48000_f64;
+ let output_rate = 44100_f64;
+
+ let frames = 100;
+ let times = input_rate / output_rate;
+ let expected = (frames as f64 * times).ceil() as usize;
+
+ assert_eq!(
+ minimum_resampling_input_frames(input_rate, output_rate, frames),
+ expected
+ );
+}
+
+#[test]
+#[should_panic]
+fn test_minimum_resampling_input_frames_zero_input_rate() {
+ minimum_resampling_input_frames(0_f64, 44100_f64, 1);
+}
+
+#[test]
+#[should_panic]
+fn test_minimum_resampling_input_frames_zero_output_rate() {
+ minimum_resampling_input_frames(48000_f64, 0_f64, 1);
+}
+
+#[test]
+fn test_minimum_resampling_input_frames_equal_input_output_rate() {
+ let frames = 100;
+ assert_eq!(
+ minimum_resampling_input_frames(44100_f64, 44100_f64, frames),
+ frames
+ );
+}
+
+// create_device_info
+// ------------------------------------
+#[test]
+fn test_create_device_info_from_unknown_input_device() {
+ if let Some(default_device_id) = test_get_default_device(Scope::Input) {
+ let default_device = create_device_info(kAudioObjectUnknown, DeviceType::INPUT).unwrap();
+ assert_eq!(default_device.id, default_device_id);
+ assert_eq!(
+ default_device.flags,
+ device_flags::DEV_INPUT | device_flags::DEV_SELECTED_DEFAULT
+ );
+ } else {
+ println!("No input device to perform test.");
+ }
+}
+
+#[test]
+fn test_create_device_info_from_unknown_output_device() {
+ if let Some(default_device_id) = test_get_default_device(Scope::Output) {
+ let default_device = create_device_info(kAudioObjectUnknown, DeviceType::OUTPUT).unwrap();
+ assert_eq!(default_device.id, default_device_id);
+ assert_eq!(
+ default_device.flags,
+ device_flags::DEV_OUTPUT | device_flags::DEV_SELECTED_DEFAULT
+ );
+ } else {
+ println!("No output device to perform test.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_set_device_info_to_system_input_device() {
+ let _device = create_device_info(kAudioObjectSystemObject, DeviceType::INPUT);
+}
+
+#[test]
+#[should_panic]
+fn test_set_device_info_to_system_output_device() {
+ let _device = create_device_info(kAudioObjectSystemObject, DeviceType::OUTPUT);
+}
+
+// FIXME: Is it ok to set the input device to a nonexistent device?
+#[ignore]
+#[test]
+#[should_panic]
+fn test_set_device_info_to_nonexistent_input_device() {
+ let nonexistent_id = std::u32::MAX;
+ let _device = create_device_info(nonexistent_id, DeviceType::INPUT);
+}
+
+// FIXME: Is it ok to set the output device to a nonexistent device?
+#[ignore]
+#[test]
+#[should_panic]
+fn test_set_device_info_to_nonexistent_output_device() {
+ let nonexistent_id = std::u32::MAX;
+ let _device = create_device_info(nonexistent_id, DeviceType::OUTPUT);
+}
+
+// add_listener (for default output device)
+// ------------------------------------
+#[test]
+fn test_add_listener_unknown_device() {
+ extern "C" fn callback(
+ _id: AudioObjectID,
+ _number_of_addresses: u32,
+ _addresses: *const AudioObjectPropertyAddress,
+ _data: *mut c_void,
+ ) -> OSStatus {
+ assert!(false, "Should not be called.");
+ kAudioHardwareUnspecifiedError as OSStatus
+ }
+
+ test_get_default_raw_stream(|stream| {
+ let listener = device_property_listener::new(
+ kAudioObjectUnknown,
+ get_property_address(
+ Property::HardwareDefaultOutputDevice,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ),
+ callback,
+ );
+ let mut res: OSStatus = 0;
+ stream
+ .queue
+ .run_sync(|| res = stream.add_device_listener(&listener));
+ assert_eq!(res, kAudioHardwareBadObjectError as OSStatus);
+ });
+}
+
+// remove_listener (for default output device)
+// ------------------------------------
+#[test]
+fn test_add_listener_then_remove_system_device() {
+ extern "C" fn callback(
+ _id: AudioObjectID,
+ _number_of_addresses: u32,
+ _addresses: *const AudioObjectPropertyAddress,
+ _data: *mut c_void,
+ ) -> OSStatus {
+ assert!(false, "Should not be called.");
+ kAudioHardwareUnspecifiedError as OSStatus
+ }
+
+ test_get_default_raw_stream(|stream| {
+ let listener = device_property_listener::new(
+ kAudioObjectSystemObject,
+ get_property_address(
+ Property::HardwareDefaultOutputDevice,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ),
+ callback,
+ );
+ let mut res: OSStatus = 0;
+ stream
+ .queue
+ .run_sync(|| res = stream.add_device_listener(&listener));
+ assert_eq!(res, NO_ERR);
+ stream
+ .queue
+ .run_sync(|| res = stream.remove_device_listener(&listener));
+ assert_eq!(res, NO_ERR);
+ });
+}
+
+#[test]
+fn test_remove_listener_without_adding_any_listener_before_system_device() {
+ extern "C" fn callback(
+ _id: AudioObjectID,
+ _number_of_addresses: u32,
+ _addresses: *const AudioObjectPropertyAddress,
+ _data: *mut c_void,
+ ) -> OSStatus {
+ assert!(false, "Should not be called.");
+ kAudioHardwareUnspecifiedError as OSStatus
+ }
+
+ test_get_default_raw_stream(|stream| {
+ let listener = device_property_listener::new(
+ kAudioObjectSystemObject,
+ get_property_address(
+ Property::HardwareDefaultOutputDevice,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ),
+ callback,
+ );
+ let mut res: OSStatus = 0;
+ stream
+ .queue
+ .run_sync(|| res = stream.remove_device_listener(&listener));
+ assert_eq!(res, NO_ERR);
+ });
+}
+
+#[test]
+fn test_remove_listener_unknown_device() {
+ extern "C" fn callback(
+ _id: AudioObjectID,
+ _number_of_addresses: u32,
+ _addresses: *const AudioObjectPropertyAddress,
+ _data: *mut c_void,
+ ) -> OSStatus {
+ assert!(false, "Should not be called.");
+ kAudioHardwareUnspecifiedError as OSStatus
+ }
+
+ test_get_default_raw_stream(|stream| {
+ let listener = device_property_listener::new(
+ kAudioObjectUnknown,
+ get_property_address(
+ Property::HardwareDefaultOutputDevice,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ),
+ callback,
+ );
+ let mut res: OSStatus = 0;
+ stream
+ .queue
+ .run_sync(|| res = stream.remove_device_listener(&listener));
+ assert_eq!(res, kAudioHardwareBadObjectError as OSStatus);
+ });
+}
+
+// get_default_device_id
+// ------------------------------------
+#[test]
+fn test_get_default_device_id() {
+ if test_get_default_device(Scope::Input).is_some() {
+ assert_ne!(
+ get_default_device_id(DeviceType::INPUT).unwrap(),
+ kAudioObjectUnknown,
+ );
+ }
+
+ if test_get_default_device(Scope::Output).is_some() {
+ assert_ne!(
+ get_default_device_id(DeviceType::OUTPUT).unwrap(),
+ kAudioObjectUnknown,
+ );
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_default_device_id_with_unknown_type() {
+ assert!(get_default_device_id(DeviceType::UNKNOWN).is_err());
+}
+
+#[test]
+#[should_panic]
+fn test_get_default_device_id_with_inout_type() {
+ assert!(get_default_device_id(DeviceType::INPUT | DeviceType::OUTPUT).is_err());
+}
+
+// convert_channel_layout
+// ------------------------------------
+#[test]
+fn test_convert_channel_layout() {
+ let pairs = [
+ (vec![kAudioObjectUnknown], vec![mixer::Channel::Silence]),
+ (
+ vec![kAudioChannelLabel_Mono],
+ vec![mixer::Channel::FrontCenter],
+ ),
+ (
+ vec![kAudioChannelLabel_Mono, kAudioChannelLabel_LFEScreen],
+ vec![mixer::Channel::FrontCenter, mixer::Channel::LowFrequency],
+ ),
+ (
+ vec![kAudioChannelLabel_Left, kAudioChannelLabel_Right],
+ vec![mixer::Channel::FrontLeft, mixer::Channel::FrontRight],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Unknown,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::Silence,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Unused,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::Silence,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_ForeignLanguage,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::Silence,
+ ],
+ ),
+ // The SMPTE layouts.
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_LFEScreen,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::LowFrequency,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Center,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::FrontCenter,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Center,
+ kAudioChannelLabel_LFEScreen,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::FrontCenter,
+ mixer::Channel::LowFrequency,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_CenterSurround,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::BackCenter,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_CenterSurround,
+ kAudioChannelLabel_LFEScreen,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::BackCenter,
+ mixer::Channel::LowFrequency,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Center,
+ kAudioChannelLabel_CenterSurround,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::FrontCenter,
+ mixer::Channel::BackCenter,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Center,
+ kAudioChannelLabel_CenterSurround,
+ kAudioChannelLabel_LFEScreen,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::FrontCenter,
+ mixer::Channel::BackCenter,
+ mixer::Channel::LowFrequency,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_LeftSurroundDirect,
+ kAudioChannelLabel_RightSurroundDirect,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::SideLeft,
+ mixer::Channel::SideRight,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_LeftSurroundDirect,
+ kAudioChannelLabel_RightSurroundDirect,
+ kAudioChannelLabel_LFEScreen,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::SideLeft,
+ mixer::Channel::SideRight,
+ mixer::Channel::LowFrequency,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_LeftSurround,
+ kAudioChannelLabel_RightSurround,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::BackLeft,
+ mixer::Channel::BackRight,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_LeftSurround,
+ kAudioChannelLabel_RightSurround,
+ kAudioChannelLabel_LFEScreen,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::BackLeft,
+ mixer::Channel::BackRight,
+ mixer::Channel::LowFrequency,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Center,
+ kAudioChannelLabel_LeftSurroundDirect,
+ kAudioChannelLabel_RightSurroundDirect,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::FrontCenter,
+ mixer::Channel::SideLeft,
+ mixer::Channel::SideRight,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Center,
+ kAudioChannelLabel_LeftSurroundDirect,
+ kAudioChannelLabel_RightSurroundDirect,
+ kAudioChannelLabel_LFEScreen,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::FrontCenter,
+ mixer::Channel::SideLeft,
+ mixer::Channel::SideRight,
+ mixer::Channel::LowFrequency,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_LeftSurround,
+ kAudioChannelLabel_RightSurround,
+ kAudioChannelLabel_Center,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::BackLeft,
+ mixer::Channel::BackRight,
+ mixer::Channel::FrontCenter,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_LeftSurround,
+ kAudioChannelLabel_RightSurround,
+ kAudioChannelLabel_Center,
+ kAudioChannelLabel_LFEScreen,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::BackLeft,
+ mixer::Channel::BackRight,
+ mixer::Channel::FrontCenter,
+ mixer::Channel::LowFrequency,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Center,
+ kAudioChannelLabel_LFEScreen,
+ kAudioChannelLabel_CenterSurround,
+ kAudioChannelLabel_LeftSurroundDirect,
+ kAudioChannelLabel_RightSurroundDirect,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::FrontCenter,
+ mixer::Channel::LowFrequency,
+ mixer::Channel::BackCenter,
+ mixer::Channel::SideLeft,
+ mixer::Channel::SideRight,
+ ],
+ ),
+ (
+ vec![
+ kAudioChannelLabel_Left,
+ kAudioChannelLabel_Right,
+ kAudioChannelLabel_Center,
+ kAudioChannelLabel_LFEScreen,
+ kAudioChannelLabel_LeftSurround,
+ kAudioChannelLabel_RightSurround,
+ kAudioChannelLabel_LeftSurroundDirect,
+ kAudioChannelLabel_RightSurroundDirect,
+ ],
+ vec![
+ mixer::Channel::FrontLeft,
+ mixer::Channel::FrontRight,
+ mixer::Channel::FrontCenter,
+ mixer::Channel::LowFrequency,
+ mixer::Channel::BackLeft,
+ mixer::Channel::BackRight,
+ mixer::Channel::SideLeft,
+ mixer::Channel::SideRight,
+ ],
+ ),
+ ];
+
+ const MAX_CHANNELS: usize = 10;
+ // A Rust mapping structure of the AudioChannelLayout with MAX_CHANNELS channels
+ // https://github.com/phracker/MacOSX-SDKs/blob/master/MacOSX10.13.sdk/System/Library/Frameworks/CoreAudio.framework/Versions/A/Headers/CoreAudioTypes.h#L1332
+ #[repr(C)]
+ struct TestLayout {
+ tag: AudioChannelLayoutTag,
+ map: AudioChannelBitmap,
+ number_channel_descriptions: UInt32,
+ channel_descriptions: [AudioChannelDescription; MAX_CHANNELS],
+ }
+
+ impl Default for TestLayout {
+ fn default() -> Self {
+ Self {
+ tag: AudioChannelLayoutTag::default(),
+ map: AudioChannelBitmap::default(),
+ number_channel_descriptions: UInt32::default(),
+ channel_descriptions: [AudioChannelDescription::default(); MAX_CHANNELS],
+ }
+ }
+ }
+
+ let mut layout = TestLayout::default();
+ layout.tag = kAudioChannelLayoutTag_UseChannelDescriptions;
+
+ for (labels, expected_layout) in pairs.iter() {
+ assert!(labels.len() <= MAX_CHANNELS);
+ layout.number_channel_descriptions = labels.len() as u32;
+ for (idx, label) in labels.iter().enumerate() {
+ layout.channel_descriptions[idx].mChannelLabel = *label;
+ }
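+ // TestLayout is #[repr(C)] and its leading fields mirror AudioChannelLayout, so a
+ // reference to it can be reinterpreted as an AudioChannelLayout whose variable-length
+ // description array holds `number_channel_descriptions` entries.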
+ let layout_ref = unsafe { &(*(&layout as *const TestLayout as *const AudioChannelLayout)) };
+ assert_eq!(
+ &audiounit_convert_channel_layout(layout_ref).unwrap(),
+ expected_layout
+ );
+ }
+}
+
+// get_preferred_channel_layout
+// ------------------------------------
+#[test]
+fn test_get_preferred_channel_layout_output() {
+ match test_get_default_audiounit(Scope::Output) {
+ Some(unit) => assert!(!audiounit_get_preferred_channel_layout(unit.get_inner())
+ .unwrap()
+ .is_empty()),
+ None => println!("No output audiounit for test."),
+ }
+}
+
+// get_current_channel_layout
+// ------------------------------------
+#[test]
+fn test_get_current_channel_layout_output() {
+ match test_get_default_audiounit(Scope::Output) {
+ Some(unit) => assert!(!audiounit_get_current_channel_layout(unit.get_inner())
+ .unwrap()
+ .is_empty()),
+ None => println!("No output audiounit for test."),
+ }
+}
+
+// create_stream_description
+// ------------------------------------
+#[test]
+fn test_create_stream_description() {
+ let mut channels = 0;
+ for (bits, format, flags) in [
+ (
+ 16_u32,
+ ffi::CUBEB_SAMPLE_S16LE,
+ kAudioFormatFlagIsSignedInteger,
+ ),
+ (
+ 16_u32,
+ ffi::CUBEB_SAMPLE_S16BE,
+ kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsBigEndian,
+ ),
+ (32_u32, ffi::CUBEB_SAMPLE_FLOAT32LE, kAudioFormatFlagIsFloat),
+ (
+ 32_u32,
+ ffi::CUBEB_SAMPLE_FLOAT32BE,
+ kAudioFormatFlagIsFloat | kAudioFormatFlagIsBigEndian,
+ ),
+ ]
+ .iter()
+ {
+ let bytes = bits / 8;
+ channels += 1;
+
+ let mut raw = ffi::cubeb_stream_params::default();
+ raw.format = *format;
+ raw.rate = 48_000;
+ raw.channels = channels;
+ raw.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ raw.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+ let params = StreamParams::from(raw);
+ let description = create_stream_description(&params).unwrap();
+ assert_eq!(description.mFormatID, kAudioFormatLinearPCM);
+ assert_eq!(
+ description.mFormatFlags,
+ flags | kLinearPCMFormatFlagIsPacked
+ );
+ assert_eq!(description.mSampleRate as u32, raw.rate);
+ assert_eq!(description.mChannelsPerFrame, raw.channels);
+ assert_eq!(description.mBytesPerFrame, bytes * raw.channels);
+ assert_eq!(description.mFramesPerPacket, 1);
+ assert_eq!(description.mBytesPerPacket, bytes * raw.channels);
+ assert_eq!(description.mReserved, 0);
+ }
+}
+
+// create_blank_audiounit
+// ------------------------------------
+#[test]
+fn test_create_blank_audiounit() {
+ let unit = create_blank_audiounit().unwrap();
+ assert!(!unit.is_null());
+ // Destroy the AudioUnit
+ unsafe {
+ AudioUnitUninitialize(unit);
+ AudioComponentInstanceDispose(unit);
+ }
+}
+
+// enable_audiounit_scope
+// ------------------------------------
+#[test]
+fn test_enable_audiounit_scope() {
+ // It's ok to enable and disable the scopes of input or output
+ // for the unit whose subtype is kAudioUnitSubType_HALOutput
+ // even when there is no available input or output devices.
+ if let Some(unit) = test_create_audiounit(ComponentSubType::HALOutput) {
+ assert!(enable_audiounit_scope(unit.get_inner(), DeviceType::OUTPUT, true).is_ok());
+ assert!(enable_audiounit_scope(unit.get_inner(), DeviceType::OUTPUT, false).is_ok());
+ assert!(enable_audiounit_scope(unit.get_inner(), DeviceType::INPUT, true).is_ok());
+ assert!(enable_audiounit_scope(unit.get_inner(), DeviceType::INPUT, false).is_ok());
+ } else {
+ println!("No audiounit to perform test.");
+ }
+}
+
+#[test]
+fn test_enable_audiounit_scope_for_default_output_unit() {
+ if let Some(unit) = test_create_audiounit(ComponentSubType::DefaultOutput) {
+ assert_eq!(
+ enable_audiounit_scope(unit.get_inner(), DeviceType::OUTPUT, true).unwrap_err(),
+ kAudioUnitErr_InvalidProperty
+ );
+ assert_eq!(
+ enable_audiounit_scope(unit.get_inner(), DeviceType::OUTPUT, false).unwrap_err(),
+ kAudioUnitErr_InvalidProperty
+ );
+ assert_eq!(
+ enable_audiounit_scope(unit.get_inner(), DeviceType::INPUT, true).unwrap_err(),
+ kAudioUnitErr_InvalidProperty
+ );
+ assert_eq!(
+ enable_audiounit_scope(unit.get_inner(), DeviceType::INPUT, false).unwrap_err(),
+ kAudioUnitErr_InvalidProperty
+ );
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_enable_audiounit_scope_with_null_unit() {
+ let unit: AudioUnit = ptr::null_mut();
+ assert!(enable_audiounit_scope(unit, DeviceType::INPUT, false).is_err());
+}
+
+// create_audiounit
+// ------------------------------------
+#[test]
+fn test_for_create_audiounit() {
+ let flags_list = [device_flags::DEV_INPUT, device_flags::DEV_OUTPUT];
+
+ let default_input = test_get_default_device(Scope::Input);
+ let default_output = test_get_default_device(Scope::Output);
+
+ for flags in flags_list.iter() {
+ let mut device = device_info::default();
+ device.flags |= *flags;
+
+ // Check the output scope is enabled.
+ if device.flags.contains(device_flags::DEV_OUTPUT) && default_output.is_some() {
+ device.id = default_output.unwrap();
+ let unit = create_audiounit(&device).unwrap();
+ assert!(!unit.is_null());
+ assert!(test_audiounit_scope_is_enabled(unit, Scope::Output));
+
+ // Destroy the AudioUnit.
+ unsafe {
+ AudioUnitUninitialize(unit);
+ AudioComponentInstanceDispose(unit);
+ }
+ }
+
+ // Check the input scope is enabled.
+ if device.flags.contains(device_flags::DEV_INPUT) && default_input.is_some() {
+ let device_id = default_input.unwrap();
+ device.id = device_id;
+ let unit = create_audiounit(&device).unwrap();
+ assert!(!unit.is_null());
+ assert!(test_audiounit_scope_is_enabled(unit, Scope::Input));
+ // Destroy the AudioUnit.
+ unsafe {
+ AudioUnitUninitialize(unit);
+ AudioComponentInstanceDispose(unit);
+ }
+ }
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_create_audiounit_with_unknown_scope() {
+ let device = device_info::default();
+ let _unit = create_audiounit(&device);
+}
+
+// set_buffer_size_sync
+// ------------------------------------
+#[test]
+fn test_set_buffer_size_sync() {
+ test_set_buffer_size_by_scope(Scope::Input);
+ test_set_buffer_size_by_scope(Scope::Output);
+ fn test_set_buffer_size_by_scope(scope: Scope) {
+ let unit = test_get_default_audiounit(scope.clone());
+ if unit.is_none() {
+ println!("No audiounit for {:?}.", scope);
+ return;
+ }
+ let unit = unit.unwrap();
+ let prop_scope = match scope {
+ Scope::Input => PropertyScope::Output,
+ Scope::Output => PropertyScope::Input,
+ };
+ let mut buffer_frames = test_audiounit_get_buffer_frame_size(
+ unit.get_inner(),
+ scope.clone(),
+ prop_scope.clone(),
+ )
+ .unwrap();
+ assert_ne!(buffer_frames, 0);
+ buffer_frames *= 2;
+ assert!(
+ set_buffer_size_sync(unit.get_inner(), scope.clone().into(), buffer_frames).is_ok()
+ );
+ let new_buffer_frames =
+ test_audiounit_get_buffer_frame_size(unit.get_inner(), scope.clone(), prop_scope)
+ .unwrap();
+ assert_eq!(buffer_frames, new_buffer_frames);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_set_buffer_size_sync_for_input_with_null_input_unit() {
+ test_set_buffer_size_sync_by_scope_with_null_unit(Scope::Input);
+}
+
+#[test]
+#[should_panic]
+fn test_set_buffer_size_sync_for_output_with_null_output_unit() {
+ test_set_buffer_size_sync_by_scope_with_null_unit(Scope::Output);
+}
+
+fn test_set_buffer_size_sync_by_scope_with_null_unit(scope: Scope) {
+ let unit: AudioUnit = ptr::null_mut();
+ assert!(set_buffer_size_sync(unit, scope.into(), 2048).is_err());
+}
+
+// get_volume, set_volume
+// ------------------------------------
+#[test]
+fn test_stream_get_volume() {
+ if let Some(unit) = test_get_default_audiounit(Scope::Output) {
+ let expected_volume: f32 = 0.5;
+ set_volume(unit.get_inner(), expected_volume);
+ assert_eq!(expected_volume, get_volume(unit.get_inner()).unwrap());
+ } else {
+ println!("No output audiounit.");
+ }
+}
+
+// convert_uint32_into_string
+// ------------------------------------
+#[test]
+fn test_convert_uint32_into_string() {
+ let empty = convert_uint32_into_string(0);
+ assert_eq!(empty, CString::default());
+
+ let data: u32 = ('R' as u32) << 24 | ('U' as u32) << 16 | ('S' as u32) << 8 | 'T' as u32;
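+ // That is 0x5255_5354: the four ASCII bytes of "RUST" packed big-endian.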
+ let data_string = convert_uint32_into_string(data);
+ assert_eq!(data_string, CString::new("RUST").unwrap());
+}
+
+// get_channel_count
+// ------------------------------------
+#[test]
+fn test_get_channel_count() {
+ test_channel_count(Scope::Input);
+ test_channel_count(Scope::Output);
+
+ fn test_channel_count(scope: Scope) {
+ if let Some(device) = test_get_default_device(scope.clone()) {
+ let channels = get_channel_count(device, DeviceType::from(scope.clone())).unwrap();
+ assert!(channels > 0);
+ assert_eq!(
+ channels,
+ test_device_channels_in_scope(device, scope).unwrap()
+ );
+ } else {
+ println!("No device for {:?}.", scope);
+ }
+ }
+}
+
+#[test]
+fn test_get_channel_count_of_input_for_an_output_only_device() {
+ let devices = test_get_devices_in_scope(Scope::Output);
+ for device in devices {
+ // Skip in-out devices.
+ if test_device_in_scope(device, Scope::Input) {
+ continue;
+ }
+ let count = get_channel_count(device, DeviceType::INPUT).unwrap();
+ assert_eq!(count, 0);
+ }
+}
+
+#[test]
+fn test_get_channel_count_of_output_for_an_input_only_device() {
+ let devices = test_get_devices_in_scope(Scope::Input);
+ for device in devices {
+ // Skip in-out devices.
+ if test_device_in_scope(device, Scope::Output) {
+ continue;
+ }
+ let count = get_channel_count(device, DeviceType::OUTPUT).unwrap();
+ assert_eq!(count, 0);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_channel_count_of_unknown_device() {
+ assert!(get_channel_count(kAudioObjectUnknown, DeviceType::OUTPUT).is_err());
+}
+
+#[test]
+fn test_get_channel_count_of_inout_type() {
+ test_channel_count(Scope::Input);
+ test_channel_count(Scope::Output);
+
+ fn test_channel_count(scope: Scope) {
+ if let Some(device) = test_get_default_device(scope.clone()) {
+ assert_eq!(
+ get_channel_count(device, DeviceType::INPUT | DeviceType::OUTPUT),
+ get_channel_count(device, DeviceType::INPUT).map(|c| c + get_channel_count(
+ device,
+ DeviceType::OUTPUT
+ )
+ .unwrap_or(0))
+ );
+ } else {
+ println!("No device for {:?}.", scope);
+ }
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_channel_count_of_unknown_type() {
+ test_channel_count(Scope::Input);
+ test_channel_count(Scope::Output);
+
+ fn test_channel_count(scope: Scope) {
+ if let Some(device) = test_get_default_device(scope.clone()) {
+ assert!(get_channel_count(device, DeviceType::UNKNOWN).is_err());
+ } else {
+ panic!("Panic by default: No device for {:?}.", scope);
+ }
+ }
+}
+
+// get_range_of_sample_rates
+// ------------------------------------
+#[test]
+fn test_get_range_of_sample_rates() {
+ test_get_range_of_sample_rates_in_scope(Scope::Input);
+ test_get_range_of_sample_rates_in_scope(Scope::Output);
+
+ fn test_get_range_of_sample_rates_in_scope(scope: Scope) {
+ if let Some(device) = test_get_default_device(scope.clone()) {
+ let ranges = test_get_available_samplerate_of_device(device);
+ for range in ranges {
+ // Surprisingly, we can get the input/output sample rates from a non-input/non-output device.
+ check_samplerates(range);
+ }
+ } else {
+ println!("No device for {:?}.", scope);
+ }
+ }
+
+ fn test_get_available_samplerate_of_device(id: AudioObjectID) -> Vec<(f64, f64)> {
+ let scopes = [
+ DeviceType::INPUT,
+ DeviceType::OUTPUT,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ];
+ let mut ranges = Vec::new();
+ for scope in scopes.iter() {
+ ranges.push(get_range_of_sample_rates(id, *scope).unwrap());
+ }
+ ranges
+ }
+
+ fn check_samplerates((min, max): (f64, f64)) {
+ assert!(min > 0.0);
+ assert!(max > 0.0);
+ assert!(min <= max);
+ }
+}
+
+// get_presentation_latency
+// ------------------------------------
+#[test]
+fn test_get_device_presentation_latency() {
+ test_get_device_presentation_latencies_in_scope(Scope::Input);
+ test_get_device_presentation_latencies_in_scope(Scope::Output);
+
+ fn test_get_device_presentation_latencies_in_scope(scope: Scope) {
+ if let Some(device) = test_get_default_device(scope.clone()) {
+ // TODO: The latencies vary from device to device. Check nothing here.
+ let latency = get_fixed_latency(device, scope.clone().into());
+ println!(
+ "presentation latency on the device {} in scope {:?}: {}",
+ device, scope, latency
+ );
+ } else {
+ println!("No device for {:?}.", scope);
+ }
+ }
+}
+
+// get_device_group_id
+// ------------------------------------
+#[test]
+fn test_get_device_group_id() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ match get_device_group_id(device, DeviceType::INPUT) {
+ Ok(id) => println!("input group id: {:?}", id),
+ Err(e) => println!("No input group id. Error: {}", e),
+ }
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ match get_device_group_id(device, DeviceType::OUTPUT) {
+ Ok(id) => println!("output group id: {:?}", id),
+ Err(e) => println!("No output group id. Error: {}", e),
+ }
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+fn test_get_same_group_id_for_builtin_device_pairs() {
+ use std::collections::HashMap;
+
+ // These device sources have custom group id. See `get_custom_group_id`.
+ const IMIC: u32 = 0x696D_6963; // "imic"
+ const ISPK: u32 = 0x6973_706B; // "ispk"
+ const EMIC: u32 = 0x656D_6963; // "emic"
+ const HDPN: u32 = 0x6864_706E; // "hdpn"
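+ // Each constant is the four ASCII bytes of its data-source name packed big-endian,
+ // e.g. IMIC = ('i' as u32) << 24 | ('m' as u32) << 16 | ('i' as u32) << 8 | 'c' as u32.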
+ let pairs = [(IMIC, ISPK), (EMIC, HDPN)];
+
+ let mut input_group_ids = HashMap::<u32, String>::new();
+ let input_devices = test_get_devices_in_scope(Scope::Input);
+ for device in input_devices.iter() {
+ match get_device_source(*device, DeviceType::INPUT) {
+ Ok(source) => match get_device_group_id(*device, DeviceType::INPUT) {
+ Ok(id) => assert!(input_group_ids
+ .insert(source, id.into_string().unwrap())
+ .is_none()),
+ Err(e) => assert!(input_group_ids
+ .insert(source, format!("Error {}", e))
+ .is_none()),
+ },
+ _ => {} // do nothing when failing to get source.
+ }
+ }
+
+ let mut output_group_ids = HashMap::<u32, String>::new();
+ let output_devices = test_get_devices_in_scope(Scope::Output);
+ for device in output_devices.iter() {
+ match get_device_source(*device, DeviceType::OUTPUT) {
+ Ok(source) => match get_device_group_id(*device, DeviceType::OUTPUT) {
+ Ok(id) => assert!(output_group_ids
+ .insert(source, id.into_string().unwrap())
+ .is_none()),
+ Err(e) => assert!(output_group_ids
+ .insert(source, format!("Error {}", e))
+ .is_none()),
+ },
+ _ => {} // do nothing when failing to get source.
+ }
+ }
+
+ for (input, output) in pairs.iter() {
+ let input_group_id = input_group_ids.get(input);
+ let output_group_id = output_group_ids.get(output);
+
+ if input_group_id.is_some() && output_group_id.is_some() {
+ assert_eq!(input_group_id, output_group_id);
+ }
+
+ input_group_ids.remove(input);
+ output_group_ids.remove(output);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_group_id_by_unknown_device() {
+ assert!(get_device_group_id(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_label
+// ------------------------------------
+#[test]
+fn test_get_device_label() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let name = get_device_label(device, DeviceType::INPUT).unwrap();
+ println!("input device label: {}", name.into_string());
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let name = get_device_label(device, DeviceType::OUTPUT).unwrap();
+ println!("output device label: {}", name.into_string());
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_label_by_unknown_device() {
+ assert!(get_device_label(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_global_uid
+// ------------------------------------
+#[test]
+fn test_get_device_global_uid() {
+ // Input device.
+ if let Some(input) = test_get_default_device(Scope::Input) {
+ let uid = get_device_global_uid(input).unwrap();
+ let uid = uid.into_string();
+ assert!(!uid.is_empty());
+ }
+
+ // Output device.
+ if let Some(output) = test_get_default_device(Scope::Output) {
+ let uid = get_device_global_uid(output).unwrap();
+ let uid = uid.into_string();
+ assert!(!uid.is_empty());
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_global_uid_by_unknown_device() {
+ // Unknown device.
+ assert!(get_device_global_uid(kAudioObjectUnknown).is_err());
+}
+
+// create_cubeb_device_info
+// destroy_cubeb_device_info
+// ------------------------------------
+#[test]
+fn test_create_cubeb_device_info() {
+ use std::collections::VecDeque;
+
+ test_create_device_from_hwdev_in_scope(Scope::Input);
+ test_create_device_from_hwdev_in_scope(Scope::Output);
+
+ fn test_create_device_from_hwdev_in_scope(scope: Scope) {
+ if let Some(device) = test_get_default_device(scope.clone()) {
+ let is_input = test_device_in_scope(device, Scope::Input);
+ let is_output = test_device_in_scope(device, Scope::Output);
+ let mut results = test_create_device_infos_by_device(device);
+ assert_eq!(results.len(), 2);
+ // Input device type:
+ let input_result = results.pop_front().unwrap();
+ if is_input {
+ let mut input_device_info = input_result.unwrap();
+ check_device_info_by_device(&input_device_info, device, Scope::Input);
+ destroy_cubeb_device_info(&mut input_device_info);
+ } else {
+ assert_eq!(input_result.unwrap_err(), Error::error());
+ }
+ // Output device type:
+ let output_result = results.pop_front().unwrap();
+ if is_output {
+ let mut output_device_info = output_result.unwrap();
+ check_device_info_by_device(&output_device_info, device, Scope::Output);
+ destroy_cubeb_device_info(&mut output_device_info);
+ } else {
+ assert_eq!(output_result.unwrap_err(), Error::error());
+ }
+ } else {
+ println!("No device for {:?}.", scope);
+ }
+ }
+
+ fn test_create_device_infos_by_device(
+ id: AudioObjectID,
+ ) -> VecDeque<std::result::Result<ffi::cubeb_device_info, Error>> {
+ let dev_types = [DeviceType::INPUT, DeviceType::OUTPUT];
+ let mut results = VecDeque::new();
+ for dev_type in dev_types.iter() {
+ results.push_back(create_cubeb_device_info(id, *dev_type));
+ }
+ results
+ }
+
+ fn check_device_info_by_device(info: &ffi::cubeb_device_info, id: AudioObjectID, scope: Scope) {
+ assert!(!info.devid.is_null());
+ assert!(mem::size_of_val(&info.devid) >= mem::size_of::<AudioObjectID>());
+ assert_eq!(info.devid as AudioObjectID, id);
+ assert!(!info.device_id.is_null());
+ assert!(!info.friendly_name.is_null());
+ assert!(!info.group_id.is_null());
+
+ // TODO: Hit a kAudioHardwareUnknownPropertyError for AirPods
+ // assert!(!info.vendor_name.is_null());
+
+ // FIXME: The device type is defined as input-only or output-only, but some devices are in-out!
+ assert_eq!(info.device_type, DeviceType::from(scope.clone()).bits());
+ assert_eq!(info.state, ffi::CUBEB_DEVICE_STATE_ENABLED);
+ // TODO: The preference is set when the device is the default input/output device and the
+ // device info is created from the input/output scope. Should the preference also be set
+ // when the device is the default input/output device but the device info is created from
+ // the output/input scope? The device may be an in-out device!
+ assert_eq!(info.preferred, get_cubeb_device_pref(id, scope));
+
+ assert_eq!(info.format, ffi::CUBEB_DEVICE_FMT_ALL);
+ assert_eq!(info.default_format, ffi::CUBEB_DEVICE_FMT_F32NE);
+ assert!(info.max_channels > 0);
+ assert!(info.min_rate <= info.max_rate);
+ assert!(info.min_rate <= info.default_rate);
+ assert!(info.default_rate <= info.max_rate);
+
+ assert!(info.latency_lo > 0);
+ assert!(info.latency_hi > 0);
+ assert!(info.latency_lo <= info.latency_hi);
+
+ fn get_cubeb_device_pref(id: AudioObjectID, scope: Scope) -> ffi::cubeb_device_pref {
+ let default_device = test_get_default_device(scope);
+ if default_device.is_some() && default_device.unwrap() == id {
+ ffi::CUBEB_DEVICE_PREF_ALL
+ } else {
+ ffi::CUBEB_DEVICE_PREF_NONE
+ }
+ }
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_create_device_info_by_unknown_device() {
+ assert!(create_cubeb_device_info(kAudioObjectUnknown, DeviceType::OUTPUT).is_err());
+}
+
+#[test]
+fn test_create_device_info_with_unknown_type() {
+ test_create_device_info_with_unknown_type_by_scope(Scope::Input);
+ test_create_device_info_with_unknown_type_by_scope(Scope::Output);
+
+ fn test_create_device_info_with_unknown_type_by_scope(scope: Scope) {
+ if let Some(device) = test_get_default_device(scope.clone()) {
+ assert!(create_cubeb_device_info(device, DeviceType::UNKNOWN).is_err());
+ }
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_device_destroy_empty_device() {
+ let mut device = ffi::cubeb_device_info::default();
+
+ assert!(device.device_id.is_null());
+ assert!(device.group_id.is_null());
+ assert!(device.friendly_name.is_null());
+ assert!(device.vendor_name.is_null());
+
+ // `friendly_name` must be set.
+ destroy_cubeb_device_info(&mut device);
+
+ assert!(device.device_id.is_null());
+ assert!(device.group_id.is_null());
+ assert!(device.friendly_name.is_null());
+ assert!(device.vendor_name.is_null());
+}
+
+#[test]
+fn test_create_device_from_hwdev_with_inout_type() {
+ test_create_device_from_hwdev_with_inout_type_by_scope(Scope::Input);
+ test_create_device_from_hwdev_with_inout_type_by_scope(Scope::Output);
+
+ fn test_create_device_from_hwdev_with_inout_type_by_scope(scope: Scope) {
+ if let Some(device) = test_get_default_device(scope.clone()) {
+ // Get a kAudioHardwareUnknownPropertyError in get_channel_count actually.
+ assert!(
+ create_cubeb_device_info(device, DeviceType::INPUT | DeviceType::OUTPUT).is_err()
+ );
+ } else {
+ println!("No device for {:?}.", scope);
+ }
+ }
+}
+
+// get_devices_of_type
+// ------------------------------------
+#[test]
+fn test_get_devices_of_type() {
+ use std::collections::HashSet;
+
+ let all_devices = audiounit_get_devices_of_type(DeviceType::INPUT | DeviceType::OUTPUT);
+ let input_devices = audiounit_get_devices_of_type(DeviceType::INPUT);
+ let output_devices = audiounit_get_devices_of_type(DeviceType::OUTPUT);
+
+ let mut expected_all = test_get_all_devices(DeviceFilter::ExcludeCubebAggregateAndVPIO);
+ expected_all.sort();
+ assert_eq!(all_devices, expected_all);
+ for device in all_devices.iter() {
+ if test_device_in_scope(*device, Scope::Input) {
+ assert!(input_devices.contains(device));
+ }
+ if test_device_in_scope(*device, Scope::Output) {
+ assert!(output_devices.contains(device));
+ }
+ }
+
+ let input: HashSet<AudioObjectID> = input_devices.iter().cloned().collect();
+ let output: HashSet<AudioObjectID> = output_devices.iter().cloned().collect();
+ let union: HashSet<AudioObjectID> = input.union(&output).cloned().collect();
+ let mut union_devices: Vec<AudioObjectID> = union.iter().cloned().collect();
+ union_devices.sort();
+ assert_eq!(all_devices, union_devices);
+}
+
+#[test]
+#[should_panic]
+fn test_get_devices_of_type_unknown() {
+ let no_devs = audiounit_get_devices_of_type(DeviceType::UNKNOWN);
+ assert!(no_devs.is_empty());
+}
+
+// add_devices_changed_listener
+// ------------------------------------
+#[test]
+fn test_add_devices_changed_listener() {
+ use std::collections::HashMap;
+
+ extern "C" fn inout_callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+ extern "C" fn in_callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+ extern "C" fn out_callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+
+ let mut map: HashMap<DeviceType, extern "C" fn(*mut ffi::cubeb, *mut c_void)> = HashMap::new();
+ map.insert(DeviceType::INPUT, in_callback);
+ map.insert(DeviceType::OUTPUT, out_callback);
+ map.insert(DeviceType::INPUT | DeviceType::OUTPUT, inout_callback);
+
+ test_get_raw_context(|context| {
+ for (devtype, callback) in map.iter() {
+ assert!(get_devices_changed_callback(context, Scope::Input).is_none());
+ assert!(get_devices_changed_callback(context, Scope::Output).is_none());
+
+ // Register a callback within a specific scope.
+ assert!(context
+ .add_devices_changed_listener(*devtype, Some(*callback), ptr::null_mut())
+ .is_ok());
+
+ if devtype.contains(DeviceType::INPUT) {
+ let cb = get_devices_changed_callback(context, Scope::Input);
+ assert!(cb.is_some());
+ assert_eq!(cb.unwrap(), *callback);
+ } else {
+ let cb = get_devices_changed_callback(context, Scope::Input);
+ assert!(cb.is_none());
+ }
+
+ if devtype.contains(DeviceType::OUTPUT) {
+ let cb = get_devices_changed_callback(context, Scope::Output);
+ assert!(cb.is_some());
+ assert_eq!(cb.unwrap(), *callback);
+ } else {
+ let cb = get_devices_changed_callback(context, Scope::Output);
+ assert!(cb.is_none());
+ }
+
+ // Unregister the callbacks within all scopes.
+ assert!(context
+ .remove_devices_changed_listener(DeviceType::INPUT | DeviceType::OUTPUT)
+ .is_ok());
+
+ assert!(get_devices_changed_callback(context, Scope::Input).is_none());
+ assert!(get_devices_changed_callback(context, Scope::Output).is_none());
+ }
+ });
+}
+
+#[test]
+#[should_panic]
+fn test_add_devices_changed_listener_in_unknown_scope() {
+ extern "C" fn callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+
+ test_get_raw_context(|context| {
+ let _ = context.add_devices_changed_listener(
+ DeviceType::UNKNOWN,
+ Some(callback),
+ ptr::null_mut(),
+ );
+ });
+}
+
+#[test]
+#[should_panic]
+fn test_add_devices_changed_listener_with_none_callback() {
+ test_get_raw_context(|context| {
+ for devtype in &[DeviceType::INPUT, DeviceType::OUTPUT] {
+ assert!(context
+ .add_devices_changed_listener(*devtype, None, ptr::null_mut())
+ .is_ok());
+ }
+ });
+}
+
+// remove_devices_changed_listener
+// ------------------------------------
+#[test]
+fn test_remove_devices_changed_listener() {
+ use std::collections::HashMap;
+
+ extern "C" fn in_callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+ extern "C" fn out_callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+
+ let mut map: HashMap<DeviceType, extern "C" fn(*mut ffi::cubeb, *mut c_void)> = HashMap::new();
+ map.insert(DeviceType::INPUT, in_callback);
+ map.insert(DeviceType::OUTPUT, out_callback);
+
+ test_get_raw_context(|context| {
+ for (devtype, _callback) in map.iter() {
+ assert!(get_devices_changed_callback(context, Scope::Input).is_none());
+ assert!(get_devices_changed_callback(context, Scope::Output).is_none());
+
+ // Register callbacks within all scopes.
+ for (scope, listener) in map.iter() {
+ assert!(context
+ .add_devices_changed_listener(*scope, Some(*listener), ptr::null_mut())
+ .is_ok());
+ }
+
+ let input_callback = get_devices_changed_callback(context, Scope::Input);
+ assert!(input_callback.is_some());
+ assert_eq!(
+ input_callback.unwrap(),
+ *(map.get(&DeviceType::INPUT).unwrap())
+ );
+ let output_callback = get_devices_changed_callback(context, Scope::Output);
+ assert!(output_callback.is_some());
+ assert_eq!(
+ output_callback.unwrap(),
+ *(map.get(&DeviceType::OUTPUT).unwrap())
+ );
+
+ // Unregister the callbacks within one specific scope.
+ assert!(context.remove_devices_changed_listener(*devtype).is_ok());
+
+ if devtype.contains(DeviceType::INPUT) {
+ let cb = get_devices_changed_callback(context, Scope::Input);
+ assert!(cb.is_none());
+ } else {
+ let cb = get_devices_changed_callback(context, Scope::Input);
+ assert!(cb.is_some());
+ assert_eq!(cb.unwrap(), *(map.get(&DeviceType::INPUT).unwrap()));
+ }
+
+ if devtype.contains(DeviceType::OUTPUT) {
+ let cb = get_devices_changed_callback(context, Scope::Output);
+ assert!(cb.is_none());
+ } else {
+ let cb = get_devices_changed_callback(context, Scope::Output);
+ assert!(cb.is_some());
+ assert_eq!(cb.unwrap(), *(map.get(&DeviceType::OUTPUT).unwrap()));
+ }
+
+ // Unregister the callbacks within all scopes.
+ assert!(context
+ .remove_devices_changed_listener(DeviceType::INPUT | DeviceType::OUTPUT)
+ .is_ok());
+ }
+ });
+}
+
+#[test]
+fn test_remove_devices_changed_listener_without_adding_listeners() {
+ test_get_raw_context(|context| {
+ for devtype in &[
+ DeviceType::INPUT,
+ DeviceType::OUTPUT,
+ DeviceType::INPUT | DeviceType::OUTPUT,
+ ] {
+ assert!(context.remove_devices_changed_listener(*devtype).is_ok());
+ }
+ });
+}
+
+#[test]
+fn test_remove_devices_changed_listener_within_all_scopes() {
+ use std::collections::HashMap;
+
+ extern "C" fn inout_callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+ extern "C" fn in_callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+ extern "C" fn out_callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+
+ let mut map: HashMap<DeviceType, extern "C" fn(*mut ffi::cubeb, *mut c_void)> = HashMap::new();
+ map.insert(DeviceType::INPUT, in_callback);
+ map.insert(DeviceType::OUTPUT, out_callback);
+ map.insert(DeviceType::INPUT | DeviceType::OUTPUT, inout_callback);
+
+ test_get_raw_context(|context| {
+ for (devtype, callback) in map.iter() {
+ assert!(get_devices_changed_callback(context, Scope::Input).is_none());
+ assert!(get_devices_changed_callback(context, Scope::Output).is_none());
+
+ assert!(context
+ .add_devices_changed_listener(*devtype, Some(*callback), ptr::null_mut())
+ .is_ok());
+
+ if devtype.contains(DeviceType::INPUT) {
+ let cb = get_devices_changed_callback(context, Scope::Input);
+ assert!(cb.is_some());
+ assert_eq!(cb.unwrap(), *callback);
+ }
+
+ if devtype.contains(DeviceType::OUTPUT) {
+ let cb = get_devices_changed_callback(context, Scope::Output);
+ assert!(cb.is_some());
+ assert_eq!(cb.unwrap(), *callback);
+ }
+
+ assert!(context
+ .remove_devices_changed_listener(DeviceType::INPUT | DeviceType::OUTPUT)
+ .is_ok());
+
+ assert!(get_devices_changed_callback(context, Scope::Input).is_none());
+ assert!(get_devices_changed_callback(context, Scope::Output).is_none());
+ }
+ });
+}
+
+fn get_devices_changed_callback(
+ context: &AudioUnitContext,
+ scope: Scope,
+) -> ffi::cubeb_device_collection_changed_callback {
+ let devices_guard = context.devices.lock().unwrap();
+ match scope {
+ Scope::Input => devices_guard.input.changed_callback,
+ Scope::Output => devices_guard.output.changed_callback,
+ }
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/backlog.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/backlog.rs
new file mode 100644
index 0000000000..5342ec0f39
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/backlog.rs
@@ -0,0 +1,36 @@
+// Copyright © 2018 Mozilla Foundation
+//
+// This program is made available under an ISC-style license. See the
+// accompanying file LICENSE for details.
+use super::utils::test_get_default_raw_stream;
+use super::*;
+
+// Interface
+// ============================================================================
+// Remove these after test_ops_stream_register_device_changed_callback works.
+#[test]
+fn test_stream_register_device_changed_callback() {
+ extern "C" fn callback(_: *mut c_void) {}
+
+ test_get_default_raw_stream(|stream| {
+ assert!(stream
+ .register_device_changed_callback(Some(callback))
+ .is_ok());
+ assert!(stream.register_device_changed_callback(None).is_ok());
+ });
+}
+
+#[test]
+fn test_stream_register_device_changed_callback_twice() {
+ extern "C" fn callback1(_: *mut c_void) {}
+ extern "C" fn callback2(_: *mut c_void) {}
+
+ test_get_default_raw_stream(|stream| {
+ assert!(stream
+ .register_device_changed_callback(Some(callback1))
+ .is_ok());
+ assert!(stream
+ .register_device_changed_callback(Some(callback2))
+ .is_err());
+ });
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/device_change.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/device_change.rs
new file mode 100644
index 0000000000..c27dada7ad
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/device_change.rs
@@ -0,0 +1,885 @@
+// NOTICE:
+// Avoid running TestDeviceSwitcher with TestDevicePlugger or active full-duplex streams
+// sequentially!
+//
+// The TestDeviceSwitcher cannot work with any test that creates an aggregate device that is
+// soon destroyed. The TestDeviceSwitcher caches the available devices, upon its creation, as
+// the candidates for the default device. Therefore, those created aggregate devices may be
+// cached in the TestDeviceSwitcher. However, those aggregate devices may be destroyed while
+// the TestDeviceSwitcher is using them or while they are still in its cached list.
+//
+// Running those tests with `test-threads=1` doesn't really help (e.g.,
+// `cargo test test_register_device_changed_callback -- --ignored --nocapture --test-threads=1`),
+// because the aggregate device won't be destroyed immediately when
+// `kAudioPlugInDestroyAggregateDevice` is set. As a result, the following tests, which require
+// changing the devices, are run separately in the run_tests.sh script and are marked `ignore`
+// by default.
+
+use super::utils::{
+ get_devices_info_in_scope, test_create_device_change_listener, test_device_in_scope,
+ test_get_default_device, test_get_devices_in_scope,
+ test_get_stream_with_default_data_callback_by_type, test_ops_stream_operation,
+ test_set_default_device, Scope, StreamType, TestDevicePlugger, TestDeviceSwitcher,
+};
+use super::*;
+use std::sync::{LockResult, MutexGuard, WaitTimeoutResult};
+
+// Switch default devices used by the active streams, to test stream reinitialization
+// ================================================================================================
+#[ignore]
+#[test]
+fn test_switch_device() {
+ test_switch_device_in_scope(Scope::Input);
+ test_switch_device_in_scope(Scope::Output);
+}
+
+fn test_switch_device_in_scope(scope: Scope) {
+ println!(
+ "Switch default device for {:?} while the stream is working.",
+ scope
+ );
+
+ // Do nothing if there are fewer than 2 available devices.
+ let devices = test_get_devices_in_scope(scope.clone());
+ if devices.len() < 2 {
+ println!("Need at least 2 devices for {:?}. Skip.", scope);
+ return;
+ }
+
+ let mut device_switcher = TestDeviceSwitcher::new(scope.clone());
+
+ let notifier = Arc::new(Notifier::new(0));
+ let also_notifier = notifier.clone();
+ let listener = test_create_device_change_listener(scope.clone(), move |_addresses| {
+ let mut cnt = notifier.lock().unwrap();
+ *cnt += 1;
+ notifier.notify(cnt);
+ NO_ERR
+ });
+ listener.start();
+
+ let changed_watcher = Watcher::new(&also_notifier);
+ test_get_started_stream_in_scope(scope.clone(), move |_stream| loop {
+ let mut guard = changed_watcher.lock().unwrap();
+ let start_cnt = guard.clone();
+ device_switcher.next();
+ guard = changed_watcher
+ .wait_while(guard, |cnt| *cnt == start_cnt)
+ .unwrap();
+ if *guard >= devices.len() {
+ break;
+ }
+ });
+}
+
+fn test_get_started_stream_in_scope<F>(scope: Scope, operation: F)
+where
+ F: FnOnce(*mut ffi::cubeb_stream),
+{
+ use std::f32::consts::PI;
+ const SAMPLE_FREQUENCY: u32 = 48_000;
+
+ // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
+ // (in the comments).
+ let mut stream_params = ffi::cubeb_stream_params::default();
+ stream_params.format = ffi::CUBEB_SAMPLE_S16NE;
+ stream_params.rate = SAMPLE_FREQUENCY;
+ stream_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+ stream_params.channels = 1;
+ stream_params.layout = ffi::CUBEB_LAYOUT_MONO;
+
+ let (input_params, output_params) = match scope {
+ Scope::Input => (
+ &mut stream_params as *mut ffi::cubeb_stream_params,
+ ptr::null_mut(),
+ ),
+ Scope::Output => (
+ ptr::null_mut(),
+ &mut stream_params as *mut ffi::cubeb_stream_params,
+ ),
+ };
+
+ extern "C" fn state_callback(
+ stream: *mut ffi::cubeb_stream,
+ user_ptr: *mut c_void,
+ state: ffi::cubeb_state,
+ ) {
+ assert!(!stream.is_null());
+ assert!(!user_ptr.is_null());
+ assert_ne!(state, ffi::CUBEB_STATE_ERROR);
+ }
+
+ extern "C" fn input_data_callback(
+ stream: *mut ffi::cubeb_stream,
+ user_ptr: *mut c_void,
+ input_buffer: *const c_void,
+ output_buffer: *mut c_void,
+ nframes: i64,
+ ) -> i64 {
+ assert!(!stream.is_null());
+ assert!(!user_ptr.is_null());
+ assert!(!input_buffer.is_null());
+ assert!(output_buffer.is_null());
+ nframes
+ }
+
+ let mut position: i64 = 0; // TODO: Use Atomic instead.
+
+ fn f32_to_i16_sample(x: f32) -> i16 {
+ (x * f32::from(i16::max_value())) as i16
+ }
+
+ extern "C" fn output_data_callback(
+ stream: *mut ffi::cubeb_stream,
+ user_ptr: *mut c_void,
+ input_buffer: *const c_void,
+ output_buffer: *mut c_void,
+ nframes: i64,
+ ) -> i64 {
+ assert!(!stream.is_null());
+ assert!(!user_ptr.is_null());
+ assert!(input_buffer.is_null());
+ assert!(!output_buffer.is_null());
+
+ let buffer = unsafe {
+ let ptr = output_buffer as *mut i16;
+ let len = nframes as usize;
+ slice::from_raw_parts_mut(ptr, len)
+ };
+
+ let position = unsafe { &mut *(user_ptr as *mut i64) };
+
+ // Generate tone on the fly.
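+ // The two sines mix 350 Hz and 440 Hz tones, each at half amplitude so the
+ // summed sample stays within [-1.0, 1.0] before the i16 conversion.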
+ for data in buffer.iter_mut() {
+ let t1 = (2.0 * PI * 350.0 * (*position) as f32 / SAMPLE_FREQUENCY as f32).sin();
+ let t2 = (2.0 * PI * 440.0 * (*position) as f32 / SAMPLE_FREQUENCY as f32).sin();
+ *data = f32_to_i16_sample(0.5 * (t1 + t2));
+ *position += 1;
+ }
+
+ nframes
+ }
+
+ test_ops_stream_operation(
+ "stream",
+ ptr::null_mut(), // Use default input device.
+ input_params,
+ ptr::null_mut(), // Use default output device.
+ output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ match scope {
+ Scope::Input => Some(input_data_callback),
+ Scope::Output => Some(output_data_callback),
+ },
+ Some(state_callback),
+ &mut position as *mut i64 as *mut c_void,
+ |stream| {
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ operation(stream);
+ assert_eq!(unsafe { OPS.stream_stop.unwrap()(stream) }, ffi::CUBEB_OK);
+ },
+ );
+}
+
+// Plug and unplug devices, to test device collection changed callback
+// ================================================================================================
+#[ignore]
+#[test]
+fn test_plug_and_unplug_device() {
+ test_plug_and_unplug_device_in_scope(Scope::Input);
+ test_plug_and_unplug_device_in_scope(Scope::Output);
+}
+
+fn test_plug_and_unplug_device_in_scope(scope: Scope) {
+ let default_device = test_get_default_device(scope.clone());
+ if default_device.is_none() {
+ println!("No device for {:?} to test", scope);
+ return;
+ }
+
+ println!("Run test for {:?}", scope);
+ println!("NOTICE: The test will hang if the default input or output is an aggregate device.\nWe will fix this later.");
+
+ let default_device = default_device.unwrap();
+ let is_input = test_device_in_scope(default_device, Scope::Input);
+ let is_output = test_device_in_scope(default_device, Scope::Output);
+
+ let mut context = AudioUnitContext::new();
+
+ // Register the devices-changed callbacks.
+ #[derive(Clone, PartialEq)]
+ struct Counts {
+ input: u32,
+ output: u32,
+ }
+ impl Counts {
+ fn new() -> Self {
+ Self {
+ input: 0,
+ output: 0,
+ }
+ }
+ }
+ let counts = Arc::new(Notifier::new(Counts::new()));
+ let counts_notifier_ptr = counts.as_ref() as *const Notifier<Counts>;
+
+ assert!(context
+ .register_device_collection_changed(
+ DeviceType::INPUT,
+ Some(input_changed_callback),
+ counts_notifier_ptr as *mut c_void,
+ )
+ .is_ok());
+
+ assert!(context
+ .register_device_collection_changed(
+ DeviceType::OUTPUT,
+ Some(output_changed_callback),
+ counts_notifier_ptr as *mut c_void,
+ )
+ .is_ok());
+
+ let counts_watcher = Watcher::new(&counts);
+
+ let mut device_plugger = TestDevicePlugger::new(scope).unwrap();
+
+ {
+ // Simulate adding devices and monitor the devices-changed callbacks.
+ let mut counts_guard = counts.lock().unwrap();
+ let counts_start = counts_guard.clone();
+
+ assert!(device_plugger.plug().is_ok());
+
+ counts_guard = counts_watcher
+ .wait_while(counts_guard, |counts| {
+ (is_input && counts.input == counts_start.input)
+ || (is_output && counts.output == counts_start.output)
+ })
+ .unwrap();
+
+ // Check changed count.
+ assert_eq!(counts_guard.input, if is_input { 1 } else { 0 });
+ assert_eq!(counts_guard.output, if is_output { 1 } else { 0 });
+ }
+
+ {
+ // Simulate removing devices and monitor the devices-changed callbacks.
+ let mut counts_guard = counts.lock().unwrap();
+ let counts_start = counts_guard.clone();
+
+ assert!(device_plugger.unplug().is_ok());
+
+ counts_guard = counts_watcher
+ .wait_while(counts_guard, |counts| {
+ (is_input && counts.input == counts_start.input)
+ || (is_output && counts.output == counts_start.output)
+ })
+ .unwrap();
+
+ // Check changed count.
+ assert_eq!(counts_guard.input, if is_input { 2 } else { 0 });
+ assert_eq!(counts_guard.output, if is_output { 2 } else { 0 });
+ }
+
+ extern "C" fn input_changed_callback(context: *mut ffi::cubeb, data: *mut c_void) {
+ println!(
+ "Input device collection @ {:p} is changed. Data @ {:p}",
+ context, data
+ );
+ let notifier = unsafe { &*(data as *const Notifier<Counts>) };
+ {
+ let mut counts = notifier.lock().unwrap();
+ counts.input += 1;
+ notifier.notify(counts);
+ }
+ }
+
+ extern "C" fn output_changed_callback(context: *mut ffi::cubeb, data: *mut c_void) {
+ println!(
+ "output device collection @ {:p} is changed. Data @ {:p}",
+ context, data
+ );
+ let notifier = unsafe { &*(data as *const Notifier<Counts>) };
+ {
+ let mut counts = notifier.lock().unwrap();
+ counts.output += 1;
+ notifier.notify(counts);
+ }
+ }
+
+ context.register_device_collection_changed(DeviceType::OUTPUT, None, ptr::null_mut());
+ context.register_device_collection_changed(DeviceType::INPUT, None, ptr::null_mut());
+}
+
+// Switch default devices used by the active streams, to test device changed callback
+// ================================================================================================
+#[ignore]
+#[test]
+fn test_register_device_changed_callback_to_check_default_device_changed_input() {
+ test_register_device_changed_callback_to_check_default_device_changed(StreamType::INPUT);
+}
+
+#[ignore]
+#[test]
+fn test_register_device_changed_callback_to_check_default_device_changed_output() {
+ test_register_device_changed_callback_to_check_default_device_changed(StreamType::OUTPUT);
+}
+
+#[ignore]
+#[test]
+fn test_register_device_changed_callback_to_check_default_device_changed_duplex() {
+ test_register_device_changed_callback_to_check_default_device_changed(StreamType::DUPLEX);
+}
+
+fn test_register_device_changed_callback_to_check_default_device_changed(stm_type: StreamType) {
+ println!("NOTICE: The test will hang if the default input or output is an aggregate device.\nWe will fix this later.");
+
+ let inputs = if stm_type.contains(StreamType::INPUT) {
+ let devices = test_get_devices_in_scope(Scope::Input).len();
+ if devices >= 2 {
+ Some(devices)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+
+ let outputs = if stm_type.contains(StreamType::OUTPUT) {
+ let devices = test_get_devices_in_scope(Scope::Output).len();
+ if devices >= 2 {
+ Some(devices)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+
+ if inputs.is_none() && outputs.is_none() {
+ println!("Not enough devices to run the test!");
+ return;
+ }
+
+ let changed_count = Arc::new(Notifier::new(0u32));
+ let notifier_ptr = changed_count.as_ref() as *const Notifier<u32>;
+
+ test_get_stream_with_device_changed_callback(
+ "stream: test callback for default device changed",
+ stm_type,
+ None, // Use default input device.
+ None, // Use default output device.
+ notifier_ptr as *mut c_void,
+ state_callback,
+ device_changed_callback,
+ |stream| {
+ // If the duplex stream uses different input and output device,
+ // an aggregate device will be created and it will work for this duplex stream.
+ // This aggregate device will be added into the device list, but it won't
+ // be assigned to the default device, since the device list for setting
+ // default device is cached upon {input, output}_device_switcher is initialized.
+
+ let changed_watcher = Watcher::new(&changed_count);
+
+ if let Some(devices) = inputs {
+ let mut device_switcher = TestDeviceSwitcher::new(Scope::Input);
+ for _ in 0..devices {
+ // While the stream is reinitializing for the default device switch,
+ // switching the default device again will be ignored.
+ while stream.switching_device.load(atomic::Ordering::SeqCst) {
+ std::hint::spin_loop()
+ }
+ let guard = changed_watcher.lock().unwrap();
+ let start_cnt = guard.clone();
+ device_switcher.next();
+ changed_watcher
+ .wait_while(guard, |cnt| *cnt == start_cnt)
+ .unwrap();
+ }
+ }
+
+ if let Some(devices) = outputs {
+ let mut device_switcher = TestDeviceSwitcher::new(Scope::Output);
+ for _ in 0..devices {
+ // While the stream is reinitializing for the default device switch,
+ // switching the default device again will be ignored.
+ while stream.switching_device.load(atomic::Ordering::SeqCst) {
+ std::hint::spin_loop()
+ }
+ let guard = changed_watcher.lock().unwrap();
+ let start_cnt = guard.clone();
+ device_switcher.next();
+ changed_watcher
+ .wait_while(guard, |cnt| *cnt == start_cnt)
+ .unwrap();
+ }
+ }
+ },
+ );
+
+ extern "C" fn state_callback(
+ stream: *mut ffi::cubeb_stream,
+ _user_ptr: *mut c_void,
+ state: ffi::cubeb_state,
+ ) {
+ assert!(!stream.is_null());
+ assert_ne!(state, ffi::CUBEB_STATE_ERROR);
+ }
+
+ extern "C" fn device_changed_callback(data: *mut c_void) {
+ println!("Device change callback. data @ {:p}", data);
+ let notifier = unsafe { &*(data as *const Notifier<u32>) };
+ let mut count_guard = notifier.lock().unwrap();
+ *count_guard += 1;
+ notifier.notify(count_guard);
+ }
+}
+
+// Unplug the devices used by the active streams, to test
+// 1) device changed callback, or state callback
+// 2) stream reinitialization that may race with stream destroying
+// ================================================================================================
+
+// Input-only stream
+// -----------------
+
+// Unplug the non-default input device for an input stream
+// ------------------------------------------------------------------------------------------------
+
+#[ignore]
+#[test]
+fn test_destroy_input_stream_after_unplugging_a_nondefault_input_device() {
+ // The stream can be destroyed before the device-changed event handler runs
+ test_unplug_a_device_on_an_active_stream(StreamType::INPUT, Scope::Input, false, 0);
+}
+
+#[ignore]
+#[test]
+fn test_suspend_input_stream_by_unplugging_a_nondefault_input_device() {
+ // Expect to get an error state callback from the device-changed event handler
+ test_unplug_a_device_on_an_active_stream(StreamType::INPUT, Scope::Input, false, 2000);
+}
+
+// Unplug the default input device for an input stream
+// ------------------------------------------------------------------------------------------------
+#[ignore]
+#[test]
+fn test_destroy_input_stream_after_unplugging_a_default_input_device() {
+ // Expect to get a device-changed callback from the device-changed event handler,
+ // which will reinitialize the stream behind the scenes at the same time as
+ // the stream is being destroyed
+ test_unplug_a_device_on_an_active_stream(StreamType::INPUT, Scope::Input, true, 0);
+}
+
+#[ignore]
+#[test]
+fn test_reinit_input_stream_by_unplugging_a_default_input_device() {
+ // Expect to get a device-changed callback from the device-changed event handler,
+ // which will reinitialize the stream behind the scenes
+ test_unplug_a_device_on_an_active_stream(StreamType::INPUT, Scope::Input, true, 2000);
+}
+
+// Output-only stream
+// ------------------
+
+// Unplug the non-default output device for an output stream
+// ------------------------------------------------------------------------------------------------
+#[ignore]
+#[test]
+fn test_destroy_output_stream_after_unplugging_a_nondefault_output_device() {
+ test_unplug_a_device_on_an_active_stream(StreamType::OUTPUT, Scope::Output, false, 0);
+}
+
+#[ignore]
+#[test]
+fn test_suspend_output_stream_by_unplugging_a_nondefault_output_device() {
+ test_unplug_a_device_on_an_active_stream(StreamType::OUTPUT, Scope::Output, false, 2000);
+}
+
+// Unplug the default output device for an output stream
+// ------------------------------------------------------------------------------------------------
+
+#[ignore]
+#[test]
+fn test_destroy_output_stream_after_unplugging_a_default_output_device() {
+ // Expect to get a device-changed callback from the device-changed event handler,
+ // which will reinitialize the stream behind the scenes at the same time as
+ // the stream is being destroyed
+ test_unplug_a_device_on_an_active_stream(StreamType::OUTPUT, Scope::Output, true, 0);
+}
+
+#[ignore]
+#[test]
+fn test_reinit_output_stream_by_unplugging_a_default_output_device() {
+ // Expect to get a device-changed callback from the device-changed event handler,
+ // which will reinitialize the stream behind the scenes
+ test_unplug_a_device_on_an_active_stream(StreamType::OUTPUT, Scope::Output, true, 2000);
+}
+
+// Duplex stream
+// -------------
+
+// Unplug the non-default input device for a duplex stream
+// ------------------------------------------------------------------------------------------------
+
+#[ignore]
+#[test]
+fn test_destroy_duplex_stream_after_unplugging_a_nondefault_input_device() {
+ // The stream can be destroyed before the device-changed event handler runs
+ test_unplug_a_device_on_an_active_stream(StreamType::DUPLEX, Scope::Input, false, 0);
+}
+
+#[ignore]
+#[test]
+fn test_suspend_duplex_stream_by_unplugging_a_nondefault_input_device() {
+ // Expect to get an error state callback from the device-changed event handler
+ test_unplug_a_device_on_an_active_stream(StreamType::DUPLEX, Scope::Input, false, 2000);
+}
+
+// Unplug the non-default output device for a duplex stream
+// ------------------------------------------------------------------------------------------------
+
+#[ignore]
+#[test]
+fn test_destroy_duplex_stream_after_unplugging_a_nondefault_output_device() {
+ test_unplug_a_device_on_an_active_stream(StreamType::DUPLEX, Scope::Output, false, 0);
+}
+
+#[ignore]
+#[test]
+fn test_suspend_duplex_stream_by_unplugging_a_nondefault_output_device() {
+ test_unplug_a_device_on_an_active_stream(StreamType::DUPLEX, Scope::Output, false, 2000);
+}
+
+// Unplug the non-default in-out device for a duplex stream
+// ------------------------------------------------------------------------------------------------
+// TODO: Implement an in-out TestDevicePlugger
+
+// Unplug the default input device for a duplex stream
+// ------------------------------------------------------------------------------------------------
+
+#[ignore]
+#[test]
+fn test_destroy_duplex_stream_after_unplugging_a_default_input_device() {
+ // Expect to get a device-changed callback from the device-changed event handler,
+ // which will reinitialize the stream behind the scenes at the same time as
+ // the stream is being destroyed
+ test_unplug_a_device_on_an_active_stream(StreamType::DUPLEX, Scope::Input, true, 0);
+}
+
+#[ignore]
+#[test]
+fn test_reinit_duplex_stream_by_unplugging_a_default_input_device() {
+ // Expect to get a device-changed callback from the device-changed event handler,
+ // which will reinitialize the stream behind the scenes
+ test_unplug_a_device_on_an_active_stream(StreamType::DUPLEX, Scope::Input, true, 2000);
+}
+
+// Unplug the default output device for a duplex stream
+// ------------------------------------------------------------------------------------------------
+
+#[ignore]
+#[test]
+fn test_destroy_duplex_stream_after_unplugging_a_default_output_device() {
+ // Expect to get a device-changed callback from the device-changed event handler,
+ // which will reinitialize the stream behind the scenes at the same time as
+ // the stream is being destroyed
+ test_unplug_a_device_on_an_active_stream(StreamType::DUPLEX, Scope::Output, true, 0);
+}
+
+#[ignore]
+#[test]
+fn test_reinit_duplex_stream_by_unplugging_a_default_output_device() {
+ // Expect to get a device-changed callback from the device-changed event handler,
+ // which will reinitialize the stream behind the scenes
+ test_unplug_a_device_on_an_active_stream(StreamType::DUPLEX, Scope::Output, true, 2000);
+}
+
+fn test_unplug_a_device_on_an_active_stream(
+ stream_type: StreamType,
+ device_scope: Scope,
+ set_device_to_default: bool,
+ wait_up_to_ms: u64,
+) {
+ let has_input = test_get_default_device(Scope::Input).is_some();
+ let has_output = test_get_default_device(Scope::Output).is_some();
+
+ if stream_type.contains(StreamType::INPUT) && !has_input {
+ println!("No input device for input or duplex stream.");
+ return;
+ }
+
+ if stream_type.contains(StreamType::OUTPUT) && !has_output {
+ println!("No output device for output or duplex stream.");
+ return;
+ }
+
+ let default_device_before_plugging = test_get_default_device(device_scope.clone()).unwrap();
+ println!(
+ "Before plugging, default {:?} device is {}",
+ device_scope, default_device_before_plugging
+ );
+
+ let mut plugger = TestDevicePlugger::new(device_scope.clone()).unwrap();
+ assert!(plugger.plug().is_ok());
+ assert_ne!(plugger.get_device_id(), kAudioObjectUnknown);
+ println!(
+ "Create plugger device: {} for {:?}",
+ plugger.get_device_id(),
+ device_scope
+ );
+
+ let default_device_after_plugging = test_get_default_device(device_scope.clone()).unwrap();
+ println!(
+ "After plugging, default {:?} device is {}",
+ device_scope, default_device_after_plugging
+ );
+
+ // The newly added device, plugger, may end up being set as the default device.
+ // Before running the test, we need to set the default device to the correct one.
+ if set_device_to_default {
+ // plugger should be the default device for the test.
+ // If it's not, then set it to the default device.
+ if default_device_after_plugging != plugger.get_device_id() {
+ let prev_def_dev =
+ test_set_default_device(plugger.get_device_id(), device_scope.clone()).unwrap();
+ assert_eq!(prev_def_dev, default_device_after_plugging);
+ }
+ } else {
+ // plugger should NOT be the default device for the test.
+ // If it is, reset the default device to another one.
+ if default_device_after_plugging == plugger.get_device_id() {
+ let prev_def_dev =
+ test_set_default_device(default_device_before_plugging, device_scope.clone())
+ .unwrap();
+ assert_eq!(prev_def_dev, default_device_after_plugging);
+ }
+ }
+
+ // Ignore the returned devices' info since we only need to print them.
+ let _ = get_devices_info_in_scope(device_scope.clone());
+ println!(
+ "Current default {:?} device is {}",
+ device_scope,
+ test_get_default_device(device_scope.clone()).unwrap()
+ );
+
+ let (input_device, output_device) = match device_scope {
+ Scope::Input => (
+ if set_device_to_default {
+ None // default input device.
+ } else {
+ Some(plugger.get_device_id())
+ },
+ None,
+ ),
+ Scope::Output => (
+ None,
+ if set_device_to_default {
+ None // default output device.
+ } else {
+ Some(plugger.get_device_id())
+ },
+ ),
+ };
+
+ #[derive(Clone, PartialEq)]
+ struct Data {
+ changed_count: u32,
+ states: Vec<ffi::cubeb_state>,
+ }
+
+ impl Data {
+ fn new() -> Self {
+ Self {
+ changed_count: 0,
+ states: vec![],
+ }
+ }
+ }
+
+ let notifier = Arc::new(Notifier::new(Data::new()));
+ let notifier_ptr = notifier.as_ref() as *const Notifier<Data>;
+
+ test_get_stream_with_device_changed_callback(
+ "stream: test stream reinit/destroy after unplugging a device",
+ stream_type,
+ input_device,
+ output_device,
+ notifier_ptr as *mut c_void,
+ state_callback,
+ device_changed_callback,
+ |stream| {
+ stream.start();
+
+ let changed_watcher = Watcher::new(&notifier);
+ let mut data_guard = notifier.lock().unwrap();
+ assert_eq!(data_guard.states.last().unwrap(), &ffi::CUBEB_STATE_STARTED);
+
+ println!(
+ "Stream runs on the device {} for {:?}",
+ plugger.get_device_id(),
+ device_scope
+ );
+
+ let dev = plugger.get_device_id();
+ let start_changed_count = data_guard.changed_count.clone();
+
+ assert!(plugger.unplug().is_ok());
+
+ if set_device_to_default {
+ // The stream will be reinitialized if it follows the default input or output device.
+ println!("Waiting for default device to change and reinit");
+ data_guard = changed_watcher
+ .wait_while(data_guard, |data| {
+ data.changed_count == start_changed_count
+ || data.states.last().unwrap_or(&ffi::CUBEB_STATE_ERROR)
+ != &ffi::CUBEB_STATE_STARTED
+ })
+ .unwrap();
+ } else if wait_up_to_ms > 0 {
+ // The stream can be dropped immediately, before the device-changed callback fires,
+ // so we only check the states if we wait for them explicitly.
+ println!("Waiting for non-default device to enter error state");
+ let (new_guard, timeout_res) = changed_watcher
+ .wait_timeout_while(data_guard, Duration::from_millis(wait_up_to_ms), |data| {
+ data.states.last().unwrap_or(&ffi::CUBEB_STATE_STARTED)
+ != &ffi::CUBEB_STATE_ERROR
+ })
+ .unwrap();
+ assert!(!timeout_res.timed_out());
+ data_guard = new_guard;
+ }
+
+ println!(
+ "Device {} for {:?} has been unplugged. The default {:?} device now is {}",
+ dev,
+ device_scope,
+ device_scope,
+ test_get_default_device(device_scope.clone()).unwrap()
+ );
+
+ println!("The stream is going to be destroyed soon");
+ },
+ );
+
+ extern "C" fn state_callback(
+ stream: *mut ffi::cubeb_stream,
+ user_ptr: *mut c_void,
+ state: ffi::cubeb_state,
+ ) {
+ println!("State callback. user_ptr @ {:p}", user_ptr);
+ assert!(!stream.is_null());
+ println!(
+ "state: {}",
+ match state {
+ ffi::CUBEB_STATE_STARTED => "started",
+ ffi::CUBEB_STATE_STOPPED => "stopped",
+ ffi::CUBEB_STATE_DRAINED => "drained",
+ ffi::CUBEB_STATE_ERROR => "error",
+ _ => "unknown",
+ }
+ );
+ let notifier = unsafe { &mut *(user_ptr as *mut Notifier<Data>) };
+ let mut data_guard = notifier.lock().unwrap();
+ data_guard.states.push(state);
+ notifier.notify(data_guard);
+ }
+
+ extern "C" fn device_changed_callback(user_ptr: *mut c_void) {
+ println!("Device change callback. user_ptr @ {:p}", user_ptr);
+ let notifier = unsafe { &mut *(user_ptr as *mut Notifier<Data>) };
+ let mut data_guard = notifier.lock().unwrap();
+ data_guard.changed_count += 1;
+ notifier.notify(data_guard);
+ }
+}
+
+struct Notifier<T> {
+ value: Mutex<T>,
+ cvar: Condvar,
+}
+
+impl<T> Notifier<T> {
+ fn new(value: T) -> Self {
+ Self {
+ value: Mutex::new(value),
+ cvar: Condvar::new(),
+ }
+ }
+
+ fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
+ self.value.lock()
+ }
+
+ fn notify(&self, _guard: MutexGuard<'_, T>) {
+ self.cvar.notify_all();
+ }
+}
+
+struct Watcher<T: Clone + PartialEq> {
+ notifier: Arc<Notifier<T>>,
+}
+
+impl<T: Clone + PartialEq> Watcher<T> {
+ fn new(value: &Arc<Notifier<T>>) -> Self {
+ Self {
+ notifier: Arc::clone(value),
+ }
+ }
+
+ fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
+ self.notifier.lock()
+ }
+
+ fn wait_while<'a, F>(
+ &self,
+ guard: MutexGuard<'a, T>,
+ condition: F,
+ ) -> LockResult<MutexGuard<'a, T>>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ self.notifier.cvar.wait_while(guard, condition)
+ }
+
+ fn wait_timeout_while<'a, F>(
+ &self,
+ guard: MutexGuard<'a, T>,
+ dur: Duration,
+ condition: F,
+ ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ self.notifier.cvar.wait_timeout_while(guard, dur, condition)
+ }
+}
+
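+// A minimal usage sketch of the Notifier/Watcher pair above, for illustration only.
+// A worker thread bumps the shared counter and notifies, while the caller blocks in
+// `wait_while` until the value changes. The function below is hypothetical and is not
+// referenced by any test in this file.
+#[allow(dead_code)]
+fn example_notifier_watcher_roundtrip() {
+    let notifier = Arc::new(Notifier::new(0u32));
+    let watcher = Watcher::new(&notifier);
+    let worker = {
+        let notifier = Arc::clone(&notifier);
+        std::thread::spawn(move || {
+            // Update the value under the lock, then wake any waiters.
+            let mut count = notifier.lock().unwrap();
+            *count += 1;
+            notifier.notify(count);
+        })
+    };
+    // Block until the worker has incremented the counter at least once.
+    let guard = watcher.lock().unwrap();
+    let guard = watcher.wait_while(guard, |count| *count == 0).unwrap();
+    assert!(*guard >= 1);
+    drop(guard);
+    worker.join().unwrap();
+}
+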
+fn test_get_stream_with_device_changed_callback<F>(
+ name: &'static str,
+ stm_type: StreamType,
+ input_device: Option<AudioObjectID>,
+ output_device: Option<AudioObjectID>,
+ data: *mut c_void,
+ state_callback: extern "C" fn(*mut ffi::cubeb_stream, *mut c_void, ffi::cubeb_state),
+ device_changed_callback: extern "C" fn(*mut c_void),
+ operation: F,
+) where
+ F: FnOnce(&mut AudioUnitStream),
+{
+ test_get_stream_with_default_data_callback_by_type(
+ name,
+ stm_type,
+ input_device,
+ output_device,
+ state_callback,
+ data,
+ |stream| {
+ assert!(stream
+ .register_device_changed_callback(Some(device_changed_callback))
+ .is_ok());
+ operation(stream);
+ assert!(stream.register_device_changed_callback(None).is_ok());
+ },
+ );
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/device_property.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/device_property.rs
new file mode 100644
index 0000000000..8277a7642d
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/device_property.rs
@@ -0,0 +1,473 @@
+use super::utils::{test_get_default_device, Scope};
+use super::*;
+
+// get_device_uid
+// ------------------------------------
+#[test]
+fn test_get_device_uid() {
+ // Input device.
+ if let Some(input) = test_get_default_device(Scope::Input) {
+ let uid = get_device_uid(input, DeviceType::INPUT).unwrap();
+ let uid = uid.into_string();
+ assert!(!uid.is_empty());
+ }
+
+ // Output device.
+ if let Some(output) = test_get_default_device(Scope::Output) {
+ let uid = get_device_uid(output, DeviceType::OUTPUT).unwrap();
+ let uid = uid.into_string();
+ assert!(!uid.is_empty());
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_uid_by_unknown_device() {
+ // Unknown device.
+ assert!(get_device_uid(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_model_uid
+// ------------------------------------
+// Some devices (e.g., AirPods) fail to get model uid.
+#[test]
+fn test_get_device_model_uid() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ match get_device_model_uid(device, DeviceType::INPUT) {
+ Ok(uid) => println!("input model uid: {}", uid.into_string()),
+ Err(e) => println!("No input model uid. Error: {}", e),
+ }
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ match get_device_model_uid(device, DeviceType::OUTPUT) {
+ Ok(uid) => println!("output model uid: {}", uid.into_string()),
+ Err(e) => println!("No output model uid. Error: {}", e),
+ }
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_model_uid_by_unknown_device() {
+ assert!(get_device_model_uid(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_transport_type
+// ------------------------------------
+#[test]
+fn test_get_device_transport_type() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ match get_device_transport_type(device, DeviceType::INPUT) {
+ Ok(trans_type) => println!(
+ "input transport type: {:X}, {:?}",
+ trans_type,
+ convert_uint32_into_string(trans_type)
+ ),
+ Err(e) => println!("No input transport type. Error: {}", e),
+ }
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ match get_device_transport_type(device, DeviceType::OUTPUT) {
+ Ok(trans_type) => println!(
+ "output transport type: {:X}, {:?}",
+ trans_type,
+ convert_uint32_into_string(trans_type)
+ ),
+ Err(e) => println!("No output transport type. Error: {}", e),
+ }
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_transport_type_by_unknown_device() {
+ assert!(get_device_transport_type(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_source
+// ------------------------------------
+// Some USB headsets (e.g., Plantronics .Audio 628) fail to get the data source.
+#[test]
+fn test_get_device_source() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ match get_device_source(device, DeviceType::INPUT) {
+ Ok(source) => println!(
+ "input source: {:X}, {:?}",
+ source,
+ convert_uint32_into_string(source)
+ ),
+ Err(e) => println!("No input data source. Error: {}", e),
+ }
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ match get_device_source(device, DeviceType::OUTPUT) {
+ Ok(source) => println!(
+ "output source: {:X}, {:?}",
+ source,
+ convert_uint32_into_string(source)
+ ),
+ Err(e) => println!("No output data source. Error: {}", e),
+ }
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_source_by_unknown_device() {
+ assert!(get_device_source(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_source_name
+// ------------------------------------
+#[test]
+fn test_get_device_source_name() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ match get_device_source_name(device, DeviceType::INPUT) {
+ Ok(name) => println!("input: {}", name.into_string()),
+ Err(e) => println!("No input data source name. Error: {}", e),
+ }
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ match get_device_source_name(device, DeviceType::OUTPUT) {
+ Ok(name) => println!("output: {}", name.into_string()),
+ Err(e) => println!("No output data source name. Error: {}", e),
+ }
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_source_name_by_unknown_device() {
+ assert!(get_device_source_name(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_name
+// ------------------------------------
+#[test]
+fn test_get_device_name() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let name = get_device_name(device, DeviceType::INPUT).unwrap();
+ println!("input device name: {}", name.into_string());
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let name = get_device_name(device, DeviceType::OUTPUT).unwrap();
+ println!("output device name: {}", name.into_string());
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_name_by_unknown_device() {
+ assert!(get_device_name(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_manufacturer
+// ------------------------------------
+#[test]
+fn test_get_device_manufacturer() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ // Some devices like AirPods cannot get the vendor info so we print the error directly.
+ // TODO: Replace `map` and `unwrap_or_else` by `map_or_else`
+ let name = get_device_manufacturer(device, DeviceType::INPUT)
+ .map(|name| name.into_string())
+ .unwrap_or_else(|e| format!("Error: {}", e));
+ println!("input device vendor: {}", name);
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ // Some devices like AirPods cannot get the vendor info so we print the error directly.
+ // TODO: Replace `map` and `unwrap_or_else` by `map_or_else`
+ let name = get_device_manufacturer(device, DeviceType::OUTPUT)
+ .map(|name| name.into_string())
+ .unwrap_or_else(|e| format!("Error: {}", e));
+ println!("output device vendor: {}", name);
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_manufacturer_by_unknown_device() {
+ assert!(get_device_manufacturer(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_buffer_frame_size_range
+// ------------------------------------
+#[test]
+fn test_get_device_buffer_frame_size_range() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let range = get_device_buffer_frame_size_range(device, DeviceType::INPUT).unwrap();
+ println!(
+ "range of input buffer frame size: {}-{}",
+ range.mMinimum, range.mMaximum
+ );
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let range = get_device_buffer_frame_size_range(device, DeviceType::OUTPUT).unwrap();
+ println!(
+ "range of output buffer frame size: {}-{}",
+ range.mMinimum, range.mMaximum
+ );
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_buffer_frame_size_range_by_unknown_device() {
+ assert!(get_device_buffer_frame_size_range(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_latency
+// ------------------------------------
+#[test]
+fn test_get_device_latency() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let latency = get_device_latency(device, DeviceType::INPUT).unwrap();
+ println!("latency of input device: {}", latency);
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let latency = get_device_latency(device, DeviceType::OUTPUT).unwrap();
+ println!("latency of output device: {}", latency);
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_latency_by_unknown_device() {
+ assert!(get_device_latency(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_streams
+// ------------------------------------
+#[test]
+fn test_get_device_streams() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let streams = get_device_streams(device, DeviceType::INPUT).unwrap();
+ println!("streams on the input device: {:?}", streams);
+ assert!(!streams.is_empty());
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let streams = get_device_streams(device, DeviceType::OUTPUT).unwrap();
+ println!("streams on the output device: {:?}", streams);
+ assert!(!streams.is_empty());
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_streams_by_unknown_device() {
+ assert!(get_device_streams(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_device_sample_rate
+// ------------------------------------
+#[test]
+fn test_get_device_sample_rate() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let rate = get_device_sample_rate(device, DeviceType::INPUT).unwrap();
+ println!("input sample rate: {}", rate);
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let rate = get_device_sample_rate(device, DeviceType::OUTPUT).unwrap();
+ println!("output sample rate: {}", rate);
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_device_sample_rate_by_unknown_device() {
+ assert!(get_device_sample_rate(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_ranges_of_device_sample_rate
+// ------------------------------------
+#[test]
+fn test_get_ranges_of_device_sample_rate() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let ranges = get_ranges_of_device_sample_rate(device, DeviceType::INPUT).unwrap();
+ println!("ranges of input sample rate: {:?}", ranges);
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let ranges = get_ranges_of_device_sample_rate(device, DeviceType::OUTPUT).unwrap();
+ println!("ranges of output sample rate: {:?}", ranges);
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_ranges_of_device_sample_rate_by_unknown_device() {
+ assert!(get_ranges_of_device_sample_rate(kAudioObjectUnknown, DeviceType::INPUT).is_err());
+}
+
+// get_stream_latency
+// ------------------------------------
+#[test]
+fn test_get_stream_latency() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let streams = get_device_streams(device, DeviceType::INPUT).unwrap();
+ for stream in streams {
+ let latency = get_stream_latency(stream).unwrap();
+ println!("latency of the input stream {} is {}", stream, latency);
+ }
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let streams = get_device_streams(device, DeviceType::OUTPUT).unwrap();
+ for stream in streams {
+ let latency = get_stream_latency(stream).unwrap();
+ println!("latency of the output stream {} is {}", stream, latency);
+ }
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_stream_latency_by_unknown_device() {
+ assert!(get_stream_latency(kAudioObjectUnknown).is_err());
+}
+
+// get_stream_virtual_format
+// ------------------------------------
+#[test]
+fn test_get_stream_virtual_format() {
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let streams = get_device_streams(device, DeviceType::INPUT).unwrap();
+ let formats = streams
+ .iter()
+ .map(|s| get_stream_virtual_format(*s))
+ .collect::<Vec<std::result::Result<AudioStreamBasicDescription, OSStatus>>>();
+ println!("input stream formats: {:?}", formats);
+ assert!(!formats.is_empty());
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let streams = get_device_streams(device, DeviceType::OUTPUT).unwrap();
+ let formats = streams
+ .iter()
+ .map(|s| get_stream_virtual_format(*s))
+ .collect::<Vec<std::result::Result<AudioStreamBasicDescription, OSStatus>>>();
+ println!("output stream formats: {:?}", formats);
+ assert!(!formats.is_empty());
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_stream_virtual_format_by_unknown_stream() {
+ assert!(get_stream_virtual_format(kAudioObjectUnknown).is_err());
+}
+
+// get_stream_terminal_type
+// ------------------------------------
+
+#[test]
+fn test_get_stream_terminal_type() {
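+    // Maps a CoreAudio kAudioStreamTerminalType* value to the cubeb DeviceType it
+    // implies: known microphone/speaker types are matched explicitly, anything else
+    // in the USB terminal-type ranges (INPUT_UNDEFINED..OUTPUT_UNDEFINED for input,
+    // OUTPUT_UNDEFINED..BIDIRECTIONAL_UNDEFINED for output) is classified by range,
+    // and unknown values yield None.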
+ fn terminal_type_to_device_type(terminal_type: u32) -> Option<DeviceType> {
+ #[allow(non_upper_case_globals)]
+ match terminal_type {
+ kAudioStreamTerminalTypeMicrophone
+ | kAudioStreamTerminalTypeHeadsetMicrophone
+ | kAudioStreamTerminalTypeReceiverMicrophone => Some(DeviceType::INPUT),
+ kAudioStreamTerminalTypeSpeaker
+ | kAudioStreamTerminalTypeHeadphones
+ | kAudioStreamTerminalTypeLFESpeaker
+ | kAudioStreamTerminalTypeReceiverSpeaker => Some(DeviceType::OUTPUT),
+ t if t > INPUT_UNDEFINED && t < OUTPUT_UNDEFINED => Some(DeviceType::INPUT),
+ t if t > OUTPUT_UNDEFINED && t < BIDIRECTIONAL_UNDEFINED => Some(DeviceType::OUTPUT),
+ t => {
+ println!("UNKNOWN TerminalType {:#06x}", t);
+ None
+ }
+ }
+ }
+ if let Some(device) = test_get_default_device(Scope::Input) {
+ let streams = get_device_streams(device, DeviceType::INPUT).unwrap();
+ for stream in streams {
+ assert_eq!(
+ terminal_type_to_device_type(get_stream_terminal_type(stream).unwrap()),
+ Some(DeviceType::INPUT)
+ );
+ }
+ } else {
+ println!("No input device.");
+ }
+
+ if let Some(device) = test_get_default_device(Scope::Output) {
+ let streams = get_device_streams(device, DeviceType::OUTPUT).unwrap();
+ for stream in streams {
+ assert_eq!(
+ terminal_type_to_device_type(get_stream_terminal_type(stream).unwrap()),
+ Some(DeviceType::OUTPUT)
+ );
+ }
+ } else {
+ println!("No output device.");
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_get_stream_terminal_type_by_unknown_stream() {
+ assert!(get_stream_terminal_type(kAudioObjectUnknown).is_err());
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/interfaces.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/interfaces.rs
new file mode 100644
index 0000000000..340fec002d
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/interfaces.rs
@@ -0,0 +1,1215 @@
+extern crate itertools;
+
+use self::itertools::iproduct;
+use super::utils::{
+ get_devices_info_in_scope, noop_data_callback, test_device_channels_in_scope,
+ test_get_default_device, test_ops_context_operation, test_ops_stream_operation, Scope,
+};
+use super::*;
+
+// Context Operations
+// ------------------------------------------------------------------------------------------------
+#[test]
+fn test_ops_context_init_and_destroy() {
+ test_ops_context_operation("context: init and destroy", |_context_ptr| {});
+}
+
+#[test]
+fn test_ops_context_backend_id() {
+ test_ops_context_operation("context: backend id", |context_ptr| {
+ let backend = unsafe {
+ let ptr = OPS.get_backend_id.unwrap()(context_ptr);
+ CStr::from_ptr(ptr).to_string_lossy().into_owned()
+ };
+ assert_eq!(backend, "audiounit-rust");
+ });
+}
+
+#[test]
+fn test_ops_context_max_channel_count() {
+ test_ops_context_operation("context: max channel count", |context_ptr| {
+ let output_exists = test_get_default_device(Scope::Output).is_some();
+ let mut max_channel_count = 0;
+ let r = unsafe { OPS.get_max_channel_count.unwrap()(context_ptr, &mut max_channel_count) };
+ if output_exists {
+ assert_eq!(r, ffi::CUBEB_OK);
+ assert_ne!(max_channel_count, 0);
+ } else {
+ assert_eq!(r, ffi::CUBEB_ERROR);
+ assert_eq!(max_channel_count, 0);
+ }
+ });
+}
+
+#[test]
+fn test_ops_context_min_latency() {
+ test_ops_context_operation("context: min latency", |context_ptr| {
+ let output_exists = test_get_default_device(Scope::Output).is_some();
+ let params = ffi::cubeb_stream_params::default();
+ let mut latency = u32::max_value();
+ let r = unsafe { OPS.get_min_latency.unwrap()(context_ptr, params, &mut latency) };
+ if output_exists {
+ assert_eq!(r, ffi::CUBEB_OK);
+ assert!(latency >= SAFE_MIN_LATENCY_FRAMES);
+ assert!(SAFE_MAX_LATENCY_FRAMES >= latency);
+ } else {
+ assert_eq!(r, ffi::CUBEB_ERROR);
+ assert_eq!(latency, u32::max_value());
+ }
+ });
+}
+
+#[test]
+fn test_ops_context_preferred_sample_rate() {
+ test_ops_context_operation("context: preferred sample rate", |context_ptr| {
+ let output_exists = test_get_default_device(Scope::Output).is_some();
+ let mut rate = u32::max_value();
+ let r = unsafe { OPS.get_preferred_sample_rate.unwrap()(context_ptr, &mut rate) };
+ if output_exists {
+ assert_eq!(r, ffi::CUBEB_OK);
+ assert_ne!(rate, u32::max_value());
+ assert_ne!(rate, 0);
+ } else {
+ assert_eq!(r, ffi::CUBEB_ERROR);
+ assert_eq!(rate, u32::max_value());
+ }
+ });
+}
+
+#[test]
+fn test_ops_context_supported_input_processing_params() {
+ test_ops_context_operation(
+ "context: supported input processing params",
+ |context_ptr| {
+ let mut params = ffi::CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ let r = unsafe {
+ OPS.get_supported_input_processing_params.unwrap()(context_ptr, &mut params)
+ };
+ assert_eq!(r, ffi::CUBEB_OK);
+ assert_eq!(
+ params,
+ ffi::CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL
+ );
+ },
+ );
+}
+
+#[test]
+fn test_ops_context_enumerate_devices_unknown() {
+ test_ops_context_operation("context: enumerate devices (unknown)", |context_ptr| {
+ let mut coll = ffi::cubeb_device_collection {
+ device: ptr::null_mut(),
+ count: 0,
+ };
+ assert_eq!(
+ unsafe {
+ OPS.enumerate_devices.unwrap()(
+ context_ptr,
+ ffi::CUBEB_DEVICE_TYPE_UNKNOWN,
+ &mut coll,
+ )
+ },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(coll.count, 0);
+ assert_eq!(coll.device, ptr::null_mut());
+ assert_eq!(
+ unsafe { OPS.device_collection_destroy.unwrap()(context_ptr, &mut coll) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(coll.count, 0);
+ assert_eq!(coll.device, ptr::null_mut());
+ });
+}
+
+#[test]
+fn test_ops_context_enumerate_devices_input() {
+ test_ops_context_operation("context: enumerate devices (input)", |context_ptr| {
+ let having_input = test_get_default_device(Scope::Input).is_some();
+ let mut coll = ffi::cubeb_device_collection {
+ device: ptr::null_mut(),
+ count: 0,
+ };
+ assert_eq!(
+ unsafe {
+ OPS.enumerate_devices.unwrap()(context_ptr, ffi::CUBEB_DEVICE_TYPE_INPUT, &mut coll)
+ },
+ ffi::CUBEB_OK
+ );
+ if having_input {
+ assert_ne!(coll.count, 0);
+ assert_ne!(coll.device, ptr::null_mut());
+ } else {
+ assert_eq!(coll.count, 0);
+ assert_eq!(coll.device, ptr::null_mut());
+ }
+ assert_eq!(
+ unsafe { OPS.device_collection_destroy.unwrap()(context_ptr, &mut coll) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(coll.count, 0);
+ assert_eq!(coll.device, ptr::null_mut());
+ });
+}
+
+#[test]
+fn test_ops_context_enumerate_devices_output() {
+ test_ops_context_operation("context: enumerate devices (output)", |context_ptr| {
+ let output_exists = test_get_default_device(Scope::Output).is_some();
+ let mut coll = ffi::cubeb_device_collection {
+ device: ptr::null_mut(),
+ count: 0,
+ };
+ assert_eq!(
+ unsafe {
+ OPS.enumerate_devices.unwrap()(
+ context_ptr,
+ ffi::CUBEB_DEVICE_TYPE_OUTPUT,
+ &mut coll,
+ )
+ },
+ ffi::CUBEB_OK
+ );
+ if output_exists {
+ assert_ne!(coll.count, 0);
+ assert_ne!(coll.device, ptr::null_mut());
+ } else {
+ assert_eq!(coll.count, 0);
+ assert_eq!(coll.device, ptr::null_mut());
+ }
+ assert_eq!(
+ unsafe { OPS.device_collection_destroy.unwrap()(context_ptr, &mut coll) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(coll.count, 0);
+ assert_eq!(coll.device, ptr::null_mut());
+ });
+}
+
+#[test]
+fn test_ops_context_device_collection_destroy() {
+ // Destroy a dummy device collection, without calling enumerate_devices to allocate memory for the device collection
+ test_ops_context_operation("context: device collection destroy", |context_ptr| {
+ let mut coll = ffi::cubeb_device_collection {
+ device: ptr::null_mut(),
+ count: 0,
+ };
+ assert_eq!(
+ unsafe { OPS.device_collection_destroy.unwrap()(context_ptr, &mut coll) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(coll.device, ptr::null_mut());
+ assert_eq!(coll.count, 0);
+ });
+}
+
+#[test]
+fn test_ops_context_register_device_collection_changed_unknown() {
+ test_ops_context_operation(
+ "context: register device collection changed (unknown)",
+ |context_ptr| {
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ ffi::CUBEB_DEVICE_TYPE_UNKNOWN,
+ None,
+ ptr::null_mut(),
+ )
+ },
+ ffi::CUBEB_ERROR_INVALID_PARAMETER
+ );
+ },
+ );
+}
+
+#[test]
+fn test_ops_context_register_device_collection_changed_twice_input() {
+ test_ops_context_register_device_collection_changed_twice(ffi::CUBEB_DEVICE_TYPE_INPUT);
+}
+
+#[test]
+fn test_ops_context_register_device_collection_changed_twice_output() {
+ test_ops_context_register_device_collection_changed_twice(ffi::CUBEB_DEVICE_TYPE_OUTPUT);
+}
+
+#[test]
+fn test_ops_context_register_device_collection_changed_twice_inout() {
+ test_ops_context_register_device_collection_changed_twice(
+ ffi::CUBEB_DEVICE_TYPE_INPUT | ffi::CUBEB_DEVICE_TYPE_OUTPUT,
+ );
+}
+
+fn test_ops_context_register_device_collection_changed_twice(devtype: u32) {
+ extern "C" fn callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+ let label_input: &'static str = "context: register device collection changed twice (input)";
+ let label_output: &'static str = "context: register device collection changed twice (output)";
+ let label_inout: &'static str = "context: register device collection changed twice (inout)";
+ let label = if devtype == ffi::CUBEB_DEVICE_TYPE_INPUT {
+ label_input
+ } else if devtype == ffi::CUBEB_DEVICE_TYPE_OUTPUT {
+ label_output
+ } else if devtype == ffi::CUBEB_DEVICE_TYPE_INPUT | ffi::CUBEB_DEVICE_TYPE_OUTPUT {
+ label_inout
+ } else {
+ return;
+ };
+
+ test_ops_context_operation(label, |context_ptr| {
+ // Register a callback within the defined scope.
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ devtype,
+ Some(callback),
+ ptr::null_mut(),
+ )
+ },
+ ffi::CUBEB_OK
+ );
+
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ devtype,
+ Some(callback),
+ ptr::null_mut(),
+ )
+ },
+ ffi::CUBEB_ERROR_INVALID_PARAMETER
+ );
+ // Unregister
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ devtype,
+ None,
+ ptr::null_mut(),
+ )
+ },
+ ffi::CUBEB_OK
+ );
+ });
+}
+
+#[test]
+fn test_ops_context_register_device_collection_changed() {
+ extern "C" fn callback(_: *mut ffi::cubeb, _: *mut c_void) {}
+ test_ops_context_operation(
+ "context: register device collection changed",
+ |context_ptr| {
+ let devtypes: [ffi::cubeb_device_type; 3] = [
+ ffi::CUBEB_DEVICE_TYPE_INPUT,
+ ffi::CUBEB_DEVICE_TYPE_OUTPUT,
+ ffi::CUBEB_DEVICE_TYPE_INPUT | ffi::CUBEB_DEVICE_TYPE_OUTPUT,
+ ];
+
+ for devtype in &devtypes {
+                // Register a callback in the defined scope.
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ *devtype,
+ Some(callback),
+ ptr::null_mut(),
+ )
+ },
+ ffi::CUBEB_OK
+ );
+
+ // Unregister all callbacks regardless of the scope.
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ ffi::CUBEB_DEVICE_TYPE_INPUT | ffi::CUBEB_DEVICE_TYPE_OUTPUT,
+ None,
+ ptr::null_mut(),
+ )
+ },
+ ffi::CUBEB_OK
+ );
+
+                // Register a callback in the defined scope again.
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ *devtype,
+ Some(callback),
+ ptr::null_mut(),
+ )
+ },
+ ffi::CUBEB_OK
+ );
+
+ // Unregister callback within the defined scope.
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ *devtype,
+ None,
+ ptr::null_mut(),
+ )
+ },
+ ffi::CUBEB_OK
+ );
+ }
+ },
+ );
+}
+
+#[test]
+fn test_ops_context_register_device_collection_changed_with_a_duplex_stream() {
+ use std::thread;
+ use std::time::Duration;
+
+ extern "C" fn callback(_: *mut ffi::cubeb, got_called_ptr: *mut c_void) {
+ let got_called = unsafe { &mut *(got_called_ptr as *mut bool) };
+ *got_called = true;
+ }
+
+ test_ops_context_operation(
+ "context: register device collection changed and create a duplex stream",
+ |context_ptr| {
+ let got_called = Box::new(false);
+ let got_called_ptr = Box::into_raw(got_called);
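+            // The flag is leaked into a raw pointer so the C callback can write
+            // through it; it is reclaimed with Box::from_raw once the callback is
+            // unregistered below.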
+
+ // Register a callback monitoring both input and output device collection.
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ ffi::CUBEB_DEVICE_TYPE_INPUT | ffi::CUBEB_DEVICE_TYPE_OUTPUT,
+ Some(callback),
+ got_called_ptr as *mut c_void,
+ )
+ },
+ ffi::CUBEB_OK
+ );
+
+ // The aggregate device is very likely to be created in the system
+ // when creating a duplex stream. We need to make sure it won't trigger
+ // the callback.
+ test_default_duplex_stream_operation("duplex stream", |_stream| {
+ // Do nothing but wait for device-collection change.
+ thread::sleep(Duration::from_millis(200));
+ });
+
+ // Unregister the callback.
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ ffi::CUBEB_DEVICE_TYPE_INPUT | ffi::CUBEB_DEVICE_TYPE_OUTPUT,
+ None,
+ got_called_ptr as *mut c_void,
+ )
+ },
+ ffi::CUBEB_OK
+ );
+
+ let got_called = unsafe { Box::from_raw(got_called_ptr) };
+ assert!(!got_called.as_ref());
+ },
+ );
+}
+
+#[test]
+#[ignore]
+fn test_ops_context_register_device_collection_changed_manual() {
+ test_ops_context_operation(
+ "(manual) context: register device collection changed",
+ |context_ptr| {
+ println!("context @ {:p}", context_ptr);
+
+ struct Data {
+ context: *mut ffi::cubeb,
+ touched: u32, // TODO: Use AtomicU32 instead
+ }
+
+ extern "C" fn input_callback(context: *mut ffi::cubeb, user: *mut c_void) {
+ println!("input > context @ {:p}", context);
+ let data = unsafe { &mut (*(user as *mut Data)) };
+ assert_eq!(context, data.context);
+ data.touched += 1;
+ }
+
+ extern "C" fn output_callback(context: *mut ffi::cubeb, user: *mut c_void) {
+ println!("output > context @ {:p}", context);
+ let data = unsafe { &mut (*(user as *mut Data)) };
+ assert_eq!(context, data.context);
+ data.touched += 1;
+ }
+
+ let mut data = Data {
+ context: context_ptr,
+ touched: 0,
+ };
+
+ // Register a callback for input scope.
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ ffi::CUBEB_DEVICE_TYPE_INPUT,
+ Some(input_callback),
+ &mut data as *mut Data as *mut c_void,
+ )
+ },
+ ffi::CUBEB_OK
+ );
+
+ // Register a callback for output scope.
+ assert_eq!(
+ unsafe {
+ OPS.register_device_collection_changed.unwrap()(
+ context_ptr,
+ ffi::CUBEB_DEVICE_TYPE_OUTPUT,
+ Some(output_callback),
+ &mut data as *mut Data as *mut c_void,
+ )
+ },
+ ffi::CUBEB_OK
+ );
+
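+            // Manual test: busy-wait until both the input and output
+            // collection-changed callbacks have fired, which requires plugging or
+            // unplugging a device while the test is running.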
+ while data.touched < 2 {}
+ },
+ );
+}
+
+#[test]
+fn test_ops_context_stream_init_no_stream_params() {
+ let name = "context: stream_init with no stream params";
+ test_ops_context_operation(name, |context_ptr| {
+ let mut stream: *mut ffi::cubeb_stream = ptr::null_mut();
+ let stream_name = CString::new(name).expect("Failed to create stream name");
+ assert_eq!(
+ unsafe {
+ OPS.stream_init.unwrap()(
+ context_ptr,
+ &mut stream,
+ stream_name.as_ptr(),
+ ptr::null_mut(), // Use default input device.
+ ptr::null_mut(), // No input parameters.
+ ptr::null_mut(), // Use default output device.
+ ptr::null_mut(), // No output parameters.
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ )
+ },
+ ffi::CUBEB_ERROR_INVALID_PARAMETER
+ );
+ assert!(stream.is_null());
+ });
+}
+
+#[test]
+fn test_ops_context_stream_init_no_input_stream_params() {
+ let name = "context: stream_init with no input stream params";
+ let input_device = test_get_default_device(Scope::Input);
+ if input_device.is_none() {
+ println!("No input device to perform input tests for \"{}\".", name);
+ return;
+ }
+ test_ops_context_operation(name, |context_ptr| {
+ let mut stream: *mut ffi::cubeb_stream = ptr::null_mut();
+ let stream_name = CString::new(name).expect("Failed to create stream name");
+ assert_eq!(
+ unsafe {
+ OPS.stream_init.unwrap()(
+ context_ptr,
+ &mut stream,
+ stream_name.as_ptr(),
+ input_device.unwrap() as ffi::cubeb_devid,
+ ptr::null_mut(), // No input parameters.
+ ptr::null_mut(), // Use default output device.
+ ptr::null_mut(), // No output parameters.
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ )
+ },
+ ffi::CUBEB_ERROR_INVALID_PARAMETER
+ );
+ assert!(stream.is_null());
+ });
+}
+
+#[test]
+fn test_ops_context_stream_init_no_output_stream_params() {
+ let name = "context: stream_init with no output stream params";
+ let output_device = test_get_default_device(Scope::Output);
+ if output_device.is_none() {
+ println!("No output device to perform output tests for \"{}\".", name);
+ return;
+ }
+ test_ops_context_operation(name, |context_ptr| {
+ let mut stream: *mut ffi::cubeb_stream = ptr::null_mut();
+ let stream_name = CString::new(name).expect("Failed to create stream name");
+ assert_eq!(
+ unsafe {
+ OPS.stream_init.unwrap()(
+ context_ptr,
+ &mut stream,
+ stream_name.as_ptr(),
+ ptr::null_mut(), // Use default input device.
+ ptr::null_mut(), // No input parameters.
+ output_device.unwrap() as ffi::cubeb_devid,
+ ptr::null_mut(), // No output parameters.
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ )
+ },
+ ffi::CUBEB_ERROR_INVALID_PARAMETER
+ );
+ assert!(stream.is_null());
+ });
+}
+
+#[test]
+fn test_ops_context_stream_init_no_data_callback() {
+ let name = "context: stream_init with no data callback";
+ test_ops_context_operation(name, |context_ptr| {
+ let mut stream: *mut ffi::cubeb_stream = ptr::null_mut();
+ let stream_name = CString::new(name).expect("Failed to create stream name");
+
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ output_params.rate = 44100;
+ output_params.channels = 2;
+ output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ assert_eq!(
+ unsafe {
+ OPS.stream_init.unwrap()(
+ context_ptr,
+ &mut stream,
+ stream_name.as_ptr(),
+ ptr::null_mut(), // Use default input device.
+ ptr::null_mut(), // No input parameters.
+ ptr::null_mut(), // Use default output device.
+ &mut output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ None, // No data callback.
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ )
+ },
+ ffi::CUBEB_ERROR_INVALID_PARAMETER
+ );
+ assert!(stream.is_null());
+ });
+}
+
+#[test]
+fn test_ops_context_stream_init_channel_rate_combinations() {
+ let name = "context: stream_init with various channels and rates";
+ test_ops_context_operation(name, |context_ptr| {
+ let mut stream: *mut ffi::cubeb_stream = ptr::null_mut();
+ let stream_name = CString::new(name).expect("Failed to create stream name");
+
+ const MAX_NUM_CHANNELS: u32 = 32;
+ let channel_values: Vec<u32> = vec![1, 2, 3, 4, 6];
+ let freq_values: Vec<u32> = vec![16000, 24000, 44100, 48000];
+ let is_float_values: Vec<bool> = vec![false, true];
+
+ for (channels, freq, is_float) in iproduct!(channel_values, freq_values, is_float_values) {
+ assert!(channels < MAX_NUM_CHANNELS);
+ println!("--------------------------");
+ println!(
+ "Testing {} channel(s), {} Hz, {}\n",
+ channels,
+ freq,
+ if is_float { "float" } else { "short" }
+ );
+
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = if is_float {
+ ffi::CUBEB_SAMPLE_FLOAT32NE
+ } else {
+ ffi::CUBEB_SAMPLE_S16NE
+ };
+ output_params.rate = freq;
+ output_params.channels = channels;
+ output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ assert_eq!(
+ unsafe {
+ OPS.stream_init.unwrap()(
+ context_ptr,
+ &mut stream,
+ stream_name.as_ptr(),
+ ptr::null_mut(), // Use default input device.
+ ptr::null_mut(), // No input parameters.
+ ptr::null_mut(), // Use default output device.
+ &mut output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+                        Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ )
+ },
+ ffi::CUBEB_OK
+ );
+ assert!(!stream.is_null());
+ }
+ });
+}
+
+// Stream Operations
+// ------------------------------------------------------------------------------------------------
+fn test_default_output_stream_operation<F>(name: &'static str, operation: F)
+where
+ F: FnOnce(*mut ffi::cubeb_stream),
+{
+ // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
+ // (in the comments).
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ output_params.rate = 44100;
+ output_params.channels = 2;
+ output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ test_ops_stream_operation(
+ name,
+ ptr::null_mut(), // Use default input device.
+ ptr::null_mut(), // No input parameters.
+ ptr::null_mut(), // Use default output device.
+ &mut output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ operation,
+ );
+}
+
+fn test_default_duplex_stream_operation<F>(name: &'static str, operation: F)
+where
+ F: FnOnce(*mut ffi::cubeb_stream),
+{
+ // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
+ // (in the comments).
+ let mut input_params = ffi::cubeb_stream_params::default();
+ input_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ input_params.rate = 48000;
+ input_params.channels = 1;
+ input_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ input_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ output_params.rate = 44100;
+ output_params.channels = 2;
+ output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ test_ops_stream_operation(
+ name,
+ ptr::null_mut(), // Use default input device.
+ &mut input_params,
+ ptr::null_mut(), // Use default output device.
+ &mut output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ operation,
+ );
+}
+
+fn test_stereo_input_duplex_stream_operation<F>(name: &'static str, operation: F)
+where
+ F: FnOnce(*mut ffi::cubeb_stream),
+{
+ let mut input_devices = get_devices_info_in_scope(Scope::Input);
+ input_devices.retain(|d| test_device_channels_in_scope(d.id, Scope::Input).unwrap_or(0) >= 2);
+ if input_devices.is_empty() {
+ println!("No stereo input device present. Skipping stereo-input test.");
+ return;
+ }
+
+ let mut input_params = ffi::cubeb_stream_params::default();
+ input_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ input_params.rate = 48000;
+ input_params.channels = 2;
+ input_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ input_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ output_params.rate = 48000;
+ output_params.channels = 2;
+ output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ test_ops_stream_operation(
+ name,
+ input_devices[0].id as ffi::cubeb_devid,
+ &mut input_params,
+ ptr::null_mut(), // Use default output device.
+ &mut output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ operation,
+ );
+}
+
+fn test_default_duplex_voice_stream_operation<F>(name: &'static str, operation: F)
+where
+ F: FnOnce(*mut ffi::cubeb_stream),
+{
+ // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
+ // (in the comments).
+ let mut input_params = ffi::cubeb_stream_params::default();
+ input_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ input_params.rate = 44100;
+ input_params.channels = 1;
+ input_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ input_params.prefs = ffi::CUBEB_STREAM_PREF_VOICE;
+
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ output_params.rate = 48000;
+ output_params.channels = 2;
+ output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_VOICE;
+
+ test_ops_stream_operation(
+ name,
+ ptr::null_mut(), // Use default input device.
+ &mut input_params,
+ ptr::null_mut(), // Use default output device.
+ &mut output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ operation,
+ );
+}
+
+fn test_stereo_input_duplex_voice_stream_operation<F>(name: &'static str, operation: F)
+where
+ F: FnOnce(*mut ffi::cubeb_stream),
+{
+ let mut input_devices = get_devices_info_in_scope(Scope::Input);
+ input_devices.retain(|d| test_device_channels_in_scope(d.id, Scope::Input).unwrap_or(0) >= 2);
+ if input_devices.is_empty() {
+ println!("No stereo input device present. Skipping stereo-input test.");
+ return;
+ }
+
+ let mut input_params = ffi::cubeb_stream_params::default();
+ input_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ input_params.rate = 44100;
+ input_params.channels = 2;
+ input_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ input_params.prefs = ffi::CUBEB_STREAM_PREF_VOICE;
+
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ output_params.rate = 44100;
+ output_params.channels = 2;
+ output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_VOICE;
+
+ test_ops_stream_operation(
+ name,
+ input_devices[0].id as ffi::cubeb_devid,
+ &mut input_params,
+ ptr::null_mut(), // Use default output device.
+ &mut output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ operation,
+ );
+}
+
+#[test]
+fn test_ops_stream_init_and_destroy() {
+ test_default_output_stream_operation("stream: init and destroy", |_stream| {});
+}
+
+#[test]
+fn test_ops_stream_start() {
+ test_default_output_stream_operation("stream: start", |stream| {
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ });
+}
+
+#[test]
+fn test_ops_stream_stop() {
+ test_default_output_stream_operation("stream: stop", |stream| {
+ assert_eq!(unsafe { OPS.stream_stop.unwrap()(stream) }, ffi::CUBEB_OK);
+ });
+}
+
+#[test]
+fn test_ops_stream_position() {
+ test_default_output_stream_operation("stream: position", |stream| {
+ let mut position = u64::max_value();
+ assert_eq!(
+ unsafe { OPS.stream_get_position.unwrap()(stream, &mut position) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(position, 0);
+ });
+}
+
+#[test]
+fn test_ops_stream_latency() {
+ test_default_output_stream_operation("stream: latency", |stream| {
+ let mut latency = u32::max_value();
+ assert_eq!(
+ unsafe { OPS.stream_get_latency.unwrap()(stream, &mut latency) },
+ ffi::CUBEB_OK
+ );
+ assert_ne!(latency, u32::max_value());
+ });
+}
+
+#[test]
+fn test_ops_stream_set_volume() {
+ test_default_output_stream_operation("stream: set volume", |stream| {
+ assert_eq!(
+ unsafe { OPS.stream_set_volume.unwrap()(stream, 0.5) },
+ ffi::CUBEB_OK
+ );
+ });
+}
+
+#[test]
+fn test_ops_stream_current_device() {
+ test_default_output_stream_operation("stream: get current device and destroy it", |stream| {
+ if test_get_default_device(Scope::Input).is_none()
+ || test_get_default_device(Scope::Output).is_none()
+ {
+ println!("stream_get_current_device only works when the machine has both input and output devices");
+ return;
+ }
+
+ let mut device: *mut ffi::cubeb_device = ptr::null_mut();
+ if unsafe { OPS.stream_get_current_device.unwrap()(stream, &mut device) } != ffi::CUBEB_OK {
+ // It can happen when we fail to get the device source.
+ println!("stream_get_current_device fails. Skip this test.");
+ return;
+ }
+
+ assert!(!device.is_null());
+ // Uncomment the below to print out the results.
+ // let deviceref = unsafe { DeviceRef::from_ptr(device) };
+ // println!(
+ // "output: {}",
+ // deviceref.output_name().unwrap_or("(no device name)")
+ // );
+ // println!(
+ // "input: {}",
+ // deviceref.input_name().unwrap_or("(no device name)")
+ // );
+ assert_eq!(
+ unsafe { OPS.stream_device_destroy.unwrap()(stream, device) },
+ ffi::CUBEB_OK
+ );
+ });
+}
+
+#[test]
+fn test_ops_stream_device_destroy() {
+ test_default_output_stream_operation("stream: destroy null device", |stream| {
+ assert_eq!(
+ unsafe { OPS.stream_device_destroy.unwrap()(stream, ptr::null_mut()) },
+ ffi::CUBEB_OK // It returns OK anyway.
+ );
+ });
+}
+
+#[test]
+fn test_ops_stream_register_device_changed_callback() {
+ extern "C" fn callback(_: *mut c_void) {}
+
+ test_default_output_stream_operation("stream: register device changed callback", |stream| {
+ assert_eq!(
+ unsafe { OPS.stream_register_device_changed_callback.unwrap()(stream, Some(callback)) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(
+ unsafe { OPS.stream_register_device_changed_callback.unwrap()(stream, Some(callback)) },
+ ffi::CUBEB_ERROR_INVALID_PARAMETER
+ );
+ assert_eq!(
+ unsafe { OPS.stream_register_device_changed_callback.unwrap()(stream, None) },
+ ffi::CUBEB_OK
+ );
+ });
+}
+
+#[test]
+fn test_ops_stereo_input_duplex_stream_init_and_destroy() {
+ test_stereo_input_duplex_stream_operation(
+ "stereo-input duplex stream: init and destroy",
+ |_stream| {},
+ );
+}
+
+#[test]
+fn test_ops_stereo_input_duplex_stream_start() {
+ test_stereo_input_duplex_stream_operation("stereo-input duplex stream: start", |stream| {
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ });
+}
+
+#[test]
+fn test_ops_stereo_input_duplex_stream_stop() {
+ test_stereo_input_duplex_stream_operation("stereo-input duplex stream: stop", |stream| {
+ assert_eq!(unsafe { OPS.stream_stop.unwrap()(stream) }, ffi::CUBEB_OK);
+ });
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_init_and_destroy() {
+ test_default_duplex_voice_stream_operation(
+ "duplex voice stream: init and destroy",
+ |_stream| {},
+ );
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_start() {
+ test_default_duplex_voice_stream_operation("duplex voice stream: start", |stream| {
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ });
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_stop() {
+ test_default_duplex_voice_stream_operation("duplex voice stream: stop", |stream| {
+ assert_eq!(unsafe { OPS.stream_stop.unwrap()(stream) }, ffi::CUBEB_OK);
+ });
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_set_input_mute() {
+ test_default_duplex_voice_stream_operation("duplex voice stream: mute", |stream| {
+ assert_eq!(
+ unsafe { OPS.stream_set_input_mute.unwrap()(stream, 1) },
+ ffi::CUBEB_OK
+ );
+ });
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_set_input_mute_before_start() {
+ test_default_duplex_voice_stream_operation(
+ "duplex voice stream: mute before start",
+ |stream| {
+ assert_eq!(
+ unsafe { OPS.stream_set_input_mute.unwrap()(stream, 1) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ },
+ );
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_set_input_mute_before_start_with_reinit() {
+ test_default_duplex_voice_stream_operation(
+ "duplex voice stream: mute before start with reinit",
+ |stream| {
+ assert_eq!(
+ unsafe { OPS.stream_set_input_mute.unwrap()(stream, 1) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+
+ // Hacky cast, but testing this here was simplest for now.
+ let stm = unsafe { &mut *(stream as *mut AudioUnitStream) };
+ stm.reinit_async();
+ let queue = stm.queue.clone();
+ let mut mute_after_reinit = false;
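+            // reinit_async() queues the reinit on the stream's serial task queue, so
+            // running this closure synchronously on the same queue should only execute
+            // after the input unit has been rebuilt, letting us check that the mute
+            // state survived the reinit.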
+ queue.run_sync(|| {
+ let mut mute: u32 = 0;
+ let r = audio_unit_get_property(
+ stm.core_stream_data.input_unit,
+ kAUVoiceIOProperty_MuteOutput,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &mut mute,
+ &mut mem::size_of::<u32>(),
+ );
+ assert_eq!(r, NO_ERR);
+ mute_after_reinit = mute == 1;
+ });
+            assert!(mute_after_reinit);
+ },
+ );
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_set_input_mute_after_start() {
+ test_default_duplex_voice_stream_operation("duplex voice stream: mute after start", |stream| {
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ assert_eq!(
+ unsafe { OPS.stream_set_input_mute.unwrap()(stream, 1) },
+ ffi::CUBEB_OK
+ );
+ });
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_set_input_processing_params() {
+ test_default_duplex_voice_stream_operation("duplex voice stream: processing", |stream| {
+ let params: ffi::cubeb_input_processing_params =
+ ffi::CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL;
+ assert_eq!(
+ unsafe { OPS.stream_set_input_processing_params.unwrap()(stream, params) },
+ ffi::CUBEB_OK
+ );
+ });
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_set_input_processing_params_before_start() {
+ test_default_duplex_voice_stream_operation(
+ "duplex voice stream: processing before start",
+ |stream| {
+ let params: ffi::cubeb_input_processing_params =
+ ffi::CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL;
+ assert_eq!(
+ unsafe { OPS.stream_set_input_processing_params.unwrap()(stream, params) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ },
+ );
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_set_input_processing_params_before_start_with_reinit() {
+ test_default_duplex_voice_stream_operation(
+ "duplex voice stream: processing before start with reinit",
+ |stream| {
+ let params: ffi::cubeb_input_processing_params =
+ ffi::CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL;
+ assert_eq!(
+ unsafe { OPS.stream_set_input_processing_params.unwrap()(stream, params) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+
+ // Hacky cast, but testing this here was simplest for now.
+ let stm = unsafe { &mut *(stream as *mut AudioUnitStream) };
+ stm.reinit_async();
+ let queue = stm.queue.clone();
+ let mut params_after_reinit: ffi::cubeb_input_processing_params =
+ ffi::CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ queue.run_sync(|| {
+ let mut params: ffi::cubeb_input_processing_params =
+ ffi::CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ let mut agc: u32 = 0;
+ let r = audio_unit_get_property(
+ stm.core_stream_data.input_unit,
+ kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &mut agc,
+ &mut mem::size_of::<u32>(),
+ );
+ assert_eq!(r, NO_ERR);
+ if agc == 1 {
+ params = params | ffi::CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL;
+ }
+ let mut bypass: u32 = 0;
+ let r = audio_unit_get_property(
+ stm.core_stream_data.input_unit,
+ kAUVoiceIOProperty_BypassVoiceProcessing,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &mut bypass,
+ &mut mem::size_of::<u32>(),
+ );
+ assert_eq!(r, NO_ERR);
+ if bypass == 0 {
+ params = params
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION;
+ }
+ params_after_reinit = params;
+ });
+ assert_eq!(params, params_after_reinit);
+ },
+ );
+}
+
+#[test]
+fn test_ops_duplex_voice_stream_set_input_processing_params_after_start() {
+ test_default_duplex_voice_stream_operation(
+ "duplex voice stream: processing after start",
+ |stream| {
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ let params: ffi::cubeb_input_processing_params =
+ ffi::CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION
+ | ffi::CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL;
+ assert_eq!(
+ unsafe { OPS.stream_set_input_processing_params.unwrap()(stream, params) },
+ ffi::CUBEB_OK
+ );
+ },
+ );
+}
+
+#[test]
+fn test_ops_stereo_input_duplex_voice_stream_init_and_destroy() {
+ test_stereo_input_duplex_voice_stream_operation(
+ "stereo-input duplex voice stream: init and destroy",
+ |_stream| {},
+ );
+}
+
+#[test]
+fn test_ops_stereo_input_duplex_voice_stream_start() {
+ test_stereo_input_duplex_voice_stream_operation(
+ "stereo-input duplex voice stream: start",
+ |stream| {
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ },
+ );
+}
+
+#[test]
+fn test_ops_stereo_input_duplex_voice_stream_stop() {
+ test_stereo_input_duplex_voice_stream_operation(
+ "stereo-input duplex voice stream: stop",
+ |stream| {
+ assert_eq!(unsafe { OPS.stream_stop.unwrap()(stream) }, ffi::CUBEB_OK);
+ },
+ );
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/manual.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/manual.rs
new file mode 100644
index 0000000000..b2b2241cc9
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/manual.rs
@@ -0,0 +1,614 @@
+use super::utils::{
+ test_get_devices_in_scope, test_ops_context_operation, test_ops_stream_operation, Scope,
+ StreamType, TestDeviceInfo, TestDeviceSwitcher,
+};
+use super::*;
+use std::io;
+use std::sync::atomic::AtomicBool;
+
+#[ignore]
+#[test]
+fn test_switch_output_device() {
+ use std::f32::consts::PI;
+
+ const SAMPLE_FREQUENCY: u32 = 48_000;
+
+    // Do nothing unless there are at least 2 output devices available.
+ let devices = test_get_devices_in_scope(Scope::Output);
+ if devices.len() < 2 {
+ println!("Need 2 output devices at least.");
+ return;
+ }
+
+ let mut output_device_switcher = TestDeviceSwitcher::new(Scope::Output);
+
+ // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
+ // (in the comments).
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_S16NE;
+ output_params.rate = SAMPLE_FREQUENCY;
+ output_params.channels = 1;
+ output_params.layout = ffi::CUBEB_LAYOUT_MONO;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ // Used to calculate the tone's wave.
+ let mut position: i64 = 0; // TODO: Use Atomic instead.
+
+ test_ops_stream_operation(
+ "stream: North American dial tone",
+ ptr::null_mut(), // Use default input device.
+ ptr::null_mut(), // No input parameters.
+ ptr::null_mut(), // Use default output device.
+ &mut output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(data_callback),
+ Some(state_callback),
+ &mut position as *mut i64 as *mut c_void,
+ |stream| {
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ println!("Start playing! Enter 's' to switch device. Enter 'q' to quit.");
+ loop {
+ let mut input = String::new();
+ let _ = io::stdin().read_line(&mut input);
+ assert_eq!(input.pop().unwrap(), '\n');
+ match input.as_str() {
+ "s" => {
+ output_device_switcher.next();
+ }
+ "q" => {
+ println!("Quit.");
+ break;
+ }
+ x => {
+ println!("Unknown command: {}", x);
+ }
+ }
+ }
+ assert_eq!(unsafe { OPS.stream_stop.unwrap()(stream) }, ffi::CUBEB_OK);
+ },
+ );
+
+ extern "C" fn state_callback(
+ stream: *mut ffi::cubeb_stream,
+ user_ptr: *mut c_void,
+ state: ffi::cubeb_state,
+ ) {
+ assert!(!stream.is_null());
+ assert!(!user_ptr.is_null());
+ assert_ne!(state, ffi::CUBEB_STATE_ERROR);
+ }
+
+ extern "C" fn data_callback(
+ stream: *mut ffi::cubeb_stream,
+ user_ptr: *mut c_void,
+ _input_buffer: *const c_void,
+ output_buffer: *mut c_void,
+ nframes: i64,
+ ) -> i64 {
+ assert!(!stream.is_null());
+ assert!(!user_ptr.is_null());
+ assert!(!output_buffer.is_null());
+
+ let buffer = unsafe {
+ let ptr = output_buffer as *mut i16;
+ let len = nframes as usize;
+ slice::from_raw_parts_mut(ptr, len)
+ };
+
+ let position = unsafe { &mut *(user_ptr as *mut i64) };
+
+        // Generate the tone on the fly: the sum of 350 Hz and 440 Hz sines (the North American dial tone).
+ for data in buffer.iter_mut() {
+ let t1 = (2.0 * PI * 350.0 * (*position) as f32 / SAMPLE_FREQUENCY as f32).sin();
+ let t2 = (2.0 * PI * 440.0 * (*position) as f32 / SAMPLE_FREQUENCY as f32).sin();
+ *data = f32_to_i16_sample(0.5 * (t1 + t2));
+ *position += 1;
+ }
+
+ nframes
+ }
+
+ fn f32_to_i16_sample(x: f32) -> i16 {
+ (x * f32::from(i16::max_value())) as i16
+ }
+}
+
+#[ignore]
+#[test]
+fn test_device_collection_change() {
+ const DUMMY_PTR: *mut c_void = 0xDEAD_BEEF as *mut c_void;
+ let mut context = AudioUnitContext::new();
+ println!("Context allocated @ {:p}", &context);
+
+ extern "C" fn input_changed_callback(context: *mut ffi::cubeb, data: *mut c_void) {
+ println!(
+ "Input device collection @ {:p} is changed. Data @ {:p}",
+ context, data
+ );
+ assert_eq!(data, DUMMY_PTR);
+ }
+
+ extern "C" fn output_changed_callback(context: *mut ffi::cubeb, data: *mut c_void) {
+ println!(
+ "output device collection @ {:p} is changed. Data @ {:p}",
+ context, data
+ );
+ assert_eq!(data, DUMMY_PTR);
+ }
+
+ context.register_device_collection_changed(
+ DeviceType::INPUT,
+ Some(input_changed_callback),
+ DUMMY_PTR,
+ );
+
+ context.register_device_collection_changed(
+ DeviceType::OUTPUT,
+ Some(output_changed_callback),
+ DUMMY_PTR,
+ );
+
+ println!("Unplug/Plug device to see the event log.\nEnter anything to finish.");
+ let mut input = String::new();
+ let _ = std::io::stdin().read_line(&mut input);
+}
+
+#[ignore]
+#[test]
+fn test_stream_tester() {
+ test_ops_context_operation("context: stream tester", |context_ptr| {
+ let mut stream_ptr: *mut ffi::cubeb_stream = ptr::null_mut();
+ let enable_loopback = AtomicBool::new(false);
+ loop {
+ println!(
+ "commands:\n\
+ \t'q': quit\n\
+ \t'c': create a stream\n\
+ \t'd': destroy a stream\n\
+ \t's': start the created stream\n\
+ \t't': stop the created stream\n\
+ \t'r': register a device changed callback\n\
+ \t'l': set loopback (DUPLEX-only)\n\
+ \t'v': set volume\n\
+ \t'm': set input mute\n\
+ \t'p': set input processing"
+ );
+
+ let mut command = String::new();
+ let _ = io::stdin().read_line(&mut command);
+ assert_eq!(command.pop().unwrap(), '\n');
+
+ match command.as_str() {
+ "q" => {
+ println!("Quit.");
+ destroy_stream(&mut stream_ptr);
+ break;
+ }
+ "c" => create_stream(&mut stream_ptr, context_ptr, &enable_loopback),
+ "d" => destroy_stream(&mut stream_ptr),
+ "s" => start_stream(stream_ptr),
+ "t" => stop_stream(stream_ptr),
+ "r" => register_device_change_callback(stream_ptr),
+ "l" => set_loopback(stream_ptr, &enable_loopback),
+ "v" => set_volume(stream_ptr),
+ "m" => set_input_mute(stream_ptr),
+ "p" => set_input_processing(stream_ptr),
+ x => println!("Unknown command: {}", x),
+ }
+ }
+ });
+
+ fn start_stream(stream_ptr: *mut ffi::cubeb_stream) {
+ if stream_ptr.is_null() {
+ println!("No stream can start.");
+ return;
+ }
+ assert_eq!(
+ unsafe { OPS.stream_start.unwrap()(stream_ptr) },
+ ffi::CUBEB_OK
+ );
+ println!("Stream {:p} started.", stream_ptr);
+ }
+
+ fn stop_stream(stream_ptr: *mut ffi::cubeb_stream) {
+ if stream_ptr.is_null() {
+ println!("No stream can stop.");
+ return;
+ }
+ assert_eq!(
+ unsafe { OPS.stream_stop.unwrap()(stream_ptr) },
+ ffi::CUBEB_OK
+ );
+ println!("Stream {:p} stopped.", stream_ptr);
+ }
+
+ fn set_volume(stream_ptr: *mut ffi::cubeb_stream) {
+ if stream_ptr.is_null() {
+ println!("No stream can set volume.");
+ return;
+ }
+ const VOL: f32 = 0.5;
+ assert_eq!(
+ unsafe { OPS.stream_set_volume.unwrap()(stream_ptr, VOL) },
+ ffi::CUBEB_OK
+ );
+ println!("Set stream {:p} volume to {}", stream_ptr, VOL);
+ }
+
+ fn set_loopback(stream_ptr: *mut ffi::cubeb_stream, enable_loopback: &AtomicBool) {
+ if stream_ptr.is_null() {
+ println!("No stream can set loopback.");
+ return;
+ }
+ let stm = unsafe { &mut *(stream_ptr as *mut AudioUnitStream) };
+ if !stm.core_stream_data.has_input() || !stm.core_stream_data.has_output() {
+ println!("Duplex stream needed to set loopback");
+ return;
+ }
+ let mut loopback: Option<bool> = None;
+ while loopback.is_none() {
+ println!("Select action:\n1) Enable loopback, 2) Disable loopback");
+ let mut input = String::new();
+ let _ = io::stdin().read_line(&mut input);
+ assert_eq!(input.pop().unwrap(), '\n');
+ loopback = match input.as_str() {
+ "1" => Some(true),
+ "2" => Some(false),
+ _ => {
+ println!("Invalid action. Select again.\n");
+ None
+ }
+ }
+ }
+ let loopback = loopback.unwrap();
+ enable_loopback.store(loopback, Ordering::SeqCst);
+ println!(
+ "Loopback {} for stream {:p}",
+ if loopback { "enabled" } else { "disabled" },
+ stream_ptr
+ );
+ }
+
+ fn set_input_mute(stream_ptr: *mut ffi::cubeb_stream) {
+ if stream_ptr.is_null() {
+ println!("No stream can set input mute.");
+ return;
+ }
+ let stm = unsafe { &mut *(stream_ptr as *mut AudioUnitStream) };
+ if !stm.core_stream_data.has_input() {
+ println!("Input stream needed to set loopback");
+ return;
+ }
+ let mut mute: Option<bool> = None;
+ while mute.is_none() {
+ println!("Select action:\n1) Mute, 2) Unmute");
+ let mut input = String::new();
+ let _ = io::stdin().read_line(&mut input);
+ assert_eq!(input.pop().unwrap(), '\n');
+ mute = match input.as_str() {
+ "1" => Some(true),
+ "2" => Some(false),
+ _ => {
+ println!("Invalid action. Select again.\n");
+ None
+ }
+ }
+ }
+ let mute = mute.unwrap();
+ let res = unsafe { OPS.stream_set_input_mute.unwrap()(stream_ptr, mute.into()) };
+ println!(
+ "{} set stream {:p} input {}",
+ if res == ffi::CUBEB_OK {
+ "Successfully"
+ } else {
+ "Failed to"
+ },
+ stream_ptr,
+ if mute { "mute" } else { "unmute" }
+ );
+ }
+
+ fn set_input_processing(stream_ptr: *mut ffi::cubeb_stream) {
+ if stream_ptr.is_null() {
+ println!("No stream can set input processing.");
+ return;
+ }
+ let stm = unsafe { &mut *(stream_ptr as *mut AudioUnitStream) };
+ if !stm.core_stream_data.using_voice_processing_unit() {
+ println!("Duplex stream with voice processing needed to set input processing params");
+ return;
+ }
+ let mut params = InputProcessingParams::NONE;
+ {
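+            // A rough way to infer the current params: query the voice-processing unit's
+            // bypass flag; when the effects are not bypassed, assume AEC and NS are active.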
+ let mut bypass = u32::from(true);
+ let mut size: usize = mem::size_of::<u32>();
+ assert_eq!(
+ audio_unit_get_property(
+ stm.core_stream_data.input_unit,
+ kAudioUnitProperty_BypassEffect,
+ kAudioUnitScope_Global,
+ AU_IN_BUS,
+ &mut bypass,
+ &mut size,
+ ),
+ NO_ERR
+ );
+ assert_eq!(size, mem::size_of::<u32>());
+ if bypass == 0 {
+ params.set(InputProcessingParams::ECHO_CANCELLATION, true);
+ params.set(InputProcessingParams::NOISE_SUPPRESSION, true);
+ }
+ }
+ let mut done = false;
+ while !done {
+ println!(
+ "Supported params: {:?}\nCurrent params: {:?}\nSelect action:\n\
+ \t1) Set None\n\
+ \t2) Toggle Echo Cancellation\n\
+ \t3) Toggle Noise Suppression\n\
+ \t4) Toggle Automatic Gain Control\n\
+ \t5) Toggle Voice Isolation\n\
+ \t6) Set All\n\
+ \t0) Done",
+ stm.context.supported_input_processing_params().unwrap(),
+ params
+ );
+ let mut input = String::new();
+ let _ = io::stdin().read_line(&mut input);
+ assert_eq!(input.pop().unwrap(), '\n');
+ match input.as_str() {
+ "1" => params = InputProcessingParams::NONE,
+ "2" => params.toggle(InputProcessingParams::ECHO_CANCELLATION),
+ "3" => params.toggle(InputProcessingParams::NOISE_SUPPRESSION),
+ "4" => params.toggle(InputProcessingParams::AUTOMATIC_GAIN_CONTROL),
+ "5" => params.toggle(InputProcessingParams::VOICE_ISOLATION),
+ "6" => params = InputProcessingParams::all(),
+ "0" => done = true,
+ _ => println!("Invalid action. Select again.\n"),
+ }
+ }
+ let res =
+ unsafe { OPS.stream_set_input_processing_params.unwrap()(stream_ptr, params.bits()) };
+ println!(
+ "{} set stream {:p} input processing params to {:?}",
+ if res == ffi::CUBEB_OK {
+ "Successfully"
+ } else {
+ "Failed to"
+ },
+ stream_ptr,
+ params,
+ );
+ }
+
+ fn register_device_change_callback(stream_ptr: *mut ffi::cubeb_stream) {
+ extern "C" fn callback(user_ptr: *mut c_void) {
+ println!("user pointer @ {:p}", user_ptr);
+ assert!(user_ptr.is_null());
+ }
+
+ if stream_ptr.is_null() {
+ println!("No stream for registering the callback.");
+ return;
+ }
+ assert_eq!(
+ unsafe {
+ OPS.stream_register_device_changed_callback.unwrap()(stream_ptr, Some(callback))
+ },
+ ffi::CUBEB_OK
+ );
+ println!("Stream {:p} now has a device change callback.", stream_ptr);
+ }
+
+ fn destroy_stream(stream_ptr: &mut *mut ffi::cubeb_stream) {
+ if stream_ptr.is_null() {
+ println!("No need to destroy stream.");
+ return;
+ }
+ unsafe {
+ OPS.stream_destroy.unwrap()(*stream_ptr);
+ }
+ println!("Stream {:p} destroyed.", *stream_ptr);
+ *stream_ptr = ptr::null_mut();
+ }
+
+ fn create_stream(
+ stream_ptr: &mut *mut ffi::cubeb_stream,
+ context_ptr: *mut ffi::cubeb,
+ enable_loopback: &AtomicBool,
+ ) {
+ if !stream_ptr.is_null() {
+ println!("Stream has been created.");
+ return;
+ }
+
+ let mut stream_type = StreamType::empty();
+ while stream_type.is_empty() {
+ println!("Select stream type:\n1) Input 2) Output 3) In-Out Duplex 4) Back");
+ let mut input = String::new();
+ let _ = io::stdin().read_line(&mut input);
+ assert_eq!(input.pop().unwrap(), '\n');
+ stream_type = match input.as_str() {
+ "1" => StreamType::INPUT,
+ "2" => StreamType::OUTPUT,
+ "3" => StreamType::DUPLEX,
+ "4" => {
+ println!("Do nothing.");
+ return;
+ }
+ _ => {
+ println!("Invalid type. Select again.\n");
+ StreamType::empty()
+ }
+ }
+ }
+
+ let device_selector = |scope: Scope| -> AudioObjectID {
+ loop {
+ println!(
+ "Select {} device:\n",
+ if scope == Scope::Input {
+ "input"
+ } else {
+ "output"
+ }
+ );
+ let mut list = vec![];
+ list.push(kAudioObjectUnknown);
+ println!("{:>4}: System default", 0);
+ let devices = test_get_devices_in_scope(scope.clone());
+ for (idx, device) in devices.iter().enumerate() {
+ list.push(*device);
+ let info = TestDeviceInfo::new(*device, scope.clone());
+ println!(
+ "{:>4}: {}\n\tAudioObjectID: {}\n\tuid: {}",
+ idx + 1,
+ info.label,
+ device,
+ info.uid
+ );
+ }
+
+ let mut input = String::new();
+ io::stdin().read_line(&mut input).unwrap();
+ let n: usize = match input.trim().parse() {
+ Err(_) => {
+ println!("Invalid option. Try again.\n");
+ continue;
+ }
+ Ok(n) => n,
+ };
+ if n >= list.len() {
+ println!("Invalid option. Try again.\n");
+ continue;
+ }
+ return list[n];
+ }
+ };
+
+ let mut input_params = get_dummy_stream_params(Scope::Input);
+ let mut output_params = get_dummy_stream_params(Scope::Output);
+
+ let (input_device, input_stream_params) = if stream_type.contains(StreamType::INPUT) {
+ (
+ device_selector(Scope::Input),
+ &mut input_params as *mut ffi::cubeb_stream_params,
+ )
+ } else {
+ (
+ kAudioObjectUnknown, /* default input device */
+ ptr::null_mut(),
+ )
+ };
+
+ let (output_device, output_stream_params) = if stream_type.contains(StreamType::OUTPUT) {
+ (
+ device_selector(Scope::Output),
+ &mut output_params as *mut ffi::cubeb_stream_params,
+ )
+ } else {
+ (
+ kAudioObjectUnknown, /* default output device */
+ ptr::null_mut(),
+ )
+ };
+
+ let stream_name = CString::new("stream tester").unwrap();
+
+ assert_eq!(
+ unsafe {
+ OPS.stream_init.unwrap()(
+ context_ptr,
+ stream_ptr,
+ stream_name.as_ptr(),
+ input_device as ffi::cubeb_devid,
+ input_stream_params,
+ output_device as ffi::cubeb_devid,
+ output_stream_params,
+ 4096, // latency
+ Some(data_callback),
+ Some(state_callback),
+ enable_loopback as *const AtomicBool as *mut c_void, // user pointer
+ )
+ },
+ ffi::CUBEB_OK
+ );
+ assert!(!stream_ptr.is_null());
+ println!("Stream {:p} created.", *stream_ptr);
+
+ extern "C" fn state_callback(
+ stream: *mut ffi::cubeb_stream,
+ _user_ptr: *mut c_void,
+ state: ffi::cubeb_state,
+ ) {
+ assert!(!stream.is_null());
+ let s = State::from(state);
+ println!("state: {:?}", s);
+ }
+
+ extern "C" fn data_callback(
+ stream: *mut ffi::cubeb_stream,
+ user_ptr: *mut c_void,
+ input_buffer: *const c_void,
+ output_buffer: *mut c_void,
+ nframes: i64,
+ ) -> i64 {
+ assert!(!stream.is_null());
+
+ let enable_loopback = unsafe { &mut *(user_ptr as *mut AtomicBool) };
+ let loopback = enable_loopback.load(Ordering::SeqCst);
+ if loopback && !input_buffer.is_null() && !output_buffer.is_null() {
+ // Dupe the mono input to stereo
+ let stm = unsafe { &mut *(stream as *mut AudioUnitStream) };
+ assert_eq!(stm.core_stream_data.input_stream_params.channels(), 1);
+ let channels = stm.core_stream_data.output_stream_params.channels() as usize;
+ let sample_size =
+ cubeb_sample_size(stm.core_stream_data.output_stream_params.format());
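+                // The input is mono, so each input frame is one sample (sample_size bytes);
+                // the interleaved output advances by `channels` samples per frame, and the
+                // mono sample is copied into every output channel of that frame.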
+ for f in 0..(nframes as usize) {
+ let input_offset = f * sample_size;
+ let output_offset = input_offset * channels;
+ for c in 0..channels {
+ unsafe {
+ ptr::copy(
+ input_buffer.add(input_offset) as *const u8,
+ output_buffer.add(output_offset + (sample_size * c)) as *mut u8,
+ sample_size,
+ )
+ };
+ }
+ }
+ } else if !output_buffer.is_null() {
+ // Feed silence data to output buffer
+ let stm = unsafe { &mut *(stream as *mut AudioUnitStream) };
+ let channels = stm.core_stream_data.output_stream_params.channels();
+ let samples = nframes as usize * channels as usize;
+ let sample_size =
+ cubeb_sample_size(stm.core_stream_data.output_stream_params.format());
+ unsafe {
+ ptr::write_bytes(output_buffer, 0, samples * sample_size);
+ }
+ }
+
+ nframes
+ }
+
+ fn get_dummy_stream_params(scope: Scope) -> ffi::cubeb_stream_params {
+            // The stream format for input and output must be the same.
+ const STREAM_FORMAT: u32 = ffi::CUBEB_SAMPLE_FLOAT32NE;
+
+ // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
+ // (in the comments).
+ let mut stream_params = ffi::cubeb_stream_params::default();
+ stream_params.prefs = ffi::CUBEB_STREAM_PREF_VOICE;
+ let (format, rate, channels, layout) = match scope {
+ Scope::Input => (STREAM_FORMAT, 48000, 1, ffi::CUBEB_LAYOUT_MONO),
+ Scope::Output => (STREAM_FORMAT, 44100, 2, ffi::CUBEB_LAYOUT_STEREO),
+ };
+ stream_params.format = format;
+ stream_params.rate = rate;
+ stream_params.channels = channels;
+ stream_params.layout = layout;
+ stream_params
+ }
+ }
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/mod.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/mod.rs
new file mode 100644
index 0000000000..0c193d0dc8
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/mod.rs
@@ -0,0 +1,12 @@
+use super::*;
+
+mod aggregate_device;
+mod api;
+mod backlog;
+mod device_change;
+mod device_property;
+mod interfaces;
+mod manual;
+mod parallel;
+mod tone;
+mod utils;
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/parallel.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/parallel.rs
new file mode 100644
index 0000000000..16063d0011
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/parallel.rs
@@ -0,0 +1,572 @@
+use super::utils::{
+ noop_data_callback, test_audiounit_get_buffer_frame_size, test_get_default_audiounit,
+ test_get_default_device, test_ops_context_operation, PropertyScope, Scope,
+};
+use super::*;
+use std::thread;
+
+// Ignore the test by default to avoid overwriting the buffer frame size of the device that is
+// currently used by other streams in other tests.
+#[ignore]
+#[test]
+fn test_parallel_ops_init_streams_in_parallel_input() {
+ const THREADS: u32 = 50;
+ create_streams_by_ops_in_parallel_with_different_latency(
+ THREADS,
+ StreamType::Input,
+ |streams| {
+            // All the latency frames should be the same as the first stream's, since the
+            // latency frames cannot be changed while another stream is operating in parallel.
+ let mut latency_frames = vec![];
+ let mut in_buffer_frame_sizes = vec![];
+
+ for stream in streams {
+ latency_frames.push(stream.latency_frames);
+
+ assert!(!stream.core_stream_data.input_unit.is_null());
+ let in_buffer_frame_size = test_audiounit_get_buffer_frame_size(
+ stream.core_stream_data.input_unit,
+ Scope::Input,
+ PropertyScope::Output,
+ )
+ .unwrap();
+ in_buffer_frame_sizes.push(in_buffer_frame_size);
+
+ assert!(stream.core_stream_data.output_unit.is_null());
+ }
+
+            // Make sure all the latency frames are the same as the first stream's.
+ for i in 0..latency_frames.len() - 1 {
+ assert_eq!(latency_frames[i], latency_frames[i + 1]);
+ }
+
+            // Make sure all the buffer frame sizes on the output scope of the input audiounit are
+            // the same as the latency defined by the first initialized stream.
+ for i in 0..in_buffer_frame_sizes.len() - 1 {
+ assert_eq!(in_buffer_frame_sizes[i], in_buffer_frame_sizes[i + 1]);
+ }
+ },
+ );
+}
+
+// Ignore the test by default to avoid overwriting the buffer frame size of the device that is
+// currently used by other streams in other tests.
+#[ignore]
+#[test]
+fn test_parallel_ops_init_streams_in_parallel_output() {
+ const THREADS: u32 = 50;
+ create_streams_by_ops_in_parallel_with_different_latency(
+ THREADS,
+ StreamType::Output,
+ |streams| {
+            // All the latency frames should be the same as the first stream's, since the
+            // latency frames cannot be changed while another stream is operating in parallel.
+ let mut latency_frames = vec![];
+ let mut out_buffer_frame_sizes = vec![];
+
+ for stream in streams {
+ latency_frames.push(stream.latency_frames);
+
+ assert!(stream.core_stream_data.input_unit.is_null());
+
+ assert!(!stream.core_stream_data.output_unit.is_null());
+ let out_buffer_frame_size = test_audiounit_get_buffer_frame_size(
+ stream.core_stream_data.output_unit,
+ Scope::Output,
+ PropertyScope::Input,
+ )
+ .unwrap();
+ out_buffer_frame_sizes.push(out_buffer_frame_size);
+ }
+
+            // Make sure all the latency frames are the same as the first stream's.
+ for i in 0..latency_frames.len() - 1 {
+ assert_eq!(latency_frames[i], latency_frames[i + 1]);
+ }
+
+ // Make sure all the buffer frame sizes on input scope of the output audiounit are same
+            // Make sure all the buffer frame sizes on the input scope of the output audiounit are
+            // the same as the latency defined by the first initialized stream.
+ assert_eq!(out_buffer_frame_sizes[i], out_buffer_frame_sizes[i + 1]);
+ }
+ },
+ );
+}
+
+// Ignore the test by default to avoid overwriting the buffer frame size of the device that is
+// currently used by other streams in other tests.
+#[ignore]
+#[test]
+fn test_parallel_ops_init_streams_in_parallel_duplex() {
+ const THREADS: u32 = 50;
+ create_streams_by_ops_in_parallel_with_different_latency(
+ THREADS,
+ StreamType::Duplex,
+ |streams| {
+            // All the latency frames should be the same as the first stream's, since the
+            // latency frames cannot be changed while another stream is operating in parallel.
+ let mut latency_frames = vec![];
+ let mut in_buffer_frame_sizes = vec![];
+ let mut out_buffer_frame_sizes = vec![];
+
+ for stream in streams {
+ latency_frames.push(stream.latency_frames);
+
+ assert!(!stream.core_stream_data.input_unit.is_null());
+ let in_buffer_frame_size = test_audiounit_get_buffer_frame_size(
+ stream.core_stream_data.input_unit,
+ Scope::Input,
+ PropertyScope::Output,
+ )
+ .unwrap();
+ in_buffer_frame_sizes.push(in_buffer_frame_size);
+
+ assert!(!stream.core_stream_data.output_unit.is_null());
+ let out_buffer_frame_size = test_audiounit_get_buffer_frame_size(
+ stream.core_stream_data.output_unit,
+ Scope::Output,
+ PropertyScope::Input,
+ )
+ .unwrap();
+ out_buffer_frame_sizes.push(out_buffer_frame_size);
+ }
+
+            // Make sure all the latency frames are the same as the first stream's.
+ for i in 0..latency_frames.len() - 1 {
+ assert_eq!(latency_frames[i], latency_frames[i + 1]);
+ }
+
+            // Make sure all the buffer frame sizes on the output scope of the input audiounit are
+            // the same as the latency defined by the first initialized stream.
+ for i in 0..in_buffer_frame_sizes.len() - 1 {
+ assert_eq!(in_buffer_frame_sizes[i], in_buffer_frame_sizes[i + 1]);
+ }
+
+            // Make sure all the buffer frame sizes on the input scope of the output audiounit are
+            // the same as the latency defined by the first initialized stream.
+ for i in 0..out_buffer_frame_sizes.len() - 1 {
+ assert_eq!(out_buffer_frame_sizes[i], out_buffer_frame_sizes[i + 1]);
+ }
+ },
+ );
+}
+
+fn create_streams_by_ops_in_parallel_with_different_latency<F>(
+ amount: u32,
+ stm_type: StreamType,
+ callback: F,
+) where
+ F: FnOnce(Vec<&AudioUnitStream>),
+{
+ let default_input = test_get_default_device(Scope::Input);
+ let default_output = test_get_default_device(Scope::Output);
+
+ let has_input = stm_type == StreamType::Input || stm_type == StreamType::Duplex;
+ let has_output = stm_type == StreamType::Output || stm_type == StreamType::Duplex;
+
+ if has_input && default_input.is_none() {
+ println!("No input device to perform the test.");
+ return;
+ }
+
+ if has_output && default_output.is_none() {
+ println!("No output device to perform the test.");
+ return;
+ }
+
+ test_ops_context_operation("context: init and destroy", |context_ptr| {
+ let context_ptr_value = context_ptr as usize;
+
+ let mut join_handles = vec![];
+ for i in 0..amount {
+ // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
+ // (in the comments).
+ let mut input_params = ffi::cubeb_stream_params::default();
+ input_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ input_params.rate = 48_000;
+ input_params.channels = 1;
+ input_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ input_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ output_params.rate = 44100;
+ output_params.channels = 2;
+ output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+            // Latency cannot be changed if another stream is operating in parallel. All the
+            // latency values should be set to the same value as that of the first stream
+            // operating in the context.
+ let latency_frames = SAFE_MIN_LATENCY_FRAMES + i;
+ assert!(latency_frames < SAFE_MAX_LATENCY_FRAMES);
+
+            // Create many streams within the same context. The order of the stream creation
+            // is random. (The order of execution of the spawned threads is random.)
+            // It's super dangerous to pass `context_ptr_value` across threads and convert it back
+            // to a pointer. However, it's the cheapest way to make sure the inside mutex works.
+ let thread_name = format!("stream {} @ context {:?}", i, context_ptr);
+ join_handles.push(
+ thread::Builder::new()
+ .name(thread_name)
+ .spawn(move || {
+ let context_ptr = context_ptr_value as *mut ffi::cubeb;
+ let mut stream: *mut ffi::cubeb_stream = ptr::null_mut();
+ let stream_name = CString::new(format!("stream {}", i)).unwrap();
+ assert_eq!(
+ unsafe {
+ OPS.stream_init.unwrap()(
+ context_ptr,
+ &mut stream,
+ stream_name.as_ptr(),
+ ptr::null_mut(), // Use default input device.
+ if has_input {
+ &mut input_params
+ } else {
+ ptr::null_mut()
+ },
+ ptr::null_mut(), // Use default output device.
+ if has_output {
+ &mut output_params
+ } else {
+ ptr::null_mut()
+ },
+ latency_frames,
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ )
+ },
+ ffi::CUBEB_OK
+ );
+ assert!(!stream.is_null());
+ stream as usize
+ })
+ .unwrap(),
+ );
+ }
+
+ let mut streams = vec![];
+ // Wait for finishing the tasks on the different threads.
+ for handle in join_handles {
+ let stream_ptr_value = handle.join().unwrap();
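+            // Retake ownership of the raw stream pointer so the stream is destroyed when
+            // `streams` goes out of scope.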
+ let stream = unsafe { Box::from_raw(stream_ptr_value as *mut AudioUnitStream) };
+ streams.push(stream);
+ }
+
+ let stream_refs: Vec<&AudioUnitStream> = streams.iter().map(|stm| stm.as_ref()).collect();
+ callback(stream_refs);
+ });
+}
+
+// Ignore the test by default to avoid overwriting the buffer frame size of the device that is
+// currently used by other streams in other tests.
+#[ignore]
+#[test]
+fn test_parallel_init_streams_in_parallel_input() {
+ const THREADS: u32 = 10;
+ create_streams_in_parallel_with_different_latency(THREADS, StreamType::Input, |streams| {
+        // All the latency frames should be the same as the first stream's, since the
+        // latency frames cannot be changed while another stream is operating in parallel.
+ let mut latency_frames = vec![];
+ let mut in_buffer_frame_sizes = vec![];
+
+ for stream in streams {
+ latency_frames.push(stream.latency_frames);
+
+ assert!(!stream.core_stream_data.input_unit.is_null());
+ let in_buffer_frame_size = test_audiounit_get_buffer_frame_size(
+ stream.core_stream_data.input_unit,
+ Scope::Input,
+ PropertyScope::Output,
+ )
+ .unwrap();
+ in_buffer_frame_sizes.push(in_buffer_frame_size);
+
+ assert!(stream.core_stream_data.output_unit.is_null());
+ }
+
+        // Make sure all the latency frames are the same as the first stream's.
+ for i in 0..latency_frames.len() - 1 {
+ assert_eq!(latency_frames[i], latency_frames[i + 1]);
+ }
+
+        // Make sure all the buffer frame sizes on the output scope of the input audiounit are
+        // the same as the latency defined by the first initialized stream.
+ for i in 0..in_buffer_frame_sizes.len() - 1 {
+ assert_eq!(in_buffer_frame_sizes[i], in_buffer_frame_sizes[i + 1]);
+ }
+ });
+}
+
+// Ignore the test by default to avoid overwriting the buffer frame size of the device that is
+// currently used by other streams in other tests.
+#[ignore]
+#[test]
+fn test_parallel_init_streams_in_parallel_output() {
+ const THREADS: u32 = 10;
+ create_streams_in_parallel_with_different_latency(THREADS, StreamType::Output, |streams| {
+        // All the latency frames should be the same as the first stream's, since the
+        // latency frames cannot be changed while another stream is operating in parallel.
+ let mut latency_frames = vec![];
+ let mut out_buffer_frame_sizes = vec![];
+
+ for stream in streams {
+ latency_frames.push(stream.latency_frames);
+
+ assert!(stream.core_stream_data.input_unit.is_null());
+
+ assert!(!stream.core_stream_data.output_unit.is_null());
+ let out_buffer_frame_size = test_audiounit_get_buffer_frame_size(
+ stream.core_stream_data.output_unit,
+ Scope::Output,
+ PropertyScope::Input,
+ )
+ .unwrap();
+ out_buffer_frame_sizes.push(out_buffer_frame_size);
+ }
+
+        // Make sure all the latency frames are the same as the first stream's.
+ for i in 0..latency_frames.len() - 1 {
+ assert_eq!(latency_frames[i], latency_frames[i + 1]);
+ }
+
+        // Make sure all the buffer frame sizes on the input scope of the output audiounit are
+        // the same as the latency defined by the first initialized stream.
+ for i in 0..out_buffer_frame_sizes.len() - 1 {
+ assert_eq!(out_buffer_frame_sizes[i], out_buffer_frame_sizes[i + 1]);
+ }
+ });
+}
+
+// Ignore the test by default to avoid overwriting the buffer frame size of the device that is
+// currently used by other streams in other tests.
+#[ignore]
+#[test]
+fn test_parallel_init_streams_in_parallel_duplex() {
+ const THREADS: u32 = 10;
+ create_streams_in_parallel_with_different_latency(THREADS, StreamType::Duplex, |streams| {
+        // All the latency frames should be the same as the first stream's, since the
+        // latency frames cannot be changed while another stream is operating in parallel.
+ let mut latency_frames = vec![];
+ let mut in_buffer_frame_sizes = vec![];
+ let mut out_buffer_frame_sizes = vec![];
+
+ for stream in streams {
+ latency_frames.push(stream.latency_frames);
+
+ assert!(!stream.core_stream_data.input_unit.is_null());
+ let in_buffer_frame_size = test_audiounit_get_buffer_frame_size(
+ stream.core_stream_data.input_unit,
+ Scope::Input,
+ PropertyScope::Output,
+ )
+ .unwrap();
+ in_buffer_frame_sizes.push(in_buffer_frame_size);
+
+ assert!(!stream.core_stream_data.output_unit.is_null());
+ let out_buffer_frame_size = test_audiounit_get_buffer_frame_size(
+ stream.core_stream_data.output_unit,
+ Scope::Output,
+ PropertyScope::Input,
+ )
+ .unwrap();
+ out_buffer_frame_sizes.push(out_buffer_frame_size);
+ }
+
+        // Make sure all the latency frames are the same as the first stream's.
+ for i in 0..latency_frames.len() - 1 {
+ assert_eq!(latency_frames[i], latency_frames[i + 1]);
+ }
+
+        // Make sure all the buffer frame sizes on the output scope of the input audiounit are
+        // the same as the latency defined by the first initialized stream.
+ for i in 0..in_buffer_frame_sizes.len() - 1 {
+ assert_eq!(in_buffer_frame_sizes[i], in_buffer_frame_sizes[i + 1]);
+ }
+
+        // Make sure all the buffer frame sizes on the input scope of the output audiounit are
+        // the same as the latency defined by the first initialized stream.
+ for i in 0..out_buffer_frame_sizes.len() - 1 {
+ assert_eq!(out_buffer_frame_sizes[i], out_buffer_frame_sizes[i + 1]);
+ }
+ });
+}
+
+fn create_streams_in_parallel_with_different_latency<F>(
+ amount: u32,
+ stm_type: StreamType,
+ callback: F,
+) where
+ F: FnOnce(Vec<&AudioUnitStream>),
+{
+ let default_input = test_get_default_device(Scope::Input);
+ let default_output = test_get_default_device(Scope::Output);
+
+ let has_input = stm_type == StreamType::Input || stm_type == StreamType::Duplex;
+ let has_output = stm_type == StreamType::Output || stm_type == StreamType::Duplex;
+
+ if has_input && default_input.is_none() {
+ println!("No input device to perform the test.");
+ return;
+ }
+
+ if has_output && default_output.is_none() {
+ println!("No output device to perform the test.");
+ return;
+ }
+
+ let mut context = AudioUnitContext::new();
+
+ let context_ptr_value = &mut context as *mut AudioUnitContext as usize;
+
+ let mut join_handles = vec![];
+ for i in 0..amount {
+ // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
+ // (in the comments).
+ let mut input_params = ffi::cubeb_stream_params::default();
+ input_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ input_params.rate = 48_000;
+ input_params.channels = 1;
+ input_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ input_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
+ output_params.rate = 44100;
+ output_params.channels = 2;
+ output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+        // Latency cannot be changed if another stream is operating in parallel. All the
+        // latency values should be set to the same value as that of the first stream
+        // operating in the context.
+ let latency_frames = SAFE_MIN_LATENCY_FRAMES + i;
+ assert!(latency_frames < SAFE_MAX_LATENCY_FRAMES);
+
+ // Create many streams within the same context. The order of the stream creation
+ // is random. (The order of execution of the spawned threads is random.)
+ // It's super dangerous to pass `context_ptr_value` across threads and convert it back
+ // to a reference. However, it's the cheapest way to make sure the inside mutex works.
+ let thread_name = format!("stream {} @ context {:?}", i, context_ptr_value);
+ join_handles.push(
+ thread::Builder::new()
+ .name(thread_name)
+ .spawn(move || {
+ let context = unsafe { &mut *(context_ptr_value as *mut AudioUnitContext) };
+ let input_params = unsafe { StreamParamsRef::from_ptr(&mut input_params) };
+ let output_params = unsafe { StreamParamsRef::from_ptr(&mut output_params) };
+ let stream = context
+ .stream_init(
+ None,
+ ptr::null_mut(), // Use default input device.
+ if has_input { Some(input_params) } else { None },
+ ptr::null_mut(), // Use default output device.
+ if has_output {
+ Some(output_params)
+ } else {
+ None
+ },
+ latency_frames,
+ Some(noop_data_callback),
+ None, // No state callback.
+ ptr::null_mut(), // No user data pointer.
+ )
+ .unwrap();
+ assert!(!stream.as_ptr().is_null());
+ let stream_ptr_value = stream.as_ptr() as usize;
+                    // Leak the stream here so it isn't destroyed when it goes out of scope.
+ mem::forget(stream);
+ stream_ptr_value
+ })
+ .unwrap(),
+ );
+ }
+
+ let mut streams = vec![];
+ // Wait for finishing the tasks on the different threads.
+ for handle in join_handles {
+ let stream_ptr_value = handle.join().unwrap();
+ // Retake the leaked stream.
+ let stream = unsafe { Box::from_raw(stream_ptr_value as *mut AudioUnitStream) };
+ streams.push(stream);
+ }
+
+ let stream_refs: Vec<&AudioUnitStream> = streams.iter().map(|stm| stm.as_ref()).collect();
+ callback(stream_refs);
+}
+
+#[derive(Debug, PartialEq)]
+enum StreamType {
+ Input,
+ Output,
+ Duplex,
+}
+
+// This is used to interfere with other active streams.
+// From this testing, it's ok to set the buffer frame size of a device that is currently used by
+// other tests. It works on OSX 10.13; not sure if it works on other versions.
+// However, other tests may check the buffer frame size they set at the same time,
+// so we ignore this by default in case those checks fail.
+#[ignore]
+#[test]
+fn test_set_buffer_frame_size_in_parallel() {
+ test_set_buffer_frame_size_in_parallel_in_scope(Scope::Input);
+ test_set_buffer_frame_size_in_parallel_in_scope(Scope::Output);
+}
+
+fn test_set_buffer_frame_size_in_parallel_in_scope(scope: Scope) {
+ const THREADS: u32 = 100;
+
+ let unit = test_get_default_audiounit(scope.clone());
+ if unit.is_none() {
+ println!("No unit for {:?}", scope);
+ return;
+ }
+
+ let (unit_scope, unit_element, prop_scope) = match scope {
+ Scope::Input => (kAudioUnitScope_Output, AU_IN_BUS, PropertyScope::Output),
+ Scope::Output => (kAudioUnitScope_Input, AU_OUT_BUS, PropertyScope::Input),
+ };
+
+ let mut units = vec![];
+ let mut join_handles = vec![];
+ for i in 0..THREADS {
+ let latency_frames = SAFE_MIN_LATENCY_FRAMES + i;
+ assert!(latency_frames < SAFE_MAX_LATENCY_FRAMES);
+ units.push(test_get_default_audiounit(scope.clone()).unwrap());
+ let unit_value = units.last().unwrap().get_inner() as usize;
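+        // The raw AudioUnit pointer is not Send, so pass it to the thread as a usize
+        // and cast it back inside the spawned closure.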
+ join_handles.push(thread::spawn(move || {
+ let status = audio_unit_set_property(
+ unit_value as AudioUnit,
+ kAudioDevicePropertyBufferFrameSize,
+ unit_scope,
+ unit_element,
+ &latency_frames,
+ mem::size_of::<u32>(),
+ );
+ (latency_frames, status)
+ }));
+ }
+
+ let mut latencies = vec![];
+ let mut statuses = vec![];
+ for handle in join_handles {
+ let (latency, status) = handle.join().unwrap();
+ latencies.push(latency);
+ statuses.push(status);
+ }
+
+ let mut buffer_frames_list = vec![];
+ for unit in units.iter() {
+ buffer_frames_list.push(unit.get_buffer_frame_size(scope.clone(), prop_scope.clone()));
+ }
+
+ for status in statuses {
+ assert_eq!(status, NO_ERR);
+ }
+
+ for i in 0..buffer_frames_list.len() - 1 {
+ assert_eq!(buffer_frames_list[i], buffer_frames_list[i + 1]);
+ }
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/tone.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/tone.rs
new file mode 100644
index 0000000000..42cb9ee997
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/tone.rs
@@ -0,0 +1,215 @@
+use super::utils::{test_get_default_device, test_ops_stream_operation, Scope};
+use super::*;
+use std::sync::atomic::{AtomicI64, Ordering};
+
+#[test]
+fn test_dial_tone() {
+ use std::f32::consts::PI;
+ use std::thread;
+ use std::time::Duration;
+
+ const SAMPLE_FREQUENCY: u32 = 48_000;
+
+ // Do nothing if there is no available output device.
+ if test_get_default_device(Scope::Output).is_none() {
+ println!("No output device.");
+ return;
+ }
+
+ // Make sure the parameters meet the requirements of AudioUnitContext::stream_init
+ // (in the comments).
+ let mut output_params = ffi::cubeb_stream_params::default();
+ output_params.format = ffi::CUBEB_SAMPLE_S16NE;
+ output_params.rate = SAMPLE_FREQUENCY;
+ output_params.channels = 1;
+ output_params.layout = ffi::CUBEB_LAYOUT_MONO;
+ output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+
+ struct Closure {
+ buffer_size: AtomicI64,
+ phase: i64,
+ }
+ let mut closure = Closure {
+ buffer_size: AtomicI64::new(0),
+ phase: 0,
+ };
+ let closure_ptr = &mut closure as *mut Closure as *mut c_void;
+
+ test_ops_stream_operation(
+ "stream: North American dial tone",
+ ptr::null_mut(), // Use default input device.
+ ptr::null_mut(), // No input parameters.
+ ptr::null_mut(), // Use default output device.
+ &mut output_params,
+ 4096, // TODO: Get latency by get_min_latency instead ?
+ Some(data_callback),
+ Some(state_callback),
+ closure_ptr,
+ |stream| {
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+
+ #[derive(Debug)]
+ enum State {
+ WaitingForStart,
+ PositionIncreasing,
+ Paused,
+ Resumed,
+ End,
+ }
+ let mut state = State::WaitingForStart;
+ let mut position: u64 = 0;
+ let mut prev_position: u64 = 0;
+ let mut count = 0;
+ const CHECK_COUNT: i32 = 10;
+ loop {
+ thread::sleep(Duration::from_millis(50));
+ assert_eq!(
+ unsafe { OPS.stream_get_position.unwrap()(stream, &mut position) },
+ ffi::CUBEB_OK
+ );
+ println!(
+ "State: {:?}, position: {}, previous position: {}",
+ state, position, prev_position
+ );
+ match &mut state {
+ State::WaitingForStart => {
+ // It's expected to have 0 for a few iterations here: the stream can take
+ // some time to start.
+ if position != prev_position {
+ assert!(position > prev_position);
+ prev_position = position;
+ state = State::PositionIncreasing;
+ }
+ }
+ State::PositionIncreasing => {
+                        // wait a few iterations, check monotonicity
+ if position != prev_position {
+ assert!(position > prev_position);
+ prev_position = position;
+ count += 1;
+ if count > CHECK_COUNT {
+ state = State::Paused;
+ count = 0;
+ assert_eq!(
+ unsafe { OPS.stream_stop.unwrap()(stream) },
+ ffi::CUBEB_OK
+ );
+ // Update the position once paused.
+ assert_eq!(
+ unsafe {
+ OPS.stream_get_position.unwrap()(stream, &mut position)
+ },
+ ffi::CUBEB_OK
+ );
+ prev_position = position;
+ }
+ }
+ }
+ State::Paused => {
+                        // The cubeb_stream_stop call above should synchronously stop the
+                        // callbacks, hence the clock; the assert below must always hold,
+                        // modulo the client-side interpolation.
+ assert!(
+ position == prev_position
+ || position - prev_position
+ <= closure.buffer_size.load(Ordering::SeqCst) as u64
+ );
+ count += 1;
+ prev_position = position;
+ if count > CHECK_COUNT {
+ state = State::Resumed;
+ count = 0;
+ assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
+ }
+ }
+ State::Resumed => {
+ // wait a few iterations, this can take some time to start
+ if position != prev_position {
+ assert!(position > prev_position);
+ prev_position = position;
+ count += 1;
+ if count > CHECK_COUNT {
+ state = State::End;
+ count = 0;
+ assert_eq!(
+ unsafe { OPS.stream_stop.unwrap()(stream) },
+ ffi::CUBEB_OK
+ );
+ assert_eq!(
+ unsafe {
+ OPS.stream_get_position.unwrap()(stream, &mut position)
+ },
+ ffi::CUBEB_OK
+ );
+ prev_position = position;
+ }
+ }
+ }
+ State::End => {
+                        // The cubeb_stream_stop call above should synchronously stop the
+                        // callbacks, hence the clock; the assert below must always hold,
+                        // modulo the client-side interpolation.
+ assert!(
+ position == prev_position
+ || position - prev_position
+ <= closure.buffer_size.load(Ordering::SeqCst) as u64
+ );
+ if position == prev_position {
+ count += 1;
+ if count > CHECK_COUNT {
+ break;
+ }
+ }
+ }
+ }
+ }
+ assert_eq!(unsafe { OPS.stream_stop.unwrap()(stream) }, ffi::CUBEB_OK);
+ },
+ );
+
+ extern "C" fn state_callback(
+ stream: *mut ffi::cubeb_stream,
+ user_ptr: *mut c_void,
+ state: ffi::cubeb_state,
+ ) {
+ assert!(!stream.is_null());
+ assert!(!user_ptr.is_null());
+ assert_ne!(state, ffi::CUBEB_STATE_ERROR);
+ }
+
+ extern "C" fn data_callback(
+ stream: *mut ffi::cubeb_stream,
+ user_ptr: *mut c_void,
+ _input_buffer: *const c_void,
+ output_buffer: *mut c_void,
+ nframes: i64,
+ ) -> i64 {
+ assert!(!stream.is_null());
+ assert!(!user_ptr.is_null());
+ assert!(!output_buffer.is_null());
+
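+        // The output params request mono S16NE, so each frame is a single i16 sample and
+        // the writable slice length equals `nframes`.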
+ let buffer = unsafe {
+ let ptr = output_buffer as *mut i16;
+ let len = nframes as usize;
+ slice::from_raw_parts_mut(ptr, len)
+ };
+
+ let closure = unsafe { &mut *(user_ptr as *mut Closure) };
+
+ closure.buffer_size.store(nframes, Ordering::SeqCst);
+
+ // Generate tone on the fly.
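+        // The North American dial tone is the sum of 350 Hz and 440 Hz sine waves; the 0.5
+        // gain keeps the mixed signal within [-1.0, 1.0] before conversion to i16.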
+ for data in buffer.iter_mut() {
+ let t1 = (2.0 * PI * 350.0 * (closure.phase) as f32 / SAMPLE_FREQUENCY as f32).sin();
+ let t2 = (2.0 * PI * 440.0 * (closure.phase) as f32 / SAMPLE_FREQUENCY as f32).sin();
+ *data = f32_to_i16_sample(0.5 * (t1 + t2));
+ closure.phase += 1;
+ }
+
+ nframes
+ }
+
+ fn f32_to_i16_sample(x: f32) -> i16 {
+ (x * f32::from(i16::max_value())) as i16
+ }
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/tests/utils.rs b/third_party/rust/cubeb-coreaudio/src/backend/tests/utils.rs
new file mode 100644
index 0000000000..ef07aeeeb4
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/tests/utils.rs
@@ -0,0 +1,1247 @@
+use super::*;
+
+// Common Utils
+// ------------------------------------------------------------------------------------------------
+pub extern "C" fn noop_data_callback(
+ stream: *mut ffi::cubeb_stream,
+ _user_ptr: *mut c_void,
+ _input_buffer: *const c_void,
+ output_buffer: *mut c_void,
+ nframes: i64,
+) -> i64 {
+ assert!(!stream.is_null());
+
+ // Feed silence data to output buffer
+ if !output_buffer.is_null() {
+ let stm = unsafe { &mut *(stream as *mut AudioUnitStream) };
+ let channels = stm.core_stream_data.output_stream_params.channels();
+ let samples = nframes as usize * channels as usize;
+ let sample_size = cubeb_sample_size(stm.core_stream_data.output_stream_params.format());
+ unsafe {
+ ptr::write_bytes(output_buffer, 0, samples * sample_size);
+ }
+ }
+
+ nframes
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum Scope {
+ Input,
+ Output,
+}
+
+impl From<Scope> for DeviceType {
+ fn from(scope: Scope) -> Self {
+ match scope {
+ Scope::Input => DeviceType::INPUT,
+ Scope::Output => DeviceType::OUTPUT,
+ }
+ }
+}
+
+#[derive(Clone)]
+pub enum PropertyScope {
+ Input,
+ Output,
+}
+
+pub fn test_get_default_device(scope: Scope) -> Option<AudioObjectID> {
+ let address = AudioObjectPropertyAddress {
+ mSelector: match scope {
+ Scope::Input => kAudioHardwarePropertyDefaultInputDevice,
+ Scope::Output => kAudioHardwarePropertyDefaultOutputDevice,
+ },
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let mut devid: AudioObjectID = kAudioObjectUnknown;
+ let mut size = mem::size_of::<AudioObjectID>();
+ let status = unsafe {
+ AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut UInt32,
+ &mut devid as *mut AudioObjectID as *mut c_void,
+ )
+ };
+ if status != NO_ERR || devid == kAudioObjectUnknown {
+ return None;
+ }
+ Some(devid)
+}
+
+// TODO: Create a GetProperty trait and add a default implementation for it, then implement it
+// for TestAudioUnit so member methods like `get_buffer_frame_size` can reuse the trait
+// method get_property_data.
+#[derive(Debug)]
+pub struct TestAudioUnit(AudioUnit);
+
+impl TestAudioUnit {
+ fn new(unit: AudioUnit) -> Self {
+ assert!(!unit.is_null());
+ Self(unit)
+ }
+ pub fn get_inner(&self) -> AudioUnit {
+ self.0
+ }
+ pub fn get_buffer_frame_size(
+ &self,
+ scope: Scope,
+ prop_scope: PropertyScope,
+ ) -> std::result::Result<u32, OSStatus> {
+ test_audiounit_get_buffer_frame_size(self.0, scope, prop_scope)
+ }
+}
+
+impl Drop for TestAudioUnit {
+ fn drop(&mut self) {
+ unsafe {
+ AudioUnitUninitialize(self.0);
+ AudioComponentInstanceDispose(self.0);
+ }
+ }
+}
+
+// TODO: 1. Return Result with custom errors.
+//       2. Allow creating an in-out unit.
+pub fn test_get_default_audiounit(scope: Scope) -> Option<TestAudioUnit> {
+ let device = test_get_default_device(scope.clone());
+ let unit = test_create_audiounit(ComponentSubType::HALOutput);
+ if device.is_none() || unit.is_none() {
+ return None;
+ }
+ let unit = unit.unwrap();
+ let device = device.unwrap();
+ match scope {
+ Scope::Input => {
+ if test_enable_audiounit_in_scope(unit.get_inner(), Scope::Input, true).is_err()
+ || test_enable_audiounit_in_scope(unit.get_inner(), Scope::Output, false).is_err()
+ {
+ return None;
+ }
+ }
+ Scope::Output => {
+ if test_enable_audiounit_in_scope(unit.get_inner(), Scope::Input, false).is_err()
+ || test_enable_audiounit_in_scope(unit.get_inner(), Scope::Output, true).is_err()
+ {
+ return None;
+ }
+ }
+ }
+
+ let status = unsafe {
+ AudioUnitSetProperty(
+ unit.get_inner(),
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0, // Global bus
+ &device as *const AudioObjectID as *const c_void,
+ mem::size_of::<AudioObjectID>() as u32,
+ )
+ };
+ if status == NO_ERR {
+ Some(unit)
+ } else {
+ None
+ }
+}
+
+pub enum ComponentSubType {
+ HALOutput,
+ DefaultOutput,
+}
+
+// TODO: Return Result with custom errors.
+// Surprisingly, the AudioUnit can be created even when there is no device at all on the platform,
+// no matter whether its subtype is HALOutput or DefaultOutput.
+pub fn test_create_audiounit(unit_type: ComponentSubType) -> Option<TestAudioUnit> {
+ let desc = AudioComponentDescription {
+ componentType: kAudioUnitType_Output,
+ componentSubType: match unit_type {
+ ComponentSubType::HALOutput => kAudioUnitSubType_HALOutput,
+ ComponentSubType::DefaultOutput => kAudioUnitSubType_DefaultOutput,
+ },
+ componentManufacturer: kAudioUnitManufacturer_Apple,
+ componentFlags: 0,
+ componentFlagsMask: 0,
+ };
+ let comp = unsafe { AudioComponentFindNext(ptr::null_mut(), &desc) };
+ if comp.is_null() {
+ return None;
+ }
+ let mut unit: AudioUnit = ptr::null_mut();
+ let status = unsafe { AudioComponentInstanceNew(comp, &mut unit) };
+    // TODO: Can unit be null when no error is returned?
+ if status != NO_ERR || unit.is_null() {
+ None
+ } else {
+ Some(TestAudioUnit::new(unit))
+ }
+}
+
+fn test_enable_audiounit_in_scope(
+ unit: AudioUnit,
+ scope: Scope,
+ enable: bool,
+) -> std::result::Result<(), OSStatus> {
+ assert!(!unit.is_null());
+ let (scope, element) = match scope {
+ Scope::Input => (kAudioUnitScope_Input, AU_IN_BUS),
+ Scope::Output => (kAudioUnitScope_Output, AU_OUT_BUS),
+ };
+ let on_off: u32 = if enable { 1 } else { 0 };
+ let status = unsafe {
+ AudioUnitSetProperty(
+ unit,
+ kAudioOutputUnitProperty_EnableIO,
+ scope,
+ element,
+ &on_off as *const u32 as *const c_void,
+ mem::size_of::<u32>() as u32,
+ )
+ };
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ Err(status)
+ }
+}
+
+pub enum DeviceFilter {
+ ExcludeCubebAggregateAndVPIO,
+ IncludeAll,
+}
+pub fn test_get_all_devices(filter: DeviceFilter) -> Vec<AudioObjectID> {
+ let mut devices = Vec::new();
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioHardwarePropertyDevices,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+ let mut size: usize = 0;
+ let status = unsafe {
+ AudioObjectGetPropertyDataSize(
+ kAudioObjectSystemObject,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ )
+ };
+ // size will be 0 if there is no device at all.
+ if status != NO_ERR || size == 0 {
+ return devices;
+ }
+ assert_eq!(size % mem::size_of::<AudioObjectID>(), 0);
+ let elements = size / mem::size_of::<AudioObjectID>();
+ devices.resize(elements, kAudioObjectUnknown);
+ let status = unsafe {
+ AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ devices.as_mut_ptr() as *mut c_void,
+ )
+ };
+ if status != NO_ERR {
+ devices.clear();
+ return devices;
+ }
+ for device in devices.iter() {
+ assert_ne!(*device, kAudioObjectUnknown);
+ }
+
+ match filter {
+ DeviceFilter::ExcludeCubebAggregateAndVPIO => {
+ devices.retain(|&device| {
+ if let Ok(uid) = get_device_global_uid(device) {
+ let uid = uid.into_string();
+ !uid.contains(PRIVATE_AGGREGATE_DEVICE_NAME)
+ && !uid.contains(VOICEPROCESSING_AGGREGATE_DEVICE_NAME)
+ } else {
+ true
+ }
+ });
+ }
+ _ => {}
+ }
+
+ devices
+}
+
+pub fn test_get_devices_in_scope(scope: Scope) -> Vec<AudioObjectID> {
+ let mut devices = test_get_all_devices(DeviceFilter::ExcludeCubebAggregateAndVPIO);
+ devices.retain(|device| test_device_in_scope(*device, scope.clone()));
+ devices
+}
+
+pub fn get_devices_info_in_scope(scope: Scope) -> Vec<TestDeviceInfo> {
+ fn print_info(info: &TestDeviceInfo) {
+ println!("{:>4}: {}\n\tuid: {}", info.id, info.label, info.uid);
+ }
+
+ println!(
+ "\n{:?} devices\n\
+ --------------------",
+ scope
+ );
+
+ let mut infos = vec![];
+ let devices = test_get_devices_in_scope(scope.clone());
+ for device in devices {
+ infos.push(TestDeviceInfo::new(device, scope.clone()));
+ print_info(infos.last().unwrap());
+ }
+ println!();
+
+ infos
+}
+
+#[derive(Debug)]
+pub struct TestDeviceInfo {
+ pub id: AudioObjectID,
+ pub label: String,
+ pub uid: String,
+}
+impl TestDeviceInfo {
+ pub fn new(id: AudioObjectID, scope: Scope) -> Self {
+ Self {
+ id,
+ label: Self::get_label(id, scope.clone()),
+ uid: Self::get_uid(id, scope),
+ }
+ }
+
+    fn get_label(id: AudioObjectID, scope: Scope) -> String {
+        match get_device_label(id, scope.into()) {
+            Ok(label) => label.into_string(),
+            Err(status) => format!("Unknown. Error: {}", status).to_string(),
+        }
+    }
+
+    fn get_uid(id: AudioObjectID, scope: Scope) -> String {
+        match get_device_uid(id, scope.into()) {
+            Ok(uid) => uid.into_string(),
+            Err(status) => format!("Unknown. Error: {}", status).to_string(),
+        }
+    }
+}
+
+pub fn test_device_channels_in_scope(
+ id: AudioObjectID,
+ scope: Scope,
+) -> std::result::Result<u32, OSStatus> {
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioDevicePropertyStreamConfiguration,
+ mScope: match scope {
+ Scope::Input => kAudioDevicePropertyScopeInput,
+ Scope::Output => kAudioDevicePropertyScopeOutput,
+ },
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+ let mut size: usize = 0;
+ let status = unsafe {
+ AudioObjectGetPropertyDataSize(
+ id,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ )
+ };
+ if status != NO_ERR {
+ return Err(status);
+ }
+ if size == 0 {
+ return Ok(0);
+ }
+ let byte_len = size / mem::size_of::<u8>();
+ let mut bytes = vec![0u8; byte_len];
+ let status = unsafe {
+ AudioObjectGetPropertyData(
+ id,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ bytes.as_mut_ptr() as *mut c_void,
+ )
+ };
+ if status != NO_ERR {
+ return Err(status);
+ }
+ let buf_list = unsafe { &*(bytes.as_mut_ptr() as *mut AudioBufferList) };
+ let buf_len = buf_list.mNumberBuffers as usize;
+ if buf_len == 0 {
+ return Ok(0);
+ }
+ let buf_ptr = buf_list.mBuffers.as_ptr() as *const AudioBuffer;
+ let buffers = unsafe { slice::from_raw_parts(buf_ptr, buf_len) };
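+    // kAudioDevicePropertyStreamConfiguration returns one AudioBuffer per stream on the
+    // device; the channel count in this scope is the sum of the channels of all buffers.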
+ let mut channels: u32 = 0;
+ for buffer in buffers {
+ channels += buffer.mNumberChannels;
+ }
+ Ok(channels)
+}
+
+pub fn test_device_in_scope(id: AudioObjectID, scope: Scope) -> bool {
+ let channels = test_device_channels_in_scope(id, scope);
+ channels.is_ok() && channels.unwrap() > 0
+}
+
+pub fn test_get_all_onwed_devices(id: AudioDeviceID) -> Vec<AudioObjectID> {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioObjectPropertyOwnedObjects,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let qualifier_data_size = mem::size_of::<AudioObjectID>();
+ let class_id: AudioClassID = kAudioSubDeviceClassID;
+ let qualifier_data = &class_id;
+ let mut size: usize = 0;
+
+ unsafe {
+ assert_eq!(
+ AudioObjectGetPropertyDataSize(
+ id,
+ &address,
+ qualifier_data_size as u32,
+ qualifier_data as *const u32 as *const c_void,
+ &mut size as *mut usize as *mut u32
+ ),
+ NO_ERR
+ );
+ }
+ assert_ne!(size, 0);
+
+ let elements = size / mem::size_of::<AudioObjectID>();
+ let mut devices: Vec<AudioObjectID> = allocate_array(elements);
+
+ unsafe {
+ assert_eq!(
+ AudioObjectGetPropertyData(
+ id,
+ &address,
+ qualifier_data_size as u32,
+ qualifier_data as *const u32 as *const c_void,
+ &mut size as *mut usize as *mut u32,
+ devices.as_mut_ptr() as *mut c_void
+ ),
+ NO_ERR
+ );
+ }
+
+ devices
+}
+
+pub fn test_get_master_device(id: AudioObjectID) -> String {
+ assert_ne!(id, kAudioObjectUnknown);
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioAggregateDevicePropertyMasterSubDevice,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let mut master: CFStringRef = ptr::null_mut();
+ let mut size = mem::size_of::<CFStringRef>();
+ assert_eq!(
+ audio_object_get_property_data(id, &address, &mut size, &mut master),
+ NO_ERR
+ );
+ assert!(!master.is_null());
+
+ let master = StringRef::new(master as _);
+ master.into_string()
+}
+
+pub fn test_get_drift_compensations(id: AudioObjectID) -> std::result::Result<u32, OSStatus> {
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioSubDevicePropertyDriftCompensation,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+ let mut size = mem::size_of::<u32>();
+ let mut compensation = u32::max_value();
+ let status = unsafe {
+ AudioObjectGetPropertyData(
+ id,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ &mut compensation as *mut u32 as *mut c_void,
+ )
+ };
+ if status == NO_ERR {
+ Ok(compensation)
+ } else {
+ Err(status)
+ }
+}
+
+pub fn test_audiounit_scope_is_enabled(unit: AudioUnit, scope: Scope) -> bool {
+ assert!(!unit.is_null());
+ let mut has_io: UInt32 = 0;
+ let (scope, element) = match scope {
+ Scope::Input => (kAudioUnitScope_Input, AU_IN_BUS),
+ Scope::Output => (kAudioUnitScope_Output, AU_OUT_BUS),
+ };
+ let mut size = mem::size_of::<UInt32>();
+ assert_eq!(
+ audio_unit_get_property(
+ unit,
+ kAudioOutputUnitProperty_HasIO,
+ scope,
+ element,
+ &mut has_io,
+ &mut size
+ ),
+ NO_ERR
+ );
+ has_io != 0
+}
+
+pub fn test_audiounit_get_buffer_frame_size(
+ unit: AudioUnit,
+ scope: Scope,
+ prop_scope: PropertyScope,
+) -> std::result::Result<u32, OSStatus> {
+ let element = match scope {
+ Scope::Input => AU_IN_BUS,
+ Scope::Output => AU_OUT_BUS,
+ };
+ let prop_scope = match prop_scope {
+ PropertyScope::Input => kAudioUnitScope_Input,
+ PropertyScope::Output => kAudioUnitScope_Output,
+ };
+ let mut buffer_frames: u32 = 0;
+ let mut size = mem::size_of::<u32>();
+ let status = unsafe {
+ AudioUnitGetProperty(
+ unit,
+ kAudioDevicePropertyBufferFrameSize,
+ prop_scope,
+ element,
+ &mut buffer_frames as *mut u32 as *mut c_void,
+ &mut size as *mut usize as *mut u32,
+ )
+ };
+ if status == NO_ERR {
+ Ok(buffer_frames)
+ } else {
+ Err(status)
+ }
+}
+
+// Surprisingly it's ok to set
+// 1. an unknown device
+// 2. a non-input/non-output device
+// 3. the current default input/output device
+// as the new default input/output device by Apple's API. We need to check the above cases ourselves.
+// This function returns an Ok containing the previous default device id on success.
+// Otherwise, it returns an Err containing the error code as an OSStatus.
+pub fn test_set_default_device(
+ device: AudioObjectID,
+ scope: Scope,
+) -> std::result::Result<AudioObjectID, OSStatus> {
+ assert!(test_device_in_scope(device, scope.clone()));
+ let default = test_get_default_device(scope.clone()).unwrap();
+ if default == device {
+ // Do nothing if device is already the default device
+ return Ok(device);
+ }
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: match scope {
+ Scope::Input => kAudioHardwarePropertyDefaultInputDevice,
+ Scope::Output => kAudioHardwarePropertyDefaultOutputDevice,
+ },
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+ let size = mem::size_of::<AudioObjectID>();
+ let status = unsafe {
+ AudioObjectSetPropertyData(
+ kAudioObjectSystemObject,
+ &address,
+ 0,
+ ptr::null(),
+ size as u32,
+ &device as *const AudioObjectID as *const c_void,
+ )
+ };
+ let new_default = test_get_default_device(scope.clone()).unwrap();
+ if new_default == default {
+ Err(-1)
+ } else if status == NO_ERR {
+ Ok(default)
+ } else {
+ Err(status)
+ }
+}
+
+pub struct TestDeviceSwitcher {
+ scope: Scope,
+ devices: Vec<AudioObjectID>,
+ current_device_index: usize,
+}
+
+impl TestDeviceSwitcher {
+ pub fn new(scope: Scope) -> Self {
+ let infos = get_devices_info_in_scope(scope.clone());
+ let devices: Vec<AudioObjectID> = infos.into_iter().map(|info| info.id).collect();
+ let current = test_get_default_device(scope.clone()).unwrap();
+ let index = devices
+ .iter()
+ .position(|device| *device == current)
+ .unwrap();
+ Self {
+ scope,
+ devices,
+ current_device_index: index,
+ }
+ }
+
+ pub fn next(&mut self) {
+ let current = self.devices[self.current_device_index];
+ let next_index = (self.current_device_index + 1) % self.devices.len();
+ let next = self.devices[next_index];
+ println!(
+ "Switch device for {:?}: {} -> {}",
+ self.scope, current, next
+ );
+ match self.set_device(next) {
+ Ok(prev) => {
+ assert_eq!(prev, current);
+ self.current_device_index = next_index;
+ }
+ _ => {
+ self.devices.remove(next_index);
+ if next_index < self.current_device_index {
+ self.current_device_index -= 1;
+ }
+ self.next();
+ }
+ }
+ }
+
+ fn set_device(&self, device: AudioObjectID) -> std::result::Result<AudioObjectID, OSStatus> {
+ test_set_default_device(device, self.scope.clone())
+ }
+}
+
+pub fn test_create_device_change_listener<F>(scope: Scope, listener: F) -> TestPropertyListener<F>
+where
+ F: Fn(&[AudioObjectPropertyAddress]) -> OSStatus,
+{
+ let address = AudioObjectPropertyAddress {
+ mSelector: match scope {
+ Scope::Input => kAudioHardwarePropertyDefaultInputDevice,
+ Scope::Output => kAudioHardwarePropertyDefaultOutputDevice,
+ },
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+ TestPropertyListener::new(kAudioObjectSystemObject, address, listener)
+}
+
+pub struct TestPropertyListener<F>
+where
+ F: Fn(&[AudioObjectPropertyAddress]) -> OSStatus,
+{
+ device: AudioObjectID,
+ property: AudioObjectPropertyAddress,
+ callback: F,
+}
+
+impl<F> TestPropertyListener<F>
+where
+ F: Fn(&[AudioObjectPropertyAddress]) -> OSStatus,
+{
+ pub fn new(device: AudioObjectID, property: AudioObjectPropertyAddress, callback: F) -> Self {
+ Self {
+ device,
+ property,
+ callback,
+ }
+ }
+
+ pub fn start(&self) -> std::result::Result<(), OSStatus> {
+ let status = unsafe {
+ AudioObjectAddPropertyListener(
+ self.device,
+ &self.property,
+ Some(Self::render),
+ self as *const Self as *mut c_void,
+ )
+ };
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ Err(status)
+ }
+ }
+
+ pub fn stop(&self) -> std::result::Result<(), OSStatus> {
+ let status = unsafe {
+ AudioObjectRemovePropertyListener(
+ self.device,
+ &self.property,
+ Some(Self::render),
+ self as *const Self as *mut c_void,
+ )
+ };
+ if status == NO_ERR {
+ Ok(())
+ } else {
+ Err(status)
+ }
+ }
+
+ extern "C" fn render(
+ id: AudioObjectID,
+ number_of_addresses: u32,
+ addresses: *const AudioObjectPropertyAddress,
+ data: *mut c_void,
+ ) -> OSStatus {
+ let listener = unsafe { &*(data as *mut Self) };
+ assert_eq!(id, listener.device);
+ let addrs = unsafe { slice::from_raw_parts(addresses, number_of_addresses as usize) };
+ (listener.callback)(addrs)
+ }
+}
+
+impl<F> Drop for TestPropertyListener<F>
+where
+ F: Fn(&[AudioObjectPropertyAddress]) -> OSStatus,
+{
+ fn drop(&mut self) {
+ self.stop();
+ }
+}
+
+// TODO: It doesn't work if default input or output is an aggregate device! Probably we need to do
+// the same thing as what audiounit_set_aggregate_sub_device_list does.
+#[derive(Debug)]
+pub struct TestDevicePlugger {
+ scope: Scope,
+ plugin_id: AudioObjectID,
+ device_id: AudioObjectID,
+}
+
+impl TestDevicePlugger {
+ pub fn new(scope: Scope) -> std::result::Result<Self, OSStatus> {
+ let plugin_id = Self::get_system_plugin_id()?;
+ Ok(Self {
+ scope,
+ plugin_id,
+ device_id: kAudioObjectUnknown,
+ })
+ }
+
+ pub fn get_device_id(&self) -> AudioObjectID {
+ self.device_id
+ }
+
+ pub fn plug(&mut self) -> std::result::Result<(), OSStatus> {
+ self.device_id = self.create_aggregate_device()?;
+ Ok(())
+ }
+
+ pub fn unplug(&mut self) -> std::result::Result<(), OSStatus> {
+ self.destroy_aggregate_device()
+ }
+
+ fn is_plugging(&self) -> bool {
+ self.device_id != kAudioObjectUnknown
+ }
+
+ fn destroy_aggregate_device(&mut self) -> std::result::Result<(), OSStatus> {
+ assert_ne!(self.plugin_id, kAudioObjectUnknown);
+ assert_ne!(self.device_id, kAudioObjectUnknown);
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioPlugInDestroyAggregateDevice,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let mut size: usize = 0;
+ let status = unsafe {
+ AudioObjectGetPropertyDataSize(
+ self.plugin_id,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ )
+ };
+ if status != NO_ERR {
+ return Err(status);
+ }
+ assert_ne!(size, 0);
+
+ let status = unsafe {
+ // This call can simulate removing a device.
+ AudioObjectGetPropertyData(
+ self.plugin_id,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ &mut self.device_id as *mut AudioDeviceID as *mut c_void,
+ )
+ };
+ if status == NO_ERR {
+ self.device_id = kAudioObjectUnknown;
+ Ok(())
+ } else {
+ Err(status)
+ }
+ }
+
+ fn create_aggregate_device(&self) -> std::result::Result<AudioObjectID, OSStatus> {
+ use std::time::{SystemTime, UNIX_EPOCH};
+
+ const TEST_AGGREGATE_DEVICE_NAME: &str = "TestAggregateDevice";
+
+ assert_ne!(self.plugin_id, kAudioObjectUnknown);
+
+ let sub_devices = Self::get_sub_devices(self.scope.clone());
+ if sub_devices.is_none() {
+ return Err(kAudioCodecUnspecifiedError as OSStatus);
+ }
+ let sub_devices = sub_devices.unwrap();
+
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioPlugInCreateAggregateDevice,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let mut size: usize = 0;
+ let status = unsafe {
+ AudioObjectGetPropertyDataSize(
+ self.plugin_id,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ )
+ };
+ if status != NO_ERR {
+ return Err(status);
+ }
+ assert_ne!(size, 0);
+
+ let sys_time = SystemTime::now();
+ let time_id = sys_time.duration_since(UNIX_EPOCH).unwrap().as_nanos();
+ let device_name = format!("{}_{}", TEST_AGGREGATE_DEVICE_NAME, time_id);
+ let device_uid = format!("org.mozilla.{}", device_name);
+
+ let mut device_id = kAudioObjectUnknown;
+ let status = unsafe {
+ let device_dict = CFDictionaryCreateMutable(
+ kCFAllocatorDefault,
+ 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks,
+ );
+
+ // Set the name of this device.
+ let device_name = cfstringref_from_string(&device_name);
+ CFDictionaryAddValue(
+ device_dict,
+ cfstringref_from_static_string(AGGREGATE_DEVICE_NAME_KEY) as *const c_void,
+ device_name as *const c_void,
+ );
+ CFRelease(device_name as *const c_void);
+
+ // Set the uid of this device.
+ let device_uid = cfstringref_from_string(&device_uid);
+ CFDictionaryAddValue(
+ device_dict,
+ cfstringref_from_static_string(AGGREGATE_DEVICE_UID_KEY) as *const c_void,
+ device_uid as *const c_void,
+ );
+ CFRelease(device_uid as *const c_void);
+
+ // Make this device NOT private to the process creating it.
+ // On macOS 14, devicechange events are not triggered when it is private.
+ let private_value: i32 = 0;
+ let device_private_key = CFNumberCreate(
+ kCFAllocatorDefault,
+ i64::from(kCFNumberIntType),
+ &private_value as *const i32 as *const c_void,
+ );
+ CFDictionaryAddValue(
+ device_dict,
+ cfstringref_from_static_string(AGGREGATE_DEVICE_PRIVATE_KEY) as *const c_void,
+ device_private_key as *const c_void,
+ );
+ CFRelease(device_private_key as *const c_void);
+
+ // Set this device to be a stacked aggregate (i.e. multi-output device).
+ let stacked_value: i32 = 0; // 1 for normal aggregate device.
+ let device_stacked_key = CFNumberCreate(
+ kCFAllocatorDefault,
+ i64::from(kCFNumberIntType),
+ &stacked_value as *const i32 as *const c_void,
+ );
+ CFDictionaryAddValue(
+ device_dict,
+ cfstringref_from_static_string(AGGREGATE_DEVICE_STACKED_KEY) as *const c_void,
+ device_stacked_key as *const c_void,
+ );
+ CFRelease(device_stacked_key as *const c_void);
+
+ // Set sub devices for this device.
+ CFDictionaryAddValue(
+ device_dict,
+ cfstringref_from_static_string(AGGREGATE_DEVICE_SUB_DEVICE_LIST_KEY)
+ as *const c_void,
+ sub_devices as *const c_void,
+ );
+ CFRelease(sub_devices as *const c_void);
+
+ // This call can simulate adding a device.
+ let status = AudioObjectGetPropertyData(
+ self.plugin_id,
+ &address,
+ mem::size_of_val(&device_dict) as u32,
+ &device_dict as *const CFMutableDictionaryRef as *const c_void,
+ &mut size as *mut usize as *mut u32,
+ &mut device_id as *mut AudioDeviceID as *mut c_void,
+ );
+ CFRelease(device_dict as *const c_void);
+ status
+ };
+ if status == NO_ERR {
+ assert_ne!(device_id, kAudioObjectUnknown);
+ Ok(device_id)
+ } else {
+ Err(status)
+ }
+ }
+
+ fn get_system_plugin_id() -> std::result::Result<AudioObjectID, OSStatus> {
+ let address = AudioObjectPropertyAddress {
+ mSelector: kAudioHardwarePropertyPlugInForBundleID,
+ mScope: kAudioObjectPropertyScopeGlobal,
+ mElement: kAudioObjectPropertyElementMaster,
+ };
+
+ let mut size: usize = 0;
+ let status = unsafe {
+ AudioObjectGetPropertyDataSize(
+ kAudioObjectSystemObject,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ )
+ };
+ if status != NO_ERR {
+ return Err(status);
+ }
+ assert_ne!(size, 0);
+
+ let mut plugin_id = kAudioObjectUnknown;
+ let mut in_bundle_ref = cfstringref_from_static_string("com.apple.audio.CoreAudio");
+ let mut translation_value = AudioValueTranslation {
+ mInputData: &mut in_bundle_ref as *mut CFStringRef as *mut c_void,
+ mInputDataSize: mem::size_of::<CFStringRef>() as u32,
+ mOutputData: &mut plugin_id as *mut AudioObjectID as *mut c_void,
+ mOutputDataSize: mem::size_of::<AudioObjectID>() as u32,
+ };
+ assert_eq!(size, mem::size_of_val(&translation_value));
+
+ let status = unsafe {
+ let status = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &address,
+ 0,
+ ptr::null(),
+ &mut size as *mut usize as *mut u32,
+ &mut translation_value as *mut AudioValueTranslation as *mut c_void,
+ );
+ CFRelease(in_bundle_ref as *const c_void);
+ status
+ };
+ if status == NO_ERR {
+ assert_ne!(plugin_id, kAudioObjectUnknown);
+ Ok(plugin_id)
+ } else {
+ Err(status)
+ }
+ }
+
+ // TODO: This doesn't work as expected when the default device in the scope is an
+ // aggregate device. In that case we should get the list of all the active sub
+ // devices and put them into the array instead. See the code in
+ // AggregateDevice::get_sub_devices and audiounit_set_aggregate_sub_device_list,
+ // and the sketch after this impl block.
+ fn get_sub_devices(scope: Scope) -> Option<CFArrayRef> {
+ let device = test_get_default_device(scope)?;
+ let uid = get_device_global_uid(device).ok()?;
+ unsafe {
+ let list = CFArrayCreateMutable(ptr::null(), 0, &kCFTypeArrayCallBacks);
+ let sub_device_dict = CFDictionaryCreateMutable(
+ ptr::null(),
+ 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks,
+ );
+ CFDictionaryAddValue(
+ sub_device_dict,
+ cfstringref_from_static_string(SUB_DEVICE_UID_KEY) as *const c_void,
+ uid.get_raw() as *const c_void,
+ );
+ CFArrayAppendValue(list, sub_device_dict as *const c_void);
+ CFRelease(sub_device_dict as *const c_void);
+ Some(list)
+ }
+ }
+}
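+
+// A hedged, illustrative sketch (not used by the tests above) of the handling the TODO on
+// get_sub_devices describes: given the ids of all the active sub devices of an aggregate
+// device (e.g. as returned by AggregateDevice::get_sub_devices, whose exact signature is
+// assumed here rather than shown), build the sub-device dictionary array from every one of
+// them instead of from the default device alone. The helper name is hypothetical.
+#[allow(dead_code)]
+fn sub_device_list_from_ids(devices: &[AudioObjectID]) -> Option<CFArrayRef> {
+ // Resolve every UID first so no CF object is created and then leaked if a lookup fails.
+ let mut uids = Vec::with_capacity(devices.len());
+ for device in devices {
+ uids.push(get_device_global_uid(*device).ok()?);
+ }
+ unsafe {
+ let list = CFArrayCreateMutable(ptr::null(), 0, &kCFTypeArrayCallBacks);
+ for uid in &uids {
+ let sub_device_dict = CFDictionaryCreateMutable(
+ ptr::null(),
+ 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks,
+ );
+ // Same pattern as get_sub_devices above: the dictionary retains the UID string.
+ CFDictionaryAddValue(
+ sub_device_dict,
+ cfstringref_from_static_string(SUB_DEVICE_UID_KEY) as *const c_void,
+ uid.get_raw() as *const c_void,
+ );
+ CFArrayAppendValue(list, sub_device_dict as *const c_void);
+ CFRelease(sub_device_dict as *const c_void);
+ }
+ Some(list)
+ }
+}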
+
+impl Drop for TestDevicePlugger {
+ fn drop(&mut self) {
+ if self.is_plugging() {
+ self.unplug();
+ }
+ }
+}
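+
+// Hedged usage sketch (illustrative only, not one of the tests): simulate an output device
+// appearing and disappearing, e.g. around code that registers a device-collection-changed
+// callback.
+#[allow(dead_code)]
+fn plug_and_unplug_device_sketch() -> std::result::Result<(), OSStatus> {
+ let mut plugger = TestDevicePlugger::new(Scope::Output)?;
+ plugger.plug()?;
+ assert_ne!(plugger.get_device_id(), kAudioObjectUnknown);
+ // Dropping the plugger would also unplug the device, but do it explicitly here.
+ plugger.unplug()
+}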
+
+// Test Templates
+// ------------------------------------------------------------------------------------------------
+pub fn test_ops_context_operation<F>(name: &'static str, operation: F)
+where
+ F: FnOnce(*mut ffi::cubeb),
+{
+ let name_c_string = CString::new(name).expect("Failed to create context name");
+ let mut context = ptr::null_mut::<ffi::cubeb>();
+ assert_eq!(
+ unsafe { OPS.init.unwrap()(&mut context, name_c_string.as_ptr()) },
+ ffi::CUBEB_OK
+ );
+ assert!(!context.is_null());
+ operation(context);
+ unsafe { OPS.destroy.unwrap()(context) }
+}
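+
+// Hedged usage sketch of the template above (illustrative only): the closure receives the raw
+// C context and can poke it through the ops table, assuming `get_backend_id` is populated in
+// OPS just like `init` and `destroy` are.
+#[allow(dead_code)]
+fn context_template_usage_sketch() {
+ test_ops_context_operation("context: template usage sketch", |context_ptr| {
+ let backend_id = unsafe { OPS.get_backend_id.unwrap()(context_ptr) };
+ assert!(!backend_id.is_null());
+ });
+}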
+
+// An in-out stream initialized with different input and output devices will create an
+// aggregate device and result in firing device-collection-changed callbacks. Running in-out
+// streams in tests that capture device-collection-changed callbacks may therefore cause trouble.
+pub fn test_ops_stream_operation<F>(
+ name: &'static str,
+ input_device: ffi::cubeb_devid,
+ input_stream_params: *mut ffi::cubeb_stream_params,
+ output_device: ffi::cubeb_devid,
+ output_stream_params: *mut ffi::cubeb_stream_params,
+ latency_frames: u32,
+ data_callback: ffi::cubeb_data_callback,
+ state_callback: ffi::cubeb_state_callback,
+ user_ptr: *mut c_void,
+ operation: F,
+) where
+ F: FnOnce(*mut ffi::cubeb_stream),
+{
+ test_ops_context_operation("context: stream operation", |context_ptr| {
+ // Do nothing if there is no input/output device to perform input/output tests.
+ if !input_stream_params.is_null() && test_get_default_device(Scope::Input).is_none() {
+ println!("No input device to perform input tests for \"{}\".", name);
+ return;
+ }
+
+ if !output_stream_params.is_null() && test_get_default_device(Scope::Output).is_none() {
+ println!("No output device to perform output tests for \"{}\".", name);
+ return;
+ }
+
+ let mut stream: *mut ffi::cubeb_stream = ptr::null_mut();
+ let stream_name = CString::new(name).expect("Failed to create stream name");
+ assert_eq!(
+ unsafe {
+ OPS.stream_init.unwrap()(
+ context_ptr,
+ &mut stream,
+ stream_name.as_ptr(),
+ input_device,
+ input_stream_params,
+ output_device,
+ output_stream_params,
+ latency_frames,
+ data_callback,
+ state_callback,
+ user_ptr,
+ )
+ },
+ ffi::CUBEB_OK
+ );
+ assert!(!stream.is_null());
+ operation(stream);
+ unsafe {
+ OPS.stream_destroy.unwrap()(stream);
+ }
+ });
+}
+
+pub fn test_get_raw_context<F>(operation: F)
+where
+ F: FnOnce(&mut AudioUnitContext),
+{
+ let mut context = AudioUnitContext::new();
+ operation(&mut context);
+}
+
+pub fn test_get_default_raw_stream<F>(operation: F)
+where
+ F: FnOnce(&mut AudioUnitStream),
+{
+ test_get_raw_stream(ptr::null_mut(), None, None, 0, operation);
+}
+
+fn test_get_raw_stream<F>(
+ user_ptr: *mut c_void,
+ data_callback: ffi::cubeb_data_callback,
+ state_callback: ffi::cubeb_state_callback,
+ latency_frames: u32,
+ operation: F,
+) where
+ F: FnOnce(&mut AudioUnitStream),
+{
+ let mut context = AudioUnitContext::new();
+
+ // Add a stream to the context since we are about to create one.
+ // AudioUnitStream::drop() will check the context has at least one stream.
+ let global_latency_frames = context.update_latency_by_adding_stream(latency_frames);
+
+ let mut stream = AudioUnitStream::new(
+ &mut context,
+ user_ptr,
+ data_callback,
+ state_callback,
+ global_latency_frames.unwrap(),
+ );
+ stream.core_stream_data = CoreStreamData::new(&stream, None, None);
+
+ operation(&mut stream);
+}
+
+pub fn test_get_stream_with_default_data_callback_by_type<F>(
+ name: &'static str,
+ stm_type: StreamType,
+ input_device: Option<AudioObjectID>,
+ output_device: Option<AudioObjectID>,
+ state_callback: extern "C" fn(*mut ffi::cubeb_stream, *mut c_void, ffi::cubeb_state),
+ data: *mut c_void,
+ operation: F,
+) where
+ F: FnOnce(&mut AudioUnitStream),
+{
+ let mut input_params = get_dummy_stream_params(Scope::Input);
+ let mut output_params = get_dummy_stream_params(Scope::Output);
+
+ let in_params = if stm_type.contains(StreamType::INPUT) {
+ &mut input_params as *mut ffi::cubeb_stream_params
+ } else {
+ ptr::null_mut()
+ };
+ let out_params = if stm_type.contains(StreamType::OUTPUT) {
+ &mut output_params as *mut ffi::cubeb_stream_params
+ } else {
+ ptr::null_mut()
+ };
+ let in_device = if let Some(id) = input_device {
+ id as ffi::cubeb_devid
+ } else {
+ ptr::null_mut()
+ };
+ let out_device = if let Some(id) = output_device {
+ id as ffi::cubeb_devid
+ } else {
+ ptr::null_mut()
+ };
+
+ test_ops_stream_operation_with_default_data_callback(
+ name,
+ in_device,
+ in_params,
+ out_device,
+ out_params,
+ state_callback,
+ data,
+ |stream| {
+ let stm = unsafe { &mut *(stream as *mut AudioUnitStream) };
+ operation(stm);
+ },
+ );
+}
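+
+// Hedged sketch (illustrative only) of driving the helper above: open a duplex stream on the
+// default devices with a state callback that ignores every transition. Both names below are
+// hypothetical and not existing helpers in this module.
+#[allow(dead_code)]
+extern "C" fn sketch_noop_state_callback(
+ _stream: *mut ffi::cubeb_stream,
+ _user_ptr: *mut c_void,
+ _state: ffi::cubeb_state,
+) {
+}
+
+#[allow(dead_code)]
+fn duplex_stream_sketch() {
+ test_get_stream_with_default_data_callback_by_type(
+ "stream: duplex sketch",
+ StreamType::DUPLEX,
+ None,
+ None,
+ sketch_noop_state_callback,
+ ptr::null_mut(),
+ |stream| {
+ // The closure gets the backend AudioUnitStream; a real test would start it or
+ // inspect its state here.
+ let _ = stream;
+ },
+ );
+}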
+
+bitflags! {
+ pub struct StreamType: u8 {
+ const INPUT = 0x01;
+ const OUTPUT = 0x02;
+ const DUPLEX = 0x03;
+ }
+}
+
+fn get_dummy_stream_params(scope: Scope) -> ffi::cubeb_stream_params {
+ // The stream format for input and output must be the same.
+ const STREAM_FORMAT: u32 = ffi::CUBEB_SAMPLE_FLOAT32NE;
+
+ // Make sure the parameters meet the requirements documented in the comments of
+ // AudioUnitContext::stream_init.
+ let mut stream_params = ffi::cubeb_stream_params::default();
+ stream_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
+ let (format, rate, channels, layout) = match scope {
+ Scope::Input => (STREAM_FORMAT, 48000, 1, ffi::CUBEB_LAYOUT_MONO),
+ Scope::Output => (STREAM_FORMAT, 44100, 2, ffi::CUBEB_LAYOUT_STEREO),
+ };
+ stream_params.format = format;
+ stream_params.rate = rate;
+ stream_params.channels = channels;
+ stream_params.layout = layout;
+ stream_params
+}
+
+fn test_ops_stream_operation_with_default_data_callback<F>(
+ name: &'static str,
+ input_device: ffi::cubeb_devid,
+ input_stream_params: *mut ffi::cubeb_stream_params,
+ output_device: ffi::cubeb_devid,
+ output_stream_params: *mut ffi::cubeb_stream_params,
+ state_callback: extern "C" fn(*mut ffi::cubeb_stream, *mut c_void, ffi::cubeb_state),
+ data: *mut c_void,
+ operation: F,
+) where
+ F: FnOnce(*mut ffi::cubeb_stream),
+{
+ test_ops_stream_operation(
+ name,
+ input_device,
+ input_stream_params,
+ output_device,
+ output_stream_params,
+ 4096, // TODO: Get the latency from get_min_latency instead?
+ Some(noop_data_callback),
+ Some(state_callback),
+ data,
+ operation,
+ );
+}
diff --git a/third_party/rust/cubeb-coreaudio/src/backend/utils.rs b/third_party/rust/cubeb-coreaudio/src/backend/utils.rs
new file mode 100644
index 0000000000..246e337ef5
--- /dev/null
+++ b/third_party/rust/cubeb-coreaudio/src/backend/utils.rs
@@ -0,0 +1,107 @@
+// Copyright © 2018 Mozilla Foundation
+//
+// This program is made available under an ISC-style license. See the
+// accompanying file LICENSE for details.
+use cubeb_backend::SampleFormat as fmt;
+use std::mem;
+
+pub fn allocate_array_by_size<T: Clone + Default>(size: usize) -> Vec<T> {
+ assert_eq!(size % mem::size_of::<T>(), 0);
+ let elements = size / mem::size_of::<T>();
+ allocate_array::<T>(elements)
+}
+
+pub fn allocate_array<T: Clone + Default>(elements: usize) -> Vec<T> {
+ vec![T::default(); elements]
+}
+
+pub fn forget_vec<T>(v: Vec<T>) -> (*mut T, usize) {
+ // Drop any excess capacity via into_boxed_slice.
+ let mut slice = v.into_boxed_slice();
+ let ptr_and_len = (slice.as_mut_ptr(), slice.len());
+ mem::forget(slice); // Leak the memory to the external code.
+ ptr_and_len
+}
+
+#[inline]
+pub fn retake_forgotten_vec<T>(ptr: *mut T, len: usize) -> Vec<T> {
+ unsafe { Vec::from_raw_parts(ptr, len, len) }
+}
+
+pub fn cubeb_sample_size(format: fmt) -> usize {
+ match format {
+ fmt::S16LE | fmt::S16BE | fmt::S16NE => mem::size_of::<i16>(),
+ fmt::Float32LE | fmt::Float32BE | fmt::Float32NE => mem::size_of::<f32>(),
+ }
+}
+
+pub struct Finalizer<F: FnOnce()>(Option<F>);
+
+impl<F: FnOnce()> Finalizer<F> {
+ pub fn dismiss(&mut self) {
+ let _ = self.0.take();
+ assert!(self.0.is_none());
+ }
+}
+
+impl<F: FnOnce()> Drop for Finalizer<F> {
+ fn drop(&mut self) {
+ if let Some(f) = self.0.take() {
+ f();
+ }
+ }
+}
+
+pub fn finally<F: FnOnce()>(f: F) -> Finalizer<F> {
+ Finalizer(Some(f))
+}
+
+#[test]
+fn test_forget_vec_and_retake_it() {
+ let expected: Vec<u32> = (10..20).collect();
+ let leaked = expected.clone();
+ let (ptr, len) = forget_vec(leaked);
+ let retaken = retake_forgotten_vec(ptr, len);
+ for (idx, data) in retaken.iter().enumerate() {
+ assert_eq!(*data, expected[idx]);
+ }
+}
+
+#[test]
+fn test_cubeb_sample_size() {
+ let pairs = [
+ (fmt::S16LE, mem::size_of::<i16>()),
+ (fmt::S16BE, mem::size_of::<i16>()),
+ (fmt::S16NE, mem::size_of::<i16>()),
+ (fmt::Float32LE, mem::size_of::<f32>()),
+ (fmt::Float32BE, mem::size_of::<f32>()),
+ (fmt::Float32NE, mem::size_of::<f32>()),
+ ];
+
+ for pair in pairs.iter() {
+ let (format, size) = pair;
+ assert_eq!(cubeb_sample_size(*format), *size);
+ }
+}
+
+#[test]
+fn test_finally() {
+ let mut x = 0;
+
+ {
+ let y = &mut x;
+ let _finally = finally(|| {
+ *y = 100;
+ });
+ }
+ assert_eq!(x, 100);
+
+ {
+ let y = &mut x;
+ let mut finally = finally(|| {
+ *y = 200;
+ });
+ finally.dismiss();
+ }
+ assert_eq!(x, 100);
+}