diff --git a/examples/duplex.rs b/examples/duplex.rs index 4e007c1..76419a3 100644 --- a/examples/duplex.rs +++ b/examples/duplex.rs @@ -1,10 +1,24 @@ use crate::util::sine::SineWave; use anyhow::Result; -use interflow::duplex::AudioDuplexCallback; -use interflow::prelude::*; +use interflow::{duplex::DuplexStreamConfig, prelude::*}; mod util; +#[cfg(os_alsa)] +fn main() -> Result<()> { + env_logger::init(); + + let device = default_duplex_device(); + let mut config = device.default_duplex_config().unwrap(); + config.buffer_size_range = (Some(128), Some(512)); + let stream = device.create_duplex_stream(config, RingMod::new()).unwrap(); + println!("Press Enter to stop"); + std::io::stdin().read_line(&mut String::new())?; + stream.eject().unwrap(); + Ok(()) +} + +#[cfg(not(os_alsa))] fn main() -> Result<()> { let input = default_input_device(); let output = default_output_device(); @@ -40,8 +54,12 @@ impl AudioDuplexCallback for RingMod { input: AudioInput, mut output: AudioOutput, ) { + if input.buffer.num_samples() < output.buffer.num_samples() { + log::error!("Input underrun"); + } let sr = context.stream_config.samplerate as f32; - for i in 0..output.buffer.num_samples() { + let num_samples = output.buffer.num_samples().min(input.buffer.num_samples()); + for i in 0..num_samples { let inp = input.buffer.get_frame(i)[0]; let c = self.carrier.next_sample(sr); output.buffer.set_mono(i, inp * c); diff --git a/examples/enumerate_alsa.rs b/examples/enumerate_alsa.rs index 2c38560..49e802e 100644 --- a/examples/enumerate_alsa.rs +++ b/examples/enumerate_alsa.rs @@ -1,3 +1,5 @@ +use crate::util::enumerate::enumerate_duplex_devices; + mod util; #[cfg(os_alsa)] @@ -7,7 +9,9 @@ fn main() -> Result<(), Box> { env_logger::init(); - enumerate_devices(AlsaDriver) + enumerate_devices(AlsaDriver)?; + enumerate_duplex_devices(AlsaDriver)?; + Ok(()) } #[cfg(not(os_alsa))] diff --git a/examples/loopback.rs b/examples/loopback.rs index db72372..bc8e904 100644 --- a/examples/loopback.rs +++ b/examples/loopback.rs @@ -1,10 +1,28 @@ use crate::util::meter::PeakMeter; use crate::util::AtomicF32; use anyhow::Result; -use interflow::prelude::*; +use interflow::{duplex::DuplexStreamConfig, prelude::*}; use std::sync::Arc; + mod util; +#[cfg(os_alsa)] +fn main() -> Result<()> { + env_logger::init(); + + let device = default_duplex_device(); + let mut config = device.default_duplex_config().unwrap(); + config.buffer_size_range = (Some(128), Some(512)); + let value = Arc::new(AtomicF32::new(0.0)); + let stream = device + .create_duplex_stream(config, Loopback::new(44100., value.clone())) + .unwrap(); + util::display_peakmeter(value)?; + stream.eject().unwrap(); + Ok(()) +} + +#[cfg(not(os_alsa))] fn main() -> Result<()> { env_logger::init(); diff --git a/examples/util/enumerate.rs b/examples/util/enumerate.rs index c01032f..3e7d9cc 100644 --- a/examples/util/enumerate.rs +++ b/examples/util/enumerate.rs @@ -1,4 +1,4 @@ -use interflow::{AudioDevice, AudioDriver, DeviceType}; +use interflow::prelude::*; use std::error::Error; pub fn enumerate_devices(driver: Driver) -> Result<(), Box> @@ -20,7 +20,28 @@ where eprintln!("All devices"); for device in driver.list_devices()? 
{ - eprintln!("\t{} ({:?})", device.name(), device.device_type()); + eprintln!("\t{}", device.name()); + } + Ok(()) +} + +pub fn enumerate_duplex_devices( + driver: Driver, +) -> Result<(), Box> +where + ::Error: 'static, +{ + eprintln!("Driver name : {}", Driver::DISPLAY_NAME); + eprintln!("Driver version: {}", driver.version()?); + if let Some(device) = driver.default_duplex_device()? { + eprintln!("Default duplex device: {}", device.name()); + } else { + eprintln!("No default duplex device"); + } + + eprintln!("All duplex devices"); + for device in driver.list_duplex_devices()? { + eprintln!("\t{}", device.name()); } Ok(()) } diff --git a/examples/util/sine.rs b/examples/util/sine.rs index f780329..929f81a 100644 --- a/examples/util/sine.rs +++ b/examples/util/sine.rs @@ -1,4 +1,4 @@ -use interflow::{AudioCallbackContext, AudioOutput, AudioOutputCallback}; +use interflow::prelude::*; use std::f32::consts::TAU; pub struct SineWave { diff --git a/flake.nix b/flake.nix index f946b41..7e70265 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,10 @@ }; devShells.default = pkgs.clangStdenv.mkDerivation { name = "interflow-devshell"; - buildInputs = buildInputs ++ nativeBuildInputs; + buildInputs = buildInputs ++ nativeBuildInputs ++ (with pkgs; [pre-commit]); + shellHook = '' + pre-commit install + ''; inherit LIBCLANG_PATH; }; } diff --git a/src/backends/alsa/device.rs b/src/backends/alsa/device.rs index 509b3b8..0648c0d 100644 --- a/src/backends/alsa/device.rs +++ b/src/backends/alsa/device.rs @@ -1,8 +1,10 @@ -use crate::backends::alsa::stream::AlsaStream; use crate::backends::alsa::AlsaError; +use crate::device::Channel; +use crate::device::{AudioDevice, AudioInputDevice, AudioOutputDevice, DeviceType}; +use crate::stream::{AudioInputCallback, AudioOutputCallback, StreamConfig}; use crate::{ - AudioDevice, AudioInputCallback, AudioInputDevice, AudioOutputCallback, AudioOutputDevice, - Channel, DeviceType, StreamConfig, + backends::alsa::stream::AlsaStream, device::AudioDuplexDevice, duplex::AudioDuplexCallback, + SendEverywhereButOnWeb, }; use alsa::{pcm, PCM}; use std::borrow::Cow; @@ -17,6 +19,23 @@ pub struct AlsaDevice { pub(super) direction: alsa::Direction, } +impl AlsaDevice { + fn channel_map(&self, requested_direction: alsa::Direction) -> impl Iterator { + let max_channels = if self.direction == requested_direction { + self.pcm + .hw_params_current() + .and_then(|hwp| hwp.get_channels_max()) + .unwrap_or(0) + } else { + 0 + }; + (0..max_channels as usize).map(|i| Channel { + index: i, + name: Cow::Owned(format!("Channel {}", i)), + }) + } +} + impl fmt::Debug for AlsaDevice { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("AlsaDevice") @@ -60,6 +79,10 @@ impl AudioDevice for AlsaDevice { } impl AudioInputDevice for AlsaDevice { + fn input_channel_map(&self) -> impl Iterator { + [].into_iter() + } + type StreamHandle = AlsaStream; fn default_input_config(&self) -> Result { @@ -76,6 +99,10 @@ impl AudioInputDevice for AlsaDevice { } impl AudioOutputDevice for AlsaDevice { + fn output_channel_map(&self) -> impl Iterator { + [].into_iter() + } + type StreamHandle = AlsaStream; fn default_output_config(&self) -> Result { @@ -103,8 +130,8 @@ impl AlsaDevice { } pub(super) fn new(name: &str, direction: alsa::Direction) -> Result { - let pcm = PCM::new(name, direction, true)?; - let pcm = Rc::new(pcm); + log::info!("Opening device: {name}, direction {direction:?}"); + let pcm = Rc::new(PCM::new(name, direction, true)?); Ok(Self { name: name.to_string(), 
direction, @@ -117,11 +144,12 @@ impl AlsaDevice { hwp.set_channels(config.channels as _)?; hwp.set_rate(config.samplerate as _, alsa::ValueOr::Nearest)?; if let Some(min) = config.buffer_size_range.0 { - hwp.set_buffer_size_min(min as _)?; + hwp.set_buffer_size_min(min as pcm::Frames * 2)?; } if let Some(max) = config.buffer_size_range.1 { - hwp.set_buffer_size_max(max as _)?; + hwp.set_buffer_size_max(max as pcm::Frames * 2)?; } + hwp.set_periods(2, alsa::ValueOr::Nearest)?; hwp.set_format(pcm::Format::float())?; hwp.set_access(pcm::Access::RWInterleaved)?; Ok(hwp) @@ -139,6 +167,7 @@ impl AlsaDevice { log::debug!("Apply config: hwp {hwp:#?}"); + swp.set_avail_min(hwp.get_period_size()?)?; swp.set_start_threshold(hwp.get_buffer_size()?)?; self.pcm.sw_params(&swp)?; log::debug!("Apply config: swp {swp:#?}"); @@ -146,6 +175,16 @@ impl AlsaDevice { Ok((hwp, swp, io)) } + pub(super) fn ensure_state(&self, hwp: &pcm::HwParams) -> Result { + match self.pcm.state() { + pcm::State::Suspended if hwp.can_resume() => self.pcm.resume()?, + pcm::State::Suspended => self.pcm.prepare()?, + pcm::State::Paused => return Ok(true), + _ => {} + } + Ok(false) + } + fn default_config(&self) -> Result { let samplerate = 48e3; // Default ALSA sample rate let channel_count = 2; // Stereo stream @@ -158,3 +197,77 @@ impl AlsaDevice { }) } } + +pub struct AlsaDuplexDevice { + pub(super) input: AlsaDevice, + pub(super) output: AlsaDevice, +} + +impl AudioDevice for AlsaDuplexDevice { + type Error = AlsaError; + + fn name(&self) -> Cow { + Cow::Owned(format!("{} / {}", self.input.name(), self.output.name())) + } + + fn is_config_supported(&self, config: &StreamConfig) -> bool { + let Ok((hwp, _, _)) = self.output.apply_config(config) else { + return false; + }; + let Ok(period) = hwp.get_period_size() else { + return false; + }; + let period = period as usize; + self.input + .apply_config(&StreamConfig { + buffer_size_range: (Some(period), Some(period)), + ..*config + }) + .is_ok() + } + + fn enumerate_configurations(&self) -> Option> { + Some( + self.output + .enumerate_configurations()? + .into_iter() + .filter(|config| self.is_config_supported(config)), + ) + } +} + +impl AudioDuplexDevice for AlsaDuplexDevice { + type StreamHandle = AlsaStream; + + fn default_duplex_config(&self) -> Result { + self.output.default_output_config() + } + + fn create_duplex_stream( + &self, + config: StreamConfig, + callback: Callback, + ) -> Result<::StreamHandle, Self::Error> { + AlsaStream::new_duplex( + config, + self.input.name.clone(), + self.output.name.clone(), + callback, + ) + } +} + +impl AlsaDuplexDevice { + /// Create a new duplex device from an input and output device. + pub fn new(input: AlsaDevice, output: AlsaDevice) -> Self { + Self { input, output } + } + + /// Create a full-duplex device from the given name. 
+ pub fn full_duplex(name: &str) -> Result { + Ok(Self::new( + AlsaDevice::new(name, alsa::Direction::Capture)?, + AlsaDevice::new(name, alsa::Direction::Playback)?, + )) + } +} diff --git a/src/backends/alsa/duplex.rs b/src/backends/alsa/duplex.rs new file mode 100644 index 0000000..2f44d25 --- /dev/null +++ b/src/backends/alsa/duplex.rs @@ -0,0 +1,143 @@ +use crate::channel_map::{Bitset, ChannelMap32}; +use crate::duplex::AudioDuplexCallback; +use crate::prelude::alsa::device::AlsaDevice; +use crate::prelude::alsa::stream::AlsaStream; +use crate::timestamp::Timestamp; +use crate::{ + audio_buffer::{AudioMut, AudioRef}, + backends::alsa::AlsaError, + stream::{AudioCallbackContext, AudioInput, AudioOutput, StreamConfig}, +}; +use alsa::{pcm, PollDescriptors}; +use std::sync::Arc; +use std::time::Duration; + +impl AlsaStream { + pub fn new_duplex( + stream_config: StreamConfig, + input_name: String, + output_name: String, + mut callback: Callback, + ) -> Result { + { + let (tx, rx) = super::triggerfd::trigger()?; + let join_handle = std::thread::spawn({ + move || { + let output_device = AlsaDevice::new(&output_name, alsa::Direction::Playback)?; + let (output_hwp, _, output_io) = output_device.apply_config(&stream_config)?; + let (_, period_size) = output_device.pcm.get_params()?; + let out_periods = period_size as usize; + log::info!("[Output] Period size : {out_periods}"); + let out_channels = output_hwp.get_channels()? as usize; + log::info!("[Output] Num channels: {out_channels}"); + let out_samplerate = output_hwp.get_rate()? as f64; + log::info!("[Output] Sample rate : {out_samplerate}"); + let output_config = StreamConfig { + samplerate: out_samplerate, + channels: ChannelMap32::default() + .with_indices(std::iter::repeat(1).take(out_channels)), + buffer_size_range: (Some(out_periods), Some(out_periods)), + exclusive: false, + }; + let mut out_timestamp = Timestamp::new(out_samplerate); + let mut out_buffer = vec![0f32; out_periods * out_channels]; + let out_latency = out_periods as f64 / out_samplerate; + output_device.pcm.prepare()?; + if output_device.pcm.state() != pcm::State::Running { + output_device.pcm.start()?; + } + + let input_device = AlsaDevice::new(&input_name, alsa::Direction::Capture)?; + let (input_hwp, _, input_io) = input_device.apply_config(&output_config)?; + let (_, period_size) = input_device.pcm.get_params()?; + let in_periods = period_size as usize; + log::info!("[Input] Period size : {in_periods}"); + let in_channels = input_hwp.get_channels()? as usize; + log::info!("[Input] Num channels: {in_channels}"); + let in_samplerate = input_hwp.get_rate()? as f64; + log::info!("[Input] Sample rate : {in_samplerate}"); + let mut in_timestamp = Timestamp::new(in_samplerate); + let mut in_buffer = vec![0f32; in_periods * in_channels]; + let in_latency = in_periods as f64 / in_samplerate; + input_device.pcm.prepare()?; + if input_device.pcm.state() != pcm::State::Running { + input_device.pcm.start()?; + } + let mut poll_descriptors = { + let mut buf = vec![rx.as_pollfd()]; + let num_descriptors = input_device.pcm.count() + output_device.pcm.count(); + buf.extend( + std::iter::repeat(libc::pollfd { + fd: 0, + events: 0, + revents: 0, + }) + .take(num_descriptors), + ); + buf + }; + + let _try = || loop { + let out_frames = output_device.pcm.avail_update()? as usize; + let in_frames = input_device.pcm.avail_update()? 
as usize; + if out_frames == 0 && in_frames == 0 { + let latency = in_latency.min(out_latency).round() as i32; + if alsa::poll::poll(&mut poll_descriptors, latency)? > 0 { + log::debug!("Eject requested, returning ownership of callback"); + break Ok(callback); + } + continue; + } + + log::debug!("[Output] Frames available: {out_frames}"); + let out_frames = std::cmp::min(out_frames, out_periods); + let out_len = out_frames * out_channels; + let in_frames = std::cmp::min(in_frames, in_periods); + let in_len = in_frames * in_channels; + + if let Err(err) = input_io.readi(&mut in_buffer[..in_len]) { + input_device.pcm.try_recover(err, true)?; + } + + let context = AudioCallbackContext { + timestamp: out_timestamp, + stream_config: output_config, + }; + let input = AudioInput { + timestamp: in_timestamp, + buffer: AudioRef::from_interleaved(&in_buffer[..in_len], in_channels) + .unwrap(), + }; + let output = AudioOutput { + timestamp: out_timestamp, + buffer: AudioMut::from_interleaved_mut( + &mut out_buffer[..out_len], + out_channels, + ) + .unwrap(), + }; + callback.on_audio_data(context, input, output); + + if let Err(err) = output_io.writei(&out_buffer[..out_len]) { + output_device.pcm.try_recover(err, true)?; + } + + in_timestamp += in_frames as u64; + out_timestamp += out_frames as u64; + + if input_device.ensure_state(&input_hwp)? + || output_device.ensure_state(&output_hwp)? + { + std::thread::sleep(Duration::from_secs(1)); + } + }; + _try().inspect_err(|err| log::error!("Error in duplex thread: {:?}", err)) + } + }); + Ok(Self { + eject_trigger: Arc::new(tx), + join_handle, + }) + } + } +} diff --git a/src/backends/alsa/input.rs b/src/backends/alsa/input.rs index 19ec8b9..7c02a4c 100644 --- a/src/backends/alsa/input.rs +++ b/src/backends/alsa/input.rs @@ -2,7 +2,8 @@ use crate::audio_buffer::AudioRef; use crate::backends::alsa::stream::AlsaStream; use crate::backends::alsa::AlsaError; use crate::prelude::alsa::device::AlsaDevice; -use crate::{AudioCallbackContext, AudioInput, AudioInputCallback, StreamConfig}; +use crate::stream::AudioInput; +use crate::stream::{AudioCallbackContext, AudioInputCallback, StreamConfig}; impl AlsaStream { pub(super) fn new_input( diff --git a/src/backends/alsa/mod.rs b/src/backends/alsa/mod.rs index 79c0169..4e4ea02 100644 --- a/src/backends/alsa/mod.rs +++ b/src/backends/alsa/mod.rs @@ -5,13 +5,15 @@ //! (PulseAudio, PipeWire) offer ALSA-compatible APIs so that older software can still access the //! audio devices through them. -use crate::{AudioDriver, DeviceType}; +use crate::driver::AudioDriver; +use crate::{device::DeviceType, driver::AudioDuplexDriver}; use alsa::device_name::HintIter; -use device::AlsaDevice; +use device::{AlsaDevice, AlsaDuplexDevice}; use std::borrow::Cow; use thiserror::Error; mod device; +mod duplex; mod input; mod output; mod stream; @@ -58,3 +60,32 @@ impl AudioDriver for AlsaDriver { .filter_map(|hint| AlsaDevice::new(hint.name.as_ref()?, hint.direction?).ok())) } } + +impl AudioDuplexDriver for AlsaDriver { + type DuplexDevice = AlsaDuplexDevice; + + fn default_duplex_device(&self) -> Result, Self::Error> { + let Some(input) = self.default_device(DeviceType::Input)? else { + return Ok(None); + }; + let Some(output) = self.default_device(DeviceType::Output)? else { + return Ok(None); + }; + Ok(Some(AlsaDuplexDevice::new(input, output))) + } + + fn list_duplex_devices( + &self, + ) -> Result, Self::Error> { + Ok(HintIter::new(None, c"pcm")? 
+ .filter_map(|hint| AlsaDuplexDevice::full_duplex(hint.name.as_ref()?).ok())) + } + + fn device_from_input_output( + &self, + input: Self::Device, + output: Self::Device, + ) -> Result { + Ok(AlsaDuplexDevice::new(input, output)) + } +} diff --git a/src/backends/alsa/output.rs b/src/backends/alsa/output.rs index 029f95e..bf5fc58 100644 --- a/src/backends/alsa/output.rs +++ b/src/backends/alsa/output.rs @@ -2,7 +2,8 @@ use crate::audio_buffer::AudioMut; use crate::backends::alsa::stream::AlsaStream; use crate::backends::alsa::AlsaError; use crate::prelude::alsa::device::AlsaDevice; -use crate::{AudioCallbackContext, AudioOutput, AudioOutputCallback, StreamConfig}; +use crate::stream::AudioOutput; +use crate::stream::{AudioCallbackContext, AudioOutputCallback, StreamConfig}; impl AlsaStream { pub(super) fn new_output( diff --git a/src/backends/alsa/stream.rs b/src/backends/alsa/stream.rs index ac649c1..5973c62 100644 --- a/src/backends/alsa/stream.rs +++ b/src/backends/alsa/stream.rs @@ -1,8 +1,8 @@ use crate::backends::alsa::device::AlsaDevice; use crate::backends::alsa::{triggerfd, AlsaError}; use crate::channel_map::{Bitset, ChannelMap32}; +use crate::stream::{AudioStreamHandle, StreamConfig}; use crate::timestamp::Timestamp; -use crate::{AudioStreamHandle, StreamConfig}; use alsa::pcm; use alsa::PollDescriptors; use std::sync::Arc; @@ -69,8 +69,7 @@ impl AlsaStream { log::info!("Sample rate : {samplerate}"); let stream_config = StreamConfig { samplerate, - channels: ChannelMap32::default() - .with_indices(std::iter::repeat(1).take(num_channels)), + channels: ChannelMap32::default().with_indices(0..num_channels), buffer_size_range: (Some(period_size), Some(period_size)), exclusive: false, }; diff --git a/src/backends/coreaudio.rs b/src/backends/coreaudio.rs index 179ed48..e2db359 100644 --- a/src/backends/coreaudio.rs +++ b/src/backends/coreaudio.rs @@ -19,13 +19,15 @@ use thiserror::Error; use crate::audio_buffer::{AudioBuffer, Sample}; use crate::channel_map::Bitset; -use crate::prelude::ChannelMap32; -use crate::timestamp::Timestamp; -use crate::{ - AudioCallbackContext, AudioDevice, AudioDriver, AudioInput, AudioInputCallback, - AudioInputDevice, AudioOutput, AudioOutputCallback, AudioOutputDevice, AudioStreamHandle, - Channel, DeviceType, SendEverywhereButOnWeb, StreamConfig, +use crate::channel_map::ChannelMap32; +use crate::device::{AudioDevice, AudioInputDevice, AudioOutputDevice, Channel, DeviceType}; +use crate::driver::AudioDriver; +use crate::stream::{ + AudioCallbackContext, AudioInput, AudioInputCallback, AudioOutput, AudioOutputCallback, + AudioStreamHandle, StreamConfig, }; +use crate::timestamp::Timestamp; +use crate::SendEverywhereButOnWeb; /// Type of errors from the CoreAudio backend #[derive(Debug, Error)] @@ -184,6 +186,23 @@ fn input_stream_format(sample_rate: f64, channels: ChannelMap32) -> StreamFormat impl AudioInputDevice for CoreAudioDevice { type StreamHandle = CoreAudioStream; + fn input_channel_map(&self) -> impl Iterator { + let channels = match audio_unit_from_device_id(self.device_id, true) { + Err(err) => { + eprintln!("CoreAudio error getting audio unit: {err}"); + 0 + } + Ok(audio_unit) => { + let stream_format = audio_unit.input_stream_format().unwrap(); + stream_format.channels as usize + } + }; + (0..channels).map(|ch| Channel { + index: ch, + name: Cow::Owned(format!("Channel {}", ch)), + }) + } + fn default_input_config(&self) -> Result { let audio_unit = audio_unit_from_device_id(self.device_id, true)?; let samplerate = 
audio_unit.get_property::( @@ -218,6 +237,23 @@ fn output_stream_format(sample_rate: f64, channels: ChannelMap32) -> StreamForma } impl AudioOutputDevice for CoreAudioDevice { + fn output_channel_map(&self) -> impl Iterator { + let channels = match audio_unit_from_device_id(self.device_id, false) { + Err(err) => { + eprintln!("CoreAudio error getting audio unit: {err}"); + 0 + } + Ok(audio_unit) => { + let stream_format = audio_unit.output_stream_format().unwrap(); + stream_format.channels as usize + } + }; + (0..channels).map(|ch| Channel { + index: ch, + name: Cow::Owned(format!("Channel {}", ch)), + }) + } + type StreamHandle = CoreAudioStream; fn default_output_config(&self) -> Result { diff --git a/src/backends/mod.rs b/src/backends/mod.rs index 6e8289d..009a8c1 100644 --- a/src/backends/mod.rs +++ b/src/backends/mod.rs @@ -5,7 +5,9 @@ //! Each backend is provided in its own submodule. Types should be public so that the user isn't //! limited to going through the main API if they want to choose a specific backend. -use crate::{AudioDriver, AudioInputDevice, AudioOutputDevice, DeviceType}; +use crate::device::DeviceType; +use crate::device::{AudioInputDevice, AudioOutputDevice}; +use crate::driver::AudioDriver; #[cfg(unsupported)] compile_error!("Unsupported platform (supports ALSA, CoreAudio, and WASAPI)"); @@ -111,3 +113,38 @@ pub fn default_output_device() -> impl AudioOutputDevice { #[cfg(os_wasapi)] return default_output_device_from(&wasapi::WasapiDriver); } + +/// Default duplex device from the default driver of this platform. +/// +/// "Default" here means both in terms of platform support but also can include runtime selection. +/// Therefore, it is better to use this method directly rather than first getting the default +/// driver from [`default_driver`]. +#[allow(clippy::non_minimal_cfg)] +#[allow(clippy::needless_return)] +#[cfg(any(os_alsa))] +pub fn default_duplex_device() -> impl crate::device::AudioDuplexDevice { + #[cfg(os_alsa)] + return default_duplex_device_from(&alsa::AlsaDriver); +} + +/// Returns the default duplex device for the given audio driver. +/// +/// The default device is usually the one the user has selected in its system settings. 
+pub fn default_duplex_device_from( + driver: &D, +) -> D::DuplexDevice +where + D::Device: AudioInputDevice + AudioOutputDevice, +{ + driver + .default_duplex_device() + .expect("Audio driver error") + .unwrap_or_else(|| { + driver + .device_from_input_output( + default_input_device_from(driver), + default_output_device_from(driver), + ) + .expect("Audio driver error") + }) +} diff --git a/src/backends/pipewire/device.rs b/src/backends/pipewire/device.rs index 6cec8c5..71b49ee 100644 --- a/src/backends/pipewire/device.rs +++ b/src/backends/pipewire/device.rs @@ -1,8 +1,9 @@ use super::stream::StreamHandle; -use crate::backends::pipewire::error::PipewireError; use crate::{ - AudioDevice, AudioInputCallback, AudioInputDevice, AudioOutputCallback, AudioOutputDevice, - Channel, DeviceType, SendEverywhereButOnWeb, StreamConfig, + backends::pipewire::error::PipewireError, + device::{AudioDevice, AudioInputDevice, AudioOutputDevice, DeviceType}, + stream::{AudioInputCallback, AudioOutputCallback, StreamConfig}, + SendEverywhereButOnWeb, }; use pipewire::context::Context; use pipewire::main_loop::MainLoop; @@ -33,14 +34,6 @@ impl AudioDevice for PipewireDevice { } } - fn device_type(&self) -> DeviceType { - self.device_type - } - - fn channel_map(&self) -> impl IntoIterator { - [] - } - fn is_config_supported(&self, _config: &StreamConfig) -> bool { true } @@ -69,6 +62,10 @@ impl AudioInputDevice for PipewireDevice { ) -> Result, Self::Error> { StreamHandle::new_input(&self.stream_name, stream_config, callback) } + + fn input_channel_map(&self) -> impl Iterator { + [].into_iter() + } } impl AudioOutputDevice for PipewireDevice { @@ -90,6 +87,10 @@ impl AudioOutputDevice for PipewireDevice { ) -> Result, Self::Error> { StreamHandle::new_output(&self.stream_name, stream_config, callback) } + + fn output_channel_map(&self) -> impl Iterator { + [].into_iter() + } } impl PipewireDevice { diff --git a/src/backends/pipewire/driver.rs b/src/backends/pipewire/driver.rs index abcbac7..93671a2 100644 --- a/src/backends/pipewire/driver.rs +++ b/src/backends/pipewire/driver.rs @@ -1,7 +1,6 @@ use super::error::PipewireError; -use crate::backends::pipewire::device::PipewireDevice; use crate::backends::pipewire::utils; -use crate::{AudioDriver, DeviceType}; +use crate::{backends::pipewire::device::PipewireDevice, device::DeviceType, driver::AudioDriver}; use std::borrow::Cow; use std::marker::PhantomData; diff --git a/src/backends/pipewire/stream.rs b/src/backends/pipewire/stream.rs index 72ef0ec..1b4010a 100644 --- a/src/backends/pipewire/stream.rs +++ b/src/backends/pipewire/stream.rs @@ -1,10 +1,12 @@ -use crate::audio_buffer::{AudioMut, AudioRef}; use crate::backends::pipewire::error::PipewireError; use crate::channel_map::Bitset; use crate::timestamp::Timestamp; use crate::{ - AudioCallbackContext, AudioInput, AudioInputCallback, AudioOutput, AudioOutputCallback, - AudioStreamHandle, StreamConfig, + audio_buffer::{AudioMut, AudioRef}, + stream::{ + AudioCallbackContext, AudioInput, AudioInputCallback, AudioOutput, AudioOutputCallback, + AudioStreamHandle, StreamConfig, + }, }; use libspa::buffer::Data; use libspa::param::audio::{AudioFormat, AudioInfoRaw}; diff --git a/src/backends/pipewire/utils.rs b/src/backends/pipewire/utils.rs index 7874010..0713a9e 100644 --- a/src/backends/pipewire/utils.rs +++ b/src/backends/pipewire/utils.rs @@ -1,5 +1,4 @@ -use crate::backends::pipewire::error::PipewireError; -use crate::DeviceType; +use crate::{backends::pipewire::error::PipewireError, device::DeviceType}; use 
libspa::utils::dict::DictRef; use pipewire::context::Context; use pipewire::main_loop::MainLoop; diff --git a/src/backends/wasapi/device.rs b/src/backends/wasapi/device.rs index ebc8d68..2563039 100644 --- a/src/backends/wasapi/device.rs +++ b/src/backends/wasapi/device.rs @@ -1,11 +1,10 @@ use super::{error, stream}; use crate::backends::wasapi::stream::WasapiStream; use crate::channel_map::Bitset; +use crate::device::Channel; +use crate::device::{AudioDevice, AudioInputDevice, AudioOutputDevice, DeviceType}; use crate::prelude::wasapi::util::WasapiMMDevice; -use crate::{ - AudioDevice, AudioInputCallback, AudioInputDevice, AudioOutputCallback, AudioOutputDevice, - Channel, DeviceType, StreamConfig, -}; +use crate::stream::{AudioInputCallback, AudioOutputCallback, StreamConfig}; use std::borrow::Cow; use windows::Win32::Media::Audio; @@ -38,14 +37,6 @@ impl AudioDevice for WasapiDevice { } } - fn device_type(&self) -> DeviceType { - self.device_type - } - - fn channel_map(&self) -> impl IntoIterator { - [] - } - fn is_config_supported(&self, config: &StreamConfig) -> bool { self.device_type.contains(DeviceType::OUTPUT) && stream::is_output_config_supported(self.device.clone(), config) @@ -59,6 +50,10 @@ impl AudioDevice for WasapiDevice { impl AudioInputDevice for WasapiDevice { type StreamHandle = WasapiStream; + fn input_channel_map(&self) -> impl Iterator { + [].into_iter() + } + fn default_input_config(&self) -> Result { let audio_client = self.device.activate::()?; let format = unsafe { audio_client.GetMixFormat()?.read_unaligned() }; @@ -89,6 +84,10 @@ impl AudioInputDevice for WasapiDevice { impl AudioOutputDevice for WasapiDevice { type StreamHandle = WasapiStream; + fn output_channel_map(&self) -> impl Iterator { + [].into_iter() + } + fn default_output_config(&self) -> Result { let audio_client = self.device.activate::()?; let format = unsafe { audio_client.GetMixFormat()?.read_unaligned() }; diff --git a/src/backends/wasapi/driver.rs b/src/backends/wasapi/driver.rs index f505d1d..0644d4a 100644 --- a/src/backends/wasapi/driver.rs +++ b/src/backends/wasapi/driver.rs @@ -7,7 +7,8 @@ use windows::Win32::System::Com; use super::{error, util}; -use crate::{AudioDriver, DeviceType}; +use crate::device::DeviceType; +use crate::driver::AudioDriver; /// The WASAPI driver. 
#[derive(Debug, Clone, Default)] diff --git a/src/backends/wasapi/stream.rs b/src/backends/wasapi/stream.rs index 815851e..0317552 100644 --- a/src/backends/wasapi/stream.rs +++ b/src/backends/wasapi/stream.rs @@ -1,12 +1,11 @@ use super::error; -use crate::audio_buffer::AudioMut; +use crate::audio_buffer::{AudioMut, AudioRef}; use crate::backends::wasapi::util::WasapiMMDevice; use crate::channel_map::Bitset; -use crate::prelude::{AudioRef, Timestamp}; -use crate::{ - AudioCallbackContext, AudioInput, AudioInputCallback, AudioOutput, AudioOutputCallback, - AudioStreamHandle, StreamConfig, +use crate::stream::{ + AudioCallbackContext, AudioInput, AudioInputCallback, AudioOutput, AudioOutputCallback, AudioStreamHandle, StreamConfig }; +use crate::timestamp::Timestamp; use duplicate::duplicate_item; use std::marker::PhantomData; use std::ptr::NonNull; @@ -483,7 +482,7 @@ pub(crate) fn is_output_config_supported( device: WasapiMMDevice, stream_config: &StreamConfig, ) -> bool { - let mut try_ = || unsafe { + let try_ = || unsafe { let audio_client: Audio::IAudioClient = device.activate()?; let sharemode = if stream_config.exclusive { Audio::AUDCLNT_SHAREMODE_EXCLUSIVE diff --git a/src/device.rs b/src/device.rs new file mode 100644 index 0000000..8ae8cd2 --- /dev/null +++ b/src/device.rs @@ -0,0 +1,198 @@ +use bitflags::bitflags; +use crate::duplex::AudioDuplexCallback; +use crate::stream::{AudioInputCallback, AudioOutputCallback, AudioStreamHandle, StreamConfig}; +use crate::SendEverywhereButOnWeb; +use std::borrow::Cow; + +/// Trait for types describing audio devices. Audio devices have zero or more inputs and outputs, +/// and depending on the driver, can be duplex devices which can provide both of them at the same +/// time natively. +pub trait AudioDevice { + /// Type of errors that can happen when using this device. + type Error: std::error::Error; + + /// Device display name + fn name(&self) -> Cow; + + /// Not all configuration values make sense for a particular device, and this method tests a + /// configuration to see if it can be used in an audio stream. + fn is_config_supported(&self, config: &StreamConfig) -> bool; + + /// Enumerate all possible configurations this device supports. If that is not provided by + /// the device, and not easily generated manually, this will return `None`. + fn enumerate_configurations(&self) -> Option>; +} + +/// Trait for types which can provide input streams. +/// +/// Input devices require a [`AudioInputCallback`] which receives the audio data from the input +/// device, and processes it. +pub trait AudioInputDevice: AudioDevice { + /// Map of input channels. This can be used to get the index of channels to open when creating a stream. + fn input_channel_map(&self) -> impl Iterator; + + /// Type of the resulting stream. This stream can be used to control the audio processing + /// externally, or stop it completely and give back ownership of the callback with + /// [`AudioStreamHandle::eject`]. + type StreamHandle: AudioStreamHandle; + + /// Return the default configuration for an input stream. + fn default_input_config(&self) -> Result; + + /// Creates an input stream with the provided stream configuration. For this call to be + /// valid, [`AudioDevice::is_config_supported`] should have returned `true` on the provided + /// configuration. + /// + /// An input callback is required to process the audio, whose ownership will be transferred + /// to the audio stream. 
+ fn create_input_stream( + &self, + stream_config: StreamConfig, + callback: Callback, + ) -> Result, Self::Error>; + + /// Creates an input stream from the default configuration given by [`Self::default_input_configuration`]. + fn default_input_stream( + &self, + callback: Callback, + ) -> Result, Self::Error> { + self.create_input_stream(self.default_input_config()?, callback) + } +} + +/// Trait for types which can provide output streams. +/// +/// Output devices require a [`AudioOutputCallback`] which receives the audio data from the output +/// device, and processes it. +pub trait AudioOutputDevice: AudioDevice { + /// Map of output channels. This can be used to get the index of channels to open when creating a stream. + fn output_channel_map(&self) -> impl Iterator; + + /// Type of the resulting stream. This stream can be used to control the audio processing + /// externally, or stop it completely and give back ownership of the callback with + /// [`AudioStreamHandle::eject`]. + type StreamHandle: AudioStreamHandle; + + /// Return the default configuration for an output stream. + fn default_output_config(&self) -> Result; + + /// Creates an output stream with the provided stream configuration. For this call to be + /// valid, [`AudioDevice::is_config_supported`] should have returned `true` on the provided + /// configuration. + /// + /// An output callback is required to process the audio, whose ownership will be transferred + /// to the audio stream. + fn create_output_stream( + &self, + stream_config: StreamConfig, + callback: Callback, + ) -> Result, Self::Error>; + + /// Creates an output stream from the default configuration given by [`Self::default_output_configuration`]. + fn default_output_stream( + &self, + callback: Callback, + ) -> Result, Self::Error> { + self.create_output_stream(self.default_output_config()?, callback) + } +} + +/// Trait for types which can provide duplex streams. +/// +/// Output devices require a [`AudioDuplexCallback`] which receives the audio data from the device, and processes it. +pub trait AudioDuplexDevice: AudioDevice { + /// Type of the resulting stream. This stream can be used to control the audio processing + /// externally, or stop it completely and give back ownership of the callback with + /// [`AudioStreamHandle::eject`]. + type StreamHandle: AudioStreamHandle; + + /// Return the default configuration for a duplex stream. + fn default_duplex_config(&self) -> Result; + + /// Creates a duplex stream with the provided stream configuration. For this call to be + /// valid, [`AudioDevice::is_config_supported`] should have returned `true` on the provided + /// configuration. + /// + /// A duplex callback is required to process the audio, whose ownership will be transferred + /// to the audio stream. + fn create_duplex_stream( + &self, + config: StreamConfig, + callback: Callback, + ) -> Result<::StreamHandle, Self::Error>; + + /// Creates a duplex stream from the default configuration given by [`Self::default_duplex_configuration`]. + fn default_duplex_stream( + &self, + callback: Callback, + ) -> Result<::StreamHandle, Self::Error> { + self.create_duplex_stream(self.default_duplex_config()?, callback) + } +} + + +bitflags! { + /// Represents the types/capabilities of an audio device. + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] + pub struct DeviceType: u32 { + /// Device supports audio input. + const INPUT = 1 << 0; + + /// Device supports audio output. + const OUTPUT = 1 << 1; + + /// Physical audio device (hardware). 
+ const PHYSICAL = 1 << 2; + + /// Virtual/software application device. + const APPLICATION = 1 << 3; + + /// This device is set as default + const DEFAULT = 1 << 4; + + /// Device that supports both input and output. + const DUPLEX = Self::INPUT.bits() | Self::OUTPUT.bits(); + } +} + + +impl DeviceType { + /// Returns true if this device type has the input capability. + pub fn is_input(&self) -> bool { + self.contains(Self::INPUT) + } + + /// Returns true if this device type has the output capability. + pub fn is_output(&self) -> bool { + self.contains(Self::OUTPUT) + } + + /// Returns true if this device type is a physical device. + pub fn is_physical(&self) -> bool { + self.contains(Self::PHYSICAL) + } + + /// Returns true if this device type is an application/virtual device. + pub fn is_application(&self) -> bool { + self.contains(Self::APPLICATION) + } + + /// Returns true if this device is set as default + pub fn is_default(&self) -> bool { + self.contains(Self::DEFAULT) + } + + /// Returns true if this device type supports both input and output. + pub fn is_duplex(&self) -> bool { + self.contains(Self::DUPLEX) + } +} + +/// Audio channel description. +#[derive(Debug, Clone)] +pub struct Channel<'a> { + /// Index of the channel in the device + pub index: usize, + /// Display name for the channel, if available, else a generic name like "Channel 1" + pub name: Cow<'a, str>, +} diff --git a/src/driver.rs b/src/driver.rs new file mode 100644 index 0000000..ef6a6b0 --- /dev/null +++ b/src/driver.rs @@ -0,0 +1,52 @@ +use crate::device::DeviceType; +use crate::device::{AudioDevice, AudioDuplexDevice}; +use std::borrow::Cow; + +/// Audio drivers provide access to the inputs and outputs of physical devices. +/// Several drivers might provide the same accesses, some sharing it with other applications, +/// while others work in exclusive mode. +pub trait AudioDriver { + /// Type of errors that can happen when using this audio driver. + type Error: std::error::Error; + /// Type of audio devices this driver provides. + type Device: AudioDevice; + + /// Driver display name. + const DISPLAY_NAME: &'static str; + + /// Runtime version of the audio driver. If there is a difference between "client" and + /// "server" versions, then this should reflect the server version. + fn version(&self) -> Result, Self::Error>; + + /// Default device of the given type. This is most often tied to the audio settings at the + /// operating system level. + fn default_device(&self, device_type: DeviceType) -> Result, Self::Error>; + + /// List all devices available through this audio driver. + fn list_devices(&self) -> Result, Self::Error>; +} + +/// Audio drivers that support duplex (simultaneous input/output) devices. +/// This extends the basic [`AudioDriver`] trait with duplex-specific functionality. +pub trait AudioDuplexDriver: AudioDriver { + /// Type of duplex audio devices this driver provides. + type DuplexDevice: AudioDuplexDevice; + + /// Returns the default duplex device for this driver, if one exists. + /// This is typically determined by the system's audio settings. + fn default_duplex_device(&self) -> Result, Self::Error>; + + /// Lists all available duplex devices supported by this driver. + /// Returns an iterator over the duplex devices. + fn list_duplex_devices( + &self, + ) -> Result, Self::Error>; + + /// Creates a duplex device from separate input and output devices. + /// This allows combining independent input and output devices into a single duplex device. 
+ fn device_from_input_output( + &self, + input: Self::Device, + output: Self::Device, + ) -> Result; +} diff --git a/src/duplex.rs b/src/duplex.rs index 027d111..6cd4e9b 100644 --- a/src/duplex.rs +++ b/src/duplex.rs @@ -2,13 +2,13 @@ //! //! This module includes a proxy for gathering an input audio stream, and optionally process it to resample it to the //! output sample rate. -use crate::audio_buffer::AudioRef; -use crate::channel_map::Bitset; -use crate::{ - AudioCallbackContext, AudioDevice, AudioInput, AudioInputCallback, AudioInputDevice, - AudioOutput, AudioOutputCallback, AudioOutputDevice, AudioStreamHandle, SendEverywhereButOnWeb, - StreamConfig, +use crate::device::{AudioInputDevice, AudioOutputDevice}; +use crate::stream::{ + AudioCallbackContext, AudioInputCallback, AudioOutputCallback, AudioStreamHandle, StreamConfig, }; +use crate::stream::{AudioInput, AudioOutput}; +use crate::SendEverywhereButOnWeb; +use crate::{audio_buffer::AudioRef, channel_map::Bitset, device::AudioDevice}; use fixed_resample::{PushStatus, ReadStatus, ResamplingChannelConfig}; use std::error::Error; use std::num::NonZeroUsize; @@ -33,12 +33,19 @@ pub trait AudioDuplexCallback: 'static + SendEverywhereButOnWeb { } /// Type which handles both a duplex stream handle. +/// +/// # Type Parameters +/// +/// * `Callback` - The type of the callback implementation +/// * `Error` - The type of error that can occur pub struct DuplexStream { _input_stream: Box>, _output_stream: Box, Error = Error>>, } /// Input proxy for transferring an input signal to a separate output callback to be processed as a duplex stream. +/// +/// This struct handles the resampling of input audio data to match the output sample rate. pub struct InputProxy { producer: Option>, receive_output_samplerate: rtrb::Consumer, @@ -70,6 +77,8 @@ impl InputProxy { impl AudioInputCallback for InputProxy { /// Processes incoming audio data and stores it in the internal buffer. /// + /// This method handles sample rate conversion between input and output streams. + /// /// Handles sample rate conversion between input and output streams. /// /// # Arguments @@ -142,6 +151,11 @@ impl AudioInputCallback for InputProxy { #[derive(Debug, Error)] #[error(transparent)] /// Represents errors that can occur during duplex stream operations. +/// +/// # Type Parameters +/// +/// * `InputError` - The type of error that can occur in the input stream +/// * `OutputError` - The type of error that can occur in the output stream pub enum DuplexCallbackError { /// No input channels given #[error("No input channels given")] @@ -155,6 +169,8 @@ pub enum DuplexCallbackError { } /// [`AudioOutputCallback`] implementation for which runs the provided [`AudioDuplexCallback`]. +/// +/// This struct handles the processing of audio data in a duplex stream. 
pub struct DuplexCallback { input: Option>, receive_consumer: rtrb::Consumer>, @@ -230,6 +246,10 @@ impl AudioOutputCallback for DuplexCallback` @@ -238,7 +258,7 @@ impl AudioOutputCallback for DuplexCallback = Result< /// # Example /// /// ```no_run -/// use interflow::duplex::AudioDuplexCallback; +/// use interflow::duplex::{AudioDuplexCallback, DuplexStreamConfig}; /// use interflow::prelude::*; /// /// struct MyCallback; diff --git a/src/lib.rs b/src/lib.rs index f8cdd06..c26369b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,6 @@ #![doc = include_str!("../README.md")] #![warn(missing_docs)] -use bitflags::bitflags; use std::borrow::Cow; use std::fmt; use std::fmt::Formatter; @@ -9,37 +8,18 @@ use std::fmt::Formatter; use crate::audio_buffer::{AudioMut, AudioRef}; use crate::channel_map::ChannelMap32; use crate::timestamp::Timestamp; +use crate::device::DeviceType; pub mod audio_buffer; pub mod backends; pub mod channel_map; +pub mod device; +pub mod driver; pub mod duplex; pub mod prelude; +pub mod stream; pub mod timestamp; -bitflags! { - /// Represents the types/capabilities of an audio device. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] - pub struct DeviceType: u32 { - /// Device supports audio input. - const INPUT = 1 << 0; - - /// Device supports audio output. - const OUTPUT = 1 << 1; - - /// Physical audio device (hardware). - const PHYSICAL = 1 << 2; - - /// Virtual/software application device. - const APPLICATION = 1 << 3; - - /// This device is set as default - const DEFAULT = 1 << 4; - - /// Device that supports both input and output. - const DUPLEX = Self::INPUT.bits() | Self::OUTPUT.bits(); - } -} /// Audio drivers provide access to the inputs and outputs of devices. /// Several drivers might provide the same accesses, some sharing it with other applications, @@ -65,37 +45,6 @@ pub trait AudioDriver { fn list_devices(&self) -> Result, Self::Error>; } -impl DeviceType { - /// Returns true if this device type has the input capability. - pub fn is_input(&self) -> bool { - self.contains(Self::INPUT) - } - - /// Returns true if this device type has the output capability. - pub fn is_output(&self) -> bool { - self.contains(Self::OUTPUT) - } - - /// Returns true if this device type is a physical device. - pub fn is_physical(&self) -> bool { - self.contains(Self::PHYSICAL) - } - - /// Returns true if this device type is an application/virtual device. - pub fn is_application(&self) -> bool { - self.contains(Self::APPLICATION) - } - - /// Returns true if this device is set as default - pub fn is_default(&self) -> bool { - self.contains(Self::DEFAULT) - } - - /// Returns true if this device type supports both input and output. - pub fn is_duplex(&self) -> bool { - self.contains(Self::DUPLEX) - } -} /// Configuration for an audio stream. #[derive(Debug, Clone, Copy, PartialEq)] @@ -155,7 +104,8 @@ pub trait AudioDevice { } /// Marker trait for values which are [Send] everywhere but on the web (as WASM does not yet have -/// web targets. +/// proper threads, and implementation of audio engines on WASM are either separate modules or a single module in a +/// push configuration). /// /// This should only be used to define the traits and should not be relied upon in external code. /// @@ -166,7 +116,8 @@ pub trait SendEverywhereButOnWeb: 'static + Send {} impl SendEverywhereButOnWeb for T {} /// Marker trait for values which are [Send] everywhere but on the web (as WASM does not yet have -/// web targets. 
+/// proper threads, and implementation of audio engines on WASM are either separate modules or a single module in a +/// push configuration). /// /// This should only be used to define the traits and should not be relied upon in external code. /// @@ -175,134 +126,3 @@ impl SendEverywhereButOnWeb for T {} pub trait SendEverywhereButOnWeb {} #[cfg(wasm)] impl SendEverywhereButOnWeb for T {} - -/// Trait for types which can provide input streams. -/// -/// Input devices require a [`AudioInputCallback`] which receives the audio data from the input -/// device, and processes it. -pub trait AudioInputDevice: AudioDevice { - /// Type of the resulting stream. This stream can be used to control the audio processing - /// externally, or stop it completely and give back ownership of the callback with - /// [`AudioStreamHandle::eject`]. - type StreamHandle: AudioStreamHandle; - - /// Return the default configuration for this device, if there is one. The returned configuration *must* be - /// valid according to [`Self::is_config_supported`]. - fn default_input_config(&self) -> Result; - - /// Creates an input stream with the provided stream configuration. For this call to be - /// valid, [`AudioDevice::is_config_supported`] should have returned `true` on the provided - /// configuration. - /// - /// An input callback is required to process the audio, whose ownership will be transferred - /// to the audio stream. - fn create_input_stream( - &self, - stream_config: StreamConfig, - callback: Callback, - ) -> Result, Self::Error>; - - /// Create an input stream with the default configuration (as returned by [`Self::default_input_config`]). - /// - /// # Arguments - /// - /// - `callback`: Callback to process the audio input - fn default_input_stream( - &self, - callback: Callback, - ) -> Result, Self::Error> { - self.create_input_stream(self.default_input_config()?, callback) - } -} - -/// Trait for types which can provide output streams. -/// -/// Output devices require a [`AudioOutputCallback`] which receives the audio data from the output -/// device, and processes it. -pub trait AudioOutputDevice: AudioDevice { - /// Type of the resulting stream. This stream can be used to control the audio processing - /// externally, or stop it completely and give back ownership of the callback with - /// [`AudioStreamHandle::eject`]. - type StreamHandle: AudioStreamHandle; - - /// Return the default output configuration for this device, if it exists - fn default_output_config(&self) -> Result; - - /// Creates an output stream with the provided stream configuration. For this call to be - /// valid, [`AudioDevice::is_config_supported`] should have returned `true` on the provided - /// configuration. - /// - /// An output callback is required to process the audio, whose ownership will be transferred - /// to the audio stream. - fn create_output_stream( - &self, - stream_config: StreamConfig, - callback: Callback, - ) -> Result, Self::Error>; - - /// Create an output stream using the default configuration as returned by [`Self::default_output_config`]. - /// - /// # Arguments - /// - /// - `callback`: Output callback to generate audio data with. - fn default_output_stream( - &self, - callback: Callback, - ) -> Result, Self::Error> { - self.create_output_stream(self.default_output_config()?, callback) - } -} - -/// Trait for types which handles an audio stream (input or output). -pub trait AudioStreamHandle { - /// Type of errors which have caused the stream to fail. 
- type Error: std::error::Error; - - /// Eject the stream, returning ownership of the callback. - /// - /// An error can occur when an irrecoverable error has occured and ownership has been lost - /// already. - fn eject(self) -> Result; -} - -#[duplicate::duplicate_item( - name bufty; - [AudioInput] [AudioRef < 'a, T >]; - [AudioOutput] [AudioMut < 'a, T >]; -)] -/// Plain-old-data object holding references to the audio buffer and the associated time-keeping -/// [`Timestamp`]. This timestamp is associated with the stream, and in the cases where the -/// driver provides timing information, it is used instead of relying on sample-counting. -pub struct name<'a, T> { - /// Associated time stamp for this callback. The time represents the duration for which the - /// stream has been opened, and is either provided by the driver if available, or is kept up - /// manually by the library. - pub timestamp: Timestamp, - /// Audio buffer data. - pub buffer: bufty, -} - -/// Plain-old-data object holding the passed-in stream configuration, as well as a general -/// callback timestamp, which can be different from the input and output streams in case of -/// cross-stream latencies; differences in timing can indicate desync. -pub struct AudioCallbackContext { - /// Passed-in stream configuration. Values have been updated where necessary to correspond to - /// the actual stream properties. - pub stream_config: StreamConfig, - /// Callback-wide timestamp. - pub timestamp: Timestamp, -} - -/// Trait of types which process input audio data. This is the trait that users will want to -/// implement when processing an input device. -pub trait AudioInputCallback { - /// Callback called when input data is available to be processed. - fn on_input_data(&mut self, context: AudioCallbackContext, input: AudioInput); -} - -/// Trait of types which process output audio data. This is the trait that users will want to -/// implement when processing an output device. -pub trait AudioOutputCallback { - /// Callback called when output data is available to be processed. - fn on_output_data(&mut self, context: AudioCallbackContext, input: AudioOutput); -} diff --git a/src/prelude.rs b/src/prelude.rs index 1588298..076e175 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -4,7 +4,12 @@ #[cfg(os_wasapi)] pub use crate::backends::wasapi::prelude::*; pub use crate::backends::*; -pub use crate::duplex::{ - create_duplex_stream, AudioDuplexCallback, DuplexStreamConfig, DuplexStreamHandle, -}; pub use crate::*; + +pub use device::{AudioDevice, AudioDuplexDevice, AudioInputDevice, AudioOutputDevice, DeviceType}; +pub use driver::{AudioDriver, AudioDuplexDriver}; +pub use duplex::{create_duplex_stream, AudioDuplexCallback}; +pub use stream::{ + AudioCallbackContext, AudioInput, AudioInputCallback, AudioOutput, AudioOutputCallback, + AudioStreamHandle, StreamConfig, +}; diff --git a/src/stream.rs b/src/stream.rs new file mode 100644 index 0000000..3524700 --- /dev/null +++ b/src/stream.rs @@ -0,0 +1,79 @@ +use crate::audio_buffer::{AudioMut, AudioRef}; +use crate::channel_map::ChannelMap32; +use crate::timestamp::Timestamp; + +/// Configuration for an audio stream. +#[derive(Debug, Clone, Copy, PartialEq)] +pub struct StreamConfig { + /// Configured sample rate of the requested stream. The opened stream can have a different + /// sample rate, so don't rely on this parameter being correct at runtime. + pub samplerate: f64, + /// Map of channels requested by the stream. Entries correspond in order to + /// [AudioDevice::channel_map]. 
+ /// + /// Some drivers allow specifying which channels are going to be opened and available through + /// the audio buffers. For other drivers, only the number of requested channels is used, and + /// order does not matter. + pub channels: ChannelMap32, + /// Range of preferential buffer sizes. The library will make a bast-effort attempt at + /// honoring this setting, and in future versions may provide additional buffering to ensure + /// it, but for now you should not make assumptions on buffer sizes based on this setting. + pub buffer_size_range: (Option, Option), + /// Whether the device should be exclusively held (meaning no other application can open the + /// same device). + pub exclusive: bool, +} + +#[duplicate::duplicate_item( + name bufty; + [AudioInput] [AudioRef < 'a, T >]; + [AudioOutput] [AudioMut < 'a, T >]; +)] +/// Plain-old-data object holding references to the audio buffer and the associated time-keeping +/// [`Timestamp`]. This timestamp is associated with the stream, and in the cases where the +/// driver provides timing information, it is used instead of relying on sample-counting. +pub struct name<'a, T> { + /// Associated time stamp for this callback. The time represents the duration for which the + /// stream has been opened, and is either provided by the driver if available, or is kept up + /// manually by the library. + pub timestamp: Timestamp, + /// Audio buffer data. + pub buffer: bufty, +} + +/// Trait for types which handles an audio stream (input or output). +pub trait AudioStreamHandle { + /// Type of errors which have caused the stream to fail. + type Error: std::error::Error; + + /// Eject the stream, returning ownership of the callback. + /// + /// An error can occur when an irrecoverable error has occurred and ownership has been lost + /// already. + fn eject(self) -> Result; +} + +/// Plain-old-data object holding the passed-in stream configuration, as well as a general +/// callback timestamp, which can be different from the input and output streams in case of +/// cross-stream latencies; differences in timing can indicate desync. +pub struct AudioCallbackContext { + /// Passed-in stream configuration. Values have been updated where necessary to correspond to + /// the actual stream properties. + pub stream_config: StreamConfig, + /// Callback-wide timestamp. + pub timestamp: Timestamp, +} + +/// Trait of types which process input audio data. This is the trait that users will want to +/// implement when processing an input device. +pub trait AudioInputCallback { + /// Callback called when input data is available to be processed. + fn on_input_data(&mut self, context: AudioCallbackContext, input: AudioInput); +} + +/// Trait of types which process output audio data. This is the trait that users will want to +/// implement when processing an output device. +pub trait AudioOutputCallback { + /// Callback called when output data is available to be processed. + fn on_output_data(&mut self, context: AudioCallbackContext, input: AudioOutput); +}
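The duplex path added here is easiest to read alongside the updated `examples/duplex.rs`. Below is a minimal sketch of that flow, assuming the `f32` sample type and the `on_audio_data` signature implied by the call sites in this patch (the `RingMod` example and `src/backends/alsa/duplex.rs`); the `Gain` callback and its constants are illustrative only, and `default_duplex_device()` is only compiled behind `cfg(os_alsa)` in this patch.

```rust
use interflow::prelude::*;

/// Hypothetical callback: copies the first input channel to a mono output with gain.
struct Gain(f32);

impl AudioDuplexCallback for Gain {
    fn on_audio_data(
        &mut self,
        _context: AudioCallbackContext,
        input: AudioInput<f32>,
        mut output: AudioOutput<f32>,
    ) {
        // Clamp to the shorter buffer, as the RingMod example does after its underrun check.
        let frames = output.buffer.num_samples().min(input.buffer.num_samples());
        for i in 0..frames {
            let sample = input.buffer.get_frame(i)[0];
            output.buffer.set_mono(i, sample * self.0);
        }
    }
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let device = default_duplex_device();
    let mut config = device.default_duplex_config()?;
    // Request 128..512 frames; the ALSA backend in this patch doubles these bounds
    // and asks for two periods.
    config.buffer_size_range = (Some(128), Some(512));
    let stream = device.create_duplex_stream(config, Gain(0.5))?;
    println!("Press Enter to stop");
    std::io::stdin().read_line(&mut String::new())?;
    // Eject stops the stream and returns ownership of the callback.
    let _gain = stream.eject()?;
    Ok(())
}
```

As in the example, `eject()` trips the trigger fd polled in the duplex loop, joins the ALSA thread, and hands the callback back to the caller.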
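At the driver level, `AudioDuplexDriver` (`src/driver.rs`) extends `AudioDriver` with duplex enumeration and with `device_from_input_output` for combining two independent halves; `AlsaDriver` is the only implementor in this patch. A sketch generic over any duplex-capable driver, following the shape of `enumerate_duplex_devices` in `examples/util/enumerate.rs` (the `list_duplex` function name is illustrative):

```rust
use interflow::prelude::*;
use std::error::Error;

fn list_duplex<Driver: AudioDuplexDriver>(driver: &Driver) -> Result<(), Box<dyn Error>>
where
    Driver::Error: 'static,
{
    eprintln!("Driver: {} ({})", Driver::DISPLAY_NAME, driver.version()?);

    // System default, if the backend reports one (ALSA pairs the default
    // capture and playback PCMs).
    if let Some(device) = driver.default_duplex_device()? {
        eprintln!("Default duplex device: {}", device.name());
    }

    // Every full-duplex device the backend can open.
    for device in driver.list_duplex_devices()? {
        eprintln!("\t{}", device.name());
    }

    // Independent halves can also be combined explicitly.
    let input = driver.default_device(DeviceType::INPUT)?.expect("no default input");
    let output = driver.default_device(DeviceType::OUTPUT)?.expect("no default output");
    let _combined = driver.device_from_input_output(input, output)?;
    Ok(())
}
```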
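The old `AudioDevice::channel_map` is split into `AudioInputDevice::input_channel_map` and `AudioOutputDevice::output_channel_map`, both yielding the `Channel { index, name }` struct from `src/device.rs`. A small illustrative helper, assuming a device implementing both traits; note that in this patch only the CoreAudio backend populates the map, while the ALSA, PipeWire, and WASAPI backends return empty iterators:

```rust
use interflow::prelude::*;

// Illustrative helper (not part of the crate): dump the channels a device exposes.
fn print_channel_maps(device: &(impl AudioInputDevice + AudioOutputDevice)) {
    for ch in device.input_channel_map() {
        eprintln!("in  #{}: {}", ch.index, ch.name);
    }
    for ch in device.output_channel_map() {
        eprintln!("out #{}: {}", ch.index, ch.name);
    }
}
```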