From 2cccf99d505a5055ee099f9ddbe92b59f89f6177 Mon Sep 17 00:00:00 2001 From: Michael Hills Date: Sun, 11 Oct 2020 23:27:27 +1100 Subject: [PATCH 1/3] Refactor coreaudio host and add iOS support + ios-feedback example --- Cargo.toml | 7 +- examples/ios-feedback/Cargo.toml | 17 + examples/ios-feedback/README.md | 25 + examples/ios-feedback/build_rust_deps.sh | 12 + .../project.pbxproj | 429 ++++++++ .../contents.xcworkspacedata | 7 + .../xcshareddata/IDEWorkspaceChecks.plist | 8 + .../xcschemes/xcschememanagement.plist | 19 + examples/ios-feedback/ios-src/AppDelegate.h | 15 + examples/ios-feedback/ios-src/AppDelegate.m | 49 + .../Base.lproj/LaunchScreen.storyboard | 25 + .../ios-src/Base.lproj/Main.storyboard | 24 + examples/ios-feedback/ios-src/Info.plist | 49 + .../ios-feedback/ios-src/ViewController.h | 14 + .../ios-feedback/ios-src/ViewController.m | 22 + examples/ios-feedback/ios-src/main.m | 18 + examples/ios-feedback/src/feedback.rs | 108 ++ examples/ios-feedback/src/lib.rs | 6 + src/host/coreaudio/ios/enumerate.rs | 43 + src/host/coreaudio/ios/mod.rs | 430 ++++++++ src/host/coreaudio/{ => macos}/enumerate.rs | 4 +- src/host/coreaudio/macos/mod.rs | 842 ++++++++++++++++ src/host/coreaudio/mod.rs | 919 +----------------- 23 files changed, 2218 insertions(+), 874 deletions(-) create mode 100644 examples/ios-feedback/Cargo.toml create mode 100644 examples/ios-feedback/README.md create mode 100755 examples/ios-feedback/build_rust_deps.sh create mode 100644 examples/ios-feedback/cpal-ios-example.xcodeproj/project.pbxproj create mode 100644 examples/ios-feedback/cpal-ios-example.xcodeproj/project.xcworkspace/contents.xcworkspacedata create mode 100644 examples/ios-feedback/cpal-ios-example.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist create mode 100644 examples/ios-feedback/cpal-ios-example.xcodeproj/xcuserdata/mikeh.xcuserdatad/xcschemes/xcschememanagement.plist create mode 100644 examples/ios-feedback/ios-src/AppDelegate.h create mode 100644 
examples/ios-feedback/ios-src/AppDelegate.m create mode 100644 examples/ios-feedback/ios-src/Base.lproj/LaunchScreen.storyboard create mode 100644 examples/ios-feedback/ios-src/Base.lproj/Main.storyboard create mode 100644 examples/ios-feedback/ios-src/Info.plist create mode 100644 examples/ios-feedback/ios-src/ViewController.h create mode 100644 examples/ios-feedback/ios-src/ViewController.m create mode 100644 examples/ios-feedback/ios-src/main.m create mode 100644 examples/ios-feedback/src/feedback.rs create mode 100644 examples/ios-feedback/src/lib.rs create mode 100644 src/host/coreaudio/ios/enumerate.rs create mode 100644 src/host/coreaudio/ios/mod.rs rename src/host/coreaudio/{ => macos}/enumerate.rs (98%) create mode 100644 src/host/coreaudio/macos/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 0e8f68c00..7eeb2ce40 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,10 +35,15 @@ parking_lot = "0.11" jack = { version = "0.6.5", optional = true } [target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies] -coreaudio-rs = { version = "0.9.1", default-features = false, features = ["audio_unit", "core_audio"] } core-foundation-sys = "0.6.2" # For linking to CoreFoundation.framework and handling device name `CFString`s. mach = "0.3" # For access to mach_timebase type. 
+[target.'cfg(target_os = "macos")'.dependencies] +coreaudio-rs = { version = "0.10.0", default-features = false, features = ["audio_unit", "core_audio"] } + +[target.'cfg(target_os = "ios")'.dependencies] +coreaudio-rs = { version = "0.10.0", default-features = false, features = ["audio_unit", "core_audio", "audio_toolbox"] } + [target.'cfg(target_os = "emscripten")'.dependencies] stdweb = { version = "0.1.3", default-features = false } diff --git a/examples/ios-feedback/Cargo.toml b/examples/ios-feedback/Cargo.toml new file mode 100644 index 000000000..af9637b84 --- /dev/null +++ b/examples/ios-feedback/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "cpal-ios-example" +version = "0.1.0" +authors = ["Michael Hills "] +edition = "2018" + +[lib] +name = "cpal_ios_example" +crate-type = ["staticlib"] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +cpal = { path = "../.." } +anyhow = "1.0.12" +ringbuf = "0.1.6" + diff --git a/examples/ios-feedback/README.md b/examples/ios-feedback/README.md new file mode 100644 index 000000000..dc6998ebc --- /dev/null +++ b/examples/ios-feedback/README.md @@ -0,0 +1,25 @@ +# iOS Feedback Example + +This example is an Xcode project that exercises both input and output +audio streams. Audio samples are read in from your microphone and then +routed to your audio output device with a small but noticeable delay +so you can verify it is working correctly. + +To build the example you will need to install `cargo-lipo`. While not +necessary for building iOS binaries, it is used to build a universal +binary (x86 for simulator and aarch64 for device.) + +``` +cargo install cargo-lipo +``` + +Then open the Xcode project and click run. A hook in the iOS application +lifecycle calls into the Rust code to start the input/output feedback +loop and immediately returns back control. + +Before calling into Rust, the AVAudioSession category is configured.
+This is important for controlling how audio is shared with the rest +of the system when your app is in the foreground. One example is +controlling whether other apps can play music in the background. +More information [here](https://developer.apple.com/library/archive/documentation/Audio/Conceptual/AudioSessionProgrammingGuide/AudioSessionCategoriesandModes/AudioSessionCategoriesandModes.html#//apple_ref/doc/uid/TP40007875-CH10). + diff --git a/examples/ios-feedback/build_rust_deps.sh b/examples/ios-feedback/build_rust_deps.sh new file mode 100755 index 000000000..aa5ee3134 --- /dev/null +++ b/examples/ios-feedback/build_rust_deps.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +set -e + +PATH=$PATH:$HOME/.cargo/bin + +# If you want your build to run faster, add a "--targets x86_64-apple-ios" for just using the ios simulator. +if [ -n "${IOS_TARGETS}" ]; then + cargo lipo --targets ${IOS_TARGETS} +else + cargo lipo +fi diff --git a/examples/ios-feedback/cpal-ios-example.xcodeproj/project.pbxproj b/examples/ios-feedback/cpal-ios-example.xcodeproj/project.pbxproj new file mode 100644 index 000000000..fde407c6a --- /dev/null +++ b/examples/ios-feedback/cpal-ios-example.xcodeproj/project.pbxproj @@ -0,0 +1,429 @@ +// !$*UTF8*$!
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 50; + objects = { + +/* Begin PBXBuildFile section */ + 57AB5AF3252767460040DE8C /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 57AB5AF2252767460040DE8C /* AVFoundation.framework */; }; + 57AB5B07252769700040DE8C /* ViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = 57AB5AFE252769700040DE8C /* ViewController.m */; }; + 57AB5B08252769700040DE8C /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 57AB5AFF252769700040DE8C /* LaunchScreen.storyboard */; }; + 57AB5B09252769700040DE8C /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 57AB5B01252769700040DE8C /* Main.storyboard */; }; + 57AB5B0A252769700040DE8C /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 57AB5B03252769700040DE8C /* main.m */; }; + 57AB5B0B252769700040DE8C /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 57AB5B04252769700040DE8C /* AppDelegate.m */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 57AB5AEE252766820040DE8C /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 57AB5AC2252762C00040DE8C /* Project object */; + proxyType = 1; + remoteGlobalIDString = 57AB5AE9252766240040DE8C; + remoteInfo = cargo_ios; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 57AB5ACA252762C10040DE8C /* cpal-ios-example.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = "cpal-ios-example.app"; sourceTree = BUILT_PRODUCTS_DIR; }; + 57AB5AF2252767460040DE8C /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AVFoundation.framework; path = System/Library/Frameworks/AVFoundation.framework; sourceTree = SDKROOT; }; + 57AB5AFD252769700040DE8C /* AppDelegate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
AppDelegate.h; sourceTree = ""; }; + 57AB5AFE252769700040DE8C /* ViewController.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ViewController.m; sourceTree = ""; }; + 57AB5B00252769700040DE8C /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = ""; }; + 57AB5B02252769700040DE8C /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; + 57AB5B03252769700040DE8C /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; + 57AB5B04252769700040DE8C /* AppDelegate.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + 57AB5B05252769700040DE8C /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 57AB5B06252769700040DE8C /* ViewController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ViewController.h; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 57AB5AC7252762C10040DE8C /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 57AB5AF3252767460040DE8C /* AVFoundation.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 57AB5AC1252762C00040DE8C = { + isa = PBXGroup; + children = ( + 57AB5AFC252769700040DE8C /* ios-src */, + 57AB5ACB252762C10040DE8C /* Products */, + 57AB5AF1252767460040DE8C /* Frameworks */, + ); + sourceTree = ""; + }; + 57AB5ACB252762C10040DE8C /* Products */ = { + isa = PBXGroup; + children = ( + 57AB5ACA252762C10040DE8C /* cpal-ios-example.app */, + 
); + name = Products; + sourceTree = ""; + }; + 57AB5AF1252767460040DE8C /* Frameworks */ = { + isa = PBXGroup; + children = ( + 57AB5AF2252767460040DE8C /* AVFoundation.framework */, + ); + name = Frameworks; + sourceTree = ""; + }; + 57AB5AFC252769700040DE8C /* ios-src */ = { + isa = PBXGroup; + children = ( + 57AB5AFD252769700040DE8C /* AppDelegate.h */, + 57AB5B04252769700040DE8C /* AppDelegate.m */, + 57AB5B06252769700040DE8C /* ViewController.h */, + 57AB5AFE252769700040DE8C /* ViewController.m */, + 57AB5AFF252769700040DE8C /* LaunchScreen.storyboard */, + 57AB5B01252769700040DE8C /* Main.storyboard */, + 57AB5B05252769700040DE8C /* Info.plist */, + 57AB5B03252769700040DE8C /* main.m */, + ); + path = "ios-src"; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXLegacyTarget section */ + 57AB5AE9252766240040DE8C /* cargo_ios */ = { + isa = PBXLegacyTarget; + buildArgumentsString = build_rust_deps.sh; + buildConfigurationList = 57AB5AEA252766240040DE8C /* Build configuration list for PBXLegacyTarget "cargo_ios" */; + buildPhases = ( + ); + buildToolPath = /bin/sh; + buildWorkingDirectory = .; + dependencies = ( + ); + name = cargo_ios; + passBuildSettingsInEnvironment = 1; + productName = cargo_ios; + }; +/* End PBXLegacyTarget section */ + +/* Begin PBXNativeTarget section */ + 57AB5AC9252762C10040DE8C /* cpal-ios-example */ = { + isa = PBXNativeTarget; + buildConfigurationList = 57AB5AE3252762C30040DE8C /* Build configuration list for PBXNativeTarget "cpal-ios-example" */; + buildPhases = ( + 57AB5AC6252762C10040DE8C /* Sources */, + 57AB5AC7252762C10040DE8C /* Frameworks */, + 57AB5AC8252762C10040DE8C /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + 57AB5AEF252766820040DE8C /* PBXTargetDependency */, + ); + name = "cpal-ios-example"; + productName = "cpal-ios-example"; + productReference = 57AB5ACA252762C10040DE8C /* cpal-ios-example.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget 
section */ + +/* Begin PBXProject section */ + 57AB5AC2252762C00040DE8C /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 1200; + TargetAttributes = { + 57AB5AC9252762C10040DE8C = { + CreatedOnToolsVersion = 12.0.1; + }; + 57AB5AE9252766240040DE8C = { + CreatedOnToolsVersion = 12.0.1; + }; + }; + }; + buildConfigurationList = 57AB5AC5252762C00040DE8C /* Build configuration list for PBXProject "cpal-ios-example" */; + compatibilityVersion = "Xcode 9.3"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 57AB5AC1252762C00040DE8C; + productRefGroup = 57AB5ACB252762C10040DE8C /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 57AB5AC9252762C10040DE8C /* cpal-ios-example */, + 57AB5AE9252766240040DE8C /* cargo_ios */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 57AB5AC8252762C10040DE8C /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 57AB5B09252769700040DE8C /* Main.storyboard in Resources */, + 57AB5B08252769700040DE8C /* LaunchScreen.storyboard in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 57AB5AC6252762C10040DE8C /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 57AB5B0A252769700040DE8C /* main.m in Sources */, + 57AB5B0B252769700040DE8C /* AppDelegate.m in Sources */, + 57AB5B07252769700040DE8C /* ViewController.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 57AB5AEF252766820040DE8C /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 57AB5AE9252766240040DE8C /* cargo_ios */; + targetProxy = 57AB5AEE252766820040DE8C /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + 
+/* Begin PBXVariantGroup section */ + 57AB5AFF252769700040DE8C /* LaunchScreen.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 57AB5B00252769700040DE8C /* Base */, + ); + name = LaunchScreen.storyboard; + sourceTree = ""; + }; + 57AB5B01252769700040DE8C /* Main.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 57AB5B02252769700040DE8C /* Base */, + ); + name = Main.storyboard; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + 57AB5AE1252762C30040DE8C /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + 
GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + }; + name = Debug; + }; + 57AB5AE2252762C30040DE8C /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + 
GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + SDKROOT = iphoneos; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + 57AB5AE4252762C30040DE8C /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + INFOPLIST_FILE = "ios-src/Info.plist"; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + LIBRARY_SEARCH_PATHS = target/universal/debug; + OTHER_LDFLAGS = "-lcpal_ios_example"; + OTHER_LIBTOOLFLAGS = ""; + PRODUCT_BUNDLE_IDENTIFIER = "cpal.cpal-ios-example"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + 57AB5AE5252762C30040DE8C /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + INFOPLIST_FILE = "ios-src/Info.plist"; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + LIBRARY_SEARCH_PATHS = target/universal/release; + OTHER_LDFLAGS = "-lcpal_ios_example"; + OTHER_LIBTOOLFLAGS = ""; + PRODUCT_BUNDLE_IDENTIFIER = "cpal.cpal-ios-example"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Release; + }; + 57AB5AEB252766240040DE8C /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + DEBUGGING_SYMBOLS = YES; + DEBUG_INFORMATION_FORMAT = dwarf; + GCC_GENERATE_DEBUGGING_SYMBOLS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + OTHER_CFLAGS = ""; + 
OTHER_LDFLAGS = ""; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 57AB5AEC252766240040DE8C /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + OTHER_CFLAGS = ""; + OTHER_LDFLAGS = ""; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 57AB5AC5252762C00040DE8C /* Build configuration list for PBXProject "cpal-ios-example" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 57AB5AE1252762C30040DE8C /* Debug */, + 57AB5AE2252762C30040DE8C /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 57AB5AE3252762C30040DE8C /* Build configuration list for PBXNativeTarget "cpal-ios-example" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 57AB5AE4252762C30040DE8C /* Debug */, + 57AB5AE5252762C30040DE8C /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 57AB5AEA252766240040DE8C /* Build configuration list for PBXLegacyTarget "cargo_ios" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 57AB5AEB252766240040DE8C /* Debug */, + 57AB5AEC252766240040DE8C /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 57AB5AC2252762C00040DE8C /* Project object */; +} diff --git a/examples/ios-feedback/cpal-ios-example.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/examples/ios-feedback/cpal-ios-example.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 000000000..919434a62 --- /dev/null +++ b/examples/ios-feedback/cpal-ios-example.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git 
a/examples/ios-feedback/cpal-ios-example.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/examples/ios-feedback/cpal-ios-example.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist new file mode 100644 index 000000000..18d981003 --- /dev/null +++ b/examples/ios-feedback/cpal-ios-example.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist @@ -0,0 +1,8 @@ + + + + + IDEDidComputeMac32BitWarning + + + diff --git a/examples/ios-feedback/cpal-ios-example.xcodeproj/xcuserdata/mikeh.xcuserdatad/xcschemes/xcschememanagement.plist b/examples/ios-feedback/cpal-ios-example.xcodeproj/xcuserdata/mikeh.xcuserdatad/xcschemes/xcschememanagement.plist new file mode 100644 index 000000000..74b2bc2bd --- /dev/null +++ b/examples/ios-feedback/cpal-ios-example.xcodeproj/xcuserdata/mikeh.xcuserdatad/xcschemes/xcschememanagement.plist @@ -0,0 +1,19 @@ + + + + + SchemeUserState + + cargo_ios.xcscheme_^#shared#^_ + + orderHint + 0 + + cpal-ios-example.xcscheme_^#shared#^_ + + orderHint + 1 + + + + diff --git a/examples/ios-feedback/ios-src/AppDelegate.h b/examples/ios-feedback/ios-src/AppDelegate.h new file mode 100644 index 000000000..0ecf7cb14 --- /dev/null +++ b/examples/ios-feedback/ios-src/AppDelegate.h @@ -0,0 +1,15 @@ +// +// AppDelegate.h +// cpal-ios-example +// +// Created by Michael Hills on 2/10/20. +// + +#import + +@interface AppDelegate : UIResponder + +@property (strong, nonatomic) UIWindow *window; + +@end + diff --git a/examples/ios-feedback/ios-src/AppDelegate.m b/examples/ios-feedback/ios-src/AppDelegate.m new file mode 100644 index 000000000..18bf183e3 --- /dev/null +++ b/examples/ios-feedback/ios-src/AppDelegate.m @@ -0,0 +1,49 @@ +// +// AppDelegate.m +// cpal-ios-example +// +// Created by Michael Hills on 2/10/20. 
+// + +#import "AppDelegate.h" +@import AVFoundation; + +void rust_ios_main(void); + + +@interface AppDelegate () + +@end + +@implementation AppDelegate + + + +- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions { + // Override point for customization after application launch. + + NSError *error; + BOOL success; + + // It is necessary to access the sharedInstance so that calls to AudioSessionGetProperty + // will work. + AVAudioSession *session = AVAudioSession.sharedInstance; + // Setting up the category is not necessary, but generally advised. + // Since this demo records and plays, lets use AVAudioSessionCategoryPlayAndRecord. + // Also default to speaker as defaulting to the phone earpiece would be unusual. + // Allowing bluetooth should direct audio to your bluetooth headset. + success = [session setCategory:AVAudioSessionCategoryPlayAndRecord + withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker | AVAudioSessionCategoryOptionAllowBluetooth + error:&error]; + + if (success) { + NSLog(@"Calling rust_ios_main()"); + rust_ios_main(); + } else { + NSLog(@"Failed to configure audio session category"); + } + + return YES; +} + +@end diff --git a/examples/ios-feedback/ios-src/Base.lproj/LaunchScreen.storyboard b/examples/ios-feedback/ios-src/Base.lproj/LaunchScreen.storyboard new file mode 100644 index 000000000..865e9329f --- /dev/null +++ b/examples/ios-feedback/ios-src/Base.lproj/LaunchScreen.storyboard @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/ios-feedback/ios-src/Base.lproj/Main.storyboard b/examples/ios-feedback/ios-src/Base.lproj/Main.storyboard new file mode 100644 index 000000000..808a21ce7 --- /dev/null +++ b/examples/ios-feedback/ios-src/Base.lproj/Main.storyboard @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/ios-feedback/ios-src/Info.plist b/examples/ios-feedback/ios-src/Info.plist new file mode 100644 index 
000000000..49f0347b6 --- /dev/null +++ b/examples/ios-feedback/ios-src/Info.plist @@ -0,0 +1,49 @@ + + + + + CFBundleDevelopmentRegion + $(DEVELOPMENT_LANGUAGE) + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + $(PRODUCT_BUNDLE_PACKAGE_TYPE) + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + LSRequiresIPhoneOS + + UIApplicationSupportsIndirectInputEvents + + UILaunchStoryboardName + LaunchScreen + UIMainStoryboardFile + Main + UIRequiredDeviceCapabilities + + armv7 + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + NSMicrophoneUsageDescription + cpal feedback demo + UISupportedInterfaceOrientations~ipad + + UIInterfaceOrientationPortrait + UIInterfaceOrientationPortraitUpsideDown + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + + diff --git a/examples/ios-feedback/ios-src/ViewController.h b/examples/ios-feedback/ios-src/ViewController.h new file mode 100644 index 000000000..45d97b48d --- /dev/null +++ b/examples/ios-feedback/ios-src/ViewController.h @@ -0,0 +1,14 @@ +// +// ViewController.h +// cpal-ios-example +// +// Created by Michael Hills on 2/10/20. +// + +#import + +@interface ViewController : UIViewController + + +@end + diff --git a/examples/ios-feedback/ios-src/ViewController.m b/examples/ios-feedback/ios-src/ViewController.m new file mode 100644 index 000000000..d2f9c2f0d --- /dev/null +++ b/examples/ios-feedback/ios-src/ViewController.m @@ -0,0 +1,22 @@ +// +// ViewController.m +// cpal-ios-example +// +// Created by Michael Hills on 2/10/20. +// + +#import "ViewController.h" + +@interface ViewController () + +@end + +@implementation ViewController + +- (void)viewDidLoad { + [super viewDidLoad]; + // Do any additional setup after loading the view. 
+} + + +@end diff --git a/examples/ios-feedback/ios-src/main.m b/examples/ios-feedback/ios-src/main.m new file mode 100644 index 000000000..0b1bb75ff --- /dev/null +++ b/examples/ios-feedback/ios-src/main.m @@ -0,0 +1,18 @@ +// +// main.m +// cpal-ios-example +// +// Created by Michael Hills on 2/10/20. +// + +#import +#import "AppDelegate.h" + +int main(int argc, char * argv[]) { + NSString * appDelegateClassName; + @autoreleasepool { + // Setup code that might create autoreleased objects goes here. + appDelegateClassName = NSStringFromClass([AppDelegate class]); + } + return UIApplicationMain(argc, argv, nil, appDelegateClassName); +} diff --git a/examples/ios-feedback/src/feedback.rs b/examples/ios-feedback/src/feedback.rs new file mode 100644 index 000000000..1a190e9ed --- /dev/null +++ b/examples/ios-feedback/src/feedback.rs @@ -0,0 +1,108 @@ +//! Feeds back the input stream directly into the output stream. +//! +//! Assumes that the input and output devices can use the same stream configuration and that they +//! support the f32 sample format. +//! +//! Uses a delay of `LATENCY_MS` milliseconds in case the default input and output streams are not +//! precisely synchronised. + +extern crate anyhow; +extern crate cpal; +extern crate ringbuf; + +use cpal::traits::{DeviceTrait, HostTrait, StreamTrait}; +use ringbuf::RingBuffer; + +const LATENCY_MS: f32 = 1000.0; + +pub fn run_example() -> Result<(), anyhow::Error> { + let host = cpal::default_host(); + + // Default devices. + let input_device = host + .default_input_device() + .expect("failed to get default input device"); + let output_device = host + .default_output_device() + .expect("failed to get default output device"); + println!("Using default input device: \"{}\"", input_device.name()?); + println!("Using default output device: \"{}\"", output_device.name()?); + + // We'll try and use the same configuration between streams to keep it simple. 
+ let config: cpal::StreamConfig = input_device.default_input_config()?.into(); + + // Create a delay in case the input and output devices aren't synced. + let latency_frames = (LATENCY_MS / 1_000.0) * config.sample_rate.0 as f32; + let latency_samples = latency_frames as usize * config.channels as usize; + + // The buffer to share samples + let ring = RingBuffer::new(latency_samples * 2); + let (mut producer, mut consumer) = ring.split(); + + // Fill the samples with 0.0 equal to the length of the delay. + for _ in 0..latency_samples { + // The ring buffer has twice as much space as necessary to add latency here, + // so this should never fail + producer.push(0.0).unwrap(); + } + + let input_data_fn = move |data: &[f32], _: &cpal::InputCallbackInfo| { + let mut output_fell_behind = false; + for &sample in data { + if producer.push(sample).is_err() { + output_fell_behind = true; + } + } + if output_fell_behind { + eprintln!("output stream fell behind: try increasing latency"); + } + }; + + let output_data_fn = move |data: &mut [f32], _: &cpal::OutputCallbackInfo| { + let mut input_fell_behind = None; + for sample in data { + *sample = match consumer.pop() { + Ok(s) => s, + Err(err) => { + input_fell_behind = Some(err); + 0.0 + } + }; + } + if let Some(err) = input_fell_behind { + eprintln!( + "input stream fell behind: {:?}: try increasing latency", + err + ); + } + }; + + // Build streams. + println!( + "Attempting to build both streams with f32 samples and `{:?}`.", + config + ); + println!("setup is"); + let input_stream = input_device.build_input_stream(&config, input_data_fn, err_fn)?; + println!("setup os"); + let output_stream = output_device.build_output_stream(&config, output_data_fn, err_fn)?; + println!("Successfully built streams."); + + // Play the streams. 
+ println!( + "Starting the input and output streams with `{}` milliseconds of latency.", + LATENCY_MS + ); + input_stream.play()?; + output_stream.play()?; + + // for the purposes of this demo, leak these so that after returning the audio units will + // keep running + std::mem::forget(input_stream); + std::mem::forget(output_stream); + Ok(()) +} + +fn err_fn(err: cpal::StreamError) { + eprintln!("an error occurred on stream: {}", err); +} diff --git a/examples/ios-feedback/src/lib.rs b/examples/ios-feedback/src/lib.rs new file mode 100644 index 000000000..091f8f7c8 --- /dev/null +++ b/examples/ios-feedback/src/lib.rs @@ -0,0 +1,6 @@ +mod feedback; + +#[no_mangle] +pub extern "C" fn rust_ios_main() { + feedback::run_example().unwrap(); +} diff --git a/src/host/coreaudio/ios/enumerate.rs b/src/host/coreaudio/ios/enumerate.rs new file mode 100644 index 000000000..850649632 --- /dev/null +++ b/src/host/coreaudio/ios/enumerate.rs @@ -0,0 +1,43 @@ +use std::vec::IntoIter as VecIntoIter; + +use DevicesError; +use SupportedStreamConfigRange; + +use super::Device; + +pub type SupportedInputConfigs = ::std::vec::IntoIter; +pub type SupportedOutputConfigs = ::std::vec::IntoIter; + +// TODO: Support enumerating earpiece vs headset vs speaker etc? +pub struct Devices(VecIntoIter); + +impl Devices { + pub fn new() -> Result { + Ok(Self::default()) + } +} + +impl Default for Devices { + fn default() -> Devices { + Devices(vec![Device].into_iter()) + } +} + +impl Iterator for Devices { + type Item = Device; + + #[inline] + fn next(&mut self) -> Option { + self.0.next() + } +} + +#[inline] +pub fn default_input_device() -> Option { + Some(Device) +} + +#[inline] +pub fn default_output_device() -> Option { + Some(Device) +} diff --git a/src/host/coreaudio/ios/mod.rs b/src/host/coreaudio/ios/mod.rs new file mode 100644 index 000000000..9ad774ef2 --- /dev/null +++ b/src/host/coreaudio/ios/mod.rs @@ -0,0 +1,430 @@ +//! +//! coreaudio on iOS looks a bit different than on macOS. 
A lot of configuration needs to use +//! the AVAudioSession objc API which doesn't exist on macOS. +//! +//! TODO: +//! - Use AVAudioSession to enumerate (and set) buffer size / sample rate / number of channels +//! + +extern crate core_foundation_sys; +extern crate coreaudio; + +use std::cell::RefCell; + +use self::coreaudio::audio_unit::render_callback::data; +use self::coreaudio::audio_unit::{render_callback, AudioUnit, Element, Scope}; +use self::coreaudio::sys::{ + kAudioOutputUnitProperty_EnableIO, kAudioUnitProperty_StreamFormat, AudioBuffer, + AudioStreamBasicDescription, +}; + +use super::{asbd_from_config, frames_to_duration, host_time_to_stream_instant}; +use traits::{DeviceTrait, HostTrait, StreamTrait}; + +use crate::{ + BackendSpecificError, BufferSize, BuildStreamError, Data, DefaultStreamConfigError, + DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError, + PlayStreamError, SampleFormat, SampleRate, StreamConfig, StreamError, SupportedBufferSize, + SupportedStreamConfig, SupportedStreamConfigRange, SupportedStreamConfigsError, +}; + +use self::enumerate::{ + default_input_device, default_output_device, Devices, SupportedInputConfigs, + SupportedOutputConfigs, +}; +use std::slice; + +pub mod enumerate; + +const DEFAULT_SAMPLE_RATE: SampleRate = SampleRate(44_100); + +// These days the default of iOS is now F32 and no longer I16 +const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Device; + +pub struct Host; + +impl Host { + pub fn new() -> Result { + Ok(Host) + } +} + +impl HostTrait for Host { + type Devices = Devices; + type Device = Device; + + fn is_available() -> bool { + true + } + + fn devices(&self) -> Result { + Devices::new() + } + + fn default_input_device(&self) -> Option { + default_input_device() + } + + fn default_output_device(&self) -> Option { + default_output_device() + } +} + +impl Device { + #[inline] + fn name(&self) -> Result { 
+ Ok("Default Device".to_owned()) + } + + #[inline] + fn supported_input_configs( + &self, + ) -> Result { + // TODO: query AVAudioSession for parameters, some values like sample rate and buffer size + // probably need to be tested but channels can be enumerated. + + // setup an audio unit for recording, and then pull some default parameters off it + + let mut audio_unit = create_audio_unit()?; + audio_unit.uninitialize()?; + configure_for_recording(&mut audio_unit)?; + audio_unit.initialize()?; + + let id = kAudioUnitProperty_StreamFormat; + let asbd: AudioStreamBasicDescription = + audio_unit.get_property(id, Scope::Input, Element::Input)?; + + let buffer_size = SupportedBufferSize::Range { min: 0, max: 0 }; + + Ok(vec![SupportedStreamConfigRange { + channels: asbd.mChannelsPerFrame as u16, + min_sample_rate: SampleRate(asbd.mSampleRate as u32), + max_sample_rate: SampleRate(asbd.mSampleRate as u32), + buffer_size: buffer_size.clone(), + sample_format: SUPPORTED_SAMPLE_FORMAT, + }] + .into_iter()) + } + + #[inline] + fn supported_output_configs( + &self, + ) -> Result { + // TODO: query AVAudioSession for parameters, some values like sample rate and buffer size + // probably need to be tested but channels can be enumerated. 
+ + // setup an audio unit, and then pull some default parameters off it + + let audio_unit = create_audio_unit()?; + let id = kAudioUnitProperty_StreamFormat; + let asbd: AudioStreamBasicDescription = + audio_unit.get_property(id, Scope::Output, Element::Output)?; + + let buffer_size = SupportedBufferSize::Range { min: 0, max: 0 }; + let configs: Vec<_> = (1..=asbd.mChannelsPerFrame as u16) + .map(|channels| SupportedStreamConfigRange { + channels, + min_sample_rate: SampleRate(asbd.mSampleRate as u32), + max_sample_rate: SampleRate(asbd.mSampleRate as u32), + buffer_size: buffer_size.clone(), + sample_format: SUPPORTED_SAMPLE_FORMAT, + }) + .collect(); + Ok(configs.into_iter()) + } + + #[inline] + fn default_input_config(&self) -> Result { + const EXPECT: &str = "expected at least one valid coreaudio stream config"; + let config = self + .supported_input_configs() + .expect(EXPECT) + .max_by(|a, b| a.cmp_default_heuristics(b)) + .unwrap() + .with_sample_rate(DEFAULT_SAMPLE_RATE); + + Ok(config) + } + + #[inline] + fn default_output_config(&self) -> Result { + const EXPECT: &str = "expected at least one valid coreaudio stream config"; + let config = self + .supported_output_configs() + .expect(EXPECT) + .max_by(|a, b| a.cmp_default_heuristics(b)) + .unwrap() + .with_sample_rate(DEFAULT_SAMPLE_RATE); + + Ok(config) + } +} + +impl DeviceTrait for Device { + type SupportedInputConfigs = SupportedInputConfigs; + type SupportedOutputConfigs = SupportedOutputConfigs; + type Stream = Stream; + + #[inline] + fn name(&self) -> Result { + Device::name(self) + } + + #[inline] + fn supported_input_configs( + &self, + ) -> Result { + Device::supported_input_configs(self) + } + + #[inline] + fn supported_output_configs( + &self, + ) -> Result { + Device::supported_output_configs(self) + } + + #[inline] + fn default_input_config(&self) -> Result { + Device::default_input_config(self) + } + + #[inline] + fn default_output_config(&self) -> Result { + 
Device::default_output_config(self) + } + + fn build_input_stream_raw( + &self, + config: &StreamConfig, + sample_format: SampleFormat, + mut data_callback: D, + mut error_callback: E, + ) -> Result + where + D: FnMut(&Data, &InputCallbackInfo) + Send + 'static, + E: FnMut(StreamError) + Send + 'static, + { + // The scope and element for working with a device's input stream. + let scope = Scope::Output; + let element = Element::Input; + + let mut audio_unit = create_audio_unit()?; + audio_unit.uninitialize()?; + configure_for_recording(&mut audio_unit)?; + audio_unit.initialize()?; + + // Set the stream in interleaved mode. + let asbd = asbd_from_config(config, sample_format); + audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?; + + // Set the buffersize + match config.buffer_size { + BufferSize::Fixed(_) => { + return Err(BuildStreamError::StreamConfigNotSupported); + } + BufferSize::Default => (), + } + + // Register the callback that is being called by coreaudio whenever it needs data to be + // fed to the audio buffer. + let bytes_per_channel = sample_format.sample_size(); + let sample_rate = config.sample_rate; + type Args = render_callback::Args; + audio_unit.set_input_callback(move |args: Args| unsafe { + let ptr = (*args.data.data).mBuffers.as_ptr() as *const AudioBuffer; + let len = (*args.data.data).mNumberBuffers as usize; + let buffers: &[AudioBuffer] = slice::from_raw_parts(ptr, len); + + // There is only 1 buffer when using interleaved channels + let AudioBuffer { + mNumberChannels: channels, + mDataByteSize: data_byte_size, + mData: data, + } = buffers[0]; + + let data = data as *mut (); + let len = (data_byte_size as usize / bytes_per_channel) as usize; + let data = Data::from_parts(data, len, sample_format); + + // TODO: Need a better way to get delay, for now we assume a double-buffer offset. 
+ let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) { + Err(err) => { + error_callback(err.into()); + return Err(()); + } + Ok(cb) => cb, + }; + let buffer_frames = len / channels as usize; + let delay = frames_to_duration(buffer_frames, sample_rate); + let capture = callback + .sub(delay) + .expect("`capture` occurs before origin of alsa `StreamInstant`"); + let timestamp = crate::InputStreamTimestamp { callback, capture }; + + let info = InputCallbackInfo { timestamp }; + data_callback(&data, &info); + Ok(()) + })?; + + audio_unit.start()?; + + Ok(Stream::new(StreamInner { + playing: true, + audio_unit, + })) + } + + /// Create an output stream. + fn build_output_stream_raw( + &self, + config: &StreamConfig, + sample_format: SampleFormat, + mut data_callback: D, + mut error_callback: E, + ) -> Result + where + D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static, + E: FnMut(StreamError) + Send + 'static, + { + match config.buffer_size { + BufferSize::Fixed(_) => { + return Err(BuildStreamError::StreamConfigNotSupported); + } + BufferSize::Default => (), + }; + + let mut audio_unit = create_audio_unit()?; + + // The scope and element for working with a device's output stream. + let scope = Scope::Input; + let element = Element::Output; + + // Set the stream in interleaved mode. + let asbd = asbd_from_config(config, sample_format); + audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?; + + // Register the callback that is being called by coreaudio whenever it needs data to be + // fed to the audio buffer. + let bytes_per_channel = sample_format.sample_size(); + let sample_rate = config.sample_rate; + type Args = render_callback::Args; + audio_unit.set_render_callback(move |args: Args| unsafe { + // If `run()` is currently running, then a callback will be available from this list. + // Otherwise, we just fill the buffer with zeroes and return. 
+ + let AudioBuffer { + mNumberChannels: channels, + mDataByteSize: data_byte_size, + mData: data, + } = (*args.data.data).mBuffers[0]; + + let data = data as *mut (); + let len = (data_byte_size as usize / bytes_per_channel) as usize; + let mut data = Data::from_parts(data, len, sample_format); + + let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) { + Err(err) => { + error_callback(err.into()); + return Err(()); + } + Ok(cb) => cb, + }; + // TODO: Need a better way to get delay, for now we assume a double-buffer offset. + let buffer_frames = len / channels as usize; + let delay = frames_to_duration(buffer_frames, sample_rate); + let playback = callback + .add(delay) + .expect("`playback` occurs beyond representation supported by `StreamInstant`"); + let timestamp = crate::OutputStreamTimestamp { callback, playback }; + + let info = OutputCallbackInfo { timestamp }; + data_callback(&mut data, &info); + Ok(()) + })?; + + audio_unit.start()?; + + Ok(Stream::new(StreamInner { + playing: true, + audio_unit, + })) + } +} + +pub struct Stream { + inner: RefCell, +} + +impl Stream { + fn new(inner: StreamInner) -> Self { + Self { + inner: RefCell::new(inner), + } + } +} + +impl StreamTrait for Stream { + fn play(&self) -> Result<(), PlayStreamError> { + let mut stream = self.inner.borrow_mut(); + + if !stream.playing { + if let Err(e) = stream.audio_unit.start() { + let description = format!("{}", e); + let err = BackendSpecificError { description }; + return Err(err.into()); + } + stream.playing = true; + } + Ok(()) + } + + fn pause(&self) -> Result<(), PauseStreamError> { + let mut stream = self.inner.borrow_mut(); + + if stream.playing { + if let Err(e) = stream.audio_unit.stop() { + let description = format!("{}", e); + let err = BackendSpecificError { description }; + return Err(err.into()); + } + + stream.playing = false; + } + Ok(()) + } +} + +struct StreamInner { + playing: bool, + audio_unit: AudioUnit, +} + +fn create_audio_unit() -> 
Result { + AudioUnit::new(coreaudio::audio_unit::IOType::RemoteIO) +} + +fn configure_for_recording(audio_unit: &mut AudioUnit) -> Result<(), coreaudio::Error> { + // Enable mic recording + let enable_input = 1u32; + audio_unit.set_property( + kAudioOutputUnitProperty_EnableIO, + Scope::Input, + Element::Input, + Some(&enable_input), + )?; + + // Disable output + let disable_output = 0u32; + audio_unit.set_property( + kAudioOutputUnitProperty_EnableIO, + Scope::Output, + Element::Output, + Some(&disable_output), + )?; + + Ok(()) +} diff --git a/src/host/coreaudio/enumerate.rs b/src/host/coreaudio/macos/enumerate.rs similarity index 98% rename from src/host/coreaudio/enumerate.rs rename to src/host/coreaudio/macos/enumerate.rs index f7b3e7887..d28c1b58d 100644 --- a/src/host/coreaudio/enumerate.rs +++ b/src/host/coreaudio/macos/enumerate.rs @@ -1,4 +1,6 @@ -use super::coreaudio::sys::{ +extern crate coreaudio; + +use self::coreaudio::sys::{ kAudioHardwareNoError, kAudioHardwarePropertyDefaultInputDevice, kAudioHardwarePropertyDefaultOutputDevice, kAudioHardwarePropertyDevices, kAudioObjectPropertyElementMaster, kAudioObjectPropertyScopeGlobal, kAudioObjectSystemObject, diff --git a/src/host/coreaudio/macos/mod.rs b/src/host/coreaudio/macos/mod.rs new file mode 100644 index 000000000..dc99b5f0c --- /dev/null +++ b/src/host/coreaudio/macos/mod.rs @@ -0,0 +1,842 @@ +extern crate core_foundation_sys; +extern crate coreaudio; + +use super::{asbd_from_config, check_os_status, frames_to_duration, host_time_to_stream_instant}; + +use self::core_foundation_sys::string::{CFStringGetCString, CFStringGetCStringPtr, CFStringRef}; +use self::coreaudio::audio_unit::render_callback::{self, data}; +use self::coreaudio::audio_unit::{AudioUnit, Element, Scope}; +use self::coreaudio::sys::{ + kAudioDevicePropertyAvailableNominalSampleRates, kAudioDevicePropertyBufferFrameSize, + kAudioDevicePropertyBufferFrameSizeRange, kAudioDevicePropertyDeviceNameCFString, + 
kAudioDevicePropertyNominalSampleRate, kAudioDevicePropertyScopeOutput, + kAudioDevicePropertyStreamConfiguration, kAudioDevicePropertyStreamFormat, + kAudioObjectPropertyElementMaster, kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyScopeInput, kAudioObjectPropertyScopeOutput, + kAudioOutputUnitProperty_CurrentDevice, kAudioOutputUnitProperty_EnableIO, + kAudioUnitProperty_StreamFormat, kCFStringEncodingUTF8, AudioBuffer, AudioBufferList, + AudioDeviceID, AudioObjectAddPropertyListener, AudioObjectGetPropertyData, + AudioObjectGetPropertyDataSize, AudioObjectID, AudioObjectPropertyAddress, + AudioObjectPropertyScope, AudioObjectRemovePropertyListener, AudioObjectSetPropertyData, + AudioStreamBasicDescription, AudioValueRange, OSStatus, +}; +use crate::traits::{DeviceTrait, HostTrait, StreamTrait}; +use crate::{ + BackendSpecificError, BufferSize, BuildStreamError, ChannelCount, Data, + DefaultStreamConfigError, DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, + PauseStreamError, PlayStreamError, SampleFormat, SampleRate, StreamConfig, StreamError, + SupportedBufferSize, SupportedStreamConfig, SupportedStreamConfigRange, + SupportedStreamConfigsError, +}; +use std::cell::RefCell; +use std::ffi::CStr; +use std::fmt; +use std::mem; +use std::os::raw::c_char; +use std::ptr::null; +use std::slice; +use std::thread; +use std::time::Duration; + +pub use self::enumerate::{ + default_input_device, default_output_device, Devices, SupportedInputConfigs, + SupportedOutputConfigs, +}; + +pub mod enumerate; + +/// Coreaudio host, the default host on macOS. 
+#[derive(Debug)] +pub struct Host; + +impl Host { + pub fn new() -> Result { + Ok(Host) + } +} + +impl HostTrait for Host { + type Devices = Devices; + type Device = Device; + + fn is_available() -> bool { + // Assume coreaudio is always available + true + } + + fn devices(&self) -> Result { + Devices::new() + } + + fn default_input_device(&self) -> Option { + default_input_device() + } + + fn default_output_device(&self) -> Option { + default_output_device() + } +} + +impl DeviceTrait for Device { + type SupportedInputConfigs = SupportedInputConfigs; + type SupportedOutputConfigs = SupportedOutputConfigs; + type Stream = Stream; + + fn name(&self) -> Result { + Device::name(self) + } + + fn supported_input_configs( + &self, + ) -> Result { + Device::supported_input_configs(self) + } + + fn supported_output_configs( + &self, + ) -> Result { + Device::supported_output_configs(self) + } + + fn default_input_config(&self) -> Result { + Device::default_input_config(self) + } + + fn default_output_config(&self) -> Result { + Device::default_output_config(self) + } + + fn build_input_stream_raw( + &self, + config: &StreamConfig, + sample_format: SampleFormat, + data_callback: D, + error_callback: E, + ) -> Result + where + D: FnMut(&Data, &InputCallbackInfo) + Send + 'static, + E: FnMut(StreamError) + Send + 'static, + { + Device::build_input_stream_raw(self, config, sample_format, data_callback, error_callback) + } + + fn build_output_stream_raw( + &self, + config: &StreamConfig, + sample_format: SampleFormat, + data_callback: D, + error_callback: E, + ) -> Result + where + D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static, + E: FnMut(StreamError) + Send + 'static, + { + Device::build_output_stream_raw(self, config, sample_format, data_callback, error_callback) + } +} + +#[derive(Clone, PartialEq, Eq)] +pub struct Device { + pub(crate) audio_device_id: AudioDeviceID, +} + +impl Device { + fn name(&self) -> Result { + let property_address = 
AudioObjectPropertyAddress { + mSelector: kAudioDevicePropertyDeviceNameCFString, + mScope: kAudioDevicePropertyScopeOutput, + mElement: kAudioObjectPropertyElementMaster, + }; + let device_name: CFStringRef = null(); + let data_size = mem::size_of::(); + let c_str = unsafe { + let status = AudioObjectGetPropertyData( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + &device_name as *const _ as *mut _, + ); + check_os_status(status)?; + + let c_string: *const c_char = CFStringGetCStringPtr(device_name, kCFStringEncodingUTF8); + if c_string.is_null() { + let status = AudioObjectGetPropertyData( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + &device_name as *const _ as *mut _, + ); + check_os_status(status)?; + let mut buf: [i8; 255] = [0; 255]; + let result = CFStringGetCString( + device_name, + buf.as_mut_ptr(), + buf.len() as _, + kCFStringEncodingUTF8, + ); + if result == 0 { + let description = + "core foundation failed to return device name string".to_string(); + let err = BackendSpecificError { description }; + return Err(err.into()); + } + let name: &CStr = CStr::from_ptr(buf.as_ptr()); + return Ok(name.to_str().unwrap().to_owned()); + } + CStr::from_ptr(c_string as *mut _) + }; + Ok(c_str.to_string_lossy().into_owned()) + } + + // Logic re-used between `supported_input_configs` and `supported_output_configs`. + #[allow(clippy::cast_ptr_alignment)] + fn supported_configs( + &self, + scope: AudioObjectPropertyScope, + ) -> Result { + let mut property_address = AudioObjectPropertyAddress { + mSelector: kAudioDevicePropertyStreamConfiguration, + mScope: scope, + mElement: kAudioObjectPropertyElementMaster, + }; + + unsafe { + // Retrieve the devices audio buffer list. 
+ let data_size = 0u32; + let status = AudioObjectGetPropertyDataSize( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + ); + check_os_status(status)?; + + let mut audio_buffer_list: Vec = vec![]; + audio_buffer_list.reserve_exact(data_size as usize); + let status = AudioObjectGetPropertyData( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + audio_buffer_list.as_mut_ptr() as *mut _, + ); + check_os_status(status)?; + + let audio_buffer_list = audio_buffer_list.as_mut_ptr() as *mut AudioBufferList; + + // If there's no buffers, skip. + if (*audio_buffer_list).mNumberBuffers == 0 { + return Ok(vec![].into_iter()); + } + + // Count the number of channels as the sum of all channels in all output buffers. + let n_buffers = (*audio_buffer_list).mNumberBuffers as usize; + let first: *const AudioBuffer = (*audio_buffer_list).mBuffers.as_ptr(); + let buffers: &'static [AudioBuffer] = slice::from_raw_parts(first, n_buffers); + let mut n_channels = 0; + for buffer in buffers { + n_channels += buffer.mNumberChannels as usize; + } + + // TODO: macOS should support U8, I16, I32, F32 and F64. This should allow for using + // I16 but just use F32 for now as its the default anyway. + let sample_format = SampleFormat::F32; + + // Get available sample rate ranges. 
+ property_address.mSelector = kAudioDevicePropertyAvailableNominalSampleRates; + let data_size = 0u32; + let status = AudioObjectGetPropertyDataSize( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + ); + check_os_status(status)?; + + let n_ranges = data_size as usize / mem::size_of::(); + let mut ranges: Vec = vec![]; + ranges.reserve_exact(data_size as usize); + let status = AudioObjectGetPropertyData( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + ranges.as_mut_ptr() as *mut _, + ); + check_os_status(status)?; + + let ranges: *mut AudioValueRange = ranges.as_mut_ptr() as *mut _; + let ranges: &'static [AudioValueRange] = slice::from_raw_parts(ranges, n_ranges); + + let audio_unit = audio_unit_from_device(self, true)?; + let buffer_size = get_io_buffer_frame_size_range(&audio_unit)?; + + // Collect the supported formats for the device. + let mut fmts = vec![]; + for range in ranges { + let fmt = SupportedStreamConfigRange { + channels: n_channels as ChannelCount, + min_sample_rate: SampleRate(range.mMinimum as _), + max_sample_rate: SampleRate(range.mMaximum as _), + buffer_size: buffer_size.clone(), + sample_format, + }; + fmts.push(fmt); + } + + Ok(fmts.into_iter()) + } + } + + fn supported_input_configs( + &self, + ) -> Result { + self.supported_configs(kAudioObjectPropertyScopeInput) + } + + fn supported_output_configs( + &self, + ) -> Result { + self.supported_configs(kAudioObjectPropertyScopeOutput) + } + + fn default_config( + &self, + scope: AudioObjectPropertyScope, + ) -> Result { + fn default_config_error_from_os_status( + status: OSStatus, + ) -> Result<(), DefaultStreamConfigError> { + let err = match coreaudio::Error::from_os_status(status) { + Err(err) => err, + Ok(_) => return Ok(()), + }; + match err { + coreaudio::Error::AudioUnit( + coreaudio::error::AudioUnitError::FormatNotSupported, + ) + | 
coreaudio::Error::AudioCodec(_) + | coreaudio::Error::AudioFormat(_) => { + Err(DefaultStreamConfigError::StreamTypeNotSupported) + } + coreaudio::Error::AudioUnit(coreaudio::error::AudioUnitError::NoConnection) => { + Err(DefaultStreamConfigError::DeviceNotAvailable) + } + err => { + let description = format!("{}", err); + let err = BackendSpecificError { description }; + Err(err.into()) + } + } + } + + let property_address = AudioObjectPropertyAddress { + mSelector: kAudioDevicePropertyStreamFormat, + mScope: scope, + mElement: kAudioObjectPropertyElementMaster, + }; + + unsafe { + let asbd: AudioStreamBasicDescription = mem::zeroed(); + let data_size = mem::size_of::() as u32; + let status = AudioObjectGetPropertyData( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + &asbd as *const _ as *mut _, + ); + default_config_error_from_os_status(status)?; + + let sample_format = { + let audio_format = coreaudio::audio_unit::AudioFormat::from_format_and_flag( + asbd.mFormatID, + Some(asbd.mFormatFlags), + ); + let flags = match audio_format { + Some(coreaudio::audio_unit::AudioFormat::LinearPCM(flags)) => flags, + _ => return Err(DefaultStreamConfigError::StreamTypeNotSupported), + }; + let maybe_sample_format = + coreaudio::audio_unit::SampleFormat::from_flags_and_bytes_per_frame( + flags, + asbd.mBytesPerFrame, + ); + match maybe_sample_format { + Some(coreaudio::audio_unit::SampleFormat::F32) => SampleFormat::F32, + Some(coreaudio::audio_unit::SampleFormat::I16) => SampleFormat::I16, + _ => return Err(DefaultStreamConfigError::StreamTypeNotSupported), + } + }; + + let audio_unit = audio_unit_from_device(self, true)?; + let buffer_size = get_io_buffer_frame_size_range(&audio_unit)?; + + let config = SupportedStreamConfig { + sample_rate: SampleRate(asbd.mSampleRate as _), + channels: asbd.mChannelsPerFrame as _, + buffer_size, + sample_format, + }; + Ok(config) + } + } + + fn default_input_config(&self) -> 
Result { + self.default_config(kAudioObjectPropertyScopeInput) + } + + fn default_output_config(&self) -> Result { + self.default_config(kAudioObjectPropertyScopeOutput) + } +} + +impl fmt::Debug for Device { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Device") + .field("audio_device_id", &self.audio_device_id) + .field("name", &self.name()) + .finish() + } +} + +struct StreamInner { + playing: bool, + audio_unit: AudioUnit, + // Track the device with which the audio unit was spawned. + // + // We must do this so that we can avoid changing the device sample rate if there is already + // a stream associated with the device. + #[allow(dead_code)] + device_id: AudioDeviceID, +} + +fn audio_unit_from_device(device: &Device, input: bool) -> Result { + let mut audio_unit = AudioUnit::new(coreaudio::audio_unit::IOType::HalOutput)?; + + if input { + // Enable input processing. + let enable_input = 1u32; + audio_unit.set_property( + kAudioOutputUnitProperty_EnableIO, + Scope::Input, + Element::Input, + Some(&enable_input), + )?; + + // Disable output processing. + let disable_output = 0u32; + audio_unit.set_property( + kAudioOutputUnitProperty_EnableIO, + Scope::Output, + Element::Output, + Some(&disable_output), + )?; + } + + audio_unit.set_property( + kAudioOutputUnitProperty_CurrentDevice, + Scope::Global, + Element::Output, + Some(&device.audio_device_id), + )?; + + Ok(audio_unit) +} + +impl Device { + #[allow(clippy::cast_ptr_alignment)] + #[allow(clippy::while_immutable_condition)] + #[allow(clippy::float_cmp)] + fn build_input_stream_raw( + &self, + config: &StreamConfig, + sample_format: SampleFormat, + mut data_callback: D, + mut error_callback: E, + ) -> Result + where + D: FnMut(&Data, &InputCallbackInfo) + Send + 'static, + E: FnMut(StreamError) + Send + 'static, + { + // The scope and element for working with a device's input stream. 
+ let scope = Scope::Output; + let element = Element::Input; + + // Check whether or not we need to change the device sample rate to suit the one specified for the stream. + unsafe { + // Get the current sample rate. + let mut property_address = AudioObjectPropertyAddress { + mSelector: kAudioDevicePropertyNominalSampleRate, + mScope: kAudioObjectPropertyScopeGlobal, + mElement: kAudioObjectPropertyElementMaster, + }; + let sample_rate: f64 = 0.0; + let data_size = mem::size_of::() as u32; + let status = AudioObjectGetPropertyData( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + &sample_rate as *const _ as *mut _, + ); + coreaudio::Error::from_os_status(status)?; + + // If the requested sample rate is different to the device sample rate, update the device. + if sample_rate as u32 != config.sample_rate.0 { + // Get available sample rate ranges. + property_address.mSelector = kAudioDevicePropertyAvailableNominalSampleRates; + let data_size = 0u32; + let status = AudioObjectGetPropertyDataSize( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + ); + coreaudio::Error::from_os_status(status)?; + let n_ranges = data_size as usize / mem::size_of::(); + let mut ranges: Vec = vec![]; + ranges.reserve_exact(data_size as usize); + let status = AudioObjectGetPropertyData( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + ranges.as_mut_ptr() as *mut _, + ); + coreaudio::Error::from_os_status(status)?; + let ranges: *mut AudioValueRange = ranges.as_mut_ptr() as *mut _; + let ranges: &'static [AudioValueRange] = slice::from_raw_parts(ranges, n_ranges); + + // Now that we have the available ranges, pick the one matching the desired rate. 
+ let sample_rate = config.sample_rate.0; + let maybe_index = ranges.iter().position(|r| { + r.mMinimum as u32 == sample_rate && r.mMaximum as u32 == sample_rate + }); + let range_index = match maybe_index { + None => return Err(BuildStreamError::StreamConfigNotSupported), + Some(i) => i, + }; + + // Update the property selector to specify the nominal sample rate. + property_address.mSelector = kAudioDevicePropertyNominalSampleRate; + + // Setting the sample rate of a device is an asynchronous process in coreaudio. + // + // Thus we are required to set a `listener` so that we may be notified when the + // change occurs. + unsafe extern "C" fn rate_listener( + device_id: AudioObjectID, + _n_addresses: u32, + _properties: *const AudioObjectPropertyAddress, + rate_ptr: *mut ::std::os::raw::c_void, + ) -> OSStatus { + let rate_ptr: *const f64 = rate_ptr as *const _; + let data_size = mem::size_of::(); + let property_address = AudioObjectPropertyAddress { + mSelector: kAudioDevicePropertyNominalSampleRate, + mScope: kAudioObjectPropertyScopeGlobal, + mElement: kAudioObjectPropertyElementMaster, + }; + AudioObjectGetPropertyData( + device_id, + &property_address as *const _, + 0, + null(), + &data_size as *const _ as *mut _, + rate_ptr as *const _ as *mut _, + ) + } + + // Add our sample rate change listener callback. + let reported_rate: f64 = 0.0; + let status = AudioObjectAddPropertyListener( + self.audio_device_id, + &property_address as *const _, + Some(rate_listener), + &reported_rate as *const _ as *mut _, + ); + coreaudio::Error::from_os_status(status)?; + + // Finally, set the sample rate. + let sample_rate = sample_rate as f64; + let status = AudioObjectSetPropertyData( + self.audio_device_id, + &property_address as *const _, + 0, + null(), + data_size, + &ranges[range_index] as *const _ as *const _, + ); + coreaudio::Error::from_os_status(status)?; + + // Wait for the reported_rate to change. 
+ // + // This should not take longer than a few ms, but we timeout after 1 sec just in case. + // + // WARNING: a reference to reported_rate is unsafely captured above, + // and the loop below assumes it can change - but compiler does not know that! + // + let timer = ::std::time::Instant::now(); + while sample_rate != reported_rate { + if timer.elapsed() > Duration::from_secs(1) { + let description = + "timeout waiting for sample rate update for device".into(); + let err = BackendSpecificError { description }; + return Err(err.into()); + } + thread::sleep(Duration::from_millis(5)); + } + + // Remove the `rate_listener` callback. + let status = AudioObjectRemovePropertyListener( + self.audio_device_id, + &property_address as *const _, + Some(rate_listener), + &reported_rate as *const _ as *mut _, + ); + coreaudio::Error::from_os_status(status)?; + } + } + + let mut audio_unit = audio_unit_from_device(self, true)?; + + // Set the stream in interleaved mode. + let asbd = asbd_from_config(config, sample_format); + audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?; + + // Set the buffersize + match config.buffer_size { + BufferSize::Fixed(v) => { + let buffer_size_range = get_io_buffer_frame_size_range(&audio_unit)?; + match buffer_size_range { + SupportedBufferSize::Range { min, max } => { + if v >= min && v <= max { + audio_unit.set_property( + kAudioDevicePropertyBufferFrameSize, + scope, + element, + Some(&v), + )? + } else { + return Err(BuildStreamError::StreamConfigNotSupported); + } + } + SupportedBufferSize::Unknown => (), + } + } + BufferSize::Default => (), + } + + // Register the callback that is being called by coreaudio whenever it needs data to be + // fed to the audio buffer. 
+ let bytes_per_channel = sample_format.sample_size(); + let sample_rate = config.sample_rate; + type Args = render_callback::Args; + audio_unit.set_input_callback(move |args: Args| unsafe { + let ptr = (*args.data.data).mBuffers.as_ptr() as *const AudioBuffer; + let len = (*args.data.data).mNumberBuffers as usize; + let buffers: &[AudioBuffer] = slice::from_raw_parts(ptr, len); + + // TODO: Perhaps loop over all buffers instead? + let AudioBuffer { + mNumberChannels: channels, + mDataByteSize: data_byte_size, + mData: data, + } = buffers[0]; + + let data = data as *mut (); + let len = (data_byte_size as usize / bytes_per_channel) as usize; + let data = Data::from_parts(data, len, sample_format); + + // TODO: Need a better way to get delay, for now we assume a double-buffer offset. + let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) { + Err(err) => { + error_callback(err.into()); + return Err(()); + } + Ok(cb) => cb, + }; + let buffer_frames = len / channels as usize; + let delay = frames_to_duration(buffer_frames, sample_rate); + let capture = callback + .sub(delay) + .expect("`capture` occurs before origin of alsa `StreamInstant`"); + let timestamp = crate::InputStreamTimestamp { callback, capture }; + + let info = InputCallbackInfo { timestamp }; + data_callback(&data, &info); + Ok(()) + })?; + + audio_unit.start()?; + + Ok(Stream::new(StreamInner { + playing: true, + audio_unit, + device_id: self.audio_device_id, + })) + } + + fn build_output_stream_raw( + &self, + config: &StreamConfig, + sample_format: SampleFormat, + mut data_callback: D, + mut error_callback: E, + ) -> Result + where + D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static, + E: FnMut(StreamError) + Send + 'static, + { + let mut audio_unit = audio_unit_from_device(self, false)?; + + // The scope and element for working with a device's output stream. + let scope = Scope::Input; + let element = Element::Output; + + // Set the stream in interleaved mode. 
+ let asbd = asbd_from_config(config, sample_format); + audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?; + + // Set the buffersize + match config.buffer_size { + BufferSize::Fixed(v) => { + let buffer_size_range = get_io_buffer_frame_size_range(&audio_unit)?; + match buffer_size_range { + SupportedBufferSize::Range { min, max } => { + if v >= min && v <= max { + audio_unit.set_property( + kAudioDevicePropertyBufferFrameSize, + scope, + element, + Some(&v), + )? + } else { + return Err(BuildStreamError::StreamConfigNotSupported); + } + } + SupportedBufferSize::Unknown => (), + } + } + BufferSize::Default => (), + } + + // Register the callback that is being called by coreaudio whenever it needs data to be + // fed to the audio buffer. + let bytes_per_channel = sample_format.sample_size(); + let sample_rate = config.sample_rate; + type Args = render_callback::Args; + audio_unit.set_render_callback(move |args: Args| unsafe { + // If `run()` is currently running, then a callback will be available from this list. + // Otherwise, we just fill the buffer with zeroes and return. + + let AudioBuffer { + mNumberChannels: channels, + mDataByteSize: data_byte_size, + mData: data, + } = (*args.data.data).mBuffers[0]; + + let data = data as *mut (); + let len = (data_byte_size as usize / bytes_per_channel) as usize; + let mut data = Data::from_parts(data, len, sample_format); + + let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) { + Err(err) => { + error_callback(err.into()); + return Err(()); + } + Ok(cb) => cb, + }; + // TODO: Need a better way to get delay, for now we assume a double-buffer offset. 
+ let buffer_frames = len / channels as usize; + let delay = frames_to_duration(buffer_frames, sample_rate); + let playback = callback + .add(delay) + .expect("`playback` occurs beyond representation supported by `StreamInstant`"); + let timestamp = crate::OutputStreamTimestamp { callback, playback }; + + let info = OutputCallbackInfo { timestamp }; + data_callback(&mut data, &info); + Ok(()) + })?; + + audio_unit.start()?; + + Ok(Stream::new(StreamInner { + playing: true, + audio_unit, + device_id: self.audio_device_id, + })) + } +} + +pub struct Stream { + inner: RefCell, +} + +impl Stream { + fn new(inner: StreamInner) -> Self { + Self { + inner: RefCell::new(inner), + } + } +} + +impl StreamTrait for Stream { + fn play(&self) -> Result<(), PlayStreamError> { + let mut stream = self.inner.borrow_mut(); + + if !stream.playing { + if let Err(e) = stream.audio_unit.start() { + let description = format!("{}", e); + let err = BackendSpecificError { description }; + return Err(err.into()); + } + stream.playing = true; + } + Ok(()) + } + + fn pause(&self) -> Result<(), PauseStreamError> { + let mut stream = self.inner.borrow_mut(); + + if stream.playing { + if let Err(e) = stream.audio_unit.stop() { + let description = format!("{}", e); + let err = BackendSpecificError { description }; + return Err(err.into()); + } + + stream.playing = false; + } + Ok(()) + } +} + +fn get_io_buffer_frame_size_range( + audio_unit: &AudioUnit, +) -> Result { + let buffer_size_range: AudioValueRange = audio_unit.get_property( + kAudioDevicePropertyBufferFrameSizeRange, + Scope::Global, + Element::Output, + )?; + + Ok(SupportedBufferSize::Range { + min: buffer_size_range.mMinimum as u32, + max: buffer_size_range.mMaximum as u32, + }) +} diff --git a/src/host/coreaudio/mod.rs b/src/host/coreaudio/mod.rs index f94746261..3263b876f 100644 --- a/src/host/coreaudio/mod.rs +++ b/src/host/coreaudio/mod.rs @@ -1,460 +1,44 @@ -extern crate core_foundation_sys; extern crate coreaudio; -use 
self::core_foundation_sys::string::{CFStringGetCString, CFStringGetCStringPtr, CFStringRef}; -use self::coreaudio::audio_unit::render_callback::{self, data}; -use self::coreaudio::audio_unit::{AudioUnit, Element, Scope}; use self::coreaudio::sys::{ - kAudioDevicePropertyAvailableNominalSampleRates, kAudioDevicePropertyBufferFrameSize, - kAudioDevicePropertyBufferFrameSizeRange, kAudioDevicePropertyDeviceNameCFString, - kAudioDevicePropertyNominalSampleRate, kAudioDevicePropertyScopeOutput, - kAudioDevicePropertyStreamConfiguration, kAudioDevicePropertyStreamFormat, kAudioFormatFlagIsFloat, kAudioFormatFlagIsPacked, kAudioFormatLinearPCM, - kAudioObjectPropertyElementMaster, kAudioObjectPropertyScopeGlobal, - kAudioObjectPropertyScopeInput, kAudioObjectPropertyScopeOutput, - kAudioOutputUnitProperty_CurrentDevice, kAudioOutputUnitProperty_EnableIO, - kAudioUnitProperty_StreamFormat, kCFStringEncodingUTF8, AudioBuffer, AudioBufferList, - AudioDeviceID, AudioObjectAddPropertyListener, AudioObjectGetPropertyData, - AudioObjectGetPropertyDataSize, AudioObjectID, AudioObjectPropertyAddress, - AudioObjectPropertyScope, AudioObjectRemovePropertyListener, AudioObjectSetPropertyData, - AudioStreamBasicDescription, AudioValueRange, OSStatus, + AudioStreamBasicDescription, OSStatus, }; -use crate::traits::{DeviceTrait, HostTrait, StreamTrait}; -use crate::{ - BackendSpecificError, BufferSize, BuildStreamError, ChannelCount, Data, - DefaultStreamConfigError, DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, - PauseStreamError, PlayStreamError, SampleFormat, SampleRate, StreamConfig, StreamError, - SupportedBufferSize, SupportedStreamConfig, SupportedStreamConfigRange, - SupportedStreamConfigsError, -}; -use std::cell::RefCell; -use std::ffi::CStr; -use std::fmt; -use std::mem; -use std::os::raw::c_char; -use std::ptr::null; -use std::slice; -use std::thread; -use std::time::Duration; -mod enumerate; +use DefaultStreamConfigError; +use {BuildStreamError, 
SupportedStreamConfigsError}; -pub use self::enumerate::{ - default_input_device, default_output_device, Devices, SupportedInputConfigs, - SupportedOutputConfigs, -}; +use crate::{BackendSpecificError, SampleFormat, StreamConfig}; -/// Coreaudio host, the default host on macOS and iOS. -#[derive(Debug)] -pub struct Host; +#[cfg(target_os = "ios")] +mod ios; +#[cfg(target_os = "macos")] +mod macos; -impl Host { - pub fn new() -> Result { - Ok(Host) - } -} - -impl HostTrait for Host { - type Devices = Devices; - type Device = Device; - - fn is_available() -> bool { - // Assume coreaudio is always available on macOS and iOS. - true - } - - fn devices(&self) -> Result { - Devices::new() - } - - fn default_input_device(&self) -> Option { - default_input_device() - } - - fn default_output_device(&self) -> Option { - default_output_device() - } -} - -impl DeviceTrait for Device { - type SupportedInputConfigs = SupportedInputConfigs; - type SupportedOutputConfigs = SupportedOutputConfigs; - type Stream = Stream; - - fn name(&self) -> Result { - Device::name(self) - } - - fn supported_input_configs( - &self, - ) -> Result { - Device::supported_input_configs(self) - } - - fn supported_output_configs( - &self, - ) -> Result { - Device::supported_output_configs(self) - } - - fn default_input_config(&self) -> Result { - Device::default_input_config(self) - } - - fn default_output_config(&self) -> Result { - Device::default_output_config(self) - } - - fn build_input_stream_raw( - &self, - config: &StreamConfig, - sample_format: SampleFormat, - data_callback: D, - error_callback: E, - ) -> Result - where - D: FnMut(&Data, &InputCallbackInfo) + Send + 'static, - E: FnMut(StreamError) + Send + 'static, - { - Device::build_input_stream_raw(self, config, sample_format, data_callback, error_callback) - } - - fn build_output_stream_raw( - &self, - config: &StreamConfig, - sample_format: SampleFormat, - data_callback: D, - error_callback: E, - ) -> Result - where - D: FnMut(&mut Data, 
&OutputCallbackInfo) + Send + 'static, - E: FnMut(StreamError) + Send + 'static, - { - Device::build_output_stream_raw(self, config, sample_format, data_callback, error_callback) - } -} - -#[derive(Clone, PartialEq, Eq)] -pub struct Device { - audio_device_id: AudioDeviceID, -} - -impl Device { - fn name(&self) -> Result { - let property_address = AudioObjectPropertyAddress { - mSelector: kAudioDevicePropertyDeviceNameCFString, - mScope: kAudioDevicePropertyScopeOutput, - mElement: kAudioObjectPropertyElementMaster, - }; - let device_name: CFStringRef = null(); - let data_size = mem::size_of::(); - let c_str = unsafe { - let status = AudioObjectGetPropertyData( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - &device_name as *const _ as *mut _, - ); - check_os_status(status)?; - - let c_string: *const c_char = CFStringGetCStringPtr(device_name, kCFStringEncodingUTF8); - if c_string.is_null() { - let status = AudioObjectGetPropertyData( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - &device_name as *const _ as *mut _, - ); - check_os_status(status)?; - let mut buf: [i8; 255] = [0; 255]; - let result = CFStringGetCString( - device_name, - buf.as_mut_ptr(), - buf.len() as _, - kCFStringEncodingUTF8, - ); - if result == 0 { - let description = - "core foundation failed to return device name string".to_string(); - let err = BackendSpecificError { description }; - return Err(err.into()); - } - let name: &CStr = CStr::from_ptr(buf.as_ptr()); - return Ok(name.to_str().unwrap().to_owned()); - } - CStr::from_ptr(c_string as *mut _) - }; - Ok(c_str.to_string_lossy().into_owned()) - } - - // Logic re-used between `supported_input_configs` and `supported_output_configs`. 
- #[allow(clippy::cast_ptr_alignment)] - fn supported_configs( - &self, - scope: AudioObjectPropertyScope, - ) -> Result { - let mut property_address = AudioObjectPropertyAddress { - mSelector: kAudioDevicePropertyStreamConfiguration, - mScope: scope, - mElement: kAudioObjectPropertyElementMaster, - }; - - unsafe { - // Retrieve the devices audio buffer list. - let data_size = 0u32; - let status = AudioObjectGetPropertyDataSize( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - ); - check_os_status(status)?; - - let mut audio_buffer_list: Vec = vec![]; - audio_buffer_list.reserve_exact(data_size as usize); - let status = AudioObjectGetPropertyData( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - audio_buffer_list.as_mut_ptr() as *mut _, - ); - check_os_status(status)?; - - let audio_buffer_list = audio_buffer_list.as_mut_ptr() as *mut AudioBufferList; - - // If there's no buffers, skip. - if (*audio_buffer_list).mNumberBuffers == 0 { - return Ok(vec![].into_iter()); - } - - // Count the number of channels as the sum of all channels in all output buffers. - let n_buffers = (*audio_buffer_list).mNumberBuffers as usize; - let first: *const AudioBuffer = (*audio_buffer_list).mBuffers.as_ptr(); - let buffers: &'static [AudioBuffer] = slice::from_raw_parts(first, n_buffers); - let mut n_channels = 0; - for buffer in buffers { - n_channels += buffer.mNumberChannels as usize; - } - - // AFAIK the sample format should always be f32 on macos and i16 on iOS? Feel free to - // fix this if more pcm formats are supported. - let sample_format = if cfg!(target_os = "ios") { - SampleFormat::I16 - } else { - SampleFormat::F32 - }; - - // Get available sample rate ranges. 
- property_address.mSelector = kAudioDevicePropertyAvailableNominalSampleRates; - let data_size = 0u32; - let status = AudioObjectGetPropertyDataSize( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - ); - check_os_status(status)?; - - let n_ranges = data_size as usize / mem::size_of::(); - let mut ranges: Vec = vec![]; - ranges.reserve_exact(data_size as usize); - let status = AudioObjectGetPropertyData( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - ranges.as_mut_ptr() as *mut _, - ); - check_os_status(status)?; - - let ranges: *mut AudioValueRange = ranges.as_mut_ptr() as *mut _; - let ranges: &'static [AudioValueRange] = slice::from_raw_parts(ranges, n_ranges); - - let audio_unit = audio_unit_from_device(self, true)?; - let buffer_size = get_io_buffer_frame_size_range(&audio_unit)?; - - // Collect the supported formats for the device. - let mut fmts = vec![]; - for range in ranges { - let fmt = SupportedStreamConfigRange { - channels: n_channels as ChannelCount, - min_sample_rate: SampleRate(range.mMinimum as _), - max_sample_rate: SampleRate(range.mMaximum as _), - buffer_size: buffer_size.clone(), - sample_format, - }; - fmts.push(fmt); - } - - Ok(fmts.into_iter()) - } - } - - fn supported_input_configs( - &self, - ) -> Result { - self.supported_configs(kAudioObjectPropertyScopeInput) - } - - fn supported_output_configs( - &self, - ) -> Result { - self.supported_configs(kAudioObjectPropertyScopeOutput) - } - - fn default_config( - &self, - scope: AudioObjectPropertyScope, - ) -> Result { - fn default_config_error_from_os_status( - status: OSStatus, - ) -> Result<(), DefaultStreamConfigError> { - let err = match coreaudio::Error::from_os_status(status) { - Err(err) => err, - Ok(_) => return Ok(()), - }; - match err { - coreaudio::Error::AudioUnit( - coreaudio::error::AudioUnitError::FormatNotSupported, - ) - | 
coreaudio::Error::AudioCodec(_) - | coreaudio::Error::AudioFormat(_) => { - Err(DefaultStreamConfigError::StreamTypeNotSupported) - } - coreaudio::Error::AudioUnit(coreaudio::error::AudioUnitError::NoConnection) => { - Err(DefaultStreamConfigError::DeviceNotAvailable) - } - err => { - let description = format!("{}", err); - let err = BackendSpecificError { description }; - Err(err.into()) - } - } - } - - let property_address = AudioObjectPropertyAddress { - mSelector: kAudioDevicePropertyStreamFormat, - mScope: scope, - mElement: kAudioObjectPropertyElementMaster, - }; - - unsafe { - let asbd: AudioStreamBasicDescription = mem::zeroed(); - let data_size = mem::size_of::() as u32; - let status = AudioObjectGetPropertyData( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - &asbd as *const _ as *mut _, - ); - default_config_error_from_os_status(status)?; - - let sample_format = { - let audio_format = coreaudio::audio_unit::AudioFormat::from_format_and_flag( - asbd.mFormatID, - Some(asbd.mFormatFlags), - ); - let flags = match audio_format { - Some(coreaudio::audio_unit::AudioFormat::LinearPCM(flags)) => flags, - _ => return Err(DefaultStreamConfigError::StreamTypeNotSupported), - }; - let maybe_sample_format = - coreaudio::audio_unit::SampleFormat::from_flags_and_bytes_per_frame( - flags, - asbd.mBytesPerFrame, - ); - match maybe_sample_format { - Some(coreaudio::audio_unit::SampleFormat::F32) => SampleFormat::F32, - Some(coreaudio::audio_unit::SampleFormat::I16) => SampleFormat::I16, - _ => return Err(DefaultStreamConfigError::StreamTypeNotSupported), - } - }; - - let audio_unit = audio_unit_from_device(self, true)?; - let buffer_size = get_io_buffer_frame_size_range(&audio_unit)?; - - let config = SupportedStreamConfig { - sample_rate: SampleRate(asbd.mSampleRate as _), - channels: asbd.mChannelsPerFrame as _, - buffer_size, - sample_format, - }; - Ok(config) - } - } - - fn default_input_config(&self) -> 
Result { - self.default_config(kAudioObjectPropertyScopeInput) - } - - fn default_output_config(&self) -> Result { - self.default_config(kAudioObjectPropertyScopeOutput) - } -} +#[cfg(target_os = "ios")] +pub use self::ios::{ + enumerate::{Devices, SupportedInputConfigs, SupportedOutputConfigs}, + Device, Host, Stream, +}; -impl fmt::Debug for Device { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Device") - .field("audio_device_id", &self.audio_device_id) - .field("name", &self.name()) - .finish() - } -} +#[cfg(target_os = "macos")] +pub use self::macos::{ + enumerate::{Devices, SupportedInputConfigs, SupportedOutputConfigs}, + Device, Host, Stream, +}; -struct StreamInner { - playing: bool, - audio_unit: AudioUnit, - // Track the device with which the audio unit was spawned. - // - // We must do this so that we can avoid changing the device sample rate if there is already - // a stream associated with the device. - #[allow(dead_code)] - device_id: AudioDeviceID, -} +/// Common helper methods used by both macOS and iOS -// TODO need stronger error identification -impl From for BuildStreamError { - fn from(err: coreaudio::Error) -> BuildStreamError { - match err { - coreaudio::Error::RenderCallbackBufferFormatDoesNotMatchAudioUnitStreamFormat - | coreaudio::Error::NoKnownSubtype - | coreaudio::Error::AudioUnit(coreaudio::error::AudioUnitError::FormatNotSupported) - | coreaudio::Error::AudioCodec(_) - | coreaudio::Error::AudioFormat(_) => BuildStreamError::StreamConfigNotSupported, - _ => BuildStreamError::DeviceNotAvailable, +fn check_os_status(os_status: OSStatus) -> Result<(), BackendSpecificError> { + match coreaudio::Error::from_os_status(os_status) { + Ok(()) => Ok(()), + Err(err) => { + let description = err.to_string(); + Err(BackendSpecificError { description }) } } } -impl From for SupportedStreamConfigsError { - fn from(err: coreaudio::Error) -> SupportedStreamConfigsError { - let description = format!("{}", err); - let err = 
BackendSpecificError { description }; - // Check for possible DeviceNotAvailable variant - SupportedStreamConfigsError::BackendSpecific { err } - } -} - -impl From for DefaultStreamConfigError { - fn from(err: coreaudio::Error) -> DefaultStreamConfigError { - let description = format!("{}", err); - let err = BackendSpecificError { description }; - // Check for possible DeviceNotAvailable variant - DefaultStreamConfigError::BackendSpecific { err } - } -} - // Create a coreaudio AudioStreamBasicDescription from a CPAL Format. fn asbd_from_config( config: &StreamConfig, @@ -484,379 +68,6 @@ fn asbd_from_config( } } -fn audio_unit_from_device(device: &Device, input: bool) -> Result { - let mut audio_unit = { - let au_type = if cfg!(target_os = "ios") { - // The HalOutput unit isn't available in iOS unfortunately. - // RemoteIO is a sensible replacement. - // See https://goo.gl/CWwRTx - coreaudio::audio_unit::IOType::RemoteIO - } else { - coreaudio::audio_unit::IOType::HalOutput - }; - AudioUnit::new(au_type)? - }; - - if input { - // Enable input processing. - let enable_input = 1u32; - audio_unit.set_property( - kAudioOutputUnitProperty_EnableIO, - Scope::Input, - Element::Input, - Some(&enable_input), - )?; - - // Disable output processing. 
- let disable_output = 0u32; - audio_unit.set_property( - kAudioOutputUnitProperty_EnableIO, - Scope::Output, - Element::Output, - Some(&disable_output), - )?; - } - - audio_unit.set_property( - kAudioOutputUnitProperty_CurrentDevice, - Scope::Global, - Element::Output, - Some(&device.audio_device_id), - )?; - - Ok(audio_unit) -} - -impl Device { - #[allow(clippy::cast_ptr_alignment)] - #[allow(clippy::while_immutable_condition)] - #[allow(clippy::float_cmp)] - fn build_input_stream_raw( - &self, - config: &StreamConfig, - sample_format: SampleFormat, - mut data_callback: D, - mut error_callback: E, - ) -> Result - where - D: FnMut(&Data, &InputCallbackInfo) + Send + 'static, - E: FnMut(StreamError) + Send + 'static, - { - // The scope and element for working with a device's input stream. - let scope = Scope::Output; - let element = Element::Input; - - // Check whether or not we need to change the device sample rate to suit the one specified for the stream. - unsafe { - // Get the current sample rate. - let mut property_address = AudioObjectPropertyAddress { - mSelector: kAudioDevicePropertyNominalSampleRate, - mScope: kAudioObjectPropertyScopeGlobal, - mElement: kAudioObjectPropertyElementMaster, - }; - let sample_rate: f64 = 0.0; - let data_size = mem::size_of::() as u32; - let status = AudioObjectGetPropertyData( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - &sample_rate as *const _ as *mut _, - ); - coreaudio::Error::from_os_status(status)?; - - // If the requested sample rate is different to the device sample rate, update the device. - if sample_rate as u32 != config.sample_rate.0 { - // Get available sample rate ranges. 
- property_address.mSelector = kAudioDevicePropertyAvailableNominalSampleRates; - let data_size = 0u32; - let status = AudioObjectGetPropertyDataSize( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - ); - coreaudio::Error::from_os_status(status)?; - let n_ranges = data_size as usize / mem::size_of::(); - let mut ranges: Vec = vec![]; - ranges.reserve_exact(data_size as usize); - let status = AudioObjectGetPropertyData( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - ranges.as_mut_ptr() as *mut _, - ); - coreaudio::Error::from_os_status(status)?; - let ranges: *mut AudioValueRange = ranges.as_mut_ptr() as *mut _; - let ranges: &'static [AudioValueRange] = slice::from_raw_parts(ranges, n_ranges); - - // Now that we have the available ranges, pick the one matching the desired rate. - let sample_rate = config.sample_rate.0; - let maybe_index = ranges.iter().position(|r| { - r.mMinimum as u32 == sample_rate && r.mMaximum as u32 == sample_rate - }); - let range_index = match maybe_index { - None => return Err(BuildStreamError::StreamConfigNotSupported), - Some(i) => i, - }; - - // Update the property selector to specify the nominal sample rate. - property_address.mSelector = kAudioDevicePropertyNominalSampleRate; - - // Setting the sample rate of a device is an asynchronous process in coreaudio. - // - // Thus we are required to set a `listener` so that we may be notified when the - // change occurs. 
- unsafe extern "C" fn rate_listener( - device_id: AudioObjectID, - _n_addresses: u32, - _properties: *const AudioObjectPropertyAddress, - rate_ptr: *mut ::std::os::raw::c_void, - ) -> OSStatus { - let rate_ptr: *const f64 = rate_ptr as *const _; - let data_size = mem::size_of::(); - let property_address = AudioObjectPropertyAddress { - mSelector: kAudioDevicePropertyNominalSampleRate, - mScope: kAudioObjectPropertyScopeGlobal, - mElement: kAudioObjectPropertyElementMaster, - }; - AudioObjectGetPropertyData( - device_id, - &property_address as *const _, - 0, - null(), - &data_size as *const _ as *mut _, - rate_ptr as *const _ as *mut _, - ) - } - - // Add our sample rate change listener callback. - let reported_rate: f64 = 0.0; - let status = AudioObjectAddPropertyListener( - self.audio_device_id, - &property_address as *const _, - Some(rate_listener), - &reported_rate as *const _ as *mut _, - ); - coreaudio::Error::from_os_status(status)?; - - // Finally, set the sample rate. - let sample_rate = sample_rate as f64; - let status = AudioObjectSetPropertyData( - self.audio_device_id, - &property_address as *const _, - 0, - null(), - data_size, - &ranges[range_index] as *const _ as *const _, - ); - coreaudio::Error::from_os_status(status)?; - - // Wait for the reported_rate to change. - // - // This should not take longer than a few ms, but we timeout after 1 sec just in case. - // - // WARNING: a reference to reported_rate is unsafely captured above, - // and the loop below assumes it can change - but compiler does not know that! - // - let timer = ::std::time::Instant::now(); - while sample_rate != reported_rate { - if timer.elapsed() > Duration::from_secs(1) { - let description = - "timeout waiting for sample rate update for device".into(); - let err = BackendSpecificError { description }; - return Err(err.into()); - } - thread::sleep(Duration::from_millis(5)); - } - - // Remove the `rate_listener` callback. 
- let status = AudioObjectRemovePropertyListener( - self.audio_device_id, - &property_address as *const _, - Some(rate_listener), - &reported_rate as *const _ as *mut _, - ); - coreaudio::Error::from_os_status(status)?; - } - } - - let mut audio_unit = audio_unit_from_device(self, true)?; - - // Set the stream in interleaved mode. - let asbd = asbd_from_config(config, sample_format); - audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?; - - // Set the buffersize - match config.buffer_size { - BufferSize::Fixed(v) => { - let buffer_size_range = get_io_buffer_frame_size_range(&audio_unit)?; - match buffer_size_range { - SupportedBufferSize::Range { min, max } => { - if v >= min && v <= max { - audio_unit.set_property( - kAudioDevicePropertyBufferFrameSize, - scope, - element, - Some(&v), - )? - } else { - return Err(BuildStreamError::StreamConfigNotSupported); - } - } - SupportedBufferSize::Unknown => (), - } - } - BufferSize::Default => (), - } - - // Register the callback that is being called by coreaudio whenever it needs data to be - // fed to the audio buffer. - let bytes_per_channel = sample_format.sample_size(); - let sample_rate = config.sample_rate; - type Args = render_callback::Args; - audio_unit.set_input_callback(move |args: Args| unsafe { - let ptr = (*args.data.data).mBuffers.as_ptr() as *const AudioBuffer; - let len = (*args.data.data).mNumberBuffers as usize; - let buffers: &[AudioBuffer] = slice::from_raw_parts(ptr, len); - - // TODO: Perhaps loop over all buffers instead? - let AudioBuffer { - mNumberChannels: channels, - mDataByteSize: data_byte_size, - mData: data, - } = buffers[0]; - - let data = data as *mut (); - let len = (data_byte_size as usize / bytes_per_channel) as usize; - let data = Data::from_parts(data, len, sample_format); - - // TODO: Need a better way to get delay, for now we assume a double-buffer offset. 
- let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) { - Err(err) => { - error_callback(err.into()); - return Err(()); - } - Ok(cb) => cb, - }; - let buffer_frames = len / channels as usize; - let delay = frames_to_duration(buffer_frames, sample_rate); - let capture = callback - .sub(delay) - .expect("`capture` occurs before origin of alsa `StreamInstant`"); - let timestamp = crate::InputStreamTimestamp { callback, capture }; - - let info = InputCallbackInfo { timestamp }; - data_callback(&data, &info); - Ok(()) - })?; - - audio_unit.start()?; - - Ok(Stream::new(StreamInner { - playing: true, - audio_unit, - device_id: self.audio_device_id, - })) - } - - fn build_output_stream_raw( - &self, - config: &StreamConfig, - sample_format: SampleFormat, - mut data_callback: D, - mut error_callback: E, - ) -> Result - where - D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static, - E: FnMut(StreamError) + Send + 'static, - { - let mut audio_unit = audio_unit_from_device(self, false)?; - - // The scope and element for working with a device's output stream. - let scope = Scope::Input; - let element = Element::Output; - - // Set the stream in interleaved mode. - let asbd = asbd_from_config(config, sample_format); - audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?; - - // Set the buffersize - match config.buffer_size { - BufferSize::Fixed(v) => { - let buffer_size_range = get_io_buffer_frame_size_range(&audio_unit)?; - match buffer_size_range { - SupportedBufferSize::Range { min, max } => { - if v >= min && v <= max { - audio_unit.set_property( - kAudioDevicePropertyBufferFrameSize, - scope, - element, - Some(&v), - )? - } else { - return Err(BuildStreamError::StreamConfigNotSupported); - } - } - SupportedBufferSize::Unknown => (), - } - } - BufferSize::Default => (), - } - - // Register the callback that is being called by coreaudio whenever it needs data to be - // fed to the audio buffer. 
- let bytes_per_channel = sample_format.sample_size(); - let sample_rate = config.sample_rate; - type Args = render_callback::Args; - audio_unit.set_render_callback(move |args: Args| unsafe { - // If `run()` is currently running, then a callback will be available from this list. - // Otherwise, we just fill the buffer with zeroes and return. - - let AudioBuffer { - mNumberChannels: channels, - mDataByteSize: data_byte_size, - mData: data, - } = (*args.data.data).mBuffers[0]; - - let data = data as *mut (); - let len = (data_byte_size as usize / bytes_per_channel) as usize; - let mut data = Data::from_parts(data, len, sample_format); - - let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) { - Err(err) => { - error_callback(err.into()); - return Err(()); - } - Ok(cb) => cb, - }; - // TODO: Need a better way to get delay, for now we assume a double-buffer offset. - let buffer_frames = len / channels as usize; - let delay = frames_to_duration(buffer_frames, sample_rate); - let playback = callback - .add(delay) - .expect("`playback` occurs beyond representation supported by `StreamInstant`"); - let timestamp = crate::OutputStreamTimestamp { callback, playback }; - - let info = OutputCallbackInfo { timestamp }; - data_callback(&mut data, &info); - Ok(()) - })?; - - audio_unit.start()?; - - Ok(Stream::new(StreamInner { - playing: true, - audio_unit, - device_id: self.audio_device_id, - })) - } -} - fn host_time_to_stream_instant( m_host_time: u64, ) -> Result { @@ -877,70 +88,34 @@ fn frames_to_duration(frames: usize, rate: crate::SampleRate) -> std::time::Dura std::time::Duration::new(secs, nanos) } -pub struct Stream { - inner: RefCell, -} - -impl Stream { - fn new(inner: StreamInner) -> Self { - Self { - inner: RefCell::new(inner), +// TODO need stronger error identification +impl From for BuildStreamError { + fn from(err: coreaudio::Error) -> BuildStreamError { + match err { + 
coreaudio::Error::RenderCallbackBufferFormatDoesNotMatchAudioUnitStreamFormat + | coreaudio::Error::NoKnownSubtype + | coreaudio::Error::AudioUnit(coreaudio::error::AudioUnitError::FormatNotSupported) + | coreaudio::Error::AudioCodec(_) + | coreaudio::Error::AudioFormat(_) => BuildStreamError::StreamConfigNotSupported, + _ => BuildStreamError::DeviceNotAvailable, } } } -impl StreamTrait for Stream { - fn play(&self) -> Result<(), PlayStreamError> { - let mut stream = self.inner.borrow_mut(); - - if !stream.playing { - if let Err(e) = stream.audio_unit.start() { - let description = format!("{}", e); - let err = BackendSpecificError { description }; - return Err(err.into()); - } - stream.playing = true; - } - Ok(()) - } - - fn pause(&self) -> Result<(), PauseStreamError> { - let mut stream = self.inner.borrow_mut(); - - if stream.playing { - if let Err(e) = stream.audio_unit.stop() { - let description = format!("{}", e); - let err = BackendSpecificError { description }; - return Err(err.into()); - } - - stream.playing = false; - } - Ok(()) +impl From for SupportedStreamConfigsError { + fn from(err: coreaudio::Error) -> SupportedStreamConfigsError { + let description = format!("{}", err); + let err = BackendSpecificError { description }; + // Check for possible DeviceNotAvailable variant + SupportedStreamConfigsError::BackendSpecific { err } } } -fn check_os_status(os_status: OSStatus) -> Result<(), BackendSpecificError> { - match coreaudio::Error::from_os_status(os_status) { - Ok(()) => Ok(()), - Err(err) => { - let description = err.to_string(); - Err(BackendSpecificError { description }) - } +impl From for DefaultStreamConfigError { + fn from(err: coreaudio::Error) -> DefaultStreamConfigError { + let description = format!("{}", err); + let err = BackendSpecificError { description }; + // Check for possible DeviceNotAvailable variant + DefaultStreamConfigError::BackendSpecific { err } } } - -fn get_io_buffer_frame_size_range( - audio_unit: &AudioUnit, -) -> Result { 
- let buffer_size_range: AudioValueRange = audio_unit.get_property( - kAudioDevicePropertyBufferFrameSizeRange, - Scope::Global, - Element::Output, - )?; - - Ok(SupportedBufferSize::Range { - min: buffer_size_range.mMinimum as u32, - max: buffer_size_range.mMaximum as u32, - }) -} From b9b36d6a9a6ce1aeda5c7bcce4f2bc5e22d3529a Mon Sep 17 00:00:00 2001 From: Michael Hills Date: Sun, 25 Oct 2020 00:31:13 +1100 Subject: [PATCH 2/3] iOS default sample rate pulled from audio unit --- examples/ios-feedback/src/feedback.rs | 4 +- src/host/coreaudio/ios/mod.rs | 101 +++++++++++++------------- 2 files changed, 53 insertions(+), 52 deletions(-) diff --git a/examples/ios-feedback/src/feedback.rs b/examples/ios-feedback/src/feedback.rs index 1a190e9ed..457cda239 100644 --- a/examples/ios-feedback/src/feedback.rs +++ b/examples/ios-feedback/src/feedback.rs @@ -82,9 +82,9 @@ pub fn run_example() -> Result<(), anyhow::Error> { "Attempting to build both streams with f32 samples and `{:?}`.", config ); - println!("setup is"); + println!("Setup input stream"); let input_stream = input_device.build_input_stream(&config, input_data_fn, err_fn)?; - println!("setup os"); + println!("Setup output stream"); let output_stream = output_device.build_output_stream(&config, output_data_fn, err_fn)?; println!("Successfully built streams."); diff --git a/src/host/coreaudio/ios/mod.rs b/src/host/coreaudio/ios/mod.rs index 9ad774ef2..6c11f81cb 100644 --- a/src/host/coreaudio/ios/mod.rs +++ b/src/host/coreaudio/ios/mod.rs @@ -3,7 +3,8 @@ //! the AVAudioSession objc API which doesn't exist on macOS. //! //! TODO: -//! - Use AVAudioSession to enumerate (and set) buffer size / sample rate / number of channels +//! - Use AVAudioSession to enumerate buffer size / sample rate / number of channels and set +//! buffer size. //! 
extern crate core_foundation_sys; @@ -36,8 +37,6 @@ use std::slice; pub mod enumerate; -const DEFAULT_SAMPLE_RATE: SampleRate = SampleRate(44_100); - // These days the default of iOS is now F32 and no longer I16 const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32; @@ -84,26 +83,15 @@ impl Device { &self, ) -> Result { // TODO: query AVAudioSession for parameters, some values like sample rate and buffer size - // probably need to be tested but channels can be enumerated. - - // setup an audio unit for recording, and then pull some default parameters off it - - let mut audio_unit = create_audio_unit()?; - audio_unit.uninitialize()?; - configure_for_recording(&mut audio_unit)?; - audio_unit.initialize()?; - - let id = kAudioUnitProperty_StreamFormat; - let asbd: AudioStreamBasicDescription = - audio_unit.get_property(id, Scope::Input, Element::Input)?; - - let buffer_size = SupportedBufferSize::Range { min: 0, max: 0 }; + // probably need to actually be set to see if it works, but channels can be enumerated. + let asbd: AudioStreamBasicDescription = default_input_asbd()?; + let stream_config = stream_config_from_asbd(asbd); Ok(vec![SupportedStreamConfigRange { - channels: asbd.mChannelsPerFrame as u16, - min_sample_rate: SampleRate(asbd.mSampleRate as u32), - max_sample_rate: SampleRate(asbd.mSampleRate as u32), - buffer_size: buffer_size.clone(), + channels: stream_config.channels, + min_sample_rate: stream_config.sample_rate, + max_sample_rate: stream_config.sample_rate, + buffer_size: stream_config.buffer_size.clone(), sample_format: SUPPORTED_SAMPLE_FORMAT, }] .into_iter()) @@ -114,22 +102,17 @@ impl Device { &self, ) -> Result { // TODO: query AVAudioSession for parameters, some values like sample rate and buffer size - // probably need to be tested but channels can be enumerated. + // probably need to actually be set to see if it works, but channels can be enumerated. 
- // setup an audio unit, and then pull some default parameters off it + let asbd: AudioStreamBasicDescription = default_output_asbd()?; + let stream_config = stream_config_from_asbd(asbd); - let audio_unit = create_audio_unit()?; - let id = kAudioUnitProperty_StreamFormat; - let asbd: AudioStreamBasicDescription = - audio_unit.get_property(id, Scope::Output, Element::Output)?; - - let buffer_size = SupportedBufferSize::Range { min: 0, max: 0 }; let configs: Vec<_> = (1..=asbd.mChannelsPerFrame as u16) .map(|channels| SupportedStreamConfigRange { channels, - min_sample_rate: SampleRate(asbd.mSampleRate as u32), - max_sample_rate: SampleRate(asbd.mSampleRate as u32), - buffer_size: buffer_size.clone(), + min_sample_rate: stream_config.sample_rate, + max_sample_rate: stream_config.sample_rate, + buffer_size: stream_config.buffer_size.clone(), sample_format: SUPPORTED_SAMPLE_FORMAT, }) .collect(); @@ -138,28 +121,16 @@ impl Device { #[inline] fn default_input_config(&self) -> Result { - const EXPECT: &str = "expected at least one valid coreaudio stream config"; - let config = self - .supported_input_configs() - .expect(EXPECT) - .max_by(|a, b| a.cmp_default_heuristics(b)) - .unwrap() - .with_sample_rate(DEFAULT_SAMPLE_RATE); - - Ok(config) + let asbd: AudioStreamBasicDescription = default_input_asbd()?; + let stream_config = stream_config_from_asbd(asbd); + Ok(stream_config) } #[inline] fn default_output_config(&self) -> Result { - const EXPECT: &str = "expected at least one valid coreaudio stream config"; - let config = self - .supported_output_configs() - .expect(EXPECT) - .max_by(|a, b| a.cmp_default_heuristics(b)) - .unwrap() - .with_sample_rate(DEFAULT_SAMPLE_RATE); - - Ok(config) + let asbd: AudioStreamBasicDescription = default_output_asbd()?; + let stream_config = stream_config_from_asbd(asbd); + Ok(stream_config) } } @@ -428,3 +399,33 @@ fn configure_for_recording(audio_unit: &mut AudioUnit) -> Result<(), coreaudio:: Ok(()) } + +fn default_output_asbd() -> 
Result { + let audio_unit = create_audio_unit()?; + let id = kAudioUnitProperty_StreamFormat; + let asbd: AudioStreamBasicDescription = + audio_unit.get_property(id, Scope::Output, Element::Output)?; + Ok(asbd) +} + +fn default_input_asbd() -> Result { + let mut audio_unit = create_audio_unit()?; + audio_unit.uninitialize()?; + configure_for_recording(&mut audio_unit)?; + audio_unit.initialize()?; + + let id = kAudioUnitProperty_StreamFormat; + let asbd: AudioStreamBasicDescription = + audio_unit.get_property(id, Scope::Input, Element::Input)?; + Ok(asbd) +} + +fn stream_config_from_asbd(asbd: AudioStreamBasicDescription) -> SupportedStreamConfig { + let buffer_size = SupportedBufferSize::Range { min: 0, max: 0 }; + SupportedStreamConfig { + channels: asbd.mChannelsPerFrame as u16, + sample_rate: SampleRate(asbd.mSampleRate as u32), + buffer_size: buffer_size.clone(), + sample_format: SUPPORTED_SAMPLE_FORMAT, + } +} From f9f6990d9e570920cb9c34807344dbba0364c9eb Mon Sep 17 00:00:00 2001 From: Michael Hills Date: Sat, 5 Dec 2020 23:14:13 +1100 Subject: [PATCH 3/3] Add iOS build to CI --- .github/workflows/cpal.yml | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cpal.yml b/.github/workflows/cpal.yml index 783f95ef0..8c637b632 100644 --- a/.github/workflows/cpal.yml +++ b/.github/workflows/cpal.yml @@ -135,14 +135,14 @@ jobs: with: command: check use-cross: true - args: --target armv7-unknown-linux-gnueabihf --workspace --all-features --verbose + args: --target armv7-unknown-linux-gnueabihf --workspace --all-features --verbose - name: Test all features for armv7 uses: actions-rs/cargo@v1 with: command: test use-cross: true - args: --target armv7-unknown-linux-gnueabihf --workspace --all-features --verbose + args: --target armv7-unknown-linux-gnueabihf --workspace --all-features --verbose asmjs-wasm32-test: strategy: @@ -287,3 +287,23 @@ jobs: run: cargo install cargo-apk - name: Build APK run: cargo apk 
build --example android + + ios-build: + runs-on: macOS-latest + steps: + - uses: actions/checkout@v2 + - name: Install llvm and clang + run: brew install llvm + - name: Install stable + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + - name: Add iOS targets + run: rustup target add aarch64-apple-ios x86_64-apple-ios + - name: Install cargo lipo + run: cargo install cargo-lipo + - name: Build iphonesimulator feedback example + run: cd examples/ios-feedback && xcodebuild -scheme cpal-ios-example -configuration Debug -derivedDataPath build -sdk iphonesimulator +