/*
 *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
#define SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_

#include <AudioUnit/AudioUnit.h>

namespace webrtc {
namespace ios_adm {

class VoiceProcessingAudioUnitObserver {
 public:
  // Callback function called on a real-time priority I/O thread from the audio
  // unit. This method is used to signal that recorded audio is available.
  virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                         const AudioTimeStamp* time_stamp,
                                         UInt32 bus_number,
                                         UInt32 num_frames,
                                         AudioBufferList* io_data) = 0;

  // Callback function called on a real-time priority I/O thread from the audio
  // unit. This method is used to provide audio samples to the audio unit.
  virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
                                    const AudioTimeStamp* time_stamp,
                                    UInt32 bus_number,
                                    UInt32 num_frames,
                                    AudioBufferList* io_data) = 0;

 protected:
  ~VoiceProcessingAudioUnitObserver() {}
};

// Convenience class to abstract away the management of a Voice Processing
// I/O Audio Unit. The Voice Processing I/O unit has the same characteristics
// as the Remote I/O unit (supports full duplex low-latency audio input and
// output) and adds AEC for two-way duplex communication. It also adds AGC,
// adjustment of voice-processing quality, and muting. Hence, it is ideal for
// VoIP applications.
class VoiceProcessingAudioUnit {
 public:
  VoiceProcessingAudioUnit(bool bypass_voice_processing,
                           VoiceProcessingAudioUnitObserver* observer);
  ~VoiceProcessingAudioUnit();

  // TODO(tkchin): enum for state and state checking.
  enum State : int32_t {
    // Init() should be called.
    kInitRequired,
    // Audio unit created but not initialized.
    kUninitialized,
    // Initialized but not started. Equivalent to stopped.
    kInitialized,
    // Initialized and started.
    kStarted,
  };

  // Number of bytes per audio sample for 16-bit signed integer representation.
  static const UInt32 kBytesPerSample;

  // Initializes this class by creating the underlying audio unit instance.
  // Creates a Voice-Processing I/O unit and configures it for full-duplex
  // audio. The stream format is selected to avoid internal resampling and to
  // match WebRTC's 10 ms callback rate as closely as possible. Does not
  // initialize the audio unit.
  bool Init();

  VoiceProcessingAudioUnit::State GetState() const;

  // Initializes the underlying audio unit with the given sample rate.
  bool Initialize(Float64 sample_rate);

  // Starts the underlying audio unit.
  OSStatus Start();

  // Stops the underlying audio unit.
  bool Stop();

  // Uninitializes the underlying audio unit.
  bool Uninitialize();
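
  // A minimal usage sketch of the expected lifecycle, derived from the State
  // comments above. Illustrative only: the observer type MyObserver and the
  // 48 kHz sample rate are assumptions, not part of this header.
  //
  //   MyObserver observer;  // Implements VoiceProcessingAudioUnitObserver.
  //   VoiceProcessingAudioUnit unit(/*bypass_voice_processing=*/false,
  //                                 &observer);
  //   if (unit.Init() && unit.Initialize(48000.0)) {
  //     unit.Start();  // Observer callbacks now arrive on the I/O thread.
  //     // ... audio session is active ...
  //     unit.Stop();
  //     unit.Uninitialize();
  //   }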

  // Calls render on the underlying audio unit.
  OSStatus Render(AudioUnitRenderActionFlags* flags,
                  const AudioTimeStamp* time_stamp,
                  UInt32 output_bus_number,
                  UInt32 num_frames,
                  AudioBufferList* io_data);

 private:
  // The C API used to set callbacks requires static functions. When these are
  // called, they will invoke the relevant instance method by casting
  // in_ref_con to VoiceProcessingAudioUnit*.
  static OSStatus OnGetPlayoutData(void* in_ref_con,
                                   AudioUnitRenderActionFlags* flags,
                                   const AudioTimeStamp* time_stamp,
                                   UInt32 bus_number,
                                   UInt32 num_frames,
                                   AudioBufferList* io_data);
  static OSStatus OnDeliverRecordedData(void* in_ref_con,
                                        AudioUnitRenderActionFlags* flags,
                                        const AudioTimeStamp* time_stamp,
                                        UInt32 bus_number,
                                        UInt32 num_frames,
                                        AudioBufferList* io_data);

  // Notifies observer that samples are needed for playback.
  OSStatus NotifyGetPlayoutData(AudioUnitRenderActionFlags* flags,
                                const AudioTimeStamp* time_stamp,
                                UInt32 bus_number,
                                UInt32 num_frames,
                                AudioBufferList* io_data);
  // Notifies observer that recorded samples are available for render.
  OSStatus NotifyDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                     const AudioTimeStamp* time_stamp,
                                     UInt32 bus_number,
                                     UInt32 num_frames,
                                     AudioBufferList* io_data);

  // Returns the predetermined format with a specific sample rate. See
  // implementation file for details on format.
  AudioStreamBasicDescription GetFormat(Float64 sample_rate) const;

  // Deletes the underlying audio unit.
  void DisposeAudioUnit();

  const bool bypass_voice_processing_;
  VoiceProcessingAudioUnitObserver* observer_;
  AudioUnit vpio_unit_;
  VoiceProcessingAudioUnit::State state_;
};
}  // namespace ios_adm
}  // namespace webrtc

#endif  // SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_