1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "modules/audio_device/mac/audio_device_mac.h"
12
13 #include <ApplicationServices/ApplicationServices.h>
14 #include <mach/mach.h> // mach_task_self()
15 #include <sys/sysctl.h> // sysctlbyname()
16
17 #include <memory>
18
19 #include "modules/audio_device/audio_device_config.h"
20 #include "modules/third_party/portaudio/pa_ringbuffer.h"
21 #include "rtc_base/arraysize.h"
22 #include "rtc_base/checks.h"
23 #include "rtc_base/platform_thread.h"
24 #include "rtc_base/system/arch.h"
25
26 namespace webrtc {
27
28 #define WEBRTC_CA_RETURN_ON_ERR(expr) \
29 do { \
30 err = expr; \
31 if (err != noErr) { \
32 logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
33 return -1; \
34 } \
35 } while (0)
36
37 #define WEBRTC_CA_LOG_ERR(expr) \
38 do { \
39 err = expr; \
40 if (err != noErr) { \
41 logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
42 } \
43 } while (0)
44
45 #define WEBRTC_CA_LOG_WARN(expr) \
46 do { \
47 err = expr; \
48 if (err != noErr) { \
49 logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
50 } \
51 } while (0)
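
// Note: these macros assign the call's result to a local `OSStatus err`, so
// every function that uses them declares one first, e.g.:
//   OSStatus err = noErr;
//   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(...));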
52
53 enum { MaxNumberDevices = 64 };
54
55 // CoreAudio errors are best interpreted as four character strings.
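// An OSStatus is a 32-bit value whose bytes usually spell a readable
// four-character code; on little-endian hosts the bytes are stored in
// reverse order, which is why logCAMsg() prints them back-to-front.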
void AudioDeviceMac::logCAMsg(const rtc::LoggingSeverity sev,
                              const char* msg,
                              const char* err) {
59 RTC_DCHECK(msg != NULL);
60 RTC_DCHECK(err != NULL);
61
62 #ifdef WEBRTC_ARCH_BIG_ENDIAN
63 switch (sev) {
64 case rtc::LS_ERROR:
65 RTC_LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3];
66 break;
67 case rtc::LS_WARNING:
68 RTC_LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2]
69 << err[3];
70 break;
71 case rtc::LS_VERBOSE:
72 RTC_LOG(LS_VERBOSE) << msg << ": " << err[0] << err[1] << err[2]
73 << err[3];
74 break;
75 default:
76 break;
77 }
78 #else
79 // We need to flip the characters in this case.
80 switch (sev) {
81 case rtc::LS_ERROR:
82 RTC_LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
83 break;
84 case rtc::LS_WARNING:
85 RTC_LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1]
86 << err[0];
87 break;
88 case rtc::LS_VERBOSE:
89 RTC_LOG(LS_VERBOSE) << msg << ": " << err[3] << err[2] << err[1]
90 << err[0];
91 break;
92 default:
93 break;
94 }
95 #endif
96 }
97
AudioDeviceMac::AudioDeviceMac()
99 : _ptrAudioBuffer(NULL),
100 _mixerManager(),
101 _inputDeviceIndex(0),
102 _outputDeviceIndex(0),
103 _inputDeviceID(kAudioObjectUnknown),
104 _outputDeviceID(kAudioObjectUnknown),
105 _inputDeviceIsSpecified(false),
106 _outputDeviceIsSpecified(false),
107 _recChannels(N_REC_CHANNELS),
108 _playChannels(N_PLAY_CHANNELS),
109 _captureBufData(NULL),
110 _renderBufData(NULL),
111 _initialized(false),
112 _isShutDown(false),
113 _recording(false),
114 _playing(false),
115 _recIsInitialized(false),
116 _playIsInitialized(false),
117 _renderDeviceIsAlive(1),
118 _captureDeviceIsAlive(1),
119 _twoDevices(true),
120 _doStop(false),
121 _doStopRec(false),
122 _macBookPro(false),
123 _macBookProPanRight(false),
124 _captureLatencyUs(0),
125 _renderLatencyUs(0),
126 _captureDelayUs(0),
127 _renderDelayUs(0),
128 _renderDelayOffsetSamples(0),
129 _paCaptureBuffer(NULL),
130 _paRenderBuffer(NULL),
131 _captureBufSizeSamples(0),
132 _renderBufSizeSamples(0),
133 prev_key_state_() {
134 RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
135
136 memset(_renderConvertData, 0, sizeof(_renderConvertData));
137 memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
138 memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
139 memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
140 memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
141 }
142
AudioDeviceMac::~AudioDeviceMac() {
144 RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
145
146 if (!_isShutDown) {
147 Terminate();
148 }
149
150 RTC_DCHECK(capture_worker_thread_.empty());
151 RTC_DCHECK(render_worker_thread_.empty());
152
153 if (_paRenderBuffer) {
154 delete _paRenderBuffer;
155 _paRenderBuffer = NULL;
156 }
157
158 if (_paCaptureBuffer) {
159 delete _paCaptureBuffer;
160 _paCaptureBuffer = NULL;
161 }
162
163 if (_renderBufData) {
164 delete[] _renderBufData;
165 _renderBufData = NULL;
166 }
167
168 if (_captureBufData) {
169 delete[] _captureBufData;
170 _captureBufData = NULL;
171 }
172
173 kern_return_t kernErr = KERN_SUCCESS;
174 kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
175 if (kernErr != KERN_SUCCESS) {
176 RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
177 }
178
179 kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
180 if (kernErr != KERN_SUCCESS) {
181 RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
182 }
183 }
184
185 // ============================================================================
186 // API
187 // ============================================================================
188
void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
190 MutexLock lock(&mutex_);
191
192 _ptrAudioBuffer = audioBuffer;
193
194 // inform the AudioBuffer about default settings for this implementation
195 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
196 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
197 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
198 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
199 }
200
int32_t AudioDeviceMac::ActiveAudioLayer(
    AudioDeviceModule::AudioLayer& audioLayer) const {
203 audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
204 return 0;
205 }
206
AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() {
208 MutexLock lock(&mutex_);
209
210 if (_initialized) {
211 return InitStatus::OK;
212 }
213
214 OSStatus err = noErr;
215
216 _isShutDown = false;
217
218 // PortAudio ring buffers require an elementCount which is a power of two.
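  // The loops below round the requested sizes up to the next power of two,
  // e.g. a nominal 3000-sample buffer becomes 4096 samples.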
219 if (_renderBufData == NULL) {
220 UInt32 powerOfTwo = 1;
221 while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) {
222 powerOfTwo <<= 1;
223 }
224 _renderBufSizeSamples = powerOfTwo;
225 _renderBufData = new SInt16[_renderBufSizeSamples];
226 }
227
228 if (_paRenderBuffer == NULL) {
229 _paRenderBuffer = new PaUtilRingBuffer;
230 ring_buffer_size_t bufSize = -1;
231 bufSize = PaUtil_InitializeRingBuffer(
232 _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData);
233 if (bufSize == -1) {
234 RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
235 return InitStatus::PLAYOUT_ERROR;
236 }
237 }
238
239 if (_captureBufData == NULL) {
240 UInt32 powerOfTwo = 1;
241 while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) {
242 powerOfTwo <<= 1;
243 }
244 _captureBufSizeSamples = powerOfTwo;
245 _captureBufData = new Float32[_captureBufSizeSamples];
246 }
247
248 if (_paCaptureBuffer == NULL) {
249 _paCaptureBuffer = new PaUtilRingBuffer;
250 ring_buffer_size_t bufSize = -1;
251 bufSize =
252 PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32),
253 _captureBufSizeSamples, _captureBufData);
254 if (bufSize == -1) {
255 RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
256 return InitStatus::RECORDING_ERROR;
257 }
258 }
259
260 kern_return_t kernErr = KERN_SUCCESS;
261 kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
262 SYNC_POLICY_FIFO, 0);
263 if (kernErr != KERN_SUCCESS) {
264 RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
265 return InitStatus::OTHER_ERROR;
266 }
267
268 kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
269 SYNC_POLICY_FIFO, 0);
270 if (kernErr != KERN_SUCCESS) {
271 RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
272 return InitStatus::OTHER_ERROR;
273 }
274
275 // Setting RunLoop to NULL here instructs HAL to manage its own thread for
276 // notifications. This was the default behaviour on OS X 10.5 and earlier,
277 // but now must be explicitly specified. HAL would otherwise try to use the
278 // main thread to issue notifications.
279 AudioObjectPropertyAddress propertyAddress = {
280 kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal,
281 kAudioObjectPropertyElementMaster};
282 CFRunLoopRef runLoop = NULL;
283 UInt32 size = sizeof(CFRunLoopRef);
284 int aoerr = AudioObjectSetPropertyData(
285 kAudioObjectSystemObject, &propertyAddress, 0, NULL, size, &runLoop);
286 if (aoerr != noErr) {
287 RTC_LOG(LS_ERROR) << "Error in AudioObjectSetPropertyData: "
288 << (const char*)&aoerr;
289 return InitStatus::OTHER_ERROR;
290 }
291
292 // Listen for any device changes.
293 propertyAddress.mSelector = kAudioHardwarePropertyDevices;
294 WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(
295 kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
296
297 // Determine if this is a MacBook Pro
298 _macBookPro = false;
299 _macBookProPanRight = false;
300 char buf[128];
301 size_t length = sizeof(buf);
302 memset(buf, 0, length);
303
304 int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
305 if (intErr != 0) {
    RTC_LOG(LS_ERROR) << "Error in sysctlbyname(): " << intErr;
307 } else {
308 RTC_LOG(LS_VERBOSE) << "Hardware model: " << buf;
309 if (strncmp(buf, "MacBookPro", 10) == 0) {
310 _macBookPro = true;
311 }
312 }
313
314 _initialized = true;
315
316 return InitStatus::OK;
317 }
318
int32_t AudioDeviceMac::Terminate() {
320 if (!_initialized) {
321 return 0;
322 }
323
324 if (_recording) {
325 RTC_LOG(LS_ERROR) << "Recording must be stopped";
326 return -1;
327 }
328
329 if (_playing) {
330 RTC_LOG(LS_ERROR) << "Playback must be stopped";
331 return -1;
332 }
333
334 MutexLock lock(&mutex_);
335 _mixerManager.Close();
336
337 OSStatus err = noErr;
338 int retVal = 0;
339
340 AudioObjectPropertyAddress propertyAddress = {
341 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
342 kAudioObjectPropertyElementMaster};
343 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
344 kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
345
346 err = AudioHardwareUnload();
347 if (err != noErr) {
348 logCAMsg(rtc::LS_ERROR, "Error in AudioHardwareUnload()",
349 (const char*)&err);
350 retVal = -1;
351 }
352
353 _isShutDown = true;
354 _initialized = false;
355 _outputDeviceIsSpecified = false;
356 _inputDeviceIsSpecified = false;
357
358 return retVal;
359 }
360
bool AudioDeviceMac::Initialized() const {
362 return (_initialized);
363 }
364
int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) {
366 MutexLock lock(&mutex_);
367 return SpeakerIsAvailableLocked(available);
368 }
369
int32_t AudioDeviceMac::SpeakerIsAvailableLocked(bool& available) {
371 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
372
373 // Make an attempt to open up the
374 // output mixer corresponding to the currently selected output device.
375 //
376 if (!wasInitialized && InitSpeakerLocked() == -1) {
377 available = false;
378 return 0;
379 }
380
381 // Given that InitSpeaker was successful, we know that a valid speaker
382 // exists.
383 available = true;
384
385 // Close the initialized output mixer
386 //
387 if (!wasInitialized) {
388 _mixerManager.CloseSpeaker();
389 }
390
391 return 0;
392 }
393
int32_t AudioDeviceMac::InitSpeaker() {
395 MutexLock lock(&mutex_);
396 return InitSpeakerLocked();
397 }
398
int32_t AudioDeviceMac::InitSpeakerLocked() {
400 if (_playing) {
401 return -1;
402 }
403
404 if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) {
405 return -1;
406 }
407
408 if (_inputDeviceID == _outputDeviceID) {
409 _twoDevices = false;
410 } else {
411 _twoDevices = true;
412 }
413
414 if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) {
415 return -1;
416 }
417
418 return 0;
419 }
420
int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) {
422 MutexLock lock(&mutex_);
423 return MicrophoneIsAvailableLocked(available);
424 }
425
int32_t AudioDeviceMac::MicrophoneIsAvailableLocked(bool& available) {
427 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
428
429 // Make an attempt to open up the
  // input mixer corresponding to the currently selected input device.
431 //
432 if (!wasInitialized && InitMicrophoneLocked() == -1) {
433 available = false;
434 return 0;
435 }
436
437 // Given that InitMicrophone was successful, we know that a valid microphone
438 // exists.
439 available = true;
440
441 // Close the initialized input mixer
442 //
443 if (!wasInitialized) {
444 _mixerManager.CloseMicrophone();
445 }
446
447 return 0;
448 }
449
int32_t AudioDeviceMac::InitMicrophone() {
451 MutexLock lock(&mutex_);
452 return InitMicrophoneLocked();
453 }
454
int32_t AudioDeviceMac::InitMicrophoneLocked() {
456 if (_recording) {
457 return -1;
458 }
459
460 if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) {
461 return -1;
462 }
463
464 if (_inputDeviceID == _outputDeviceID) {
465 _twoDevices = false;
466 } else {
467 _twoDevices = true;
468 }
469
470 if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) {
471 return -1;
472 }
473
474 return 0;
475 }
476
bool AudioDeviceMac::SpeakerIsInitialized() const {
478 return (_mixerManager.SpeakerIsInitialized());
479 }
480
bool AudioDeviceMac::MicrophoneIsInitialized() const {
482 return (_mixerManager.MicrophoneIsInitialized());
483 }
484
int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) {
486 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
487
488 // Make an attempt to open up the
489 // output mixer corresponding to the currently selected output device.
490 //
491 if (!wasInitialized && InitSpeaker() == -1) {
492 // If we end up here it means that the selected speaker has no volume
493 // control.
494 available = false;
495 return 0;
496 }
497
498 // Given that InitSpeaker was successful, we know that a volume control exists
499 //
500 available = true;
501
502 // Close the initialized output mixer
503 //
504 if (!wasInitialized) {
505 _mixerManager.CloseSpeaker();
506 }
507
508 return 0;
509 }
510
int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) {
512 return (_mixerManager.SetSpeakerVolume(volume));
513 }
514
int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const {
516 uint32_t level(0);
517
518 if (_mixerManager.SpeakerVolume(level) == -1) {
519 return -1;
520 }
521
522 volume = level;
523 return 0;
524 }
525
int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const {
527 uint32_t maxVol(0);
528
529 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
530 return -1;
531 }
532
533 maxVolume = maxVol;
534 return 0;
535 }
536
int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const {
538 uint32_t minVol(0);
539
540 if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
541 return -1;
542 }
543
544 minVolume = minVol;
545 return 0;
546 }
547
int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) {
549 bool isAvailable(false);
550 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
551
552 // Make an attempt to open up the
553 // output mixer corresponding to the currently selected output device.
554 //
555 if (!wasInitialized && InitSpeaker() == -1) {
556 // If we end up here it means that the selected speaker has no volume
557 // control, hence it is safe to state that there is no mute control
558 // already at this stage.
559 available = false;
560 return 0;
561 }
562
563 // Check if the selected speaker has a mute control
564 //
565 _mixerManager.SpeakerMuteIsAvailable(isAvailable);
566
567 available = isAvailable;
568
569 // Close the initialized output mixer
570 //
571 if (!wasInitialized) {
572 _mixerManager.CloseSpeaker();
573 }
574
575 return 0;
576 }
577
int32_t AudioDeviceMac::SetSpeakerMute(bool enable) {
579 return (_mixerManager.SetSpeakerMute(enable));
580 }
581
int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const {
583 bool muted(0);
584
585 if (_mixerManager.SpeakerMute(muted) == -1) {
586 return -1;
587 }
588
589 enabled = muted;
590 return 0;
591 }
592
int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) {
594 bool isAvailable(false);
595 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
596
597 // Make an attempt to open up the
598 // input mixer corresponding to the currently selected input device.
599 //
600 if (!wasInitialized && InitMicrophone() == -1) {
601 // If we end up here it means that the selected microphone has no volume
    // control, hence it is safe to state that there is no mute control
603 // already at this stage.
604 available = false;
605 return 0;
606 }
607
608 // Check if the selected microphone has a mute control
609 //
610 _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
611 available = isAvailable;
612
613 // Close the initialized input mixer
614 //
615 if (!wasInitialized) {
616 _mixerManager.CloseMicrophone();
617 }
618
619 return 0;
620 }
621
int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) {
623 return (_mixerManager.SetMicrophoneMute(enable));
624 }
625
int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const {
627 bool muted(0);
628
629 if (_mixerManager.MicrophoneMute(muted) == -1) {
630 return -1;
631 }
632
633 enabled = muted;
634 return 0;
635 }
636
int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) {
638 bool isAvailable(false);
639 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
640
641 if (!wasInitialized && InitMicrophone() == -1) {
642 // Cannot open the specified device
643 available = false;
644 return 0;
645 }
646
647 // Check if the selected microphone can record stereo
648 //
649 _mixerManager.StereoRecordingIsAvailable(isAvailable);
650 available = isAvailable;
651
652 // Close the initialized input mixer
653 //
654 if (!wasInitialized) {
655 _mixerManager.CloseMicrophone();
656 }
657
658 return 0;
659 }
660
int32_t AudioDeviceMac::SetStereoRecording(bool enable) {
662 if (enable)
663 _recChannels = 2;
664 else
665 _recChannels = 1;
666
667 return 0;
668 }
669
int32_t AudioDeviceMac::StereoRecording(bool& enabled) const {
671 if (_recChannels == 2)
672 enabled = true;
673 else
674 enabled = false;
675
676 return 0;
677 }
678
int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) {
680 bool isAvailable(false);
681 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
682
683 if (!wasInitialized && InitSpeaker() == -1) {
684 // Cannot open the specified device
685 available = false;
686 return 0;
687 }
688
  // Check if the selected speaker can play stereo
690 //
691 _mixerManager.StereoPlayoutIsAvailable(isAvailable);
692 available = isAvailable;
693
  // Close the initialized output mixer
695 //
696 if (!wasInitialized) {
697 _mixerManager.CloseSpeaker();
698 }
699
700 return 0;
701 }
702
int32_t AudioDeviceMac::SetStereoPlayout(bool enable) {
704 if (enable)
705 _playChannels = 2;
706 else
707 _playChannels = 1;
708
709 return 0;
710 }
711
int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const {
713 if (_playChannels == 2)
714 enabled = true;
715 else
716 enabled = false;
717
718 return 0;
719 }
720
int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) {
722 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
723
724 // Make an attempt to open up the
  // input mixer corresponding to the currently selected input device.
726 //
727 if (!wasInitialized && InitMicrophone() == -1) {
728 // If we end up here it means that the selected microphone has no volume
729 // control.
730 available = false;
731 return 0;
732 }
733
734 // Given that InitMicrophone was successful, we know that a volume control
735 // exists
736 //
737 available = true;
738
739 // Close the initialized input mixer
740 //
741 if (!wasInitialized) {
742 _mixerManager.CloseMicrophone();
743 }
744
745 return 0;
746 }
747
int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) {
749 return (_mixerManager.SetMicrophoneVolume(volume));
750 }
751
int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const {
753 uint32_t level(0);
754
755 if (_mixerManager.MicrophoneVolume(level) == -1) {
756 RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
757 return -1;
758 }
759
760 volume = level;
761 return 0;
762 }
763
int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
765 uint32_t maxVol(0);
766
767 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
768 return -1;
769 }
770
771 maxVolume = maxVol;
772 return 0;
773 }
774
int32_t AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const {
776 uint32_t minVol(0);
777
778 if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
779 return -1;
780 }
781
782 minVolume = minVol;
783 return 0;
784 }
785
int16_t AudioDeviceMac::PlayoutDevices() {
787 AudioDeviceID playDevices[MaxNumberDevices];
788 return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
789 MaxNumberDevices);
790 }
791
int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) {
793 MutexLock lock(&mutex_);
794
795 if (_playIsInitialized) {
796 return -1;
797 }
798
799 AudioDeviceID playDevices[MaxNumberDevices];
800 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
801 playDevices, MaxNumberDevices);
802 RTC_LOG(LS_VERBOSE) << "number of available waveform-audio output devices is "
803 << nDevices;
804
805 if (index > (nDevices - 1)) {
806 RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
807 << "]";
808 return -1;
809 }
810
811 _outputDeviceIndex = index;
812 _outputDeviceIsSpecified = true;
813
814 return 0;
815 }
816
int32_t AudioDeviceMac::SetPlayoutDevice(
    AudioDeviceModule::WindowsDeviceType /*device*/) {
819 RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
820 return -1;
821 }
822
int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index,
                                          char name[kAdmMaxDeviceNameSize],
                                          char guid[kAdmMaxGuidSize]) {
826 const uint16_t nDevices(PlayoutDevices());
827
828 if ((index > (nDevices - 1)) || (name == NULL)) {
829 return -1;
830 }
831
832 memset(name, 0, kAdmMaxDeviceNameSize);
833
834 if (guid != NULL) {
835 memset(guid, 0, kAdmMaxGuidSize);
836 }
837
838 return GetDeviceName(kAudioDevicePropertyScopeOutput, index,
839 rtc::ArrayView<char>(name, kAdmMaxDeviceNameSize));
840 }
841
int32_t AudioDeviceMac::RecordingDeviceName(uint16_t index,
                                            char name[kAdmMaxDeviceNameSize],
                                            char guid[kAdmMaxGuidSize]) {
845 const uint16_t nDevices(RecordingDevices());
846
847 if ((index > (nDevices - 1)) || (name == NULL)) {
848 return -1;
849 }
850
851 memset(name, 0, kAdmMaxDeviceNameSize);
852
853 if (guid != NULL) {
854 memset(guid, 0, kAdmMaxGuidSize);
855 }
856
857 return GetDeviceName(kAudioDevicePropertyScopeInput, index,
858 rtc::ArrayView<char>(name, kAdmMaxDeviceNameSize));
859 }
860
int16_t AudioDeviceMac::RecordingDevices() {
862 AudioDeviceID recDevices[MaxNumberDevices];
863 return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
864 MaxNumberDevices);
865 }
866
int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) {
868 if (_recIsInitialized) {
869 return -1;
870 }
871
872 AudioDeviceID recDevices[MaxNumberDevices];
873 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
874 recDevices, MaxNumberDevices);
875 RTC_LOG(LS_VERBOSE) << "number of available waveform-audio input devices is "
876 << nDevices;
877
878 if (index > (nDevices - 1)) {
879 RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
880 << "]";
881 return -1;
882 }
883
884 _inputDeviceIndex = index;
885 _inputDeviceIsSpecified = true;
886
887 return 0;
888 }
889
int32_t AudioDeviceMac::SetRecordingDevice(
    AudioDeviceModule::WindowsDeviceType /*device*/) {
892 RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
893 return -1;
894 }
895
int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) {
897 available = true;
898
899 // Try to initialize the playout side
900 if (InitPlayout() == -1) {
901 available = false;
902 }
903
904 // We destroy the IOProc created by InitPlayout() in implDeviceIOProc().
905 // We must actually start playout here in order to have the IOProc
906 // deleted by calling StopPlayout().
907 if (StartPlayout() == -1) {
908 available = false;
909 }
910
911 // Cancel effect of initialization
912 if (StopPlayout() == -1) {
913 available = false;
914 }
915
916 return 0;
917 }
918
int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) {
920 available = true;
921
922 // Try to initialize the recording side
923 if (InitRecording() == -1) {
924 available = false;
925 }
926
927 // We destroy the IOProc created by InitRecording() in implInDeviceIOProc().
928 // We must actually start recording here in order to have the IOProc
929 // deleted by calling StopRecording().
930 if (StartRecording() == -1) {
931 available = false;
932 }
933
934 // Cancel effect of initialization
935 if (StopRecording() == -1) {
936 available = false;
937 }
938
939 return 0;
940 }
941
int32_t AudioDeviceMac::InitPlayout() {
943 RTC_LOG(LS_INFO) << "InitPlayout";
944 MutexLock lock(&mutex_);
945
946 if (_playing) {
947 return -1;
948 }
949
950 if (!_outputDeviceIsSpecified) {
951 return -1;
952 }
953
954 if (_playIsInitialized) {
955 return 0;
956 }
957
958 // Initialize the speaker (devices might have been added or removed)
959 if (InitSpeakerLocked() == -1) {
960 RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
961 }
962
963 if (!MicrophoneIsInitialized()) {
964 // Make this call to check if we are using
965 // one or two devices (_twoDevices)
966 bool available = false;
967 if (MicrophoneIsAvailableLocked(available) == -1) {
968 RTC_LOG(LS_WARNING) << "MicrophoneIsAvailable() failed";
969 }
970 }
971
972 PaUtil_FlushRingBuffer(_paRenderBuffer);
973
974 OSStatus err = noErr;
975 UInt32 size = 0;
976 _renderDelayOffsetSamples = 0;
977 _renderDelayUs = 0;
978 _renderLatencyUs = 0;
979 _renderDeviceIsAlive = 1;
980 _doStop = false;
981
982 // The internal microphone of a MacBook Pro is located under the left speaker
983 // grille. When the internal speakers are in use, we want to fully stereo
984 // pan to the right.
985 AudioObjectPropertyAddress propertyAddress = {
986 kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
987 if (_macBookPro) {
988 _macBookProPanRight = false;
989 Boolean hasProperty =
990 AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
991 if (hasProperty) {
992 UInt32 dataSource = 0;
993 size = sizeof(dataSource);
994 WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(
995 _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource));
996
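      // 'ispk' is the data-source code the HAL reports when the built-in
      // internal speakers are the active output path.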
997 if (dataSource == 'ispk') {
998 _macBookProPanRight = true;
999 RTC_LOG(LS_VERBOSE)
1000 << "MacBook Pro using internal speakers; stereo panning right";
1001 } else {
1002 RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
1003 }
1004
1005 // Add a listener to determine if the status changes.
1006 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1007 _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1008 }
1009 }
1010
1011 // Get current stream description
1012 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1013 memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
1014 size = sizeof(_outStreamFormat);
1015 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1016 _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
1017
1018 if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
1019 logCAMsg(rtc::LS_ERROR, "Unacceptable output stream format -> mFormatID",
1020 (const char*)&_outStreamFormat.mFormatID);
1021 return -1;
1022 }
1023
1024 if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
1025 RTC_LOG(LS_ERROR)
1026 << "Too many channels on output device (mChannelsPerFrame = "
1027 << _outStreamFormat.mChannelsPerFrame << ")";
1028 return -1;
1029 }
1030
1031 if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
    RTC_LOG(LS_ERROR) << "Non-interleaved audio data is not supported. "
                         "AudioHardware streams should not have this format.";
1034 return -1;
1035 }
1036
  RTC_LOG(LS_VERBOSE) << "Output stream format:";
1038 RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _outStreamFormat.mSampleRate
1039 << ", mChannelsPerFrame = "
1040 << _outStreamFormat.mChannelsPerFrame;
1041 RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = "
1042 << _outStreamFormat.mBytesPerPacket
1043 << ", mFramesPerPacket = "
1044 << _outStreamFormat.mFramesPerPacket;
1045 RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _outStreamFormat.mBytesPerFrame
1046 << ", mBitsPerChannel = "
1047 << _outStreamFormat.mBitsPerChannel;
1048 RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _outStreamFormat.mFormatFlags;
1049 logCAMsg(rtc::LS_VERBOSE, "mFormatID",
1050 (const char*)&_outStreamFormat.mFormatID);
1051
1052 // Our preferred format to work with.
1053 if (_outStreamFormat.mChannelsPerFrame < 2) {
1054 // Disable stereo playout when we only have one channel on the device.
1055 _playChannels = 1;
1056 RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
1057 }
1058 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
1059
1060 // Listen for format changes.
1061 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1062 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1063 _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1064
1065 // Listen for processor overloads.
1066 propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1067 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1068 _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1069
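  // When playout shares a device with an already-initialized recording side,
  // the shared IOProc was created in InitRecording(); otherwise create one
  // for this device here.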
1070 if (_twoDevices || !_recIsInitialized) {
1071 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
1072 _outputDeviceID, deviceIOProc, this, &_deviceIOProcID));
1073 }
1074
1075 _playIsInitialized = true;
1076
1077 return 0;
1078 }
1079
int32_t AudioDeviceMac::InitRecording() {
1081 RTC_LOG(LS_INFO) << "InitRecording";
1082 MutexLock lock(&mutex_);
1083
1084 if (_recording) {
1085 return -1;
1086 }
1087
1088 if (!_inputDeviceIsSpecified) {
1089 return -1;
1090 }
1091
1092 if (_recIsInitialized) {
1093 return 0;
1094 }
1095
1096 // Initialize the microphone (devices might have been added or removed)
1097 if (InitMicrophoneLocked() == -1) {
1098 RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
1099 }
1100
1101 if (!SpeakerIsInitialized()) {
1102 // Make this call to check if we are using
1103 // one or two devices (_twoDevices)
1104 bool available = false;
1105 if (SpeakerIsAvailableLocked(available) == -1) {
1106 RTC_LOG(LS_WARNING) << "SpeakerIsAvailable() failed";
1107 }
1108 }
1109
1110 OSStatus err = noErr;
1111 UInt32 size = 0;
1112
1113 PaUtil_FlushRingBuffer(_paCaptureBuffer);
1114
1115 _captureDelayUs = 0;
1116 _captureLatencyUs = 0;
1117 _captureDeviceIsAlive = 1;
1118 _doStopRec = false;
1119
1120 // Get current stream description
1121 AudioObjectPropertyAddress propertyAddress = {
1122 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
1123 memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
1124 size = sizeof(_inStreamFormat);
1125 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1126 _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));
1127
1128 if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
1129 logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
1130 (const char*)&_inStreamFormat.mFormatID);
1131 return -1;
1132 }
1133
1134 if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
1135 RTC_LOG(LS_ERROR)
1136 << "Too many channels on input device (mChannelsPerFrame = "
1137 << _inStreamFormat.mChannelsPerFrame << ")";
1138 return -1;
1139 }
1140
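  // One IO block is N_BLOCKS_IO chunks of 10 ms: frames per 10 ms
  // (mSampleRate / 100) times the device channel count.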
1141 const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
1142 _inStreamFormat.mSampleRate / 100 *
1143 N_BLOCKS_IO;
1144 if (io_block_size_samples > _captureBufSizeSamples) {
1145 RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
1146 << ") is larger than ring buffer ("
1147 << _captureBufSizeSamples << ")";
1148 return -1;
1149 }
1150
1151 RTC_LOG(LS_VERBOSE) << "Input stream format:";
1152 RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _inStreamFormat.mSampleRate
1153 << ", mChannelsPerFrame = "
1154 << _inStreamFormat.mChannelsPerFrame;
1155 RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << _inStreamFormat.mBytesPerPacket
1156 << ", mFramesPerPacket = "
1157 << _inStreamFormat.mFramesPerPacket;
1158 RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _inStreamFormat.mBytesPerFrame
1159 << ", mBitsPerChannel = "
1160 << _inStreamFormat.mBitsPerChannel;
1161 RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _inStreamFormat.mFormatFlags;
1162 logCAMsg(rtc::LS_VERBOSE, "mFormatID",
1163 (const char*)&_inStreamFormat.mFormatID);
1164
1165 // Our preferred format to work with
1166 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
1167 _inDesiredFormat.mChannelsPerFrame = 2;
1168 } else {
1169 // Disable stereo recording when we only have one channel on the device.
1170 _inDesiredFormat.mChannelsPerFrame = 1;
1171 _recChannels = 1;
1172 RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
1173 }
1174
1175 if (_ptrAudioBuffer) {
1176 // Update audio buffer with the selected parameters
1177 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
1178 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
1179 }
1180
1181 _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
1182 _inDesiredFormat.mBytesPerPacket =
1183 _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1184 _inDesiredFormat.mFramesPerPacket = 1;
1185 _inDesiredFormat.mBytesPerFrame =
1186 _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1187 _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
1188
1189 _inDesiredFormat.mFormatFlags =
1190 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
1191 #ifdef WEBRTC_ARCH_BIG_ENDIAN
1192 _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
1193 #endif
1194 _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
1195
1196 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
1197 &_captureConverter));
1198
1199 // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
1200 // TODO(xians): investigate this block.
1201 UInt32 bufByteCount =
1202 (UInt32)((_inStreamFormat.mSampleRate / 1000.0) * 10.0 * N_BLOCKS_IO *
1203 _inStreamFormat.mChannelsPerFrame * sizeof(Float32));
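  // If the device reports a fixed packet size, round the byte count up to a
  // whole multiple of mFramesPerPacket.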
1204 if (_inStreamFormat.mFramesPerPacket != 0) {
1205 if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) {
1206 bufByteCount =
1207 ((UInt32)(bufByteCount / _inStreamFormat.mFramesPerPacket) + 1) *
1208 _inStreamFormat.mFramesPerPacket;
1209 }
1210 }
1211
1212 // Ensure the buffer size is within the acceptable range provided by the
1213 // device.
1214 propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
1215 AudioValueRange range;
1216 size = sizeof(range);
1217 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1218 _inputDeviceID, &propertyAddress, 0, NULL, &size, &range));
1219 if (range.mMinimum > bufByteCount) {
1220 bufByteCount = range.mMinimum;
1221 } else if (range.mMaximum < bufByteCount) {
1222 bufByteCount = range.mMaximum;
1223 }
1224
1225 propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
1226 size = sizeof(bufByteCount);
1227 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
1228 _inputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
1229
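  // The device and stream latencies below are reported in frames and are
  // converted to microseconds as 1e6 * frames / sample rate (e.g. 128 frames
  // at 48 kHz is roughly 2.7 ms).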
1230 // Get capture device latency
1231 propertyAddress.mSelector = kAudioDevicePropertyLatency;
1232 UInt32 latency = 0;
1233 size = sizeof(UInt32);
1234 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1235 _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
1236 _captureLatencyUs = (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
1237
1238 // Get capture stream latency
1239 propertyAddress.mSelector = kAudioDevicePropertyStreams;
1240 AudioStreamID stream = 0;
1241 size = sizeof(AudioStreamID);
1242 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1243 _inputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
1244 propertyAddress.mSelector = kAudioStreamPropertyLatency;
1245 size = sizeof(UInt32);
1246 latency = 0;
1247 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1248 _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
1249 _captureLatencyUs +=
1250 (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
1251
1252 // Listen for format changes
1253 // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
1254 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1255 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1256 _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1257
1258 // Listen for processor overloads
1259 propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1260 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1261 _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1262
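  // With two distinct devices the capture side gets its own IOProc; on a
  // shared device one IOProc serves both directions, and it is created here
  // only if InitPlayout() has not already done so.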
1263 if (_twoDevices) {
1264 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
1265 _inputDeviceID, inDeviceIOProc, this, &_inDeviceIOProcID));
1266 } else if (!_playIsInitialized) {
1267 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
1268 _inputDeviceID, deviceIOProc, this, &_deviceIOProcID));
1269 }
1270
1271 // Mark recording side as initialized
1272 _recIsInitialized = true;
1273
1274 return 0;
1275 }
1276
int32_t AudioDeviceMac::StartRecording() {
1278 RTC_LOG(LS_INFO) << "StartRecording";
1279 MutexLock lock(&mutex_);
1280
1281 if (!_recIsInitialized) {
1282 return -1;
1283 }
1284
1285 if (_recording) {
1286 return 0;
1287 }
1288
1289 if (!_initialized) {
1290 RTC_LOG(LS_ERROR) << "Recording worker thread has not been started";
1291 return -1;
1292 }
1293
1294 RTC_DCHECK(capture_worker_thread_.empty());
1295 capture_worker_thread_ = rtc::PlatformThread::SpawnJoinable(
1296 [this] {
1297 while (CaptureWorkerThread()) {
1298 }
1299 },
1300 "CaptureWorkerThread",
1301 rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
1302
1303 OSStatus err = noErr;
1304 if (_twoDevices) {
1305 WEBRTC_CA_RETURN_ON_ERR(
1306 AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
1307 } else if (!_playing) {
1308 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
1309 }
1310
1311 _recording = true;
1312
1313 return 0;
1314 }
1315
int32_t AudioDeviceMac::StopRecording() {
1317 RTC_LOG(LS_INFO) << "StopRecording";
1318 MutexLock lock(&mutex_);
1319
1320 if (!_recIsInitialized) {
1321 return 0;
1322 }
1323
1324 OSStatus err = noErr;
1325 int32_t captureDeviceIsAlive = _captureDeviceIsAlive;
1326 if (_twoDevices && captureDeviceIsAlive == 1) {
1327 // Recording side uses its own dedicated device and IOProc.
1328 if (_recording) {
1329 _recording = false;
1330 _doStopRec = true; // Signal to io proc to stop audio device
1331 mutex_.Unlock(); // Cannot be under lock, risk of deadlock
1332 if (!_stopEventRec.Wait(TimeDelta::Seconds(2))) {
1333 MutexLock lockScoped(&mutex_);
      RTC_LOG(LS_WARNING) << "Timed out stopping the capture IOProc. "
                             "We may have failed to detect a device removal.";
1336 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
1337 WEBRTC_CA_LOG_WARN(
1338 AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
1339 }
1340 mutex_.Lock();
1341 _doStopRec = false;
1342 RTC_LOG(LS_INFO) << "Recording stopped (input device)";
1343 } else if (_recIsInitialized) {
1344 WEBRTC_CA_LOG_WARN(
1345 AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
1346 RTC_LOG(LS_INFO) << "Recording uninitialized (input device)";
1347 }
1348 } else {
1349 // We signal a stop for a shared device even when rendering has
1350 // not yet ended. This is to ensure the IOProc will return early as
1351 // intended (by checking `_recording`) before accessing
1352 // resources we free below (e.g. the capture converter).
1353 //
    // In the case of a shared device, the IOProc will verify
1355 // rendering has ended before stopping itself.
1356 if (_recording && captureDeviceIsAlive == 1) {
1357 _recording = false;
1358 _doStop = true; // Signal to io proc to stop audio device
1359 mutex_.Unlock(); // Cannot be under lock, risk of deadlock
1360 if (!_stopEvent.Wait(TimeDelta::Seconds(2))) {
1361 MutexLock lockScoped(&mutex_);
      RTC_LOG(LS_WARNING) << "Timed out stopping the shared IOProc. "
                             "We may have failed to detect a device removal.";
1364 // We assume rendering on a shared device has stopped as well if
1365 // the IOProc times out.
1366 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
1367 WEBRTC_CA_LOG_WARN(
1368 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1369 }
1370 mutex_.Lock();
1371 _doStop = false;
1372 RTC_LOG(LS_INFO) << "Recording stopped (shared device)";
1373 } else if (_recIsInitialized && !_playing && !_playIsInitialized) {
1374 WEBRTC_CA_LOG_WARN(
1375 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1376 RTC_LOG(LS_INFO) << "Recording uninitialized (shared device)";
1377 }
1378 }
1379
1380 // Setting this signal will allow the worker thread to be stopped.
1381 _captureDeviceIsAlive = 0;
1382
1383 if (!capture_worker_thread_.empty()) {
1384 mutex_.Unlock();
1385 capture_worker_thread_.Finalize();
1386 mutex_.Lock();
1387 }
1388
1389 WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
1390
1391 // Remove listeners.
1392 AudioObjectPropertyAddress propertyAddress = {
1393 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
1394 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1395 _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1396
1397 propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1398 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1399 _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1400
1401 _recIsInitialized = false;
1402 _recording = false;
1403
1404 return 0;
1405 }
1406
bool AudioDeviceMac::RecordingIsInitialized() const {
1408 return (_recIsInitialized);
1409 }
1410
bool AudioDeviceMac::Recording() const {
1412 return (_recording);
1413 }
1414
bool AudioDeviceMac::PlayoutIsInitialized() const {
1416 return (_playIsInitialized);
1417 }
1418
int32_t AudioDeviceMac::StartPlayout() {
1420 RTC_LOG(LS_INFO) << "StartPlayout";
1421 MutexLock lock(&mutex_);
1422
1423 if (!_playIsInitialized) {
1424 return -1;
1425 }
1426
1427 if (_playing) {
1428 return 0;
1429 }
1430
1431 RTC_DCHECK(render_worker_thread_.empty());
1432 render_worker_thread_ = rtc::PlatformThread::SpawnJoinable(
1433 [this] {
1434 while (RenderWorkerThread()) {
1435 }
1436 },
1437 "RenderWorkerThread",
1438 rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
1439
1440 if (_twoDevices || !_recording) {
1441 OSStatus err = noErr;
1442 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
1443 }
1444 _playing = true;
1445
1446 return 0;
1447 }
1448
int32_t AudioDeviceMac::StopPlayout() {
1450 RTC_LOG(LS_INFO) << "StopPlayout";
1451 MutexLock lock(&mutex_);
1452
1453 if (!_playIsInitialized) {
1454 return 0;
1455 }
1456
1457 OSStatus err = noErr;
1458 int32_t renderDeviceIsAlive = _renderDeviceIsAlive;
1459 if (_playing && renderDeviceIsAlive == 1) {
1460 // We signal a stop for a shared device even when capturing has not
1461 // yet ended. This is to ensure the IOProc will return early as
1462 // intended (by checking `_playing`) before accessing resources we
1463 // free below (e.g. the render converter).
1464 //
1465 // In the case of a shared device, the IOProc will verify capturing
1466 // has ended before stopping itself.
1467 _playing = false;
1468 _doStop = true; // Signal to io proc to stop audio device
1469 mutex_.Unlock(); // Cannot be under lock, risk of deadlock
1470 if (!_stopEvent.Wait(TimeDelta::Seconds(2))) {
1471 MutexLock lockScoped(&mutex_);
      RTC_LOG(LS_WARNING) << "Timed out stopping the render IOProc. "
                             "We may have failed to detect a device removal.";
1474
1475 // We assume capturing on a shared device has stopped as well if the
1476 // IOProc times out.
1477 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
1478 WEBRTC_CA_LOG_WARN(
1479 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1480 }
1481 mutex_.Lock();
1482 _doStop = false;
1483 RTC_LOG(LS_INFO) << "Playout stopped";
1484 } else if (_twoDevices && _playIsInitialized) {
1485 WEBRTC_CA_LOG_WARN(
1486 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1487 RTC_LOG(LS_INFO) << "Playout uninitialized (output device)";
1488 } else if (!_twoDevices && _playIsInitialized && !_recIsInitialized) {
1489 WEBRTC_CA_LOG_WARN(
1490 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1491 RTC_LOG(LS_INFO) << "Playout uninitialized (shared device)";
1492 }
1493
1494 // Setting this signal will allow the worker thread to be stopped.
1495 _renderDeviceIsAlive = 0;
1496 if (!render_worker_thread_.empty()) {
1497 mutex_.Unlock();
1498 render_worker_thread_.Finalize();
1499 mutex_.Lock();
1500 }
1501
1502 WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
1503
1504 // Remove listeners.
1505 AudioObjectPropertyAddress propertyAddress = {
1506 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, 0};
1507 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1508 _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1509
1510 propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1511 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1512 _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1513
1514 if (_macBookPro) {
1515 Boolean hasProperty =
1516 AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
1517 if (hasProperty) {
1518 propertyAddress.mSelector = kAudioDevicePropertyDataSource;
1519 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1520 _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1521 }
1522 }
1523
1524 _playIsInitialized = false;
1525 _playing = false;
1526
1527 return 0;
1528 }
1529
int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const {
1531 int32_t renderDelayUs = _renderDelayUs;
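  // The reported delay is the measured render delay plus the device/stream
  // latency, rounded to the nearest millisecond.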
1532 delayMS =
1533 static_cast<uint16_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
1534 return 0;
1535 }
1536
bool AudioDeviceMac::Playing() const {
1538 return (_playing);
1539 }
1540
1541 // ============================================================================
1542 // Private Methods
1543 // ============================================================================
1544
int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
                                         AudioDeviceID scopedDeviceIds[],
                                         const uint32_t deviceListLength) {
1548 OSStatus err = noErr;
1549
1550 AudioObjectPropertyAddress propertyAddress = {
1551 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
1552 kAudioObjectPropertyElementMaster};
1553 UInt32 size = 0;
1554 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(
1555 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size));
1556 if (size == 0) {
1557 RTC_LOG(LS_WARNING) << "No devices";
1558 return 0;
1559 }
1560
1561 UInt32 numberDevices = size / sizeof(AudioDeviceID);
1562 const auto deviceIds = std::make_unique<AudioDeviceID[]>(numberDevices);
1563 AudioBufferList* bufferList = NULL;
1564 UInt32 numberScopedDevices = 0;
1565
1566 // First check if there is a default device and list it
1567 UInt32 hardwareProperty = 0;
1568 if (scope == kAudioDevicePropertyScopeOutput) {
1569 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
1570 } else {
1571 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
1572 }
1573
1574 AudioObjectPropertyAddress propertyAddressDefault = {
1575 hardwareProperty, kAudioObjectPropertyScopeGlobal,
1576 kAudioObjectPropertyElementMaster};
1577
1578 AudioDeviceID usedID;
1579 UInt32 uintSize = sizeof(UInt32);
1580 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
1581 &propertyAddressDefault, 0,
1582 NULL, &uintSize, &usedID));
1583 if (usedID != kAudioDeviceUnknown) {
1584 scopedDeviceIds[numberScopedDevices] = usedID;
1585 numberScopedDevices++;
1586 } else {
1587 RTC_LOG(LS_WARNING) << "GetNumberDevices(): Default device unknown";
1588 }
1589
1590 // Then list the rest of the devices
1591 bool listOK = true;
1592
1593 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
1594 &propertyAddress, 0, NULL, &size,
1595 deviceIds.get()));
1596 if (err != noErr) {
1597 listOK = false;
1598 } else {
1599 propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
1600 propertyAddress.mScope = scope;
1601 propertyAddress.mElement = 0;
1602 for (UInt32 i = 0; i < numberDevices; i++) {
1603 // Check for input channels
1604 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(
1605 deviceIds[i], &propertyAddress, 0, NULL, &size));
1606 if (err == kAudioHardwareBadDeviceError) {
1607 // This device doesn't actually exist; continue iterating.
1608 continue;
1609 } else if (err != noErr) {
1610 listOK = false;
1611 break;
1612 }
1613
1614 bufferList = (AudioBufferList*)malloc(size);
1615 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
1616 deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList));
1617 if (err != noErr) {
1618 listOK = false;
1619 break;
1620 }
1621
1622 if (bufferList->mNumberBuffers > 0) {
1623 if (numberScopedDevices >= deviceListLength) {
1624 RTC_LOG(LS_ERROR) << "Device list is not long enough";
1625 listOK = false;
1626 break;
1627 }
1628
1629 scopedDeviceIds[numberScopedDevices] = deviceIds[i];
1630 numberScopedDevices++;
1631 }
1632
1633 free(bufferList);
1634 bufferList = NULL;
1635 } // for
1636 }
1637
1638 if (!listOK) {
1639 if (bufferList) {
1640 free(bufferList);
1641 bufferList = NULL;
1642 }
1643 return -1;
1644 }
1645
1646 return numberScopedDevices;
1647 }
1648
int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
                                      const uint16_t index,
                                      rtc::ArrayView<char> name) {
1652 OSStatus err = noErr;
1653 AudioDeviceID deviceIds[MaxNumberDevices];
1654
1655 int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
1656 if (numberDevices < 0) {
1657 return -1;
1658 } else if (numberDevices == 0) {
1659 RTC_LOG(LS_ERROR) << "No devices";
1660 return -1;
1661 }
1662
  // If the index is lower than the number of devices, assume it's a WebRTC
  // device index; otherwise assume it's a CoreAudio device ID.
1665 AudioDeviceID usedID;
1666
1667 // Check if there is a default device
1668 bool isDefaultDevice = false;
1669 if (index == 0) {
1670 UInt32 hardwareProperty = 0;
1671 if (scope == kAudioDevicePropertyScopeOutput) {
1672 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
1673 } else {
1674 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
1675 }
1676 AudioObjectPropertyAddress propertyAddress = {
1677 hardwareProperty, kAudioObjectPropertyScopeGlobal,
1678 kAudioObjectPropertyElementMaster};
1679 UInt32 size = sizeof(UInt32);
1680 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1681 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID));
1682 if (usedID == kAudioDeviceUnknown) {
1683 RTC_LOG(LS_WARNING) << "GetDeviceName(): Default device unknown";
1684 } else {
1685 isDefaultDevice = true;
1686 }
1687 }
1688
1689 AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName,
1690 scope, 0};
1691
1692 if (isDefaultDevice) {
1693 std::array<char, kAdmMaxDeviceNameSize> devName;
1694 UInt32 len = devName.size();
1695
1696 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1697 usedID, &propertyAddress, 0, NULL, &len, devName.data()));
1698
1699 rtc::SimpleStringBuilder ss(name);
1700 ss.AppendFormat("default (%s)", devName.data());
1701 } else {
1702 if (index < numberDevices) {
1703 usedID = deviceIds[index];
1704 } else {
1705 usedID = index;
1706 }
1707 UInt32 len = name.size();
1708
1709 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1710 usedID, &propertyAddress, 0, NULL, &len, name.data()));
1711 }
1712
1713 return 0;
1714 }
1715
int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
                                   AudioDeviceID& deviceId,
                                   const bool isInput) {
1719 OSStatus err = noErr;
1720 UInt32 size = 0;
1721 AudioObjectPropertyScope deviceScope;
1722 AudioObjectPropertySelector defaultDeviceSelector;
1723 AudioDeviceID deviceIds[MaxNumberDevices];
1724
1725 if (isInput) {
1726 deviceScope = kAudioDevicePropertyScopeInput;
1727 defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
1728 } else {
1729 deviceScope = kAudioDevicePropertyScopeOutput;
1730 defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
1731 }
1732
1733 AudioObjectPropertyAddress propertyAddress = {
1734 defaultDeviceSelector, kAudioObjectPropertyScopeGlobal,
1735 kAudioObjectPropertyElementMaster};
1736
1737 // Get the actual device IDs
1738 int numberDevices =
1739 GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices);
1740 if (numberDevices < 0) {
1741 return -1;
1742 } else if (numberDevices == 0) {
1743 RTC_LOG(LS_ERROR) << "InitDevice(): No devices";
1744 return -1;
1745 }
1746
1747 bool isDefaultDevice = false;
1748 deviceId = kAudioDeviceUnknown;
1749 if (userDeviceIndex == 0) {
1750 // Try to use default system device
1751 size = sizeof(AudioDeviceID);
1752 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1753 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId));
1754 if (deviceId == kAudioDeviceUnknown) {
1755 RTC_LOG(LS_WARNING) << "No default device exists";
1756 } else {
1757 isDefaultDevice = true;
1758 }
1759 }
1760
1761 if (!isDefaultDevice) {
1762 deviceId = deviceIds[userDeviceIndex];
1763 }
1764
1765 // Obtain device name and manufacturer for logging.
1766 // Also use this as a test to ensure a user-set device ID is valid.
1767 char devName[128];
1768 char devManf[128];
1769 memset(devName, 0, sizeof(devName));
1770 memset(devManf, 0, sizeof(devManf));
1771
1772 propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
1773 propertyAddress.mScope = deviceScope;
1774 propertyAddress.mElement = 0;
1775 size = sizeof(devName);
1776 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
1777 0, NULL, &size, devName));
1778
1779 propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
1780 size = sizeof(devManf);
1781 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
1782 0, NULL, &size, devManf));
1783
1784 if (isInput) {
1785 RTC_LOG(LS_INFO) << "Input device: " << devManf << " " << devName;
1786 } else {
1787 RTC_LOG(LS_INFO) << "Output device: " << devManf << " " << devName;
1788 }
1789
1790 return 0;
1791 }
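// Editor's note: every CoreAudio query in InitDevice() follows the same
// pattern: fill in an AudioObjectPropertyAddress, then call
// AudioObjectGetPropertyData() with a size/value pair. A minimal sketch of
// that pattern as a standalone helper (hypothetical name, not part of this
// file), fetching a device's name for logging:
//
//   static OSStatus GetDeviceNameForLogging(AudioDeviceID deviceId,
//                                           char* buf, UInt32 bufSize) {
//     AudioObjectPropertyAddress addr = {kAudioDevicePropertyDeviceName,
//                                        kAudioObjectPropertyScopeGlobal,
//                                        kAudioObjectPropertyElementMaster};
//     UInt32 size = bufSize;
//     return AudioObjectGetPropertyData(deviceId, &addr, 0, NULL, &size, buf);
//   }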
1792
1793 OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
1794 // Our preferred format to work with.
1795 _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
1796 _outDesiredFormat.mChannelsPerFrame = _playChannels;
1797
1798 if (_ptrAudioBuffer) {
1799 // Update audio buffer with the selected parameters.
1800 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
1801 _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
1802 }
1803
1804 _renderDelayOffsetSamples =
1805 _renderBufSizeSamples - N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
1806 _outDesiredFormat.mChannelsPerFrame;
1807
1808 _outDesiredFormat.mBytesPerPacket =
1809 _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1810 // In uncompressed audio, a packet is one frame.
1811 _outDesiredFormat.mFramesPerPacket = 1;
1812 _outDesiredFormat.mBytesPerFrame =
1813 _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1814 _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
1815
1816 _outDesiredFormat.mFormatFlags =
1817 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
1818 #ifdef WEBRTC_ARCH_BIG_ENDIAN
1819 _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
1820 #endif
1821 _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
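// Editor's note: with the fields above, a stereo 16-bit stream works out to
// mBitsPerChannel = 16 and mBytesPerFrame = 2 channels * sizeof(SInt16) = 4;
// because one packet is one frame for linear PCM, mBytesPerPacket = 4 as
// well. (Illustrative arithmetic only; the channel count comes from
// _playChannels at run time.)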
1822
1823 OSStatus err = noErr;
1824 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(
1825 &_outDesiredFormat, &_outStreamFormat, &_renderConverter));
1826
1827 // Try to set the device IO buffer size to the desired value of 20 ms.
1828 const uint16_t kPlayBufDelayFixed = 20;
1829 UInt32 bufByteCount = static_cast<UInt32>(
1830 (_outStreamFormat.mSampleRate / 1000.0) * kPlayBufDelayFixed *
1831 _outStreamFormat.mChannelsPerFrame * sizeof(Float32));
1832 if (_outStreamFormat.mFramesPerPacket != 0) {
1833 if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) {
1834 bufByteCount = (static_cast<UInt32>(bufByteCount /
1835 _outStreamFormat.mFramesPerPacket) +
1836 1) *
1837 _outStreamFormat.mFramesPerPacket;
1838 }
1839 }
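// Editor's note: as a worked example (assuming a 48 kHz stereo Float32 device
// stream), the 20 ms request above is
//   48000 / 1000 * 20 * 2 * sizeof(Float32) = 7680 bytes,
// which is then rounded up to the next multiple of
// _outStreamFormat.mFramesPerPacket if it does not divide evenly, and clamped
// to the device's kAudioDevicePropertyBufferSizeRange below.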
1840
1841 // Ensure the buffer size is within the range provided by the device.
1842 AudioObjectPropertyAddress propertyAddress = {
1843 kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
1844 propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
1845 AudioValueRange range;
1846 UInt32 size = sizeof(range);
1847 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1848 _outputDeviceID, &propertyAddress, 0, NULL, &size, &range));
1849 if (range.mMinimum > bufByteCount) {
1850 bufByteCount = range.mMinimum;
1851 } else if (range.mMaximum < bufByteCount) {
1852 bufByteCount = range.mMaximum;
1853 }
1854
1855 propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
1856 size = sizeof(bufByteCount);
1857 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
1858 _outputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
1859
1860 // Get render device latency.
1861 propertyAddress.mSelector = kAudioDevicePropertyLatency;
1862 UInt32 latency = 0;
1863 size = sizeof(UInt32);
1864 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1865 _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
1866 _renderLatencyUs =
1867 static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
1868
1869 // Get render stream latency.
1870 propertyAddress.mSelector = kAudioDevicePropertyStreams;
1871 AudioStreamID stream = 0;
1872 size = sizeof(AudioStreamID);
1873 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1874 _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
1875 propertyAddress.mSelector = kAudioStreamPropertyLatency;
1876 size = sizeof(UInt32);
1877 latency = 0;
1878 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1879 _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
1880 _renderLatencyUs +=
1881 static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
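// Editor's note: both latency queries above convert a frame count reported by
// the HAL into microseconds with (1.0e6 * frames) / sampleRate. For example,
// at 48 kHz a device latency of 128 frames is roughly 2666 us and a stream
// latency of 160 frames adds roughly 3333 us, leaving _renderLatencyUs near
// 6000 us. (Numbers are illustrative assumptions, not values taken from any
// particular device.)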
1882
1883 RTC_LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
1884 << _renderDelayOffsetSamples
1885 << ", _renderDelayUs=" << _renderDelayUs
1886 << ", _renderLatencyUs=" << _renderLatencyUs;
1887 return 0;
1888 }
1889
1890 OSStatus AudioDeviceMac::objectListenerProc(
1891 AudioObjectID objectId,
1892 UInt32 numberAddresses,
1893 const AudioObjectPropertyAddress addresses[],
1894 void* clientData) {
1895 AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
1896 RTC_DCHECK(ptrThis != NULL);
1897
1898 ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
1899
1900 // AudioObjectPropertyListenerProc functions are supposed to return 0
1901 return 0;
1902 }
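// Editor's note: a listener like objectListenerProc() is attached with
// AudioObjectAddPropertyListener(). A minimal registration sketch (the actual
// registration in this class happens elsewhere and may differ in detail):
//
//   AudioObjectPropertyAddress addr = {kAudioHardwarePropertyDevices,
//                                      kAudioObjectPropertyScopeGlobal,
//                                      kAudioObjectPropertyElementMaster};
//   AudioObjectAddPropertyListener(kAudioObjectSystemObject, &addr,
//                                  &objectListenerProc, this);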
1903
1904 OSStatus AudioDeviceMac::implObjectListenerProc(
1905 const AudioObjectID objectId,
1906 const UInt32 numberAddresses,
1907 const AudioObjectPropertyAddress addresses[]) {
1908 RTC_LOG(LS_VERBOSE) << "AudioDeviceMac::implObjectListenerProc()";
1909
1910 for (UInt32 i = 0; i < numberAddresses; i++) {
1911 if (addresses[i].mSelector == kAudioHardwarePropertyDevices) {
1912 HandleDeviceChange();
1913 } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) {
1914 HandleStreamFormatChange(objectId, addresses[i]);
1915 } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) {
1916 HandleDataSourceChange(objectId, addresses[i]);
1917 } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) {
1918 HandleProcessorOverload(addresses[i]);
1919 }
1920 }
1921
1922 return 0;
1923 }
1924
1925 int32_t AudioDeviceMac::HandleDeviceChange() {
1926 OSStatus err = noErr;
1927
1928 RTC_LOG(LS_VERBOSE) << "kAudioHardwarePropertyDevices";
1929
1930 // A device has changed. Check if our registered devices have been removed.
1931 // Ensure the devices have been initialized, meaning the IDs are valid.
1932 if (MicrophoneIsInitialized()) {
1933 AudioObjectPropertyAddress propertyAddress = {
1934 kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0};
1935 UInt32 deviceIsAlive = 1;
1936 UInt32 size = sizeof(UInt32);
1937 err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL,
1938 &size, &deviceIsAlive);
1939
1940 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
1941 RTC_LOG(LS_WARNING) << "Capture device is not alive (probably removed)";
1942 _captureDeviceIsAlive = 0;
1943 _mixerManager.CloseMicrophone();
1944 } else if (err != noErr) {
1945 logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
1946 (const char*)&err);
1947 return -1;
1948 }
1949 }
1950
1951 if (SpeakerIsInitialized()) {
1952 AudioObjectPropertyAddress propertyAddress = {
1953 kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0};
1954 UInt32 deviceIsAlive = 1;
1955 UInt32 size = sizeof(UInt32);
1956 err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL,
1957 &size, &deviceIsAlive);
1958
1959 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
1960 RTC_LOG(LS_WARNING) << "Render device is not alive (probably removed)";
1961 _renderDeviceIsAlive = 0;
1962 _mixerManager.CloseSpeaker();
1963 } else if (err != noErr) {
1964 logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
1965 (const char*)&err);
1966 return -1;
1967 }
1968 }
1969
1970 return 0;
1971 }
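// Editor's note: the two branches above share the same "is the device still
// alive?" probe. As a standalone sketch (hypothetical helper, not part of
// this file):
//
//   static bool DeviceIsAlive(AudioDeviceID id, AudioObjectPropertyScope s) {
//     AudioObjectPropertyAddress addr = {kAudioDevicePropertyDeviceIsAlive,
//                                        s, 0};
//     UInt32 alive = 1;
//     UInt32 size = sizeof(alive);
//     OSStatus e =
//         AudioObjectGetPropertyData(id, &addr, 0, NULL, &size, &alive);
//     return e == noErr && alive != 0;
//   }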
1972
1973 int32_t AudioDeviceMac::HandleStreamFormatChange(
1974 const AudioObjectID objectId,
1975 const AudioObjectPropertyAddress propertyAddress) {
1976 OSStatus err = noErr;
1977
1978 RTC_LOG(LS_VERBOSE) << "Stream format changed";
1979
1980 if (objectId != _inputDeviceID && objectId != _outputDeviceID) {
1981 return 0;
1982 }
1983
1984 // Get the new device format
1985 AudioStreamBasicDescription streamFormat;
1986 UInt32 size = sizeof(streamFormat);
1987 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1988 objectId, &propertyAddress, 0, NULL, &size, &streamFormat));
1989
1990 if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
1991 logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
1992 (const char*)&streamFormat.mFormatID);
1993 return -1;
1994 }
1995
1996 if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
1997 RTC_LOG(LS_ERROR) << "Too many channels on device (mChannelsPerFrame = "
1998 << streamFormat.mChannelsPerFrame << ")";
1999 return -1;
2000 }
2001
2002 if (_ptrAudioBuffer && streamFormat.mChannelsPerFrame != _recChannels) {
2003 RTC_LOG(LS_ERROR) << "Changing channels not supported (mChannelsPerFrame = "
2004 << streamFormat.mChannelsPerFrame << ")";
2005 return -1;
2006 }
2007
2008 RTC_LOG(LS_VERBOSE) << "Stream format:";
2009 RTC_LOG(LS_VERBOSE) << "mSampleRate = " << streamFormat.mSampleRate
2010 << ", mChannelsPerFrame = "
2011 << streamFormat.mChannelsPerFrame;
2012 RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << streamFormat.mBytesPerPacket
2013 << ", mFramesPerPacket = "
2014 << streamFormat.mFramesPerPacket;
2015 RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame
2016 << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel;
2017 RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags;
2018 logCAMsg(rtc::LS_VERBOSE, "mFormatID", (const char*)&streamFormat.mFormatID);
2019
2020 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
2021 const int io_block_size_samples = streamFormat.mChannelsPerFrame *
2022 streamFormat.mSampleRate / 100 *
2023 N_BLOCKS_IO;
2024 if (io_block_size_samples > _captureBufSizeSamples) {
2025 RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
2026 << ") is larger than ring buffer ("
2027 << _captureBufSizeSamples << ")";
2028 return -1;
2029 }
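// Editor's note: as a worked example, a 48 kHz stereo input with N_BLOCKS_IO
// of, say, 2 gives 2 * 480 * 2 = 1920 samples per IO block, which must fit in
// the _captureBufSizeSamples ring buffer allocated at init time.
// (Illustrative numbers; the real values depend on the device format and the
// constants in the header.)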
2030
2031 memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));
2032
2033 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
2034 _inDesiredFormat.mChannelsPerFrame = 2;
2035 } else {
2036 // Disable stereo recording when we only have one channel on the device.
2037 _inDesiredFormat.mChannelsPerFrame = 1;
2038 _recChannels = 1;
2039 RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
2040 }
2041
2042 // Recreate the converter with the new format
2043 // TODO(xians): make this thread safe
2044 WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));
2045
2046 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
2047 &_captureConverter));
2048 } else {
2049 memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));
2050
2051 // Our preferred format to work with
2052 if (_outStreamFormat.mChannelsPerFrame < 2) {
2053 _playChannels = 1;
2054 RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
2055 }
2056 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
2057 }
2058 return 0;
2059 }
2060
2061 int32_t AudioDeviceMac::HandleDataSourceChange(
2062 const AudioObjectID objectId,
2063 const AudioObjectPropertyAddress propertyAddress) {
2064 OSStatus err = noErr;
2065
2066 if (_macBookPro &&
2067 propertyAddress.mScope == kAudioDevicePropertyScopeOutput) {
2068 RTC_LOG(LS_VERBOSE) << "Data source changed";
2069
2070 _macBookProPanRight = false;
2071 UInt32 dataSource = 0;
2072 UInt32 size = sizeof(UInt32);
2073 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
2074 objectId, &propertyAddress, 0, NULL, &size, &dataSource));
2075 if (dataSource == 'ispk') {
2076 _macBookProPanRight = true;
2077 RTC_LOG(LS_VERBOSE)
2078 << "MacBook Pro using internal speakers; stereo panning right";
2079 } else {
2080 RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
2081 }
2082 }
2083
2084 return 0;
2085 }
2086 int32_t AudioDeviceMac::HandleProcessorOverload(
2087 const AudioObjectPropertyAddress propertyAddress) {
2088 // TODO(xians): we probably want to notify the user in some way of the
2089 // overload. However, the Windows interpretations of these errors seem to
2090 // be more severe than what ProcessorOverload is thrown for.
2091 //
2092 // We don't log the notification, as it's sent from the HAL's IO thread. We
2093 // don't want to slow it down even further.
2094 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
2095 // RTC_LOG(LS_WARNING) << "Capture processor overload";
2096 //_callback->ProblemIsReported(
2097 // SndCardStreamObserver::ERecordingProblem);
2098 } else {
2099 // RTC_LOG(LS_WARNING) << "Render processor overload";
2100 //_callback->ProblemIsReported(
2101 // SndCardStreamObserver::EPlaybackProblem);
2102 }
2103
2104 return 0;
2105 }
2106
2107 // ============================================================================
2108 // Thread Methods
2109 // ============================================================================
2110
2111 OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID,
2112 const AudioTimeStamp*,
2113 const AudioBufferList* inputData,
2114 const AudioTimeStamp* inputTime,
2115 AudioBufferList* outputData,
2116 const AudioTimeStamp* outputTime,
2117 void* clientData) {
2118 AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
2119 RTC_DCHECK(ptrThis != NULL);
2120
2121 ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
2122
2123 // AudioDeviceIOProc functions are supposed to return 0
2124 return 0;
2125 }
2126
2127 OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
2128 UInt32* numberDataPackets,
2129 AudioBufferList* data,
2130 AudioStreamPacketDescription**,
2131 void* userData) {
2132 AudioDeviceMac* ptrThis = (AudioDeviceMac*)userData;
2133 RTC_DCHECK(ptrThis != NULL);
2134
2135 return ptrThis->implOutConverterProc(numberDataPackets, data);
2136 }
2137
2138 OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID,
2139 const AudioTimeStamp*,
2140 const AudioBufferList* inputData,
2141 const AudioTimeStamp* inputTime,
2142 AudioBufferList*,
2143 const AudioTimeStamp*,
2144 void* clientData) {
2145 AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
2146 RTC_DCHECK(ptrThis != NULL);
2147
2148 ptrThis->implInDeviceIOProc(inputData, inputTime);
2149
2150 // AudioDeviceIOProc functions are supposed to return 0
2151 return 0;
2152 }
2153
2154 OSStatus AudioDeviceMac::inConverterProc(
2155 AudioConverterRef,
2156 UInt32* numberDataPackets,
2157 AudioBufferList* data,
2158 AudioStreamPacketDescription** /*dataPacketDescription*/,
2159 void* userData) {
2160 AudioDeviceMac* ptrThis = static_cast<AudioDeviceMac*>(userData);
2161 RTC_DCHECK(ptrThis != NULL);
2162
2163 return ptrThis->implInConverterProc(numberDataPackets, data);
2164 }
2165
2166 OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
2167 const AudioTimeStamp* inputTime,
2168 AudioBufferList* outputData,
2169 const AudioTimeStamp* outputTime) {
2170 OSStatus err = noErr;
2171 UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
2172 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
2173
2174 if (!_twoDevices && _recording) {
2175 implInDeviceIOProc(inputData, inputTime);
2176 }
2177
2178 // Check if we should close down audio device
2179 // Double-checked locking optimization to remove locking overhead
2180 if (_doStop) {
2181 MutexLock lock(&mutex_);
2182 if (_doStop) {
2183 if (_twoDevices || (!_recording && !_playing)) {
2184 // In the case of a shared device, the single driving ioProc
2185 // is stopped here
2186 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
2187 WEBRTC_CA_LOG_WARN(
2188 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
2189 if (err == noErr) {
2190 RTC_LOG(LS_VERBOSE) << "Playout or shared device stopped";
2191 }
2192 }
2193
2194 _doStop = false;
2195 _stopEvent.Set();
2196 return 0;
2197 }
2198 }
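  // Editor's note: the block above is the classic double-checked locking
  // shape: an unsynchronized read of _doStop keeps the common "not stopping"
  // path lock-free, and the re-check under mutex_ serializes the rare stop
  // request. A generic sketch of the shape:
  //
  //   if (flag) {              // cheap, unlocked fast path
  //     MutexLock lock(&m);
  //     if (flag) {            // confirmed under the lock
  //       /* do the stop work exactly once */
  //       flag = false;
  //     }
  //   }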
2199
2200 if (!_playing) {
2201 // This can be the case when a shared device is capturing but not
2202 // rendering. We still run the checks above before returning, to avoid a
2203 // timeout when capturing is stopped.
2204 return 0;
2205 }
2206
2207 RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
2208 UInt32 size =
2209 outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame;
2210
2211 // TODO(xians): signal an error somehow?
2212 err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
2213 this, &size, outputData, NULL);
2214 if (err != noErr) {
2215 if (err == 1) {
2216 // This is our own error.
2217 RTC_LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
2218 return 1;
2219 } else {
2220 logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
2221 (const char*)&err);
2222 return 1;
2223 }
2224 }
2225
2226 ring_buffer_size_t bufSizeSamples =
2227 PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);
2228
2229 int32_t renderDelayUs =
2230 static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5);
2231 renderDelayUs += static_cast<int32_t>(
2232 (1.0e6 * bufSizeSamples) / _outDesiredFormat.mChannelsPerFrame /
2233 _outDesiredFormat.mSampleRate +
2234 0.5);
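  // Editor's note: worked example (illustrative assumptions): if the output
  // timestamp is 5 ms in the future (outputTimeNs - nowNs = 5e6 ns) and 960
  // interleaved samples sit in the ring buffer at stereo 48 kHz (10 ms),
  // renderDelayUs comes out at roughly 5000 + 10000 = 15000 us.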
2235
2236 _renderDelayUs = renderDelayUs;
2237
2238 return 0;
2239 }
2240
2241 OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets,
2242 AudioBufferList* data) {
2243 RTC_DCHECK(data->mNumberBuffers == 1);
2244 ring_buffer_size_t numSamples =
2245 *numberDataPackets * _outDesiredFormat.mChannelsPerFrame;
2246
2247 data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
2248 // Always give the converter as much as it wants, zero padding as required.
2249 data->mBuffers->mDataByteSize =
2250 *numberDataPackets * _outDesiredFormat.mBytesPerPacket;
2251 data->mBuffers->mData = _renderConvertData;
2252 memset(_renderConvertData, 0, sizeof(_renderConvertData));
2253
2254 PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
2255
2256 kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
2257 if (kernErr != KERN_SUCCESS) {
2258 RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
2259 return 1;
2260 }
2261
2262 return 0;
2263 }
2264
2265 OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
2266 const AudioTimeStamp* inputTime) {
2267 OSStatus err = noErr;
2268 UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
2269 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
2270
2271 // Check if we should close down audio device
2272 // Double-checked locking optimization to remove locking overhead
2273 if (_doStopRec) {
2274 MutexLock lock(&mutex_);
2275 if (_doStopRec) {
2276 // This will be signalled only when a shared device is not in use.
2277 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
2278 WEBRTC_CA_LOG_WARN(
2279 AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
2280 if (err == noErr) {
2281 RTC_LOG(LS_VERBOSE) << "Recording device stopped";
2282 }
2283
2284 _doStopRec = false;
2285 _stopEventRec.Set();
2286 return 0;
2287 }
2288 }
2289
2290 if (!_recording) {
2291 // Allow the checks above to run so that stopping capture does not time out.
2292 return 0;
2293 }
2294
2295 ring_buffer_size_t bufSizeSamples =
2296 PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);
2297
2298 int32_t captureDelayUs =
2299 static_cast<int32_t>(1e-3 * (nowNs - inputTimeNs) + 0.5);
2300 captureDelayUs += static_cast<int32_t>((1.0e6 * bufSizeSamples) /
2301 _inStreamFormat.mChannelsPerFrame /
2302 _inStreamFormat.mSampleRate +
2303 0.5);
2304
2305 _captureDelayUs = captureDelayUs;
2306
2307 RTC_DCHECK(inputData->mNumberBuffers == 1);
2308 ring_buffer_size_t numSamples = inputData->mBuffers->mDataByteSize *
2309 _inStreamFormat.mChannelsPerFrame /
2310 _inStreamFormat.mBytesPerPacket;
2311 PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
2312 numSamples);
2313
2314 kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
2315 if (kernErr != KERN_SUCCESS) {
2316 RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
2317 }
2318
2319 return err;
2320 }
2321
2322 OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets,
2323 AudioBufferList* data) {
2324 RTC_DCHECK(data->mNumberBuffers == 1);
2325 ring_buffer_size_t numSamples =
2326 *numberDataPackets * _inStreamFormat.mChannelsPerFrame;
2327
2328 while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) {
2329 mach_timespec_t timeout;
2330 timeout.tv_sec = 0;
2331 timeout.tv_nsec = TIMER_PERIOD_MS;
2332
2333 kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
2334 if (kernErr == KERN_OPERATION_TIMED_OUT) {
2335 int32_t signal = _captureDeviceIsAlive;
2336 if (signal == 0) {
2337 // The capture device is no longer alive; stop the worker thread.
2338 *numberDataPackets = 0;
2339 return 1;
2340 }
2341 } else if (kernErr != KERN_SUCCESS) {
2342 RTC_LOG(LS_ERROR) << "semaphore_wait() error: " << kernErr;
2343 }
2344 }
2345
2346 // Pass the read pointer directly to the converter to avoid a memcpy.
2347 void* dummyPtr;
2348 ring_buffer_size_t dummySize;
2349 PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
2350 &data->mBuffers->mData, &numSamples,
2351 &dummyPtr, &dummySize);
2352 PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);
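  // Editor's note: if the requested read wraps around the ring buffer, the
  // code consumes only the first contiguous region returned by
  // PaUtil_GetRingBufferReadRegions() (numSamples is overwritten with that
  // region's size), so the packet count handed back to the converter below is
  // recomputed from the samples actually consumed.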
2353
2354 data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
2355 *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
2356 data->mBuffers->mDataByteSize =
2357 *numberDataPackets * _inStreamFormat.mBytesPerPacket;
2358
2359 return 0;
2360 }
2361
2362 bool AudioDeviceMac::RenderWorkerThread() {
2363 ring_buffer_size_t numSamples =
2364 ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
2365 while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) -
2366 _renderDelayOffsetSamples <
2367 numSamples) {
2368 mach_timespec_t timeout;
2369 timeout.tv_sec = 0;
2370 timeout.tv_nsec = TIMER_PERIOD_MS;
2371
2372 kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
2373 if (kernErr == KERN_OPERATION_TIMED_OUT) {
2374 int32_t signal = _renderDeviceIsAlive;
2375 if (signal == 0) {
2376 // The render device is no longer alive; stop the worker thread.
2377 return false;
2378 }
2379 } else if (kernErr != KERN_SUCCESS) {
2380 RTC_LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr;
2381 }
2382 }
2383
2384 int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
2385
2386 if (!_ptrAudioBuffer) {
2387 RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid";
2388 return false;
2389 }
2390
2391 // Ask for new PCM data to be played out using the AudioDeviceBuffer.
2392 uint32_t nSamples =
2393 _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
2394
2395 nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
2396 if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) {
2397 RTC_LOG(LS_ERROR) << "invalid number of output samples (" << nSamples << ")";
2398 }
2399
2400 uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
2401
2402 SInt16* pPlayBuffer = (SInt16*)&playBuffer;
2403 if (_macBookProPanRight && (_playChannels == 2)) {
2404 // Mix entirely into the right channel and zero the left channel.
2405 SInt32 sampleInt32 = 0;
2406 for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) {
2407 sampleInt32 = pPlayBuffer[sampleIdx];
2408 sampleInt32 += pPlayBuffer[sampleIdx + 1];
2409 sampleInt32 /= 2;
2410
2411 if (sampleInt32 > 32767) {
2412 sampleInt32 = 32767;
2413 } else if (sampleInt32 < -32768) {
2414 sampleInt32 = -32768;
2415 }
2416
2417 pPlayBuffer[sampleIdx] = 0;
2418 pPlayBuffer[sampleIdx + 1] = static_cast<SInt16>(sampleInt32);
2419 }
2420 }
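  // Editor's note: worked example of the pan-right mix above: with left/right
  // samples of 10000 and 20000, the average is 15000, so the pair becomes
  // (0, 15000); the clamp simply guards the SInt16 range.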
2421
2422 PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
2423
2424 return true;
2425 }
2426
2427 bool AudioDeviceMac::CaptureWorkerThread() {
2428 OSStatus err = noErr;
2429 UInt32 noRecSamples =
2430 ENGINE_REC_BUF_SIZE_IN_SAMPLES * _inDesiredFormat.mChannelsPerFrame;
2431 SInt16 recordBuffer[noRecSamples];
2432 UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
2433
2434 AudioBufferList engineBuffer;
2435 engineBuffer.mNumberBuffers = 1; // Interleaved channels.
2436 engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
2437 engineBuffer.mBuffers->mDataByteSize =
2438 _inDesiredFormat.mBytesPerPacket * noRecSamples;
2439 engineBuffer.mBuffers->mData = recordBuffer;
2440
2441 err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
2442 this, &size, &engineBuffer, NULL);
2443 if (err != noErr) {
2444 if (err == 1) {
2445 // This is our own error.
2446 return false;
2447 } else {
2448 logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
2449 (const char*)&err);
2450 return false;
2451 }
2452 }
2453
2454 // TODO(xians): what if the returned size is incorrect?
2455 if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) {
2456 int32_t msecOnPlaySide;
2457 int32_t msecOnRecordSide;
2458
2459 int32_t captureDelayUs = _captureDelayUs;
2460 int32_t renderDelayUs = _renderDelayUs;
2461
2462 msecOnPlaySide =
2463 static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
2464 msecOnRecordSide =
2465 static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);
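    // Editor's note: worked example (illustrative): with renderDelayUs =
    // 15000, _renderLatencyUs = 6000, captureDelayUs = 12000 and
    // _captureLatencyUs = 3000, the values passed to SetVQEData() below are
    // msecOnPlaySide = 21 and msecOnRecordSide = 15.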
2466
2467 if (!_ptrAudioBuffer) {
2468 RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid";
2469 return false;
2470 }
2471
2472 // Store the recorded buffer. No action is taken if the number of
2473 // recorded samples does not make up a full buffer.
2474 _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size);
2475 _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide);
2476 _ptrAudioBuffer->SetTypingStatus(KeyPressed());
2477
2478 // Deliver the recorded samples (at the specified sample rate, mic level,
2479 // etc.) to the observer via the registered callback.
2480 _ptrAudioBuffer->DeliverRecordedData();
2481 }
2482
2483 return true;
2484 }
2485
2486 bool AudioDeviceMac::KeyPressed() {
2487 bool key_down = false;
2488 // Loop through all Mac virtual key constant values.
2489 for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_);
2490 ++key_index) {
2491 bool keyState =
2492 CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index);
2493 // A false -> true change in keymap means a key is pressed.
2494 key_down |= (keyState && !prev_key_state_[key_index]);
2495 // Save current state.
2496 prev_key_state_[key_index] = keyState;
2497 }
2498 return key_down;
2499 }
2500 } // namespace webrtc
2501