/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AudioStreamInternal"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <stdint.h>

#include <binder/IServiceManager.h>

#include <aaudio/AAudio.h>
#include <cutils/properties.h>

#include <media/AudioParameter.h>
#include <media/AudioSystem.h>
#include <media/MediaMetricsItem.h>
#include <utils/Trace.h>

#include "AudioEndpointParcelable.h"
#include "binding/AAudioBinderClient.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
#include "binding/AAudioServiceMessage.h"
#include "core/AudioGlobal.h"
#include "core/AudioStreamBuilder.h"
#include "fifo/FifoBuffer.h"
#include "utility/AudioClock.h"
#include <media/AidlConversion.h>

#include "AudioStreamInternal.h"

// We do this after the #includes because if a header uses ALOG,
// it would fail on the reference to mInService.
#undef LOG_TAG
// This file is used in both client and server processes.
// This is needed to make sense of the logs more easily.
#define LOG_TAG (mInService ? "AudioStreamInternal_Service" : "AudioStreamInternal_Client")

using android::content::AttributionSourceState;

using namespace aaudio;

#define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND)

// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4

#define LOG_TIMESTAMPS 0

// Minimum number of bursts to use when sample rate conversion is used.
#define MIN_SAMPLE_RATE_CONVERSION_NUM_BURSTS 3

AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mInService(inService)
        , mServiceInterface(serviceInterface)
        , mAtomicInternalTimestamp()
        , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        {
}

AudioStreamInternal::~AudioStreamInternal() {
    ALOGD("%s() %p called", __func__, this);
}
aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    aaudio_result_t result = AAUDIO_OK;
    AAudioStreamRequest request;
    AAudioStreamConfiguration configurationOutput;

    if (getState() != AAUDIO_STREAM_STATE_UNINITIALIZED) {
        ALOGE("%s - already open! state = %d", __func__, getState());
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Copy requested parameters to the stream.
    result = AudioStream::open(builder);
    if (result < 0) {
        return result;
    }

    const audio_format_t requestedFormat = getFormat();
    // We have to do volume scaling. So we prefer FLOAT format.
    if (requestedFormat == AUDIO_FORMAT_DEFAULT) {
        setFormat(AUDIO_FORMAT_PCM_FLOAT);
    }
    // Request FLOAT for the shared mixer or the device.
    request.getConfiguration().setFormat(AUDIO_FORMAT_PCM_FLOAT);

    // TODO b/182392769: use attribution source util
    AttributionSourceState attributionSource;
    attributionSource.uid = VALUE_OR_FATAL(android::legacy2aidl_uid_t_int32_t(getuid()));
    attributionSource.pid = VALUE_OR_FATAL(android::legacy2aidl_pid_t_int32_t(getpid()));
    attributionSource.packageName = builder.getOpPackageName();
    attributionSource.attributionTag = builder.getAttributionTag();
    attributionSource.token = sp<android::BBinder>::make();

    // Build the request to send to the server.
    request.setAttributionSource(attributionSource);
    request.setSharingModeMatchRequired(isSharingModeMatchRequired());
    request.setInService(isInService());

    request.getConfiguration().setDeviceIds(getDeviceIds());
    request.getConfiguration().setSampleRate(getSampleRate());
    request.getConfiguration().setDirection(getDirection());
    request.getConfiguration().setSharingMode(getSharingMode());
    request.getConfiguration().setChannelMask(getChannelMask());

    request.getConfiguration().setUsage(getUsage());
    request.getConfiguration().setContentType(getContentType());
    request.getConfiguration().setTags(getTags());
    request.getConfiguration().setSpatializationBehavior(getSpatializationBehavior());
    request.getConfiguration().setIsContentSpatialized(isContentSpatialized());
    request.getConfiguration().setInputPreset(getInputPreset());
    request.getConfiguration().setPrivacySensitive(isPrivacySensitive());

    // When sample rate conversion is needed, we use the device sample rate instead of the
    // requested sample rate to scale the capacity in configureDataInformation().
    // Thus, we should scale the capacity here to cancel out the (sampleRate / deviceSampleRate)
    // scaling there.
    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity()
            * 48000 / getSampleRate());
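    // Worked example (illustrative values, assuming a 48000 Hz device and that the
    // server grants the requested capacity): an app asking for 960 frames at a
    // 24000 Hz stream rate requests 960 * 48000 / 24000 = 1920 device frames here;
    // configureDataInformation() then scales by 24000 / 48000, landing back at 960.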

    mServiceStreamHandleInfo = mServiceInterface.openStream(request, configurationOutput);
    if (getServiceHandle() < 0
            && (request.getConfiguration().getSamplesPerFrame() == 1
                    || request.getConfiguration().getChannelMask() == AAUDIO_CHANNEL_MONO)
            && getDirection() == AAUDIO_DIRECTION_OUTPUT
            && !isInService()) {
        // if that failed then try switching from mono to stereo if OUTPUT.
        // Only do this in the client. Otherwise we end up with a mono mixer in the service
        // that writes to a stereo MMAP stream.
        ALOGD("%s() - openStream() returned %d, try switching from MONO to STEREO",
              __func__, getServiceHandle());
        request.getConfiguration().setChannelMask(AAUDIO_CHANNEL_STEREO);
        mServiceStreamHandleInfo = mServiceInterface.openStream(request, configurationOutput);
    }
    if (getServiceHandle() < 0) {
        return getServiceHandle();
    }

    // This must match the key generated in oboeservice/AAudioServiceStreamBase.cpp
    // so the client can have permission to log.
    if (!mInService) {
        // No need to log if it is from service side.
        mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_STREAM)
                + std::to_string(getServiceHandle());
    }

    android::mediametrics::LogItem(mMetricsId)
            .set(AMEDIAMETRICS_PROP_PERFORMANCEMODE,
                 AudioGlobal_convertPerformanceModeToText(builder.getPerformanceMode()))
            .set(AMEDIAMETRICS_PROP_SHARINGMODE,
                 AudioGlobal_convertSharingModeToText(builder.getSharingMode()))
            .set(AMEDIAMETRICS_PROP_ENCODINGCLIENT,
                 android::toString(requestedFormat).c_str()).record();

    result = configurationOutput.validate();
    if (result != AAUDIO_OK) {
        goto error;
    }
    // Save results of the open.
    if (getChannelMask() == AAUDIO_UNSPECIFIED) {
        setChannelMask(configurationOutput.getChannelMask());
    }

    setDeviceIds(configurationOutput.getDeviceIds());
    setSessionId(configurationOutput.getSessionId());
    setSharingMode(configurationOutput.getSharingMode());

    setUsage(configurationOutput.getUsage());
    setContentType(configurationOutput.getContentType());
    setTags(configurationOutput.getTags());
    setSpatializationBehavior(configurationOutput.getSpatializationBehavior());
    setIsContentSpatialized(configurationOutput.isContentSpatialized());
    setInputPreset(configurationOutput.getInputPreset());

    setDeviceSampleRate(configurationOutput.getSampleRate());

    if (getSampleRate() == AAUDIO_UNSPECIFIED) {
        setSampleRate(configurationOutput.getSampleRate());
    }

    // Save device format so we can do format conversion and volume scaling together.
    setDeviceFormat(configurationOutput.getFormat());
    setDeviceSamplesPerFrame(configurationOutput.getSamplesPerFrame());

    setHardwareSamplesPerFrame(configurationOutput.getHardwareSamplesPerFrame());
    setHardwareSampleRate(configurationOutput.getHardwareSampleRate());
    setHardwareFormat(configurationOutput.getHardwareFormat());

    result = mServiceInterface.getStreamDescription(mServiceStreamHandleInfo, mEndPointParcelable);
    if (result != AAUDIO_OK) {
        goto error;
    }

    // Resolve parcelable into a descriptor.
    result = mEndPointParcelable.resolve(&mEndpointDescriptor);
    if (result != AAUDIO_OK) {
        goto error;
    }

    // Configure endpoint based on descriptor.
    mAudioEndpoint = std::make_unique<AudioEndpoint>();
    result = mAudioEndpoint->configure(&mEndpointDescriptor, getDirection());
    if (result != AAUDIO_OK) {
        goto error;
    }

    if ((result = configureDataInformation(builder.getFramesPerDataCallback())) != AAUDIO_OK) {
        goto error;
    }

    setState(AAUDIO_STREAM_STATE_OPEN);

    return result;

error:
    safeReleaseClose();
    return result;
}

aaudio_result_t AudioStreamInternal::configureDataInformation(int32_t callbackFrames) {
    int32_t originalFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
    int32_t deviceFramesPerBurst = originalFramesPerBurst;

    // Scale up the burst size to meet the minimum equivalent in microseconds.
    // This is to avoid waking the CPU too often when the HW burst is very small
    // or at high sample rates. The actual number of frames that we call back to
    // the app with will be 0 < N <= framesPerBurst so round up the division.
    int32_t burstMicros = 0;
    const int32_t burstMinMicros = android::AudioSystem::getAAudioHardwareBurstMinUsec();
    do {
        if (burstMicros > 0) { // skip first loop
            deviceFramesPerBurst *= 2;
        }
        burstMicros = deviceFramesPerBurst * static_cast<int64_t>(1000000) / getDeviceSampleRate();
    } while (burstMicros < burstMinMicros);
    ALOGD("%s() original HW burst = %d, minMicros = %d => SW burst = %d\n",
          __func__, originalFramesPerBurst, burstMinMicros, deviceFramesPerBurst);
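    // Worked example (illustrative values): a 48 frame HW burst at a 48000 Hz device
    // rate lasts 1000 microseconds; with burstMinMicros = 2000 the loop doubles it
    // once, yielding a 96 frame (2000 microsecond) software burst.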

    // Validate final burst size.
    if (deviceFramesPerBurst < MIN_FRAMES_PER_BURST
            || deviceFramesPerBurst > MAX_FRAMES_PER_BURST) {
        ALOGE("%s - deviceFramesPerBurst out of range = %d", __func__, deviceFramesPerBurst);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }

    // Calculate the application framesPerBurst from the deviceFramesPerBurst
    int32_t framesPerBurst = (static_cast<int64_t>(deviceFramesPerBurst) * getSampleRate() +
            getDeviceSampleRate() - 1) / getDeviceSampleRate();
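    // Worked example (illustrative values): a 96 frame device burst at 48000 Hz with
    // a 44100 Hz stream rate gives ceil(96 * 44100 / 48000) = ceil(88.2) = 89 frames.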

    setDeviceFramesPerBurst(deviceFramesPerBurst);
    setFramesPerBurst(framesPerBurst); // only save good value

    mDeviceBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;

    mBufferCapacityInFrames = static_cast<int64_t>(mDeviceBufferCapacityInFrames)
            * getSampleRate() / getDeviceSampleRate();
    if (mBufferCapacityInFrames < getFramesPerBurst()
            || mBufferCapacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
        ALOGE("%s - bufferCapacity out of range = %d", __func__, mBufferCapacityInFrames);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }
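    // Worked example (illustrative values): a 1920 frame device capacity at 48000 Hz
    // with a 44100 Hz stream rate yields 1920 * 44100 / 48000 = 1764 app frames.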

    mClockModel.setSampleRate(getDeviceSampleRate());
    mClockModel.setFramesPerBurst(deviceFramesPerBurst);

    if (isDataCallbackSet()) {
        mCallbackFrames = callbackFrames;
        if (mCallbackFrames > getBufferCapacity() / 2) {
            ALOGW("%s - framesPerCallback too big = %d, capacity = %d",
                  __func__, mCallbackFrames, getBufferCapacity());
            return AAUDIO_ERROR_OUT_OF_RANGE;
        } else if (mCallbackFrames < 0) {
            ALOGW("%s - framesPerCallback negative", __func__);
            return AAUDIO_ERROR_OUT_OF_RANGE;
        }
        if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
            mCallbackFrames = getFramesPerBurst();
        }

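        // Sizing sketch (illustrative values): 96 callback frames of stereo float,
        // at 8 bytes per frame, allocate a 768 byte callback buffer below.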
        const int32_t callbackBufferSize = mCallbackFrames * getBytesPerFrame();
        mCallbackBuffer = std::make_unique<uint8_t[]>(callbackBufferSize);
    }

    // Exclusive output streams should combine channels when mono audio adjustment
    // is enabled. They should also adjust for audio balance.
    if ((getDirection() == AAUDIO_DIRECTION_OUTPUT) &&
        (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE)) {
        bool isMasterMono = false;
        android::AudioSystem::getMasterMono(&isMasterMono);
        setRequireMonoBlend(isMasterMono);
        float audioBalance = 0;
        android::AudioSystem::getMasterBalance(&audioBalance);
        setAudioBalance(audioBalance);
    }

    // For debugging and analyzing the distribution of MMAP timestamps.
    // For OUTPUT, use a NEGATIVE offset to move the CPU writes further BEFORE the HW reads.
    // For INPUT, use a POSITIVE offset to move the CPU reads further AFTER the HW writes.
    // You can use this offset to reduce glitching.
    // You can also use this offset to force glitching. By iterating over multiple
    // values you can reveal the distribution of the hardware timing jitter.
    if (mAudioEndpoint->isFreeRunning()) { // MMAP?
        int32_t offsetMicros = (getDirection() == AAUDIO_DIRECTION_OUTPUT)
                ? AAudioProperty_getOutputMMapOffsetMicros()
                : AAudioProperty_getInputMMapOffsetMicros();
        // This log is used to debug some tricky glitch issues. Please leave.
        ALOGD_IF(offsetMicros, "%s() - %s mmap offset = %d micros",
                 __func__,
                 (getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "output" : "input",
                 offsetMicros);
        mTimeOffsetNanos = offsetMicros * AAUDIO_NANOS_PER_MICROSECOND;
    }

    // Default buffer size to match Q
    setBufferSize(mBufferCapacityInFrames / 2);
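    // Worked example (illustrative values): a 1920 frame capacity requests a 960 frame
    // size here, which setBufferSize() rounds to a whole number of bursts.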
    return AAUDIO_OK;
}

// This must be called under mStreamLock.
aaudio_result_t AudioStreamInternal::release_l() {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD("%s(): mServiceStreamHandle = 0x%08X", __func__, getServiceHandle());
    if (getServiceHandle() != AAUDIO_HANDLE_INVALID) {
        // Don't release a stream while it is running. Stop it first.
        // If DISCONNECTED then we should still try to stop in case the
        // error callback is still running.
        if (isActive() || isDisconnected()) {
            requestStop_l();
        }

        logReleaseBufferState();

        setState(AAUDIO_STREAM_STATE_CLOSING);
        auto serviceStreamHandleInfo = mServiceStreamHandleInfo;
        mServiceStreamHandleInfo = AAudioHandleInfo();

        mServiceInterface.closeStream(serviceStreamHandleInfo);
        mCallbackBuffer.reset();

        // Update local frame counters so we can query them after releasing the endpoint.
        getFramesRead();
        getFramesWritten();
        mAudioEndpoint.reset();
        result = mEndPointParcelable.close();
        aaudio_result_t result2 = AudioStream::release_l();
        return (result != AAUDIO_OK) ? result : result2;
    } else {
        return AAUDIO_ERROR_INVALID_HANDLE;
    }
}

static void *aaudio_callback_thread_proc(void *context)
{
    AudioStreamInternal *stream = (AudioStreamInternal *)context;
    //LOGD("oboe_callback_thread, stream = %p", stream);
    if (stream != nullptr) {
        return stream->callbackLoop();
    } else {
        return nullptr;
    }
}

aaudio_result_t AudioStreamInternal::exitStandby_l() {
    AudioEndpointParcelable endpointParcelable;
    // The stream is in standby mode. Copy all available data and then close the duplicated
    // shared file descriptor so that it won't cause issues when the HAL tries to allocate a
    // new shared file descriptor when exiting from standby.
    // Cache the current read counter, which will be reset to the new read and write counters
    // when the new data queue and endpoint are reconfigured.
    const android::fifo_counter_t readCounter = mAudioEndpoint->getDataReadCounter();
    // Cache the buffer size, which may have been set by the client.
    const int32_t previousBufferSize = mBufferSizeInFrames;
    // Copy all available data from the current data queue.
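    // Note (illustrative values): this stack buffer spans the whole device capacity,
    // e.g. 1920 frames of stereo float would occupy 1920 * 8 = 15360 bytes.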
    uint8_t buffer[getDeviceBufferCapacity() * getBytesPerFrame()];
    android::fifo_frames_t fullFramesAvailable = mAudioEndpoint->read(buffer,
            getDeviceBufferCapacity());
    // Before releasing the data queue, update the frames read and written.
    getFramesRead();
    getFramesWritten();
    // Call freeDataQueue() here because the following call to
    // closeDataFileDescriptor() will invalidate the pointers used by the data queue.
    mAudioEndpoint->freeDataQueue();
    mEndPointParcelable.closeDataFileDescriptor();
    aaudio_result_t result = mServiceInterface.exitStandby(
            mServiceStreamHandleInfo, endpointParcelable);
    if (result != AAUDIO_OK) {
        ALOGE("Failed to exit standby, error=%d", result);
        goto exit;
    }
    // Reconstruct the data queue descriptor using the new shared file descriptor.
    result = mEndPointParcelable.updateDataFileDescriptor(&endpointParcelable);
    if (result != AAUDIO_OK) {
        ALOGE("%s failed to update data file descriptor, error=%d", __func__, result);
        goto exit;
    }
    result = mEndPointParcelable.resolveDataQueue(&mEndpointDescriptor.dataQueueDescriptor);
    if (result != AAUDIO_OK) {
        ALOGE("Failed to resolve data queue after exiting standby, error=%d", result);
        goto exit;
    }
    // Reconfigure the audio endpoint with the new data queue descriptor.
    mAudioEndpoint->configureDataQueue(
            mEndpointDescriptor.dataQueueDescriptor, getDirection());
    // Set both the read and write counters to the previous read counter; the
    // subsequent write of the cached data will advance the write counter to the
    // correct position.
    mAudioEndpoint->setDataReadCounter(readCounter);
    mAudioEndpoint->setDataWriteCounter(readCounter);
    result = configureDataInformation(mCallbackFrames);
    if (result != AAUDIO_OK) {
        ALOGE("Failed to configure data information after exiting standby, error=%d", result);
        goto exit;
    }
    // Write the data from the previous data buffer to the new endpoint.
    if (const android::fifo_frames_t framesWritten =
            mAudioEndpoint->write(buffer, fullFramesAvailable);
            framesWritten != fullFramesAvailable) {
        ALOGW("Some data lost after exiting standby, frames written: %d, "
              "frames to write: %d", framesWritten, fullFramesAvailable);
    }
    // Restore the previous buffer size, as it may have been requested by the client.
    setBufferSize(previousBufferSize);

exit:
    return result;
}

/*
 * It normally takes about 20-30 msec to start a stream on the server.
 * But the first time can take as much as 200-300 msec. The HW
 * starts right away so by the time the client gets a chance to write into
 * the buffer, it is already in a deep underflow state. That can cause the
 * XRunCount to be non-zero, which could lead an app to tune its latency higher.
 * To avoid this problem, we set a request for the processing code to start the
 * client stream at the same position as the server stream.
 * The processing code will then save the current offset
 * between client and server and apply that to any position given to the app.
 */
aaudio_result_t AudioStreamInternal::requestStart_l()
{
    int64_t startTime;
    if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
        ALOGD("requestStart() mServiceStreamHandle invalid");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    if (isActive()) {
        ALOGD("requestStart() already active");
        return AAUDIO_ERROR_INVALID_STATE;
    }

    if (isDisconnected()) {
        ALOGD("requestStart() but DISCONNECTED");
        return AAUDIO_ERROR_DISCONNECTED;
    }
    const aaudio_stream_state_t originalState = getState();
    setState(AAUDIO_STREAM_STATE_STARTING);

    // Clear any stale timestamps from the previous run.
    drainTimestampsFromService();

    prepareBuffersForStart(); // tell subclasses to get ready

    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandleInfo);
    if (result == AAUDIO_ERROR_STANDBY) {
        // The stream is in standby mode. We need to exit standby before starting the stream.
        result = exitStandby_l();
        if (result == AAUDIO_OK) {
            result = mServiceInterface.startStream(mServiceStreamHandleInfo);
        }
    }
    if (result != AAUDIO_OK) {
        ALOGD("%s() error = %d, stream was probably stolen", __func__, result);
        // Stealing was added in R. Coerce result to improve backward compatibility.
        result = AAUDIO_ERROR_DISCONNECTED;
        setDisconnected();
    }

    startTime = AudioClock::getNanoseconds();
    mClockModel.start(startTime);
    mNeedCatchUp.request(); // Ask data processing code to catch up when first timestamp received.

    // Start data callback thread.
    if (result == AAUDIO_OK && isDataCallbackSet()) {
        // Launch the callback loop thread.
        int64_t periodNanos = mCallbackFrames
                * AAUDIO_NANOS_PER_SECOND
                / getSampleRate();
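        // Worked example (illustrative values): 96 callback frames at 48000 Hz give a
        // period of 96 * 1e9 / 48000 = 2,000,000 ns, i.e. a 2 ms callback cadence.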
        mCallbackEnabled.store(true);
        result = createThread_l(periodNanos, aaudio_callback_thread_proc, this);
    }
    if (result != AAUDIO_OK) {
        setState(originalState);
    }
    return result;
}

int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {

    // Wait for at least a second or some number of callbacks to join the thread.
    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
            * framesPerOperation
            * AAUDIO_NANOS_PER_SECOND)
            / getSampleRate();
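    // Worked example (illustrative values): 96 frames per operation at 48000 Hz give
    // 4 * 96 * 1e9 / 48000 = 8,000,000 ns, which the clamp below raises to 1 second.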
    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
    }
    return timeoutNanoseconds;
}

int64_t AudioStreamInternal::calculateReasonableTimeout() {
    return calculateReasonableTimeout(getFramesPerBurst());
}

// This must be called under mStreamLock.
aaudio_result_t AudioStreamInternal::stopCallback_l()
{
    if (isDataCallbackSet() && (isActive() || isDisconnected())) {
        mCallbackEnabled.store(false);
        aaudio_result_t result = joinThread_l(nullptr); // may temporarily unlock mStreamLock
        if (result == AAUDIO_ERROR_INVALID_HANDLE) {
            ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
            result = AAUDIO_OK;
        }
        return result;
    } else {
        ALOGD("%s() skipped, isDataCallbackSet() = %d, isActive() = %d, getState() = %d", __func__,
              isDataCallbackSet(), isActive(), getState());
        return AAUDIO_OK;
    }
}

aaudio_result_t AudioStreamInternal::requestStop_l() {
    aaudio_result_t result = stopCallback_l();
    if (result != AAUDIO_OK) {
        ALOGW("%s() stop callback returned %d, returning early", __func__, result);
        return result;
    }
    // The stream may have been unlocked temporarily to let a callback finish
    // and the callback may have stopped the stream.
    // Check to make sure the stream still needs to be stopped.
    // See also AudioStream::safeStop_l().
    if (!(isActive() || isDisconnected())) {
        ALOGD("%s() returning early, not active or disconnected", __func__);
        return AAUDIO_OK;
    }

    if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
        ALOGW("%s() mServiceStreamHandle invalid = 0x%08X",
              __func__, getServiceHandle());
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // For playback, sleep until all the audio data has played.
    // Then clear the buffer to prevent noise.
    prepareBuffersForStop();

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_STOPPING);
    mAtomicInternalTimestamp.clear();

#if 0
    // Simulate very slow CPU, force race condition where the
    // DSP keeps playing after we stop writing.
    AudioClock::sleepForNanos(800 * AAUDIO_NANOS_PER_MILLISECOND);
#endif

    result = mServiceInterface.stopStream(mServiceStreamHandleInfo);
    if (result == AAUDIO_ERROR_INVALID_HANDLE) {
        ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
        result = AAUDIO_OK;
    }
    return result;
}

aaudio_result_t AudioStreamInternal::registerThread() {
    if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
        ALOGW("%s() mServiceStreamHandle invalid", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.registerAudioThread(mServiceStreamHandleInfo,
                                                 gettid(),
                                                 getPeriodNanoseconds());
}

aaudio_result_t AudioStreamInternal::unregisterThread() {
    if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
        ALOGW("%s() mServiceStreamHandle invalid", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.unregisterAudioThread(mServiceStreamHandleInfo, gettid());
}

aaudio_result_t AudioStreamInternal::startClient(const android::AudioClient& client,
                                                 const audio_attributes_t *attr,
                                                 audio_port_handle_t *portHandle) {
    ALOGV("%s() called", __func__);
    if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
        ALOGE("%s() getServiceHandle() is invalid", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }
    aaudio_result_t result = mServiceInterface.startClient(mServiceStreamHandleInfo,
                                                           client, attr, portHandle);
    ALOGV("%s(), got %d, returning %d", __func__, *portHandle, result);
    return result;
}

aaudio_result_t AudioStreamInternal::stopClient(audio_port_handle_t portHandle) {
    ALOGV("%s(%d) called", __func__, portHandle);
    if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
        ALOGE("%s(%d) getServiceHandle() is invalid", __func__, portHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }
    aaudio_result_t result = mServiceInterface.stopClient(mServiceStreamHandleInfo, portHandle);
    ALOGV("%s(%d) returning %d", __func__, portHandle, result);
    return result;
}

aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t /*clockId*/,
                                                  int64_t *framePosition,
                                                  int64_t *timeNanoseconds) {
    // Generated in server and passed to client. Return latest.
    if (mAtomicInternalTimestamp.isValid()) {
        Timestamp timestamp = mAtomicInternalTimestamp.read();
        // This should not overflow as timestamp.getPosition() should be a position in a buffer and
        // not the actual timestamp. timestamp.getNanoseconds() below uses the actual timestamp.
        // At 48000 Hz we can run for over 100 years before overflowing the int64_t.
        int64_t position = (timestamp.getPosition() + mFramesOffsetFromService) * getSampleRate() /
                getDeviceSampleRate();
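        // Worked example (illustrative values, zero service offset): a device position
        // of 4800 frames at a 48000 Hz device rate maps to 4800 * 44100 / 48000 = 4410
        // app frames for a 44100 Hz stream.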
        if (position >= 0) {
            *framePosition = position;
            *timeNanoseconds = timestamp.getNanoseconds();
            return AAUDIO_OK;
        }
    }
    return AAUDIO_ERROR_INVALID_STATE;
}

void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command) {
    static int64_t oldPosition = 0;
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD("logTimestamp: timestamp says framePosition = %8lld at nanoTime %lld",
          (long long) framePosition,
          (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
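        // This rate is the observed sample rate, e.g. 960 frames over 20,000,000 ns
        // is 48000 frames per second (illustrative values).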
672 ALOGD("logTimestamp: framesDelta = %8lld, nanosDelta = %8lld, rate = %lld",
673 (long long) framesDelta, (long long) nanosDelta, (long long) rate);
674 }
675 oldPosition = framePosition;
676 oldTime = nanoTime;
677 }

aaudio_result_t AudioStreamInternal::onTimestampService(AAudioServiceMessage *message) {
#if LOG_TIMESTAMPS
    logTimestamp(*message);
#endif
    processTimestamp(message->timestamp.position,
            message->timestamp.timestamp + mTimeOffsetNanos);
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::onTimestampHardware(AAudioServiceMessage *message) {
    Timestamp timestamp(message->timestamp.position, message->timestamp.timestamp);
    mAtomicInternalTimestamp.write(timestamp);
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD("%s - got AAUDIO_SERVICE_EVENT_STARTED", __func__);
            if (getState() == AAUDIO_STREAM_STATE_STARTING) {
                setState(AAUDIO_STREAM_STATE_STARTED);
            }
            mPlayerBase->triggerPortIdUpdate(static_cast<audio_port_handle_t>(
                    message->event.dataLong));
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD("%s - got AAUDIO_SERVICE_EVENT_PAUSED", __func__);
            if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
                setState(AAUDIO_STREAM_STATE_PAUSED);
            }
            break;
        case AAUDIO_SERVICE_EVENT_STOPPED:
            ALOGD("%s - got AAUDIO_SERVICE_EVENT_STOPPED", __func__);
            if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
                setState(AAUDIO_STREAM_STATE_STOPPED);
            }
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD("%s - got AAUDIO_SERVICE_EVENT_FLUSHED", __func__);
            if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
                setState(AAUDIO_STREAM_STATE_FLUSHED);
                onFlushFromServer();
            }
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            // Prevent hardware from looping on old data and making buzzing sounds.
            if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
                mAudioEndpoint->eraseDataMemory();
            }
            result = AAUDIO_ERROR_DISCONNECTED;
            setDisconnected();
            ALOGW("%s - AAUDIO_SERVICE_EVENT_DISCONNECTED - FIFO cleared", __func__);
            break;
        case AAUDIO_SERVICE_EVENT_VOLUME:
            ALOGD("%s - AAUDIO_SERVICE_EVENT_VOLUME %lf", __func__, message->event.dataDouble);
            mStreamVolume = (float)message->event.dataDouble;
            doSetVolume();
            break;
        case AAUDIO_SERVICE_EVENT_XRUN:
            mXRunCount = static_cast<int32_t>(message->event.dataLong);
            break;
        default:
            ALOGE("%s - Unrecognized event = %d", __func__, (int) message->event.event);
            break;
    }
    return result;
}

aaudio_result_t AudioStreamInternal::drainTimestampsFromService() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        AAudioServiceMessage message;
        if (!mAudioEndpoint) {
            break;
        }
        if (mAudioEndpoint->readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
            // ignore most messages
            case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
            case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
                break;

            case AAudioServiceMessage::code::EVENT:
                result = onEventFromServer(&message);
                break;

            default:
                ALOGE("%s - unrecognized message.what = %d", __func__, (int) message.what);
                result = AAUDIO_ERROR_INTERNAL;
                break;
        }
    }
    return result;
}

// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        AAudioServiceMessage message;
        if (!mAudioEndpoint) {
            break;
        }
        if (mAudioEndpoint->readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
            case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
                result = onTimestampService(&message);
                break;

            case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
                result = onTimestampHardware(&message);
                break;

            case AAudioServiceMessage::code::EVENT:
                result = onEventFromServer(&message);
                break;

            default:
                ALOGE("%s - unrecognized message.what = %d", __func__, (int) message.what);
                result = AAUDIO_ERROR_INTERNAL;
                break;
        }
    }
    return result;
}

// Read or write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    if (isDisconnected()) {
        return AAUDIO_ERROR_DISCONNECTED;
    }
    if (!mInService &&
        AAudioBinderClient::getInstance().getServiceLifetimeId() != getServiceLifetimeId()) {
        // The service lifetime id changes whenever the binder dies. In that case, if
        // the service lifetime id from AAudioBinderClient is different from the cached one,
        // return AAUDIO_ERROR_DISCONNECTED.
        // Note that we only compare the service lifetime id when not in service, as the
        // streams in service will all be gone when the aaudio service dies.
        mClockModel.stop(AudioClock::getNanoseconds());
        // Mark the stream as disconnected, as the service lifetime id will only change
        // when the binder dies.
        setDisconnected();
        return AAUDIO_ERROR_DISCONNECTED;
    }
    const char * traceName = "aaProc";
    const char * fifoName = "aaRdy";
    ATRACE_BEGIN(traceName);
    if (ATRACE_ENABLED()) {
        int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }

    aaudio_result_t result = AAUDIO_OK;
    int32_t loopCount = 0;
    uint8_t* audioData = (uint8_t*)buffer;
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    const int64_t entryTimeNanos = currentTimeNanos;
    const int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;

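    // Behavior sketch (illustrative values): with timeoutNanoseconds == 0 and only 96
    // of 480 requested frames transferable now, the loop below runs once and the
    // function returns 96.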
    // Loop until all the data has been processed or until a timeout occurs.
    while (framesLeft > 0) {
        // The call to processDataNow() will not block. It will just process as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
                currentTimeNanos, &wakeTimeNanos);
        if (framesProcessed < 0) {
            result = framesProcessed;
            break;
        }
        framesLeft -= (int32_t) framesProcessed;
        audioData += framesProcessed * getBytesPerFrame();

        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (wakeTimeNanos != 0) {
            if (!mAudioEndpoint->isFreeRunning()) {
                // If there is software on the other end of the FIFO then it may get delayed.
                // So wake up just a little after we expect it to be ready.
                wakeTimeNanos += mWakeupDelayNanos;
            }

            currentTimeNanos = AudioClock::getNanoseconds();
            int64_t earliestWakeTime = currentTimeNanos + mMinimumSleepNanos;
            // Guarantee a minimum sleep time.
            if (wakeTimeNanos < earliestWakeTime) {
                wakeTimeNanos = earliestWakeTime;
            }

            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                ALOGW("processData(): entered at %lld nanos, currently %lld",
                      (long long) entryTimeNanos, (long long) currentTimeNanos);
                ALOGW("processData(): TIMEOUT after %lld nanos",
                      (long long) timeoutNanoseconds);
                ALOGW("processData(): wakeTime = %lld, deadline = %lld nanos",
                      (long long) wakeTimeNanos, (long long) deadlineNanos);
                ALOGW("processData(): past deadline by %d micros",
                      (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
                mClockModel.dump();
                mAudioEndpoint->dump();
                break;
            }

            if (ATRACE_ENABLED()) {
                int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
                ATRACE_INT(fifoName, fullFrames);
                int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
                ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
            }

            AudioClock::sleepUntilNanoTime(wakeTimeNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    if (ATRACE_ENABLED()) {
        int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }

    // return error or framesProcessed
    (void) loopCount;
    ATRACE_END();
    return (result < 0) ? result : numFrames - framesLeft;
}

void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp(position, time);
}

aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    const int32_t maximumSize = getBufferCapacity() - getFramesPerBurst();
    int32_t adjustedFrames = std::min(requestedFrames, maximumSize);
    // Buffer sizes should always be a multiple of framesPerBurst.
    int32_t numBursts = (static_cast<int64_t>(adjustedFrames) + getFramesPerBurst() - 1) /
            getFramesPerBurst();
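    // Worked example (illustrative values): requesting 1000 frames with a 96 frame
    // burst gives numBursts = ceil(1000 / 96) = 11, i.e. a 1056 frame buffer size.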

    // Use at least one burst
    if (numBursts == 0) {
        numBursts = 1;
    }

    // Set a minimum number of bursts if sample rate conversion is used.
    if ((getSampleRate() != getDeviceSampleRate()) &&
        (numBursts < MIN_SAMPLE_RATE_CONVERSION_NUM_BURSTS)) {
        numBursts = MIN_SAMPLE_RATE_CONVERSION_NUM_BURSTS;
    }

    if (mAudioEndpoint) {
        // Clip against the actual size from the endpoint.
        int32_t actualFramesDevice = 0;
        int32_t maximumFramesDevice = getDeviceBufferCapacity() - getDeviceFramesPerBurst();
        // Set to maximum size so we can write extra data when ready in order to reduce glitches.
        // The amount we keep in the buffer is controlled by mBufferSizeInFrames.
        mAudioEndpoint->setBufferSizeInFrames(maximumFramesDevice, &actualFramesDevice);
        int32_t actualNumBursts = actualFramesDevice / getDeviceFramesPerBurst();
        numBursts = std::min(numBursts, actualNumBursts);
    }

    const int32_t bufferSizeInFrames = numBursts * getFramesPerBurst();
    const int32_t deviceBufferSizeInFrames = numBursts * getDeviceFramesPerBurst();

    if (deviceBufferSizeInFrames != mDeviceBufferSizeInFrames) {
        android::mediametrics::LogItem(mMetricsId)
                .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
                .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, deviceBufferSizeInFrames)
                .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getXRunCount())
                .record();
    }

    mBufferSizeInFrames = bufferSizeInFrames;
    mDeviceBufferSizeInFrames = deviceBufferSizeInFrames;
    ALOGV("%s(%d) returns %d", __func__, requestedFrames, adjustedFrames);
    return (aaudio_result_t) adjustedFrames;
}

int32_t AudioStreamInternal::getBufferSize() const {
    return mBufferSizeInFrames;
}

int32_t AudioStreamInternal::getDeviceBufferSize() const {
    return mDeviceBufferSizeInFrames;
}

int32_t AudioStreamInternal::getBufferCapacity() const {
    return mBufferCapacityInFrames;
}

int32_t AudioStreamInternal::getDeviceBufferCapacity() const {
    return mDeviceBufferCapacityInFrames;
}

bool AudioStreamInternal::isClockModelInControl() const {
    return isActive() && mAudioEndpoint->isFreeRunning() && mClockModel.isRunning();
}