/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.media.audio.cts;

import static com.google.common.truth.Truth.assertWithMessage;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import android.Manifest;
import android.app.ActivityManager;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioDeviceInfo;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioRecord.OnRecordPositionUpdateListener;
import android.media.AudioRecordingConfiguration;
import android.media.AudioSystem;
import android.media.AudioTimestamp;
import android.media.MediaFormat;
import android.media.MediaRecorder;
import android.media.MicrophoneDirection;
import android.media.MicrophoneInfo;
import android.media.cts.AudioHelper;
import android.media.cts.StreamUtils;
import android.media.metrics.LogSessionId;
import android.media.metrics.MediaMetricsManager;
import android.media.metrics.RecordingSession;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.os.PersistableBundle;
import android.os.Process;
import android.os.SystemClock;
import android.platform.test.annotations.Presubmit;
import android.util.Log;

import androidx.test.InstrumentationRegistry;
import androidx.test.runner.AndroidJUnit4;

import com.android.compatibility.common.util.CddTest;
import com.android.compatibility.common.util.DeviceReportLog;
import com.android.compatibility.common.util.NonMainlineTest;
import com.android.compatibility.common.util.ResultType;
import com.android.compatibility.common.util.ResultUnit;
import com.android.compatibility.common.util.SystemUtil;

import com.google.common.collect.Range;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ShortBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;

@NonMainlineTest
@RunWith(AndroidJUnit4.class)
public class AudioRecordTest {
    private final static String TAG = "AudioRecordTest";
    private static final String REPORT_LOG_NAME = "CtsMediaAudioTestCases";
    private AudioRecord mAudioRecord;
    private AudioManager mAudioManager;
    private static final int SAMPLING_RATE_HZ = 44100;
    private boolean mIsOnMarkerReachedCalled;
    private boolean
            mIsOnPeriodicNotificationCalled;
    private boolean mIsHandleMessageCalled;
    private Looper mLooper;
    // For doTest
    private int mMarkerPeriodInFrames;
    private int mMarkerPosition;
    private Handler mHandler = new Handler(Looper.getMainLooper()) {
        @Override
        public void handleMessage(Message msg) {
            mIsHandleMessageCalled = true;
            super.handleMessage(msg);
        }
    };
    private static final int RECORD_DURATION_MS = 500;
    private static final int TEST_TIMING_TOLERANCE_MS = 70;

    @Before
    public void setUp() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioManager = InstrumentationRegistry.getInstrumentation()
                .getContext().getSystemService(AudioManager.class);
        /*
         * InstrumentationTestRunner.onStart() calls Looper.prepare(), which creates a looper
         * for the current thread. However, since we don't actually call loop() in the test,
         * any messages queued with that looper will never be consumed. Therefore, we must
         * create the instance in another thread, either without a looper, so the main looper is
         * used, or with an active looper.
         */
        Thread t = new Thread() {
            @Override
            public void run() {
                Looper.prepare();
                mLooper = Looper.myLooper();
                synchronized(this) {
                    mAudioRecord = new AudioRecord.Builder()
                            .setAudioFormat(new AudioFormat.Builder()
                                    .setSampleRate(SAMPLING_RATE_HZ)
                                    .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                                    .setChannelMask(AudioFormat.CHANNEL_IN_MONO).build())
                            .setAudioSource(MediaRecorder.AudioSource.DEFAULT)
                            .setBufferSizeInBytes(
                                    AudioRecord.getMinBufferSize(SAMPLING_RATE_HZ,
                                            AudioFormat.CHANNEL_IN_MONO,
                                            AudioFormat.ENCODING_PCM_16BIT) * 10)
                            .build();
                    this.notify();
                }
                Looper.loop();
            }
        };
        synchronized(t) {
            t.start(); // will block until we wait
            t.wait();
        }
        assertNotNull(mAudioRecord);
    }

    @After
    public void tearDown() throws Exception {
        if (hasMicrophone()) {
            mAudioRecord.release();
            mLooper.quit();
        }
    }

    private void reset() {
        mIsOnMarkerReachedCalled = false;
        mIsOnPeriodicNotificationCalled = false;
        mIsHandleMessageCalled = false;
    }

    @Test
    public void testAudioRecordProperties() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        assertEquals(AudioFormat.ENCODING_PCM_16BIT, mAudioRecord.getAudioFormat());
        assertEquals(MediaRecorder.AudioSource.DEFAULT, mAudioRecord.getAudioSource());
        assertEquals(1, mAudioRecord.getChannelCount());
        assertEquals(AudioFormat.CHANNEL_IN_MONO,
                mAudioRecord.getChannelConfiguration());
        assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());
        assertEquals(SAMPLING_RATE_HZ, mAudioRecord.getSampleRate());
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());

        int bufferSize = AudioRecord.getMinBufferSize(SAMPLING_RATE_HZ,
                AudioFormat.CHANNEL_CONFIGURATION_DEFAULT, AudioFormat.ENCODING_PCM_16BIT);
        assertTrue(bufferSize > 0);
    }

    @Test
    public void testAudioRecordOP() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final int SLEEP_TIME = 10;
        final int RECORD_TIME = 5000;
        assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());

        int markerInFrames = mAudioRecord.getSampleRate() / 2;
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setNotificationMarkerPosition(markerInFrames));
        assertEquals(markerInFrames, mAudioRecord.getNotificationMarkerPosition());
        int periodInFrames = mAudioRecord.getSampleRate();
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setPositionNotificationPeriod(periodInFrames));
        assertEquals(periodInFrames, mAudioRecord.getPositionNotificationPeriod());
        OnRecordPositionUpdateListener listener = new OnRecordPositionUpdateListener() {

            public void onMarkerReached(AudioRecord recorder) {
                mIsOnMarkerReachedCalled = true;
            }

            public void onPeriodicNotification(AudioRecord recorder) {
                mIsOnPeriodicNotificationCalled = true;
            }
        };
        mAudioRecord.setRecordPositionUpdateListener(listener);

        // use byte array as buffer
        final int BUFFER_SIZE = 102400;
        byte[] byteData = new byte[BUFFER_SIZE];
        long time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use short array as buffer
        short[] shortData = new short[BUFFER_SIZE];
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(shortData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use ByteBuffer as buffer
        ByteBuffer byteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteBuffer, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use handler
        final Handler handler = new Handler(Looper.getMainLooper()) {
            @Override
            public void handleMessage(Message msg) {
                mIsHandleMessageCalled = true;
                super.handleMessage(msg);
            }
        };

        mAudioRecord.setRecordPositionUpdateListener(listener, handler);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        // The handler argument is only ever used for getting the associated Looper
        assertFalse(mIsHandleMessageCalled);

        mAudioRecord.release();
        assertEquals(AudioRecord.STATE_UNINITIALIZED, mAudioRecord.getState());
    }

    @Test
    public void testAudioRecordResamplerMono8Bit() throws Exception {
        doTest("resampler_mono_8bit", true /*localRecord*/, false /*customHandler*/,
                1 /*periodsPerSecond*/, 1 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 88200 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_8BIT);
    }

    @Test
    public void testAudioRecordResamplerStereo8Bit() throws Exception {
        doTest("resampler_stereo_8bit", true /*localRecord*/, false /*customHandler*/,
                0 /*periodsPerSecond*/, 3 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 45000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_8BIT);
    }

    @Presubmit
    @Test
    public void testAudioRecordLocalMono16BitShort() throws Exception {
        doTest("local_mono_16bit_short", true /*localRecord*/, false /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 8000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, 500 /*TEST_TIME_MS*/);
    }

    @Test
    public void testAudioRecordLocalMono16Bit() throws Exception {
        doTest("local_mono_16bit", true /*localRecord*/, false /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 8000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testAudioRecordStereo16Bit() throws Exception {
        doTest("stereo_16bit", false /*localRecord*/, false /*customHandler*/,
                2 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 17000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testAudioRecordMonoFloat() throws Exception {
        doTest("mono_float", false /*localRecord*/, true /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 32000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Test audio record stereo float with maximum supported sample rate.
    @Test
    public void testAudioRecordStereoFloatMaxSampleRate() throws Exception {
        doTest("stereo_float", false /*localRecord*/, false /*customHandler*/,
                2 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, AudioSystem.SAMPLE_RATE_HZ_MAX,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Test audio record stereo 16 bits with minimum supported sample rate.
    @Test
    public void testAudioRecordStereo16BitMinSampleRate() throws Exception {
        doTest("stereo_16bit", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, true /*isChannelIndex*/, AudioSystem.SAMPLE_RATE_HZ_MIN,
                AudioFormat.CHANNEL_IN_STEREO,
                AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testAudioRecordLocalNonblockingStereoFloat() throws Exception {
        doTest("local_nonblocking_stereo_float", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 48000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Audit modes work best with non-blocking mode
    @Test
    public void testAudioRecordAuditByteBufferResamplerStereoFloat() throws Exception {
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        doTest("audit_byte_buffer_resampler_stereo_float",
                false /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, false /*isChannelIndex*/, 96000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    @Test
    public void testAudioRecordAuditChannelIndexMonoFloat() throws Exception {
        doTest("audit_channel_index_mono_float", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 47000 /*TEST_SR*/,
                (1 << 0) /* 1 channel */, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Audit buffers can run out of space with high sample rate,
    // so keep the channels and pcm encoding low
    @Test
    public void testAudioRecordAuditChannelIndex2() throws Exception {
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        doTest("audit_channel_index_2", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 192000 /*TEST_SR*/,
                (1 << 0) | (1 << 2) /* 2 channels, gap in middle */,
                AudioFormat.ENCODING_PCM_8BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex5() throws Exception {
        doTest("audit_channel_index_5", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4) /* 5 channels */,
                AudioFormat.ENCODING_PCM_16BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    // This tests the maximum reported Mixed PCM channel capability
    // for AudioRecord and AudioTrack.
    @Test
    public void testAudioRecordAuditChannelIndexMax() throws Exception {
        // We skip this test for isLowRamDevice(s).
        // Otherwise if the build reports a high PCM channel count capability,
        // we expect this CTS test to work at 16kHz.
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        final int maxChannels = AudioSystem.OUT_CHANNEL_COUNT_MAX; // FCC_LIMIT
        doTest("audit_channel_index_max", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << maxChannels) - 1,
                AudioFormat.ENCODING_PCM_16BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex3() throws Exception {
        doTest("audit_channel_index_3", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << 0) | (1 << 1) | (1 << 2) /* 3 channels */,
                AudioFormat.ENCODING_PCM_24BIT_PACKED);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex1() throws Exception {
        doTest("audit_channel_index_1", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 24000 /*TEST_SR*/,
                (1 << 0) /* 1 channel */,
                AudioFormat.ENCODING_PCM_32BIT);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
    // an empty Builder matches the documentation / expected values
    @Test
    public void testAudioRecordBuilderDefault() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderDefault";
        // expected values below match the AudioRecord.Builder documentation
        final int expectedCapturePreset = MediaRecorder.AudioSource.DEFAULT;
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // use builder with default values
        final AudioRecord rec = new AudioRecord.Builder().build();
        // save results
        final int observedSource = rec.getAudioSource();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedEncoding = rec.getAudioFormat();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": default capture preset", expectedCapturePreset, observedSource);
        assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": default encoding", expectedEncoding,
                observedEncoding);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
    // an incomplete AudioFormat matches the documentation / expected values
    @Test
    public void testAudioRecordBuilderPartialFormat() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderPartialFormat";
        final int expectedRate = 16000;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // expected values below match the AudioRecord.Builder documentation
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        // use builder with a partial audio format
        final AudioRecord rec = new AudioRecord.Builder()
                .setAudioFormat(new AudioFormat.Builder().setSampleRate(expectedRate).build())
                .build();
        // save results
        final int observedRate = rec.getSampleRate();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedEncoding = rec.getAudioFormat();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
        assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": default encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord matches
    // the parameters used in the builder
    @Test
    public void testAudioRecordBuilderParams() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderParams";
        final int expectedRate = 8000;
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedChannelCount = 1;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedSource = MediaRecorder.AudioSource.VOICE_COMMUNICATION;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // use builder with expected parameters
        final AudioRecord rec = new AudioRecord.Builder()
                .setAudioFormat(new AudioFormat.Builder()
                        .setSampleRate(expectedRate)
                        .setChannelMask(expectedChannel)
                        .setEncoding(expectedEncoding)
                        .build())
                .setAudioSource(expectedSource)
                .build();
        // save results
        final int observedRate = rec.getSampleRate();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedChannelCount = rec.getChannelCount();
        final int observedEncoding = rec.getAudioFormat();
        final int observedSource = rec.getAudioSource();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
        assertEquals(TEST_NAME + ": configured channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": configured encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": implicit channel count", expectedChannelCount,
                observedChannelCount);
        assertEquals(TEST_NAME + ": configured source", expectedSource, observedSource);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder.setRequestHotwordStream, and hotword capture
    @Test
    public void testAudioRecordBuilderHotword() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // Verify typical behavior continues to work, and clearing works
        AudioRecord regularRecord = new AudioRecord.Builder()
                .setRequestHotwordStream(true)
                .setRequestHotwordStream(false)
                .build();

        assertEquals(regularRecord.getState(), AudioRecord.STATE_INITIALIZED);
        assertFalse(regularRecord.isHotwordStream());
        assertFalse(regularRecord.isHotwordLookbackStream());
        regularRecord.startRecording();
        regularRecord.read(ByteBuffer.allocateDirect(4096), 4096);
        regularRecord.stop();
        regularRecord.release();

        regularRecord = new AudioRecord.Builder()
                .setRequestHotwordLookbackStream(true)
                .setRequestHotwordLookbackStream(false)
                .build();

        assertEquals(regularRecord.getState(), AudioRecord.STATE_INITIALIZED);
        assertFalse(regularRecord.isHotwordStream());
        assertFalse(regularRecord.isHotwordLookbackStream());
        regularRecord.startRecording();
        regularRecord.read(ByteBuffer.allocateDirect(4096), 4096);
        regularRecord.stop();
        regularRecord.release();

        // Should fail due to incompatible arguments
        assertThrows(UnsupportedOperationException.class,
                () -> new AudioRecord.Builder()
                        .setRequestHotwordStream(true)
                        .setRequestHotwordLookbackStream(true)
                        .build());

        // Should fail due to permission issues
        assertThrows(UnsupportedOperationException.class,
                () -> new AudioRecord.Builder()
                        .setRequestHotwordStream(true)
                        .build());
        assertThrows(UnsupportedOperationException.class,
                () -> new AudioRecord.Builder()
                        .setRequestHotwordLookbackStream(true)
                        .build());

        try {
            // Adopt permissions to access query APIs and test functionality
            InstrumentationRegistry.getInstrumentation()
                    .getUiAutomation()
                    .adoptShellPermissionIdentity(
                            Manifest.permission.CAPTURE_AUDIO_HOTWORD);
            InstrumentationRegistry.getInstrumentation()
                    .getContext()
                    .getSystemService(AudioManager.class)
                    .permissionUpdateBarrier();


            for (final boolean lookbackOn : new boolean[] { false, true }) {
                AudioRecord audioRecord = null;
                if (!mAudioManager.isHotwordStreamSupported(lookbackOn)) {
                    // Hardware does not support capturing hotword content
                    continue;
                }
                try {
                    AudioRecord.Builder builder = new AudioRecord.Builder();
                    if (lookbackOn) {
                        builder.setRequestHotwordLookbackStream(true);
                    } else {
                        builder.setRequestHotwordStream(true);
                    }
                    audioRecord = builder.build();
                    if (lookbackOn) {
                        assertTrue(audioRecord.isHotwordLookbackStream());
                    } else {
                        assertTrue(audioRecord.isHotwordStream());
                    }
                    audioRecord.startRecording();
                    audioRecord.read(ByteBuffer.allocateDirect(4096), 4096);
                    audioRecord.stop();
                } finally {
                    if (audioRecord != null) {
                        audioRecord.release();
                    }
                }
            }
        } finally {
            InstrumentationRegistry.getInstrumentation()
                    .getUiAutomation()
                    .dropShellPermissionIdentity();
            InstrumentationRegistry.getInstrumentation()
                    .getContext()
                    .getSystemService(AudioManager.class)
                    .permissionUpdateBarrier();
        }
    }

    // Test AudioRecord to ensure we can build after a failure.
    @Test
    public void testAudioRecordBufferSize() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBufferSize";

        // use builder with parameters that should fail
        final int superBigBufferSize = 1 << 28;
        try {
            final AudioRecord record = new AudioRecord.Builder()
                    .setBufferSizeInBytes(superBigBufferSize)
                    .build();
            record.release();
            fail(TEST_NAME + ": should throw exception on failure");
        } catch (UnsupportedOperationException e) {
            ;
        }

        // we should be able to create again with minimum buffer size
        final int verySmallBufferSize = 2 * 3 * 4; // frame size multiples
        final AudioRecord record2 = new AudioRecord.Builder()
                .setBufferSizeInBytes(verySmallBufferSize)
                .build();

        final int observedState2 = record2.getState();
        final int observedBufferSize2 = record2.getBufferSizeInFrames();
        record2.release();

        // succeeds for minimum buffer size
        assertEquals(TEST_NAME + ": state", AudioRecord.STATE_INITIALIZED, observedState2);
        // should force the minimum size buffer which is > 0
        assertTrue(TEST_NAME + ": buffer frame count", observedBufferSize2 > 0);
    }

    @Test
    public void testTimestamp() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final String TEST_NAME = "testTimestamp";
        AudioRecord record = null;

        try {
            final int NANOS_PER_MILLISECOND = 1000000;
            final long RECORD_TIME_MS = 2000;
            final long RECORD_TIME_NS = RECORD_TIME_MS * NANOS_PER_MILLISECOND;
            final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT; // fixed at this time.
            final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_STEREO;
            final int RECORD_SAMPLE_RATE = 23456; // requires resampling
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(RECORD_SAMPLE_RATE)
                            .setChannelMask(RECORD_CHANNEL_MASK)
                            .setEncoding(RECORD_ENCODING)
                            .build())
                    .build();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels =
                    AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK);
            final int bytesPerSample = AudioFormat.getBytesPerSample(RECORD_ENCODING);
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames =
                    (int)((long)RECORD_TIME_MS * RECORD_SAMPLE_RATE / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;

            final int tries = 4;
            for (int i = 0; i < tries; ++i) {
                Log.d(TEST_NAME, "try " + i);
                final long trackStartTimeNs = System.nanoTime();
                final long trackStartTimeBootNs = android.os.SystemClock.elapsedRealtimeNanos();

                record.startRecording();

                final AudioTimestamp ts = new AudioTimestamp();
                int samplesRead = 0;
                // For 16 bit data, use shorts
                final short[] shortData = new short[BUFFER_SAMPLES];
                final AudioHelper.TimestampVerifier tsVerifier =
                        new AudioHelper.TimestampVerifier(TAG, RECORD_SAMPLE_RATE,
                                0 /* startFrames */, isProAudioDevice());

                while (samplesRead < targetSamples) {
                    final int amount = samplesRead == 0 ? numChannels :
                            Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                    final int ret = record.read(shortData, 0, amount);
                    assertWithMessage("read incorrect amount")
                            .that(ret)
                            .isEqualTo(amount);
                    // timestamps follow a different path than data, so it is conceivable
                    // that first data arrives before the first timestamp is ready.

                    if (record.getTimestamp(ts, AudioTimestamp.TIMEBASE_MONOTONIC)
                            == AudioRecord.SUCCESS) {
                        tsVerifier.add(ts);
                    }
                    samplesRead += ret;
                }
                record.stop();

                // stop is synchronous, but need not be in the future.
                final long SLEEP_AFTER_STOP_FOR_INACTIVITY_MS = 1000;
                Thread.sleep(SLEEP_AFTER_STOP_FOR_INACTIVITY_MS);

                AudioTimestamp stopTs = new AudioTimestamp();
                AudioTimestamp stopTsBoot = new AudioTimestamp();

                assertWithMessage("timestamp monotonic returns success")
                        .that(record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC))
                        .isEqualTo(AudioRecord.SUCCESS);
                assertWithMessage("timestamp boottime returns success")
                        .that(record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME))
                        .isEqualTo(AudioRecord.SUCCESS);

                assertWithMessage("timestamp monotonic and boottime have same frame position")
                        .that(stopTs.framePosition)
                        .isEqualTo(stopTsBoot.framePosition);

                assertWithMessage("timestamp monotonic frame position is at least target frames")
                        .that(stopTs.framePosition)
                        .isAtLeast(targetFrames);
                assertWithMessage("timestamp monotonic elapsed time is at least record time")
                        .that(stopTs.nanoTime - trackStartTimeNs)
                        .isAtLeast(RECORD_TIME_NS);
                assertWithMessage("timestamp boottime elapsed time is at least record time")
                        .that(stopTsBoot.nanoTime - trackStartTimeBootNs)
                        .isAtLeast(RECORD_TIME_NS);

                tsVerifier.verifyAndLog(trackStartTimeNs, "test_timestamp" /* logName */);
            }
        } finally {
            if (record != null) {
                record.release();
                record = null;
            }
        }
    }

    @Test
    public void testRecordNoDataForIdleUids() throws Exception {
        // Removed in favor of audiorecordpermissiontests
    }

    @Test
    public void testRestrictedAudioSourcePermissions() throws Exception {
        // Make sure that the following audio sources cannot be used by apps that
        // don't have the CAPTURE_AUDIO_OUTPUT permissions:
        // - VOICE_CALL,
        // - VOICE_DOWNLINK
        // - VOICE_UPLINK
        // - REMOTE_SUBMIX
        // - ECHO_REFERENCE - 1997
        // - RADIO_TUNER - 1998
        // - HOTWORD - 1999
        // The attempt to build an AudioRecord with those sources should throw either
        // an UnsupportedOperationException or an IllegalArgumentException.
        final int[] restrictedAudioSources = new int [] {
            MediaRecorder.AudioSource.VOICE_CALL,
            MediaRecorder.AudioSource.VOICE_DOWNLINK,
            MediaRecorder.AudioSource.VOICE_UPLINK,
            MediaRecorder.AudioSource.REMOTE_SUBMIX,
            1997,
            1998,
            1999
        };

        for (int source : restrictedAudioSources) {
            // AudioRecord.Builder should fail when trying to use
            // one of the voice call audio sources.
            try {
                AudioRecord ar = new AudioRecord.Builder()
                        .setAudioSource(source)
                        .setAudioFormat(new AudioFormat.Builder()
                                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                                .setSampleRate(8000)
                                .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
                                .build())
                        .build();
                fail("testRestrictedAudioSourcePermissions: no exception thrown for source: "
                        + source);
            } catch (Exception e) {
                Log.i(TAG, "Exception: " + e);
                if (!UnsupportedOperationException.class.isInstance(e)
                        && !IllegalArgumentException.class.isInstance(e)) {
                    fail("testRestrictedAudioSourcePermissions: no exception thrown for source: "
                            + source + " Exception:" + e);
                }
            }
        }
    }

    @Test
    public void testMediaMetrics() throws Exception {
        if (!hasMicrophone()) {
            return;
        }

        AudioRecord record = null;
        try {
            final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
            final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_MONO;
            final int RECORD_SAMPLE_RATE = 8000;
            final AudioFormat format = new AudioFormat.Builder()
                    .setSampleRate(RECORD_SAMPLE_RATE)
                    .setChannelMask(RECORD_CHANNEL_MASK)
                    .setEncoding(RECORD_ENCODING)
                    .build();

            // Setup a recorder
            record = new AudioRecord.Builder()
                    .setAudioSource(MediaRecorder.AudioSource.MIC)
                    .setAudioFormat(format)
                    .build();

            final PersistableBundle metrics = record.getMetrics();

            assertNotNull("null metrics", metrics);
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.ENCODING,
                    new String("AUDIO_FORMAT_PCM_16_BIT"));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.SOURCE,
                    new String("AUDIO_SOURCE_MIC"));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.SAMPLERATE,
                    new Integer(RECORD_SAMPLE_RATE));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.CHANNELS,
                    new Integer(AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK)));

            // deprecated, value ignored.
            AudioHelper.assertMetricsKey(metrics, AudioRecord.MetricsConstants.LATENCY);

            // TestApi:
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.CHANNEL_MASK,
                    new Long(RECORD_CHANNEL_MASK));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.FRAME_COUNT,
                    new Integer(record.getBufferSizeInFrames()));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.DURATION_MS,
                    new Double(0.));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.START_COUNT,
                    new Long(0));

            // TestApi: no particular value checking.
            AudioHelper.assertMetricsKey(metrics, AudioRecord.MetricsConstants.PORT_ID);
            AudioHelper.assertMetricsKey(metrics, AudioRecord.MetricsConstants.ATTRIBUTES);
        } finally {
            if (record != null) {
                record.release();
            }
        }
    }

    private void printMicrophoneInfo(MicrophoneInfo microphone) {
        Log.i(TAG, "deviceId:" + microphone.getDescription());
        Log.i(TAG, "portId:" + microphone.getId());
        Log.i(TAG, "type:" + microphone.getType());
        Log.i(TAG, "address:" + microphone.getAddress());
        Log.i(TAG, "deviceLocation:" + microphone.getLocation());
        Log.i(TAG, "deviceGroup:" + microphone.getGroup()
                + " index:" + microphone.getIndexInTheGroup());
        MicrophoneInfo.Coordinate3F position = microphone.getPosition();
        Log.i(TAG, "position:" + position.x + "," + position.y + "," + position.z);
        MicrophoneInfo.Coordinate3F orientation = microphone.getOrientation();
        Log.i(TAG, "orientation:" + orientation.x + "," + orientation.y + "," + orientation.z);
        Log.i(TAG, "frequencyResponse:" + microphone.getFrequencyResponse());
        Log.i(TAG, "channelMapping:" + microphone.getChannelMapping());
        Log.i(TAG, "sensitivity:" + microphone.getSensitivity());
        Log.i(TAG, "max spl:" + microphone.getMaxSpl());
        Log.i(TAG, "min spl:" + microphone.getMinSpl());
        Log.i(TAG, "directionality:" + microphone.getDirectionality());
        Log.i(TAG, "******");
    }

    @CddTest(requirement="5.4.1/C-1-4")
    @Test
    public void testGetActiveMicrophones() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioRecord.startRecording();
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        }
        List<MicrophoneInfo> activeMicrophones = mAudioRecord.getActiveMicrophones();
        assertTrue(activeMicrophones.size() > 0);
        for (MicrophoneInfo activeMicrophone : activeMicrophones) {
            printMicrophoneInfo(activeMicrophone);
        }
    }

    private Executor mExec = new Executor() {
        @Override
        public void execute(Runnable command) {
            command.run();
        }
    };

    @Test
    public void testAudioRecordInfoCallback() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        AudioTestUtil.AudioRecordingCallbackUtil callback =
                new AudioTestUtil.AudioRecordingCallbackUtil(
                        mAudioRecord.getAudioSessionId(), MediaRecorder.AudioSource.DEFAULT);
        mAudioRecord.registerAudioRecordingCallback(mExec, callback);
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());

        callback.await(TEST_TIMING_TOLERANCE_MS);
        assertTrue(callback.mCalled);
        assertTrue(callback.mConfigs.size() <= 1);
        if (callback.mConfigs.size() == 1) {
            checkRecordingConfig(callback.mConfigs.get(0));
        }

        Thread.sleep(RECORD_DURATION_MS);
        mAudioRecord.unregisterAudioRecordingCallback(callback);
    }

    @Test
    public void testGetActiveRecordingConfiguration() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());

        try {
            Thread.sleep(RECORD_DURATION_MS);
        } catch (InterruptedException e) {
        }

        AudioRecordingConfiguration config = mAudioRecord.getActiveRecordingConfiguration();
        checkRecordingConfig(config);

        mAudioRecord.release();
        // test no exception is thrown when querying immediately after release()
        // which is not a synchronous operation
        config = mAudioRecord.getActiveRecordingConfiguration();
        try {
            Thread.sleep(TEST_TIMING_TOLERANCE_MS);
        } catch (InterruptedException e) {
        }
        assertNull("Recording configuration not null after release",
                mAudioRecord.getActiveRecordingConfiguration());
    }

    private static void checkRecordingConfig(AudioRecordingConfiguration config) {
        assertNotNull(config);
        AudioFormat format = config.getClientFormat();
        assertEquals(AudioFormat.CHANNEL_IN_MONO, format.getChannelMask());
        assertEquals(AudioFormat.ENCODING_PCM_16BIT, format.getEncoding());
        assertEquals(SAMPLING_RATE_HZ, format.getSampleRate());
        assertEquals(MediaRecorder.AudioSource.MIC, config.getAudioSource());
        assertNotNull(config.getAudioDevice());
        assertNotNull(config.getClientEffects());
        assertNotNull(config.getEffects());
        // no requirement here, just testing the API
        config.isClientSilenced();
    }

    private AudioRecord createAudioRecord(
            int audioSource, int sampleRateInHz,
            int channelConfig, int audioFormat, int bufferSizeInBytes,
            boolean auditRecording, boolean isChannelIndex) {
        final AudioRecord record;
        if (auditRecording) {
            record = new AudioHelper.AudioRecordAudit(
                    audioSource, sampleRateInHz, channelConfig,
                    audioFormat, bufferSizeInBytes, isChannelIndex);
        } else if (isChannelIndex) {
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setChannelIndexMask(channelConfig)
                            .setEncoding(audioFormat)
                            .setSampleRate(sampleRateInHz)
                            .build())
                    .setBufferSizeInBytes(bufferSizeInBytes)
                    .build();
        } else {
            record = new AudioRecord(audioSource, sampleRateInHz, channelConfig,
                    audioFormat, bufferSizeInBytes);
        }

        // did we get the AudioRecord we expected?
        final AudioFormat format = record.getFormat();
        assertEquals(isChannelIndex ? channelConfig : AudioFormat.CHANNEL_INVALID,
                format.getChannelIndexMask());
        assertEquals(isChannelIndex ? AudioFormat.CHANNEL_INVALID : channelConfig,
                format.getChannelMask());
        assertEquals(audioFormat, format.getEncoding());
        assertEquals(sampleRateInHz, format.getSampleRate());
        final int frameSize =
                format.getChannelCount() * AudioFormat.getBytesPerSample(audioFormat);
        // our native frame count cannot be smaller than our minimum buffer size request.
        assertTrue(record.getBufferSizeInFrames() * frameSize >= bufferSizeInBytes);
        return record;
    }

    private void doTest(String reportName, boolean localRecord, boolean customHandler,
            int periodsPerSecond, int markerPeriodsPerSecond,
            boolean useByteBuffer, boolean blocking,
            final boolean auditRecording, final boolean isChannelIndex,
            final int TEST_SR, final int TEST_CONF, final int TEST_FORMAT) throws Exception {
        final int TEST_TIME_MS = auditRecording ? 10000 : 2000;
        doTest(reportName, localRecord, customHandler, periodsPerSecond, markerPeriodsPerSecond,
                useByteBuffer, blocking, auditRecording, isChannelIndex,
                TEST_SR, TEST_CONF, TEST_FORMAT, TEST_TIME_MS);
    }

    private void doTest(String reportName, boolean localRecord, boolean customHandler,
            int periodsPerSecond, int markerPeriodsPerSecond,
            boolean useByteBuffer, boolean blocking,
            final boolean auditRecording, final boolean isChannelIndex,
            final int TEST_SR, final int TEST_CONF, final int TEST_FORMAT, final int TEST_TIME_MS)
            throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // audit recording plays back recorded audio, so use longer test timing
        final int TEST_SOURCE = MediaRecorder.AudioSource.DEFAULT;
        mIsHandleMessageCalled = false;

        // For channelIndex use one frame in bytes for buffer size.
        // This is adjusted to the minimum buffer size by native code.
        final int bufferSizeInBytes = isChannelIndex ?
                (AudioFormat.getBytesPerSample(TEST_FORMAT)
                        * AudioFormat.channelCountFromInChannelMask(TEST_CONF)) :
                AudioRecord.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        assertWithMessage("getMinBufferSize() reports nonzero value")
                .that(bufferSizeInBytes)
                .isGreaterThan(0);

        final AudioRecord record;
        final AudioHelper
                .MakeSomethingAsynchronouslyAndLoop<AudioRecord> makeSomething;

        if (localRecord) {
            makeSomething = null;
            record = createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                    TEST_FORMAT, bufferSizeInBytes, auditRecording, isChannelIndex);
        } else {
            makeSomething =
                    new AudioHelper.MakeSomethingAsynchronouslyAndLoop<AudioRecord>(
                            new AudioHelper.MakesSomething<AudioRecord>() {
                                @Override
                                public AudioRecord makeSomething() {
                                    return createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                                            TEST_FORMAT, bufferSizeInBytes, auditRecording,
                                            isChannelIndex);
                                }
                            }
                    );
            // create AudioRecord on different thread's looper.
            record = makeSomething.make();
        }

        // AudioRecord creation may have silently failed, check state now
        assertWithMessage("getState() reports STATE_INITIALIZED")
                .that(record.getState())
                .isEqualTo(AudioRecord.STATE_INITIALIZED);

        final MockOnRecordPositionUpdateListener listener;
        if (customHandler) {
            listener = new MockOnRecordPositionUpdateListener(record, mHandler);
        } else {
            listener = new MockOnRecordPositionUpdateListener(record);
        }

        final int updatePeriodInFrames = (periodsPerSecond == 0)
                ? 0 : TEST_SR / periodsPerSecond;
        // After starting, there is no guarantee when the first frame of data is read.
        long firstSampleTime = 0;

        // blank final variables: all successful paths will initialize the times.
        // this must be declared here for visibility as they are set within the try block.
        final long endTime;
        final long startTime;
        final long stopRequestTime;
        final long stopTime;
        final long coldInputStartTime;

        try {
            if (markerPeriodsPerSecond != 0) {
                mMarkerPeriodInFrames = TEST_SR / markerPeriodsPerSecond;
                mMarkerPosition = mMarkerPeriodInFrames;
                assertWithMessage("setNotificationMarkerPosition() should succeed")
                        .that(record.setNotificationMarkerPosition(mMarkerPosition))
                        .isEqualTo(AudioRecord.SUCCESS);
            } else {
                mMarkerPeriodInFrames = 0;
            }

            assertEquals(AudioRecord.SUCCESS,
                    record.setPositionNotificationPeriod(updatePeriodInFrames));

            // at the start, there is no timestamp.
            AudioTimestamp startTs = new AudioTimestamp();
            assertWithMessage("getTimestamp without startRecording() is ERROR_INVALID_OPERATION")
                    .that(record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC))
                    .isEqualTo(AudioRecord.ERROR_INVALID_OPERATION);
            assertWithMessage("invalid getTimestamp doesn't affect nanoTime")
                    .that(startTs.nanoTime)
                    .isEqualTo(0);

            listener.start(TEST_SR);
            record.startRecording();
            assertWithMessage("getRecordingState() should report RECORDSTATE_RECORDING")
                    .that(record.getRecordingState())
                    .isEqualTo(AudioRecord.RECORDSTATE_RECORDING);
            startTime = System.currentTimeMillis();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels = AudioFormat.channelCountFromInChannelMask(TEST_CONF);
            final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT);
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;
            // TODO: verify behavior when buffer size is not a multiple of frame size.

            // For fine accuracy timestamp checks, we sample the timestamps
            // 1/6 and 5/6 of the way through recording to avoid the effects
            // of AudioRecord start and stop.
            final int runningTimestampStart = targetSamples * 1 / 6;
            final int runningTimestampStop = targetSamples * 5 / 6;
            AudioTimestamp running1Ts = new AudioTimestamp();
            AudioTimestamp running2Ts = new AudioTimestamp();

            int samplesRead = 0;
            // abstract out the buffer type used with lambda.
            final byte[] byteData = new byte[BUFFER_SAMPLES];
            final short[] shortData = new short[BUFFER_SAMPLES];
            final float[] floatData = new float[BUFFER_SAMPLES];
            final ByteBuffer byteBuffer =
                    ByteBuffer.allocateDirect(BUFFER_SAMPLES * bytesPerSample);
            BiFunction<Integer, Boolean, Integer> reader = null;

            // depending on the options, create a lambda to read data.
            if (useByteBuffer) {
                reader = (samples, blockForData) -> {
                    final int amount = samples * bytesPerSample; // in bytes
                    // read always places data at the start of the byte buffer;
                    // position and limit are ignored. Test this by setting
                    // position and limit to arbitrary values here.
                    final int lastPosition = 7;
                    final int lastLimit = 13;
                    byteBuffer.position(lastPosition);
                    byteBuffer.limit(lastLimit);
                    final int ret = blockForData ? record.read(byteBuffer, amount) :
                            record.read(byteBuffer, amount, AudioRecord.READ_NON_BLOCKING);
                    return ret / bytesPerSample;
                };
            } else {
                switch (TEST_FORMAT) {
                    case AudioFormat.ENCODING_PCM_8BIT:
                        reader = (samples, blockForData) -> {
                            return blockForData ? record.read(byteData, 0, samples) :
                                    record.read(byteData, 0, samples,
                                            AudioRecord.READ_NON_BLOCKING);
                        };
                        break;
                    case AudioFormat.ENCODING_PCM_16BIT:
                        reader = (samples, blockForData) -> {
                            return blockForData ? record.read(shortData, 0, samples) :
                                    record.read(shortData, 0, samples,
                                            AudioRecord.READ_NON_BLOCKING);
                        };
                        break;
                    case AudioFormat.ENCODING_PCM_FLOAT:
                        reader = (samples, blockForData) -> {
                            return record.read(floatData, 0, samples,
                                    blockForData ? AudioRecord.READ_BLOCKING
                                            : AudioRecord.READ_NON_BLOCKING);
                        };
                        break;
                }
            }

            while (samplesRead < targetSamples) {
                // the first time through, we read a single frame.
                // this sets the recording anchor position.
                final int amount = samplesRead == 0 ?
                        numChannels :
                        Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                final int ret = reader.apply(amount, blocking);
                if (blocking) {
                    assertWithMessage("blocking reads should return amount requested")
                            .that(amount).isEqualTo(ret);
                } else {
                    assertWithMessage("non-blocking reads should return amount in range: "
                            + "0 <= " + ret + " <= " + amount)
                            .that(ret)
                            .isIn(Range.closed(0, amount));
                }
                if (samplesRead == 0 && ret > 0) {
                    firstSampleTime = System.currentTimeMillis();
                }
                samplesRead += ret;
                if (startTs.nanoTime == 0 && ret > 0 &&
                        record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC)
                                == AudioRecord.SUCCESS) {
                    assertWithMessage("expecting valid timestamp with nonzero nanoTime")
                            .that(startTs.nanoTime)
                            .isGreaterThan(0);
                }
                if (samplesRead > runningTimestampStart
                        && running1Ts.nanoTime == 0 && ret > 0) {
                    record.getTimestamp(running1Ts, AudioTimestamp.TIMEBASE_MONOTONIC);
                }
                if (samplesRead > runningTimestampStop
                        && running2Ts.nanoTime == 0 && ret > 0) {
                    record.getTimestamp(running2Ts, AudioTimestamp.TIMEBASE_MONOTONIC);
                }
            }

            // We've read all the frames, now check the record timing.
            endTime = System.currentTimeMillis();

            coldInputStartTime = firstSampleTime - startTime;
            //Log.d(TAG, "first sample time " + coldInputStartTime
            //        + " test time " + (endTime - firstSampleTime));

            if (coldInputStartTime > 200) {
                Log.w(TAG, "cold input start time way too long "
                        + coldInputStartTime + " > 200ms");
            } else if (coldInputStartTime > 100) {
                Log.w(TAG, "cold input start time too long "
                        + coldInputStartTime + " > 100ms");
            }

            final int COLD_INPUT_START_TIME_LIMIT_MS = 5000;
            assertWithMessage("track must start within " + COLD_INPUT_START_TIME_LIMIT_MS
                    + " millis")
                    .that(coldInputStartTime)
                    .isLessThan(COLD_INPUT_START_TIME_LIMIT_MS);

            // Verify recording completes within 50 ms of expected test time (typical 20ms)
            final int RECORDING_TIME_TOLERANCE_MS = auditRecording ?
                    (isLowLatencyDevice() ? 1000 : 2000) : (isLowLatencyDevice() ? 50 : 400);
            assertWithMessage("recording must complete within " + RECORDING_TIME_TOLERANCE_MS
                    + " of expected test time")
                    .that((double) (endTime - firstSampleTime))
                    .isWithin(RECORDING_TIME_TOLERANCE_MS)
                    .of(TEST_TIME_MS);

            // Even though we've read all the frames we want, the events may not be sent to
            // the listeners (events are handled through a separate internal callback thread).
            // One must sleep to make sure the last event(s) come in.
            Thread.sleep(30);

            stopRequestTime = System.currentTimeMillis();
            record.stop();
            assertWithMessage("state should be RECORDSTATE_STOPPED after stop()")
                    .that(record.getRecordingState())
                    .isEqualTo(AudioRecord.RECORDSTATE_STOPPED);

            stopTime = System.currentTimeMillis();

            // stop listening - we should be done.
            // Caution M behavior and likely much earlier:
            // we assume no events can happen after stop(), but this may not
            // always be true as stop can take 100ms to complete (as it may disable
            // input recording on the hal); thus the event handler may be blocked with
            // valid events, issued right after stop completes. Except for those events,
            // no other events should show up after stop.
            // This behavior may change in the future but we account for it here in testing.
            final long SLEEP_AFTER_STOP_FOR_EVENTS_MS = 30;
            Thread.sleep(SLEEP_AFTER_STOP_FOR_EVENTS_MS);
            listener.stop();

            // get stop timestamp
            // Note: the stop timestamp is collected *after* stop is called.
            AudioTimestamp stopTs = new AudioTimestamp();
            assertWithMessage("should successfully get timestamp after stop")
                    .that(record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC))
                    .isEqualTo(AudioRecord.SUCCESS);
            AudioTimestamp stopTsBoot = new AudioTimestamp();
            assertWithMessage("should successfully get boottime timestamp after stop")
                    .that(record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME))
                    .isEqualTo(AudioRecord.SUCCESS);

            // printTimestamp("startTs", startTs);
            // printTimestamp("stopTs", stopTs);
            // printTimestamp("stopTsBoot", stopTsBoot);
            // Log.d(TAG, "time Monotonic " + System.nanoTime());
            // Log.d(TAG, "time Boottime " + SystemClock.elapsedRealtimeNanos());

            // stop should not reset timestamps
            assertWithMessage("stop timestamp position should be no less than frames read")
                    .that(stopTs.framePosition)
                    .isAtLeast(targetFrames);
            assertWithMessage("stop timestamp position should be same "
                    + "between monotonic and boot timestamps")
                    .that(stopTs.framePosition)
                    .isEqualTo(stopTsBoot.framePosition);
            assertWithMessage("stop timestamp nanoTime must be greater than 0")
                    .that(stopTs.nanoTime)
                    .isGreaterThan(0);

            // timestamps follow a different path than data, so it is conceivable
            // that first data arrives before the first timestamp is ready.
            assertWithMessage("start timestamp must have positive time")
                    .that(startTs.nanoTime)
                    .isGreaterThan(0);

            // we allow more timestamp inaccuracy for the entire recording run,
            // including start and stop.
            verifyContinuousTimestamps(startTs, stopTs, TEST_SR, true /* coarse */);

            // during the middle 2/3 of the run, we expect stable timestamps.
            verifyContinuousTimestamps(running1Ts, running2Ts, TEST_SR, false /* coarse */);

            // clean up
            if (makeSomething != null) {
                makeSomething.join();
            }

        } finally {
            listener.release();
            // we must release the record immediately as it is a system-wide
            // resource needed for other tests.
            record.release();
        }

        final int markerPeriods = markerPeriodsPerSecond * TEST_TIME_MS / 1000;
        final int updatePeriods = periodsPerSecond * TEST_TIME_MS / 1000;
        final int markerPeriodsMax =
                markerPeriodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;
        final int updatePeriodsMax =
                periodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;

        // collect statistics
        final ArrayList<Integer> markerList = listener.getMarkerList();
        final ArrayList<Integer> periodicList = listener.getPeriodicList();
        // verify count of markers and periodic notifications.
        // there could be an extra notification since we don't stop() immediately,
        // but rather wait for potential events to come in.
1420 //Log.d(TAG, "markerPeriods " + markerPeriods + 1421 // " markerPeriodsReceived " + markerList.size()); 1422 //Log.d(TAG, "updatePeriods " + updatePeriods + 1423 // " updatePeriodsReceived " + periodicList.size()); 1424 if (isLowLatencyDevice()) { 1425 assertWithMessage(TAG + ": markerPeriods " + markerPeriods 1426 + " <= markerPeriodsReceived " + markerList.size()) 1427 .that(markerPeriods) 1428 .isAtMost(markerList.size()); 1429 assertWithMessage(TAG + ": markerPeriodsReceived " + markerList.size() 1430 + " <= markerPeriodsMax " + markerPeriodsMax) 1431 .that(markerList.size()) 1432 .isAtMost(markerPeriodsMax); 1433 1434 assertWithMessage(TAG + ": updatePeriods " + updatePeriods 1435 + " <= updatePeriodsReceived " + periodicList.size()) 1436 .that(updatePeriods) 1437 .isAtMost(periodicList.size()); 1438 assertWithMessage(TAG + ": updatePeriodsReceived " + periodicList.size() 1439 + " <= updatePeriodsMax " + updatePeriodsMax) 1440 .that(periodicList.size()) 1441 .isAtMost(updatePeriodsMax); 1442 } 1443 1444 // Since we don't have accurate positioning of the start time of the recorder, 1445 // and there is no record.getPosition(), we consider only differential timing 1446 // from the first marker or periodic event. 1447 final int toleranceInFrames = TEST_SR * 80 / 1000; // 80 ms 1448 final int testTimeInFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000); 1449 1450 AudioHelper.Statistics markerStat = new AudioHelper.Statistics(); 1451 for (int i = 1; i < markerList.size(); ++i) { 1452 final int expected = mMarkerPeriodInFrames * i; 1453 if (markerList.get(i) > testTimeInFrames) { 1454 break; // don't consider any notifications when we might be stopping. 1455 } 1456 final int actual = markerList.get(i) - markerList.get(0); 1457 //Log.d(TAG, "Marker: " + i + " expected(" + expected + ") actual(" + actual 1458 // + ") diff(" + (actual - expected) + ")" 1459 // + " tolerance " + toleranceInFrames); 1460 if (isLowLatencyDevice()) { 1461 assertWithMessage("marker period should match frame count") 1462 .that((double) actual) 1463 .isWithin(toleranceInFrames) 1464 .of(expected); 1465 } 1466 markerStat.add((double)(actual - expected) * 1000 / TEST_SR); 1467 } 1468 1469 AudioHelper.Statistics periodicStat = new AudioHelper.Statistics(); 1470 for (int i = 1; i < periodicList.size(); ++i) { 1471 final int expected = updatePeriodInFrames * i; 1472 if (periodicList.get(i) > testTimeInFrames) { 1473 break; // don't consider any notifications when we might be stopping. 
1474 } 1475 final int actual = periodicList.get(i) - periodicList.get(0); 1476 //Log.d(TAG, "Update: " + i + " expected(" + expected + ") actual(" + actual 1477 // + ") diff(" + (actual - expected) + ")" 1478 // + " tolerance " + toleranceInFrames); 1479 if (isLowLatencyDevice()) { 1480 assertWithMessage("position period check should match frame count") 1481 .that((double) actual) 1482 .isWithin(toleranceInFrames) 1483 .of(expected); 1484 } 1485 periodicStat.add((double)(actual - expected) * 1000 / TEST_SR); 1486 } 1487 1488 // report this 1489 DeviceReportLog log = new DeviceReportLog(REPORT_LOG_NAME, reportName); 1490 log.addValue("start_recording_lag", coldInputStartTime, ResultType.LOWER_BETTER, 1491 ResultUnit.MS); 1492 log.addValue("stop_execution_time", stopTime - stopRequestTime, ResultType.LOWER_BETTER, 1493 ResultUnit.MS); 1494 log.addValue("total_record_time_expected", TEST_TIME_MS, ResultType.NEUTRAL, ResultUnit.MS); 1495 log.addValue("total_record_time_actual", endTime - firstSampleTime, ResultType.NEUTRAL, 1496 ResultUnit.MS); 1497 log.addValue("total_markers_expected", markerPeriods, ResultType.NEUTRAL, ResultUnit.COUNT); 1498 log.addValue("total_markers_actual", markerList.size(), ResultType.NEUTRAL, 1499 ResultUnit.COUNT); 1500 log.addValue("total_periods_expected", updatePeriods, ResultType.NEUTRAL, ResultUnit.COUNT); 1501 log.addValue("total_periods_actual", periodicList.size(), ResultType.NEUTRAL, 1502 ResultUnit.COUNT); 1503 log.addValue("average_marker_diff", markerStat.getAvg(), ResultType.LOWER_BETTER, 1504 ResultUnit.MS); 1505 log.addValue("maximum_marker_abs_diff", markerStat.getMaxAbs(), ResultType.LOWER_BETTER, 1506 ResultUnit.MS); 1507 log.addValue("average_marker_abs_diff", markerStat.getAvgAbs(), ResultType.LOWER_BETTER, 1508 ResultUnit.MS); 1509 log.addValue("average_periodic_diff", periodicStat.getAvg(), ResultType.LOWER_BETTER, 1510 ResultUnit.MS); 1511 log.addValue("maximum_periodic_abs_diff", periodicStat.getMaxAbs(), ResultType.LOWER_BETTER, 1512 ResultUnit.MS); 1513 log.addValue("average_periodic_abs_diff", periodicStat.getAvgAbs(), ResultType.LOWER_BETTER, 1514 ResultUnit.MS); 1515 log.setSummary("unified_abs_diff", (periodicStat.getAvgAbs() + markerStat.getAvgAbs()) / 2, 1516 ResultType.LOWER_BETTER, ResultUnit.MS); 1517 log.submit(InstrumentationRegistry.getInstrumentation()); 1518 } 1519 1520 private class MockOnRecordPositionUpdateListener 1521 implements OnRecordPositionUpdateListener { MockOnRecordPositionUpdateListener(AudioRecord record)1522 public MockOnRecordPositionUpdateListener(AudioRecord record) { 1523 mAudioRecord = record; 1524 record.setRecordPositionUpdateListener(this); 1525 } 1526 MockOnRecordPositionUpdateListener(AudioRecord record, Handler handler)1527 public MockOnRecordPositionUpdateListener(AudioRecord record, Handler handler) { 1528 mAudioRecord = record; 1529 record.setRecordPositionUpdateListener(this, handler); 1530 } 1531 onMarkerReached(AudioRecord record)1532 public synchronized void onMarkerReached(AudioRecord record) { 1533 if (mIsTestActive) { 1534 int position = getPosition(); 1535 mOnMarkerReachedCalled.add(position); 1536 mMarkerPosition += mMarkerPeriodInFrames; 1537 assertWithMessage("setNotificationMarkerPosition() returns SUCCESS") 1538 .that(mAudioRecord.setNotificationMarkerPosition(mMarkerPosition)) 1539 .isEqualTo(AudioRecord.SUCCESS); 1540 } else { 1541 // see comment on stop() 1542 final long delta = System.currentTimeMillis() - mStopTime; 1543 Log.d(TAG, "onMarkerReached called " + delta + " ms after 
stop"); 1544 fail("onMarkerReached called when not active"); 1545 } 1546 } 1547 onPeriodicNotification(AudioRecord record)1548 public synchronized void onPeriodicNotification(AudioRecord record) { 1549 if (mIsTestActive) { 1550 int position = getPosition(); 1551 mOnPeriodicNotificationCalled.add(position); 1552 } else { 1553 // see comment on stop() 1554 final long delta = System.currentTimeMillis() - mStopTime; 1555 Log.d(TAG, "onPeriodicNotification called " + delta + " ms after stop"); 1556 fail("onPeriodicNotification called when not active"); 1557 } 1558 } 1559 start(int sampleRate)1560 public synchronized void start(int sampleRate) { 1561 mIsTestActive = true; 1562 mSampleRate = sampleRate; 1563 mStartTime = System.currentTimeMillis(); 1564 } 1565 stop()1566 public synchronized void stop() { 1567 // the listener should be stopped some time after AudioRecord is stopped 1568 // as some messages may not yet be posted. 1569 mIsTestActive = false; 1570 mStopTime = System.currentTimeMillis(); 1571 } 1572 getMarkerList()1573 public ArrayList<Integer> getMarkerList() { 1574 return mOnMarkerReachedCalled; 1575 } 1576 getPeriodicList()1577 public ArrayList<Integer> getPeriodicList() { 1578 return mOnPeriodicNotificationCalled; 1579 } 1580 release()1581 public synchronized void release() { 1582 stop(); 1583 mAudioRecord.setRecordPositionUpdateListener(null); 1584 mAudioRecord = null; 1585 } 1586 getPosition()1587 private int getPosition() { 1588 // we don't have mAudioRecord.getRecordPosition(); 1589 // so we fake this by timing. 1590 long delta = System.currentTimeMillis() - mStartTime; 1591 return (int)(delta * mSampleRate / 1000); 1592 } 1593 1594 private long mStartTime; 1595 private long mStopTime; 1596 private int mSampleRate; 1597 private boolean mIsTestActive = true; 1598 private AudioRecord mAudioRecord; 1599 private ArrayList<Integer> mOnMarkerReachedCalled = new ArrayList<Integer>(); 1600 private ArrayList<Integer> mOnPeriodicNotificationCalled = new ArrayList<Integer>(); 1601 } 1602 hasMicrophone()1603 private boolean hasMicrophone() { 1604 return getContext().getPackageManager().hasSystemFeature( 1605 PackageManager.FEATURE_MICROPHONE); 1606 } 1607 isLowRamDevice()1608 private boolean isLowRamDevice() { 1609 return ((ActivityManager) getContext().getSystemService(Context.ACTIVITY_SERVICE)) 1610 .isLowRamDevice(); 1611 } 1612 isLowLatencyDevice()1613 private boolean isLowLatencyDevice() { 1614 return getContext().getPackageManager().hasSystemFeature( 1615 PackageManager.FEATURE_AUDIO_LOW_LATENCY); 1616 } 1617 isProAudioDevice()1618 private boolean isProAudioDevice() { 1619 return getContext().getPackageManager().hasSystemFeature( 1620 PackageManager.FEATURE_AUDIO_PRO); 1621 } 1622 verifyContinuousTimestamps( AudioTimestamp startTs, AudioTimestamp stopTs, int sampleRate, boolean coarse)1623 private void verifyContinuousTimestamps( 1624 AudioTimestamp startTs, AudioTimestamp stopTs, int sampleRate, boolean coarse) 1625 throws Exception { 1626 final long timeDiff = stopTs.nanoTime - startTs.nanoTime; 1627 final long frameDiff = stopTs.framePosition - startTs.framePosition; 1628 final long NANOS_PER_SECOND = 1000000000; 1629 final long timeByFrames = frameDiff * NANOS_PER_SECOND / sampleRate; 1630 final double ratio = (double)timeDiff / timeByFrames; 1631 final double tolerance = (isLowLatencyDevice() ? 0.01 : 0.5) * (coarse ? 3. : 1.); 1632 1633 // Usually the ratio is accurate to one part per thousand or better. 
1634 // Log.d(TAG, "ratio=" + ratio + ", timeDiff=" + timeDiff + ", frameDiff=" + frameDiff + 1635 // ", timeByFrames=" + timeByFrames + ", sampleRate=" + sampleRate); 1636 assertWithMessage("Timestamp rate must match sample rate by ratio") 1637 .that(ratio) 1638 .isWithin(tolerance) 1639 .of(1.); 1640 } 1641 1642 // remove if AudioTimestamp has a better toString(). printTimestamp(String s, AudioTimestamp ats)1643 private void printTimestamp(String s, AudioTimestamp ats) { 1644 Log.d(TAG, s + ": pos: " + ats.framePosition + " time: " + ats.nanoTime); 1645 } 1646 readDataTimed(AudioRecord recorder, long durationMillis, ShortBuffer out)1647 private static void readDataTimed(AudioRecord recorder, long durationMillis, 1648 ShortBuffer out) throws IOException { 1649 final short[] buffer = new short[1024]; 1650 final long startTimeMillis = SystemClock.uptimeMillis(); 1651 final long stopTimeMillis = startTimeMillis + durationMillis; 1652 while (SystemClock.uptimeMillis() < stopTimeMillis) { 1653 final int readCount = recorder.read(buffer, 0, buffer.length); 1654 if (readCount <= 0) { 1655 return; 1656 } 1657 out.put(buffer, 0, readCount); 1658 } 1659 } 1660 isAudioSilent(ShortBuffer buffer)1661 private static boolean isAudioSilent(ShortBuffer buffer) { 1662 // Always need some bytes read 1663 assertWithMessage("Buffer should have some data") 1664 .that(buffer.position()) 1665 .isGreaterThan(0); 1666 1667 // It is possible that the transition from empty to non empty bytes 1668 // happened in the middle of the read data due to the async nature of 1669 // the system. Therefore, we look for the transitions from non-empty 1670 // to empty and from empty to non-empty values for robustness. 1671 int totalSilenceCount = 0; 1672 final int valueCount = buffer.position(); 1673 for (int i = valueCount - 1; i >= 0; i--) { 1674 final short value = buffer.get(i); 1675 if (value == 0) { 1676 totalSilenceCount++; 1677 } 1678 } 1679 return totalSilenceCount > valueCount / 2; 1680 } 1681 getContext()1682 private static Context getContext() { 1683 return InstrumentationRegistry.getInstrumentation().getTargetContext(); 1684 } 1685 1686 /* 1687 * Microphone Direction API tests 1688 */ 1689 @Test testSetPreferredMicrophoneDirection()1690 public void testSetPreferredMicrophoneDirection() { 1691 if (!hasMicrophone()) { 1692 return; 1693 } 1694 1695 try { 1696 boolean success = 1697 mAudioRecord.setPreferredMicrophoneDirection( 1698 MicrophoneDirection.MIC_DIRECTION_TOWARDS_USER); 1699 1700 // Can't actually test this as HAL may not have implemented it 1701 // Just verify that it doesn't crash or throw an exception 1702 // assertTrue(success); 1703 } catch (Exception ex) { 1704 Log.e(TAG, "testSetPreferredMicrophoneDirection() exception:" + ex); 1705 throw(ex); 1706 } 1707 return; 1708 } 1709 1710 @Test testSetPreferredMicrophoneFieldDimension()1711 public void testSetPreferredMicrophoneFieldDimension() { 1712 if (!hasMicrophone()) { 1713 return; 1714 } 1715 1716 try { 1717 boolean success = mAudioRecord.setPreferredMicrophoneFieldDimension(1.0f); 1718 1719 // Can't actually test this as HAL may not have implemented it 1720 // Just verify that it doesn't crash or throw an exception 1721 // assertTrue(success); 1722 } catch (Exception ex) { 1723 Log.e(TAG, "testSetPreferredMicrophoneFieldDimension() exception:" + ex); 1724 throw(ex); 1725 } 1726 return; 1727 } 1728 1729 /** 1730 * Test AudioRecord Builder error handling. 
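     * Covers out-of-range buffer sizes, invalid audio sources, an invalid session id, null
     * specialty configurations, an unsupported encoding (DRA), out-of-range sample rates, and
     * an invalid channel mask, checking the exception type expected for each.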
1731 * 1732 * @throws Exception 1733 */ 1734 @Test testAudioRecordBuilderError()1735 public void testAudioRecordBuilderError() throws Exception { 1736 if (!hasMicrophone()) { 1737 return; 1738 } 1739 1740 final AudioRecord[] audioRecord = new AudioRecord[1]; // pointer to AudioRecord. 1741 final int BIGNUM = Integer.MAX_VALUE; // large value that should be invalid. 1742 final int INVALID_SESSION_ID = 1024; // can never occur (wrong type in 3 lsbs) 1743 final int INVALID_CHANNEL_MASK = -1; 1744 1745 try { 1746 // NOTE: 1747 // AudioFormat tested in AudioFormatTest#testAudioFormatBuilderError. 1748 1749 // We must be able to create the AudioRecord. 1750 audioRecord[0] = new AudioRecord.Builder().build(); 1751 audioRecord[0].release(); 1752 1753 // Out of bounds buffer size. A large size will fail in AudioRecord creation. 1754 assertThrows(UnsupportedOperationException.class, () -> { 1755 audioRecord[0] = new AudioRecord.Builder() 1756 .setBufferSizeInBytes(BIGNUM) 1757 .build(); 1758 }); 1759 1760 // 0 and negative buffer size throw IllegalArgumentException 1761 for (int bufferSize : new int[] {-BIGNUM, -1, 0}) { 1762 assertThrows(IllegalArgumentException.class, () -> { 1763 audioRecord[0] = new AudioRecord.Builder() 1764 .setBufferSizeInBytes(bufferSize) 1765 .build(); 1766 }); 1767 } 1768 1769 assertThrows(IllegalArgumentException.class, () -> { 1770 audioRecord[0] = new AudioRecord.Builder() 1771 .setAudioSource(BIGNUM) 1772 .build(); 1773 }); 1774 1775 assertThrows(IllegalArgumentException.class, () -> { 1776 audioRecord[0] = new AudioRecord.Builder() 1777 .setAudioSource(-2) 1778 .build(); 1779 }); 1780 1781 // Invalid session id that is positive. 1782 // (logcat error message vague) 1783 assertThrows(UnsupportedOperationException.class, () -> { 1784 audioRecord[0] = new AudioRecord.Builder() 1785 .setSessionId(INVALID_SESSION_ID) 1786 .build(); 1787 }); 1788 1789 // Specialty AudioRecord tests 1790 assertThrows(NullPointerException.class, () -> { 1791 audioRecord[0] = new AudioRecord.Builder() 1792 .setAudioPlaybackCaptureConfig(null) 1793 .build(); 1794 }); 1795 1796 assertThrows(NullPointerException.class, () -> { 1797 audioRecord[0] = new AudioRecord.Builder() 1798 .setContext(null) 1799 .build(); 1800 }); 1801 1802 // Bad audio encoding DRA expected unsupported. 1803 try { 1804 audioRecord[0] = new AudioRecord.Builder() 1805 .setAudioFormat(new AudioFormat.Builder() 1806 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 1807 .setEncoding(AudioFormat.ENCODING_DRA) 1808 .build()) 1809 .build(); 1810 // Don't throw an exception, maybe it is supported somehow, but warn. 1811 Log.w(TAG, "ENCODING_DRA is expected to be unsupported"); 1812 audioRecord[0].release(); 1813 audioRecord[0] = null; 1814 } catch (UnsupportedOperationException e) { 1815 ; // OK expected 1816 } 1817 1818 // Sample rate out of bounds. 1819 // System levels caught on AudioFormat. 1820 for (int sampleRate : new int[] { 1821 BIGNUM, 1822 AudioSystem.SAMPLE_RATE_HZ_MIN - 1, 1823 AudioSystem.SAMPLE_RATE_HZ_MAX + 1}) { 1824 assertThrows(IllegalArgumentException.class, () -> { 1825 audioRecord[0] = new AudioRecord.Builder() 1826 .setAudioFormat(new AudioFormat.Builder() 1827 .setSampleRate(sampleRate) 1828 .build()) 1829 .build(); 1830 }); 1831 } 1832 1833 // Invalid channel mask 1834 // This is a UOE for AudioRecord vs IAE for AudioTrack. 
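            // INVALID_CHANNEL_MASK is -1, i.e. every mask bit set, which should never describe
            // a usable input channel configuration, so the Builder is expected to reject it.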
1835 assertThrows(UnsupportedOperationException.class, () -> { 1836 audioRecord[0] = new AudioRecord.Builder() 1837 .setAudioFormat(new AudioFormat.Builder() 1838 .setChannelMask(INVALID_CHANNEL_MASK) 1839 .build()) 1840 .build(); 1841 }); 1842 } finally { 1843 // Did we successfully complete for some reason but did not 1844 // release? 1845 if (audioRecord[0] != null) { 1846 audioRecord[0].release(); 1847 audioRecord[0] = null; 1848 } 1849 } 1850 } 1851 1852 @Test testPrivacySensitiveBuilder()1853 public void testPrivacySensitiveBuilder() throws Exception { 1854 if (!hasMicrophone()) { 1855 return; 1856 } 1857 1858 for (final boolean privacyOn : new boolean[] { false, true} ) { 1859 AudioRecord record = new AudioRecord.Builder() 1860 .setAudioFormat(new AudioFormat.Builder() 1861 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1862 .setSampleRate(8000) 1863 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1864 .build()) 1865 .setPrivacySensitive(privacyOn) 1866 .build(); 1867 assertWithMessage("Builder with privacyOn " + privacyOn + " is set correctly") 1868 .that(record.isPrivacySensitive()) 1869 .isEqualTo(privacyOn); 1870 record.release(); 1871 } 1872 } 1873 1874 @Test testPrivacySensitiveDefaults()1875 public void testPrivacySensitiveDefaults() throws Exception { 1876 if (!hasMicrophone()) { 1877 return; 1878 } 1879 1880 AudioRecord record = new AudioRecord.Builder() 1881 .setAudioSource(MediaRecorder.AudioSource.MIC) 1882 .setAudioFormat(new AudioFormat.Builder() 1883 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1884 .setSampleRate(8000) 1885 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1886 .build()) 1887 .build(); 1888 assertWithMessage("AudioSource.MIC should not be privacy sensitive") 1889 .that(record.isPrivacySensitive()).isFalse(); 1890 record.release(); 1891 1892 record = new AudioRecord.Builder() 1893 .setAudioSource(MediaRecorder.AudioSource.VOICE_COMMUNICATION) 1894 .setAudioFormat(new AudioFormat.Builder() 1895 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1896 .setSampleRate(8000) 1897 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1898 .build()) 1899 .build(); 1900 assertWithMessage("AudioSource.VOICE_COMMUNICATION should be privacy sensitive") 1901 .that(record.isPrivacySensitive()).isTrue(); 1902 record.release(); 1903 } 1904 1905 @Test testSetLogSessionId()1906 public void testSetLogSessionId() throws Exception { 1907 if (!hasMicrophone()) { 1908 return; 1909 } 1910 AudioRecord audioRecord = null; 1911 try { 1912 audioRecord = new AudioRecord.Builder() 1913 .setAudioFormat(new AudioFormat.Builder() 1914 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1915 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1916 .build()) 1917 .build(); 1918 audioRecord.setLogSessionId(LogSessionId.LOG_SESSION_ID_NONE); // should not throw. 1919 assertWithMessage("Can set LogSessionId.LOG_SESSION_ID_NONE") 1920 .that(audioRecord.getLogSessionId()) 1921 .isEqualTo(LogSessionId.LOG_SESSION_ID_NONE); 1922 1923 final MediaMetricsManager mediaMetricsManager = 1924 getContext().getSystemService(MediaMetricsManager.class); 1925 final RecordingSession recordingSession = 1926 mediaMetricsManager.createRecordingSession(); 1927 audioRecord.setLogSessionId(recordingSession.getSessionId()); 1928 assertWithMessage("Can set recordingSession sessionId") 1929 .that(audioRecord.getLogSessionId()) 1930 .isEqualTo(recordingSession.getSessionId()); 1931 1932 // record some data to generate a log entry. 
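            // getSampleRate() / 2 mono 16-bit samples is half a second of audio, which should
            // be enough activity for the recording session to be logged.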
1933 short data[] = new short[audioRecord.getSampleRate() / 2]; 1934 audioRecord.startRecording(); 1935 audioRecord.read(data, 0 /* offsetInShorts */, data.length); 1936 audioRecord.stop(); 1937 1938 // Also can check the mediametrics dumpsys to validate logs generated. 1939 } finally { 1940 if (audioRecord != null) { 1941 audioRecord.release(); 1942 } 1943 } 1944 } 1945 1946 @Test testCompressedCaptureAAC()1947 public void testCompressedCaptureAAC() throws Exception { 1948 final int ENCODING = AudioFormat.ENCODING_AAC_LC; 1949 final String MIMETYPE = MediaFormat.MIMETYPE_AUDIO_AAC; 1950 final int BUFFER_SIZE = 16000; 1951 if (!hasMicrophone()) { 1952 return; 1953 } 1954 AudioDeviceInfo[] devices = mAudioManager.getDevices(AudioManager.GET_DEVICES_INPUTS); 1955 // TODO test multiple supporting devices if available 1956 AudioDeviceInfo supportingDevice = null; 1957 for (AudioDeviceInfo device : devices) { 1958 for (int encoding : device.getEncodings()) { 1959 if (encoding == ENCODING) { 1960 supportingDevice = device; 1961 break; 1962 } 1963 } 1964 if (supportingDevice != null) break; 1965 } 1966 if (supportingDevice == null) { 1967 Log.i(TAG, "Compressed audio (AAC) not supported"); 1968 return; // Compressed Audio is not supported 1969 } 1970 Log.i(TAG, "Compressed audio (AAC) supported"); 1971 AudioRecord audioRecord = null; 1972 try { 1973 audioRecord = new AudioRecord.Builder() 1974 .setAudioFormat(new AudioFormat.Builder() 1975 .setEncoding(ENCODING) 1976 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1977 .build()) 1978 .build(); 1979 audioRecord.setPreferredDevice(supportingDevice); 1980 class ByteBufferImpl extends StreamUtils.ByteBufferStream { 1981 @Override 1982 public ByteBuffer read() throws IOException { 1983 if (mCount < 1 /* only one buffer */) { 1984 ++mCount; 1985 return mByteBuffer; 1986 } 1987 return null; 1988 } 1989 public ByteBuffer mByteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE); 1990 private int mCount = 0; 1991 } 1992 1993 ByteBufferImpl byteBufferImpl = new ByteBufferImpl(); 1994 audioRecord.startRecording(); 1995 audioRecord.read(byteBufferImpl.mByteBuffer, BUFFER_SIZE); 1996 audioRecord.stop(); 1997 // Attempt to decode compressed data 1998 //sample rate/ch count not needed 1999 final MediaFormat format = MediaFormat.createAudioFormat(MIMETYPE, 0, 0); 2000 final StreamUtils.MediaCodecStream decodingStream 2001 = new StreamUtils.MediaCodecStream(byteBufferImpl, format, false); 2002 ByteBuffer decoded = decodingStream.read(); 2003 int totalDecoded = 0; 2004 while (decoded != null) { 2005 // TODO validate actual data 2006 totalDecoded += decoded.remaining(); 2007 decoded = decodingStream.read(); 2008 } 2009 Log.i(TAG, "Decoded size:" + String.valueOf(totalDecoded)); 2010 // TODO rethrow following exceptions on verification 2011 } catch (UnsupportedOperationException e) { 2012 Log.w(TAG, "Compressed AudioRecord unable to be built"); 2013 } catch (IllegalStateException e) { 2014 Log.w(TAG, "Compressed AudioRecord unable to be started"); 2015 } finally { 2016 if (audioRecord != null) { 2017 audioRecord.release(); 2018 } 2019 } 2020 } 2021 } 2022