/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.media.audio.cts;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.testng.Assert.assertThrows;

import android.app.ActivityManager;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioMetadataReadMap;
import android.media.AudioPresentation;
import android.media.AudioSystem;
import android.media.AudioTimestamp;
import android.media.AudioTrack;
import android.media.PlaybackParams;
import android.media.cts.AudioHelper;
import android.media.metrics.LogSessionId;
import android.media.metrics.MediaMetricsManager;
import android.media.metrics.PlaybackSession;
import android.os.PersistableBundle;
import android.platform.test.annotations.AppModeSdkSandbox;
import android.platform.test.annotations.Presubmit;
import android.util.Log;

import androidx.test.InstrumentationRegistry;
import androidx.test.filters.LargeTest;
import androidx.test.runner.AndroidJUnit4;

import com.android.compatibility.common.util.NonMainlineTest;

import org.junit.Test;
import org.junit.runner.RunWith;

import java.nio.ByteBuffer;
import java.nio.FloatBuffer;
import java.nio.ShortBuffer;
import java.util.concurrent.Executor;

@NonMainlineTest
@AppModeSdkSandbox(reason = "Allow test in the SDK sandbox (does not prevent other modes).")
@RunWith(AndroidJUnit4.class)
public class AudioTrackTest {
    private String TAG = "AudioTrackTest";
    private final long WAIT_MSEC = 200;
    private final int OFFSET_DEFAULT = 0;
    private final int OFFSET_NEGATIVE = -10;

    private void log(String testName, String message) {
        Log.v(TAG, "[" + testName + "] " + message);
    }

    private void loge(String testName, String message) {
        Log.e(TAG, "[" + testName + "] " + message);
    }

    // -----------------------------------------------------------------
    // private class to hold test results
    private static class TestResults {
        public boolean mResult = false;
        public String mResultLog = "";

        public TestResults(boolean b, String s) {
            mResult = b;
            mResultLog = s;
        }
    }

    // -----------------------------------------------------------------
    // generic test methods
    public TestResults constructorTestMultiSampleRate(
    // parameters tested by this method
            int _inTest_streamType, int _inTest_mode, int _inTest_config, int _inTest_format,
            // parameter-dependent expected results
            int _expected_stateForMode) {

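        // Each sample rate below is tried with its getMinBufferSize() result; the expected
        // post-construction state is supplied by the caller and depends on the mode
        // (STATE_INITIALIZED for MODE_STREAM, STATE_NO_STATIC_DATA for MODE_STATIC before
        // any data has been written).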
        int[] testSampleRates = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 };
        String failedRates = "Failure for rate(s): ";
        boolean localRes, finalRes = true;

        for (int i = 0; i < testSampleRates.length; i++) {
            AudioTrack track = null;
            try {
                track = new AudioTrack(_inTest_streamType, testSampleRates[i], _inTest_config,
                        _inTest_format, AudioTrack.getMinBufferSize(testSampleRates[i],
                                _inTest_config, _inTest_format), _inTest_mode);
            } catch (IllegalArgumentException iae) {
                Log.e("MediaAudioTrackTest", "[ constructorTestMultiSampleRate ] exception at SR "
                        + testSampleRates[i] + ": \n" + iae);
                localRes = false;
            }
            if (track != null) {
                localRes = (track.getState() == _expected_stateForMode);
                track.release();
            } else {
                localRes = false;
            }

            if (!localRes) {
                // log the error for the test runner
                failedRates += Integer.toString(testSampleRates[i]) + "Hz ";
                // log the error for logcat
                log("constructorTestMultiSampleRate", "failed to construct "
                        + "AudioTrack(streamType="
                        + _inTest_streamType
                        + ", sampleRateInHz="
                        + testSampleRates[i]
                        + ", channelConfig="
                        + _inTest_config
                        + ", audioFormat="
                        + _inTest_format
                        + ", bufferSizeInBytes="
                        + AudioTrack.getMinBufferSize(testSampleRates[i], _inTest_config,
                                _inTest_format) + ", mode=" + _inTest_mode);
                // mark test as failed
                finalRes = false;
            }
        }
        return new TestResults(finalRes, failedRates);
    }

    // -----------------------------------------------------------------
    // AUDIOTRACK TESTS:
    // ----------------------------------

    // -----------------------------------------------------------------
    // AudioTrack constructor and AudioTrack.getMinBufferSize(...) for 16bit PCM
    // ----------------------------------

    // Test case 1: constructor for streaming AudioTrack, mono, 16bit at misc
    // valid sample rates
    @Test
    public void testConstructorMono16MusicStream() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_INITIALIZED);

        assertTrue("testConstructorMono16MusicStream: " + res.mResultLog, res.mResult);
    }

    // Test case 2: constructor for streaming AudioTrack, stereo, 16bit at misc
    // valid sample rates
    @Test
    public void testConstructorStereo16MusicStream() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
                AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_INITIALIZED);

        assertTrue("testConstructorStereo16MusicStream: " + res.mResultLog, res.mResult);
    }

    // Test case 3: constructor for static AudioTrack, mono, 16bit at misc valid
    // sample rates
    @Test
    public void testConstructorMono16MusicStatic() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_NO_STATIC_DATA);

        assertTrue("testConstructorMono16MusicStatic: " + res.mResultLog, res.mResult);
    }

    // Test case 4: constructor for static AudioTrack, stereo, 16bit at misc
    // valid sample rates
    @Test
    public void testConstructorStereo16MusicStatic() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
                AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_NO_STATIC_DATA);

        assertTrue("testConstructorStereo16MusicStatic: " + res.mResultLog, res.mResult);
    }

    // -----------------------------------------------------------------
    // AudioTrack constructor and AudioTrack.getMinBufferSize(...) for 8bit PCM
    // ----------------------------------

    // Test case 1: constructor for streaming AudioTrack, mono, 8bit at misc
    // valid sample rates
    @Test
    public void testConstructorMono8MusicStream() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_INITIALIZED);

        assertTrue("testConstructorMono8MusicStream: " + res.mResultLog, res.mResult);
    }

    // Test case 2: constructor for streaming AudioTrack, stereo, 8bit at misc
    // valid sample rates
    @Test
    public void testConstructorStereo8MusicStream() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
                AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_INITIALIZED);

        assertTrue("testConstructorStereo8MusicStream: " + res.mResultLog, res.mResult);
    }

    // Test case 3: constructor for static AudioTrack, mono, 8bit at misc valid
    // sample rates
    @Test
    public void testConstructorMono8MusicStatic() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_NO_STATIC_DATA);

        assertTrue("testConstructorMono8MusicStatic: " + res.mResultLog, res.mResult);
    }

    // Test case 4: constructor for static AudioTrack, stereo, 8bit at misc
    // valid sample rates
    @Test
    public void testConstructorStereo8MusicStatic() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
                AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_NO_STATIC_DATA);

        assertTrue("testConstructorStereo8MusicStatic: " + res.mResultLog, res.mResult);
    }

    // -----------------------------------------------------------------
    // AudioTrack constructor for all stream types
    // ----------------------------------

    // Test case 1: constructor for all stream types
    @Test
    public void testConstructorStreamType() throws Exception {
        // constants for test
        final int TYPE_TEST_SR = 22050;
        final int TYPE_TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TYPE_TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TYPE_TEST_MODE = AudioTrack.MODE_STREAM;
        final int[] STREAM_TYPES = { AudioManager.STREAM_ALARM, AudioManager.STREAM_MUSIC,
                AudioManager.STREAM_NOTIFICATION, AudioManager.STREAM_RING,
                AudioManager.STREAM_SYSTEM, AudioManager.STREAM_VOICE_CALL };
        final String[] STREAM_NAMES = { "STREAM_ALARM", "STREAM_MUSIC", "STREAM_NOTIFICATION",
                "STREAM_RING", "STREAM_SYSTEM", "STREAM_VOICE_CALL" };

        boolean localTestRes = true;
        AudioTrack track = null;
        // test: loop constructor on all stream types
        for (int i = 0; i < STREAM_TYPES.length; i++) {
            try {
                // -------- initialization --------------
                track = new AudioTrack(STREAM_TYPES[i], TYPE_TEST_SR, TYPE_TEST_CONF,
                        TYPE_TEST_FORMAT, AudioTrack.getMinBufferSize(TYPE_TEST_SR, TYPE_TEST_CONF,
                                TYPE_TEST_FORMAT), TYPE_TEST_MODE);
            } catch (IllegalArgumentException iae) {
                loge("testConstructorStreamType", "exception for stream type " + STREAM_NAMES[i]
                        + ": " + iae);
                localTestRes = false;
            }
            // -------- test --------------
            if (track != null) {
                if (track.getState() != AudioTrack.STATE_INITIALIZED) {
                    localTestRes = false;
                    Log.e("MediaAudioTrackTest",
                            "[ testConstructorStreamType ] failed for stream type "
                                    + STREAM_NAMES[i]);
                }
                // -------- tear down --------------
                track.release();
            } else {
                localTestRes = false;
            }
        }

        assertTrue("testConstructorStreamType", localTestRes);
    }

    // -----------------------------------------------------------------
    // AudioTrack construction with Builder
    // ----------------------------------

    // Test case 1: build AudioTrack with default parameters, test documented default params
    @Test
    public void testBuilderDefault() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderDefault";
        final int expectedDefaultEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedDefaultRate =
                AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC);
        final int expectedDefaultChannels = AudioFormat.CHANNEL_OUT_STEREO;
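        // These expectations follow the documented AudioTrack.Builder defaults: when no
        // AudioFormat is specified, the track is expected to use PCM 16 bit, stereo output,
        // and the device's native output sample rate.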
        // use Builder
        final int buffSizeInBytes = AudioTrack.getMinBufferSize(
                expectedDefaultRate, expectedDefaultChannels, expectedDefaultEncoding);
        final AudioTrack track = new AudioTrack.Builder()
                .setBufferSizeInBytes(buffSizeInBytes)
                .build();
        // save results
        final int observedState = track.getState();
        final int observedFormat = track.getAudioFormat();
        final int observedChannelConf = track.getChannelConfiguration();
        final int observedRate = track.getSampleRate();
        // release track before the test exits (either successfully or with an exception)
        track.release();
        // compare results
        assertEquals(TEST_NAME + ": Track initialized", AudioTrack.STATE_INITIALIZED,
                observedState);
        assertEquals(TEST_NAME + ": Default track encoding", expectedDefaultEncoding,
                observedFormat);
        assertEquals(TEST_NAME + ": Default track channels", expectedDefaultChannels,
                observedChannelConf);
    }

    // Test case 2: build AudioTrack with AudioFormat, test it's used
    @Test
    public void testBuilderFormat() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderFormat";
        final int TEST_RATE = 32000;
        final int TEST_CHANNELS = AudioFormat.CHANNEL_OUT_STEREO;
        // use Builder
        final int buffSizeInBytes = AudioTrack.getMinBufferSize(
                TEST_RATE, TEST_CHANNELS, AudioFormat.ENCODING_PCM_16BIT);
        final AudioTrack track = new AudioTrack.Builder()
                .setAudioAttributes(new AudioAttributes.Builder().build())
                .setBufferSizeInBytes(buffSizeInBytes)
                .setAudioFormat(new AudioFormat.Builder()
                        .setChannelMask(TEST_CHANNELS).setSampleRate(TEST_RATE).build())
                .build();
        // save results
        final int observedState = track.getState();
        final int observedChannelConf = track.getChannelConfiguration();
        final int observedRate = track.getSampleRate();
        // release track before the test exits (either successfully or with an exception)
        track.release();
        // compare results
        assertEquals(TEST_NAME + ": Track initialized", AudioTrack.STATE_INITIALIZED,
                observedState);
        assertEquals(TEST_NAME + ": Track channels", TEST_CHANNELS, observedChannelConf);
        assertEquals(TEST_NAME + ": Track sample rate", TEST_RATE, observedRate);
    }

    // Test case 3: build AudioTrack with session ID, test it's used
    @Test
    public void testBuilderSession() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderSession";
        // generate a session ID
        final int expectedSessionId = new AudioManager(getContext()).generateAudioSessionId();
        // use builder
        final AudioTrack track = new AudioTrack.Builder()
                .setSessionId(expectedSessionId)
                .build();
        // save results
        final int observedSessionId = track.getAudioSessionId();
        // release track before the test exits (either successfully or with an exception)
        track.release();
        // compare results
        assertEquals(TEST_NAME + ": Assigned track session ID", expectedSessionId,
                observedSessionId);
    }

    // Test case 4: build AudioTrack with AudioAttributes built from stream type, test it's used
    @Test
    public void testBuilderAttributesStream() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderAttributesStream";
        //     use a stream type documented in AudioAttributes.Builder.setLegacyStreamType(int)
        final int expectedStreamType = AudioManager.STREAM_ALARM;
        final int expectedContentType = AudioAttributes.CONTENT_TYPE_SPEECH;
        final AudioAttributes attributes = new AudioAttributes.Builder()
                .setLegacyStreamType(expectedStreamType)
                .setContentType(expectedContentType)
                .build();
        // use builder
        final AudioTrack track = new AudioTrack.Builder()
                .setAudioAttributes(attributes)
                .build();
        // save results
        final int observedStreamType = track.getStreamType();
        final AudioAttributes observedAttributes = track.getAudioAttributes();

        // release track before the test exits (either successfully or with an exception)
        track.release();
        // compare results
        assertEquals(TEST_NAME + ": track stream type", expectedStreamType, observedStreamType);
        // attributes and observedAttributes should satisfy the overloaded equals.
        assertEquals(TEST_NAME + ": observed attributes must match",
                attributes, observedAttributes);
        //    also test content type was preserved in the attributes even though they
        //     were first configured with a legacy stream type
        assertEquals(TEST_NAME + ": attributes content type", expectedContentType,
                attributes.getContentType());
    }

    // Test case 5: build AudioTrack with attributes and performance mode
    @Test
    public void testBuilderAttributesPerformanceMode() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderAttributesPerformanceMode";
        final int testPerformanceModes[] = new int[] {
            AudioTrack.PERFORMANCE_MODE_NONE,
            AudioTrack.PERFORMANCE_MODE_LOW_LATENCY,
            AudioTrack.PERFORMANCE_MODE_POWER_SAVING,
        };
        // construct various attributes with different preset performance modes.
        final AudioAttributes testAttributes[] = new AudioAttributes[] {
            new AudioAttributes.Builder().build(),
            new AudioAttributes.Builder().setFlags(AudioAttributes.FLAG_LOW_LATENCY).build(),
            new AudioAttributes.Builder().setFlags(AudioAttributes.FLAG_DEEP_BUFFER).build(),
        };
        for (int performanceMode : testPerformanceModes) {
            for (AudioAttributes attributes : testAttributes) {
                final AudioTrack track = new AudioTrack.Builder()
                    .setPerformanceMode(performanceMode)
                    .setAudioAttributes(attributes)
                    .build();
                // save results
                final int actualPerformanceMode = track.getPerformanceMode();
                // release track before the test exits
                track.release();
                final String result = "Attribute flags: " + attributes.getAllFlags()
                        + " set performance mode: " + performanceMode
                        + " actual performance mode: " + actualPerformanceMode;
                Log.d(TEST_NAME, result);
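                // The requested mode is not guaranteed to be granted: the device may fall back
                // to PERFORMANCE_MODE_NONE, and requesting NONE leaves the result up to the
                // attribute flags, so all of those outcomes are accepted below.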
                assertTrue(TEST_NAME + ": " + result,
                        actualPerformanceMode == performanceMode  // either successful
                        || actualPerformanceMode == AudioTrack.PERFORMANCE_MODE_NONE // or none
                        || performanceMode == AudioTrack.PERFORMANCE_MODE_NONE);
            }
        }
    }

    // Test case 6: build AudioTrack with Context and otherwise default arguments, expect success.
    @Test
    public void testBuilderWithContext() {
        final int expectedDefaultEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedDefaultRate =
                AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC);
        final int expectedDefaultChannels = AudioFormat.CHANNEL_OUT_STEREO;

        final AudioTrack track = new AudioTrack.Builder()
                .setContext(getContext())
                .build();

        assertEquals(AudioTrack.STATE_INITIALIZED, track.getState());
        assertEquals(expectedDefaultEncoding, track.getAudioFormat());
        assertEquals(expectedDefaultRate, track.getSampleRate());
        assertEquals(expectedDefaultChannels, track.getChannelConfiguration());
    }

    // Test case 7: build AudioTrack with a null Context, expect a NullPointerException.
    @Test
    public void testBuilderWithNullContext() {
        assertThrows(NullPointerException.class, () -> new AudioTrack.Builder()
                .setContext(/*context=*/null)
                .build());
    }

    // -----------------------------------------------------------------
    // Playback head position
    // ----------------------------------

    // Test case 1: getPlaybackHeadPosition() at 0 after initialization
    @Test
    public void testPlaybackHeadPositionAfterInit() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterInit";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT), TEST_MODE);
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.getPlaybackHeadPosition() == 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 2: getPlaybackHeadPosition() increases after play()
    @Test
    public void testPlaybackHeadPositionIncrease() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionIncrease";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
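        // Give the mixer some time to consume the queued frames so the head position can advance.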
        Thread.sleep(100);
        log(TEST_NAME, "position =" + track.getPlaybackHeadPosition());
        assertTrue(TEST_NAME, track.getPlaybackHeadPosition() > 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 3: getPlaybackHeadPosition() is 0 after flush();
    @Test
    public void testPlaybackHeadPositionAfterFlush() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterFlush";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        Thread.sleep(WAIT_MSEC);
        track.stop();
        track.flush();
        log(TEST_NAME, "position =" + track.getPlaybackHeadPosition());
        assertTrue(TEST_NAME, track.getPlaybackHeadPosition() == 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 4: getPlaybackHeadPosition() is 0 after stop();
    @Test
    public void testPlaybackHeadPositionAfterStop() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterStop";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        final int TEST_LOOP_CNT = 10;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        Thread.sleep(WAIT_MSEC);
        track.stop();
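        // The head position is not expected to reset synchronously with stop(), so poll
        // until it reads back as 0 or TEST_LOOP_CNT attempts have been made.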
        int count = 0;
        int pos;
        do {
            Thread.sleep(WAIT_MSEC);
            pos = track.getPlaybackHeadPosition();
            count++;
        } while ((pos != 0) && (count < TEST_LOOP_CNT));
        log(TEST_NAME, "position =" + pos + ", read count =" + count);
        assertTrue(TEST_NAME, pos == 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 5: getPlaybackHeadPosition() is > 0 after play(); pause();
    @Test
    public void testPlaybackHeadPositionAfterPause() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterPause";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        Thread.sleep(100);
        track.pause();
        int pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position =" + pos);
        assertTrue(TEST_NAME, pos > 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 6: getPlaybackHeadPosition() remains 0 after pause(); flush(); play();
    @Test
    public void testPlaybackHeadPositionAfterFlushAndPlay() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterFlushAndPlay";
        final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        final int TEST_SR = AudioTrack.getNativeOutputSampleRate(TEST_STREAM_TYPE);

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        Thread.sleep(100);
        track.pause();

        int pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position after pause =" + pos);
        assertTrue(TEST_NAME, pos > 0);

        track.flush();
        pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position after flush =" + pos);
        assertTrue(TEST_NAME, pos == 0);

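        // No data has been written since the flush discarded the queued buffers, so the
        // head position is expected to stay at 0 even after playback is restarted.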
        track.play();
        pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position after play =" + pos);
        assertTrue(TEST_NAME, pos == 0);

        Thread.sleep(100);
        pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position after 100 ms sleep =" + pos);
        assertTrue(TEST_NAME, pos == 0);
        // -------- tear down --------------
        track.release();
    }

    // -----------------------------------------------------------------
    // Playback properties
    // ----------------------------------

    // Common code for the testSetStereoVolume* and testSetVolume* tests
    private void testSetVolumeCommon(String testName, float vol, boolean isStereo)
            throws Exception {
        // constants for test
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        if (isStereo) {
            // TODO to really test this, do a pan instead of using same value for left and right
            assertTrue(testName, track.setStereoVolume(vol, vol) == AudioTrack.SUCCESS);
        } else {
            assertTrue(testName, track.setVolume(vol) == AudioTrack.SUCCESS);
        }
        // -------- tear down --------------
        track.release();
    }

    // Test case 1: setStereoVolume() with max volume returns SUCCESS
    @Test
    public void testSetStereoVolumeMax() throws Exception {
        final String TEST_NAME = "testSetStereoVolumeMax";
        float maxVol = AudioTrack.getMaxVolume();
        testSetVolumeCommon(TEST_NAME, maxVol, true /*isStereo*/);
    }

    // Test case 2: setStereoVolume() with min volume returns SUCCESS
    @Test
    public void testSetStereoVolumeMin() throws Exception {
        final String TEST_NAME = "testSetStereoVolumeMin";
        float minVol = AudioTrack.getMinVolume();
        testSetVolumeCommon(TEST_NAME, minVol, true /*isStereo*/);
    }

    // Test case 3: setStereoVolume() with mid volume returns SUCCESS
    @Test
    public void testSetStereoVolumeMid() throws Exception {
        final String TEST_NAME = "testSetStereoVolumeMid";
        float midVol = (AudioTrack.getMaxVolume() - AudioTrack.getMinVolume()) / 2;
        testSetVolumeCommon(TEST_NAME, midVol, true /*isStereo*/);
    }

    // Test case 4: setPlaybackRate() with half the content rate returns SUCCESS
    @Test
    public void testSetPlaybackRate() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackRate";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.play();
        assertTrue(TEST_NAME, track.setPlaybackRate((int) (TEST_SR / 2)) == AudioTrack.SUCCESS);
        // -------- tear down --------------
        track.release();
    }

    // Test case 5: setPlaybackRate(0) returns bad value error
    @Test
    public void testSetPlaybackRateZero() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackRateZero";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                minBuffSize, TEST_MODE);
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.setPlaybackRate(0) == AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // Test case 6: setPlaybackRate() accepts values twice the output sample
    // rate
    @Test
    public void testSetPlaybackRateTwiceOutputSR() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackRateTwiceOutputSR";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        int outputSR = AudioTrack.getNativeOutputSampleRate(TEST_STREAM_TYPE);
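        // setPlaybackRate() documents its valid range as extending up to twice the native
        // output sample rate, so a rate of 2 * outputSR is expected to be accepted.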
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.play();
        assertTrue(TEST_NAME, track.setPlaybackRate(2 * outputSR) == AudioTrack.SUCCESS);
        // -------- tear down --------------
        track.release();
    }

    // Test case 7: setPlaybackRate() and retrieve value, should be the same for
    // half the content SR
    @Test
    public void testSetGetPlaybackRate() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetGetPlaybackRate";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.play();
        track.setPlaybackRate((int) (TEST_SR / 2));
        assertTrue(TEST_NAME, track.getPlaybackRate() == (int) (TEST_SR / 2));
        // -------- tear down --------------
        track.release();
    }

    // Test case 8: setPlaybackRate() invalid operation if track not initialized
    @Test
    public void testSetPlaybackRateUninit() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackRateUninit";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                minBuffSize, TEST_MODE);
        // -------- test --------------
        assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
        assertEquals(TEST_NAME, AudioTrack.ERROR_INVALID_OPERATION,
                track.setPlaybackRate(TEST_SR / 2));
        // -------- tear down --------------
        track.release();
    }

    // Test case 9: setVolume() with max volume returns SUCCESS
    @Test
    public void testSetVolumeMax() throws Exception {
        final String TEST_NAME = "testSetVolumeMax";
        float maxVol = AudioTrack.getMaxVolume();
        testSetVolumeCommon(TEST_NAME, maxVol, false /*isStereo*/);
    }

    // Test case 10: setVolume() with min volume returns SUCCESS
    @Test
    public void testSetVolumeMin() throws Exception {
        final String TEST_NAME = "testSetVolumeMin";
        float minVol = AudioTrack.getMinVolume();
        testSetVolumeCommon(TEST_NAME, minVol, false /*isStereo*/);
    }

    // Test case 11: setVolume() with mid volume returns SUCCESS
    @Test
    public void testSetVolumeMid() throws Exception {
        final String TEST_NAME = "testSetVolumeMid";
        float midVol = (AudioTrack.getMaxVolume() - AudioTrack.getMinVolume()) / 2;
        testSetVolumeCommon(TEST_NAME, midVol, false /*isStereo*/);
    }

    // -----------------------------------------------------------------
    // Playback progress
    // ----------------------------------

    // Test case 1: setPlaybackHeadPosition() on playing track
    @Test
    public void testSetPlaybackHeadPositionPlaying() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackHeadPositionPlaying";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
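        // The head position can only be repositioned while the track is stopped or paused;
        // on a playing track the call is expected to be rejected.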
        assertTrue(TEST_NAME,
                track.setPlaybackHeadPosition(10) == AudioTrack.ERROR_INVALID_OPERATION);
        // -------- tear down --------------
        track.release();
    }

    // Test case 2: setPlaybackHeadPosition() on stopped track
    @Test
    public void testSetPlaybackHeadPositionStopped() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackHeadPositionStopped";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState());
        track.play();
        track.stop();
        assertEquals(TEST_NAME, AudioTrack.PLAYSTATE_STOPPED, track.getPlayState());
        assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackHeadPosition(10));
        // -------- tear down --------------
        track.release();
    }

    // Test case 3: setPlaybackHeadPosition() on paused track
    @Test
    public void testSetPlaybackHeadPositionPaused() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackHeadPositionPaused";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState());
        track.play();
        track.pause();
        assertEquals(TEST_NAME, AudioTrack.PLAYSTATE_PAUSED, track.getPlayState());
        assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackHeadPosition(10));
        // -------- tear down --------------
        track.release();
    }

    // Test case 4: setPlaybackHeadPosition() beyond what has been written
    @Test
    public void testSetPlaybackHeadPositionTooFar() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackHeadPositionTooFar";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // make up a frame index that's beyond what has been written: go from buffer size
        // to frame count (given the audio track properties), and add 77.
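        // (2 * minBuffSize bytes of 16-bit mono PCM, at 2 bytes per frame, is
        // (2 * minBuffSize) / 2 frames of content.)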
        int frameIndexTooFar = (2 * minBuffSize / 2) + 77;
        // -------- test --------------
        assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState());
        track.play();
        track.stop();
        assertEquals(TEST_NAME, AudioTrack.PLAYSTATE_STOPPED, track.getPlayState());
        assertEquals(TEST_NAME, AudioTrack.ERROR_BAD_VALUE,
                track.setPlaybackHeadPosition(frameIndexTooFar));
        // -------- tear down --------------
        track.release();
    }

    // Test case 5: setLoopPoints() fails for MODE_STREAM
    @Test
    public void testSetLoopPointsStream() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetLoopPointsStream";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
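        // Loop points only apply to static buffers, so a MODE_STREAM track is expected to
        // reject the request with ERROR_INVALID_OPERATION.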
1027         assertTrue(TEST_NAME, track.setLoopPoints(2, 50, 2) == AudioTrack.ERROR_INVALID_OPERATION);
1028         // -------- tear down --------------
1029         track.release();
1030     }
1031 
1032     // Test case 6: setLoopPoints() fails start > end
1033     @Test
testSetLoopPointsStartAfterEnd()1034     public void testSetLoopPointsStartAfterEnd() throws Exception {
1035         // constants for test
1036         final String TEST_NAME = "testSetLoopPointsStartAfterEnd";
1037         final int TEST_SR = 22050;
1038         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1039         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1040         final int TEST_MODE = AudioTrack.MODE_STATIC;
1041         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1042 
1043         // -------- initialization --------------
1044         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1045         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1046                 minBuffSize, TEST_MODE);
1047         byte data[] = new byte[minBuffSize];
1048         // -------- test --------------
1049         track.write(data, OFFSET_DEFAULT, data.length);
1050         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1051         assertTrue(TEST_NAME, track.setLoopPoints(50, 0, 2) == AudioTrack.ERROR_BAD_VALUE);
1052         // -------- tear down --------------
1053         track.release();
1054     }
1055 
1056     // Test case 6: setLoopPoints() success
1057     @Test
testSetLoopPointsSuccess()1058     public void testSetLoopPointsSuccess() throws Exception {
1059         // constants for test
1060         final String TEST_NAME = "testSetLoopPointsSuccess";
1061         final int TEST_SR = 22050;
1062         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1063         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1064         final int TEST_MODE = AudioTrack.MODE_STATIC;
1065         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1066 
1067         // -------- initialization --------------
1068         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1069         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1070                 minBuffSize, TEST_MODE);
1071         byte data[] = new byte[minBuffSize];
1072         // -------- test --------------
1073         track.write(data, OFFSET_DEFAULT, data.length);
1074         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1075         assertTrue(TEST_NAME, track.setLoopPoints(0, 50, 2) == AudioTrack.SUCCESS);
1076         // -------- tear down --------------
1077         track.release();
1078     }
1079 
1080     // Test case 7: setLoopPoints() fails with loop length bigger than content
1081     @Test
testSetLoopPointsLoopTooLong()1082     public void testSetLoopPointsLoopTooLong() throws Exception {
1083         // constants for test
1084         final String TEST_NAME = "testSetLoopPointsLoopTooLong";
1085         final int TEST_SR = 22050;
1086         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1087         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1088         final int TEST_MODE = AudioTrack.MODE_STATIC;
1089         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1090 
1091         // -------- initialization --------------
1092         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1093         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1094                 minBuffSize, TEST_MODE);
1095         byte data[] = new byte[minBuffSize];
1096         int dataSizeInFrames = minBuffSize / 2;
1097         // -------- test --------------
1098         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_NO_STATIC_DATA);
1099         track.write(data, OFFSET_DEFAULT, data.length);
1100         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1101         assertTrue(TEST_NAME, track.setLoopPoints(10, dataSizeInFrames + 20, 2) ==
1102             AudioTrack.ERROR_BAD_VALUE);
1103         // -------- tear down --------------
1104         track.release();
1105     }
1106 
1107     // Test case 8: setLoopPoints() fails with start beyond what can be written
1108     // for the track
1109     @Test
1110     public void testSetLoopPointsStartTooFar() throws Exception {
1111         // constants for test
1112         final String TEST_NAME = "testSetLoopPointsStartTooFar";
1113         final int TEST_SR = 22050;
1114         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1115         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1116         final int TEST_MODE = AudioTrack.MODE_STATIC;
1117         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1118 
1119         // -------- initialization --------------
1120         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1121         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1122                 minBuffSize, TEST_MODE);
1123         byte data[] = new byte[minBuffSize];
1124         int dataSizeInFrames = minBuffSize / 2;// 16bit data
1125         // -------- test --------------
1126         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_NO_STATIC_DATA);
1127         track.write(data, OFFSET_DEFAULT, data.length);
1128         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1129         assertTrue(TEST_NAME,
1130                 track.setLoopPoints(dataSizeInFrames + 20, dataSizeInFrames + 50, 2) ==
1131                     AudioTrack.ERROR_BAD_VALUE);
1132         // -------- tear down --------------
1133         track.release();
1134     }
1135 
1136     // Test case 9: setLoopPoints() fails with end beyond what can be written
1137     // for the track
1138     @Test
1139     public void testSetLoopPointsEndTooFar() throws Exception {
1140         // constants for test
1141         final String TEST_NAME = "testSetLoopPointsEndTooFar";
1142         final int TEST_SR = 22050;
1143         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1144         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1145         final int TEST_MODE = AudioTrack.MODE_STATIC;
1146         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1147 
1148         // -------- initialization --------------
1149         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1150         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1151                 minBuffSize, TEST_MODE);
1152         byte data[] = new byte[minBuffSize];
1153         int dataSizeInFrames = minBuffSize / 2;// 16bit data
1154         // -------- test --------------
1155         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_NO_STATIC_DATA);
1156         track.write(data, OFFSET_DEFAULT, data.length);
1157         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1158         int loopCount = 2;
1159         assertTrue(TEST_NAME,
1160                 track.setLoopPoints(dataSizeInFrames - 10, dataSizeInFrames + 50, loopCount) ==
1161                     AudioTrack.ERROR_BAD_VALUE);
1162         // -------- tear down --------------
1163         track.release();
1164     }
1165 
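    // Minimal illustrative sketch of the frame arithmetic the setLoopPoints() tests above
    // rely on (a hypothetical helper, not used by the assertions): for 16-bit mono static
    // data a frame is two bytes, so both loop points must lie within the frames actually
    // written, and the loop start must precede the loop end.
    private static int staticBufferFrames16BitMono(int sizeInBytes) {
        final int bytesPerSample = 2; // ENCODING_PCM_16BIT
        final int channelCount = 1;   // CHANNEL_CONFIGURATION_MONO
        return sizeInBytes / (bytesPerSample * channelCount);
    }
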
1166     // -----------------------------------------------------------------
1167     // Audio data supply
1168     // ----------------------------------
1169 
1170     // Test case 1: write() fails when the offset leaves less data (bytes) than the declared size
1171     @Test
1172     public void testWriteByteOffsetTooBig() throws Exception {
1173         // constants for test
1174         final String TEST_NAME = "testWriteByteOffsetTooBig";
1175         final int TEST_SR = 22050;
1176         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1177         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1178         final int TEST_MODE = AudioTrack.MODE_STREAM;
1179         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1180 
1181         // -------- initialization --------------
1182         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1183         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1184                 2 * minBuffSize, TEST_MODE);
1185         byte data[] = new byte[minBuffSize];
1186         // -------- test --------------
1187         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1188         int offset = 10;
1189         assertTrue(TEST_NAME, track.write(data, offset, data.length) == AudioTrack.ERROR_BAD_VALUE);
1190         // -------- tear down --------------
1191         track.release();
1192     }
1193 
1194     // Test case 2: write() fails when the offset leaves less data (shorts) than the
1195     // declared size
1196     @Test
1197     public void testWriteShortOffsetTooBig() throws Exception {
1198         // constants for test
1199         final String TEST_NAME = "testWriteShortOffsetTooBig";
1200         final int TEST_SR = 22050;
1201         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1202         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1203         final int TEST_MODE = AudioTrack.MODE_STREAM;
1204         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1205 
1206         // -------- initialization --------------
1207         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1208         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1209                 2 * minBuffSize, TEST_MODE);
1210         short data[] = new short[minBuffSize / 2];
1211         // -------- test --------------
1212         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1213         int offset = 10;
1214         assertTrue(TEST_NAME, track.write(data, offset, data.length)
1215                                                             == AudioTrack.ERROR_BAD_VALUE);
1216         // -------- tear down --------------
1217         track.release();
1218     }
1219 
1220     // Test case 3: write() fails when supplying less data (bytes) than declared
1221     @Test
1222     public void testWriteByteSizeTooBig() throws Exception {
1223         // constants for test
1224         final String TEST_NAME = "testWriteByteSizeTooBig";
1225         final int TEST_SR = 22050;
1226         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1227         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1228         final int TEST_MODE = AudioTrack.MODE_STREAM;
1229         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1230 
1231         // -------- initialization --------------
1232         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1233         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1234                 2 * minBuffSize, TEST_MODE);
1235         byte data[] = new byte[minBuffSize];
1236         // -------- test --------------
1237         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1238         assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length + 10)
1239                                                         == AudioTrack.ERROR_BAD_VALUE);
1240         // -------- tear down --------------
1241         track.release();
1242     }
1243 
1244     // Test case 4: write() fails when supplying less data (shorts) than
1245     // declared
1246     @Test
1247     public void testWriteShortSizeTooBig() throws Exception {
1248         // constants for test
1249         final String TEST_NAME = "testWriteShortSizeTooBig";
1250         final int TEST_SR = 22050;
1251         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1252         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1253         final int TEST_MODE = AudioTrack.MODE_STREAM;
1254         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1255 
1256         // -------- initialization --------------
1257         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1258         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1259                 2 * minBuffSize, TEST_MODE);
1260         short data[] = new short[minBuffSize / 2];
1261         // -------- test --------------
1262         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1263         assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length + 10)
1264                                                         == AudioTrack.ERROR_BAD_VALUE);
1265         // -------- tear down --------------
1266         track.release();
1267     }
1268 
1269     // Test case 5: write() fails with negative offset
1270     @Test
1271     public void testWriteByteNegativeOffset() throws Exception {
1272         // constants for test
1273         final String TEST_NAME = "testWriteByteNegativeOffset";
1274         final int TEST_SR = 22050;
1275         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1276         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1277         final int TEST_MODE = AudioTrack.MODE_STREAM;
1278         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1279 
1280         // -------- initialization --------------
1281         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1282         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1283                 2 * minBuffSize, TEST_MODE);
1284         byte data[] = new byte[minBuffSize];
1285         // -------- test --------------
1286         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1287         assertTrue(TEST_NAME, track.write(data, OFFSET_NEGATIVE, data.length - 10) ==
1288             AudioTrack.ERROR_BAD_VALUE);
1289         // -------- tear down --------------
1290         track.release();
1291     }
1292 
1293     // Test case 6: write() fails with negative offset
1294     @Test
1295     public void testWriteShortNegativeOffset() throws Exception {
1296         // constants for test
1297         final String TEST_NAME = "testWriteShortNegativeOffset";
1298         final int TEST_SR = 22050;
1299         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1300         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1301         final int TEST_MODE = AudioTrack.MODE_STREAM;
1302         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1303 
1304         // -------- initialization --------------
1305         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1306         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1307                 2 * minBuffSize, TEST_MODE);
1308         short data[] = new short[minBuffSize / 2];
1309         // -------- test --------------
1310         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1311         assertTrue(TEST_NAME,
1312         track.write(data, OFFSET_NEGATIVE, data.length - 10) == AudioTrack.ERROR_BAD_VALUE);
1313         // -------- tear down --------------
1314         track.release();
1315     }
1316 
1317     // Test case 7: write() fails with negative size
1318     @Test
1319     public void testWriteByteNegativeSize() throws Exception {
1320         // constants for test
1321         final String TEST_NAME = "testWriteByteNegativeSize";
1322         final int TEST_SR = 22050;
1323         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1324         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1325         final int TEST_MODE = AudioTrack.MODE_STREAM;
1326         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1327 
1328         // -------- initialization --------------
1329         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1330         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1331                 2 * minBuffSize, TEST_MODE);
1332         byte data[] = new byte[minBuffSize];
1333         // -------- test --------------
1334         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1335         int dataLength = -10;
1336         assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, dataLength)
1337                                                     == AudioTrack.ERROR_BAD_VALUE);
1338         // -------- tear down --------------
1339         track.release();
1340     }
1341 
1342     // Test case 8: write() fails with negative size
1343     @Test
1344     public void testWriteShortNegativeSize() throws Exception {
1345         // constants for test
1346         final String TEST_NAME = "testWriteShortNegativeSize";
1347         final int TEST_SR = 22050;
1348         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1349         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1350         final int TEST_MODE = AudioTrack.MODE_STREAM;
1351         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1352 
1353         // -------- initialization --------------
1354         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1355         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1356                 2 * minBuffSize, TEST_MODE);
1357         short data[] = new short[minBuffSize / 2];
1358         // -------- test --------------
1359         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1360         int dataLength = -10;
1361         assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, dataLength)
1362                                                         == AudioTrack.ERROR_BAD_VALUE);
1363         // -------- tear down --------------
1364         track.release();
1365     }
1366 
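    // Illustrative sketch of the argument check that write() test cases 1-8 above exercise
    // (a hypothetical predicate inferred from these tests, not an AudioTrack API): the call
    // is rejected with ERROR_BAD_VALUE when the offset or size is negative, or when
    // offset + size runs past the end of the supplied array.
    private static boolean isPlausiblyValidWriteRegion(int arrayLength, int offset, int size) {
        return offset >= 0 && size >= 0 && (long) offset + size <= arrayLength;
    }
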
1367     // Test case 9: write() succeeds and returns the size that was written for
1368     // 16bit
1369     @Test
1370     public void testWriteByte() throws Exception {
1371         // constants for test
1372         final String TEST_NAME = "testWriteByte";
1373         final int TEST_SR = 22050;
1374         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1375         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1376         final int TEST_MODE = AudioTrack.MODE_STREAM;
1377         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1378 
1379         // -------- initialization --------------
1380         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1381         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1382                 2 * minBuffSize, TEST_MODE);
1383         byte data[] = new byte[minBuffSize];
1384         // -------- test --------------
1385         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1386         assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length) == data.length);
1387         // -------- tear down --------------
1388         track.release();
1389     }
1390 
1391     // Test case 10: write() succeeds and returns the size that was written for
1392     // 16bit
1393     @Test
1394     public void testWriteShort() throws Exception {
1395         // constants for test
1396         final String TEST_NAME = "testWriteShort";
1397         final int TEST_SR = 22050;
1398         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1399         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1400         final int TEST_MODE = AudioTrack.MODE_STREAM;
1401         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1402 
1403         // -------- initialization --------------
1404         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1405         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1406                 2 * minBuffSize, TEST_MODE);
1407         short data[] = new short[minBuffSize / 2];
1408         // -------- test --------------
1409         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1410         assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length) == data.length);
1411         track.flush();
1412         // -------- tear down --------------
1413         track.release();
1414     }
1415 
1416     // Test case 11: write() succeeds and returns the size that was written for
1417     // 8bit
1418     @Test
1419     public void testWriteByte8bit() throws Exception {
1420         // constants for test
1421         final String TEST_NAME = "testWriteByte8bit";
1422         final int TEST_SR = 22050;
1423         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1424         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
1425         final int TEST_MODE = AudioTrack.MODE_STREAM;
1426         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1427 
1428         // -------- initialization --------------
1429         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1430         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1431                 2 * minBuffSize, TEST_MODE);
1432         byte data[] = new byte[minBuffSize];
1433         // -------- test --------------
1434         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1435         assertEquals(TEST_NAME, data.length, track.write(data, OFFSET_DEFAULT, data.length));
1436         // -------- tear down --------------
1437         track.release();
1438     }
1439 
1440     // Test case 12: write() succeeds and returns the size that was written for
1441     // 8bit
1442     @Test
1443     public void testWriteShort8bit() throws Exception {
1444         // constants for test
1445         final String TEST_NAME = "testWriteShort8bit";
1446         final int TEST_SR = 22050;
1447         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1448         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
1449         final int TEST_MODE = AudioTrack.MODE_STREAM;
1450         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1451 
1452         // -------- initialization --------------
1453         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1454         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1455                 2 * minBuffSize, TEST_MODE);
1456         short data[] = new short[minBuffSize / 2];
1457         // -------- test --------------
1458         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1459         assertEquals(TEST_NAME, data.length, track.write(data, OFFSET_DEFAULT, data.length));
1460         // -------- tear down --------------
1461         track.release();
1462     }
1463 
1464     // -----------------------------------------------------------------
1465     // Getters
1466     // ----------------------------------
1467 
1468     // Test case 1: getMinBufferSize() returns ERROR_BAD_VALUE if SR < 4000
1469     @Test
1470     public void testGetMinBufferSizeTooLowSR() throws Exception {
1471         // constant for test
1472         final String TEST_NAME = "testGetMinBufferSizeTooLowSR";
1473         final int TEST_SR = 3999;
1474         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1475         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
1476 
1477         // -------- initialization & test --------------
1478         assertTrue(TEST_NAME, AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT) ==
1479             AudioTrack.ERROR_BAD_VALUE);
1480     }
1481 
1482     // Test case 2: getMinBufferSize() returns ERROR_BAD_VALUE if the sample rate is too high
1483     @Test
1484     public void testGetMinBufferSizeTooHighSR() throws Exception {
1485         // constant for test
1486         final String TEST_NAME = "testGetMinBufferSizeTooHighSR";
1487         // FIXME need an API to retrieve AudioTrack.SAMPLE_RATE_HZ_MAX
1488         final int TEST_SR = AudioFormat.SAMPLE_RATE_HZ_MAX + 1;
1489         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1490         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
1491 
1492         // -------- initialization & test --------------
1493         assertTrue(TEST_NAME, AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT) ==
1494             AudioTrack.ERROR_BAD_VALUE);
1495     }
1496 
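    // Hypothetical usage sketch mirroring the two getMinBufferSize() checks above: the
    // method reports failure in-band, so a caller should verify the value is a positive
    // buffer size before handing it to an AudioTrack constructor.
    private static boolean isUsableMinBufferSize(int sampleRate, int channelConfig,
            int audioFormat) {
        final int size = AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat);
        return size > 0; // ERROR_BAD_VALUE and ERROR are both negative
    }
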
1497     @Test
1498     public void testAudioTrackProperties() throws Exception {
1499         // constants for test
1500         final String TEST_NAME = "testAudioTrackProperties";
1501         final int TEST_SR = 22050;
1502         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1503         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
1504         final int TEST_MODE = AudioTrack.MODE_STREAM;
1505         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1506 
1507         // -------- initialization --------------
1508         int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1509         MockAudioTrack track = new MockAudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF,
1510                 TEST_FORMAT, 2 * minBuffSize, TEST_MODE);
1511         assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState());
1512         assertEquals(TEST_NAME, TEST_FORMAT, track.getAudioFormat());
1513         assertEquals(TEST_NAME, TEST_CONF, track.getChannelConfiguration());
1514         assertEquals(TEST_NAME, TEST_SR, track.getSampleRate());
1515         assertEquals(TEST_NAME, TEST_STREAM_TYPE, track.getStreamType());
1516         final int channelCount = 1;
1517         assertEquals(TEST_NAME, channelCount, track.getChannelCount());
1518         final int notificationMarkerPosition = 0;
1519         assertEquals(TEST_NAME, notificationMarkerPosition, track.getNotificationMarkerPosition());
1520         final int markerInFrames = 2;
1521         assertEquals(TEST_NAME, AudioTrack.SUCCESS,
1522                 track.setNotificationMarkerPosition(markerInFrames));
1523         assertEquals(TEST_NAME, markerInFrames, track.getNotificationMarkerPosition());
1524         final int positionNotificationPeriod = 0;
1525         assertEquals(TEST_NAME, positionNotificationPeriod, track.getPositionNotificationPeriod());
1526         final int periodInFrames = 2;
1527         assertEquals(TEST_NAME, AudioTrack.SUCCESS,
1528                 track.setPositionNotificationPeriod(periodInFrames));
1529         assertEquals(TEST_NAME, periodInFrames, track.getPositionNotificationPeriod());
1530         track.setState(AudioTrack.STATE_NO_STATIC_DATA);
1531         assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
1532         track.setState(AudioTrack.STATE_UNINITIALIZED);
1533         assertEquals(TEST_NAME, AudioTrack.STATE_UNINITIALIZED, track.getState());
1534         int frameCount = 2 * minBuffSize;
1535         if (TEST_CONF == AudioFormat.CHANNEL_CONFIGURATION_STEREO) {
1536             frameCount /= 2;
1537         }
1538         if (TEST_FORMAT == AudioFormat.ENCODING_PCM_16BIT) {
1539             frameCount /= 2;
1540         }
1541         assertTrue(TEST_NAME, track.getNativeFrameCount() >= frameCount);
1542         assertEquals(TEST_NAME, track.getNativeFrameCount(), track.getBufferSizeInFrames());
1543     }
1544 
1545     @Test
1546     public void testReloadStaticData() throws Exception {
1547         // constants for test
1548         final String TEST_NAME = "testReloadStaticData";
1549         final int TEST_SR = 22050;
1550         final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
1551         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
1552         final int TEST_MODE = AudioTrack.MODE_STATIC;
1553         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1554 
1555         // -------- initialization --------------
1556         int bufferSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
1557         byte data[] = AudioHelper.createSoundDataInByteArray(
1558                 bufferSize, TEST_SR, 1024 /* frequency */, 0 /* sweep */);
1559         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
1560                 bufferSize, TEST_MODE);
1561         // -------- test --------------
1562         track.write(data, OFFSET_DEFAULT, bufferSize);
1563         assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
1564         track.play();
1565         Thread.sleep(WAIT_MSEC);
1566         track.stop();
1567         Thread.sleep(WAIT_MSEC);
1568         assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.reloadStaticData());
1569         track.play();
1570         Thread.sleep(WAIT_MSEC);
1571         track.stop();
1572         // -------- tear down --------------
1573         track.release();
1574     }
1575 
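    // Hypothetical helper sketching the static-track replay pattern exercised by
    // testReloadStaticData() above: after stop(), reloadStaticData() rewinds a MODE_STATIC
    // track so the previously written buffer can be played again without another write().
    private void replayStaticTrack(AudioTrack track, long playMsec) throws InterruptedException {
        assertEquals(AudioTrack.SUCCESS, track.reloadStaticData());
        track.play();
        Thread.sleep(playMsec);
        track.stop();
    }
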
1576     @Presubmit
1577     @Test
1578     public void testPlayStaticDataShort() throws Exception {
1579         if (!hasAudioOutput()) {
1580             Log.w(TAG,"AUDIO_OUTPUT feature not found. This system might not have a valid "
1581                     + "audio output HAL");
1582             return;
1583         }
1584         // constants for test
1585         final String TEST_NAME = "testPlayStaticDataShort";
1586         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_FLOAT;
1587         final int TEST_SR = 48000;
1588         final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
1589         final int TEST_MODE = AudioTrack.MODE_STATIC;
1590         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1591         final double TEST_SWEEP = 100;
1592         final int TEST_LOOPS = 1;
1593         final double TEST_FREQUENCY = 400;
1594         final long WAIT_TIME_MS = 150; // compensate for cold start when run in isolation.
1595         final double TEST_LOOP_DURATION = 0.25;
1596         final int TEST_ADDITIONAL_DRAIN_MS = 300;  // as a presubmit test, 1% of the time the
1597                                                    // startup is slow by 200ms.
1598 
1599         playOnceStaticData(TEST_NAME, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP,
1600                 TEST_LOOPS, TEST_FORMAT, TEST_FREQUENCY, TEST_SR, TEST_CONF,
1601                 WAIT_TIME_MS, TEST_LOOP_DURATION, TEST_ADDITIONAL_DRAIN_MS);
1602 
1603     }
1604 
1605     @Test
1606     public void testPlayStaticByteArray() throws Exception {
1607         doTestPlayStaticData("testPlayStaticByteArray", AudioFormat.ENCODING_PCM_8BIT);
1608     }
1609 
1610     @Test
1611     public void testPlayStaticShortArray() throws Exception {
1612         doTestPlayStaticData("testPlayStaticShortArray", AudioFormat.ENCODING_PCM_16BIT);
1613     }
1614 
1615     @Test
1616     public void testPlayStaticFloatArray() throws Exception {
1617         doTestPlayStaticData("testPlayStaticFloatArray", AudioFormat.ENCODING_PCM_FLOAT);
1618     }
1619 
1620     private void doTestPlayStaticData(String testName, int testFormat) throws Exception {
1621         if (!hasAudioOutput()) {
1622             Log.w(TAG,"AUDIO_OUTPUT feature not found. This system might not have a valid "
1623                     + "audio output HAL");
1624             return;
1625         }
1626         // constants for test
1627         final int TEST_SR_ARRAY[] = {
1628                 12055, // Note multichannel tracks will sound very short at low sample rates
1629                 48000,
1630         };
1631         final int TEST_CONF_ARRAY[] = {
1632                 AudioFormat.CHANNEL_OUT_MONO,    // 1.0
1633                 AudioFormat.CHANNEL_OUT_STEREO,  // 2.0
1634                 AudioFormat.CHANNEL_OUT_7POINT1_SURROUND, // 7.1
1635         };
1636         final int TEST_MODE = AudioTrack.MODE_STATIC;
1637         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1638         final double TEST_SWEEP = 100;
1639         final int TEST_LOOPS = 1;
1640         final double TEST_LOOP_DURATION = 1.;
1641         final int TEST_ADDITIONAL_DRAIN_MS = 0;
1642         // Compensates for cold start when run in isolation.
1643         // The cold output latency must be less than 500 ms,
1644         // or less than 200 ms for low latency devices.
1645         final long WAIT_TIME_MS = isLowLatencyDevice() ? WAIT_MSEC : 500;
1646 
1647         double frequency = 400; // frequency changes for each test
1648         for (int testSampleRate : TEST_SR_ARRAY) {
1649             for (int testChannelConfiguration : TEST_CONF_ARRAY) {
1650                 playOnceStaticData(testName, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP,
1651                         TEST_LOOPS, testFormat, frequency, testSampleRate,
1652                         testChannelConfiguration, WAIT_TIME_MS,
1653                         TEST_LOOP_DURATION, TEST_ADDITIONAL_DRAIN_MS);
1654 
1655                 frequency += 70; // increment test tone frequency
1656             }
1657         }
1658     }
1659 
1660     private void playOnceStaticData(String testName, int testMode, int testStreamType,
1661             double testSweep, int testLoops, int testFormat, double testFrequency, int testSr,
1662             int testConf, long waitMsec, double testLoopDuration, int additionalDrainMs)
1663             throws InterruptedException {
1664         // -------- initialization --------------
1665         final int channelCount = Integer.bitCount(testConf);
1666         final int bufferFrames = (int)(testLoopDuration * testSr);
1667         final int bufferSamples = bufferFrames * channelCount;
1668         final int bufferSize = bufferSamples
1669                 * AudioFormat.getBytesPerSample(testFormat);
1670         final double frequency = testFrequency / channelCount;
1671         final long MILLISECONDS_PER_SECOND = 1000;
1672         AudioTrack track = new AudioTrack(testStreamType, testSr,
1673                 testConf, testFormat, bufferSize, testMode);
1674         assertEquals(testName, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
1675 
1676         // -------- test --------------
1677 
1678         // test setLoopPoints and setPosition can be called here.
1679         assertEquals(testName,
1680                 android.media.AudioTrack.SUCCESS,
1681                 track.setPlaybackHeadPosition(bufferFrames/2));
1682         assertEquals(testName,
1683                 android.media.AudioTrack.SUCCESS,
1684                 track.setLoopPoints(
1685                         0 /*startInFrames*/, bufferFrames, 10 /*loopCount*/));
1686         // only need to write once to the static track
1687         switch (testFormat) {
1688         case AudioFormat.ENCODING_PCM_8BIT: {
1689             byte data[] = AudioHelper.createSoundDataInByteArray(
1690                     bufferSamples, testSr,
1691                     frequency, testSweep);
1692             assertEquals(testName,
1693                     bufferSamples,
1694                     track.write(data, 0 /*offsetInBytes*/, data.length));
1695             } break;
1696         case AudioFormat.ENCODING_PCM_16BIT: {
1697             short data[] = AudioHelper.createSoundDataInShortArray(
1698                     bufferSamples, testSr,
1699                     frequency, testSweep);
1700             assertEquals(testName,
1701                     bufferSamples,
1702                     track.write(data, 0 /*offsetInBytes*/, data.length));
1703             } break;
1704         case AudioFormat.ENCODING_PCM_FLOAT: {
1705             float data[] = AudioHelper.createSoundDataInFloatArray(
1706                     bufferSamples, testSr,
1707                     frequency, testSweep);
1708             assertEquals(testName,
1709                     bufferSamples,
1710                     track.write(data, 0 /*offsetInBytes*/, data.length,
1711                             AudioTrack.WRITE_BLOCKING));
1712             } break;
1713         }
1714         assertEquals(testName, AudioTrack.STATE_INITIALIZED, track.getState());
1715         // test setLoopPoints and setPosition can be called here.
1716         assertEquals(testName,
1717                 android.media.AudioTrack.SUCCESS,
1718                 track.setPlaybackHeadPosition(0 /*positionInFrames*/));
1719         assertEquals(testName,
1720                 android.media.AudioTrack.SUCCESS,
1721                 track.setLoopPoints(0 /*startInFrames*/, bufferFrames, testLoops));
1722 
1723         track.play();
1724         Thread.sleep((int)(testLoopDuration * MILLISECONDS_PER_SECOND) * (testLoops + 1));
1725         Thread.sleep(waitMsec + additionalDrainMs);
1726 
1727         // Check position after looping. AudioTrack.getPlaybackHeadPosition() returns
1728         // the running count of frames played, not the actual static buffer position.
1729         int position = track.getPlaybackHeadPosition();
1730         assertEquals(testName, bufferFrames * (testLoops + 1), position);
1731 
1732         track.stop();
1733         Thread.sleep(waitMsec);
1734         // -------- tear down --------------
1735         track.release();
1736     }
1737 
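    // Worked example (hypothetical helper) of the position check in playOnceStaticData()
    // above: getPlaybackHeadPosition() counts frames played, so one initial pass through a
    // static buffer plus loopCount repeats reports bufferFrames * (loopCount + 1) frames,
    // e.g. 48000 frames looped once reports 96000.
    private static long expectedFramesAfterLooping(int bufferFrames, int loopCount) {
        return (long) bufferFrames * (loopCount + 1);
    }
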
1738     @Presubmit
1739     @Test
1740     public void testPlayStreamDataShort() throws Exception {
1741         // constants for test
1742         final String TEST_NAME = "testPlayStreamDataShort";
1743         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1744         final int TEST_SR = 48000;
1745         final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
1746         final int TEST_MODE = AudioTrack.MODE_STREAM;
1747         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1748         final float TEST_SWEEP = 0; // sine wave only
1749         final boolean TEST_IS_LOW_RAM_DEVICE = isLowRamDevice();
1750         final double TEST_FREQUENCY = 1000;
1751         final long NO_WAIT = 0;
1752 
1753         playOnceStreamData(TEST_NAME, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP,
1754                 TEST_IS_LOW_RAM_DEVICE, TEST_FORMAT, TEST_FREQUENCY, TEST_SR, TEST_CONF,
1755                 NO_WAIT, 0 /* mask */);
1756     }
1757 
1758     @Test
1759     public void testPlayStreamByteArray() throws Exception {
1760         doTestPlayStreamData("testPlayStreamByteArray", AudioFormat.ENCODING_PCM_8BIT);
1761     }
1762 
1763     @Test
1764     public void testPlayStreamShortArray() throws Exception {
1765         doTestPlayStreamData("testPlayStreamShortArray", AudioFormat.ENCODING_PCM_16BIT);
1766     }
1767 
1768     @Test
1769     public void testPlayStreamFloatArray() throws Exception {
1770         doTestPlayStreamData("testPlayStreamFloatArray", AudioFormat.ENCODING_PCM_FLOAT);
1771     }
1772 
1773     private void doTestPlayStreamData(String testName, int testFormat) throws Exception {
1774         // constants for test
1775         // due to downmixer algorithmic latency, source channels greater than 2 may
1776         // sound shorter in duration at 4kHz sampling rate.
1777         final int TEST_SR_ARRAY[] = {
1778                 4000,
1779                 44100,
1780                 48000,
1781                 96000,
1782                 192000,
1783         };
1784         final int TEST_CONF_ARRAY[] = {
1785                 AudioFormat.CHANNEL_OUT_MONO,    // 1.0
1786                 AudioFormat.CHANNEL_OUT_STEREO,  // 2.0
1787                 AudioFormat.CHANNEL_OUT_STEREO | AudioFormat.CHANNEL_OUT_FRONT_CENTER, // 3.0
1788                 AudioFormat.CHANNEL_OUT_QUAD,    // 4.0
1789                 AudioFormat.CHANNEL_OUT_QUAD | AudioFormat.CHANNEL_OUT_FRONT_CENTER,   // 5.0
1790                 AudioFormat.CHANNEL_OUT_5POINT1, // 5.1
1791                 AudioFormat.CHANNEL_OUT_6POINT1, // 6.1
1792                 AudioFormat.CHANNEL_OUT_7POINT1_SURROUND, // 7.1
1793         };
1794         final int TEST_MODE = AudioTrack.MODE_STREAM;
1795         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
1796         final float TEST_SWEEP = 0; // sine wave only
1797         final boolean TEST_IS_LOW_RAM_DEVICE = isLowRamDevice();
1798 
1799         double frequency = 400; // frequency changes for each test
1800         for (int testSampleRate : TEST_SR_ARRAY) {
1801             for (int testChannelConfiguration : TEST_CONF_ARRAY) {
1802                 playOnceStreamData(testName, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP,
1803                         TEST_IS_LOW_RAM_DEVICE, testFormat, frequency,
1804                         testSampleRate, testChannelConfiguration,
1805                         WAIT_MSEC, 0 /* mask */);
1806                 frequency += 50; // increment test tone frequency
1807             }
1808         }
1809     }
1810 
1811     private void playOnceStreamData(String testName, int testMode, int testStream,
1812             float testSweep, boolean isLowRamDevice, int testFormat, double testFrequency,
1813             int testSr, int testConf, long waitMsec, int mask)
1814             throws InterruptedException {
1815         final int channelCount = Integer.bitCount(testConf);
1816         if (isLowRamDevice
1817                 && (testSr > 96000 || channelCount > 4)) {
1818             return; // ignore. FIXME: reenable when AF memory allocation is updated.
1819         }
1820         // -------- initialization --------------
1821         final int minBufferSize = AudioTrack.getMinBufferSize(testSr,
1822                 testConf, testFormat); // in bytes
1823         AudioTrack track = new AudioTrack(testStream, testSr,
1824                 testConf, testFormat, minBufferSize, testMode);
1825         assertTrue(testName, track.getState() == AudioTrack.STATE_INITIALIZED);
1826 
1827         // compute parameters for the source signal data.
1828         AudioFormat format = track.getFormat();
1829         assertEquals(testName, testSr, format.getSampleRate());
1830         assertEquals(testName, testConf, format.getChannelMask());
1831         assertEquals(testName, channelCount, format.getChannelCount());
1832         assertEquals(testName, testFormat, format.getEncoding());
1833         // duration of test tones
1834         final int frames = AudioHelper.frameCountFromMsec(300 /* ms */, format);
1835         final int sourceSamples = channelCount * frames;
1836         final double frequency = testFrequency / channelCount;
1837 
1838         int written = 0;
1839         // For streaming tracks, it's ok to issue the play() command
1840         // before any audio is written.
1841         track.play();
1842         // -------- test --------------
1843 
1844         // samplesPerWrite can be any positive value.
1845         // We prefer this to be a multiple of channelCount so write()
1846         // does not return a short count.
1847         // If samplesPerWrite is very large, it is limited to the data length
1848         // and we simply write (blocking) the entire source data and not even loop.
1849         // We choose a value here which simulates double buffer writes.
1850         final int buffers = 2; // double buffering mode
1851         final int samplesPerWrite =
1852                 (track.getBufferSizeInFrames() / buffers) * channelCount;
1853         switch (testFormat) {
1854             case AudioFormat.ENCODING_PCM_8BIT: {
1855                 byte data[] = AudioHelper.createSoundDataInByteArray(
1856                         sourceSamples, testSr,
1857                         frequency, testSweep);
1858                 if (mask != 0) {
1859                     AudioHelper.maskArray(data, testConf, mask);
1860                 }
1861                 while (written < data.length) {
1862                     int samples = Math.min(data.length - written, samplesPerWrite);
1863                     int ret = track.write(data, written, samples);
1864                     assertEquals(testName, samples, ret);
1865                     written += ret;
1866                 }
1867             }
1868             break;
1869             case AudioFormat.ENCODING_PCM_16BIT: {
1870                 short data[] = AudioHelper.createSoundDataInShortArray(
1871                         sourceSamples, testSr,
1872                         frequency, testSweep);
1873                 if (mask != 0) {
1874                     AudioHelper.maskArray(data, testConf, mask);
1875                 }
1876                 while (written < data.length) {
1877                     int samples = Math.min(data.length - written, samplesPerWrite);
1878                     int ret = track.write(data, written, samples);
1879                     assertEquals(testName, samples, ret);
1880                     written += ret;
1881                 }
1882             }
1883             break;
1884             case AudioFormat.ENCODING_PCM_FLOAT: {
1885                 float data[] = AudioHelper.createSoundDataInFloatArray(
1886                         sourceSamples, testSr,
1887                         frequency, testSweep);
1888                 if (mask != 0) {
1889                     AudioHelper.maskArray(data, testConf, mask);
1890                 }
1891                 while (written < data.length) {
1892                     int samples = Math.min(data.length - written, samplesPerWrite);
1893                     int ret = track.write(data, written, samples,
1894                             AudioTrack.WRITE_BLOCKING);
1895                     assertEquals(testName, samples, ret);
1896                     written += ret;
1897                 }
1898             }
1899             break;
1900         }
1901 
1902         // For streaming tracks, AudioTrack.stop() doesn't immediately stop playback.
1903         // Rather, it allows the remaining data in the internal buffer to drain.
1904         track.stop();
1905         Thread.sleep(waitMsec); // wait for the data to drain.
1906         // -------- tear down --------------
1907         track.release();
1908         Thread.sleep(waitMsec); // wait for release to complete
1909     }
1910 
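    // Hypothetical helper restating the double-buffer write sizing used in
    // playOnceStreamData() above: writing half of the track buffer per call keeps each
    // blocking write() short, and multiplying by channelCount keeps the request a whole
    // number of frames so write() does not return a short count.
    private static int samplesPerDoubleBufferWrite(AudioTrack track, int channelCount) {
        final int buffers = 2; // double buffering
        return (track.getBufferSizeInFrames() / buffers) * channelCount;
    }
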
1911     private void playOnceStreamByteBuffer(
1912             String testName, double testFrequency, double testSweep,
1913             int testStreamType, int testSampleRate, int testChannelMask, int testEncoding,
1914             int testTransferMode, int testWriteMode,
1915             boolean useChannelIndex, boolean useDirect) throws Exception {
1916         AudioTrack track = null;
1917         try {
1918             AudioFormat.Builder afb = new AudioFormat.Builder()
1919                     .setEncoding(testEncoding)
1920                     .setSampleRate(testSampleRate);
1921             if (useChannelIndex) {
1922                 afb.setChannelIndexMask(testChannelMask);
1923             } else {
1924                 afb.setChannelMask(testChannelMask);
1925             }
1926             final AudioFormat format = afb.build();
1927             final int frameSize = AudioHelper.frameSizeFromFormat(format);
1928             final int frameCount =
1929                     AudioHelper.frameCountFromMsec(300 /* ms */, format);
1930             final int bufferSize = frameCount * frameSize;
1931             final int bufferSamples = frameCount * format.getChannelCount();
1932 
1933             track = new AudioTrack.Builder()
1934                     .setAudioFormat(format)
1935                     .setTransferMode(testTransferMode)
1936                     .setBufferSizeInBytes(bufferSize)
1937                     .build();
1938 
1939             assertEquals(testName + ": state",
1940                     AudioTrack.STATE_INITIALIZED, track.getState());
1941             assertEquals(testName + ": sample rate",
1942                     testSampleRate, track.getSampleRate());
1943             assertEquals(testName + ": encoding",
1944                     testEncoding, track.getAudioFormat());
1945 
1946             ByteBuffer bb = useDirect
1947                     ? ByteBuffer.allocateDirect(bufferSize)
1948                     : ByteBuffer.allocate(bufferSize);
1949             bb.order(java.nio.ByteOrder.nativeOrder());
1950 
1951             final double sampleFrequency = testFrequency / format.getChannelCount();
1952             switch (testEncoding) {
1953                 case AudioFormat.ENCODING_PCM_8BIT: {
1954                     byte data[] = AudioHelper.createSoundDataInByteArray(
1955                             bufferSamples, testSampleRate,
1956                             sampleFrequency, testSweep);
1957                     bb.put(data);
1958                     bb.flip();
1959                 }
1960                 break;
1961                 case AudioFormat.ENCODING_PCM_16BIT: {
1962                     short data[] = AudioHelper.createSoundDataInShortArray(
1963                             bufferSamples, testSampleRate,
1964                             sampleFrequency, testSweep);
1965                     ShortBuffer sb = bb.asShortBuffer();
1966                     sb.put(data);
1967                     bb.limit(sb.limit() * 2);
1968                 }
1969                 break;
1970                 case AudioFormat.ENCODING_PCM_FLOAT: {
1971                     float data[] = AudioHelper.createSoundDataInFloatArray(
1972                             bufferSamples, testSampleRate,
1973                             sampleFrequency, testSweep);
1974                     FloatBuffer fb = bb.asFloatBuffer();
1975                     fb.put(data);
1976                     bb.limit(fb.limit() * 4);
1977                 }
1978                 break;
1979             }
1980             // start the AudioTrack
1981             // This can be done before or after the first write.
1982             // Current behavior for streaming tracks is that
1983             // actual playback does not begin before the internal
1984             // data buffer is completely full.
1985             track.play();
1986 
1987             // write data
1988             final long startTime = System.currentTimeMillis();
1989             final long maxDuration = frameCount * 1000 / testSampleRate + 1000;
1990             for (int written = 0; written < bufferSize; ) {
1991                 // ret may return a short count if write
1992                 // is non blocking or even if write is blocking
1993                 // when a stop/pause/flush is issued from another thread.
1994                 final int kBatchFrames = 1000;
1995                 int ret = track.write(bb,
1996                         Math.min(bufferSize - written, frameSize * kBatchFrames),
1997                         testWriteMode);
1998                 // for non-blocking mode, this loop may spin quickly
1999                 assertTrue(testName + ": write error " + ret, ret >= 0);
2000                 assertTrue(testName + ": write timeout",
2001                         (System.currentTimeMillis() - startTime) <= maxDuration);
2002                 written += ret;
2003             }
2004 
2005             // for streaming tracks, stop will allow the rest of the data to
2006             // drain out, but we don't know how long to wait unless
2007             // we check the position before stop. if we check position
2008             // after we stop, we read 0.
2009             final int position = track.getPlaybackHeadPosition();
2010             final int remainingTimeMs = (int)((double)(frameCount - position)
2011                     * 1000 / testSampleRate);
2012             track.stop();
2013             Thread.sleep(remainingTimeMs);
2014             Thread.sleep(WAIT_MSEC);
2015         } finally {
2016             if (track != null) {
2017                 track.release();
2018             }
2019         }
2020     }
2021 
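    // Hypothetical helper sketching the drain-time estimate used at the end of
    // playOnceStreamByteBuffer() above: the frames still queued are the frames written
    // minus the playback head position, converted to milliseconds at the sample rate.
    private static int estimatedDrainTimeMs(int framesWritten, int playbackHeadPosition,
            int sampleRate) {
        final long framesPending = Math.max(0, framesWritten - playbackHeadPosition);
        return (int) (framesPending * 1000 / sampleRate);
    }
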
2022     @Test
2023     public void testPlayStreamByteBuffer() throws Exception {
2024         // constants for test
2025         final String TEST_NAME = "testPlayStreamByteBuffer";
2026         final int TEST_FORMAT_ARRAY[] = {  // should hear 4 tones played 3 times
2027                 AudioFormat.ENCODING_PCM_8BIT,
2028                 AudioFormat.ENCODING_PCM_16BIT,
2029                 AudioFormat.ENCODING_PCM_FLOAT,
2030         };
2031         final int TEST_SR_ARRAY[] = {
2032                 48000,
2033         };
2034         final int TEST_CONF_ARRAY[] = {
2035                 AudioFormat.CHANNEL_OUT_STEREO,
2036         };
2037         final int TEST_WRITE_MODE_ARRAY[] = {
2038                 AudioTrack.WRITE_BLOCKING,
2039                 AudioTrack.WRITE_NON_BLOCKING,
2040         };
2041         final int TEST_MODE = AudioTrack.MODE_STREAM;
2042         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
2043         final double TEST_SWEEP = 0; // sine wave only
2044 
2045         for (int TEST_FORMAT : TEST_FORMAT_ARRAY) {
2046             double frequency = 800; // frequency changes for each test
2047             for (int TEST_SR : TEST_SR_ARRAY) {
2048                 for (int TEST_CONF : TEST_CONF_ARRAY) {
2049                     for (int TEST_WRITE_MODE : TEST_WRITE_MODE_ARRAY) {
2050                         for (int useDirect = 0; useDirect < 2; ++useDirect) {
2051                             playOnceStreamByteBuffer(TEST_NAME, frequency, TEST_SWEEP,
2052                                     TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
2053                                     TEST_MODE, TEST_WRITE_MODE,
2054                                     false /* useChannelIndex */, useDirect != 0);
2055 
2056                             // add a gap to make tones distinct
2057                             Thread.sleep(100 /* millis */);
2058                             frequency += 30; // increment test tone frequency
2059                         }
2060                     }
2061                 }
2062             }
2063         }
2064     }
2065 
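    // Hypothetical helper mirroring the ByteBuffer setup in playOnceStreamByteBuffer()
    // above: either a heap or a direct buffer may be handed to write(ByteBuffer, ...), but
    // the buffer must use the platform's native byte order for PCM samples.
    private static ByteBuffer newNativeOrderPcmBuffer(int sizeInBytes, boolean useDirect) {
        ByteBuffer bb = useDirect
                ? ByteBuffer.allocateDirect(sizeInBytes)
                : ByteBuffer.allocate(sizeInBytes);
        bb.order(java.nio.ByteOrder.nativeOrder());
        return bb;
    }
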
2066     @Test
2067     public void testPlayChannelIndexStreamBuffer() throws Exception {
2068         // should hear 4 tones played 3 or 4 times depending
2069         // on the device output capabilities (e.g. stereo or 5.1 or otherwise)
2070         final String TEST_NAME = "testPlayChannelIndexStreamBuffer";
2071         final int TEST_FORMAT_ARRAY[] = {
2072                 AudioFormat.ENCODING_PCM_8BIT,
2073                 //AudioFormat.ENCODING_PCM_16BIT,
2074                 //AudioFormat.ENCODING_PCM_FLOAT,
2075         };
2076         final int TEST_SR_ARRAY[] = {
2077                 48000,
2078         };
2079         // The following channel index masks are iterated over and route
2080         // the AudioTrack channels to the output sink channels based on
2081         // the set bits in counting order (lsb to msb).
2082         //
2083         // For a stereo output sink, the sound may come from L and R, L only, none, or R only.
2084         // For a 5.1 output sink, the sound may come from a variety of outputs
2085         // as commented below.
2086         final int TEST_CONF_ARRAY[] = { // matches output sink channels:
2087                 (1 << 0) | (1 << 1), // Stereo(L, R) 5.1(FL, FR)
2088                 (1 << 0) | (1 << 2), // Stereo(L)    5.1(FL, FC)
2089                 (1 << 4) | (1 << 5), // Stereo(None) 5.1(BL, BR)
2090                 (1 << 1) | (1 << 2), // Stereo(R)    5.1(FR, FC)
2091         };
2092         final int TEST_WRITE_MODE_ARRAY[] = {
2093                 AudioTrack.WRITE_BLOCKING,
2094                 AudioTrack.WRITE_NON_BLOCKING,
2095         };
2096         final double TEST_SWEEP = 0;
2097         final int TEST_MODE = AudioTrack.MODE_STREAM;
2098         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
2099 
2100         for (int TEST_FORMAT : TEST_FORMAT_ARRAY) {
2101             for (int TEST_CONF : TEST_CONF_ARRAY) {
2102                 double frequency = 800; // frequency changes for each test
2103                 for (int TEST_SR : TEST_SR_ARRAY) {
2104                     for (int TEST_WRITE_MODE : TEST_WRITE_MODE_ARRAY) {
2105                         for (int useDirect = 0; useDirect < 2; ++useDirect) {
2106                             playOnceStreamByteBuffer(TEST_NAME, frequency, TEST_SWEEP,
2107                                     TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
2108                                     TEST_MODE, TEST_WRITE_MODE,
2109                                     true /* useChannelIndex */, useDirect != 0);
2110 
2111                             // add a gap to make tones distinct
2112                             Thread.sleep(100 /* millis */);
2113                             frequency += 30; // increment test tone frequency
2114                         }
2115                     }
2116                 }
2117             }
2118         }
2119     }
2120 
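    // Hypothetical helper restating the channel index mask convention documented in
    // testPlayChannelIndexStreamBuffer() above: the track carries one channel per set bit,
    // routed to the output sink channels in counting order (lsb to msb).
    private static int channelCountFromIndexMask(int channelIndexMask) {
        return Integer.bitCount(channelIndexMask);
    }
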
2121     private boolean hasAudioOutput() {
2122         return getContext().getPackageManager()
2123             .hasSystemFeature(PackageManager.FEATURE_AUDIO_OUTPUT);
2124     }
2125 
2126     private boolean isLowLatencyDevice() {
2127         return getContext().getPackageManager()
2128             .hasSystemFeature(PackageManager.FEATURE_AUDIO_LOW_LATENCY);
2129     }
2130 
2131     private boolean isLowRamDevice() {
2132         return ((ActivityManager) getContext().getSystemService(Context.ACTIVITY_SERVICE))
2133                 .isLowRamDevice();
2134     }
2135 
2136     private boolean isProAudioDevice() {
2137         return getContext().getPackageManager().hasSystemFeature(
2138                 PackageManager.FEATURE_AUDIO_PRO);
2139     }
2140 
2141     @Test
2142     public void testGetTimestamp() throws Exception {
2143         if (!hasAudioOutput()) {
2144             Log.w(TAG, "AUDIO_OUTPUT feature not found. This system might not have a valid "
2145                     + "audio output HAL");
2146             return;
2147         }
2148         String streamName = "test_get_timestamp";
2149         doTestTimestamp(
2150                 22050 /* sampleRate */,
2151                 AudioFormat.CHANNEL_OUT_MONO,
2152                 AudioFormat.ENCODING_PCM_16BIT,
2153                 AudioTrack.MODE_STREAM,
2154                 streamName);
2155     }
2156 
2157     @Test
2158     public void testFastTimestamp() throws Exception {
2159         if (!hasAudioOutput()) {
2160             Log.w(TAG, "AUDIO_OUTPUT feature not found. This system might not have a valid "
2161                     + "audio output HAL");
2162             return;
2163         }
2164         String streamName = "test_fast_timestamp";
2165         doTestTimestamp(
2166                 AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC),
2167                 AudioFormat.CHANNEL_OUT_MONO,
2168                 AudioFormat.ENCODING_PCM_16BIT,
2169                 AudioTrack.MODE_STREAM,
2170                 streamName);
2171     }
2172 
2173     // Note: this test may fail if playing through a remote device such as Bluetooth.
2174     private void doTestTimestamp(int sampleRate, int channelMask, int encoding, int transferMode,
2175             String streamName) throws Exception {
2176         // constants for test
2177         final int TEST_LOOP_CNT = 10;
2178         final int TEST_BUFFER_MS = 100;
2179         final int TEST_USAGE = AudioAttributes.USAGE_MEDIA;
2180 
2181         final int MILLIS_PER_SECOND = 1000;
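        // Allow one test buffer's worth of frames of slack in the drain checks below,
        // e.g. 22050 Hz * 100 ms / 1000 = 2205 frames.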
2182         final int FRAME_TOLERANCE = sampleRate * TEST_BUFFER_MS / MILLIS_PER_SECOND;
2183 
2184         // -------- initialization --------------
2185         final int frameSize =
2186                 AudioFormat.getBytesPerSample(encoding)
2187                 * AudioFormat.channelCountFromOutChannelMask(channelMask);
2188         // see whether we can use fast mode
2189         final int nativeOutputSampleRate =
2190                 AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC);
2191         Log.d(TAG, "Native output sample rate " + nativeOutputSampleRate);
2192         final boolean fast = (sampleRate == nativeOutputSampleRate);
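        // FLAG_LOW_LATENCY (a FAST track request) is only added when the track rate matches the
        // mixer's native rate, where the fast path is generally available.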
2193 
2194         AudioAttributes attributes = (fast ? new AudioAttributes.Builder()
2195                 .setFlags(AudioAttributes.FLAG_LOW_LATENCY) : new AudioAttributes.Builder())
2196                 .setUsage(TEST_USAGE)
2197                 .build();
2198         AudioFormat format = new AudioFormat.Builder()
2199                 //.setChannelIndexMask((1 << AudioFormat.channelCountFromOutChannelMask(channelMask)) - 1)
2200                 .setChannelMask(channelMask)
2201                 .setEncoding(encoding)
2202                 .setSampleRate(sampleRate)
2203                 .build();
2204         // not specifying the buffer size in the builder should get us the minimum buffer size.
2205         AudioTrack track = new AudioTrack.Builder()
2206                 .setAudioAttributes(attributes)
2207                 .setAudioFormat(format)
2208                 .setTransferMode(transferMode)
2209                 .build();
2210         assertEquals(AudioTrack.STATE_INITIALIZED, track.getState());
2211 
2212         try {
2213             // We generally use a transfer size of 100ms for testing, but in rare cases
2214             // (e.g. Bluetooth) this needs to be larger to exceed the internal track buffer.
2215             final int frameCount =
2216                     Math.max(track.getBufferCapacityInFrames(),
2217                             sampleRate * TEST_BUFFER_MS / MILLIS_PER_SECOND);
2218             track.play();
2219 
2220             // Android nanoTime implements MONOTONIC, same as our audio timestamps.
2221 
2222             final ByteBuffer data = ByteBuffer.allocate(frameCount * frameSize);
2223             data.order(java.nio.ByteOrder.nativeOrder()).limit(frameCount * frameSize);
2224             final AudioTimestamp timestamp = new AudioTimestamp();
2225 
2226             long framesWritten = 0;
2227 
2228             // We start data delivery twice, the second start simulates restarting
2229             // the track after a fully drained underrun (important case for Android TV).
2230             for (int start = 0; start < 2; ++start) {
2231                 final long trackStartTimeNs = System.nanoTime();
2232                 final AudioHelper.TimestampVerifier tsVerifier =
2233                         new AudioHelper.TimestampVerifier(
2234                                 TAG + "(start " + start + ")",
2235                                 sampleRate, framesWritten, isProAudioDevice());
2236                 for (int i = 0; i < TEST_LOOP_CNT; ++i) {
2237                     final long trackWriteTimeNs = System.nanoTime();
2238 
2239                     data.position(0);
2240                     assertEquals("write did not complete",
2241                             data.limit(), track.write(data, data.limit(),
2242                             AudioTrack.WRITE_BLOCKING));
2243                     assertEquals("write did not fill buffer",
2244                             data.position(), data.limit());
2245                     framesWritten += data.limit() / frameSize;
2246 
2247                     // track.getTimestamp may return false if there are no physical HAL outputs.
2248                     // This may occur on TV devices without connecting an HDMI monitor.
2249                     // It may also return false immediately after start-up, as the mixing thread
2250                     // could still be idle, but since we've already pushed much more than the
2251                     // minimum buffer size, that is unlikely.
2252                     // Nevertheless, we don't want to have unnecessary failures, so we ignore the
2253                     // first iteration if we don't get a timestamp.
2254                     final boolean result = track.getTimestamp(timestamp);
2255                     assertTrue("timestamp could not be read", result || i == 0);
2256                     if (!result) {
2257                         continue;
2258                     }
2259 
2260                     tsVerifier.add(timestamp);
2261 
2262                     // Ensure that seen is greater than presented.
2263                     // This is an "on-the-fly" read without pausing because pausing may cause the
2264                     // timestamp to become stale and affect our jitter measurements.
2265                     final long framesPresented = timestamp.framePosition;
2266                     final int framesSeen = track.getPlaybackHeadPosition();
2267                     assertTrue("server frames ahead of client frames",
2268                             framesWritten >= framesSeen);
2269                     assertTrue("presented frames ahead of server frames",
2270                             framesSeen >= framesPresented);
2271                 }
2272                 // Full drain.
2273                 Thread.sleep(1000 /* millis */);
2274                 // check that we are really at the end of playback.
2275                 assertTrue("timestamp should be valid while draining",
2276                         track.getTimestamp(timestamp));
2277                 // Fast tracks and sw emulated tracks may not fully drain.
2278                 // We log the status here.
2279                 if (framesWritten != timestamp.framePosition) {
2280                     Log.d(TAG, "timestamp should fully drain.  written: "
2281                             + framesWritten + " position: " + timestamp.framePosition);
2282                 }
2283                 final long framesLowerLimit = framesWritten - FRAME_TOLERANCE;
2284                 assertTrue("timestamp frame position needs to be close to written: "
2285                                 + timestamp.framePosition  + " >= " + framesLowerLimit,
2286                         timestamp.framePosition >= framesLowerLimit);
2287 
2288                 assertTrue("timestamp should not advance during underrun: "
2289                         + timestamp.framePosition  + " <= " + framesWritten,
2290                         timestamp.framePosition <= framesWritten);
2291 
2292                 tsVerifier.verifyAndLog(trackStartTimeNs, streamName);
2293             }
2294         } finally {
2295             track.release();
2296         }
2297     }
2298 
2299     @Test
2300     public void testVariableRatePlayback() throws Exception {
2301         final String TEST_NAME = "testVariableRatePlayback";
2302         final int TEST_SR = 24000;
2303         final int TEST_FINAL_SR = 96000;
2304         final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
2305         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; // required for test
2306         final int TEST_MODE = AudioTrack.MODE_STATIC; // required for test
2307         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
2308 
2309         final int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
2310         final int bufferSizeInBytes = minBuffSize * 100;
2311         final int numChannels =  AudioFormat.channelCountFromOutChannelMask(TEST_CONF);
2312         final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT);
2313         final int bytesPerFrame = numChannels * bytesPerSample;
2314         final int frameCount = bufferSizeInBytes / bytesPerFrame;
2315 
2316         AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF,
2317                 TEST_FORMAT, bufferSizeInBytes, TEST_MODE);
2318 
2319         // create byte array and write it
2320         byte[] vai = AudioHelper.createSoundDataInByteArray(bufferSizeInBytes, TEST_SR,
2321                 600 /* frequency */, 0 /* sweep */);
2322         assertEquals(vai.length, track.write(vai, 0 /* offsetInBytes */, vai.length));
2323 
2324         // sweep up test and sweep down test
2325         int[] sampleRates = {TEST_SR, TEST_FINAL_SR};
2326         int[] deltaMss = {10, 10};
2327         int[] deltaFreqs = {200, -200};
2328 
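        // First pass ramps the rate up from TEST_SR toward TEST_FINAL_SR in +200 Hz steps every
        // 10 ms; second pass ramps it back down, clamped to [TEST_SR, TEST_FINAL_SR].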
2329         for (int i = 0; i < 2; ++i) {
2330             int remainingTime;
2331             int sampleRate = sampleRates[i];
2332             final int deltaMs = deltaMss[i];
2333             final int deltaFreq = deltaFreqs[i];
2334             final int lastCheckMs = 500; // check the last 500 ms
2335 
2336             assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackRate(sampleRate));
2337             track.play();
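            // Keep stepping the playback rate until only about lastCheckMs (500 ms) of the
            // static buffer remains, then measure head-position advance over that final window.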
2338             do {
2339                 Thread.sleep(deltaMs);
2340                 final int position = track.getPlaybackHeadPosition();
2341                 sampleRate += deltaFreq;
2342                 sampleRate = Math.min(TEST_FINAL_SR, Math.max(TEST_SR, sampleRate));
2343                 assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackRate(sampleRate));
2344                 remainingTime = (int)((double)(frameCount - position) * 1000
2345                         / sampleRate); // frameCount and position are both in frames
2346             } while (remainingTime >= lastCheckMs + deltaMs);
2347 
2348             // ensure the final frequency set is constant and plays frames as expected
2349             final int position1 = track.getPlaybackHeadPosition();
2350             Thread.sleep(lastCheckMs);
2351             final int position2 = track.getPlaybackHeadPosition();
2352 
2353             final int toleranceMs = isLowLatencyDevice() ? 60 : 100;
2354             final int toleranceInFrames = toleranceMs * sampleRate / 1000;
2355             final int expected = lastCheckMs * sampleRate / 1000;
2356             final int actual = position2 - position1;
2357 
2358             // Log.d(TAG, "Variable Playback: expected(" + expected + ")  actual(" + actual
2359             //        + ")  diff(" + (expected - actual) + ")");
2360             assertEquals(expected, actual, toleranceInFrames);
2361             track.stop();
2362         }
2363         track.release();
2364     }
2365 
2366     // Test that AudioTrack stop limits drain to only those frames written at the time of stop.
2367     // This ensures consistent stop behavior on Android P and beyond, where data written
2368     // immediately after a stop doesn't get caught in the drain.
2369     @LargeTest
2370     @Test
2371     public void testStopDrain() throws Exception {
2372         final String TEST_NAME = "testStopDrain";
2373         final int TEST_SR = 8000;
2374         final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO; // required for test
2375         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; // required for test
2376         final int TEST_MODE = AudioTrack.MODE_STREAM; // required for test
2377         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
2378 
2379         final int channelCount = AudioFormat.channelCountFromOutChannelMask(TEST_CONF);
2380         final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT);
2381         final int bytesPerFrame = channelCount * bytesPerSample;
2382         final int frameCount = TEST_SR * 3; // 3 seconds of buffer.
2383         final int bufferSizeInBytes = frameCount * bytesPerFrame;
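        // With 8-bit mono PCM each frame is 1 byte, so this is 3 s * 8000 frames/s = 24000 bytes.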
2384 
2385         final AudioTrack track = new AudioTrack(
2386                 TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, bufferSizeInBytes, TEST_MODE);
2387 
2388         try {
2389             // Create 6 seconds of data, but send down only 3 seconds to fill buffer.
2390             final byte[] soundData = AudioHelper.createSoundDataInByteArray(
2391                     bufferSizeInBytes * 2, TEST_SR, 600 /* frequency */, 0 /* sweep */);
2392             assertEquals("cannot fill AudioTrack buffer",
2393                     bufferSizeInBytes,
2394                     track.write(soundData, 0 /* offsetInBytes */, bufferSizeInBytes));
2395 
2396             // Set the track playing.
2397             track.play();
2398 
2399             // Note that the timings here are very generous for our test (really the
2400             // granularity we need is on the order of a second).  If we don't get scheduled
2401             // to run within about a second or so - this should be extremely rare -
2402             // the result should be a false pass (rather than a false fail).
2403 
2404             // After 1.5 seconds stop.
2405             Thread.sleep(1500 /* millis */); // Assume device starts within 1.5 sec.
2406             track.stop();
2407 
2408             // We should drain 1.5 seconds and fill another 3 seconds of data.
2409             // We shouldn't be able to write 6 seconds of data - that indicates stop continues
2410             // to drain beyond the frames written at the time of stop.
2411             int length = 0;
2412             while (length < soundData.length) {
2413                 Thread.sleep(800 /* millis */); // assume larger than AF thread loop period
2414                 final int delta = track.write(soundData, length, soundData.length - length);
2415                 assertTrue("track write error: " + delta, delta >= 0);
2416                 if (delta == 0) break;
2417                 length += delta;
2418             }
2419 
2420             // Check to see we limit the data drained (should be able to exactly fill the buffer).
2421             assertEquals("stop drain must be limited " + bufferSizeInBytes + " != " + length,
2422                     bufferSizeInBytes, length);
2423         } finally {
2424             track.release();
2425         }
2426     }
2427 
2428     @Test
2429     public void testVariableSpeedPlayback() throws Exception {
2430         if (!hasAudioOutput()) {
2431             Log.w(TAG, "AUDIO_OUTPUT feature not found. This system might not have a valid "
2432                     + "audio output HAL");
2433             return;
2434         }
2435 
2436         final String TEST_NAME = "testVariableSpeedPlayback";
2437         final int testChannelMask = AudioFormat.CHANNEL_OUT_MONO;
2438         final int TEST_FORMAT = AudioFormat.ENCODING_PCM_FLOAT; // required for test
2439         final int TEST_MODE = AudioTrack.MODE_STATIC;           // required for test
2440         final int TEST_SR = 48000;
2441         final int minBufferSize = AudioTrack.getMinBufferSize(
2442                 TEST_SR, testChannelMask, TEST_FORMAT);
2443 
2444         AudioFormat format = new AudioFormat.Builder()
2445                 //.setChannelIndexMask((1 << 0))  // output to first channel, FL
2446                 .setChannelMask(testChannelMask)
2447                 .setEncoding(TEST_FORMAT)
2448                 .setSampleRate(TEST_SR)
2449                 .build();
2450 
2451         // create track
2452         final int frameCount = AudioHelper.frameCountFromMsec(100 /*ms*/, format);
2453         final int frameSize = AudioHelper.frameSizeFromFormat(format);
2454         AudioTrack track = new AudioTrack.Builder()
2455                 .setAudioFormat(format)
2456                 .setBufferSizeInBytes(frameCount * frameSize)
2457                 .setTransferMode(TEST_MODE)
2458                 .build();
2459 
2460         // create float array and write it
2461         final int sampleCount = frameCount * format.getChannelCount();
2462         float[] vaf = AudioHelper.createSoundDataInFloatArray(
2463                 sampleCount, TEST_SR, 600 /* frequency */, 0 /* sweep */);
2464         assertEquals(vaf.length, track.write(vaf, 0 /* offsetInFloats */, vaf.length,
2465                 AudioTrack.WRITE_NON_BLOCKING));
2466 
2467         // sweep speed and pitch
2468         final float[][][] speedAndPitch = {
2469              // { {speedStart, pitchStart} {speedEnd, pitchEnd} }
2470                 { {0.5f, 0.5f}, {2.0f, 2.0f} },  // speed by SR conversion (chirp)
2471                 { {0.5f, 1.0f}, {2.0f, 1.0f} },  // speed by time stretch (constant pitch)
2472                 { {1.0f, 0.5f}, {1.0f, 2.0f} },  // pitch by SR conversion (chirp)
2473         };
2474 
2475         // test that playback params works as expected
2476         PlaybackParams params = new PlaybackParams().allowDefaults();
2477         assertEquals("default speed not correct", 1.0f, params.getSpeed(), 0.f /* delta */);
2478         assertEquals("default pitch not correct", 1.0f, params.getPitch(), 0.f /* delta */);
2479         assertEquals(TEST_NAME,
2480                 params.AUDIO_FALLBACK_MODE_DEFAULT,
2481                 params.getAudioFallbackMode());
2482         track.setPlaybackParams(params); // OK
2483         params.setAudioFallbackMode(params.AUDIO_FALLBACK_MODE_FAIL);
2484         assertEquals(TEST_NAME,
2485                 params.AUDIO_FALLBACK_MODE_FAIL, params.getAudioFallbackMode());
2486         params.setPitch(0.0f);
2487         try {
2488             track.setPlaybackParams(params);
2489             fail("IllegalArgumentException should be thrown on out of range data");
2490         } catch (IllegalArgumentException e) {
2491             ; // expect this is invalid
2492         }
2493         // on failure, the AudioTrack params should not change.
2494         PlaybackParams paramCheck = track.getPlaybackParams();
2495         assertEquals(TEST_NAME,
2496                 paramCheck.AUDIO_FALLBACK_MODE_DEFAULT, paramCheck.getAudioFallbackMode());
2497         assertEquals("pitch should be unchanged on failure",
2498                 1.0f, paramCheck.getPitch(), 0. /* delta */);
2499 
2500         // now try to see if we can do extreme pitch correction that should probably be muted.
2501         params.setAudioFallbackMode(params.AUDIO_FALLBACK_MODE_MUTE);
2502         assertEquals(TEST_NAME,
2503                 params.AUDIO_FALLBACK_MODE_MUTE, params.getAudioFallbackMode());
2504         params.setPitch(0.1f);
2505         track.setPlaybackParams(params); // OK
2506 
2507         // now do our actual playback
2508         final int TEST_TIME_MS = 2000;
2509         final int TEST_DELTA_MS = 100;
2510         final int testSteps = TEST_TIME_MS / TEST_DELTA_MS;
2511 
2512         for (int i = 0; i < speedAndPitch.length; ++i) {
2513             final float speedStart = speedAndPitch[i][0][0];
2514             final float pitchStart = speedAndPitch[i][0][1];
2515             final float speedEnd = speedAndPitch[i][1][0];
2516             final float pitchEnd = speedAndPitch[i][1][1];
2517             final float speedInc = (speedEnd - speedStart) / testSteps;
2518             final float pitchInc = (pitchEnd - pitchStart) / testSteps;
2519 
2520             PlaybackParams playbackParams = new PlaybackParams()
2521                     .setPitch(pitchStart)
2522                     .setSpeed(speedStart)
2523                     .allowDefaults();
2524 
2525             // set track in infinite loop to be a sine generator
2526             track.setLoopPoints(0, frameCount, -1 /* loopCount */); // cleared by stop()
2527             track.play();
2528 
2529             Thread.sleep(300 /* millis */); // warm up track
2530 
2531             int anticipatedPosition = track.getPlaybackHeadPosition();
2532             long timeNs = System.nanoTime();
2533             final long startTimeNs = timeNs;
2534             for (int j = 0; j < testSteps; ++j) {
2535                 // set playback settings
2536                 final float pitch = playbackParams.getPitch();
2537                 final float speed = playbackParams.getSpeed();
2538 
2539                 track.setPlaybackParams(playbackParams);
2540 
2541                 // verify that settings have changed
2542                 PlaybackParams checkParams = track.getPlaybackParams();
2543                 assertEquals("pitch not changed correctly",
2544                         pitch, checkParams.getPitch(), 0. /* delta */);
2545                 assertEquals("speed not changed correctly",
2546                         speed, checkParams.getSpeed(), 0. /* delta */);
2547 
2548                 // sleep for playback
2549                 Thread.sleep(TEST_DELTA_MS);
2550                 final long newTimeNs = System.nanoTime();
2551                 // Log.d(TAG, "position[" + j + "] " + track.getPlaybackHeadPosition());
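                // Expected advance for this step: speed * elapsed_seconds * TEST_SR, in frames.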
2552                 anticipatedPosition +=
2553                         playbackParams.getSpeed() * ((newTimeNs - timeNs) * TEST_SR / 1000000000f);
2554                 timeNs = newTimeNs;
2555                 playbackParams.setPitch(playbackParams.getPitch() + pitchInc);
2556                 playbackParams.setSpeed(playbackParams.getSpeed() + speedInc);
2557             }
2558             final int endPosition = track.getPlaybackHeadPosition();
2559             final int tolerance100MsInFrames = 100 * TEST_SR / 1000;
2560             final int toleranceInFrames = Math.max(tolerance100MsInFrames,
2561                     (int) (minBufferSize / frameSize));
2562             Log.d(TAG, "Total playback time: " + (timeNs - startTimeNs) / 1000000
2563                     + " ms, tolerance: " + toleranceInFrames + " frames");
2564             assertEquals(TAG, anticipatedPosition, endPosition, toleranceInFrames);
2565             track.stop();
2566 
2567             Thread.sleep(100 /* millis */); // distinct pause between each test
2568         }
2569         track.release();
2570     }
2571 
2572     // Test AudioTrack to ensure we can build after a failure.
2573     @Test
2574     public void testAudioTrackBufferSize() throws Exception {
2575         // constants for test
2576         final String TEST_NAME = "testAudioTrackBufferSize";
2577 
2578         // use builder with parameters that should fail
2579         final int superBigBufferSize = 1 << 28;
2580         try {
2581             final AudioTrack track = new AudioTrack.Builder()
2582                 .setBufferSizeInBytes(superBigBufferSize)
2583                 .build();
2584             track.release();
2585             fail(TEST_NAME + ": should throw exception on failure");
2586         } catch (UnsupportedOperationException e) {
2587             ;
2588         }
2589 
2590         // we should be able to create again with minimum buffer size
2591         final int verySmallBufferSize = 2 * 3 * 4; // 24 bytes, a multiple of common PCM frame sizes
2592         final AudioTrack track2 = new AudioTrack.Builder()
2593                 .setBufferSizeInBytes(verySmallBufferSize)
2594                 .build();
2595 
2596         final int observedState2 = track2.getState();
2597         final int observedBufferSize2 = track2.getBufferSizeInFrames();
2598         track2.release();
2599 
2600         // succeeds for minimum buffer size
2601         assertEquals(TEST_NAME + ": state", AudioTrack.STATE_INITIALIZED, observedState2);
2602         // should force the minimum size buffer which is > 0
2603         assertTrue(TEST_NAME + ": buffer frame count", observedBufferSize2 > 0);
2604     }
2605 
2606     // Test AudioTrack to see if there are any problems with large frame counts.
2607     @Test
2608     public void testAudioTrackLargeFrameCount() throws Exception {
2609         // constants for test
2610         final String TEST_NAME = "testAudioTrackLargeFrameCount";
2611         final int[] BUFFER_SIZES = { 4294968, 42949680, 429496800, Integer.MAX_VALUE };
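        // Sizes at and beyond 2^32 / 1000 bytes (1 byte per frame here), presumably chosen to
        // probe 32-bit overflow in internal frame/millisecond conversions.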
2612         final int[] MODES = { AudioTrack.MODE_STATIC, AudioTrack.MODE_STREAM };
2613 
2614         for (int mode : MODES) {
2615             for (int bufferSizeInBytes : BUFFER_SIZES) {
2616                 try {
2617                     final AudioTrack track = new AudioTrack.Builder()
2618                         .setAudioFormat(new AudioFormat.Builder()
2619                             .setEncoding(AudioFormat.ENCODING_PCM_8BIT)
2620                             .setSampleRate(44100)
2621                             .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
2622                             .build())
2623                         .setTransferMode(mode)
2624                         .setBufferSizeInBytes(bufferSizeInBytes) // 1 byte == 1 frame
2625                         .build();
2626                     track.release(); // OK to successfully complete
2627                 } catch (UnsupportedOperationException e) {
2628                     ; // OK to throw unsupported exception
2629                 }
2630             }
2631         }
2632     }
2633 
2634     @Test
2635     public void testSetNullPresentation() throws Exception {
2636         final AudioTrack track = new AudioTrack.Builder().build();
2637         assertThrows(IllegalArgumentException.class, () -> {
2638             track.setPresentation(null);
2639         });
2640     }
2641 
2642     @Test
2643     public void testAc3BuilderNoBufferSize() throws Exception {
2644         AudioFormat format = new AudioFormat.Builder()
2645             .setEncoding(AudioFormat.ENCODING_AC3)
2646             .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
2647             .setSampleRate(48000)
2648             .build();
2649         try {
2650             AudioTrack audioTrack = new AudioTrack.Builder()
2651                 .setAudioFormat(format)
2652                 .setBufferSizeInBytes(100)
2653                 .build();
2654             audioTrack.release();
2655             Thread.sleep(200);
2656         } catch (UnsupportedOperationException e) {
2657             // Do nothing. It's OK for a device to not support ac3 audio tracks.
2658             return;
2659         }
2660         // If an AC3 AudioTrack succeeds with an explicit buffer size, the builder should also
2661         // succeed when the buffer size isn't set, letting the framework choose the recommended size.
2662         try {
2663             AudioTrack audioTrack = new AudioTrack.Builder()
2664                 .setAudioFormat(format)
2665                 .build();
2666             audioTrack.release();
2667         } catch (UnsupportedOperationException e) {
2668             // This builder should not fail as the first builder succeeded when setting buffer size
2669             fail("UnsupportedOperationException should not be thrown when setBufferSizeInBytes"
2670                   + " is excluded from builder");
2671         }
2672     }
2673 
2674     @Test
2675     public void testSetPresentationDefaultTrack() throws Exception {
2676         final AudioTrack track = new AudioTrack.Builder().build();
2677         assertEquals(AudioTrack.ERROR, track.setPresentation(createAudioPresentation()));
2678     }
2679 
2680     @Test
2681     public void testIsDirectPlaybackSupported() throws Exception {
2682         // constants for test
2683         final String TEST_NAME = "testIsDirectPlaybackSupported";
2684         // Default format leaves everything unspecified
2685         assertFalse(AudioTrack.isDirectPlaybackSupported(
2686                         new AudioFormat.Builder().build(),
2687                         new AudioAttributes.Builder().build()));
2688         // There is no requirement to support direct playback for this format,
2689         // so it's not possible to assert on the result, but at least the method
2690         // must execute with no exceptions.
2691         boolean isPcmStereo48kSupported = AudioTrack.isDirectPlaybackSupported(
2692                 new AudioFormat.Builder()
2693                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
2694                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
2695                 .setSampleRate(48000)
2696                 .build(),
2697                 new AudioAttributes.Builder().build());
2698         log(TEST_NAME, "PCM Stereo 48 kHz: " + isPcmStereo48kSupported);
2699     }
2700 
2701     @Test
2702     public void testMediaMetrics() throws Exception {
2703         if (!hasAudioOutput()) {
2704             return;
2705         }
2706 
2707         AudioTrack track = null;
2708         try {
2709             final int TEST_SAMPLE_RATE = 44100;
2710             final int TEST_CHANNEL_MASK = AudioFormat.CHANNEL_OUT_STEREO;
2711             final int TEST_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
2712             final AudioFormat format = new AudioFormat.Builder()
2713                 .setSampleRate(TEST_SAMPLE_RATE)
2714                 .setChannelMask(TEST_CHANNEL_MASK)
2715                 .setEncoding(TEST_ENCODING)
2716                 .build();
2717 
2718             final int TEST_USAGE = AudioAttributes.USAGE_MEDIA;
2719             final int TEST_CONTENT_TYPE = AudioAttributes.CONTENT_TYPE_MUSIC;
2720             final AudioAttributes attributes = new AudioAttributes.Builder()
2721                 .setUsage(TEST_USAGE)
2722                 .setContentType(TEST_CONTENT_TYPE)
2723                 .build();
2724 
2725             // Setup a new audio track
2726             track = new AudioTrack.Builder()
2727                 .setAudioFormat(format)
2728                 .setAudioAttributes(attributes)
2729                 .build();
2730 
2731             final PersistableBundle metrics = track.getMetrics();
2732             assertNotNull("null metrics", metrics);
2733 
2734             // The STREAMTYPE constant was generally not present in P, and when it was,
2735             // it was incorrectly exposed as an integer.
2736             AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.STREAMTYPE,
2737                     "AUDIO_STREAM_MUSIC");
2738             AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.CONTENTTYPE,
2739                     "AUDIO_CONTENT_TYPE_MUSIC");
2740             AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.USAGE,
2741                     "AUDIO_USAGE_MEDIA");
2742 
2743             // AudioTrack.MetricsConstants.SAMPLERATE, metrics doesn't exist
2744             // AudioTrack.MetricsConstants.CHANNELMASK, metrics doesn't exist
2745 
2746             // TestApi:
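            // Note: CHANNEL_MASK appears to be reported in the native representation; the SDK
            // output mask is the native mask shifted left by 2, hence the ">> 2" below.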
2747             AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.SAMPLE_RATE,
2748                     Integer.valueOf(track.getSampleRate()));
2749             AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.CHANNEL_MASK,
2750                     Long.valueOf(TEST_CHANNEL_MASK >> 2));
2751             AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.ENCODING,
2752                     "AUDIO_FORMAT_PCM_16_BIT");
2753             AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.FRAME_COUNT,
2754                     Integer.valueOf(track.getBufferSizeInFrames()));
2755 
2756             // TestApi: no particular value checking.
2757             AudioHelper.assertMetricsKey(metrics, AudioTrack.MetricsConstants.PORT_ID);
2758             AudioHelper.assertMetricsKey(metrics, AudioTrack.MetricsConstants.ATTRIBUTES);
2759         } finally {
2760             if (track != null) {
2761                 track.release();
2762             }
2763         }
2764     }
2765 
2766     @Test
2767     public void testMaxAudioTracks() throws Exception {
2768         if (!hasAudioOutput()) {
2769             return;
2770         }
2771 
2772         // The framework must not give more than MAX_TRACKS tracks per UID.
2773         final int MAX_TRACKS = 512; // an arbitrarily large number (> 40)
2774         final int FRAMES = 1024;
2775 
2776         final AudioTrack[] tracks = new AudioTrack[MAX_TRACKS];
2777         final AudioTrack.Builder builder = new AudioTrack.Builder()
2778             .setAudioFormat(new AudioFormat.Builder()
2779                 .setEncoding(AudioFormat.ENCODING_PCM_8BIT)
2780                 .setSampleRate(8000)
2781                 .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
2782                 .build())
2783             .setBufferSizeInBytes(FRAMES)
2784             .setTransferMode(AudioTrack.MODE_STATIC);
2785 
2786         int n = 0;
2787         try {
2788             for (; n < MAX_TRACKS; ++n) {
2789                 tracks[n] = builder.build();
2790             }
2791         } catch (UnsupportedOperationException e) {
2792             ; // we expect this when we hit the uid track limit.
2793         }
2794 
2795         // release all the tracks created.
2796         for (int i = 0; i < n; ++i) {
2797             tracks[i].release();
2798             tracks[i] = null;
2799         }
2800         Log.d(TAG, "" + n + " tracks were created");
2801         assertTrue("should be able to create at least one static track", n > 0);
2802         assertTrue("was able to create " + MAX_TRACKS + " tracks - that's too many!",
2803             n < MAX_TRACKS);
2804     }
2805 
2806     @Test
2807     public void testTunerConfiguration() throws Exception {
2808         if (!hasAudioOutput()) {
2809             return;
2810         }
2811 
2812         assertThrows(
2813             IllegalArgumentException.class,
2814             () -> {
2815                 final AudioTrack.TunerConfiguration badConfig =
2816                     new AudioTrack.TunerConfiguration(-1 /* contentId */, 1 /* syncId */);
2817             });
2818 
2819         assertThrows(
2820             IllegalArgumentException.class,
2821             () -> {
2822                 final AudioTrack.TunerConfiguration badConfig =
2823                     new AudioTrack.TunerConfiguration(1 /* contentId*/, 0 /* syncId */);
2824             });
2825         assertThrows(
2826             IllegalArgumentException.class,
2827             () -> {
2828                 final AudioTrack track = new AudioTrack.Builder()
2829                     .setEncapsulationMode(-1)
2830                     .build();
2831                 track.release();
2832             });
2833 
2834         assertThrows(
2835             IllegalArgumentException.class,
2836             () -> {
2837                 final AudioTrack track = new AudioTrack.Builder()
2838                     .setTunerConfiguration(null)
2839                     .build();
2840                 track.release();
2841             });
2842 
2843         // this should work.
2844         int[][] contentSyncPairs = {
2845             {1, 2},
2846             {AudioTrack.TunerConfiguration.CONTENT_ID_NONE, 42},
2847         };
2848         for (int[] pair : contentSyncPairs) {
2849             final int contentId = pair[0];
2850             final int syncId = pair[1];
2851             final AudioTrack.TunerConfiguration tunerConfiguration =
2852                     new AudioTrack.TunerConfiguration(contentId, syncId);
2853 
2854             assertEquals("contentId must be set", contentId, tunerConfiguration.getContentId());
2855             assertEquals("syncId must be set", syncId, tunerConfiguration.getSyncId());
2856 
2857             // this may fail on creation, not in any setters.
2858             AudioTrack track = null;
2859             try {
2860                 track = new AudioTrack.Builder()
2861                         .setEncapsulationMode(AudioTrack.ENCAPSULATION_MODE_NONE)
2862                         .setTunerConfiguration(tunerConfiguration)
2863                         .build();
2864             } catch (UnsupportedOperationException e) {
2865                 ; // creation failure is OK as TunerConfiguration requires HW support,
2866                 // however other exception failures are not OK.
2867             } finally {
2868                 if (track != null) {
2869                     track.release();
2870                 }
2871             }
2872         }
2873     }
2874 
2875     @Test
2876     public void testCodecFormatChangedListener() throws Exception {
2877         if (!hasAudioOutput()) {
2878             return;
2879         }
2880 
2881         final AudioTrack audioTrack = new AudioTrack.Builder().build();
2882 
2883         assertThrows(
2884             NullPointerException.class,
2885             () -> { audioTrack.addOnCodecFormatChangedListener(
2886                     null /* executor */, null /* listener */); });
2887 
2888         assertThrows(
2889             NullPointerException.class,
2890             () -> { audioTrack.removeOnCodecFormatChangedListener(null /* listener */); });
2891 
2892 
2893         final AudioTrack.OnCodecFormatChangedListener listener =
2894             (AudioTrack track, AudioMetadataReadMap readMap) -> {};
2895 
2896         // add a synchronous executor.
2897         audioTrack.addOnCodecFormatChangedListener(new Executor() {
2898                 @Override
2899                 public void execute(Runnable r) {
2900                     r.run();
2901                 }
2902             }, listener);
2903         audioTrack.removeOnCodecFormatChangedListener(listener);
2904         audioTrack.release();
2905     }
2906 
2907     @Test
2908     public void testDualMonoMode() throws Exception {
2909         if (!hasAudioOutput()) {
2910             return;
2911         }
2912 
2913         final AudioTrack audioTrack = new AudioTrack.Builder().build();
2914 
2915         // Note that the output device may not support Dual Mono mode.
2916         // The following path should always succeed.
2917         audioTrack.setDualMonoMode(AudioTrack.DUAL_MONO_MODE_OFF);
2918         assertEquals(AudioTrack.DUAL_MONO_MODE_OFF, audioTrack.getDualMonoMode());
2919 
2920         // throws IAE on invalid argument.
2921         assertThrows(
2922             IllegalArgumentException.class,
2923             () -> { audioTrack.setDualMonoMode(-1); }
2924         );
2925 
2926         // check behavior after release.
2927         audioTrack.release();
2928         assertThrows(
2929             IllegalStateException.class,
2930             () -> { audioTrack.setDualMonoMode(AudioTrack.DUAL_MONO_MODE_OFF); }
2931         );
2932         assertEquals(AudioTrack.DUAL_MONO_MODE_OFF, audioTrack.getDualMonoMode());
2933     }
2934 
2935     @Test
2936     public void testAudioDescriptionMixLevel() throws Exception {
2937         if (!hasAudioOutput()) {
2938             return;
2939         }
2940 
2941         final AudioTrack audioTrack = new AudioTrack.Builder().build();
2942 
2943         // Note that the output device may not support Audio Description Mix Level.
2944         // The following path should always succeed.
2945         audioTrack.setAudioDescriptionMixLeveldB(Float.NEGATIVE_INFINITY);
2946         assertEquals(Float.NEGATIVE_INFINITY,
2947                 audioTrack.getAudioDescriptionMixLeveldB(), 0.f /*delta*/);
2948 
2949         // throws IAE on invalid argument.
2950         assertThrows(
2951             IllegalArgumentException.class,
2952             () -> { audioTrack.setAudioDescriptionMixLeveldB(1e6f); }
2953         );
2954 
2955         // check behavior after release.
2956         audioTrack.release();
2957         assertThrows(
2958             IllegalStateException.class,
2959             () -> { audioTrack.setAudioDescriptionMixLeveldB(0.f); }
2960         );
2961         assertEquals(Float.NEGATIVE_INFINITY,
2962             audioTrack.getAudioDescriptionMixLeveldB(), 0.f /*delta*/);
2963     }
2964 
2965     @Test
2966     public void testSetLogSessionId() throws Exception {
2967         if (!hasAudioOutput()) {
2968             return;
2969         }
2970         AudioTrack audioTrack = null;
2971         try {
2972             audioTrack = new AudioTrack.Builder()
2973                     .setAudioFormat(new AudioFormat.Builder()
2974                             .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
2975                             .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
2976                             .build())
2977                     .build();
2978             audioTrack.setLogSessionId(LogSessionId.LOG_SESSION_ID_NONE); // should not throw.
2979             assertEquals(LogSessionId.LOG_SESSION_ID_NONE, audioTrack.getLogSessionId());
2980 
2981             final String ARBITRARY_MAGIC = "0123456789abcdef"; // 16 char Base64Url.
2982             audioTrack.setLogSessionId(new LogSessionId(ARBITRARY_MAGIC));
2983             assertEquals(new LogSessionId(ARBITRARY_MAGIC), audioTrack.getLogSessionId());
2984 
2985             final MediaMetricsManager mediaMetricsManager =
2986                     getContext().getSystemService(MediaMetricsManager.class);
2987             final PlaybackSession playbackSession = mediaMetricsManager.createPlaybackSession();
2988             audioTrack.setLogSessionId(playbackSession.getSessionId());
2989             assertEquals(playbackSession.getSessionId(), audioTrack.getLogSessionId());
2990 
2991             // write some data to generate a log entry.
2992             short data[] = new short[audioTrack.getSampleRate() / 2];
2993             audioTrack.play();
2994             audioTrack.write(data, 0 /* offsetInShorts */, data.length);
2995             audioTrack.stop();
2996             Thread.sleep(500 /* millis */); // drain
2997 
2998             // Also can check the mediametrics dumpsys to validate logs generated.
2999         } finally {
3000             if (audioTrack != null) {
3001                 audioTrack.release();
3002             }
3003         }
3004     }
3005 
3006     /*
3007      * The following helpers and tests are used to test setting
3008      * and getting the start threshold in frames.
3009      *
3010      * See Android CDD 5.6 [C-1-2] Cold output latency
3011      */
3012     private static final int START_THRESHOLD_SLEEP_MILLIS = 500;
3013 
3014     /**
3015      * Helper test that validates setting the start threshold.
3016      *
3017      * @param track the AudioTrack to configure
3018      * @param startThresholdInFrames the start threshold, in frames, to set and verify
3019      * @throws Exception on failure
3020      */
3021     private static void validateSetStartThresholdInFrames(
3022             AudioTrack track, int startThresholdInFrames) throws Exception {
3023         assertEquals(startThresholdInFrames,
3024                 track.setStartThresholdInFrames(startThresholdInFrames));
3025         assertEquals(startThresholdInFrames,
3026                 track.getStartThresholdInFrames());
3027     }
3028 
3029     /**
3030      * Helper that tests that the head position eventually equals expectedFrames.
3031      *
3032      * Exponential backoff to ~ 2 x START_THRESHOLD_SLEEP_MILLIS
3033      *
3034      * @param track the AudioTrack whose playback head position is polled
3035      * @param expectedFrames the head position expected once playback settles
3036      * @param message failure message prefix used if the position is never reached
3037      * @throws Exception on failure
3038      */
3039     private static void validatePlaybackHeadPosition(
3040             AudioTrack track, int expectedFrames, String message) throws Exception {
3041         int cumulativeMillis = 0;
3042         int playbackHeadPosition = 0;
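        // Sleeps of ~62, 125, 250 and 500 ms give a total wait of roughly 940 ms.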
3043         for (double testMillis = START_THRESHOLD_SLEEP_MILLIS * 0.125;
3044              testMillis <= START_THRESHOLD_SLEEP_MILLIS;  // this is exact for IEEE binary double
3045              testMillis *= 2.) {
3046             Thread.sleep((int)testMillis);
3047             playbackHeadPosition = track.getPlaybackHeadPosition();
3048             if (playbackHeadPosition == expectedFrames) return;
3049             cumulativeMillis += (int)testMillis;
3050         }
3051         fail(message + ": expected track playbackHeadPosition: " + expectedFrames
3052                 + " actual playbackHeadPosition: " + playbackHeadPosition
3053                 + " wait time: " + cumulativeMillis + "ms");
3054     }
3055 
3056     /**
3057      * Helper test that sets the start threshold to frames, and validates
3058      * writing exactly frames amount of data is needed to start the
3059      * track streaming.
3060      *
3061      * @param track the AudioTrack to test
3062      * @param frames the start threshold, in frames, to set and then satisfy by writing
3063      * @throws Exception on failure
3064      */
3065     private static void validateWriteStartsStreamWithSetStartThreshold(
3066             AudioTrack track, int frames) throws Exception {
3067         // Set our threshold to frames.
3068         validateSetStartThresholdInFrames(track, frames);
3069 
3070         validateWriteStartsStream(track, frames);
3071     }
3072 
3073     /**
3074      * Helper test that validates writing exactly frames amount of data is needed to start the
3075      * track streaming.
3076      *
3077      * @param track the AudioTrack to test (must be playing and idle/underrun)
3078      * @param frames the number of frames that must be written to start streaming
3079      * @throws Exception on failure
3080      */
3081     private static void validateWriteStartsStream(AudioTrack track, int frames) throws Exception {
3082         assertEquals(1, track.getChannelCount()); // must be MONO
3083         final short[] data = new short[frames];
3084 
3085         // The track must be idle/underrun or the test will fail.
3086         int expectedFrames = track.getPlaybackHeadPosition();
3087 
3088         Thread.sleep(START_THRESHOLD_SLEEP_MILLIS);
3089         assertEquals("Streaming doesn't start if the start threshold is larger than buffered data",
3090                 expectedFrames, track.getPlaybackHeadPosition());
3091 
3092         // Write a small amount of data, this isn't enough to start the track.
3093         final int PARTIAL_WRITE_IN_FRAMES = frames - 1;
3094         track.write(data, 0 /* offsetInShorts */, PARTIAL_WRITE_IN_FRAMES);
3095 
3096         // Ensure the track hasn't started.
3097         Thread.sleep(START_THRESHOLD_SLEEP_MILLIS);
3098         assertEquals("Track needs enough frames to start",
3099                 expectedFrames, track.getPlaybackHeadPosition());
3100 
3101         // Write exactly threshold frames out, this should kick the playback off.
3102         track.write(data, 0 /* offsetInShorts */, data.length - PARTIAL_WRITE_IN_FRAMES);
3103 
3104         // Verify that we have processed the data now.
3105         expectedFrames += frames;
3106         Thread.sleep(frames * 1000L / track.getSampleRate());  // wait for the written frames to play out.
3107         validatePlaybackHeadPosition(track, expectedFrames,
3108                 "Writing buffer data to start threshold should start streaming");
3109     }
3110 
3111     /**
3112      * Helper that tests reducing the start threshold to frames will start track
3113      * streaming when frames of data are written to it.  (Presumes the
3114      * previous start threshold was greater than frames).
3115      *
3116      * @param track the AudioTrack to test (its current start threshold must exceed frames)
3117      * @param frames the reduced start threshold, in frames
3118      * @throws Exception on failure
3119      */
3120     private static void validateSetStartThresholdStartsStream(
3121             AudioTrack track, int frames) throws Exception {
3122         assertTrue(track.getStartThresholdInFrames() > frames);
3123         assertEquals(1, track.getChannelCount()); // must be MONO
3124         final short[] data = new short[frames];
3125 
3126         // The track must be idle/underrun or the test will fail.
3127         int expectedFrames = track.getPlaybackHeadPosition();
3128 
3129         // This write is too small for now.
3130         track.write(data, 0 /* offsetInShorts */, data.length);
3131 
3132         Thread.sleep(START_THRESHOLD_SLEEP_MILLIS);
3133         assertEquals("Track needs enough frames to start",
3134                 expectedFrames, track.getPlaybackHeadPosition());
3135 
3136         // Reduce our start threshold. This should start streaming.
3137         validateSetStartThresholdInFrames(track, frames);
3138 
3139         // Verify that we have processed the data now.
3140         expectedFrames += frames;
3141         Thread.sleep(frames * 1000L / track.getSampleRate());  // wait for the written frames to play out.
3142         validatePlaybackHeadPosition(track, expectedFrames,
3143                 "Changing start threshold to buffer data level should start streaming");
3144     }
3145 
3146     // Tests the default fill buffer value to start playing an AudioTrack
3147     @Test
3148     public void testDefaultStartThresholdInFrames() throws Exception {
3149         if (!hasAudioOutput()) {
3150             return;
3151         }
3152 
3153         AudioTrack audioTrack = null;
3154         try {
3155             // Build our audiotrack
3156             audioTrack = new AudioTrack.Builder()
3157                     .setAudioFormat(new AudioFormat.Builder()
3158                             .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
3159                             .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
3160                             .build())
3161                     .build();
3162 
3163             // Start the AudioTrack. Now the track is waiting for data.
3164             audioTrack.play();
3165 
3166             validateWriteStartsStream(audioTrack, audioTrack.getStartThresholdInFrames());
3167         } finally {
3168             if (audioTrack != null) {
3169                 audioTrack.release();
3170             }
3171         }
3172     }
3173 
3174     // Tests that the getPlaybackHeadPosition is 0 after creating the track and before starting
3175     // to play even after setStartThresholdInFrames is called
3176     @Test
3177     public void testZeroPositionStartThresholdInFrames() throws Exception {
3178         if (!hasAudioOutput()) {
3179             return;
3180         }
3181 
3182         AudioTrack audioTrack = null;
3183         try {
3184             final int TEST_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
3185             final int TEST_RATE = 48000;
3186             final int TEST_CHANNELS = AudioFormat.CHANNEL_OUT_MONO;
3187 
3188             // 1 second buffer
3189             final int buffSizeInBytes = TEST_RATE *
3190                     AudioFormat.channelCountFromOutChannelMask(TEST_CHANNELS) *
3191                     AudioFormat.getBytesPerSample(TEST_ENCODING);
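            // = 48000 frames * 1 channel * 2 bytes/sample = 96000 bytes, one second at 48 kHz mono.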
3192 
3193             // Build our audiotrack
3194             audioTrack = new AudioTrack.Builder()
3195                     .setAudioFormat(new AudioFormat.Builder()
3196                             .setEncoding(TEST_ENCODING)
3197                             .setChannelMask(TEST_CHANNELS)
3198                             .setSampleRate(TEST_RATE)
3199                             .build())
3200                     .setBufferSizeInBytes(buffSizeInBytes)
3201                     .build();
3202 
3203             int bufferSize = audioTrack.getBufferSizeInFrames();
3204             final short[] bufferData = new short[bufferSize];
3205             // Use a small part of the buffer size for the frames data
3206             int frames = bufferSize / 4;
3207             int errorMargin = frames;
3208             final short[] data = new short[frames];
3209 
3210             audioTrack.write(data, 0 /* offsetInShorts */, data.length);
3211             Thread.sleep(START_THRESHOLD_SLEEP_MILLIS);
3212 
3213             assertEquals("PlaybackHeadPosition should be 0 before starting playback.",
3214                     0 /* expectedFrames */, audioTrack.getPlaybackHeadPosition());
3215 
3216             // set a start threshold smaller than the initial buffer size, but larger
3217             // than the already written data
3218             audioTrack.setStartThresholdInFrames(3 * frames);
3219             Thread.sleep(START_THRESHOLD_SLEEP_MILLIS);
3220 
3221             assertEquals("PlaybackHeadPosition should be 0 before starting playback and setting"
3222                             + " the startThresholdInFrames.",
3223                     0 /* expectedFrames */, audioTrack.getPlaybackHeadPosition());
3224 
3225             // write some more data, but not enough to start playback
3226             audioTrack.write(data, 0 /* offsetInShorts */, data.length);
3227             Thread.sleep(START_THRESHOLD_SLEEP_MILLIS);
3228 
3229             assertEquals("PlaybackHeadPosition should be 0 before starting playback and setting"
3230                             + " the startThresholdInFrames and writing insufficient data.",
3231                     0 /* expectedFrames */, audioTrack.getPlaybackHeadPosition());
3232 
3233             // write some more data, a full buffer, more than the threshold
3234             audioTrack.write(bufferData, 0 /* offsetInShorts */, data.length);
3235             Thread.sleep(START_THRESHOLD_SLEEP_MILLIS);
3236 
3237             assertEquals("PlaybackHeadPosition should be 0 before starting playback and setting"
3238                             + " the startThresholdInFrames and writing sufficient data.",
3239                     0 /* expectedFrames */, audioTrack.getPlaybackHeadPosition());
3240 
3241             audioTrack.play();
3242             int playbackHeadPosition = audioTrack.getPlaybackHeadPosition();
3243 
3244             assertTrue("PlaybackHeadPosition should be almost 0 immediately after starting playback"
3245                             + " with set startThresholdInFrames and sufficient written data,"
3246                             + " but is " + playbackHeadPosition + ", with margin " + errorMargin,
3247                     playbackHeadPosition < errorMargin);
3248         } finally {
3249             if (audioTrack != null) {
3250                 audioTrack.release();
3251             }
3252         }
3253     }
3254 
3255     // Start threshold levels that we check.
3256     private enum ThresholdLevel { LOW, MEDIUM, HIGH };
3257     @Test
3258     public void testStartThresholdInFrames() throws Exception {
3259         if (!hasAudioOutput()) {
3260             return;
3261         }
3262 
3263         for (ThresholdLevel level : new ThresholdLevel[] {
3264                 ThresholdLevel.LOW, ThresholdLevel.MEDIUM, ThresholdLevel.HIGH}) {
3265             AudioTrack audioTrack = null;
3266             try {
3267                 // Build our audiotrack
3268                 audioTrack = new AudioTrack.Builder()
3269                         .setAudioFormat(new AudioFormat.Builder()
3270                                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
3271                                 .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
3272                                 .build())
3273                         .build();
3274 
3275                 // Initially the start threshold must be the same as the buffer size in frames.
3276                 final int bufferSizeInFrames = audioTrack.getBufferSizeInFrames();
3277                 assertEquals("At start, getBufferSizeInFrames should equal getStartThresholdInFrames",
3278                         bufferSizeInFrames,
3279                         audioTrack.getStartThresholdInFrames());
3280 
3281                 final int TARGET_THRESHOLD_IN_FRAMES;  // threshold level to verify
3282                 switch (level) {
3283                     default:
3284                     case LOW:
3285                         TARGET_THRESHOLD_IN_FRAMES = 2;
3286                         break;
3287                     case MEDIUM:
3288                         TARGET_THRESHOLD_IN_FRAMES = bufferSizeInFrames / 2;
3289                         break;
3290                     case HIGH:
3291                         TARGET_THRESHOLD_IN_FRAMES = bufferSizeInFrames - 1;
3292                         break;
3293                 }
3294 
3295                 // Skip extreme cases that don't need testing.
3296                 if (TARGET_THRESHOLD_IN_FRAMES < 2
3297                         || TARGET_THRESHOLD_IN_FRAMES >= bufferSizeInFrames) continue;
3298 
3299                 // Start the AudioTrack. Now the track is waiting for data.
3300                 audioTrack.play();
3301 
3302                 validateWriteStartsStreamWithSetStartThreshold(
3303                         audioTrack, TARGET_THRESHOLD_IN_FRAMES);
3304 
3305                 // Try a condition that requires buffers to be filled again.
3306                 if (false) {
3307                     // Only a deep underrun when the track becomes inactive requires a refill.
3308                     // Disabled as this is dependent on underlying MixerThread timeouts.
3309                     Thread.sleep(5000 /* millis */);
3310                 } else {
3311                     // Flushing will require a refill (this does not require timing).
3312                     audioTrack.pause();
3313                     audioTrack.flush();
3314                     audioTrack.play();
3315                 }
3316 
3317                 // Check that reducing to a smaller threshold will start the track streaming.
3318                 validateSetStartThresholdStartsStream(audioTrack, TARGET_THRESHOLD_IN_FRAMES - 1);
3319             } finally {
3320                 if (audioTrack != null) {
3321                     audioTrack.release();
3322                 }
3323             }
3324         }
3325     }
3326 
3327     @Test
3328     public void testStartThresholdInFramesExceptions() throws Exception {
3329         if (!hasAudioOutput()) {
3330             return;
3331         }
3332         AudioTrack audioTrack = null;
3333         try {
3334             // Build our AudioTrack.
3335             audioTrack = new AudioTrack.Builder()
3336                     .setAudioFormat(new AudioFormat.Builder()
3337                             .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
3338                             .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
3339                             .build())
3340                     .build();
3341 
3342             // Test setting invalid start threshold.
3343             final AudioTrack track = audioTrack; // make final for lambda
3344             assertThrows(IllegalArgumentException.class, () -> {
3345                 track.setStartThresholdInFrames(-1 /* startThresholdInFrames */);
3346             });
3347         } finally {
3348             if (audioTrack != null) {
3349                 audioTrack.release();
3350             }
3351         }
3352         // If we're here audioTrack should be non-null but released,
3353         // so calls should throw an IllegalStateException.
3354         final AudioTrack track = audioTrack; // make final for lambda
3355         assertThrows(IllegalStateException.class, () -> {
3356             track.getStartThresholdInFrames();
3357         });
3358         assertThrows(IllegalStateException.class, () -> {
3359             track.setStartThresholdInFrames(1 /* startThresholdInFrames */);
3360         });
3361     }
3362 
3363     /**
3364      * Tests height channel masks and higher channel counts
3365      * used in immersive AudioTrack streaming.
3366      *
3367      * @throws Exception
3368      */
3369     @Test
3370     public void testImmersiveStreaming() throws Exception {
3371         if (!hasAudioOutput()) {
3372             return;
3373         }
3374 
3375         final String TEST_NAME = "testImmersiveStreaming";
3376         final int TEST_FORMAT_ARRAY[] = {
3377             AudioFormat.ENCODING_PCM_16BIT,
3378             AudioFormat.ENCODING_PCM_FLOAT,
3379         };
3380         final int TEST_SR_ARRAY[] = {
3381             48000,  // do not set too high - costly in memory.
3382         };
3383         final int TEST_CONF_ARRAY[] = {
3384             AudioFormat.CHANNEL_OUT_5POINT1POINT2, // 8 ch (includes height channels vs 7.1).
3385             AudioFormat.CHANNEL_OUT_7POINT1POINT2, // 10 ch
3386             AudioFormat.CHANNEL_OUT_7POINT1POINT4, // 12 ch
3387             AudioFormat.CHANNEL_OUT_9POINT1POINT4, // 14 ch
3388             AudioFormat.CHANNEL_OUT_9POINT1POINT6, // 16 ch
3389             AudioFormat.CHANNEL_OUT_22POINT2,      // 24 ch
3390         };
3391 
3392         final int TEST_MODE = AudioTrack.MODE_STREAM;
3393         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
3394         final float TEST_SWEEP = 0; // sine wave only
3395         final boolean TEST_IS_LOW_RAM_DEVICE = false;
3396         for (int TEST_FORMAT : TEST_FORMAT_ARRAY) {
3397             double frequency = 400; // Note: frequency changes for each test
3398             for (int TEST_SR : TEST_SR_ARRAY) {
3399                 for (int TEST_CONF : TEST_CONF_ARRAY) {
3400                     if (AudioFormat.channelCountFromOutChannelMask(TEST_CONF)
3401                             > AudioSystem.OUT_CHANNEL_COUNT_MAX) {
3402                         continue; // Skip if the channel count exceeds framework capabilities.
3403                     }
3404                     playOnceStreamData(TEST_NAME, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP,
3405                             TEST_IS_LOW_RAM_DEVICE, TEST_FORMAT, frequency, TEST_SR, TEST_CONF,
3406                             WAIT_MSEC, 0 /* mask */);
3407                     frequency += 50; // increment test tone frequency
3408                 }
3409             }
3410         }
3411     }
3412 
3413     @Test
3414     public void testImmersiveChannelIndex() throws Exception {
3415         if (!hasAudioOutput()) {
3416             return;
3417         }
3418 
3419         final String TEST_NAME = "testImmersiveChannelIndex";
3420         final int TEST_FORMAT_ARRAY[] = {
3421                 AudioFormat.ENCODING_PCM_FLOAT,
3422         };
3423         final int TEST_SR_ARRAY[] = {
3424                 48000,  // do not set too high - costly in memory.
3425         };
3426         final int MAX_CHANNEL_BIT = 1 << (AudioSystem.FCC_24 - 1); // highest allowed channel.
3427         final int TEST_CONF_ARRAY[] = {
3428                 MAX_CHANNEL_BIT,      // likely silent - no physical device on top channel.
3429                 MAX_CHANNEL_BIT | 1,  // first channel will likely have physical device.
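                     // all index channels up to the framework maximum ((1 << N) - 1 sets the lowest N bits).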
3430                 (1 << AudioSystem.OUT_CHANNEL_COUNT_MAX) - 1,
3431         };
3432         final int TEST_WRITE_MODE_ARRAY[] = {
3433                 AudioTrack.WRITE_BLOCKING,
3434                 AudioTrack.WRITE_NON_BLOCKING,
3435         };
3436         final double TEST_SWEEP = 0;
3437         final int TEST_TRANSFER_MODE = AudioTrack.MODE_STREAM;
3438         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
3439 
3440         double frequency = 200; // frequency changes for each test
3441         for (int TEST_FORMAT : TEST_FORMAT_ARRAY) {
3442             for (int TEST_SR : TEST_SR_ARRAY) {
3443                 for (int TEST_WRITE_MODE : TEST_WRITE_MODE_ARRAY) {
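                         // useDirect alternates the final argument to exercise both
                         // non-direct and direct buffer paths.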
3444                     for (int useDirect = 0; useDirect < 2; ++useDirect) {
3445                         for (int TEST_CONF : TEST_CONF_ARRAY) {
3446                             // put TEST_CONF in the inner loop to avoid
3447                             // back-to-back creation of large tracks.
3448                             playOnceStreamByteBuffer(
3449                                     TEST_NAME, frequency, TEST_SWEEP,
3450                                     TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
3451                                     TEST_TRANSFER_MODE, TEST_WRITE_MODE,
3452                                     true /* useChannelIndex */, useDirect != 0);
3453                             frequency += 30; // increment test tone frequency
3454                         }
3455                     }
3456                 }
3457             }
3458         }
3459     }
3460 
3461     /**
3462      * Verifies that the downmixer works with different AudioTrack surround channel masks.
3463      *
3464      * Also a listening test: on a stereo output device, you should hear sine wave tones
3465      * instead of silence if the downmixer is working.
3466      *
3467      * @throws Exception
3468      */
3469     @Test
3470     public void testDownmix() throws Exception {
3471         if (!hasAudioOutput()) {
3472             return;
3473         }
3474 
3475         final String TEST_NAME = "testDownmix";
3476         final int TEST_FORMAT_ARRAY[] = {
3477             // AudioFormat.ENCODING_PCM_8BIT,  // sounds a bit tinny
3478             AudioFormat.ENCODING_PCM_16BIT,
3479             AudioFormat.ENCODING_PCM_FLOAT,
3480         };
3481         final int TEST_SR_ARRAY[] = {
3482             48000,
3483         };
3484         final int TEST_CONF_ARRAY[] = {
3485             // This test will play back FRONT_WIDE_LEFT, then FRONT_WIDE_RIGHT.
3486             AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
3487             AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT | AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT,
3488         };
3489 
3490         final int TEST_MODE = AudioTrack.MODE_STREAM;
3491         final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
3492         final float TEST_SWEEP = 0; // sine wave only
3493         final boolean TEST_IS_LOW_RAM_DEVICE = false;
3494         for (int TEST_FORMAT : TEST_FORMAT_ARRAY) {
3495             double frequency = 400; // Note: frequency changes for each test
3496             for (int TEST_SR : TEST_SR_ARRAY) {
3497                 for (int TEST_CONF : TEST_CONF_ARRAY) {
3498                     // Remove the front left and front right channels.
3499                     int signalMask = TEST_CONF & ~(AudioFormat.CHANNEL_OUT_FRONT_LEFT
3500                             | AudioFormat.CHANNEL_OUT_FRONT_RIGHT);
3501                     // Play all the "surround channels" in the mask individually
3502                     // at different frequencies.
3503                     while (signalMask != 0) {
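                             // Isolate the lowest set bit of the mask (two's-complement trick).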
3504                         final int lowbit = signalMask & -signalMask;
3505                         playOnceStreamData(TEST_NAME, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP,
3506                                 TEST_IS_LOW_RAM_DEVICE, TEST_FORMAT, frequency, TEST_SR,
3507                                 TEST_CONF, WAIT_MSEC, lowbit);
3508                         signalMask -= lowbit;
3509                         frequency += 50; // increment test tone frequency
3510                     }
3511                 }
3512             }
3513         }
3514     }
3515 
3516     /**
3517      * Ensures AudioTrack.getMinBufferSize returns ERROR_BAD_VALUE for invalid arguments
3518      * instead of throwing an exception.
3519      *
3520      * @throws Exception
3521      */
3522     @Test
3523     public void testInvalidMinBufferSize() throws Exception {
3524         int TEST_SAMPLE_RATE = 24000;
3525         int TEST_CHANNEL_CONFIGURATION = AudioFormat.CHANNEL_OUT_STEREO;
3526         int TEST_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
3527 
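             // Each bit of i marks one argument as invalid: bit 0 = sample rate,
             // bit 1 = channel configuration, bit 2 = encoding; i = 1..7 covers
             // every non-empty combination.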
3528         for (int i = 1; i < 8; ++i) {
3529             int minBuffSize = AudioTrack.getMinBufferSize(
3530                     (i & 1) != 0 ? 0 : TEST_SAMPLE_RATE,
3531                     (i & 2) != 0 ? AudioFormat.CHANNEL_INVALID : TEST_CHANNEL_CONFIGURATION,
3532                     (i & 4) != 0 ? AudioFormat.ENCODING_INVALID : TEST_ENCODING);
3533             assertEquals("Invalid configuration " + i + " should return ERROR_BAD_VALUE",
3534                     AudioTrack.ERROR_BAD_VALUE, minBuffSize);
3535         }
3536     }
3537 
3538     /**
3539      * Test AudioTrack Builder error handling.
3540      *
3541      * @throws Exception
3542      */
3543     @Test
3544     public void testAudioTrackBuilderError() throws Exception {
3545         if (!hasAudioOutput()) {
3546             return;
3547         }
3548 
3549         final AudioTrack[] audioTrack = new AudioTrack[1]; // pointer to audio track.
3550         final int BIGNUM = Integer.MAX_VALUE; // large value that should be invalid.
3551         final int INVALID_SESSION_ID = 1024;  // can never occur (wrong type in 3 lsbs)
3552         final int INVALID_CHANNEL_MASK = -1;
3553 
3554         try {
3555             // NOTE:
3556             // Tuner Configuration builder error tested in testTunerConfiguration (same file).
3557             // AudioAttributes tested in AudioAttributesTest#testAudioAttributesBuilderError.
3558             // AudioFormat tested in AudioFormatTest#testAudioFormatBuilderError.
3559 
3560             // We must be able to create the AudioTrack.
3561             audioTrack[0] = new AudioTrack.Builder().build();
3562             audioTrack[0].release();
3563 
3564             // Out of bounds buffer size.  A large size will fail in AudioTrack creation.
3565             assertThrows(UnsupportedOperationException.class, () -> {
3566                 audioTrack[0] = new AudioTrack.Builder()
3567                         .setBufferSizeInBytes(BIGNUM)
3568                         .build();
3569             });
3570 
3571             // 0 and negative buffer size throw IllegalArgumentException
3572             for (int bufferSize : new int[] {-BIGNUM, -1, 0}) {
3573                 assertThrows(IllegalArgumentException.class, () -> {
3574                     audioTrack[0] = new AudioTrack.Builder()
3575                             .setBufferSizeInBytes(bufferSize)
3576                             .build();
3577                 });
3578             }
3579 
3580             assertThrows(IllegalArgumentException.class, () -> {
3581                 audioTrack[0] = new AudioTrack.Builder()
3582                         .setEncapsulationMode(BIGNUM)
3583                         .build();
3584             });
3585 
3586             assertThrows(IllegalArgumentException.class, () -> {
3587                 audioTrack[0] = new AudioTrack.Builder()
3588                         .setPerformanceMode(BIGNUM)
3589                         .build();
3590             });
3591 
3592             // Invalid session id that is positive.
3593             // (The logcat error message is vague.)
3594             assertThrows(UnsupportedOperationException.class, () -> {
3595                 audioTrack[0] = new AudioTrack.Builder()
3596                         .setSessionId(INVALID_SESSION_ID)
3597                         .build();
3598             });
3599 
3600             assertThrows(IllegalArgumentException.class, () -> {
3601                 audioTrack[0] = new AudioTrack.Builder()
3602                         .setTransferMode(BIGNUM)
3603                         .build();
3604             });
3605 
3606             // Specialty AudioTrack build errors.
3607 
3608             // Bad audio encoding: DRA is expected to be unsupported.
3609             try {
3610                 audioTrack[0] = new AudioTrack.Builder()
3611                         .setAudioFormat(new AudioFormat.Builder()
3612                                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
3613                                 .setEncoding(AudioFormat.ENCODING_DRA)
3614                                 .build())
3615                         .build();
3616                 // Don't fail; the encoding may be supported on some devices, so just warn.
3617                 // Note: often specialty audio formats are offloaded (see setOffloadedPlayback).
3618                 // AudioTrackSurroundTest and AudioTrackOffloadedTest can be used as examples.
3619                 Log.w(TAG, "ENCODING_DRA is expected to be unsupported");
3620                 audioTrack[0].release();
3621                 audioTrack[0] = null;
3622             } catch (UnsupportedOperationException e) {
3623                 ; // OK expected
3624             }
3625 
3626             // Sample rate out of bounds.
3627             // The system limits are enforced by AudioFormat.Builder.
3628             for (int sampleRate : new int[] {
3629                     BIGNUM,
3630                     AudioSystem.SAMPLE_RATE_HZ_MIN - 1,
3631                     AudioSystem.SAMPLE_RATE_HZ_MAX + 1}) {
3632                 assertThrows(IllegalArgumentException.class, () -> {
3633                     audioTrack[0] = new AudioTrack.Builder()
3634                             .setAudioFormat(new AudioFormat.Builder()
3635                                     .setSampleRate(sampleRate)
3636                                     .build())
3637                             .build();
3638                 });
3639             }
3640 
3641             // Invalid channel mask - caught here on use.
3642             assertThrows(IllegalArgumentException.class, () -> {
3643                 audioTrack[0] = new AudioTrack.Builder()
3644                         .setAudioFormat(new AudioFormat.Builder()
3645                                 .setChannelMask(INVALID_CHANNEL_MASK)
3646                                 .build())
3647                         .build();
3648             });
3649         } finally {
3650             // If the test reached this point with a track that was created
3651             // but not released, clean it up.
3652             if (audioTrack[0] != null) {
3653                 audioTrack[0].release();
3654                 audioTrack[0] = null;
3655             }
3656         }
3657     }
3658 
3659 /* Do not run in JB-MR1. Will be re-enabled in a later platform release.
3660     public void testResourceLeakage() throws Exception {
3661         final int BUFFER_SIZE = 600 * 1024;
3662         ByteBuffer data = ByteBuffer.allocate(BUFFER_SIZE);
3663         for (int i = 0; i < 10; i++) {
3664             Log.i(TAG, "testResourceLeakage round " + i);
3665             data.rewind();
3666             AudioTrack track = new AudioTrack(AudioManager.STREAM_VOICE_CALL,
3667                                               44100,
3668                                               AudioFormat.CHANNEL_OUT_STEREO,
3669                                               AudioFormat.ENCODING_PCM_16BIT,
3670                                               data.capacity(),
3671                                               AudioTrack.MODE_STREAM);
3672             assertTrue(track != null);
3673             track.write(data.array(), 0, data.capacity());
3674             track.play();
3675             Thread.sleep(100);
3676             track.stop();
3677             track.release();
3678         }
3679     }
3680 */
3681 
3682     /* MockAudioTrack allows testing of protected getNativeFrameCount() and setState(). */
3683     private class MockAudioTrack extends AudioTrack {
3684 
3685         public MockAudioTrack(int streamType, int sampleRateInHz, int channelConfig,
3686                 int audioFormat, int bufferSizeInBytes, int mode) throws IllegalArgumentException {
3687             super(streamType, sampleRateInHz, channelConfig, audioFormat, bufferSizeInBytes, mode);
3688         }
3689 
3690         public void setState(int state) {
3691             super.setState(state);
3692         }
3693 
3694         public int getNativeFrameCount() {
3695             return super.getNativeFrameCount();
3696         }
3697     }
3698 
3699     private static AudioPresentation createAudioPresentation() {
3700         return (new AudioPresentation.Builder(42 /*presentationId*/)).build();
3701     }
3702 
3703     private static Context getContext() {
3704         return InstrumentationRegistry.getInstrumentation().getTargetContext();
3705     }
3706 }
3707