/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package android.media.cts;

import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTimestamp;
import android.media.AudioTrack;

import java.nio.ByteBuffer;
import java.util.LinkedList;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Class for playing audio by using an {@link AudioTrack}.
 * The blocking {@link AudioTrack#write(byte[], int, int)} and
 * {@link AudioTrack#write(short[], int, int)} methods wait until all data has been written
 * to the system. In order to avoid blocking, this class calculates the available buffer
 * size first and then writes to the audio sink.
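 *
 * <p>A minimal usage sketch (the buffer name {@code pcmBuffer}, the timestamp name
 * {@code presentationTimeNs}, and the polling structure are illustrative, not part of this
 * class's contract):
 * <pre>{@code
 * NonBlockingAudioTrack track = new NonBlockingAudioTrack(
 *         44100, 2, false, AudioManager.AUDIO_SESSION_ID_GENERATE);
 * track.play();
 * // write() only enqueues the buffer; it never blocks.
 * track.write(pcmBuffer, pcmBuffer.remaining(), presentationTimeNs);
 * // Call process() periodically (e.g. from the render loop) to drain the queue
 * // into the AudioTrack with non-blocking writes.
 * track.process();
 * }</pre>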
 */
public class NonBlockingAudioTrack {
    private static final String TAG = NonBlockingAudioTrack.class.getSimpleName();

    /** Sentinel PTS queued by {@link #setEndOfStream()} to mark the end of the stream. */
    private static final long END_OF_STREAM_PTS = Long.MAX_VALUE;

    private static class QueueElement {
        ByteBuffer data;
        int size;   // bytes of data still to be written
        long pts;   // presentation timestamp, in nanoseconds
    }

    private AudioTrack mAudioTrack;
    private int mSampleRate;
    /** Bytes queued via {@link #write} but not yet written to the AudioTrack. */
    private int mNumBytesQueued = 0;
    private AtomicInteger mTotalBytesWritten = new AtomicInteger(0);
    private LinkedList<QueueElement> mQueue = new LinkedList<QueueElement>();
    /** Set when {@link #stop()} is deferred because data is still queued. */
    private boolean mStopped;
    private int mBufferSizeInBytes;
    private AtomicBoolean mStopWriting = new AtomicBoolean(false);

    /**
     * An offset (in nanoseconds) to add to presentation timestamps fed to the {@link AudioTrack}.
     * This is used to simulate desynchronization between tracks.
     */
    private AtomicLong mAudioOffsetNs = new AtomicLong(0);

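    /**
     * Creates the underlying {@link AudioTrack} in streaming mode, with a buffer of twice
     * the minimum size for the given format. Only mono, stereo, and 5.1 channel counts are
     * supported. When {@code hwAvSync} is true, the track is created with
     * {@link AudioAttributes#FLAG_HW_AV_SYNC} so the hardware can keep audio and video in sync.
     */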
    public NonBlockingAudioTrack(int sampleRate, int channelCount, boolean hwAvSync,
                    int audioSessionId) {
        int channelConfig;
        switch (channelCount) {
            case 1:
                channelConfig = AudioFormat.CHANNEL_OUT_MONO;
                break;
            case 2:
                channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
                break;
            case 6:
                channelConfig = AudioFormat.CHANNEL_OUT_5POINT1;
                break;
            default:
                throw new IllegalArgumentException(
                        "Unsupported channel count: " + channelCount);
        }

        int minBufferSize =
            AudioTrack.getMinBufferSize(
                    sampleRate,
                    channelConfig,
                    AudioFormat.ENCODING_PCM_16BIT);

        mBufferSizeInBytes = 2 * minBufferSize;

        if (!hwAvSync) {
            mAudioTrack = new AudioTrack(
                    AudioManager.STREAM_MUSIC,
                    sampleRate,
                    channelConfig,
                    AudioFormat.ENCODING_PCM_16BIT,
                    mBufferSizeInBytes,
                    AudioTrack.MODE_STREAM);
        } else {
            // build AudioTrack using AudioAttributes and FLAG_HW_AV_SYNC
            AudioAttributes audioAttributes = (new AudioAttributes.Builder())
                            .setLegacyStreamType(AudioManager.STREAM_MUSIC)
                            .setFlags(AudioAttributes.FLAG_HW_AV_SYNC)
                            .build();
            AudioFormat audioFormat = (new AudioFormat.Builder())
                            .setChannelMask(channelConfig)
                            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                            .setSampleRate(sampleRate)
                            .build();
            mAudioTrack = new AudioTrack(audioAttributes, audioFormat, mBufferSizeInBytes,
                                    AudioTrack.MODE_STREAM, audioSessionId);
        }

        mSampleRate = sampleRate;
    }

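    /**
     * Returns the audio presentation time, in microseconds, derived from the playback head
     * position. Note that {@link AudioTrack#getPlaybackHeadPosition()} wraps a 32-bit frame
     * counter, so very long playback can overflow this value.
     */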
    public long getAudioTimeUs() {
        int numFramesPlayed = mAudioTrack.getPlaybackHeadPosition();

        return (numFramesPlayed * 1000000L) / mSampleRate;
    }

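    /**
     * Returns the latest {@link AudioTimestamp}. The boolean result of
     * {@link AudioTrack#getTimestamp} is ignored here, so the returned object is left
     * zero-filled when no timestamp is available yet.
     */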
    public AudioTimestamp getTimestamp() {
        AudioTimestamp timestamp = new AudioTimestamp();
        mAudioTrack.getTimestamp(timestamp);
        return timestamp;
    }

    public int getNumBytesQueued() {
        return mNumBytesQueued;
    }

    public void play() {
        mStopped = false;
        mAudioTrack.play();
    }

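    /**
     * Queues an end-of-stream marker. When {@link #process()} reaches it, a zero-length
     * write tagged with {@code END_OF_STREAM_PTS} is issued so the Audio HAL releases the
     * last buffered frame (see the comment in {@link #process()}).
     */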
    public void setEndOfStream() {
        QueueElement element = new QueueElement();
        element.pts = END_OF_STREAM_PTS;
        mQueue.add(element);
    }

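    /**
     * Stops the track immediately if nothing is queued; otherwise the stop is deferred
     * until {@link #process()} has drained the remaining queued data.
     */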
    public void stop() {
        if (mQueue.isEmpty()) {
            mAudioTrack.stop();
            mNumBytesQueued = 0;
        } else {
            mStopped = true;
        }
    }

    /** When {@code stop} is true, {@link #process()} stops draining the queue until cleared. */
    public void setStopWriting(boolean stop) {
        mStopWriting.set(stop);
    }

    public void setAudioOffsetNs(long audioOffsetNs) {
        mAudioOffsetNs.set(audioOffsetNs);
    }

    public void pause() {
        mAudioTrack.pause();
    }

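    /**
     * Flushes the track and clears the queue. This is a no-op while the track is playing,
     * since {@link AudioTrack#flush()} only has an effect on a stopped or paused track.
     */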
    public void flush() {
        if (mAudioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
            return;
        }
        mAudioTrack.flush();
        mQueue.clear();
        mNumBytesQueued = 0;
        mStopped = false;
    }

    public void release() {
        mQueue.clear();
        mNumBytesQueued = 0;
        mAudioTrack.release();
        mAudioTrack = null;
        mStopped = false;
    }

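    /**
     * Drains as much queued data as the {@link AudioTrack} will currently accept, using
     * non-blocking writes. Call this periodically while playing. A stop deferred by
     * {@link #stop()} is applied once the queue has been drained.
     */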
    public void process() {
        while (!mQueue.isEmpty()) {
            QueueElement element = mQueue.peekFirst();
            if (mStopWriting.get()) {
                break;
            }

            if (element.pts == END_OF_STREAM_PTS) {
                // For tunnel mode, when an audio PTS gap is encountered, silence is rendered
                // during the gap. As such, it's necessary to fade down the audio to avoid a
                // bad user experience. This necessitates that the Audio HAL holds onto the
                // last audio frame and delays releasing it to the output device until the
                // subsequent audio frame is seen so that it knows whether there's a PTS gap
                // or not. When the end-of-stream is reached, this means that the last audio
                // frame has not been rendered yet. So, in order to release the last audio
                // frame, a signal must be sent to the Audio HAL so the last frame gets
                // released.
                int written = mAudioTrack.write(ByteBuffer.allocate(0), 0,
                                                AudioTrack.WRITE_NON_BLOCKING,
                                                END_OF_STREAM_PTS);
                if (written < 0) {
                    throw new RuntimeException("AudioTrack.write failed (" + written + ")");
                }
                mQueue.removeFirst();
                break;
            }

            int written = mAudioTrack.write(element.data, element.size,
                    AudioTrack.WRITE_NON_BLOCKING, element.pts + mAudioOffsetNs.get());
            if (written < 0) {
                throw new RuntimeException("AudioTrack.write failed (" + written + ")");
            }

            mTotalBytesWritten.addAndGet(written);
            mNumBytesQueued -= written;
            element.size -= written;
            // A partial write means the track's buffer is full; retry on the next call.
            if (element.size != 0) {
                break;
            }
            mQueue.removeFirst();
        }
        if (mStopped) {
            mAudioTrack.stop();
            mNumBytesQueued = 0;
            mStopped = false;
        }
    }

    public int getFramesWritten() {
        if (mAudioTrack == null) {
            return -1;
        }
        return mTotalBytesWritten.get() / mAudioTrack.getFormat().getFrameSizeInBytes();
    }

    public int getPlayState() {
        return mAudioTrack.getPlayState();
    }

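    /**
     * Queues {@code size} bytes of audio data with presentation timestamp {@code pts}
     * (in nanoseconds). This never blocks; the data is written to the track later by
     * {@link #process()}.
     */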
    public void write(ByteBuffer data, int size, long pts) {
        QueueElement element = new QueueElement();
        element.data = data;
        element.size = size;
        element.pts = pts;

        // accumulate size written to queue
        mNumBytesQueued += size;
        mQueue.add(element);
    }

    /** Returns the underlying {@code AudioTrack}. */
    public AudioTrack getAudioTrack() {
        return mAudioTrack;
    }
}