1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import android.annotation.CallbackExecutor;
20 import android.annotation.FloatRange;
21 import android.annotation.IntDef;
22 import android.annotation.IntRange;
23 import android.annotation.NonNull;
24 import android.annotation.Nullable;
25 import android.annotation.TestApi;
26 import android.compat.annotation.UnsupportedAppUsage;
27 import android.os.Binder;
28 import android.os.Handler;
29 import android.os.HandlerThread;
30 import android.os.Looper;
31 import android.os.Message;
32 import android.os.PersistableBundle;
33 import android.util.ArrayMap;
34 import android.util.Log;
35 
36 import com.android.internal.annotations.GuardedBy;
37 
38 import java.lang.annotation.Retention;
39 import java.lang.annotation.RetentionPolicy;
40 import java.lang.ref.WeakReference;
41 import java.nio.ByteBuffer;
42 import java.nio.ByteOrder;
43 import java.nio.NioUtils;
44 import java.util.LinkedList;
45 import java.util.concurrent.Executor;
46 
47 /**
48  * The AudioTrack class manages and plays a single audio resource for Java applications.
49  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
50  * achieved by "pushing" the data to the AudioTrack object using one of the
51  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
52  *  and {@link #write(float[], int, int, int)} methods.
53  *
54  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
55  * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
56  * one of the {@code write()} methods. These are blocking and return when the data has been
57  * transferred from the Java layer to the native layer and queued for playback. The streaming
58  * mode is most useful when playing blocks of audio data that for instance are:
59  *
60  * <ul>
61  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
62  *   <li>too big to fit in memory because of the characteristics of the audio data
63  *         (high sampling rate, bits per sample ...)</li>
64  *   <li>received or generated while previously queued audio is playing.</li>
65  * </ul>
66  *
67  * The static mode should be chosen when dealing with short sounds that fit in memory and
68  * that need to be played with the smallest latency possible. The static mode will
69  * therefore be preferred for UI and game sounds that are played often, and with the
70  * smallest overhead possible.
71  *
72  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
73  * The size of this buffer, specified during the construction, determines how long an AudioTrack
74  * can play before running out of data.<br>
75  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
76  * be played from it.<br>
77  * For the streaming mode, data will be written to the audio sink in chunks of
78  * sizes less than or equal to the total buffer size.
79  *
80  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
81  */
82 public class AudioTrack extends PlayerBase
83                         implements AudioRouting
84                                  , VolumeAutomation
85 {
86     //---------------------------------------------------------
87     // Constants
88     //--------------------
89     /** Minimum value for a linear gain or auxiliary effect level.
90      *  This value must be exactly equal to 0.0f; do not change it.
91      */
92     private static final float GAIN_MIN = 0.0f;
93     /** Maximum value for a linear gain or auxiliary effect level.
94      *  This value must be greater than or equal to 1.0f.
95      */
96     private static final float GAIN_MAX = 1.0f;
97 
98     /** indicates AudioTrack state is stopped */
99     public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
100     /** indicates AudioTrack state is paused */
101     public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
102     /** indicates AudioTrack state is playing */
103     public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
104     /**
105       * @hide
106       * indicates AudioTrack state is stopping waiting for NATIVE_EVENT_STREAM_END to
107       * transition to PLAYSTATE_STOPPED.
108       * Only valid for offload mode.
109       */
110     private static final int PLAYSTATE_STOPPING = 4;
111     /**
112       * @hide
113       * indicates AudioTrack state is paused from stopping state. Will transition to
114       * PLAYSTATE_STOPPING if play() is called.
115       * Only valid for offload mode.
116       */
117     private static final int PLAYSTATE_PAUSED_STOPPING = 5;
118 
119     // keep these values in sync with android_media_AudioTrack.cpp
120     /**
121      * Creation mode where audio data is transferred from Java to the native layer
122      * only once before the audio starts playing.
123      */
124     public static final int MODE_STATIC = 0;
125     /**
126      * Creation mode where audio data is streamed from Java to the native layer
127      * as the audio is playing.
128      */
129     public static final int MODE_STREAM = 1;
130 
131     /** @hide */
132     @IntDef({
133         MODE_STATIC,
134         MODE_STREAM
135     })
136     @Retention(RetentionPolicy.SOURCE)
137     public @interface TransferMode {}
138 
139     /**
140      * State of an AudioTrack that was not successfully initialized upon creation.
141      */
142     public static final int STATE_UNINITIALIZED = 0;
143     /**
144      * State of an AudioTrack that is ready to be used.
145      */
146     public static final int STATE_INITIALIZED   = 1;
147     /**
148      * State of a successfully initialized AudioTrack that uses static data,
149      * but that hasn't received that data yet.
150      */
151     public static final int STATE_NO_STATIC_DATA = 2;
152 
153     /**
154      * Denotes a successful operation.
155      */
156     public  static final int SUCCESS                               = AudioSystem.SUCCESS;
157     /**
158      * Denotes a generic operation failure.
159      */
160     public  static final int ERROR                                 = AudioSystem.ERROR;
161     /**
162      * Denotes a failure due to the use of an invalid value.
163      */
164     public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
165     /**
166      * Denotes a failure due to the improper use of a method.
167      */
168     public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
169     /**
170      * An error code indicating that the object reporting it is no longer valid and needs to
171      * be recreated.
172      */
173     public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
174     /**
175      * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
176      * or immediately after start/ACTIVE.
177      * @hide
178      */
179     public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;
180 
181     // Error codes:
182     // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
183     private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
184     private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
185     private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
186     private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
187     private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;
188 
189     // Events:
190     // to keep in sync with frameworks/av/include/media/AudioTrack.h
191     /**
192      * Event id denotes when playback head has reached a previously set marker.
193      */
194     private static final int NATIVE_EVENT_MARKER  = 3;
195     /**
196      * Event id denotes when previously set update period has elapsed during playback.
197      */
198     private static final int NATIVE_EVENT_NEW_POS = 4;
199     /**
200      * Callback for more data
201      */
202     private static final int NATIVE_EVENT_CAN_WRITE_MORE_DATA = 9;
203     /**
204      * IAudioTrack tear down for offloaded tracks
205      * TODO: when received, java AudioTrack must be released
206      */
207     private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6;
208     /**
209      * Event id denotes when all the buffers queued in AF and HW are played
210      * back (after stop is called) for an offloaded track.
211      */
212     private static final int NATIVE_EVENT_STREAM_END = 7;
213 
214     private final static String TAG = "android.media.AudioTrack";
215 
216 
217     /** @hide */
218     @IntDef({
219         WRITE_BLOCKING,
220         WRITE_NON_BLOCKING
221     })
222     @Retention(RetentionPolicy.SOURCE)
223     public @interface WriteMode {}
224 
225     /**
226      * The write mode indicating the write operation will block until all data has been written,
227      * to be used as the actual value of the writeMode parameter in
228      * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
229      * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
230      * {@link #write(ByteBuffer, int, int, long)}.
231      */
232     public final static int WRITE_BLOCKING = 0;
233 
234     /**
235      * The write mode indicating the write operation will return immediately after
236      * queuing as much audio data for playback as possible without blocking,
237      * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
239      * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
240      * {@link #write(ByteBuffer, int, int, long)}.
241      */
242     public final static int WRITE_NON_BLOCKING = 1;
243 
244     /** @hide */
245     @IntDef({
246         PERFORMANCE_MODE_NONE,
247         PERFORMANCE_MODE_LOW_LATENCY,
248         PERFORMANCE_MODE_POWER_SAVING
249     })
250     @Retention(RetentionPolicy.SOURCE)
251     public @interface PerformanceMode {}
252 
253     /**
254      * Default performance mode for an {@link AudioTrack}.
255      */
256     public static final int PERFORMANCE_MODE_NONE = 0;
257 
258     /**
259      * Low latency performance mode for an {@link AudioTrack}.
260      * If the device supports it, this mode
261      * enables a lower latency path through to the audio output sink.
262      * Effects may no longer work with such an {@code AudioTrack} and
263      * the sample rate must match that of the output sink.
264      * <p>
265      * Applications should be aware that low latency requires careful
266      * buffer management, with smaller chunks of audio data written by each
267      * {@code write()} call.
268      * <p>
269      * If this flag is used without specifying a {@code bufferSizeInBytes} then the
270      * {@code AudioTrack}'s actual buffer size may be too small.
271      * It is recommended that a fairly
272      * large buffer should be specified when the {@code AudioTrack} is created.
273      * Then the actual size can be reduced by calling
274      * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
275      * by lowering it after each {@code write()} call until the audio glitches,
276      * which is detected by calling
277      * {@link #getUnderrunCount()}. Then the buffer size can be increased
278      * until there are no glitches.
279      * This tuning step should be done while playing silence.
280      * This technique provides a compromise between latency and glitch rate.
281      */
282     public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;
283 
284     /**
285      * Power saving performance mode for an {@link AudioTrack}.
286      * If the device supports it, this
287      * mode will enable a lower power path to the audio output sink.
288      * In addition, this lower power path typically will have
289      * deeper internal buffers and better underrun resistance,
290      * with a tradeoff of higher latency.
291      * <p>
292      * In this mode, applications should attempt to use a larger buffer size
293      * and deliver larger chunks of audio data per {@code write()} call.
294      * Use {@link #getBufferSizeInFrames()} to determine
295      * the actual buffer size of the {@code AudioTrack} as it may have increased
296      * to accommodate a deeper buffer.
297      */
298     public static final int PERFORMANCE_MODE_POWER_SAVING = 2;
299 
300     // keep in sync with system/media/audio/include/system/audio-base.h
301     private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
302     private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;
303 
304     // Size of HW_AV_SYNC track AV header.
305     private static final float HEADER_V2_SIZE_BYTES = 20.0f;
306 
307     //--------------------------------------------------------------------------
308     // Member variables
309     //--------------------
310     /**
311      * Indicates the state of the AudioTrack instance.
312      * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
313      */
314     private int mState = STATE_UNINITIALIZED;
315     /**
316      * Indicates the play state of the AudioTrack instance.
317      * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
318      */
319     private int mPlayState = PLAYSTATE_STOPPED;
320 
321     /**
322      * Indicates that we are expecting an end of stream callback following a call
323      * to setOffloadEndOfStream() in a gapless track transition context. The native track
324      * will be restarted automatically.
325      */
326     private boolean mOffloadEosPending = false;
327 
328     /**
329      * Lock to ensure mPlayState updates reflect the actual state of the object.
330      */
331     private final Object mPlayStateLock = new Object();
332     /**
333      * Sizes of the audio buffer.
334      * These values are set during construction and can be stale.
335      * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
336      */
337     private int mNativeBufferSizeInBytes = 0;
338     private int mNativeBufferSizeInFrames = 0;
339     /**
340      * Handler for events coming from the native code.
341      */
342     private NativePositionEventHandlerDelegate mEventHandlerDelegate;
343     /**
344      * Looper associated with the thread that creates the AudioTrack instance.
345      */
346     private final Looper mInitializationLooper;
347     /**
348      * The audio data source sampling rate in Hz.
349      * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
350      */
351     private int mSampleRate; // initialized by all constructors via audioParamCheck()
352     /**
353      * The number of audio output channels (1 is mono, 2 is stereo, etc.).
354      */
355     private int mChannelCount = 1;
356     /**
357      * The audio channel mask used for calling native AudioTrack
358      */
359     private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
360 
361     /**
362      * The type of the audio stream to play. See
363      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
364      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
365      *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
366      *   {@link AudioManager#STREAM_DTMF}.
367      */
368     @UnsupportedAppUsage
369     private int mStreamType = AudioManager.STREAM_MUSIC;
370 
371     /**
372      * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
373      */
374     private int mDataLoadMode = MODE_STREAM;
375     /**
376      * The current channel position mask, as specified on AudioTrack creation.
377      * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
378      * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
379      */
380     private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
381     /**
382      * The channel index mask if specified, otherwise 0.
383      */
384     private int mChannelIndexMask = 0;
385     /**
386      * The encoding of the audio samples.
387      * @see AudioFormat#ENCODING_PCM_8BIT
388      * @see AudioFormat#ENCODING_PCM_16BIT
389      * @see AudioFormat#ENCODING_PCM_FLOAT
390      */
391     private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
392     /**
393      * The AudioAttributes used in configuration.
394      */
395     private AudioAttributes mConfiguredAudioAttributes;
396     /**
397      * Audio session ID
398      */
399     private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
400     /**
401      * HW_AV_SYNC track AV Sync Header
402      */
403     private ByteBuffer mAvSyncHeader = null;
404     /**
405      * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
406      */
407     private int mAvSyncBytesRemaining = 0;
408     /**
409      * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
410      */
411     private int mOffset = 0;
412     /**
413      * Indicates whether the track is intended to play in offload mode.
414      */
415     private boolean mOffloaded = false;
416     /**
417      * When offloaded track: delay for decoder in frames
418      */
419     private int mOffloadDelayFrames = 0;
420     /**
421      * When offloaded track: padding for decoder in frames
422      */
423     private int mOffloadPaddingFrames = 0;
424 
425     //--------------------------------
426     // Used exclusively by native code
427     //--------------------
428     /**
429      * @hide
430      * Accessed by native methods: provides access to C++ AudioTrack object.
431      */
432     @SuppressWarnings("unused")
433     @UnsupportedAppUsage
434     protected long mNativeTrackInJavaObj;
435     /**
436      * Accessed by native methods: provides access to the JNI data (i.e. resources used by
437      * the native AudioTrack object, but not stored in it).
438      */
439     @SuppressWarnings("unused")
440     @UnsupportedAppUsage
441     private long mJniData;
442 
443 
444     //--------------------------------------------------------------------------
445     // Constructor, Finalize
446     //--------------------
447     /**
448      * Class constructor.
449      * @param streamType the type of the audio stream. See
450      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
451      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
452      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
453      * @param sampleRateInHz the initial source sample rate expressed in Hz.
454      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
455      *   which is usually the sample rate of the sink.
456      *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
457      * @param channelConfig describes the configuration of the audio channels.
458      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
459      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
460      * @param audioFormat the format in which the audio data is represented.
461      *   See {@link AudioFormat#ENCODING_PCM_16BIT},
462      *   {@link AudioFormat#ENCODING_PCM_8BIT},
463      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
464      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
465      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
466      *   <p> If the track's creation mode is {@link #MODE_STATIC},
467      *   this is the maximum length sample, or audio clip, that can be played by this instance.
468      *   <p> If the track's creation mode is {@link #MODE_STREAM},
469      *   this should be the desired buffer size
470      *   for the <code>AudioTrack</code> to satisfy the application's
471      *   latency requirements.
472      *   If <code>bufferSizeInBytes</code> is less than the
473      *   minimum buffer size for the output sink, it is increased to the minimum
474      *   buffer size.
475      *   The method {@link #getBufferSizeInFrames()} returns the
476      *   actual size in frames of the buffer created, which
477      *   determines the minimum frequency to write
478      *   to the streaming <code>AudioTrack</code> to avoid underrun.
479      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
480      *   for an AudioTrack instance in streaming mode.
481      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
482      * @throws java.lang.IllegalArgumentException
483      * @deprecated use {@link Builder} or
484      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
485      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
486      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode)487     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
488             int bufferSizeInBytes, int mode)
489     throws IllegalArgumentException {
490         this(streamType, sampleRateInHz, channelConfig, audioFormat,
491                 bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
492     }
493 
494     /**
495      * Class constructor with audio session. Use this constructor when the AudioTrack must be
496      * attached to a particular audio session. The primary use of the audio session ID is to
497      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
498      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
499      * and media players in the same session and not to the output mix.
500      * When an AudioTrack is created without specifying a session, it will create its own session
501      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
502      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
503      * session
504      * with all other media players or audio tracks in the same session, otherwise a new session
505      * will be created for this track if none is supplied.
506      * @param streamType the type of the audio stream. See
507      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
508      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
509      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
510      * @param sampleRateInHz the initial source sample rate expressed in Hz.
511      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
512      *   which is usually the sample rate of the sink.
513      * @param channelConfig describes the configuration of the audio channels.
514      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
515      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
516      * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
520      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
521      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
522      *   <p> If the track's creation mode is {@link #MODE_STATIC},
523      *   this is the maximum length sample, or audio clip, that can be played by this instance.
524      *   <p> If the track's creation mode is {@link #MODE_STREAM},
525      *   this should be the desired buffer size
526      *   for the <code>AudioTrack</code> to satisfy the application's
527      *   latency requirements.
528      *   If <code>bufferSizeInBytes</code> is less than the
529      *   minimum buffer size for the output sink, it is increased to the minimum
530      *   buffer size.
531      *   The method {@link #getBufferSizeInFrames()} returns the
532      *   actual size in frames of the buffer created, which
533      *   determines the minimum frequency to write
534      *   to the streaming <code>AudioTrack</code> to avoid underrun.
535      *   You can write data into this buffer in smaller chunks than this size.
536      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
537      *   for an AudioTrack instance in streaming mode.
538      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
539      * @param sessionId Id of audio session the AudioTrack must be attached to
540      * @throws java.lang.IllegalArgumentException
541      * @deprecated use {@link Builder} or
542      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
543      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
544      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode, int sessionId)545     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
546             int bufferSizeInBytes, int mode, int sessionId)
547     throws IllegalArgumentException {
548         // mState already == STATE_UNINITIALIZED
549         this((new AudioAttributes.Builder())
550                     .setLegacyStreamType(streamType)
551                     .build(),
552                 (new AudioFormat.Builder())
553                     .setChannelMask(channelConfig)
554                     .setEncoding(audioFormat)
555                     .setSampleRate(sampleRateInHz)
556                     .build(),
557                 bufferSizeInBytes,
558                 mode, sessionId);
559         deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
560     }
561 
562     /**
563      * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
564      * @param attributes a non-null {@link AudioAttributes} instance.
565      * @param format a non-null {@link AudioFormat} instance describing the format of the data
566      *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
567      *     configuring the audio format parameters such as encoding, channel mask and sample rate.
568      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
569      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
570      *   <p> If the track's creation mode is {@link #MODE_STATIC},
571      *   this is the maximum length sample, or audio clip, that can be played by this instance.
572      *   <p> If the track's creation mode is {@link #MODE_STREAM},
573      *   this should be the desired buffer size
574      *   for the <code>AudioTrack</code> to satisfy the application's
575      *   latency requirements.
576      *   If <code>bufferSizeInBytes</code> is less than the
577      *   minimum buffer size for the output sink, it is increased to the minimum
578      *   buffer size.
579      *   The method {@link #getBufferSizeInFrames()} returns the
580      *   actual size in frames of the buffer created, which
581      *   determines the minimum frequency to write
582      *   to the streaming <code>AudioTrack</code> to avoid underrun.
583      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
584      *   for an AudioTrack instance in streaming mode.
585      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
586      * @param sessionId ID of audio session the AudioTrack must be attached to, or
587      *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
588      *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
589      *   construction.
590      * @throws IllegalArgumentException
591      */
AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId)592     public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
593             int mode, int sessionId)
594                     throws IllegalArgumentException {
595         this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/);
596     }
597 
AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId, boolean offload)598     private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
599             int mode, int sessionId, boolean offload)
600                     throws IllegalArgumentException {
601         super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
602         // mState already == STATE_UNINITIALIZED
603 
604         mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.
605 
606         if (format == null) {
607             throw new IllegalArgumentException("Illegal null AudioFormat");
608         }
609 
610         // Check if we should enable deep buffer mode
611         if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
612             mAttributes = new AudioAttributes.Builder(mAttributes)
613                 .replaceFlags((mAttributes.getAllFlags()
614                         | AudioAttributes.FLAG_DEEP_BUFFER)
615                         & ~AudioAttributes.FLAG_LOW_LATENCY)
616                 .build();
617         }
618 
619         // remember which looper is associated with the AudioTrack instantiation
620         Looper looper;
621         if ((looper = Looper.myLooper()) == null) {
622             looper = Looper.getMainLooper();
623         }
624 
625         int rate = format.getSampleRate();
626         if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
627             rate = 0;
628         }
629 
630         int channelIndexMask = 0;
631         if ((format.getPropertySetMask()
632                 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
633             channelIndexMask = format.getChannelIndexMask();
634         }
635         int channelMask = 0;
636         if ((format.getPropertySetMask()
637                 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
638             channelMask = format.getChannelMask();
639         } else if (channelIndexMask == 0) { // if no masks at all, use stereo
640             channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
641                     | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
642         }
643         int encoding = AudioFormat.ENCODING_DEFAULT;
644         if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
645             encoding = format.getEncoding();
646         }
647         audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
648         mOffloaded = offload;
649         mStreamType = AudioSystem.STREAM_DEFAULT;
650 
651         audioBuffSizeCheck(bufferSizeInBytes);
652 
653         mInitializationLooper = looper;
654 
655         if (sessionId < 0) {
656             throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
657         }
658 
659         int[] sampleRate = new int[] {mSampleRate};
660         int[] session = new int[1];
661         session[0] = sessionId;
662         // native initialization
663         int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
664                 sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
665                 mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
666                 offload);
667         if (initResult != SUCCESS) {
668             loge("Error code "+initResult+" when initializing AudioTrack.");
669             return; // with mState == STATE_UNINITIALIZED
670         }
671 
672         mSampleRate = sampleRate[0];
673         mSessionId = session[0];
674 
675         if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
676             int frameSizeInBytes;
677             if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
678                 frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
679             } else {
680                 frameSizeInBytes = 1;
681             }
682             mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
683         }
684 
685         if (mDataLoadMode == MODE_STATIC) {
686             mState = STATE_NO_STATIC_DATA;
687         } else {
688             mState = STATE_INITIALIZED;
689         }
690 
691         baseRegisterPlayer();
692     }
693 
694     /**
695      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
696      * the AudioTrackRoutingProxy subclass.
697      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
698      * (associated with an OpenSL ES player).
699      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
700      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
701      * it means that the OpenSL player interface hasn't been realized, so there is no native
702      * Audiotrack to connect to. In this case wait to call deferred_connect() until the
703      * OpenSLES interface is realized.
704      */
AudioTrack(long nativeTrackInJavaObj)705     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
706         super(new AudioAttributes.Builder().build(),
707                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
708         // "final"s
709         mNativeTrackInJavaObj = 0;
710         mJniData = 0;
711 
712         // remember which looper is associated with the AudioTrack instantiation
713         Looper looper;
714         if ((looper = Looper.myLooper()) == null) {
715             looper = Looper.getMainLooper();
716         }
717         mInitializationLooper = looper;
718 
719         // other initialization...
720         if (nativeTrackInJavaObj != 0) {
721             baseRegisterPlayer();
722             deferred_connect(nativeTrackInJavaObj);
723         } else {
724             mState = STATE_UNINITIALIZED;
725         }
726     }
727 
728     /**
729      * @hide
730      */
731     @UnsupportedAppUsage
deferred_connect(long nativeTrackInJavaObj)732     /* package */ void deferred_connect(long nativeTrackInJavaObj) {
733         if (mState != STATE_INITIALIZED) {
734             // Note that for this native_setup, we are providing an already created/initialized
735             // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
736             int[] session = { 0 };
737             int[] rates = { 0 };
738             int initResult = native_setup(new WeakReference<AudioTrack>(this),
739                     null /*mAttributes - NA*/,
740                     rates /*sampleRate - NA*/,
741                     0 /*mChannelMask - NA*/,
742                     0 /*mChannelIndexMask - NA*/,
743                     0 /*mAudioFormat - NA*/,
744                     0 /*mNativeBufferSizeInBytes - NA*/,
745                     0 /*mDataLoadMode - NA*/,
746                     session,
747                     nativeTrackInJavaObj,
748                     false /*offload*/);
749             if (initResult != SUCCESS) {
750                 loge("Error code "+initResult+" when initializing AudioTrack.");
751                 return; // with mState == STATE_UNINITIALIZED
752             }
753 
754             mSessionId = session[0];
755 
756             mState = STATE_INITIALIZED;
757         }
758     }
759 
    /**
     * Builder class for {@link AudioTrack} objects.
     * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
     * attributes and audio format parameters, you indicate which of those vary from the default
     * behavior on the device.
     * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
     * parameters, to be used by a new <code>AudioTrack</code> instance:
     *
     * <pre class="prettyprint">
     * AudioTrack player = new AudioTrack.Builder()
     *         .setAudioAttributes(new AudioAttributes.Builder()
     *                  .setUsage(AudioAttributes.USAGE_ALARM)
     *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
     *                  .build())
     *         .setAudioFormat(new AudioFormat.Builder()
     *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     *                 .setSampleRate(44100)
     *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
     *                 .build())
     *         .setBufferSizeInBytes(minBuffSize)
     *         .build();
     * </pre>
     * <p>
     * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
     * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
     * <br>If the audio format is not specified or is incomplete, its channel configuration will be
     * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
     * {@link AudioFormat#ENCODING_PCM_16BIT}.
     * The sample rate will depend on the device actually selected for playback and can be queried
     * with {@link #getSampleRate()} method.
     * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
     * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
     * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
     * <code>MODE_STREAM</code> will be used.
     * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
     * be generated.
     * <br>Offload is false by default.
     */
    public static class Builder {
        // All fields hold the values accumulated by the setters until build() is called.
        private AudioAttributes mAttributes;   // null means "use USAGE_MEDIA defaults"
        private AudioFormat mFormat;           // null means "stereo, default encoding"
        private int mBufferSizeInBytes;        // 0 means "pick a minimal size in build()"
        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM;
        private int mPerformanceMode = PERFORMANCE_MODE_NONE;
        private boolean mOffload = false;

        /**
         * Constructs a new Builder with the default values as described above.
         */
        public Builder() {
        }

        /**
         * Sets the {@link AudioAttributes}.
         * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
         *     data to be played.
         * @return the same Builder instance.
         * @throws IllegalArgumentException if {@code attributes} is null.
         */
        public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
                throws IllegalArgumentException {
            if (attributes == null) {
                throw new IllegalArgumentException("Illegal null AudioAttributes argument");
            }
            // keep reference, we only copy the data when building
            mAttributes = attributes;
            return this;
        }

        /**
         * Sets the format of the audio data to be played by the {@link AudioTrack}.
         * See {@link AudioFormat.Builder} for configuring the audio format parameters such
         * as encoding, channel mask and sample rate.
         * @param format a non-null {@link AudioFormat} instance.
         * @return the same Builder instance.
         * @throws IllegalArgumentException if {@code format} is null.
         */
        public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
                throws IllegalArgumentException {
            if (format == null) {
                throw new IllegalArgumentException("Illegal null AudioFormat argument");
            }
            // keep reference, we only copy the data when building
            mFormat = format;
            return this;
        }

        /**
         * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
         * If using the {@link AudioTrack} in streaming mode
         * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
         * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
         * the estimated minimum buffer size for the creation of an AudioTrack instance
         * in streaming mode.
         * <br>If using the <code>AudioTrack</code> in static mode (see
         * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
         * played by this instance.
         * @param bufferSizeInBytes a strictly positive buffer size in bytes.
         * @return the same Builder instance.
         * @throws IllegalArgumentException if {@code bufferSizeInBytes} is not strictly positive.
         */
        public @NonNull Builder setBufferSizeInBytes(@IntRange(from = 0) int bufferSizeInBytes)
                throws IllegalArgumentException {
            if (bufferSizeInBytes <= 0) {
                throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
            }
            mBufferSizeInBytes = bufferSizeInBytes;
            return this;
        }

        /**
         * Sets the mode under which buffers of audio data are transferred from the
         * {@link AudioTrack} to the framework.
         * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException if {@code mode} is not a valid transfer mode.
         */
        public @NonNull Builder setTransferMode(@TransferMode int mode)
                throws IllegalArgumentException {
            switch(mode) {
                case MODE_STREAM:
                case MODE_STATIC:
                    mMode = mode;
                    break;
                default:
                    throw new IllegalArgumentException("Invalid transfer mode " + mode);
            }
            return this;
        }

        /**
         * Sets the session ID the {@link AudioTrack} will be attached to.
         * @param sessionId a strictly positive ID number retrieved from another
         *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
         *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
         *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException if {@code sessionId} is neither
         *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE} nor strictly positive.
         */
        public @NonNull Builder setSessionId(@IntRange(from = 1) int sessionId)
                throws IllegalArgumentException {
            if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
                throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
            }
            mSessionId = sessionId;
            return this;
        }

        /**
         * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
         * may not be supported by the particular device, and the framework is free
         * to ignore such request if it is incompatible with other requests or hardware.
         *
         * @param performanceMode one of
         * {@link AudioTrack#PERFORMANCE_MODE_NONE},
         * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
         * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException if {@code performanceMode} is not valid.
         */
        public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
            switch (performanceMode) {
                case PERFORMANCE_MODE_NONE:
                case PERFORMANCE_MODE_LOW_LATENCY:
                case PERFORMANCE_MODE_POWER_SAVING:
                    mPerformanceMode = performanceMode;
                    break;
                default:
                    throw new IllegalArgumentException(
                            "Invalid performance mode " + performanceMode);
            }
            return this;
        }

        /**
         * Sets whether this track will play through the offloaded audio path.
         * When set to true, at build time, the audio format will be checked against
         * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)}
         * to verify the audio format used by this track is supported on the device's offload
         * path (if any).
         * <br>Offload is only supported for media audio streams, and therefore requires that
         * the usage be {@link AudioAttributes#USAGE_MEDIA}.
         * @param offload true to require the offload path for playback.
         * @return the same Builder instance.
         */
        public @NonNull Builder setOffloadedPlayback(boolean offload) {
            mOffload = offload;
            return this;
        }

        /**
         * Builds an {@link AudioTrack} instance initialized with all the parameters set
         * on this <code>Builder</code>.
         * @return a new successfully initialized {@link AudioTrack} instance.
         * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
         *     were incompatible, or if they are not supported by the device,
         *     or if the device was not available.
         */
        public @NonNull AudioTrack build() throws UnsupportedOperationException {
            // Apply the documented default attributes when none were provided.
            if (mAttributes == null) {
                mAttributes = new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .build();
            }
            // Translate the requested performance mode into AudioAttributes flags; low latency
            // and deep buffer are mutually exclusive, so the opposite flag is always cleared.
            switch (mPerformanceMode) {
            case PERFORMANCE_MODE_LOW_LATENCY:
                mAttributes = new AudioAttributes.Builder(mAttributes)
                    .replaceFlags((mAttributes.getAllFlags()
                            | AudioAttributes.FLAG_LOW_LATENCY)
                            & ~AudioAttributes.FLAG_DEEP_BUFFER)
                    .build();
                break;
            case PERFORMANCE_MODE_NONE:
                if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
                    break; // do not enable deep buffer mode.
                }
                // permitted to fall through to enable deep buffer
            case PERFORMANCE_MODE_POWER_SAVING:
                mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
                break;
            }

            // Apply the documented default format (stereo, default encoding, device-chosen
            // sample rate) when none was provided.
            if (mFormat == null) {
                mFormat = new AudioFormat.Builder()
                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                        //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
                        .setEncoding(AudioFormat.ENCODING_DEFAULT)
                        .build();
            }

            // Offload is incompatible with low latency and must be supported by the device
            // for the chosen format/attributes combination.
            if (mOffload) {
                if (mPerformanceMode == PERFORMANCE_MODE_LOW_LATENCY) {
                    throw new UnsupportedOperationException(
                            "Offload and low latency modes are incompatible");
                }
                if (!AudioSystem.isOffloadSupported(mFormat, mAttributes)) {
                    throw new UnsupportedOperationException(
                            "Cannot create AudioTrack, offload format / attributes not supported");
                }
            }

            try {
                // If the buffer size is not specified in streaming mode,
                // use a single frame for the buffer size and let the
                // native code figure out the minimum buffer size.
                if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
                    mBufferSizeInBytes = mFormat.getChannelCount()
                            * mFormat.getBytesPerSample(mFormat.getEncoding());
                }
                final AudioTrack track = new AudioTrack(
                        mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId, mOffload);
                if (track.getState() == STATE_UNINITIALIZED) {
                    // release is not necessary
                    throw new UnsupportedOperationException("Cannot create AudioTrack");
                }
                return track;
            } catch (IllegalArgumentException e) {
                // The constructor reports bad parameters as IllegalArgumentException;
                // build() surfaces them as the documented UnsupportedOperationException.
                throw new UnsupportedOperationException(e.getMessage());
            }
        }
    }
1026 
    /**
     * Configures the delay and padding values for the current compressed stream playing
     * in offload mode.
     * This can only be used on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. The unit is frames, where a
     * frame indicates the number of samples per channel, e.g. 100 frames for a stereo compressed
     * stream corresponds to 200 decoded interleaved PCM samples.
     * @param delayInFrames number of frames to be ignored at the beginning of the stream. A value
     *     of 0 indicates no delay is to be applied.
     * @param paddingInFrames number of frames to be ignored at the end of the stream. A value
     *     of 0 indicates no padding is to be applied.
     * @throws IllegalArgumentException if either argument is negative.
     * @throws IllegalStateException if the track is not offloaded or not initialized.
     */
    public void setOffloadDelayPadding(@IntRange(from = 0) int delayInFrames,
            @IntRange(from = 0) int paddingInFrames) {
        // Arguments are validated before state; padding is checked first, so a call with
        // both values negative reports the padding error.
        if (paddingInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative padding");
        }
        if (delayInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative delay");
        }
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal use of delay/padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        // Cache the values for the getters, then push them down to the native layer.
        mOffloadDelayFrames = delayInFrames;
        mOffloadPaddingFrames = paddingInFrames;
        native_set_delay_padding(delayInFrames, paddingInFrames);
    }
1057 
    /**
     * Return the decoder delay of an offloaded track, expressed in frames, previously set with
     * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
     * <p>This delay indicates the number of frames to be ignored at the beginning of the stream.
     * This value can only be queried on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
     * @return decoder delay expressed in frames.
     * @throws IllegalStateException if the track is not offloaded or not initialized.
     */
    public @IntRange(from = 0) int getOffloadDelay() {
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal query of delay on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Illegal query of delay on uninitialized track");
        }
        // Returns the cached value; the native layer is not queried.
        return mOffloadDelayFrames;
    }
1075 
    /**
     * Return the decoder padding of an offloaded track, expressed in frames, previously set with
     * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
     * <p>This padding indicates the number of frames to be ignored at the end of the stream.
     * This value can only be queried on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
     * @return decoder padding expressed in frames.
     * @throws IllegalStateException if the track is not offloaded or not initialized.
     */
    public @IntRange(from = 0) int getOffloadPadding() {
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal query of padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Illegal query of padding on uninitialized track");
        }
        // Returns the cached value; the native layer is not queried.
        return mOffloadPaddingFrames;
    }
1093 
    /**
     * Declares that the last write() operation on this track provided the last buffer of this
     * stream.
     * After the end of stream, previously set padding and delay values are ignored.
     * Can only be called if the AudioTrack is opened in offload mode
     * {@see Builder#setOffloadedPlayback(boolean)}.
     * Can only be called if the AudioTrack is in state {@link #PLAYSTATE_PLAYING}
     * {@see #getPlayState()}.
     * Use this method in the same thread as any write() operation.
     * @throws IllegalStateException if the track is not offloaded, not initialized, not
     *     playing, or has no stream event callback registered.
     */
    public void setOffloadEndOfStream() {
        if (!mOffloaded) {
            throw new IllegalStateException("EOS not supported on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        if (mPlayState != PLAYSTATE_PLAYING) {
            throw new IllegalStateException("EOS not supported if not playing");
        }
        // EOS requires at least one registered StreamEventCallback; refuse the call otherwise.
        synchronized (mStreamEventCbLock) {
            if (mStreamEventCbInfoList.size() == 0) {
                throw new IllegalStateException("EOS not supported without StreamEventCallback");
            }
        }

        // Stop the native track, record that an EOS is pending, and move the play state to
        // STOPPING, all under the play-state lock.
        synchronized (mPlayStateLock) {
            native_stop();
            mOffloadEosPending = true;
            mPlayState = PLAYSTATE_STOPPING;
        }
    }
1126 
    /**
     * Returns whether the track was built with {@link Builder#setOffloadedPlayback(boolean)} set
     * to {@code true}.
     * @return true if the track is using offloaded playback.
     */
    public boolean isOffloadedPlayback() {
        // mOffloaded is set once at construction time and never changes afterwards.
        return mOffloaded;
    }
1135 
    /**
     * Returns whether direct playback of an audio format with the provided attributes is
     * currently supported on the system.
     * <p>Direct playback means that the audio stream is not resampled or downmixed
     * by the framework. Checking for direct support can help the app select the representation
     * of audio content that most closely matches the capabilities of the device and peripherals
     * (e.g. A/V receiver) connected to it. Note that the provided stream can still be re-encoded
     * or mixed with other streams, if needed.
     * <p>Also note that this query only provides information about the support of an audio format.
     * It does not indicate whether the resources necessary for the playback are available
     * at that instant.
     * @param format a non-null {@link AudioFormat} instance describing the format of
     *   the audio data.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @return true if the given audio format can be played directly.
     * @throws IllegalArgumentException if {@code format} or {@code attributes} is null.
     */
    public static boolean isDirectPlaybackSupported(@NonNull AudioFormat format,
            @NonNull AudioAttributes attributes) {
        // Validate both arguments before crossing into native code.
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat argument");
        }
        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes argument");
        }
        // Unpack the Java objects into primitives for the native query.
        return native_is_direct_output_supported(format.getEncoding(), format.getSampleRate(),
                format.getChannelMask(), format.getChannelIndexMask(),
                attributes.getContentType(), attributes.getUsage(), attributes.getFlags());
    }
1164 
    // Mask of all the positional output channels supported by this class. The allowed
    // combinations are further restricted by the matching left/right rule and
    // AudioSystem.OUT_CHANNEL_COUNT_MAX.
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
1178 
1179     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
1180     // power saving to be automatically enabled for an AudioTrack. Returns false if
1181     // power saving is already enabled in the attributes parameter.
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)1182     private static boolean shouldEnablePowerSaving(
1183             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
1184             int bufferSizeInBytes, int mode) {
1185         // If no attributes, OK
1186         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
1187         // Only consider flags that are not compatible with FLAG_DEEP_BUFFER. We include
1188         // FLAG_DEEP_BUFFER because if set the request is explicit and
1189         // shouldEnablePowerSaving() should return false.
1190         final int flags = attributes.getAllFlags()
1191                 & (AudioAttributes.FLAG_DEEP_BUFFER | AudioAttributes.FLAG_LOW_LATENCY
1192                     | AudioAttributes.FLAG_HW_AV_SYNC | AudioAttributes.FLAG_BEACON);
1193 
1194         if (attributes != null &&
1195                 (flags != 0  // cannot have any special flags
1196                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
1197                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
1198                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
1199                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
1200             return false;
1201         }
1202 
1203         // Format must be fully specified and be linear pcm
1204         if (format == null
1205                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
1206                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
1207                 || !AudioFormat.isValidEncoding(format.getEncoding())
1208                 || format.getChannelCount() < 1) {
1209             return false;
1210         }
1211 
1212         // Mode must be streaming
1213         if (mode != MODE_STREAM) {
1214             return false;
1215         }
1216 
1217         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
1218         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
1219         if (bufferSizeInBytes != 0) {
1220             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
1221             final int MILLIS_PER_SECOND = 1000;
1222             final long bufferTargetSize =
1223                     BUFFER_TARGET_MODE_STREAM_MS
1224                     * format.getChannelCount()
1225                     * format.getBytesPerSample(format.getEncoding())
1226                     * format.getSampleRate()
1227                     / MILLIS_PER_SECOND;
1228             if (bufferSizeInBytes < bufferTargetSize) {
1229                 return false;
1230             }
1231         }
1232 
1233         return true;
1234     }
1235 
1236     // Convenience method for the constructor's parameter checks.
1237     // This is where constructor IllegalArgumentException-s are thrown
1238     // postconditions:
1239     //    mChannelCount is valid
1240     //    mChannelMask is valid
1241     //    mAudioFormat is valid
1242     //    mSampleRate is valid
1243     //    mDataLoadMode is valid
audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask, int audioFormat, int mode)1244     private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
1245                                  int audioFormat, int mode) {
1246         //--------------
1247         // sample rate, note these values are subject to change
1248         if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
1249                 sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
1250                 sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
1251             throw new IllegalArgumentException(sampleRateInHz
1252                     + "Hz is not a supported sample rate.");
1253         }
1254         mSampleRate = sampleRateInHz;
1255 
1256         // IEC61937 is based on stereo. We could coerce it to stereo.
1257         // But the application needs to know the stream is stereo so that
1258         // it is encoded and played correctly. So better to just reject it.
1259         if (audioFormat == AudioFormat.ENCODING_IEC61937
1260                 && channelConfig != AudioFormat.CHANNEL_OUT_STEREO) {
1261             throw new IllegalArgumentException(
1262                     "ENCODING_IEC61937 must be configured as CHANNEL_OUT_STEREO");
1263         }
1264 
1265         //--------------
1266         // channel config
1267         mChannelConfiguration = channelConfig;
1268 
1269         switch (channelConfig) {
1270         case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
1271         case AudioFormat.CHANNEL_OUT_MONO:
1272         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
1273             mChannelCount = 1;
1274             mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
1275             break;
1276         case AudioFormat.CHANNEL_OUT_STEREO:
1277         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
1278             mChannelCount = 2;
1279             mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
1280             break;
1281         default:
1282             if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
1283                 mChannelCount = 0;
1284                 break; // channel index configuration only
1285             }
1286             if (!isMultichannelConfigSupported(channelConfig)) {
1287                 // input channel configuration features unsupported channels
1288                 throw new IllegalArgumentException("Unsupported channel configuration.");
1289             }
1290             mChannelMask = channelConfig;
1291             mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1292         }
1293         // check the channel index configuration (if present)
1294         mChannelIndexMask = channelIndexMask;
1295         if (mChannelIndexMask != 0) {
1296             // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
1297             final int indexMask = (1 << AudioSystem.OUT_CHANNEL_COUNT_MAX) - 1;
1298             if ((channelIndexMask & ~indexMask) != 0) {
1299                 throw new IllegalArgumentException("Unsupported channel index configuration "
1300                         + channelIndexMask);
1301             }
1302             int channelIndexCount = Integer.bitCount(channelIndexMask);
1303             if (mChannelCount == 0) {
1304                  mChannelCount = channelIndexCount;
1305             } else if (mChannelCount != channelIndexCount) {
1306                 throw new IllegalArgumentException("Channel count must match");
1307             }
1308         }
1309 
1310         //--------------
1311         // audio format
1312         if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
1313             audioFormat = AudioFormat.ENCODING_PCM_16BIT;
1314         }
1315 
1316         if (!AudioFormat.isPublicEncoding(audioFormat)) {
1317             throw new IllegalArgumentException("Unsupported audio encoding.");
1318         }
1319         mAudioFormat = audioFormat;
1320 
1321         //--------------
1322         // audio load mode
1323         if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
1324                 ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
1325             throw new IllegalArgumentException("Invalid mode.");
1326         }
1327         mDataLoadMode = mode;
1328     }
1329 
1330     /**
1331      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
1332      * @param channelConfig the mask to validate
1333      * @return false if the AudioTrack can't be used with such a mask
1334      */
isMultichannelConfigSupported(int channelConfig)1335     private static boolean isMultichannelConfigSupported(int channelConfig) {
1336         // check for unsupported channels
1337         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
1338             loge("Channel configuration features unsupported channels");
1339             return false;
1340         }
1341         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1342         if (channelCount > AudioSystem.OUT_CHANNEL_COUNT_MAX) {
1343             loge("Channel configuration contains too many channels " +
1344                     channelCount + ">" + AudioSystem.OUT_CHANNEL_COUNT_MAX);
1345             return false;
1346         }
1347         // check for unsupported multichannel combinations:
1348         // - FL/FR must be present
1349         // - L/R channels must be paired (e.g. no single L channel)
1350         final int frontPair =
1351                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
1352         if ((channelConfig & frontPair) != frontPair) {
1353                 loge("Front channels must be present in multichannel configurations");
1354                 return false;
1355         }
1356         final int backPair =
1357                 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
1358         if ((channelConfig & backPair) != 0) {
1359             if ((channelConfig & backPair) != backPair) {
1360                 loge("Rear channels can't be used independently");
1361                 return false;
1362             }
1363         }
1364         final int sidePair =
1365                 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
1366         if ((channelConfig & sidePair) != 0
1367                 && (channelConfig & sidePair) != sidePair) {
1368             loge("Side channels can't be used independently");
1369             return false;
1370         }
1371         return true;
1372     }
1373 
1374 
1375     // Convenience method for the constructor's audio buffer size check.
1376     // preconditions:
1377     //    mChannelCount is valid
1378     //    mAudioFormat is valid
1379     // postcondition:
1380     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)1381     private void audioBuffSizeCheck(int audioBufferSize) {
1382         // NB: this section is only valid with PCM or IEC61937 data.
1383         //     To update when supporting compressed formats
1384         int frameSizeInBytes;
1385         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
1386             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
1387         } else {
1388             frameSizeInBytes = 1;
1389         }
1390         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
1391             throw new IllegalArgumentException("Invalid audio buffer size.");
1392         }
1393 
1394         mNativeBufferSizeInBytes = audioBufferSize;
1395         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
1396     }
1397 
1398 
1399     /**
1400      * Releases the native AudioTrack resources.
1401      */
    public void release() {
        // Stop delivering stream events before tearing anything else down.
        synchronized (mStreamEventCbLock){
            endStreamEventHandling();
        }
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch(IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        baseRelease();    // base-class cleanup (baseRelease is inherited; defined outside this view)
        native_release(); // free the native AudioTrack
        synchronized (mPlayStateLock) {
            mState = STATE_UNINITIALIZED;
            mPlayState = PLAYSTATE_STOPPED;
            // wake any thread blocked waiting on a play-state change
            mPlayStateLock.notify();
        }
    }
1421 
    @Override
    protected void finalize() {
        // Last-resort cleanup in case the app never called release().
        // NOTE(review): does not chain to super.finalize() — confirm that is intentional.
        baseRelease();
        native_finalize();
    }
1427 
1428     //--------------------------------------------------------------------------
1429     // Getters
1430     //--------------------
1431     /**
1432      * Returns the minimum gain value, which is the constant 0.0.
1433      * Gain values less than 0.0 will be clamped to 0.0.
1434      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1435      * @return the minimum value, which is the constant 0.0.
1436      */
getMinVolume()1437     static public float getMinVolume() {
1438         return GAIN_MIN;
1439     }
1440 
1441     /**
1442      * Returns the maximum gain value, which is greater than or equal to 1.0.
1443      * Gain values greater than the maximum will be clamped to the maximum.
1444      * <p>The word "volume" in the API name is historical; this is actually a gain.
1445      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
1446      * corresponds to a gain of 0 dB (sample values left unmodified).
1447      * @return the maximum value, which is greater than or equal to 1.0.
1448      */
getMaxVolume()1449     static public float getMaxVolume() {
1450         return GAIN_MAX;
1451     }
1452 
1453     /**
1454      * Returns the configured audio source sample rate in Hz.
1455      * The initial source sample rate depends on the constructor parameters,
1456      * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
1457      * If the constructor had a specific sample rate, then the initial sink sample rate is that
1458      * value.
1459      * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
1460      * then the initial sink sample rate is a route-dependent default value based on the source [sic].
1461      */
    public int getSampleRate() {
        return mSampleRate;  // set in audioParamCheck() at construction
    }
1465 
1466     /**
1467      * Returns the current playback sample rate rate in Hz.
1468      */
getPlaybackRate()1469     public int getPlaybackRate() {
1470         return native_get_playback_rate();
1471     }
1472 
1473     /**
1474      * Returns the current playback parameters.
1475      * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
1476      * @return current {@link PlaybackParams}.
1477      * @throws IllegalStateException if track is not initialized.
1478      */
getPlaybackParams()1479     public @NonNull PlaybackParams getPlaybackParams() {
1480         return native_get_playback_params();
1481     }
1482 
1483     /**
1484      * Returns the {@link AudioAttributes} used in configuration.
1485      * If a {@code streamType} is used instead of an {@code AudioAttributes}
1486      * to configure the AudioTrack
1487      * (the use of {@code streamType} for configuration is deprecated),
1488      * then the {@code AudioAttributes}
1489      * equivalent to the {@code streamType} is returned.
1490      * @return The {@code AudioAttributes} used to configure the AudioTrack.
1491      * @throws IllegalStateException If the track is not initialized.
1492      */
getAudioAttributes()1493     public @NonNull AudioAttributes getAudioAttributes() {
1494         if (mState == STATE_UNINITIALIZED || mConfiguredAudioAttributes == null) {
1495             throw new IllegalStateException("track not initialized");
1496         }
1497         return mConfiguredAudioAttributes;
1498     }
1499 
1500     /**
1501      * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
1502      * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1503      */
    public int getAudioFormat() {
        // set in audioParamCheck(); ENCODING_DEFAULT was normalized to ENCODING_PCM_16BIT there
        return mAudioFormat;
    }
1507 
1508     /**
1509      * Returns the volume stream type of this AudioTrack.
1510      * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
1511      * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
1512      * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
1513      * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
1514      * {@link AudioManager#STREAM_ACCESSIBILITY}.
1515      */
    public int getStreamType() {
        return mStreamType;  // assigned at construction (outside this view)
    }
1519 
1520     /**
1521      * Returns the configured channel position mask.
1522      * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
1523      * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
1524      * This method may return {@link AudioFormat#CHANNEL_INVALID} if
1525      * a channel index mask was used. Consider
1526      * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
1527      * which contains both the channel position mask and the channel index mask.
1528      */
    public int getChannelConfiguration() {
        // raw channelConfig stored verbatim by audioParamCheck(), so this can be
        // CHANNEL_INVALID for tracks configured with a channel index mask only
        return mChannelConfiguration;
    }
1532 
1533     /**
1534      * Returns the configured <code>AudioTrack</code> format.
1535      * @return an {@link AudioFormat} containing the
1536      * <code>AudioTrack</code> parameters at the time of configuration.
1537      */
getFormat()1538     public @NonNull AudioFormat getFormat() {
1539         AudioFormat.Builder builder = new AudioFormat.Builder()
1540             .setSampleRate(mSampleRate)
1541             .setEncoding(mAudioFormat);
1542         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
1543             builder.setChannelMask(mChannelConfiguration);
1544         }
1545         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
1546             builder.setChannelIndexMask(mChannelIndexMask);
1547         }
1548         return builder.build();
1549     }
1550 
1551     /**
1552      * Returns the configured number of channels.
1553      */
    public int getChannelCount() {
        // derived in audioParamCheck() from the position mask and/or index mask
        return mChannelCount;
    }
1557 
1558     /**
1559      * Returns the state of the AudioTrack instance. This is useful after the
1560      * AudioTrack instance has been created to check if it was initialized
1561      * properly. This ensures that the appropriate resources have been acquired.
1562      * @see #STATE_UNINITIALIZED
1563      * @see #STATE_INITIALIZED
1564      * @see #STATE_NO_STATIC_DATA
1565      */
    public int getState() {
        return mState;  // reset to STATE_UNINITIALIZED by release(); read without a lock
    }
1569 
1570     /**
1571      * Returns the playback state of the AudioTrack instance.
1572      * @see #PLAYSTATE_STOPPED
1573      * @see #PLAYSTATE_PAUSED
1574      * @see #PLAYSTATE_PLAYING
1575      */
getPlayState()1576     public int getPlayState() {
1577         synchronized (mPlayStateLock) {
1578             switch (mPlayState) {
1579                 case PLAYSTATE_STOPPING:
1580                     return PLAYSTATE_PLAYING;
1581                 case PLAYSTATE_PAUSED_STOPPING:
1582                     return PLAYSTATE_PAUSED;
1583                 default:
1584                     return mPlayState;
1585             }
1586         }
1587     }
1588 
1589 
1590     /**
1591      * Returns the effective size of the <code>AudioTrack</code> buffer
1592      * that the application writes to.
1593      * <p> This will be less than or equal to the result of
1594      * {@link #getBufferCapacityInFrames()}.
1595      * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
1596      * <p> If the track is subsequently routed to a different output sink, the buffer
1597      * size and capacity may enlarge to accommodate.
1598      * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
1599      * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
1600      * the size of the <code>AudioTrack</code> buffer in bytes.
1601      * <p> See also {@link AudioManager#getProperty(String)} for key
1602      * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
1603      * @return current size in frames of the <code>AudioTrack</code> buffer.
1604      * @throws IllegalStateException if track is not initialized.
1605      */
getBufferSizeInFrames()1606     public @IntRange (from = 0) int getBufferSizeInFrames() {
1607         return native_get_buffer_size_frames();
1608     }
1609 
1610     /**
1611      * Limits the effective size of the <code>AudioTrack</code> buffer
1612      * that the application writes to.
1613      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
1614      * If a blocking write is used then the write will block until the data
1615      * can fit within this limit.
1616      * <p>Changing this limit modifies the latency associated with
1617      * the buffer for this track. A smaller size will give lower latency
1618      * but there may be more glitches due to buffer underruns.
1619      * <p>The actual size used may not be equal to this requested size.
1620      * It will be limited to a valid range with a maximum of
1621      * {@link #getBufferCapacityInFrames()}.
1622      * It may also be adjusted slightly for internal reasons.
1623      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
1624      * will be returned.
1625      * <p>This method is only supported for PCM audio.
1626      * It is not supported for compressed audio tracks.
1627      *
1628      * @param bufferSizeInFrames requested buffer size in frames
1629      * @return the actual buffer size in frames or an error code,
1630      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1631      * @throws IllegalStateException if track is not initialized.
1632      */
setBufferSizeInFrames(@ntRange from = 0) int bufferSizeInFrames)1633     public int setBufferSizeInFrames(@IntRange (from = 0) int bufferSizeInFrames) {
1634         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
1635             return ERROR_INVALID_OPERATION;
1636         }
1637         if (bufferSizeInFrames < 0) {
1638             return ERROR_BAD_VALUE;
1639         }
1640         return native_set_buffer_size_frames(bufferSizeInFrames);
1641     }
1642 
1643     /**
1644      *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
1645      *  <p> If the track's creation mode is {@link #MODE_STATIC},
1646      *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
1647      *  A static track's frame count will not change.
1648      *  <p> If the track's creation mode is {@link #MODE_STREAM},
1649      *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
1650      *  For streaming tracks, this value may be rounded up to a larger value if needed by
1651      *  the target output sink, and
1652      *  if the track is subsequently routed to a different output sink, the
1653      *  frame count may enlarge to accommodate.
1654      *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
1655      *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
1656      *  the size of the <code>AudioTrack</code> buffer in bytes.
1657      *  <p> See also {@link AudioManager#getProperty(String)} for key
1658      *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
1659      *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
1660      *  @throws IllegalStateException if track is not initialized.
1661      */
getBufferCapacityInFrames()1662     public @IntRange (from = 0) int getBufferCapacityInFrames() {
1663         return native_get_buffer_capacity_frames();
1664     }
1665 
1666     /**
1667      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
1668      *  @return current size in frames of the <code>AudioTrack</code> buffer.
1669      *  @throws IllegalStateException
1670      *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
1671      */
    @Deprecated
    protected int getNativeFrameCount() {
        // NOTE(review): delegates to the buffer *capacity* query, while the deprecation
        // doc points at getBufferSizeInFrames() — confirm which is intended.
        return native_get_buffer_capacity_frames();
    }
1676 
1677     /**
1678      * Returns marker position expressed in frames.
1679      * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
1680      * or zero if marker is disabled.
1681      */
getNotificationMarkerPosition()1682     public int getNotificationMarkerPosition() {
1683         return native_get_marker_pos();
1684     }
1685 
1686     /**
1687      * Returns the notification update period expressed in frames.
1688      * Zero means that no position update notifications are being delivered.
1689      */
getPositionNotificationPeriod()1690     public int getPositionNotificationPeriod() {
1691         return native_get_pos_update_period();
1692     }
1693 
1694     /**
1695      * Returns the playback head position expressed in frames.
1696      * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
1697      * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
1698      * This is a continuously advancing counter.  It will wrap (overflow) periodically,
1699      * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
1700      * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
1701      * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
1702      * the total number of frames played since reset,
1703      * <i>not</i> the current offset within the buffer.
1704      */
getPlaybackHeadPosition()1705     public int getPlaybackHeadPosition() {
1706         return native_get_position();
1707     }
1708 
1709     /**
1710      * Returns this track's estimated latency in milliseconds. This includes the latency due
1711      * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
1712      *
1713      * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
1714      * a better solution.
1715      * @hide
1716      */
    @UnsupportedAppUsage(trackingBug = 130237544)
    public int getLatency() {
        // estimated total latency in ms, queried from the native layer on every call
        return native_get_latency();
    }
1721 
1722     /**
1723      * Returns the number of underrun occurrences in the application-level write buffer
1724      * since the AudioTrack was created.
1725      * An underrun occurs if the application does not write audio
1726      * data quickly enough, causing the buffer to underflow
1727      * and a potential audio glitch or pop.
1728      * <p>
1729      * Underruns are less likely when buffer sizes are large.
1730      * It may be possible to eliminate underruns by recreating the AudioTrack with
1731      * a larger buffer.
1732      * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
1733      * effective size of the buffer.
1734      */
getUnderrunCount()1735     public int getUnderrunCount() {
1736         return native_get_underrun_count();
1737     }
1738 
1739     /**
1740      * Returns the current performance mode of the {@link AudioTrack}.
1741      *
1742      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
1743      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
1744      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
1745      * Use {@link AudioTrack.Builder#setPerformanceMode}
1746      * in the {@link AudioTrack.Builder} to enable a performance mode.
1747      * @throws IllegalStateException if track is not initialized.
1748      */
getPerformanceMode()1749     public @PerformanceMode int getPerformanceMode() {
1750         final int flags = native_get_flags();
1751         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
1752             return PERFORMANCE_MODE_LOW_LATENCY;
1753         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
1754             return PERFORMANCE_MODE_POWER_SAVING;
1755         } else {
1756             return PERFORMANCE_MODE_NONE;
1757         }
1758     }
1759 
1760     /**
1761      *  Returns the output sample rate in Hz for the specified stream type.
1762      */
getNativeOutputSampleRate(int streamType)1763     static public int getNativeOutputSampleRate(int streamType) {
1764         return native_get_output_sample_rate(streamType);
1765     }
1766 
1767     /**
1768      * Returns the estimated minimum buffer size required for an AudioTrack
1769      * object to be created in the {@link #MODE_STREAM} mode.
1770      * The size is an estimate because it does not consider either the route or the sink,
1771      * since neither is known yet.  Note that this size doesn't
1772      * guarantee a smooth playback under load, and higher values should be chosen according to
1773      * the expected frequency at which the buffer will be refilled with additional data to play.
1774      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
1775      * to a higher value than the initial source sample rate, be sure to configure the buffer size
1776      * based on the highest planned sample rate.
1777      * @param sampleRateInHz the source sample rate expressed in Hz.
1778      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
1779      * @param channelConfig describes the configuration of the audio channels.
1780      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
1781      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
1782      * @param audioFormat the format in which the audio data is represented.
1783      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
1784      *   {@link AudioFormat#ENCODING_PCM_8BIT},
1785      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1786      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
1787      *   or {@link #ERROR} if unable to query for output properties,
1788      *   or the minimum buffer size expressed in bytes.
1789      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)1790     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
1791         int channelCount = 0;
1792         switch(channelConfig) {
1793         case AudioFormat.CHANNEL_OUT_MONO:
1794         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
1795             channelCount = 1;
1796             break;
1797         case AudioFormat.CHANNEL_OUT_STEREO:
1798         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
1799             channelCount = 2;
1800             break;
1801         default:
1802             if (!isMultichannelConfigSupported(channelConfig)) {
1803                 loge("getMinBufferSize(): Invalid channel configuration.");
1804                 return ERROR_BAD_VALUE;
1805             } else {
1806                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1807             }
1808         }
1809 
1810         if (!AudioFormat.isPublicEncoding(audioFormat)) {
1811             loge("getMinBufferSize(): Invalid audio format.");
1812             return ERROR_BAD_VALUE;
1813         }
1814 
1815         // sample rate, note these values are subject to change
1816         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
1817         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
1818                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
1819             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
1820             return ERROR_BAD_VALUE;
1821         }
1822 
1823         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
1824         if (size <= 0) {
1825             loge("getMinBufferSize(): error querying hardware");
1826             return ERROR;
1827         }
1828         else {
1829             return size;
1830         }
1831     }
1832 
1833     /**
1834      * Returns the audio session ID.
1835      *
1836      * @return the ID of the audio session this AudioTrack belongs to.
1837      */
    public int getAudioSessionId() {
        return mSessionId;  // assigned at construction (outside this view)
    }
1841 
1842    /**
1843     * Poll for a timestamp on demand.
1844     * <p>
1845     * If you need to track timestamps during initial warmup or after a routing or mode change,
1846     * you should request a new timestamp periodically until the reported timestamps
1847     * show that the frame position is advancing, or until it becomes clear that
1848     * timestamps are unavailable for this route.
1849     * <p>
1850     * After the clock is advancing at a stable rate,
1851     * query for a new timestamp approximately once every 10 seconds to once per minute.
1852     * Calling this method more often is inefficient.
1853     * It is also counter-productive to call this method more often than recommended,
1854     * because the short-term differences between successive timestamp reports are not meaningful.
1855     * If you need a high-resolution mapping between frame position and presentation time,
1856     * consider implementing that at application level, based on low-resolution timestamps.
1857     * <p>
1858     * The audio data at the returned position may either already have been
1859     * presented, or may have not yet been presented but is committed to be presented.
1860     * It is not possible to request the time corresponding to a particular position,
1861     * or to request the (fractional) position corresponding to a particular time.
1862     * If you need such features, consider implementing them at application level.
1863     *
1864     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
1865     *        and owned by caller.
1866     * @return true if a timestamp is available, or false if no timestamp is available.
1867     *         If a timestamp is available,
1868     *         the AudioTimestamp instance is filled in with a position in frame units, together
1869     *         with the estimated time when that frame was presented or is committed to
1870     *         be presented.
1871     *         In the case that no timestamp is available, any supplied instance is left unaltered.
1872     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
1873     *         or during and immediately after a route change.
1874     *         A timestamp is permanently unavailable for a given route if the route does not support
1875     *         timestamps.  In this case, the approximate frame position can be obtained
1876     *         using {@link #getPlaybackHeadPosition}.
1877     *         However, it may be useful to continue to query for
1878     *         timestamps occasionally, to recover after a route change.
1879     */
1880     // Add this text when the "on new timestamp" API is added:
1881     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)1882     public boolean getTimestamp(AudioTimestamp timestamp)
1883     {
1884         if (timestamp == null) {
1885             throw new IllegalArgumentException();
1886         }
1887         // It's unfortunate, but we have to either create garbage every time or use synchronized
1888         long[] longArray = new long[2];
1889         int ret = native_get_timestamp(longArray);
1890         if (ret != SUCCESS) {
1891             return false;
1892         }
1893         timestamp.framePosition = longArray[0];
1894         timestamp.nanoTime = longArray[1];
1895         return true;
1896     }
1897 
1898     /**
1899      * Poll for a timestamp on demand.
1900      * <p>
1901      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
1902      *
1903      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
1904      *        and owned by caller.
1905      * @return {@link #SUCCESS} if a timestamp is available
1906      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
1907      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
1908      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
1909      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
1910      *         for the timestamp.
1911      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1912      *         needs to be recreated.
1913      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
1914      *         timestamps. In this case, the approximate frame position can be obtained
1915      *         using {@link #getPlaybackHeadPosition}.
1916      *
1917      *         The AudioTimestamp instance is filled in with a position in frame units, together
1918      *         with the estimated time when that frame was presented or is committed to
1919      *         be presented.
1920      * @hide
1921      */
1922      // Add this text when the "on new timestamp" API is added:
1923      //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestampWithStatus(AudioTimestamp timestamp)1924      public int getTimestampWithStatus(AudioTimestamp timestamp)
1925      {
1926          if (timestamp == null) {
1927              throw new IllegalArgumentException();
1928          }
1929          // It's unfortunate, but we have to either create garbage every time or use synchronized
1930          long[] longArray = new long[2];
1931          int ret = native_get_timestamp(longArray);
1932          timestamp.framePosition = longArray[0];
1933          timestamp.nanoTime = longArray[1];
1934          return ret;
1935      }
1936 
1937     /**
1938      *  Return Metrics data about the current AudioTrack instance.
1939      *
1940      * @return a {@link PersistableBundle} containing the set of attributes and values
1941      * available for the media being handled by this instance of AudioTrack
1942      * The attributes are descibed in {@link MetricsConstants}.
1943      *
1944      * Additional vendor-specific fields may also be present in
1945      * the return value.
1946      */
getMetrics()1947     public PersistableBundle getMetrics() {
1948         PersistableBundle bundle = native_getMetrics();
1949         return bundle;
1950     }
1951 
    // Native counterpart of getMetrics(); implemented in the JNI layer.
    private native PersistableBundle native_getMetrics();
1953 
1954     //--------------------------------------------------------------------------
1955     // Initialization / configuration
1956     //--------------------
    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
     * Notifications will be received in the same thread as the one in which the AudioTrack
     * instance was created.
     * @param listener the listener to be notified of position events, or null to clear
     *     a previously installed listener.
     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Null handler: events are delivered on the thread that created this AudioTrack.
        setPlaybackPositionUpdateListener(listener, null);
    }
1967 
1968     /**
1969      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
1970      * for each periodic playback head position update.
1971      * Use this method to receive AudioTrack events in the Handler associated with another
1972      * thread than the one in which you created the AudioTrack instance.
1973      * @param listener
1974      * @param handler the Handler that will receive the event notification messages.
1975      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)1976     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
1977                                                     Handler handler) {
1978         if (listener != null) {
1979             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
1980         } else {
1981             mEventHandlerDelegate = null;
1982         }
1983     }
1984 
1985 
clampGainOrLevel(float gainOrLevel)1986     private static float clampGainOrLevel(float gainOrLevel) {
1987         if (Float.isNaN(gainOrLevel)) {
1988             throw new IllegalArgumentException();
1989         }
1990         if (gainOrLevel < GAIN_MIN) {
1991             gainOrLevel = GAIN_MIN;
1992         } else if (gainOrLevel > GAIN_MAX) {
1993             gainOrLevel = GAIN_MAX;
1994         }
1995         return gainOrLevel;
1996     }
1997 
1998 
1999      /**
2000      * Sets the specified left and right output gain values on the AudioTrack.
2001      * <p>Gain values are clamped to the closed interval [0.0, max] where
2002      * max is the value of {@link #getMaxVolume}.
2003      * A value of 0.0 results in zero gain (silence), and
2004      * a value of 1.0 means unity gain (signal unchanged).
2005      * The default value is 1.0 meaning unity gain.
2006      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2007      * @param leftGain output gain for the left channel.
2008      * @param rightGain output gain for the right channel
2009      * @return error code or success, see {@link #SUCCESS},
2010      *    {@link #ERROR_INVALID_OPERATION}
2011      * @deprecated Applications should use {@link #setVolume} instead, as it
2012      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2013      */
2014     @Deprecated
setStereoVolume(float leftGain, float rightGain)2015     public int setStereoVolume(float leftGain, float rightGain) {
2016         if (mState == STATE_UNINITIALIZED) {
2017             return ERROR_INVALID_OPERATION;
2018         }
2019 
2020         baseSetVolume(leftGain, rightGain);
2021         return SUCCESS;
2022     }
2023 
2024     @Override
playerSetVolume(boolean muting, float leftVolume, float rightVolume)2025     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
2026         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
2027         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
2028 
2029         native_setVolume(leftVolume, rightVolume);
2030     }
2031 
2032 
    /**
     * Sets the specified output gain value on all channels of this track.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>This API is preferred over {@link #setStereoVolume}, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param gain output gain for all channels.
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setVolume(float gain) {
        // Apply the same gain to both stereo channels.
        return setStereoVolume(gain, gain);
    }
2050 
    @Override
    // Package-private PlayerBase hook: forwards the shaper configuration/operation
    // pair directly to the native implementation and returns its status code.
    /* package */ int playerApplyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation) {
        return native_applyVolumeShaper(configuration, operation);
    }
2057 
    @Override
    // Package-private hook: queries the native layer for the state of the shaper
    // identified by id; may return null (return is @Nullable).
    /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
        return native_getVolumeShaperState(id);
    }
2062 
    @Override
    public @NonNull VolumeShaper createVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration) {
        // The new shaper is bound to this track; it operates through the
        // playerApplyVolumeShaper/playerGetVolumeShaperState hooks above.
        return new VolumeShaper(configuration, this);
    }
2068 
2069     /**
2070      * Sets the playback sample rate for this track. This sets the sampling rate at which
2071      * the audio data will be consumed and played back
2072      * (as set by the sampleRateInHz parameter in the
2073      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
2074      * not the original sampling rate of the
2075      * content. For example, setting it to half the sample rate of the content will cause the
2076      * playback to last twice as long, but will also result in a pitch shift down by one octave.
2077      * The valid sample rate range is from 1 Hz to twice the value returned by
2078      * {@link #getNativeOutputSampleRate(int)}.
2079      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
2080      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
2081      * for playback of content of differing sample rate,
2082      * but with identical encoding and channel mask.
2083      * @param sampleRateInHz the sample rate expressed in Hz
2084      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2085      *    {@link #ERROR_INVALID_OPERATION}
2086      */
setPlaybackRate(int sampleRateInHz)2087     public int setPlaybackRate(int sampleRateInHz) {
2088         if (mState != STATE_INITIALIZED) {
2089             return ERROR_INVALID_OPERATION;
2090         }
2091         if (sampleRateInHz <= 0) {
2092             return ERROR_BAD_VALUE;
2093         }
2094         return native_set_playback_rate(sampleRateInHz);
2095     }
2096 
2097 
    /**
     * Sets the playback parameters.
     * This method returns failure if it cannot apply the playback parameters.
     * One possible cause is that the parameters for speed or pitch are out of range.
     * Another possible cause is that the <code>AudioTrack</code> is streaming
     * (see {@link #MODE_STREAM}) and the
     * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
     * on configuration must be larger than the speed multiplied by the minimum size
     * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
     * @param params see {@link PlaybackParams}. In particular,
     * speed, pitch, and audio mode should be set.
     * @throws IllegalArgumentException if the parameters are invalid or not accepted.
     * @throws IllegalStateException if track is not initialized.
     */
    public void setPlaybackParams(@NonNull PlaybackParams params) {
        // Explicit check preserves the documented IllegalArgumentException for null
        // (rather than an NPE); @NonNull alone does not enforce this at runtime.
        if (params == null) {
            throw new IllegalArgumentException("params is null");
        }
        native_set_playback_params(params);
    }
2118 
2119 
2120     /**
2121      * Sets the position of the notification marker.  At most one marker can be active.
2122      * @param markerInFrames marker position in wrapping frame units similar to
2123      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
2124      * To set a marker at a position which would appear as zero due to wraparound,
2125      * a workaround is to use a non-zero position near zero, such as -1 or 1.
2126      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2127      *  {@link #ERROR_INVALID_OPERATION}
2128      */
setNotificationMarkerPosition(int markerInFrames)2129     public int setNotificationMarkerPosition(int markerInFrames) {
2130         if (mState == STATE_UNINITIALIZED) {
2131             return ERROR_INVALID_OPERATION;
2132         }
2133         return native_set_marker_pos(markerInFrames);
2134     }
2135 
2136 
2137     /**
2138      * Sets the period for the periodic notification event.
2139      * @param periodInFrames update period expressed in frames.
2140      * Zero period means no position updates.  A negative period is not allowed.
2141      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
2142      */
setPositionNotificationPeriod(int periodInFrames)2143     public int setPositionNotificationPeriod(int periodInFrames) {
2144         if (mState == STATE_UNINITIALIZED) {
2145             return ERROR_INVALID_OPERATION;
2146         }
2147         return native_set_pos_update_period(periodInFrames);
2148     }
2149 
2150 
2151     /**
2152      * Sets the playback head position within the static buffer.
2153      * The track must be stopped or paused for the position to be changed,
2154      * and must use the {@link #MODE_STATIC} mode.
2155      * @param positionInFrames playback head position within buffer, expressed in frames.
2156      * Zero corresponds to start of buffer.
2157      * The position must not be greater than the buffer size in frames, or negative.
2158      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
2159      * the position values have different meanings.
2160      * <br>
2161      * If looping is currently enabled and the new position is greater than or equal to the
2162      * loop end marker, the behavior varies by API level:
2163      * as of {@link android.os.Build.VERSION_CODES#M},
2164      * the looping is first disabled and then the position is set.
2165      * For earlier API levels, the behavior is unspecified.
2166      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2167      *    {@link #ERROR_INVALID_OPERATION}
2168      */
setPlaybackHeadPosition(@ntRange from = 0) int positionInFrames)2169     public int setPlaybackHeadPosition(@IntRange (from = 0) int positionInFrames) {
2170         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2171                 getPlayState() == PLAYSTATE_PLAYING) {
2172             return ERROR_INVALID_OPERATION;
2173         }
2174         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
2175             return ERROR_BAD_VALUE;
2176         }
2177         return native_set_position(positionInFrames);
2178     }
2179 
2180     /**
2181      * Sets the loop points and the loop count. The loop can be infinite.
2182      * Similarly to setPlaybackHeadPosition,
2183      * the track must be stopped or paused for the loop points to be changed,
2184      * and must use the {@link #MODE_STATIC} mode.
2185      * @param startInFrames loop start marker expressed in frames.
2186      * Zero corresponds to start of buffer.
2187      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
2188      * @param endInFrames loop end marker expressed in frames.
2189      * The total buffer size in frames corresponds to end of buffer.
2190      * The end marker must not be greater than the buffer size in frames.
2191      * For looping, the end marker must not be less than or equal to the start marker,
2192      * but to disable looping
2193      * it is permitted for start marker, end marker, and loop count to all be 0.
2194      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
2195      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
2196      * support,
2197      * {@link #ERROR_BAD_VALUE} is returned.
2198      * The loop range is the interval [startInFrames, endInFrames).
2199      * <br>
2200      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
2201      * unless it is greater than or equal to the loop end marker, in which case
2202      * it is forced to the loop start marker.
2203      * For earlier API levels, the effect on position is unspecified.
2204      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
2205      *    A value of -1 means infinite looping, and 0 disables looping.
2206      *    A value of positive N means to "loop" (go back) N times.  For example,
2207      *    a value of one means to play the region two times in total.
2208      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2209      *    {@link #ERROR_INVALID_OPERATION}
2210      */
setLoopPoints(@ntRange from = 0) int startInFrames, @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount)2211     public int setLoopPoints(@IntRange (from = 0) int startInFrames,
2212             @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount) {
2213         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2214                 getPlayState() == PLAYSTATE_PLAYING) {
2215             return ERROR_INVALID_OPERATION;
2216         }
2217         if (loopCount == 0) {
2218             ;   // explicitly allowed as an exception to the loop region range check
2219         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
2220                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
2221             return ERROR_BAD_VALUE;
2222         }
2223         return native_set_loop(startInFrames, endInFrames, loopCount);
2224     }
2225 
    /**
     * Sets the audio presentation.
     * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned.
     * If a multi-stream decoder (MSD) is not present, or the format does not support
     * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned.
     * {@link #ERROR} is returned in case of any other error.
     * @param presentation see {@link AudioPresentation}. In particular, id should be set.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR},
     *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
     * @throws IllegalArgumentException if the audio presentation is null.
     * @throws IllegalStateException if track is not initialized.
     */
    public int setPresentation(@NonNull AudioPresentation presentation) {
        // Explicit check preserves the documented IllegalArgumentException for null.
        if (presentation == null) {
            throw new IllegalArgumentException("audio presentation is null");
        }
        return native_setPresentation(presentation.getPresentationId(),
                presentation.getProgramId());
    }
2245 
    /**
     * Sets the initialization state of the instance. This method was originally intended to be used
     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
     * @param state the state of the AudioTrack instance
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     */
    @Deprecated
    protected void setState(int state) {
        // Direct write with no validation; retained only for legacy subclass compatibility.
        mState = state;
    }
2257 
2258 
2259     //---------------------------------------------------------
2260     // Transport control methods
2261     //--------------------
2262     /**
2263      * Starts playing an AudioTrack.
2264      * <p>
2265      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
2266      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
2267      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
2268      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
2269      * play().
2270      * <p>
2271      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
2272      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
2273      * If you don't call write() first, or if you call write() but with an insufficient amount of
2274      * data, then the track will be in underrun state at play().  In this case,
2275      * playback will not actually start playing until the data path is filled to a
2276      * device-specific minimum level.  This requirement for the path to be filled
2277      * to a minimum level is also true when resuming audio playback after calling stop().
2278      * Similarly the buffer will need to be filled up again after
2279      * the track underruns due to failure to call write() in a timely manner with sufficient data.
2280      * For portability, an application should prime the data path to the maximum allowed
2281      * by writing data until the write() method returns a short transfer count.
2282      * This allows play() to start immediately, and reduces the chance of underrun.
2283      *
2284      * @throws IllegalStateException if the track isn't properly initialized
2285      */
play()2286     public void play()
2287     throws IllegalStateException {
2288         if (mState != STATE_INITIALIZED) {
2289             throw new IllegalStateException("play() called on uninitialized AudioTrack.");
2290         }
2291         //FIXME use lambda to pass startImpl to superclass
2292         final int delay = getStartDelayMs();
2293         if (delay == 0) {
2294             startImpl();
2295         } else {
2296             new Thread() {
2297                 public void run() {
2298                     try {
2299                         Thread.sleep(delay);
2300                     } catch (InterruptedException e) {
2301                         e.printStackTrace();
2302                     }
2303                     baseSetStartDelayMs(0);
2304                     try {
2305                         startImpl();
2306                     } catch (IllegalStateException e) {
2307                         // fail silently for a state exception when it is happening after
2308                         // a delayed start, as the player state could have changed between the
2309                         // call to start() and the execution of startImpl()
2310                     }
2311                 }
2312             }.start();
2313         }
2314     }
2315 
    // Performs the actual start under mPlayStateLock: base bookkeeping first, then the
    // native start, then the play-state transition. Statement order is significant.
    private void startImpl() {
        synchronized(mPlayStateLock) {
            baseStart();
            native_start();
            if (mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                // A stop was already in flight when the track was paused; resume the
                // stopping sequence rather than entering PLAYING.
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PLAYING;
                // A fresh (re)start cancels any pending offload end-of-stream signal.
                mOffloadEosPending = false;
            }
        }
    }
2328 
    /**
     * Stops playing the audio data.
     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
     * after the last buffer that was written has been played. For an immediate stop, use
     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
     * back yet.
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            // Order is significant: stop the native track before base bookkeeping.
            native_stop();
            baseStop();
            if (mOffloaded && mPlayState != PLAYSTATE_PAUSED_STOPPING) {
                // Offloaded track: remain in STOPPING rather than STOPPED; the final
                // transition happens elsewhere (see mOffloadEosPending handling).
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_STOPPED;
                mOffloadEosPending = false;
                // Reset A/V sync bookkeeping so a future play starts from a clean header.
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                // Wake a thread that may be waiting on mPlayStateLock for the stopped state.
                mPlayStateLock.notify();
            }
        }
    }
2358 
2359     /**
2360      * Pauses the playback of the audio data. Data that has not been played
2361      * back will not be discarded. Subsequent calls to {@link #play} will play
2362      * this data back. See {@link #flush()} to discard this data.
2363      *
2364      * @throws IllegalStateException
2365      */
pause()2366     public void pause()
2367     throws IllegalStateException {
2368         if (mState != STATE_INITIALIZED) {
2369             throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
2370         }
2371 
2372         // pause playback
2373         synchronized(mPlayStateLock) {
2374             native_pause();
2375             basePause();
2376             if (mPlayState == PLAYSTATE_STOPPING) {
2377                 mPlayState = PLAYSTATE_PAUSED_STOPPING;
2378             } else {
2379                 mPlayState = PLAYSTATE_PAUSED;
2380             }
2381         }
2382     }
2383 
2384 
2385     //---------------------------------------------------------
2386     // Audio data supply
2387     //--------------------
2388 
2389     /**
2390      * Flushes the audio data currently queued for playback. Any data that has
2391      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
2392      * or if the track's creation mode is not {@link #MODE_STREAM}.
2393      * <BR> Note that although data written but not yet presented is discarded, there is no
2394      * guarantee that all of the buffer space formerly used by that data
2395      * is available for a subsequent write.
2396      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
2397      * less than or equal to the total buffer size
2398      * may return a short actual transfer count.
2399      */
flush()2400     public void flush() {
2401         if (mState == STATE_INITIALIZED) {
2402             // flush the data in native layer
2403             native_flush();
2404             mAvSyncHeader = null;
2405             mAvSyncBytesRemaining = 0;
2406         }
2407 
2408     }
2409 
    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * The format specified in the AudioTrack constructor should be
     * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
     * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
     * <p>
     * In streaming mode, the write will normally block until all the data has been enqueued for
     * playback, and will return a full transfer count.  However, if the track is stopped or paused
     * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
     * occurs during the write, then the write may return a short transfer count.
     * <p>
     * In static buffer mode, copies the data to the buffer starting at offset 0.
     * Note that the actual playback of this data might occur after this function returns.
     *
     * @param audioData the array that holds the data to play.
     * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
     *    starts.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInBytes the number of bytes to write in audioData after the offset.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @return zero or the positive number of bytes that were written, or one of the following
     *    error codes. The number of bytes will be a multiple of the frame size in bytes
     *    not to exceed sizeInBytes.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
     * set to  {@link #WRITE_BLOCKING}.
     */
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
        // Convenience overload: always blocking; all validation happens in the delegate.
        return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
    }
2448 
2449     /**
2450      * Writes the audio data to the audio sink for playback (streaming mode),
2451      * or copies audio data for later playback (static buffer mode).
2452      * The format specified in the AudioTrack constructor should be
2453      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
2454      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
2455      * <p>
2456      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2457      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2458      * for playback, and will return a full transfer count.  However, if the write mode is
2459      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2460      * interrupts the write by calling stop or pause, or an I/O error
2461      * occurs during the write, then the write may return a short transfer count.
2462      * <p>
2463      * In static buffer mode, copies the data to the buffer starting at offset 0,
2464      * and the write mode is ignored.
2465      * Note that the actual playback of this data might occur after this function returns.
2466      *
2467      * @param audioData the array that holds the data to play.
2468      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
2469      *    starts.
2470      *    Must not be negative, or cause the data access to go out of bounds of the array.
2471      * @param sizeInBytes the number of bytes to write in audioData after the offset.
2472      *    Must not be negative, or cause the data access to go out of bounds of the array.
2473      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2474      *     effect in static mode.
2475      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2476      *         to the audio sink.
2477      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2478      *     queuing as much audio data for playback as possible without blocking.
2479      * @return zero or the positive number of bytes that were written, or one of the following
2480      *    error codes. The number of bytes will be a multiple of the frame size in bytes
2481      *    not to exceed sizeInBytes.
2482      * <ul>
2483      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2484      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2485      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2486      *    needs to be recreated. The dead object error code is not returned if some data was
2487      *    successfully transferred. In this case, the error is returned at the next write()</li>
2488      * <li>{@link #ERROR} in case of other error</li>
2489      * </ul>
2490      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)2491     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
2492             @WriteMode int writeMode) {
2493 
2494         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
2495             return ERROR_INVALID_OPERATION;
2496         }
2497 
2498         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2499             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2500             return ERROR_BAD_VALUE;
2501         }
2502 
2503         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
2504                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
2505                 || (offsetInBytes + sizeInBytes > audioData.length)) {
2506             return ERROR_BAD_VALUE;
2507         }
2508 
2509         if (!blockUntilOffloadDrain(writeMode)) {
2510             return 0;
2511         }
2512 
2513         final int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
2514                 writeMode == WRITE_BLOCKING);
2515 
2516         if ((mDataLoadMode == MODE_STATIC)
2517                 && (mState == STATE_NO_STATIC_DATA)
2518                 && (ret > 0)) {
2519             // benign race with respect to other APIs that read mState
2520             mState = STATE_INITIALIZED;
2521         }
2522 
2523         return ret;
2524     }
2525 
2526     /**
2527      * Writes the audio data to the audio sink for playback (streaming mode),
2528      * or copies audio data for later playback (static buffer mode).
2529      * The format specified in the AudioTrack constructor should be
2530      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
2531      * <p>
2532      * In streaming mode, the write will normally block until all the data has been enqueued for
2533      * playback, and will return a full transfer count.  However, if the track is stopped or paused
2534      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
2535      * occurs during the write, then the write may return a short transfer count.
2536      * <p>
2537      * In static buffer mode, copies the data to the buffer starting at offset 0.
2538      * Note that the actual playback of this data might occur after this function returns.
2539      *
2540      * @param audioData the array that holds the data to play.
2541      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
2542      *     starts.
2543      *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInShorts the number of shorts to write in audioData after the offset.
2545      *    Must not be negative, or cause the data access to go out of bounds of the array.
2546      * @return zero or the positive number of shorts that were written, or one of the following
2547      *    error codes. The number of shorts will be a multiple of the channel count not to
2548      *    exceed sizeInShorts.
2549      * <ul>
2550      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2551      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2552      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2553      *    needs to be recreated. The dead object error code is not returned if some data was
2554      *    successfully transferred. In this case, the error is returned at the next write()</li>
2555      * <li>{@link #ERROR} in case of other error</li>
2556      * </ul>
2557      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
2558      * set to  {@link #WRITE_BLOCKING}.
2559      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts)2560     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
2561         return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
2562     }
2563 
2564     /**
2565      * Writes the audio data to the audio sink for playback (streaming mode),
2566      * or copies audio data for later playback (static buffer mode).
2567      * The format specified in the AudioTrack constructor should be
2568      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
2569      * <p>
2570      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2571      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2572      * for playback, and will return a full transfer count.  However, if the write mode is
2573      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2574      * interrupts the write by calling stop or pause, or an I/O error
2575      * occurs during the write, then the write may return a short transfer count.
2576      * <p>
2577      * In static buffer mode, copies the data to the buffer starting at offset 0.
2578      * Note that the actual playback of this data might occur after this function returns.
2579      *
2580      * @param audioData the array that holds the data to write.
2581      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
2582      *     starts.
2583      *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInShorts the number of shorts to write in audioData after the offset.
2585      *    Must not be negative, or cause the data access to go out of bounds of the array.
2586      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2587      *     effect in static mode.
2588      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2589      *         to the audio sink.
2590      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2591      *     queuing as much audio data for playback as possible without blocking.
2592      * @return zero or the positive number of shorts that were written, or one of the following
2593      *    error codes. The number of shorts will be a multiple of the channel count not to
2594      *    exceed sizeInShorts.
2595      * <ul>
2596      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2597      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2598      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2599      *    needs to be recreated. The dead object error code is not returned if some data was
2600      *    successfully transferred. In this case, the error is returned at the next write()</li>
2601      * <li>{@link #ERROR} in case of other error</li>
2602      * </ul>
2603      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)2604     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
2605             @WriteMode int writeMode) {
2606 
2607         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
2608             return ERROR_INVALID_OPERATION;
2609         }
2610 
2611         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2612             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2613             return ERROR_BAD_VALUE;
2614         }
2615 
2616         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
2617                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
2618                 || (offsetInShorts + sizeInShorts > audioData.length)) {
2619             return ERROR_BAD_VALUE;
2620         }
2621 
2622         if (!blockUntilOffloadDrain(writeMode)) {
2623             return 0;
2624         }
2625 
2626         final int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
2627                 writeMode == WRITE_BLOCKING);
2628 
2629         if ((mDataLoadMode == MODE_STATIC)
2630                 && (mState == STATE_NO_STATIC_DATA)
2631                 && (ret > 0)) {
2632             // benign race with respect to other APIs that read mState
2633             mState = STATE_INITIALIZED;
2634         }
2635 
2636         return ret;
2637     }
2638 
2639     /**
2640      * Writes the audio data to the audio sink for playback (streaming mode),
2641      * or copies audio data for later playback (static buffer mode).
2642      * The format specified in the AudioTrack constructor should be
2643      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
2644      * <p>
2645      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2646      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2647      * for playback, and will return a full transfer count.  However, if the write mode is
2648      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2649      * interrupts the write by calling stop or pause, or an I/O error
2650      * occurs during the write, then the write may return a short transfer count.
2651      * <p>
2652      * In static buffer mode, copies the data to the buffer starting at offset 0,
2653      * and the write mode is ignored.
2654      * Note that the actual playback of this data might occur after this function returns.
2655      *
2656      * @param audioData the array that holds the data to write.
2657      *     The implementation does not clip for sample values within the nominal range
2658      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
2659      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
2660      *     that could add energy, such as reverb.  For the convenience of applications
2661      *     that compute samples using filters with non-unity gain,
2662      *     sample values +3 dB beyond the nominal range are permitted.
2663      *     However such values may eventually be limited or clipped, depending on various gains
2664      *     and later processing in the audio path.  Therefore applications are encouraged
2665      *     to provide samples values within the nominal range.
2666      * @param offsetInFloats the offset, expressed as a number of floats,
2667      *     in audioData where the data to write starts.
2668      *    Must not be negative, or cause the data access to go out of bounds of the array.
2669      * @param sizeInFloats the number of floats to write in audioData after the offset.
2670      *    Must not be negative, or cause the data access to go out of bounds of the array.
2671      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2672      *     effect in static mode.
2673      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2674      *         to the audio sink.
2675      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2676      *     queuing as much audio data for playback as possible without blocking.
2677      * @return zero or the positive number of floats that were written, or one of the following
2678      *    error codes. The number of floats will be a multiple of the channel count not to
2679      *    exceed sizeInFloats.
2680      * <ul>
2681      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2682      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2683      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2684      *    needs to be recreated. The dead object error code is not returned if some data was
2685      *    successfully transferred. In this case, the error is returned at the next write()</li>
2686      * <li>{@link #ERROR} in case of other error</li>
2687      * </ul>
2688      */
write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)2689     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
2690             @WriteMode int writeMode) {
2691 
2692         if (mState == STATE_UNINITIALIZED) {
2693             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
2694             return ERROR_INVALID_OPERATION;
2695         }
2696 
2697         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
2698             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
2699             return ERROR_INVALID_OPERATION;
2700         }
2701 
2702         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2703             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2704             return ERROR_BAD_VALUE;
2705         }
2706 
2707         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
2708                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
2709                 || (offsetInFloats + sizeInFloats > audioData.length)) {
2710             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
2711             return ERROR_BAD_VALUE;
2712         }
2713 
2714         if (!blockUntilOffloadDrain(writeMode)) {
2715             return 0;
2716         }
2717 
2718         final int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
2719                 writeMode == WRITE_BLOCKING);
2720 
2721         if ((mDataLoadMode == MODE_STATIC)
2722                 && (mState == STATE_NO_STATIC_DATA)
2723                 && (ret > 0)) {
2724             // benign race with respect to other APIs that read mState
2725             mState = STATE_INITIALIZED;
2726         }
2727 
2728         return ret;
2729     }
2730 
2731 
2732     /**
2733      * Writes the audio data to the audio sink for playback (streaming mode),
2734      * or copies audio data for later playback (static buffer mode).
2735      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
2736      * <p>
2737      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2738      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2739      * for playback, and will return a full transfer count.  However, if the write mode is
2740      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2741      * interrupts the write by calling stop or pause, or an I/O error
2742      * occurs during the write, then the write may return a short transfer count.
2743      * <p>
2744      * In static buffer mode, copies the data to the buffer starting at offset 0,
2745      * and the write mode is ignored.
2746      * Note that the actual playback of this data might occur after this function returns.
2747      *
2748      * @param audioData the buffer that holds the data to write, starting at the position reported
2749      *     by <code>audioData.position()</code>.
2750      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
2751      *     have been advanced to reflect the amount of data that was successfully written to
2752      *     the AudioTrack.
2753      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
2754      *     that the number of bytes requested be a multiple of the frame size (sample size in
2755      *     bytes multiplied by the channel count).
2756      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
2757      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2758      *     effect in static mode.
2759      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2760      *         to the audio sink.
2761      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2762      *     queuing as much audio data for playback as possible without blocking.
2763      * @return zero or the positive number of bytes that were written, or one of the following
2764      *    error codes.
2765      * <ul>
2766      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2767      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2768      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2769      *    needs to be recreated. The dead object error code is not returned if some data was
2770      *    successfully transferred. In this case, the error is returned at the next write()</li>
2771      * <li>{@link #ERROR} in case of other error</li>
2772      * </ul>
2773      */
write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)2774     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
2775             @WriteMode int writeMode) {
2776 
2777         if (mState == STATE_UNINITIALIZED) {
2778             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
2779             return ERROR_INVALID_OPERATION;
2780         }
2781 
2782         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2783             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2784             return ERROR_BAD_VALUE;
2785         }
2786 
2787         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
2788             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
2789             return ERROR_BAD_VALUE;
2790         }
2791 
2792         if (!blockUntilOffloadDrain(writeMode)) {
2793             return 0;
2794         }
2795 
2796         int ret = 0;
2797         if (audioData.isDirect()) {
2798             ret = native_write_native_bytes(audioData,
2799                     audioData.position(), sizeInBytes, mAudioFormat,
2800                     writeMode == WRITE_BLOCKING);
2801         } else {
2802             ret = native_write_byte(NioUtils.unsafeArray(audioData),
2803                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
2804                     sizeInBytes, mAudioFormat,
2805                     writeMode == WRITE_BLOCKING);
2806         }
2807 
2808         if ((mDataLoadMode == MODE_STATIC)
2809                 && (mState == STATE_NO_STATIC_DATA)
2810                 && (ret > 0)) {
2811             // benign race with respect to other APIs that read mState
2812             mState = STATE_INITIALIZED;
2813         }
2814 
2815         if (ret > 0) {
2816             audioData.position(audioData.position() + ret);
2817         }
2818 
2819         return ret;
2820     }
2821 
2822     /**
2823      * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
2824      * The blocking behavior will depend on the write mode.
2825      * @param audioData the buffer that holds the data to write, starting at the position reported
2826      *     by <code>audioData.position()</code>.
2827      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
2828      *     have been advanced to reflect the amount of data that was successfully written to
2829      *     the AudioTrack.
2830      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
2831      *     that the number of bytes requested be a multiple of the frame size (sample size in
2832      *     bytes multiplied by the channel count).
2833      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
2834      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
2835      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2836      *         to the audio sink.
2837      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2838      *     queuing as much audio data for playback as possible without blocking.
2839      * @param timestamp The timestamp, in nanoseconds, of the first decodable audio frame in the
2840      *     provided audioData.
2841      * @return zero or the positive number of bytes that were written, or one of the following
2842      *    error codes.
2843      * <ul>
2844      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2845      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2846      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2847      *    needs to be recreated. The dead object error code is not returned if some data was
2848      *    successfully transferred. In this case, the error is returned at the next write()</li>
2849      * <li>{@link #ERROR} in case of other error</li>
2850      * </ul>
2851      */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes are only meaningful on streaming tracks.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Without HW_AV_SYNC there is no AV sync header protocol: fall back to the
        // plain ByteBuffer write and drop the timestamp.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // An offloaded track that is draining accepts no data (or blocks until
        // the drain completes, depending on writeMode).
        if (!blockUntilOffloadDrain(writeMode)) {
            return 0;
        }

        // create timestamp header if none exists
        // Lazily allocate the reusable AV sync header: mOffset bytes, big-endian,
        // beginning with the 0x55550002 marker word.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550002);
        }

        // Starting a new access unit: record its payload size (header offset 4),
        // timestamp (offset 8), and mOffset (offset 16 — presumably the header
        // length; TODO confirm against the native AV sync protocol), then rewind
        // the header so it is transmitted ahead of the payload.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                // Reset header state so the next call starts a fresh access unit.
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Header only partially queued (non-blocking path): report 0 payload
                // bytes written; the next call resumes the header first.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Cap the payload at what the current access unit still expects.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            // Reset header state so the next call starts a fresh access unit.
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        // Track how much of the declared access unit has been delivered.
        mAvSyncBytesRemaining -= ret;

        return ret;
    }
2929 
2930 
2931     /**
2932      * Sets the playback head position within the static buffer to zero,
2933      * that is it rewinds to start of static buffer.
2934      * The track must be stopped or paused, and
2935      * the track's creation mode must be {@link #MODE_STATIC}.
2936      * <p>
2937      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
2938      * {@link #getPlaybackHeadPosition()} to zero.
2939      * For earlier API levels, the reset behavior is unspecified.
2940      * <p>
2941      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
2942      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
2943      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2944      *  {@link #ERROR_INVALID_OPERATION}
2945      */
reloadStaticData()2946     public int reloadStaticData() {
2947         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
2948             return ERROR_INVALID_OPERATION;
2949         }
2950         return native_reload_static();
2951     }
2952 
2953     /**
2954      * When an AudioTrack in offload mode is in STOPPING play state, wait until event STREAM_END is
2955      * received if blocking write or return with 0 frames written if non blocking mode.
2956      */
blockUntilOffloadDrain(int writeMode)2957     private boolean blockUntilOffloadDrain(int writeMode) {
2958         synchronized (mPlayStateLock) {
2959             while (mPlayState == PLAYSTATE_STOPPING || mPlayState == PLAYSTATE_PAUSED_STOPPING) {
2960                 if (writeMode == WRITE_NON_BLOCKING) {
2961                     return false;
2962                 }
2963                 try {
2964                     mPlayStateLock.wait();
2965                 } catch (InterruptedException e) {
2966                 }
2967             }
2968             return true;
2969         }
2970     }
2971 
2972     //--------------------------------------------------------------------------
2973     // Audio effects management
2974     //--------------------
2975 
2976     /**
2977      * Attaches an auxiliary effect to the audio track. A typical auxiliary
2978      * effect is a reverberation effect which can be applied on any sound source
2979      * that directs a certain amount of its energy to this effect. This amount
2980      * is defined by setAuxEffectSendLevel().
     * See {@link #setAuxEffectSendLevel(float)}.
2982      * <p>After creating an auxiliary effect (e.g.
2983      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
2984      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
2985      * this method to attach the audio track to the effect.
2986      * <p>To detach the effect from the audio track, call this method with a
2987      * null effect id.
2988      *
2989      * @param effectId system wide unique id of the effect to attach
2990      * @return error code or success, see {@link #SUCCESS},
2991      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
2992      */
attachAuxEffect(int effectId)2993     public int attachAuxEffect(int effectId) {
2994         if (mState == STATE_UNINITIALIZED) {
2995             return ERROR_INVALID_OPERATION;
2996         }
2997         return native_attachAuxEffect(effectId);
2998     }
2999 
3000     /**
3001      * Sets the send level of the audio track to the attached auxiliary effect
3002      * {@link #attachAuxEffect(int)}.  Effect levels
3003      * are clamped to the closed interval [0.0, max] where
3004      * max is the value of {@link #getMaxVolume}.
3005      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
3006      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
3007      * this method must be called for the effect to be applied.
3008      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
3009      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
3010      * so an appropriate conversion from linear UI input x to level is:
3011      * x == 0 -&gt; level = 0
3012      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
3013      *
3014      * @param level linear send level
3015      * @return error code or success, see {@link #SUCCESS},
3016      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
3017      */
setAuxEffectSendLevel(@loatRangefrom = 0.0) float level)3018     public int setAuxEffectSendLevel(@FloatRange(from = 0.0) float level) {
3019         if (mState == STATE_UNINITIALIZED) {
3020             return ERROR_INVALID_OPERATION;
3021         }
3022         return baseSetAuxEffectSendLevel(level);
3023     }
3024 
3025     @Override
playerSetAuxEffectSendLevel(boolean muting, float level)3026     int playerSetAuxEffectSendLevel(boolean muting, float level) {
3027         level = clampGainOrLevel(muting ? 0.0f : level);
3028         int err = native_setAuxEffectSendLevel(level);
3029         return err == 0 ? SUCCESS : ERROR;
3030     }
3031 
3032     //--------------------------------------------------------------------------
3033     // Explicit Routing
3034     //--------------------
3035     private AudioDeviceInfo mPreferredDevice = null;
3036 
3037     /**
3038      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
3039      * the output from this AudioTrack.
3040      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
3041      *  If deviceInfo is null, default routing is restored.
3042      * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
3043      * does not correspond to a valid audio output device.
3044      */
3045     @Override
setPreferredDevice(AudioDeviceInfo deviceInfo)3046     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
3047         // Do some validation....
3048         if (deviceInfo != null && !deviceInfo.isSink()) {
3049             return false;
3050         }
3051         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
3052         boolean status = native_setOutputDevice(preferredDeviceId);
3053         if (status == true) {
3054             synchronized (this) {
3055                 mPreferredDevice = deviceInfo;
3056             }
3057         }
3058         return status;
3059     }
3060 
3061     /**
3062      * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
3063      * is not guaranteed to correspond to the actual device being used for playback.
3064      */
3065     @Override
getPreferredDevice()3066     public AudioDeviceInfo getPreferredDevice() {
3067         synchronized (this) {
3068             return mPreferredDevice;
3069         }
3070     }
3071 
3072     /**
3073      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
3074      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
3075      * <code>getRoutedDevice()</code> will return null.
3076      */
3077     @Override
getRoutedDevice()3078     public AudioDeviceInfo getRoutedDevice() {
3079         int deviceId = native_getRoutedDeviceId();
3080         if (deviceId == 0) {
3081             return null;
3082         }
3083         AudioDeviceInfo[] devices =
3084                 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS);
3085         for (int i = 0; i < devices.length; i++) {
3086             if (devices[i].getId() == deviceId) {
3087                 return devices[i];
3088             }
3089         }
3090         return null;
3091     }
3092 
3093     /*
3094      * Call BEFORE adding a routing callback handler.
3095      */
3096     @GuardedBy("mRoutingChangeListeners")
testEnableNativeRoutingCallbacksLocked()3097     private void testEnableNativeRoutingCallbacksLocked() {
3098         if (mRoutingChangeListeners.size() == 0) {
3099             native_enableDeviceCallback();
3100         }
3101     }
3102 
3103     /*
3104      * Call AFTER removing a routing callback handler.
3105      */
3106     @GuardedBy("mRoutingChangeListeners")
testDisableNativeRoutingCallbacksLocked()3107     private void testDisableNativeRoutingCallbacksLocked() {
3108         if (mRoutingChangeListeners.size() == 0) {
3109             native_disableDeviceCallback();
3110         }
3111     }
3112 
    //--------------------------------------------------------------------------
    // (Re)Routing Info
    //--------------------
    /**
     * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
     * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)})
     * by an app to receive (re)routing notifications.
     */
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();
3124 
3125    /**
3126     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
3127     * changes on this AudioTrack.
3128     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
3129     * notifications of rerouting events.
3130     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3131     * the callback. If <code>null</code>, the {@link Handler} associated with the main
3132     * {@link Looper} will be used.
3133     */
3134     @Override
addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)3135     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
3136             Handler handler) {
3137         synchronized (mRoutingChangeListeners) {
3138             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
3139                 testEnableNativeRoutingCallbacksLocked();
3140                 mRoutingChangeListeners.put(
3141                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
3142                                 handler != null ? handler : new Handler(mInitializationLooper)));
3143             }
3144         }
3145     }
3146 
3147     /**
3148      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
3149      * to receive rerouting notifications.
3150      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
3151      * to remove.
3152      */
3153     @Override
removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)3154     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
3155         synchronized (mRoutingChangeListeners) {
3156             if (mRoutingChangeListeners.containsKey(listener)) {
3157                 mRoutingChangeListeners.remove(listener);
3158             }
3159             testDisableNativeRoutingCallbacksLocked();
3160         }
3161     }
3162 
    //--------------------------------------------------------------------------
    // (Re)Routing Info
    //--------------------
    /**
     * Defines the interface by which applications can receive notifications of
     * routing changes for the associated {@link AudioTrack}.
     *
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
        /**
         * Called when the routing of an AudioTrack changes from either an
         * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
         * retrieve the newly routed-to device.
         */
        public void onRoutingChanged(AudioTrack audioTrack);

        // Bridges the generic AudioRouting callback to the AudioTrack-typed one;
        // non-AudioTrack routers are ignored.
        @Override
        default public void onRoutingChanged(AudioRouting router) {
            if (router instanceof AudioTrack) {
                onRoutingChanged((AudioTrack) router);
            }
        }
    }
3189 
3190     /**
3191      * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
3192      * on this AudioTrack.
3193      * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
3194      * of rerouting events.
3195      * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3196      * the callback. If <code>null</code>, the {@link Handler} associated with the main
3197      * {@link Looper} will be used.
3198      * @deprecated users should switch to the general purpose
3199      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
3200      */
3201     @Deprecated
addOnRoutingChangedListener(OnRoutingChangedListener listener, android.os.Handler handler)3202     public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
3203             android.os.Handler handler) {
3204         addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
3205     }
3206 
3207     /**
3208      * Removes an {@link OnRoutingChangedListener} which has been previously added
3209      * to receive rerouting notifications.
3210      * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
3211      * @deprecated users should switch to the general purpose
3212      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
3213      */
3214     @Deprecated
removeOnRoutingChangedListener(OnRoutingChangedListener listener)3215     public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
3216         removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
3217     }
3218 
3219     /**
3220      * Sends device list change notification to all listeners.
3221      */
broadcastRoutingChange()3222     private void broadcastRoutingChange() {
3223         AudioManager.resetAudioPortGeneration();
3224         synchronized (mRoutingChangeListeners) {
3225             for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
3226                 delegate.notifyClient();
3227             }
3228         }
3229     }
3230 
    //---------------------------------------------------------
    // Interface definitions
    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         * @param track the {@link AudioTrack} whose marker has been reached
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         * @param track the {@link AudioTrack} whose notification period has elapsed
         */
        void onPeriodicNotification(AudioTrack track);
    }
3251 
    /**
     * Abstract class to receive event notifications about the stream playback in offloaded mode.
     * See {@link AudioTrack#registerStreamEventCallback(Executor, StreamEventCallback)} to register
     * the callback on the given {@link AudioTrack} instance.
     * <p>All methods are no-ops by default; subclasses override only the events of interest.
     */
    public abstract static class StreamEventCallback {
        /**
         * Called when an offloaded track is no longer valid and has been discarded by the system.
         * An example of this happening is when an offloaded track has been paused too long, and
         * gets invalidated by the system to prevent any other offload.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onTearDown(@NonNull AudioTrack track) { }
        /**
         * Called when all the buffers of an offloaded track that were queued in the audio system
         * (e.g. the combination of the Android audio framework and the device's audio hardware)
         * have been played after {@link AudioTrack#stop()} has been called.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onPresentationEnded(@NonNull AudioTrack track) { }
        /**
         * Called when more audio data can be written without blocking on an offloaded track.
         * @param track the {@link AudioTrack} on which the event happened.
         * @param sizeInFrames the number of frames available to write without blocking.
         *   Note that the frame size of a compressed stream is 1 byte.
         */
        public void onDataRequest(@NonNull AudioTrack track, @IntRange(from = 0) int sizeInFrames) {
        }
    }
3281 
3282     /**
3283      * Registers a callback for the notification of stream events.
3284      * This callback can only be registered for instances operating in offloaded mode
3285      * (see {@link AudioTrack.Builder#setOffloadedPlayback(boolean)} and
3286      * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} for
3287      * more details).
3288      * @param executor {@link Executor} to handle the callbacks.
3289      * @param eventCallback the callback to receive the stream event notifications.
3290      */
registerStreamEventCallback(@onNull @allbackExecutor Executor executor, @NonNull StreamEventCallback eventCallback)3291     public void registerStreamEventCallback(@NonNull @CallbackExecutor Executor executor,
3292             @NonNull StreamEventCallback eventCallback) {
3293         if (eventCallback == null) {
3294             throw new IllegalArgumentException("Illegal null StreamEventCallback");
3295         }
3296         if (!mOffloaded) {
3297             throw new IllegalStateException(
3298                     "Cannot register StreamEventCallback on non-offloaded AudioTrack");
3299         }
3300         if (executor == null) {
3301             throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback");
3302         }
3303         synchronized (mStreamEventCbLock) {
3304             // check if eventCallback already in list
3305             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
3306                 if (seci.mStreamEventCb == eventCallback) {
3307                     throw new IllegalArgumentException(
3308                             "StreamEventCallback already registered");
3309                 }
3310             }
3311             beginStreamEventHandling();
3312             mStreamEventCbInfoList.add(new StreamEventCbInfo(executor, eventCallback));
3313         }
3314     }
3315 
3316     /**
3317      * Unregisters the callback for notification of stream events, previously registered
3318      * with {@link #registerStreamEventCallback(Executor, StreamEventCallback)}.
3319      * @param eventCallback the callback to unregister.
3320      */
unregisterStreamEventCallback(@onNull StreamEventCallback eventCallback)3321     public void unregisterStreamEventCallback(@NonNull StreamEventCallback eventCallback) {
3322         if (eventCallback == null) {
3323             throw new IllegalArgumentException("Illegal null StreamEventCallback");
3324         }
3325         if (!mOffloaded) {
3326             throw new IllegalStateException("No StreamEventCallback on non-offloaded AudioTrack");
3327         }
3328         synchronized (mStreamEventCbLock) {
3329             StreamEventCbInfo seciToRemove = null;
3330             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
3331                 if (seci.mStreamEventCb == eventCallback) {
3332                     // ok to remove while iterating over list as we exit iteration
3333                     mStreamEventCbInfoList.remove(seci);
3334                     if (mStreamEventCbInfoList.size() == 0) {
3335                         endStreamEventHandling();
3336                     }
3337                     return;
3338                 }
3339             }
3340             throw new IllegalArgumentException("StreamEventCallback was not registered");
3341         }
3342     }
3343 
    //---------------------------------------------------------
    // Offload
    //--------------------
    // Pairs a registered StreamEventCallback with the Executor it should be invoked on.
    private static class StreamEventCbInfo {
        final Executor mStreamEventExec;
        final StreamEventCallback mStreamEventCb;

        StreamEventCbInfo(Executor e, StreamEventCallback cb) {
            mStreamEventExec = e;
            mStreamEventCb = cb;
        }
    }
3356 
    // Guards mStreamEventCbInfoList and the start/stop of the StreamEvent handler thread.
    private final Object mStreamEventCbLock = new Object();
    @GuardedBy("mStreamEventCbLock")
    @NonNull private LinkedList<StreamEventCbInfo> mStreamEventCbInfoList =
            new LinkedList<StreamEventCbInfo>();
    /**
     * Dedicated thread for handling the StreamEvent callbacks
     */
    private @Nullable HandlerThread mStreamEventHandlerThread;
    // Volatile: written in beginStreamEventHandling() and read from the native callback
    // path in handleStreamEventFromNative().
    private @Nullable volatile StreamEventHandler mStreamEventHandler;
3366 
3367     /**
3368      * Called from native AudioTrack callback thread, filter messages if necessary
3369      * and repost event on AudioTrack message loop to prevent blocking native thread.
3370      * @param what event code received from native
3371      * @param arg optional argument for event
3372      */
handleStreamEventFromNative(int what, int arg)3373     void handleStreamEventFromNative(int what, int arg) {
3374         if (mStreamEventHandler == null) {
3375             return;
3376         }
3377         switch (what) {
3378             case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
3379                 // replace previous CAN_WRITE_MORE_DATA messages with the latest value
3380                 mStreamEventHandler.removeMessages(NATIVE_EVENT_CAN_WRITE_MORE_DATA);
3381                 mStreamEventHandler.sendMessage(
3382                         mStreamEventHandler.obtainMessage(
3383                                 NATIVE_EVENT_CAN_WRITE_MORE_DATA, arg, 0/*ignored*/));
3384                 break;
3385             case NATIVE_EVENT_NEW_IAUDIOTRACK:
3386                 mStreamEventHandler.sendMessage(
3387                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_NEW_IAUDIOTRACK));
3388                 break;
3389             case NATIVE_EVENT_STREAM_END:
3390                 mStreamEventHandler.sendMessage(
3391                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_STREAM_END));
3392                 break;
3393         }
3394     }
3395 
    // Handler running on the dedicated StreamEvent thread; delivers offload stream
    // events to the registered StreamEventCallbacks and completes the
    // STOPPING -> STOPPED play-state transition on stream end.
    private class StreamEventHandler extends Handler {

        StreamEventHandler(Looper looper) {
            super(looper);
        }

        @Override
        public void handleMessage(Message msg) {
            final LinkedList<StreamEventCbInfo> cbInfoList;
            synchronized (mStreamEventCbLock) {
                if (msg.what == NATIVE_EVENT_STREAM_END) {
                    // Lock order here is mStreamEventCbLock -> mPlayStateLock.
                    synchronized (mPlayStateLock) {
                        if (mPlayState == PLAYSTATE_STOPPING) {
                            if (mOffloadEosPending) {
                                // NOTE(review): presumably set when playback was requested
                                // again while stopping — restart instead of stopping;
                                // confirm against the play()/stop() logic.
                                native_start();
                                mPlayState = PLAYSTATE_PLAYING;
                            } else {
                                mAvSyncHeader = null;
                                mAvSyncBytesRemaining = 0;
                                mPlayState = PLAYSTATE_STOPPED;
                            }
                            mOffloadEosPending = false;
                            // Wake any thread waiting on the play-state transition.
                            mPlayStateLock.notify();
                        }
                    }
                }
                if (mStreamEventCbInfoList.size() == 0) {
                    return;
                }
                // Snapshot the callback list so client callbacks are dispatched without
                // holding mStreamEventCbLock.
                cbInfoList = new LinkedList<StreamEventCbInfo>(mStreamEventCbInfoList);
            }

            // Don't let app callbacks execute with the calling identity of the thread
            // that delivered the native event.
            final long identity = Binder.clearCallingIdentity();
            try {
                for (StreamEventCbInfo cbi : cbInfoList) {
                    switch (msg.what) {
                        case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                            // msg.arg1 carries the writable size in frames.
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onDataRequest(AudioTrack.this, msg.arg1));
                            break;
                        case NATIVE_EVENT_NEW_IAUDIOTRACK:
                            // TODO also release track as it's not longer usable
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onTearDown(AudioTrack.this));
                            break;
                        case NATIVE_EVENT_STREAM_END:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onPresentationEnded(AudioTrack.this));
                            break;
                    }
                }
            } finally {
                Binder.restoreCallingIdentity(identity);
            }
        }
    }
3452 
3453     @GuardedBy("mStreamEventCbLock")
beginStreamEventHandling()3454     private void beginStreamEventHandling() {
3455         if (mStreamEventHandlerThread == null) {
3456             mStreamEventHandlerThread = new HandlerThread(TAG + ".StreamEvent");
3457             mStreamEventHandlerThread.start();
3458             final Looper looper = mStreamEventHandlerThread.getLooper();
3459             if (looper != null) {
3460                 mStreamEventHandler = new StreamEventHandler(looper);
3461             }
3462         }
3463     }
3464 
3465     @GuardedBy("mStreamEventCbLock")
endStreamEventHandling()3466     private void endStreamEventHandling() {
3467         if (mStreamEventHandlerThread != null) {
3468             mStreamEventHandlerThread.quit();
3469             mStreamEventHandlerThread = null;
3470         }
3471     }
3472 
3473     //---------------------------------------------------------
3474     // Inner classes
3475     //--------------------
3476     /**
3477      * Helper class to handle the forwarding of native events to the appropriate listener
3478      * (potentially) handled in a different thread
3479      */
3480     private class NativePositionEventHandlerDelegate {
3481         private final Handler mHandler;
3482 
NativePositionEventHandlerDelegate(final AudioTrack track, final OnPlaybackPositionUpdateListener listener, Handler handler)3483         NativePositionEventHandlerDelegate(final AudioTrack track,
3484                                    final OnPlaybackPositionUpdateListener listener,
3485                                    Handler handler) {
3486             // find the looper for our new event handler
3487             Looper looper;
3488             if (handler != null) {
3489                 looper = handler.getLooper();
3490             } else {
3491                 // no given handler, use the looper the AudioTrack was created in
3492                 looper = mInitializationLooper;
3493             }
3494 
3495             // construct the event handler with this looper
3496             if (looper != null) {
3497                 // implement the event handler delegate
3498                 mHandler = new Handler(looper) {
3499                     @Override
3500                     public void handleMessage(Message msg) {
3501                         if (track == null) {
3502                             return;
3503                         }
3504                         switch(msg.what) {
3505                         case NATIVE_EVENT_MARKER:
3506                             if (listener != null) {
3507                                 listener.onMarkerReached(track);
3508                             }
3509                             break;
3510                         case NATIVE_EVENT_NEW_POS:
3511                             if (listener != null) {
3512                                 listener.onPeriodicNotification(track);
3513                             }
3514                             break;
3515                         default:
3516                             loge("Unknown native event type: " + msg.what);
3517                             break;
3518                         }
3519                     }
3520                 };
3521             } else {
3522                 mHandler = null;
3523             }
3524         }
3525 
getHandler()3526         Handler getHandler() {
3527             return mHandler;
3528         }
3529     }
3530 
    //---------------------------------------------------------
    // Methods for IPlayer interface
    //--------------------
    // Starts playback on behalf of the framework player interface.
    @Override
    void playerStart() {
        play();
    }
3538 
    // Pauses playback on behalf of the framework player interface.
    @Override
    void playerPause() {
        pause();
    }
3543 
    // Stops playback on behalf of the framework player interface.
    @Override
    void playerStop() {
        stop();
    }
3548 
3549     //---------------------------------------------------------
3550     // Java methods called from the native side
3551     //--------------------
3552     @SuppressWarnings("unused")
3553     @UnsupportedAppUsage
postEventFromNative(Object audiotrack_ref, int what, int arg1, int arg2, Object obj)3554     private static void postEventFromNative(Object audiotrack_ref,
3555             int what, int arg1, int arg2, Object obj) {
3556         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
3557         final AudioTrack track = (AudioTrack) ((WeakReference) audiotrack_ref).get();
3558         if (track == null) {
3559             return;
3560         }
3561 
3562         if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
3563             track.broadcastRoutingChange();
3564             return;
3565         }
3566 
3567         if (what == NATIVE_EVENT_CAN_WRITE_MORE_DATA
3568                 || what == NATIVE_EVENT_NEW_IAUDIOTRACK
3569                 || what == NATIVE_EVENT_STREAM_END) {
3570             track.handleStreamEventFromNative(what, arg1);
3571             return;
3572         }
3573 
3574         NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
3575         if (delegate != null) {
3576             Handler handler = delegate.getHandler();
3577             if (handler != null) {
3578                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
3579                 handler.sendMessage(m);
3580             }
3581         }
3582     }
3583 
    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    private static native boolean native_is_direct_output_supported(int encoding, int sampleRate,
            int channelMask, int channelIndexMask, int contentType, int usage, int flags);

    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack,
            boolean offload);

    private native final void native_finalize();

    /**
     * @hide
     */
    @UnsupportedAppUsage
    public native final void native_release();

    // Transport control.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Audio data writes for byte/short/float/ByteBuffer payloads.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(ByteBuffer audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    // Buffer sizing, in frames.
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback rate and parameters.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Position notification marker and period.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effect attach and send level.
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Explicit routing and device callback control.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    private native int native_applyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation);

    private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
    private native final int native_setPresentation(int presentationId, int programId);

    private native int native_getPortId();

    private native void native_set_delay_padding(int delayInFrames, int paddingInFrames);
3689 
3690     //---------------------------------------------------------
3691     // Utility methods
3692     //------------------
3693 
    /** Logs a debug-level message under this class's {@code TAG}. */
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
3697 
    /** Logs an error-level message under this class's {@code TAG}. */
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
3701 
    /**
     * Keys for extracting metrics values from the {@link AudioTrack#getMetrics}
     * return value. All usable keys share the {@code android.media.audiotrack.}
     * prefix; the two deprecated keys retain a historical, mismatched prefix
     * (see notes below) and are kept only for API compatibility.
     */
    public final static class MetricsConstants
    {
        // Non-instantiable: this class only holds metric key constants.
        private MetricsConstants() {}

        // MM_PREFIX is slightly different than TAG, used to avoid cut-n-paste errors.
        private static final String MM_PREFIX = "android.media.audiotrack.";

        /**
         * Key to extract the stream type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * This value may not exist in API level {@link android.os.Build.VERSION_CODES#P}.
         * The value is a {@code String}.
         */
        public static final String STREAMTYPE = MM_PREFIX + "streamtype";

        /**
         * Key to extract the attribute content type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String CONTENTTYPE = MM_PREFIX + "type";

        /**
         * Key to extract the attribute usage for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String USAGE = MM_PREFIX + "usage";

        /**
         * Key to extract the sample rate for this track in Hz
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is an {@code int}.
         * @deprecated This does not work. Use {@link AudioTrack#getSampleRate()} instead.
         */
        @Deprecated
        // NOTE: value uses the "audiorecord" prefix instead of MM_PREFIX, which
        // is why this key never matched AudioTrack metrics; the value is frozen
        // public API and must not be corrected.
        public static final String SAMPLERATE = "android.media.audiorecord.samplerate";

        /**
         * Key to extract the native channel mask information for this track
         * from the {@link AudioTrack#getMetrics} return value.
         *
         * The value is a {@code long}.
         * @deprecated This does not work. Use {@link AudioTrack#getFormat()} and read from
         * the returned format instead.
         */
        @Deprecated
        // NOTE: same "audiorecord" prefix mismatch as SAMPLERATE above; the
        // value is frozen public API and must not be corrected.
        public static final String CHANNELMASK = "android.media.audiorecord.channelmask";

        /**
         * Use for testing only. Do not expose.
         * The current sample rate.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String SAMPLE_RATE = MM_PREFIX + "sampleRate";

        /**
         * Use for testing only. Do not expose.
         * The native channel mask.
         * The value is a {@code long}.
         * @hide
         */
        @TestApi
        public static final String CHANNEL_MASK = MM_PREFIX + "channelMask";

        /**
         * Use for testing only. Do not expose.
         * The output audio data encoding.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ENCODING = MM_PREFIX + "encoding";

        /**
         * Use for testing only. Do not expose.
         * The port id of this track port in audioserver.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String PORT_ID = MM_PREFIX + "portId";

        /**
         * Use for testing only. Do not expose.
         * The buffer frameCount.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String FRAME_COUNT = MM_PREFIX + "frameCount";

        /**
         * Use for testing only. Do not expose.
         * The actual track attributes used.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ATTRIBUTES = MM_PREFIX + "attributes";
    }
3805 }
3806