/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <android-base/macros.h>
#include <audio_utils/clock.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <processgroup/sched_policy.h>
#include <media/IAudioFlinger.h>
#include <media/IAudioPolicyService.h>
#include <media/AudioParameter.h>
#include <media/AudioResamplerPublic.h>
#include <media/AudioSystem.h>
#include <media/MediaAnalyticsItem.h>
#include <media/TypeConverter.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120
static const int kMaxLoopCountNotifications = 32;

namespace android {
// ---------------------------------------------------------------------------

using media::VolumeShaper;

// TODO: Move to a separate .h

template <typename T>
static inline const T &min(const T &x, const T &y) {
    return x < y ? x : y;
}

template <typename T>
static inline const T &max(const T &x, const T &y) {
    return x > y ? x : y;
}

static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
{
    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
}

static int64_t convertTimespecToUs(const struct timespec &tv)
{
    return tv.tv_sec * 1000000LL + tv.tv_nsec / 1000;
}

// TODO move to audio_utils.
static inline struct timespec convertNsToTimespec(int64_t ns) {
    struct timespec tv;
    tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
    tv.tv_nsec = static_cast<long>(ns % NANOS_PER_SECOND);
    return tv;
}

// current monotonic time in microseconds.
static int64_t getNowUs()
{
    struct timespec tv;
    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
    return convertTimespecToUs(tv);
}

// FIXME: we don't use the pitch setting in the time stretcher (not working);
// instead we emulate it using our sample rate converter.
static const bool kFixPitch = true; // enable pitch fix
static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
{
    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
}

static inline float adjustSpeed(float speed, float pitch)
{
    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
}

static inline float adjustPitch(float pitch)
{
    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
}
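
// Illustrative sketch (not part of the original sources, compiled out): how the three
// helpers above combine to emulate pitch with the sample rate converter. All names and
// values inside this block are hypothetical.
#if 0
static void examplePitchEmulation()
{
    const uint32_t requestedRate = 48000;   // hypothetical client request
    const float requestedSpeed = 1.0f;
    const float requestedPitch = 2.0f;

    // Raising pitch 2x means resampling at twice the rate...
    const uint32_t effectiveRate = adjustSampleRate(requestedRate, requestedPitch);  // 96000
    // ...while halving the time-stretch speed so overall duration is unchanged...
    const float effectiveSpeed = adjustSpeed(requestedSpeed, requestedPitch);        // 0.5
    // ...and the time stretcher itself is told to leave pitch alone.
    const float effectivePitch = adjustPitch(requestedPitch);                        // 1.0
    ALOGV("example: effective rate %u speed %f pitch %f",
            effectiveRate, effectiveSpeed, effectivePitch);
}
#endif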

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("%s(): Unable to query output sample rate for stream type %d; status %d",
                __func__, streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("%s(): Unable to query output frame count for stream type %d; status %d",
                __func__, streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("%s(): Unable to query output latency for stream type %d; status %d",
                __func__, streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
                                              sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("%s(): failed for streamType %d, sampleRate %u",
                __func__, streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("%s(): getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            __func__, *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
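
// Illustrative sketch (not part of the original sources, compiled out): a client would
// typically query the minimum frame count before sizing its own buffer. The constants
// here are hypothetical.
#if 0
static void exampleQueryMinFrameCount()
{
    size_t minFrameCount = 0;
    const status_t status = AudioTrack::getMinFrameCount(
            &minFrameCount, AUDIO_STREAM_MUSIC, 48000 /*sampleRate*/);
    if (status == NO_ERROR) {
        // Request at least double the minimum to leave headroom against underruns.
        const size_t requestedFrameCount = minFrameCount * 2;
        ALOGV("example: min %zu frames, requesting %zu", minFrameCount, requestedFrameCount);
    }
}
#endif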

// static
bool AudioTrack::isDirectOutputSupported(const audio_config_base_t& config,
                                         const audio_attributes_t& attributes) {
    ALOGV("%s()", __FUNCTION__);
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
    if (aps == 0) return false;
    return aps->isDirectOutputSupported(config, attributes);
}

// ---------------------------------------------------------------------------

void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
{
    // only if we're in a good state...
    // XXX: shall we gather alternative info if failing?
    const status_t lstatus = track->initCheck();
    if (lstatus != NO_ERROR) {
        ALOGD("%s(): no metrics gathered, track status=%d", __func__, (int) lstatus);
        return;
    }

#define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.

    // Java API 28 entries, do not change.
    mAnalyticsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
    mAnalyticsItem->setCString(MM_PREFIX "type",
            toString(track->mAttributes.content_type).c_str());
    mAnalyticsItem->setCString(MM_PREFIX "usage", toString(track->mAttributes.usage).c_str());

    // Non-API entries, these can change due to a Java string mistake.
    mAnalyticsItem->setInt32(MM_PREFIX "sampleRate", (int32_t)track->mSampleRate);
    mAnalyticsItem->setInt64(MM_PREFIX "channelMask", (int64_t)track->mChannelMask);
    // Non-API entries, these can change.
    mAnalyticsItem->setInt32(MM_PREFIX "portId", (int32_t)track->mPortId);
    mAnalyticsItem->setCString(MM_PREFIX "encoding", toString(track->mFormat).c_str());
    mAnalyticsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
    mAnalyticsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
}

// hand the user a snapshot of the metrics.
status_t AudioTrack::getMetrics(MediaAnalyticsItem * &item)
{
    mMediaMetrics.gather(this);
    MediaAnalyticsItem *tmp = mMediaMetrics.dup();
    if (tmp == nullptr) {
        return BAD_VALUE;
    }
    item = tmp;
    return NO_ERROR;
}
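
// Illustrative sketch (not part of the original sources, compiled out): the caller owns
// the snapshot returned by getMetrics() and must delete it when done. This assumes
// MediaAnalyticsItem::toString() is available for dumping.
#if 0
static void exampleDumpMetrics(AudioTrack &track)
{
    MediaAnalyticsItem *item = nullptr;
    if (track.getMetrics(item) == NO_ERROR && item != nullptr) {
        ALOGV("example: metrics %s", item->toString().c_str());
        delete item;   // snapshot is owned by the caller
    }
}
#endif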

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;

    (void)set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;

    (void)set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}
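
// Illustrative sketch (not part of the original sources, compiled out): constructing a
// streaming track that is fed from a callback. Parameter values and function names here
// are hypothetical; remaining constructor arguments take their defaults.
#if 0
static void exampleCallback(int event, void *user __unused, void *info)
{
    if (event == AudioTrack::EVENT_MORE_DATA) {
        AudioTrack::Buffer *buffer = static_cast<AudioTrack::Buffer *>(info);
        memset(buffer->raw, 0, buffer->size);   // supply silence for the example
    }
}

static sp<AudioTrack> exampleCreateStreamingTrack()
{
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC,
            48000 /*sampleRate*/,
            AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO,
            0 /*frameCount, let the server choose*/,
            AUDIO_OUTPUT_FLAG_NONE,
            exampleCallback,
            nullptr /*user*/);
    return track->initCheck() == NO_ERROR ? track : nullptr;
}
#endif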

AudioTrack::~AudioTrack()
{
    // pull together the numbers, before we clean up our structures
    mMediaMetrics.gather(this);

    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
        }
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("%s(%d), releasing session id %d from %d on behalf of %d",
                __func__, mPortId,
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
{
    status_t status;
    uint32_t channelCount;
    pid_t callingPid;
    pid_t myPid;

    // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
    ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          __func__,
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);

    mThreadCanCallJava = threadCanCallJava;
    mSelectedDeviceId = selectedDeviceId;
    mSessionId = sessionId;

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
    case TRANSFER_SYNC_NOTIF_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("%s(): Transfer type %s but cbf == NULL || sharedBuffer != 0",
                    __func__, convertTransferToText(transferType));
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("%s(): Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("%s(): Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
            status = BAD_VALUE;
            goto exit;
        }
        break;
    default:
        ALOGE("%s(): Invalid transfer type %d",
                __func__, transferType);
        status = BAD_VALUE;
        goto exit;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
            __func__, sharedBuffer->pointer(), sharedBuffer->size());

    ALOGV("%s(): streamType %d frameCount %zu flags %04x",
            __func__, streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("%s(): Track already in use", __func__);
        status = INVALID_OPERATION;
        goto exit;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("%s(): Invalid stream type %d", __func__, streamType);
            status = BAD_VALUE;
            goto exit;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("%s(): Building AudioTrack with attributes:"
                " usage=%d content=%d flags=0x%x tags=[%s]",
                __func__,
                 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("%s(): Invalid format %#x", __func__, format);
        status = BAD_VALUE;
        goto exit;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("%s(): Invalid channel mask %#x",  __func__, channelMask);
        status = BAD_VALUE;
        goto exit;
    }
    mChannelMask = channelMask;
    channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "%s(): Offload request, forcing to Direct Output"
                    : "%s(): Not linear PCM, forcing to Direct Output",
                    __func__);
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        status = BAD_VALUE;
        goto exit;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("%s(): notificationFrames=%d not permitted for non-fast track",
                    __func__, notificationFrames);
            status = BAD_VALUE;
            goto exit;
        }
        if (frameCount > 0) {
            ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    __func__, notificationFrames, frameCount);
            status = BAD_VALUE;
            goto exit;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "%s(): notificationFrames=%d clamped to the range -%u to -%u",
                __func__,
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    callingPid = IPCThreadState::self()->getCallingPid();
    myPid = getpid();
    if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingPid != myPid)) {
        mClientPid = callingPid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    {
        AutoMutex lock(mLock);
        status = createTrack_l();
    }
    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        goto exit;
    }

    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartNs = 0;
    mStartFromZeroUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mTimestampRetrogradePositionReported = false;
    mTimestampRetrogradeTimeReported = false;
    mTimestampStallReported = false;
    mTimestampStaleTimeReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
    mVolumeHandler = new media::VolumeHandler();

exit:
    mStatus = status;
    return status;
}

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);
    ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();

    // save start timestamp
    if (isOffloadedOrDirect_l()) {
        if (getTimestamp_l(mStartTs) != OK) {
            mStartTs.mPosition = 0;
        }
    } else {
        if (getTimestamp_l(&mStartEts) != OK) {
            mStartEts.clear();
        }
    }
    mStartNs = systemTime(); // save this for timestamp adjustment after starting.
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mTimestampRetrogradePositionReported = false;
        mTimestampRetrogradeTimeReported = false;
        mTimestampStallReported = false;
        mTimestampStaleTimeReported = false;
        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;

        if (!isOffloadedOrDirect_l()
                && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("%s(%d): server read:%lld  cumulative flushed:%lld  client written:%lld",
                    __func__, mPortId,
                    (long long)(mFramesWrittenServerOffset
                            + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)mStartEts.mFlushed,
                    (long long)mFramesWritten);
            // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
            mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartFromZeroUs = mStartNs / 1000;

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;

        // for static track, clear the old flags when starting from stopped state
        if (mSharedBuffer != 0) {
            android_atomic_and(
            ~(CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
            &mCblk->mFlags);
        }
    }
    mNewPosition = mPosition + mUpdatePeriod;
    int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }

        // Start our local VolumeHandler for restoration purposes.
        mVolumeHandler->setStarted();
    } else {
        ALOGE("%s(%d): status %d", __func__, mPortId, status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));

    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        ALOGD_IF(mSharedBuffer == nullptr,
                "%s(%d): called with %u frames delivered", __func__, mPortId, mReleased.value());
        mReleased = 0;
    }

    mProxy->stop(); // notify server not to read beyond current client position until start().
    mProxy->interrupt();
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        } else if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
            // causes wake up of the playback thread, that will callback the client for
            // EVENT_STREAM_END in processAudioBuffer()
            t->wake();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    AutoMutex lock(mLock);
    ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));

    if (mSharedBuffer != 0) {
        return;
    }
    if (mState == STATE_ACTIVE) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));

    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("%s(%d): for offload, cache current position %u",
                    __func__, mPortId, mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    AutoMutex lock(mLock);
    ALOGV("%s(%d): prior state:%s rate:%u", __func__, mPortId, stateToString(mState), rate);

    if (rate == mSampleRate) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)
            || (mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL)) {
        return INVALID_OPERATION;
    }
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    // NOTE: it is theoretically possible, but highly unlikely, that a device change
    // could mean a previously allowed sampling rate is no longer allowed.
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }
    // TODO: Should we also check if the buffer size is compatible?

    mSampleRate = rate;
    mProxy->setSampleRate(effectiveSampleRate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

uint32_t AudioTrack::getOriginalSampleRate() const
{
    return mOriginalSampleRate;
}

status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
    AutoMutex lock(mLock);
    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION;
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    ALOGV("%s(%d): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            __func__, mPortId, mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    const float effectivePitch = adjustPitch(playbackRate.mPitch);
    AudioPlaybackRate playbackRateTemp = playbackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;

    ALOGV("%s(%d) (effective) mSampleRate:%u  mSpeed:%f  mPitch:%f",
            __func__, mPortId, effectiveRate, effectiveSpeed, effectivePitch);

    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
        ALOGW("%s(%d) (%f, %f) failed (effective rate out of bounds)",
                __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // Check if the buffer size is compatible.
    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
        ALOGW("%s(%d) (%f, %f) failed (buffer size)",
                __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    // Check resampler ratios are within bounds
    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
            (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        ALOGW("%s(%d) (%f, %f) failed. Resample rate exceeds max accepted value",
                __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
        ALOGW("%s(%d) (%f, %f) failed. Resample rate below min accepted value",
                __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    mPlaybackRate = playbackRate;
    //set effective rates
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    return NO_ERROR;
}
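
// Illustrative sketch (not part of the original sources, compiled out): requesting
// double-speed playback at unchanged pitch, which is mapped onto an effective sample
// rate/speed pair as computed above. Names inside the block are hypothetical.
#if 0
static void exampleSetDoubleSpeed(const sp<AudioTrack> &track)
{
    AudioPlaybackRate rate = track->getPlaybackRate();   // copy current settings
    rate.mSpeed = 2.0f;
    rate.mPitch = 1.0f;
    if (track->setPlaybackRate(rate) != NO_ERROR) {
        ALOGW("example: 2x speed rejected (buffer too small or rate out of bounds)");
    }
}
#endif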

const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
{
    AutoMutex lock(mLock);
    return mPlaybackRate;
}

ssize_t AudioTrack::getBufferSizeInFrames()
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    return (ssize_t) mProxy->getBufferSizeInFrames();
}

status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
{
    if (duration == nullptr) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
    if (bufferSizeInFrames < 0) {
        return (status_t)bufferSizeInFrames;
    }
    *duration = (int64_t)((double)bufferSizeInFrames * 1000000
            / ((double)mSampleRate * mPlaybackRate.mSpeed));
    return NO_ERROR;
}

ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    // Reject if timed track or compressed audio.
    if (!audio_is_linear_pcm(mFormat)) {
        return INVALID_OPERATION;
    }
    return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}
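
// Illustrative sketch (not part of the original sources, compiled out): looping applies
// only to static (shared buffer) tracks and must be configured while the track is not
// active. Names and values inside the block are hypothetical.
#if 0
static void exampleLoopWholeBuffer(const sp<AudioTrack> &track, uint32_t frameCount)
{
    // Loop the entire static buffer twice more after the first pass (three plays total).
    if (track->setLoop(0 /*loopStart*/, frameCount /*loopEnd*/, 2 /*loopCount*/) != NO_ERROR) {
        ALOGW("example: setLoop rejected (streaming track, active, or bad range)");
    }
}
#endif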

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // We do not update the periodic notification point.
    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopCount = loopCount;
    mLoopEnd = loopEnd;
    mLoopStart = loopStart;
    mLoopCountNotified = loopCount;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mMarkerPosition.getValue(marker);

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = updateAndGetPosition_l() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // After setting the position, use full update period before notification.
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mStaticProxy->setBufferPosition(position);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // FIXME: offloaded and direct tracks call into the HAL for render positions
    // for compressed/synced data; however, we use proxy position for pure linear pcm data
    // as we do not know the capability of the HAL for pcm position support and standby.
    // There may be some latency differences between the HAL position and the proxy position.
    if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
        uint32_t dspFrames = 0;

        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("%s(%d): called in paused state, return cached position %u",
                __func__, mPortId, mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames; // actually unused
            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        if (mCblk->mFlags & CBLK_INVALID) {
            (void) restoreTrack_l("getPosition");
            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
        }

        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l().value();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    (void) updateAndGetPosition_l();
    mPosition = 0;
    mPreviousTimestampValid = false;
#if 0
    // The documentation is not clear on the behavior of reload() and the restoration
    // of loop count. Historically we have not restored loop count, start, end,
    // but it makes sense if one desires to repeat playing a particular sound.
    if (mLoopCount != 0) {
        mLoopCountNotified = mLoopCount;
        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
    }
#endif
    mStaticProxy->setBufferPosition(0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
    AutoMutex lock(mLock);
    if (mSelectedDeviceId != deviceId) {
        mSelectedDeviceId = deviceId;
        if (mStatus == NO_ERROR) {
            android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
            mProxy->interrupt();
        }
    }
    return NO_ERROR;
}

audio_port_handle_t AudioTrack::getOutputDevice() {
    AutoMutex lock(mLock);
    return mSelectedDeviceId;
}

// must be called with mLock held
void AudioTrack::updateRoutedDeviceId_l()
{
    // if the track is inactive, do not update actual device as the output stream maybe routed
    // to a device not relevant to this client because of other active use cases.
    if (mState != STATE_ACTIVE) {
        return;
    }
    if (mOutput != AUDIO_IO_HANDLE_NONE) {
        audio_port_handle_t deviceId = AudioSystem::getDeviceIdForIo(mOutput);
        if (deviceId != AUDIO_PORT_HANDLE_NONE) {
            mRoutedDeviceId = deviceId;
        }
    }
}

audio_port_handle_t AudioTrack::getRoutedDeviceId() {
    AutoMutex lock(mLock);
    updateRoutedDeviceId_l();
    return mRoutedDeviceId;
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

audio_stream_type_t AudioTrack::streamType() const
{
    if (mStreamType == AUDIO_STREAM_DEFAULT) {
        return AudioSystem::attributesToStreamType(mAttributes);
    }
    return mStreamType;
}

uint32_t AudioTrack::latency()
{
    AutoMutex lock(mLock);
    updateLatency_l();
    return mLatency;
}

// -------------------------------------------------------------------------

// must be called with mLock held
void AudioTrack::updateLatency_l()
{
    status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGW("%s(%d): getLatency(%d) failed status %d", __func__, mPortId, mOutput, status);
    } else {
        // FIXME don't believe this lie
        mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
    }
}

// TODO Move this macro to a common header file for enum to string conversion in audio framework.
#define MEDIA_CASE_ENUM(name) case name: return #name
const char * AudioTrack::convertTransferToText(transfer_type transferType) {
    switch (transferType) {
        MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
        MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
        MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
        MEDIA_CASE_ENUM(TRANSFER_SYNC);
        MEDIA_CASE_ENUM(TRANSFER_SHARED);
        MEDIA_CASE_ENUM(TRANSFER_SYNC_NOTIF_CALLBACK);
        default:
            return "UNRECOGNIZED";
    }
}
1392 
createTrack_l()1393 status_t AudioTrack::createTrack_l()
1394 {
1395     status_t status;
1396     bool callbackAdded = false;
1397 
1398     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1399     if (audioFlinger == 0) {
1400         ALOGE("%s(%d): Could not get audioflinger",
1401                 __func__, mPortId);
1402         status = NO_INIT;
1403         goto exit;
1404     }
1405 
1406     {
1407     // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
1408     // After fast request is denied, we will request again if IAudioTrack is re-created.
1409     // Client can only express a preference for FAST.  Server will perform additional tests.
1410     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1411         // either of these use cases:
1412         // use case 1: shared buffer
1413         bool sharedBuffer = mSharedBuffer != 0;
1414         bool transferAllowed =
1415             // use case 2: callback transfer mode
1416             (mTransfer == TRANSFER_CALLBACK) ||
1417             // use case 3: obtain/release mode
1418             (mTransfer == TRANSFER_OBTAIN) ||
1419             // use case 4: synchronous write
1420             ((mTransfer == TRANSFER_SYNC || mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK)
1421                     && mThreadCanCallJava);
1422 
1423         bool fastAllowed = sharedBuffer || transferAllowed;
1424         if (!fastAllowed) {
1425             ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by client,"
1426                   " not shared buffer and transfer = %s",
1427                   __func__, mPortId,
1428                   convertTransferToText(mTransfer));
1429             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1430         }
1431     }
1432 
1433     IAudioFlinger::CreateTrackInput input;
1434     if (mStreamType != AUDIO_STREAM_DEFAULT) {
1435         input.attr = AudioSystem::streamTypeToAttributes(mStreamType);
1436     } else {
1437         input.attr = mAttributes;
1438     }
1439     input.config = AUDIO_CONFIG_INITIALIZER;
1440     input.config.sample_rate = mSampleRate;
1441     input.config.channel_mask = mChannelMask;
1442     input.config.format = mFormat;
1443     input.config.offload_info = mOffloadInfoCopy;
1444     input.clientInfo.clientUid = mClientUid;
1445     input.clientInfo.clientPid = mClientPid;
1446     input.clientInfo.clientTid = -1;
1447     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1448         // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
1449         // application-level code follows all non-blocking design rules, the language runtime
1450         // doesn't also follow those rules, so the thread will not benefit overall.
1451         if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1452             input.clientInfo.clientTid = mAudioTrackThread->getTid();
1453         }
1454     }
1455     input.sharedBuffer = mSharedBuffer;
1456     input.notificationsPerBuffer = mNotificationsPerBufferReq;
1457     input.speed = 1.0;
1458     if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
1459             (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
1460         input.speed  = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1461                         max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1462     }
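    // Worked example (illustrative numbers only): requesting the larger of mMaxRequiredSpeed and
    // the current speed lets the server size the buffer for the fastest expected playback; at
    // speed 2.0 a buffer holding 20 ms of frames drains in roughly 10 ms of wall-clock time, so
    // about twice the frames are needed to keep the same underrun margin.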
1463     input.flags = mFlags;
1464     input.frameCount = mReqFrameCount;
1465     input.notificationFrameCount = mNotificationFramesReq;
1466     input.selectedDeviceId = mSelectedDeviceId;
1467     input.sessionId = mSessionId;
1468 
1469     IAudioFlinger::CreateTrackOutput output;
1470 
1471     sp<IAudioTrack> track = audioFlinger->createTrack(input,
1472                                                       output,
1473                                                       &status);
1474 
1475     if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
1476         ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
1477                 __func__, mPortId, status, output.outputId);
1478         if (status == NO_ERROR) {
1479             status = NO_INIT;
1480         }
1481         goto exit;
1482     }
1483     ALOG_ASSERT(track != 0);
1484 
1485     mFrameCount = output.frameCount;
1486     mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
1487     mRoutedDeviceId = output.selectedDeviceId;
1488     mSessionId = output.sessionId;
1489 
1490     mSampleRate = output.sampleRate;
1491     if (mOriginalSampleRate == 0) {
1492         mOriginalSampleRate = mSampleRate;
1493     }
1494 
1495     mAfFrameCount = output.afFrameCount;
1496     mAfSampleRate = output.afSampleRate;
1497     mAfLatency = output.afLatencyMs;
1498 
1499     mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
1500 
1501     // AudioFlinger now owns the reference to the I/O handle,
1502     // so we are no longer responsible for releasing it.
1503 
1504     // FIXME compare to AudioRecord
1505     sp<IMemory> iMem = track->getCblk();
1506     if (iMem == 0) {
1507         ALOGE("%s(%d): Could not get control block", __func__, mPortId);
1508         status = NO_INIT;
1509         goto exit;
1510     }
1511     void *iMemPointer = iMem->pointer();
1512     if (iMemPointer == NULL) {
1513         ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
1514         status = NO_INIT;
1515         goto exit;
1516     }
1517     // invariant that mAudioTrack != 0 is true only after set() returns successfully
1518     if (mAudioTrack != 0) {
1519         IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1520         mDeathNotifier.clear();
1521     }
1522     mAudioTrack = track;
1523     mCblkMemory = iMem;
1524     IPCThreadState::self()->flushCommands();
1525 
1526     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1527     mCblk = cblk;
1528 
1529     mAwaitBoost = false;
1530     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1531         if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
1532             ALOGI("%s(%d): AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
1533                   __func__, mPortId, mReqFrameCount, mFrameCount);
1534             if (!mThreadCanCallJava) {
1535                 mAwaitBoost = true;
1536             }
1537         } else {
1538             ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
1539                   __func__, mPortId, mReqFrameCount, mFrameCount);
1540         }
1541     }
1542     mFlags = output.flags;
1543 
1544     // mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
1545     if (mDeviceCallback != 0) {
1546         if (mOutput != AUDIO_IO_HANDLE_NONE) {
1547             AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
1548         }
1549         AudioSystem::addAudioDeviceCallback(this, output.outputId, output.portId);
1550         callbackAdded = true;
1551     }
1552 
1553     mPortId = output.portId;
1554     // We retain a copy of the I/O handle, but don't own the reference
1555     mOutput = output.outputId;
1556     mRefreshRemaining = true;
1557 
1558     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1559     // is the value of pointer() for the shared buffer, otherwise buffers points
1560     // immediately after the control block.  This address is for the mapping within client
1561     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1562     void* buffers;
1563     if (mSharedBuffer == 0) {
1564         buffers = cblk + 1;
1565     } else {
1566         buffers = mSharedBuffer->pointer();
1567         if (buffers == NULL) {
1568             ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
1569             status = NO_INIT;
1570             goto exit;
1571         }
1572     }
1573 
1574     mAudioTrack->attachAuxEffect(mAuxEffectId);
1575 
1576     // If IAudioTrack is re-created, don't let the requested frameCount
1577     // decrease.  This can confuse clients that cache frameCount().
1578     if (mFrameCount > mReqFrameCount) {
1579         mReqFrameCount = mFrameCount;
1580     }
1581 
1582     // reset server position to 0 as we have new cblk.
1583     mServer = 0;
1584 
1585     // update proxy
1586     if (mSharedBuffer == 0) {
1587         mStaticProxy.clear();
1588         mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1589     } else {
1590         mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1591         mProxy = mStaticProxy;
1592     }
1593 
1594     mProxy->setVolumeLR(gain_minifloat_pack(
1595             gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1596             gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1597 
1598     mProxy->setSendLevel(mSendLevel);
1599     const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1600     const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1601     const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1602     mProxy->setSampleRate(effectiveSampleRate);
1603 
1604     AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1605     playbackRateTemp.mSpeed = effectiveSpeed;
1606     playbackRateTemp.mPitch = effectivePitch;
1607     mProxy->setPlaybackRate(playbackRateTemp);
1608     mProxy->setMinimum(mNotificationFramesAct);
1609 
1610     mDeathNotifier = new DeathNotifier(this);
1611     IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1612 
1613     }
1614 
1615 exit:
1616     if (status != NO_ERROR && callbackAdded) {
1617         // note: mOutput is always valid if callbackAdded is true
1618         AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
1619     }
1620 
1621     mStatus = status;
1622 
1623     // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
1624     return status;
1625 }
1626 
1627 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1628 {
1629     if (audioBuffer == NULL) {
1630         if (nonContig != NULL) {
1631             *nonContig = 0;
1632         }
1633         return BAD_VALUE;
1634     }
1635     if (mTransfer != TRANSFER_OBTAIN) {
1636         audioBuffer->frameCount = 0;
1637         audioBuffer->size = 0;
1638         audioBuffer->raw = NULL;
1639         if (nonContig != NULL) {
1640             *nonContig = 0;
1641         }
1642         return INVALID_OPERATION;
1643     }
1644 
1645     const struct timespec *requested;
1646     struct timespec timeout;
1647     if (waitCount == -1) {
1648         requested = &ClientProxy::kForever;
1649     } else if (waitCount == 0) {
1650         requested = &ClientProxy::kNonBlocking;
1651     } else if (waitCount > 0) {
1652         time_t ms = WAIT_PERIOD_MS * (time_t) waitCount;
1653         timeout.tv_sec = ms / 1000;
1654         timeout.tv_nsec = (long) (ms % 1000) * 1000000;
1655         requested = &timeout;
1656     } else {
1657         ALOGE("%s(%d): invalid waitCount %d", __func__, mPortId, waitCount);
1658         requested = NULL;
1659     }
1660     return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1661 }
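
// For illustration only (not part of the original source): typical TRANSFER_OBTAIN usage pairs
// this call with releaseBuffer().  A minimal sketch, assuming the track was configured with
// TRANSFER_OBTAIN and `pcm`/`pcmBytes` describe client data in the track's format:
//
//     AudioTrack::Buffer buf;
//     buf.frameCount = pcmBytes / track->frameSize();
//     if (track->obtainBuffer(&buf, -1 /*waitCount: block*/, nullptr /*nonContig*/) == NO_ERROR) {
//         memcpy(buf.raw, pcm, buf.size);          // buf.size may be less than pcmBytes
//         track->releaseBuffer(&buf);
//     }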
1662 
1663 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1664         struct timespec *elapsed, size_t *nonContig)
1665 {
1666     // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1667     uint32_t oldSequence = 0;
1668 
1669     Proxy::Buffer buffer;
1670     status_t status = NO_ERROR;
1671 
1672     static const int32_t kMaxTries = 5;
1673     int32_t tryCounter = kMaxTries;
1674 
1675     do {
1676         // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1677         // keep them from going away if another thread re-creates the track during obtainBuffer()
1678         sp<AudioTrackClientProxy> proxy;
1679         sp<IMemory> iMem;
1680 
1681         {   // start of lock scope
1682             AutoMutex lock(mLock);
1683 
1684             uint32_t newSequence = mSequence;
1685             // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1686             if (status == DEAD_OBJECT) {
1687                 // re-create track, unless someone else has already done so
1688                 if (newSequence == oldSequence) {
1689                     status = restoreTrack_l("obtainBuffer");
1690                     if (status != NO_ERROR) {
1691                         buffer.mFrameCount = 0;
1692                         buffer.mRaw = NULL;
1693                         buffer.mNonContig = 0;
1694                         break;
1695                     }
1696                 }
1697             }
1698             oldSequence = newSequence;
1699 
1700             if (status == NOT_ENOUGH_DATA) {
1701                 restartIfDisabled();
1702             }
1703 
1704             // Keep the extra references
1705             proxy = mProxy;
1706             iMem = mCblkMemory;
1707 
1708             if (mState == STATE_STOPPING) {
1709                 status = -EINTR;
1710                 buffer.mFrameCount = 0;
1711                 buffer.mRaw = NULL;
1712                 buffer.mNonContig = 0;
1713                 break;
1714             }
1715 
1716             // Non-blocking if track is stopped or paused
1717             if (mState != STATE_ACTIVE) {
1718                 requested = &ClientProxy::kNonBlocking;
1719             }
1720 
1721         }   // end of lock scope
1722 
1723         buffer.mFrameCount = audioBuffer->frameCount;
1724         // FIXME starts the requested timeout and elapsed over from scratch
1725         status = proxy->obtainBuffer(&buffer, requested, elapsed);
1726     } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1727 
1728     audioBuffer->frameCount = buffer.mFrameCount;
1729     audioBuffer->size = buffer.mFrameCount * mFrameSize;
1730     audioBuffer->raw = buffer.mRaw;
1731     audioBuffer->sequence = oldSequence;
1732     if (nonContig != NULL) {
1733         *nonContig = buffer.mNonContig;
1734     }
1735     return status;
1736 }
1737 
1738 void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1739 {
1740     // FIXME add error checking on mode, by adding an internal version
1741     if (mTransfer == TRANSFER_SHARED) {
1742         return;
1743     }
1744 
1745     size_t stepCount = audioBuffer->size / mFrameSize;
1746     if (stepCount == 0) {
1747         return;
1748     }
1749 
1750     Proxy::Buffer buffer;
1751     buffer.mFrameCount = stepCount;
1752     buffer.mRaw = audioBuffer->raw;
1753 
1754     AutoMutex lock(mLock);
1755     if (audioBuffer->sequence != mSequence) {
1756         // This Buffer came from a different IAudioTrack instance, so ignore the releaseBuffer
1757         ALOGD("%s is no-op due to IAudioTrack sequence mismatch %u != %u",
1758                 __func__, audioBuffer->sequence, mSequence);
1759         return;
1760     }
1761     mReleased += stepCount;
1762     mInUnderrun = false;
1763     mProxy->releaseBuffer(&buffer);
1764 
1765     // restart track if it was disabled by audioflinger due to previous underrun
1766     restartIfDisabled();
1767 }
1768 
1769 void AudioTrack::restartIfDisabled()
1770 {
1771     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1772     if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1773         ALOGW("%s(%d): releaseBuffer() track %p disabled due to previous underrun, restarting",
1774                 __func__, mPortId, this);
1775         // FIXME ignoring status
1776         mAudioTrack->start();
1777     }
1778 }
1779 
1780 // -------------------------------------------------------------------------
1781 
1782 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1783 {
1784     if (mTransfer != TRANSFER_SYNC && mTransfer != TRANSFER_SYNC_NOTIF_CALLBACK) {
1785         return INVALID_OPERATION;
1786     }
1787 
1788     if (isDirect()) {
1789         AutoMutex lock(mLock);
1790         int32_t flags = android_atomic_and(
1791                             ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1792                             &mCblk->mFlags);
1793         if (flags & CBLK_INVALID) {
1794             return DEAD_OBJECT;
1795         }
1796     }
1797 
1798     if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1799         // Validation: the user is most likely passing an error code, and it would
1800         // make the return value ambiguous (actualSize vs error).
1801         ALOGE("%s(%d): AudioTrack::write(buffer=%p, size=%zu (%zd))",
1802                 __func__, mPortId, buffer, userSize, userSize);
1803         return BAD_VALUE;
1804     }
1805 
1806     size_t written = 0;
1807     Buffer audioBuffer;
1808 
1809     while (userSize >= mFrameSize) {
1810         audioBuffer.frameCount = userSize / mFrameSize;
1811 
1812         status_t err = obtainBuffer(&audioBuffer,
1813                 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1814         if (err < 0) {
1815             if (written > 0) {
1816                 break;
1817             }
1818             if (err == TIMED_OUT || err == -EINTR) {
1819                 err = WOULD_BLOCK;
1820             }
1821             return ssize_t(err);
1822         }
1823 
1824         size_t toWrite = audioBuffer.size;
1825         memcpy(audioBuffer.i8, buffer, toWrite);
1826         buffer = ((const char *) buffer) + toWrite;
1827         userSize -= toWrite;
1828         written += toWrite;
1829 
1830         releaseBuffer(&audioBuffer);
1831     }
1832 
1833     if (written > 0) {
1834         mFramesWritten += written / mFrameSize;
1835 
1836         if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
1837             const sp<AudioTrackThread> t = mAudioTrackThread;
1838             if (t != 0) {
1839                 // wakes up the playback thread, which will call back the client for
1840                 // more data (with EVENT_CAN_WRITE_MORE_DATA) in processAudioBuffer()
1841                 t->wake();
1842             }
1843         }
1844     }
1845 
1846     return written;
1847 }
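
// For illustration only (not part of the original source): with TRANSFER_SYNC a client simply
// loops on write().  A minimal sketch, assuming `track` has been started and `data`/`size`
// describe PCM in the track's frame size and format:
//
//     ssize_t n = track->write(data, size, true /*blocking*/);
//     if (n < 0) {
//         // error, e.g. DEAD_OBJECT if the track could not be restored
//     } else {
//         // n bytes were consumed; with blocking == false, n may be less than size
//     }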
1848 
1849 // -------------------------------------------------------------------------
1850 
1851 nsecs_t AudioTrack::processAudioBuffer()
1852 {
1853     // Currently the AudioTrack thread is not created if there are no callbacks.
1854     // Would it ever make sense to run the thread, even without callbacks?
1855     // If so, then replace this by checks at each use for mCbf != NULL.
1856     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1857 
1858     mLock.lock();
1859     if (mAwaitBoost) {
1860         mAwaitBoost = false;
1861         mLock.unlock();
1862         static const int32_t kMaxTries = 5;
1863         int32_t tryCounter = kMaxTries;
1864         uint32_t pollUs = 10000;
1865         do {
1866             int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
1867             if (policy == SCHED_FIFO || policy == SCHED_RR) {
1868                 break;
1869             }
1870             usleep(pollUs);
1871             pollUs <<= 1;
1872         } while (tryCounter-- > 0);
1873         if (tryCounter < 0) {
1874             ALOGE("%s(%d): did not receive expected priority boost on time",
1875                     __func__, mPortId);
1876         }
1877         // Run again immediately
1878         return 0;
1879     }
1880 
1881     // Can only reference mCblk while locked
1882     int32_t flags = android_atomic_and(
1883         ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1884 
1885     // Check for track invalidation
1886     if (flags & CBLK_INVALID) {
1887         // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1888         // AudioSystem cache. We should not exit here but after calling the callback so
1889         // that the upper layers can recreate the track
1890         if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1891             status_t status __unused = restoreTrack_l("processAudioBuffer");
1892             // FIXME unused status
1893             // after restoration, continue below to make sure that the loop and buffer events
1894             // are notified because they have been cleared from mCblk->mFlags above.
1895         }
1896     }
1897 
1898     bool waitStreamEnd = mState == STATE_STOPPING;
1899     bool active = mState == STATE_ACTIVE;
1900 
1901     // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1902     bool newUnderrun = false;
1903     if (flags & CBLK_UNDERRUN) {
1904 #if 0
1905         // Currently in shared buffer mode, when the server reaches the end of buffer,
1906         // the track stays active in continuous underrun state.  It's up to the application
1907         // to pause or stop the track, or set the position to a new offset within buffer.
1908         // This was some experimental code to auto-pause on underrun.   Keeping it here
1909         // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1910         if (mTransfer == TRANSFER_SHARED) {
1911             mState = STATE_PAUSED;
1912             active = false;
1913         }
1914 #endif
1915         if (!mInUnderrun) {
1916             mInUnderrun = true;
1917             newUnderrun = true;
1918         }
1919     }
1920 
1921     // Get current position of server
1922     Modulo<uint32_t> position(updateAndGetPosition_l());
1923 
1924     // Manage marker callback
1925     bool markerReached = false;
1926     Modulo<uint32_t> markerPosition(mMarkerPosition);
1927     // uses 32 bit wraparound for comparison with position.
1928     if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1929         mMarkerReached = markerReached = true;
1930     }
1931 
1932     // Determine number of new position callback(s) that will be needed, while locked
1933     size_t newPosCount = 0;
1934     Modulo<uint32_t> newPosition(mNewPosition);
1935     uint32_t updatePeriod = mUpdatePeriod;
1936     // FIXME fails for wraparound, need 64 bits
1937     if (updatePeriod > 0 && position >= newPosition) {
1938         newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1939         mNewPosition += updatePeriod * newPosCount;
1940     }
1941 
1942     // Cache other fields that will be needed soon
1943     uint32_t sampleRate = mSampleRate;
1944     float speed = mPlaybackRate.mSpeed;
1945     const uint32_t notificationFrames = mNotificationFramesAct;
1946     if (mRefreshRemaining) {
1947         mRefreshRemaining = false;
1948         mRemainingFrames = notificationFrames;
1949         mRetryOnPartialBuffer = false;
1950     }
1951     size_t misalignment = mProxy->getMisalignment();
1952     uint32_t sequence = mSequence;
1953     sp<AudioTrackClientProxy> proxy = mProxy;
1954 
1955     // Determine the number of new loop callback(s) that will be needed, while locked.
1956     int loopCountNotifications = 0;
1957     uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1958 
1959     if (mLoopCount > 0) {
1960         int loopCount;
1961         size_t bufferPosition;
1962         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1963         loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1964         loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1965         mLoopCountNotified = loopCount; // discard any excess notifications
1966     } else if (mLoopCount < 0) {
1967         // FIXME: We're not accurate with notification count and position with infinite looping
1968         // since loopCount from server side will always return -1 (we could decrement it).
1969         size_t bufferPosition = mStaticProxy->getBufferPosition();
1970         loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1971         loopPeriod = mLoopEnd - bufferPosition;
1972     } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1973         size_t bufferPosition = mStaticProxy->getBufferPosition();
1974         loopPeriod = mFrameCount - bufferPosition;
1975     }
1976 
1977     // These fields don't need to be cached, because they are assigned only by set():
1978     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1979     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1980 
1981     mLock.unlock();
1982 
1983     // get anchor time to account for callbacks.
1984     const nsecs_t timeBeforeCallbacks = systemTime();
1985 
1986     if (waitStreamEnd) {
1987         // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
1988         // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
1989         // (and make sure we don't callback for more data while we're stopping).
1990         // This helps with position, marker notifications, and track invalidation.
1991         struct timespec timeout;
1992         timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1993         timeout.tv_nsec = 0;
1994 
1995         status_t status = proxy->waitStreamEndDone(&timeout);
1996         switch (status) {
1997         case NO_ERROR:
1998         case DEAD_OBJECT:
1999         case TIMED_OUT:
2000             if (status != DEAD_OBJECT) {
2001                 // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
2002                 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
2003                 mCbf(EVENT_STREAM_END, mUserData, NULL);
2004             }
2005             {
2006                 AutoMutex lock(mLock);
2007                 // The previously assigned value of waitStreamEnd is no longer valid,
2008                 // since the mutex has been unlocked and either the callback handler
2009                 // or another thread could have re-started the AudioTrack during that time.
2010                 waitStreamEnd = mState == STATE_STOPPING;
2011                 if (waitStreamEnd) {
2012                     mState = STATE_STOPPED;
2013                     mReleased = 0;
2014                 }
2015             }
2016             if (waitStreamEnd && status != DEAD_OBJECT) {
2017                return NS_INACTIVE;
2018             }
2019             break;
2020         }
2021         return 0;
2022     }
2023 
2024     // perform callbacks while unlocked
2025     if (newUnderrun) {
2026         mCbf(EVENT_UNDERRUN, mUserData, NULL);
2027     }
2028     while (loopCountNotifications > 0) {
2029         mCbf(EVENT_LOOP_END, mUserData, NULL);
2030         --loopCountNotifications;
2031     }
2032     if (flags & CBLK_BUFFER_END) {
2033         mCbf(EVENT_BUFFER_END, mUserData, NULL);
2034     }
2035     if (markerReached) {
2036         mCbf(EVENT_MARKER, mUserData, &markerPosition);
2037     }
2038     while (newPosCount > 0) {
2039         size_t temp = newPosition.value(); // FIXME size_t != uint32_t
2040         mCbf(EVENT_NEW_POS, mUserData, &temp);
2041         newPosition += updatePeriod;
2042         newPosCount--;
2043     }
2044 
2045     if (mObservedSequence != sequence) {
2046         mObservedSequence = sequence;
2047         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
2048         // for offloaded tracks, just wait for the upper layers to recreate the track
2049         if (isOffloadedOrDirect()) {
2050             return NS_INACTIVE;
2051         }
2052     }
2053 
2054     // if inactive, then don't run me again until re-started
2055     if (!active) {
2056         return NS_INACTIVE;
2057     }
2058 
2059     // Compute the estimated time until the next timed event (position, markers, loops)
2060     // FIXME only for non-compressed audio
2061     uint32_t minFrames = ~0;
2062     if (!markerReached && position < markerPosition) {
2063         minFrames = (markerPosition - position).value();
2064     }
2065     if (loopPeriod > 0 && loopPeriod < minFrames) {
2066         // loopPeriod is already adjusted for actual position.
2067         minFrames = loopPeriod;
2068     }
2069     if (updatePeriod > 0) {
2070         minFrames = min(minFrames, (newPosition - position).value());
2071     }
2072 
2073     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
2074     static const uint32_t kPoll = 0;
2075     if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
2076         minFrames = kPoll * notificationFrames;
2077     }
2078 
2079     // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
2080     static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
2081     const nsecs_t timeAfterCallbacks = systemTime();
2082 
2083     // Convert frame units to time units
2084     nsecs_t ns = NS_WHENEVER;
2085     if (minFrames != (uint32_t) ~0) {
2086         // AudioFlinger consumption of client data may be irregular when coming out of device
2087         // standby since the kernel buffers require filling. This is throttled to no more than 2x
2088         // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
2089         // half (but no more than half a second) to improve callback accuracy during these temporary
2090         // data surges.
2091         const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
2092         constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
2093         ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
2094         ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
2095         // TODO: Should we warn if the callback time is too long?
2096         if (ns < 0) ns = 0;
2097     }
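    // Worked example (illustrative numbers only): minFrames = 480 at 48000 Hz and speed 1.0
    // estimates 10 ms; halving for the possible 2x throttled catch-up leaves 5 ms, and adding
    // the WAIT_PERIOD_MS fudge of 10 ms gives 15 ms, minus whatever time the callbacks above
    // already consumed.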
2098 
2099     // If not supplying data by EVENT_MORE_DATA or EVENT_CAN_WRITE_MORE_DATA, then we're done
2100     if (mTransfer != TRANSFER_CALLBACK && mTransfer != TRANSFER_SYNC_NOTIF_CALLBACK) {
2101         return ns;
2102     }
2103 
2104     // EVENT_MORE_DATA callback handling.
2105     // Timing for linear pcm audio data formats can be derived directly from the
2106     // buffer fill level.
2107     // Timing for compressed data is not directly available from the buffer fill level,
2108     // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2109     // to return a certain fill level.
2110 
2111     struct timespec timeout;
2112     const struct timespec *requested = &ClientProxy::kForever;
2113     if (ns != NS_WHENEVER) {
2114         timeout.tv_sec = ns / 1000000000LL;
2115         timeout.tv_nsec = ns % 1000000000LL;
2116         ALOGV("%s(%d): timeout %ld.%03d",
2117                 __func__, mPortId, timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2118         requested = &timeout;
2119     }
2120 
2121     size_t writtenFrames = 0;
2122     while (mRemainingFrames > 0) {
2123 
2124         Buffer audioBuffer;
2125         audioBuffer.frameCount = mRemainingFrames;
2126         size_t nonContig;
2127         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2128         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2129                 "%s(%d): obtainBuffer() err=%d frameCount=%zu",
2130                  __func__, mPortId, err, audioBuffer.frameCount);
2131         requested = &ClientProxy::kNonBlocking;
2132         size_t avail = audioBuffer.frameCount + nonContig;
2133         ALOGV("%s(%d): obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2134                 __func__, mPortId, mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2135         if (err != NO_ERROR) {
2136             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2137                     (isOffloaded() && (err == DEAD_OBJECT))) {
2138                 // FIXME bug 25195759
2139                 return 1000000;
2140             }
2141             ALOGE("%s(%d): Error %d obtaining an audio buffer, giving up.",
2142                     __func__, mPortId, err);
2143             return NS_NEVER;
2144         }
2145 
2146         if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2147             mRetryOnPartialBuffer = false;
2148             if (avail < mRemainingFrames) {
2149                 if (ns > 0) { // account for obtain time
2150                     const nsecs_t timeNow = systemTime();
2151                     ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2152                 }
2153 
2154                 // delayNs is first computed by the additional frames required in the buffer.
2155                 nsecs_t delayNs = framesToNanoseconds(
2156                         mRemainingFrames - avail, sampleRate, speed);
2157 
2158                 // afNs is the AudioFlinger mixer period in ns.
2159                 const nsecs_t afNs = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2160 
2161                 // If the AudioTrack is double buffered based on the AudioFlinger mixer period,
2162                 // we may have a race if we wait based on the number of frames desired.
2163                 // This is a possible issue with resampling and AAudio.
2164                 //
2165                 // The granularity of audioflinger processing is one mixer period; if
2166                 // our wait time is less than one mixer period, wait at most half the period.
2167                 if (delayNs < afNs) {
2168                     delayNs = std::min(delayNs, afNs / 2);
2169                 }
2170 
2171                 // adjust our ns wait by delayNs.
2172                 if (ns < 0 /* NS_WHENEVER */ || delayNs < ns) {
2173                     ns = delayNs;
2174                 }
2175                 return ns;
2176             }
2177         }
2178 
2179         size_t reqSize = audioBuffer.size;
2180         if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
2181             // when notifying client it can write more data, pass the total size that can be
2182             // written in the next write() call, since it's not passed through the callback
2183             audioBuffer.size += nonContig;
2184         }
2185         mCbf(mTransfer == TRANSFER_CALLBACK ? EVENT_MORE_DATA : EVENT_CAN_WRITE_MORE_DATA,
2186                 mUserData, &audioBuffer);
2187         size_t writtenSize = audioBuffer.size;
2188 
2189         // Validate on returned size
2190         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2191             ALOGE("%s(%d): EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2192                     __func__, mPortId, reqSize, ssize_t(writtenSize));
2193             return NS_NEVER;
2194         }
2195 
2196         if (writtenSize == 0) {
2197             if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
2198                 // The callback EVENT_CAN_WRITE_MORE_DATA was processed in the JNI of
2199                 // android.media.AudioTrack. The JNI is not using the callback to provide data;
2200                 // it only signals to the Java client that it can provide more data, which
2201                 // this track is ready to accept now.
2202                 // The playback thread will be woken up at the next ::write()
2203                 return NS_WHENEVER;
2204             }
2205             // The callback is done filling buffers
2206             // Keep this thread going to handle timed events and
2207             // still try to get more data in intervals of WAIT_PERIOD_MS
2208             // but don't just loop and block the CPU, so wait
2209 
2210             // mCbf(EVENT_MORE_DATA, ...) might either
2211             // (1) Block until it can fill the buffer, returning 0 size on EOS.
2212             // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2213             // (3) Return 0 size when no data is available, does not wait for more data.
2214             //
2215             // (1) and (2) occurs with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2216             // We try to compute the wait time to avoid a tight sleep-wait cycle,
2217             // especially for case (3).
2218             //
2219             // The decision to support (1) and (2) affect the sizing of mRemainingFrames
2220             // and this loop; whereas for case (3) we could simply check once with the full
2221             // buffer size and skip the loop entirely.
2222 
2223             nsecs_t myns;
2224             if (audio_has_proportional_frames(mFormat)) {
2225                 // time to wait based on buffer occupancy
2226                 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2227                         framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2228                 // audio flinger thread buffer size (TODO: adjust for fast tracks)
2229                 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2230                 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2231                 // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2232                 myns = datans + (afns / 2);
2233             } else {
2234                 // FIXME: This could ping quite a bit if the buffer isn't full.
2235                 // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2236                 myns = kWaitPeriodNs;
2237             }
2238             if (ns > 0) { // account for obtain and callback time
2239                 const nsecs_t timeNow = systemTime();
2240                 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2241             }
2242             if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2243                 ns = myns;
2244             }
2245             return ns;
2246         }
2247 
2248         size_t releasedFrames = writtenSize / mFrameSize;
2249         audioBuffer.frameCount = releasedFrames;
2250         mRemainingFrames -= releasedFrames;
2251         if (misalignment >= releasedFrames) {
2252             misalignment -= releasedFrames;
2253         } else {
2254             misalignment = 0;
2255         }
2256 
2257         releaseBuffer(&audioBuffer);
2258         writtenFrames += releasedFrames;
2259 
2260         // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2261         // if callback doesn't like to accept the full chunk
2262         if (writtenSize < reqSize) {
2263             continue;
2264         }
2265 
2266         // There could be enough non-contiguous frames available to satisfy the remaining request
2267         if (mRemainingFrames <= nonContig) {
2268             continue;
2269         }
2270 
2271 #if 0
2272         // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2273         // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
2274         // that total to a sum == notificationFrames.
2275         if (0 < misalignment && misalignment <= mRemainingFrames) {
2276             mRemainingFrames = misalignment;
2277             return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2278         }
2279 #endif
2280 
2281     }
2282     if (writtenFrames > 0) {
2283         AutoMutex lock(mLock);
2284         mFramesWritten += writtenFrames;
2285     }
2286     mRemainingFrames = notificationFrames;
2287     mRetryOnPartialBuffer = true;
2288 
2289     // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2290     return 0;
2291 }
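
// Note (added for clarity, describing behavior implemented elsewhere in this file): the
// nanosecond value returned above is consumed by AudioTrackThread::threadLoop(), which roughly
// sleeps that long before calling processAudioBuffer() again, pauses the thread on NS_INACTIVE
// until a wake(), and exits the loop on NS_NEVER.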
2292 
2293 status_t AudioTrack::restoreTrack_l(const char *from)
2294 {
2295     ALOGW("%s(%d): dead IAudioTrack, %s, creating a new one from %s()",
2296             __func__, mPortId, isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2297     ++mSequence;
2298 
2299     // refresh the audio configuration cache in this process to make sure we get new
2300     // output parameters and new IAudioFlinger in createTrack_l()
2301     AudioSystem::clearAudioConfigCache();
2302 
2303     if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2304         // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2305         // reconsider enabling for linear PCM encodings when position can be preserved.
2306         return DEAD_OBJECT;
2307     }
2308 
2309     // Save so we can return count since creation.
2310     mUnderrunCountOffset = getUnderrunCount_l();
2311 
2312     // save the old static buffer position
2313     uint32_t staticPosition = 0;
2314     size_t bufferPosition = 0;
2315     int loopCount = 0;
2316     if (mStaticProxy != 0) {
2317         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2318         staticPosition = mStaticProxy->getPosition().unsignedValue();
2319     }
2320 
2321     // See b/74409267. Connecting to a BT A2DP device supporting multiple codecs
2322     // causes a lot of churn on the service side, and it can reject starting
2323     // playback of a previously created track. May also apply to other cases.
2324     const int INITIAL_RETRIES = 3;
2325     int retries = INITIAL_RETRIES;
2326 retry:
2327     if (retries < INITIAL_RETRIES) {
2328         // See the comment for clearAudioConfigCache at the start of the function.
2329         AudioSystem::clearAudioConfigCache();
2330     }
2331     mFlags = mOrigFlags;
2332 
2333     // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2334     // following member variables: mAudioTrack, mCblkMemory and mCblk.
2335     // It will also delete the strong references on previous IAudioTrack and IMemory.
2336     // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2337     status_t result = createTrack_l();
2338 
2339     if (result == NO_ERROR) {
2340         // take the frames that will be lost by track recreation into account in saved position
2341         // For streaming tracks, this is the amount we obtained from the user/client
2342         // (not the number actually consumed at the server - those are already lost).
2343         if (mStaticProxy == 0) {
2344             mPosition = mReleased;
2345         }
2346         // Continue playback from last known position and restore loop.
2347         if (mStaticProxy != 0) {
2348             if (loopCount != 0) {
2349                 mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2350                         mLoopStart, mLoopEnd, loopCount);
2351             } else {
2352                 mStaticProxy->setBufferPosition(bufferPosition);
2353                 if (bufferPosition == mFrameCount) {
2354                     ALOGD("%s(%d): restoring track at end of static buffer", __func__, mPortId);
2355                 }
2356             }
2357         }
2358         // restore volume handler
2359         mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
2360             sp<VolumeShaper::Operation> operationToEnd =
2361                     new VolumeShaper::Operation(shaper.mOperation);
2362             // TODO: Ideally we would restore to the exact xOffset position
2363             // as returned by getVolumeShaperState(), but we don't have that
2364             // information when restoring at the client unless we periodically poll
2365             // the server or create shared memory state.
2366             //
2367             // For now, we simply advance to the end of the VolumeShaper effect
2368             // if it has been started.
2369             if (shaper.isStarted()) {
2370                 operationToEnd->setNormalizedTime(1.f);
2371             }
2372             return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
2373         });
2374 
2375         if (mState == STATE_ACTIVE) {
2376             result = mAudioTrack->start();
2377         }
2378         // server resets to zero so we offset
2379         mFramesWrittenServerOffset =
2380                 mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
2381         mFramesWrittenAtRestore = mFramesWrittenServerOffset;
2382     }
2383     if (result != NO_ERROR) {
2384         ALOGW("%s(%d): failed status %d, retries %d", __func__, mPortId, result, retries);
2385         if (--retries > 0) {
2386             // leave time for an eventual race condition to clear before retrying
2387             usleep(500000);
2388             goto retry;
2389         }
2390         // if no retries left, set invalid bit to force restoring at next occasion
2391         // and avoid inconsistent active state on client and server sides
2392         if (mCblk != nullptr) {
2393             android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
2394         }
2395     }
2396     return result;
2397 }
2398 
2399 Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2400 {
2401     // This is the sole place to read server consumed frames
2402     Modulo<uint32_t> newServer(mProxy->getPosition());
2403     const int32_t delta = (newServer - mServer).signedValue();
2404     // TODO There is controversy about whether there can be "negative jitter" in server position.
2405     //      This should be investigated further, and if possible, it should be addressed.
2406     //      A more definite failure mode is infrequent polling by client.
2407     //      One could call (void)getPosition_l() in releaseBuffer(),
2408     //      so mReleased and mPosition are always lock-step as best possible.
2409     //      That should ensure delta never goes negative for infrequent polling
2410     //      unless the server has more than 2^31 frames in its buffer,
2411     //      in which case the use of uint32_t for these counters has bigger issues.
2412     ALOGE_IF(delta < 0,
2413             "%s(%d): detected illegal retrograde motion by the server: mServer advanced by %d",
2414             __func__, mPortId, delta);
2415     mServer = newServer;
2416     if (delta > 0) { // avoid retrograde
2417         mPosition += delta;
2418     }
2419     return mPosition;
2420 }
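
// Worked example (illustrative numbers only): if mServer previously held 0xFFFFFF00 and the
// proxy now reports 0x00000040, (newServer - mServer).signedValue() is +320, so the 32-bit
// wraparound is handled transparently and mPosition advances by 320 frames.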
2421 
2422 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
2423 {
2424     updateLatency_l();
2425     // applicable for mixing tracks only (not offloaded or direct)
2426     if (mStaticProxy != 0) {
2427         return true; // static tracks do not have issues with buffer sizing.
2428     }
2429     const size_t minFrameCount =
2430             AudioSystem::calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate,
2431                                             sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
2432     const bool allowed = mFrameCount >= minFrameCount;
2433     ALOGD_IF(!allowed,
2434             "%s(%d): denied "
2435             "mAfLatency:%u  mAfFrameCount:%zu  mAfSampleRate:%u  sampleRate:%u  speed:%f "
2436             "mFrameCount:%zu < minFrameCount:%zu",
2437             __func__, mPortId,
2438             mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
2439             mFrameCount, minFrameCount);
2440     return allowed;
2441 }
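
// Note (added for clarity): the minimum frame count grows roughly in proportion to the requested
// speed, so a track created with a small buffer at speed 1.0 may be denied a later request for,
// say, speed 2.0 unless it was created with mMaxRequiredSpeed set accordingly.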
2442 
2443 status_t AudioTrack::setParameters(const String8& keyValuePairs)
2444 {
2445     AutoMutex lock(mLock);
2446     return mAudioTrack->setParameters(keyValuePairs);
2447 }
2448 
2449 status_t AudioTrack::selectPresentation(int presentationId, int programId)
2450 {
2451     AutoMutex lock(mLock);
2452     AudioParameter param = AudioParameter();
2453     param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
2454     param.addInt(String8(AudioParameter::keyProgramId), programId);
2455     ALOGV("%s(%d): PresentationId/ProgramId[%s]",
2456             __func__, mPortId, param.toString().string());
2457 
2458     return mAudioTrack->setParameters(param.toString());
2459 }
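
// Note (added for clarity): presentation/program ids are meaningful for codecs that carry
// multiple presentations in one stream (e.g. AC-4 or MPEG-H); the ids are typically discovered
// from the extractor/decoder metadata and passed through here as-is.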
2460 
2461 VolumeShaper::Status AudioTrack::applyVolumeShaper(
2462         const sp<VolumeShaper::Configuration>& configuration,
2463         const sp<VolumeShaper::Operation>& operation)
2464 {
2465     AutoMutex lock(mLock);
2466     mVolumeHandler->setIdIfNecessary(configuration);
2467     VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
2468 
2469     if (status == DEAD_OBJECT) {
2470         if (restoreTrack_l("applyVolumeShaper") == OK) {
2471             status = mAudioTrack->applyVolumeShaper(configuration, operation);
2472         }
2473     }
2474     if (status >= 0) {
2475         // save VolumeShaper for restore
2476         mVolumeHandler->applyVolumeShaper(configuration, operation);
2477         if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
2478             mVolumeHandler->setStarted();
2479         }
2480     } else {
2481         // warn only if not an expected restore failure.
2482         ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
2483                 "%s(%d): applyVolumeShaper failed: %d", __func__, mPortId, status);
2484     }
2485     return status;
2486 }
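
// Note (added for clarity): a typical use of applyVolumeShaper() is a short automatic fade,
// e.g. ramping the volume down before a pause.  The configuration is also recorded in
// mVolumeHandler above so that restoreTrack_l() can re-apply (and fast-forward) the shaper if
// the IAudioTrack has to be re-created.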
2487 
2488 sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2489 {
2490     AutoMutex lock(mLock);
2491     sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2492     if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2493         if (restoreTrack_l("getVolumeShaperState") == OK) {
2494             state = mAudioTrack->getVolumeShaperState(id);
2495         }
2496     }
2497     return state;
2498 }
2499 
2500 status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2501 {
2502     if (timestamp == nullptr) {
2503         return BAD_VALUE;
2504     }
2505     AutoMutex lock(mLock);
2506     return getTimestamp_l(timestamp);
2507 }
2508 
2509 status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2510 {
2511     if (mCblk->mFlags & CBLK_INVALID) {
2512         const status_t status = restoreTrack_l("getTimestampExtended");
2513         if (status != OK) {
2514             // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2515             // recommending that the track be recreated.
2516             return DEAD_OBJECT;
2517         }
2518     }
2519     // check for offloaded/direct here in case restoring somehow changed those flags.
2520     if (isOffloadedOrDirect_l()) {
2521         return INVALID_OPERATION; // not supported
2522     }
2523     status_t status = mProxy->getTimestamp(timestamp);
2524     LOG_ALWAYS_FATAL_IF(status != OK, "%s(%d): status %d not allowed from proxy getTimestamp",
2525             __func__, mPortId, status);
2526     bool found = false;
2527     timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2528     timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2529     // server side frame offset in case AudioTrack has been restored.
2530     for (int i = ExtendedTimestamp::LOCATION_SERVER;
2531             i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2532         if (timestamp->mTimeNs[i] >= 0) {
2533             // apply server offset (flushed frames are ignored
2534             // so we don't report the jump when the flush occurs).
2535             timestamp->mPosition[i] += mFramesWrittenServerOffset;
2536             found = true;
2537         }
2538     }
2539     return found ? OK : WOULD_BLOCK;
2540 }
2541 
2542 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2543 {
2544     AutoMutex lock(mLock);
2545     return getTimestamp_l(timestamp);
2546 }
2547 
2548 status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
2549 {
2550     bool previousTimestampValid = mPreviousTimestampValid;
2551     // Set false here to cover all the error return cases.
2552     mPreviousTimestampValid = false;
2553 
2554     switch (mState) {
2555     case STATE_ACTIVE:
2556     case STATE_PAUSED:
2557         break; // handle below
2558     case STATE_FLUSHED:
2559     case STATE_STOPPED:
2560         return WOULD_BLOCK;
2561     case STATE_STOPPING:
2562     case STATE_PAUSED_STOPPING:
2563         if (!isOffloaded_l()) {
2564             return INVALID_OPERATION;
2565         }
2566         break; // offloaded tracks handled below
2567     default:
2568         LOG_ALWAYS_FATAL("%s(%d): Invalid mState in getTimestamp(): %d",
2569                __func__, mPortId, mState);
2570         break;
2571     }
2572 
2573     if (mCblk->mFlags & CBLK_INVALID) {
2574         const status_t status = restoreTrack_l("getTimestamp");
2575         if (status != OK) {
2576             // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2577             // recommending that the track be recreated.
2578             return DEAD_OBJECT;
2579         }
2580     }
2581 
2582     // The presented frame count must always lag behind the consumed frame count.
2583     // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
2584 
2585     status_t status;
2586     if (isOffloadedOrDirect_l()) {
2587         // use Binder to get timestamp
2588         status = mAudioTrack->getTimestamp(timestamp);
2589     } else {
2590         // read timestamp from shared memory
2591         ExtendedTimestamp ets;
2592         status = mProxy->getTimestamp(&ets);
2593         if (status == OK) {
2594             ExtendedTimestamp::Location location;
2595             status = ets.getBestTimestamp(&timestamp, &location);
2596 
2597             if (status == OK) {
2598                 updateLatency_l();
2599                 // It is possible that the best location has moved from the kernel to the server.
2600                 // In this case we adjust the position from the previously computed latency.
2601                 if (location == ExtendedTimestamp::LOCATION_SERVER) {
2602                     ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
2603                             "%s(%d): location moved from kernel to server",
2604                             __func__, mPortId);
2605                     // check that the last kernel OK time info exists and the positions
2606                     // are valid (if they predate the current track, the positions may
2607                     // be zero or negative).
2608                     const int64_t frames =
2609                             (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2610                             ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
2611                             ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
2612                             ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
2613                             ?
2614                             int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
2615                                     / 1000)
2616                             :
2617                             (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2618                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
2619                     ALOGV("%s(%d): frame adjustment:%lld  timestamp:%s",
2620                             __func__, mPortId, (long long)frames, ets.toString().c_str());
2621                     if (frames >= ets.mPosition[location]) {
2622                         timestamp.mPosition = 0;
2623                     } else {
2624                         timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
2625                     }
2626                 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
2627                     ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
2628                             "%s(%d): location moved from server to kernel",
2629                             __func__, mPortId);
2630 
2631                     if (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER] ==
2632                             ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL]) {
2633                         // In Q, we don't return errors as an invalid time
2634                         // but instead we leave the last kernel good timestamp alone.
2635                         //
2636                         // If server is identical to kernel, the device data pipeline is idle.
2637                         // A better start time is now.  The retrograde check ensures
2638                         // timestamp monotonicity.
2639                         const int64_t nowNs = systemTime();
2640                         if (!mTimestampStallReported) {
2641                             ALOGD("%s(%d): device stall time corrected using current time %lld",
2642                                     __func__, mPortId, (long long)nowNs);
2643                             mTimestampStallReported = true;
2644                         }
2645                         timestamp.mTime = convertNsToTimespec(nowNs);
2646                     } else {
2647                         mTimestampStallReported = false;
2648                     }
2649                 }
2650 
2651                 // We update the timestamp time even when paused.
2652                 if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
2653                     const int64_t now = systemTime();
2654                     const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
2655                     const int64_t lag =
2656                             (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2657                                 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
2658                             ? int64_t(mAfLatency * 1000000LL)
2659                             : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2660                              - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
2661                              * NANOS_PER_SECOND / mSampleRate;
2662                     const int64_t limit = now - lag; // no earlier than this limit
2663                     if (at < limit) {
2664                         ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
2665                                 (long long)lag, (long long)at, (long long)limit);
2666                         timestamp.mTime = convertNsToTimespec(limit);
2667                     }
2668                 }
2669                 mPreviousLocation = location;
2670             } else {
2671                 // right after AudioTrack is started, one may not find a timestamp
2672                 ALOGV("%s(%d): getBestTimestamp did not find timestamp", __func__, mPortId);
2673             }
2674         }
2675         if (status == INVALID_OPERATION) {
2676             // INVALID_OPERATION occurs when no timestamp has been issued by the server;
2677             // other failures are signaled by a negative time.
2678             // If we come out of FLUSHED or STOPPED where the position is known
2679             // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
2680             // "zero" for NuPlayer).  We don't convert for track restoration as position
2681             // does not reset.
2682             ALOGV("%s(%d): timestamp server offset:%lld restore frames:%lld",
2683                     __func__, mPortId,
2684                     (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
2685             if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
2686                 status = WOULD_BLOCK;
2687             }
2688         }
2689     }
2690     if (status != NO_ERROR) {
2691         ALOGV_IF(status != WOULD_BLOCK, "%s(%d): getTimestamp error:%#x", __func__, mPortId, status);
2692         return status;
2693     }
2694     if (isOffloadedOrDirect_l()) {
2695         if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2696             // use cached paused position in case another offloaded track is running.
2697             timestamp.mPosition = mPausedPosition;
2698             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2699             // TODO: adjust for delay
2700             return NO_ERROR;
2701         }
2702 
2703         // Check whether a pending flush or stop has completed, as those commands may
2704         // be asynchronous, may return shortly before finishing, or may exhibit glitchy behavior.
2705         //
2706         // Originally this showed up as the first timestamp being a continuation of
2707         // the previous song under gapless playback.
2708         // However, we sometimes see zero timestamps, then a glitch of
2709         // the previous song's position, and then correct timestamps afterwards.
2710         if (mStartFromZeroUs != 0 && mSampleRate != 0) {
2711             static const int kTimeJitterUs = 100000; // 100 ms
2712             static const int k1SecUs = 1000000;
2713 
2714             const int64_t timeNow = getNowUs();
2715 
2716             if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
2717                 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2718                 if (timestampTimeUs < mStartFromZeroUs) {
2719                     return WOULD_BLOCK;  // stale timestamp time, occurs before start.
2720                 }
2721                 const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
2722                 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2723                         / ((double)mSampleRate * mPlaybackRate.mSpeed);
2724 
2725                 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2726                     // Verify that the position counter has not advanced faster than the
2727                     // sample rate allows since the start time.  If it has, we may have failed
2728                     // to completely flush or stop the previous playing track.
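                    // Illustrative arithmetic (hypothetical values): 200 ms after start, a
                    // reported position of 48000 frames at 48 kHz and speed 1.0 gives
                    // deltaPositionByUs ~= 1,000,000 us, which exceeds deltaTimeUs (200,000)
                    // plus kTimeJitterUs (100,000), so the glitch is flagged.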
2729                     ALOGW_IF(!mTimestampStartupGlitchReported,
2730                             "%s(%d): startup glitch detected"
2731                             " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2732                             __func__, mPortId,
2733                             (long long)deltaTimeUs, (long long)deltaPositionByUs,
2734                             timestamp.mPosition);
2735                     mTimestampStartupGlitchReported = true;
2736                     if (previousTimestampValid
2737                             && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2738                         timestamp = mPreviousTimestamp;
2739                         mPreviousTimestampValid = true;
2740                         return NO_ERROR;
2741                     }
2742                     return WOULD_BLOCK;
2743                 }
2744                 if (deltaPositionByUs != 0) {
2745                     mStartFromZeroUs = 0; // don't check again, we got valid nonzero position.
2746                 }
2747             } else {
2748                 mStartFromZeroUs = 0; // don't check again, start time expired.
2749             }
2750             mTimestampStartupGlitchReported = false;
2751         }
2752     } else {
2753         // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2754         (void) updateAndGetPosition_l();
2755         // Server consumed (mServer) and presented both use the same server time base,
2756         // and server consumed is always >= presented.
2757         // The delta between these represents the number of frames in the buffer pipeline.
2758         // If this delta is greater than the client position, the presented position is
2759         // still stuck at the starting line (figuratively speaking), waiting for the first
2760         // frame to go by.  So we can't report a valid timestamp yet.
2761         // Note: we explicitly use a non-Modulo comparison here; there is a potential wrap
2762         // issue if mPosition exceeds 32 bits.
2763         // TODO Remove when timestamp is updated to contain pipeline status info.
2764         const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2765         if (pipelineDepthInFrames > 0 /* should be true, but we check anyway */
2766                 && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2767             return INVALID_OPERATION;
2768         }
2769         // Convert timestamp position from server time base to client time base.
2770         // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2771         // But if we change it to 64-bit then this could fail.
2772         // Use Modulo computation here.
2773         timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2774         // Immediately after a call to getPosition_l(), mPosition and
2775         // mServer both represent the same frame position.  mPosition is
2776         // in client's point of view, and mServer is in server's point of
2777         // view.  So the difference between them is the "fudge factor"
2778         // between client and server views due to stop() and/or new
2779         // IAudioTrack.  And timestamp.mPosition is initially in server's
2780         // point of view, so we need to apply the same fudge factor to it.
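        // Illustrative arithmetic (hypothetical values): if mPosition = 1500 (client view
        // carried across a restore), mServer = 1000 (new IAudioTrack), and the server
        // reports a timestamp position of 900, the client-relative position becomes
        // 1500 - 1000 + 900 = 1400.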
2781     }
2782 
2783     // Prevent retrograde motion in timestamp.
2784     // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2785     if (status == NO_ERROR) {
2786         // Fix stale time when checking timestamp right after start().
2787         // The position is at the last reported location but the time can be stale
2788         // due to pause or standby or cold start latency.
2789         //
2790         // We keep advancing the time (but not the position) to ensure that the
2791         // stale value does not confuse the application.
2792         //
2793         // For offload compatibility, use a default lag value here.
2794         // Any time discrepancy between this update and the pause timestamp is handled
2795         // by the retrograde check afterwards.
2796         int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);
2797         const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
2798         const int64_t limitNs = mStartNs - lagNs;
2799         if (currentTimeNanos < limitNs) {
2800             if (!mTimestampStaleTimeReported) {
2801                 ALOGD("%s(%d): stale timestamp time corrected, "
2802                         "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
2803                         __func__, mPortId,
2804                         (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
2805                 mTimestampStaleTimeReported = true;
2806             }
2807             timestamp.mTime = convertNsToTimespec(limitNs);
2808             currentTimeNanos = limitNs;
2809         } else {
2810             mTimestampStaleTimeReported = false;
2811         }
2812 
2813         // previousTimestampValid is set to false when starting after a stop or flush.
2814         if (previousTimestampValid) {
2815             const int64_t previousTimeNanos =
2816                     audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
2817 
2818             // retrograde check
2819             if (currentTimeNanos < previousTimeNanos) {
2820                 if (!mTimestampRetrogradeTimeReported) {
2821                     ALOGW("%s(%d): retrograde timestamp time corrected, %lld < %lld",
2822                             __func__, mPortId,
2823                             (long long)currentTimeNanos, (long long)previousTimeNanos);
2824                     mTimestampRetrogradeTimeReported = true;
2825                 }
2826                 timestamp.mTime = mPreviousTimestamp.mTime;
2827             } else {
2828                 mTimestampRetrogradeTimeReported = false;
2829             }
2830 
2831             // Looking at signed delta will work even when the timestamps
2832             // are wrapping around.
2833             int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2834                     - mPreviousTimestamp.mPosition).signedValue();
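            // Illustrative arithmetic (hypothetical values): a previous position of
            // 0xFFFFFF00 and a current position of 0x00000100 yield a signed delta of
            // +512 frames, so 32-bit wraparound is not mistaken for retrograde motion.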
2835             if (deltaPosition < 0) {
2836                 // Only report once per position instead of spamming the log.
2837                 if (!mTimestampRetrogradePositionReported) {
2838                     ALOGW("%s(%d): retrograde timestamp position corrected, %d = %u - %u",
2839                             __func__, mPortId,
2840                             deltaPosition,
2841                             timestamp.mPosition,
2842                             mPreviousTimestamp.mPosition);
2843                     mTimestampRetrogradePositionReported = true;
2844                 }
2845             } else {
2846                 mTimestampRetrogradePositionReported = false;
2847             }
2848             if (deltaPosition < 0) {
2849                 timestamp.mPosition = mPreviousTimestamp.mPosition;
2850                 deltaPosition = 0;
2851             }
2852 #if 0
2853             // Change #if 0 to #if 1 above to verify the audio timestamp rate.
2854             const int64_t deltaTime =
2855                     audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
2856             if (deltaTime != 0) {
2857                 const int64_t computedSampleRate =
2858                         deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
2859                 ALOGD("%s(%d): computedSampleRate:%u  sampleRate:%u",
2860                         __func__, mPortId,
2861                         (unsigned)computedSampleRate, mSampleRate);
2862             }
2863 #endif
2864         }
2865         mPreviousTimestamp = timestamp;
2866         mPreviousTimestampValid = true;
2867     }
2868 
2869     return status;
2870 }
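
// Illustrative only: a minimal sketch (not part of this implementation) of how a client
// might extrapolate the current playback position between getTimestamp() updates.
// The helper name and the simple linear extrapolation (which ignores playback speed)
// are assumptions for illustration; change #if 0 to #if 1 only for experimentation.
#if 0
static int64_t exampleExtrapolatedPositionFrames(const sp<AudioTrack>& track, uint32_t sampleRate)
{
    AudioTimestamp ts;
    if (track->getTimestamp(ts) != NO_ERROR) {
        return -1; // no valid timestamp yet, e.g. WOULD_BLOCK right after start().
    }
    // Frames presented at ts.mTime plus the frames that should have played since then.
    const int64_t thenNs = audio_utils_ns_from_timespec(&ts.mTime);
    const int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
    const int64_t elapsedFrames = (nowNs - thenNs) * sampleRate / NANOS_PER_SECOND;
    return (int64_t)ts.mPosition + elapsedFrames;
}
#endif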
2871 
2872 String8 AudioTrack::getParameters(const String8& keys)
2873 {
2874     audio_io_handle_t output = getOutput();
2875     if (output != AUDIO_IO_HANDLE_NONE) {
2876         return AudioSystem::getParameters(output, keys);
2877     } else {
2878         return String8::empty();
2879     }
2880 }
2881 
2882 bool AudioTrack::isOffloaded() const
2883 {
2884     AutoMutex lock(mLock);
2885     return isOffloaded_l();
2886 }
2887 
2888 bool AudioTrack::isDirect() const
2889 {
2890     AutoMutex lock(mLock);
2891     return isDirect_l();
2892 }
2893 
2894 bool AudioTrack::isOffloadedOrDirect() const
2895 {
2896     AutoMutex lock(mLock);
2897     return isOffloadedOrDirect_l();
2898 }
2899 
2900 
2901 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2902 {
2903     String8 result;
2904 
2905     result.append(" AudioTrack::dump\n");
2906     result.appendFormat("  id(%d) status(%d), state(%d), session Id(%d), flags(%#x)\n",
2907                         mPortId, mStatus, mState, mSessionId, mFlags);
2908     result.appendFormat("  stream type(%d), left - right volume(%f, %f)\n",
2909                         (mStreamType == AUDIO_STREAM_DEFAULT) ?
2910                             AudioSystem::attributesToStreamType(mAttributes) :
2911                             mStreamType,
2912                         mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2913     result.appendFormat("  format(%#x), channel mask(%#x), channel count(%u)\n",
2914                   mFormat, mChannelMask, mChannelCount);
2915     result.appendFormat("  sample rate(%u), original sample rate(%u), speed(%f)\n",
2916                   mSampleRate, mOriginalSampleRate, mPlaybackRate.mSpeed);
2917     result.appendFormat("  frame count(%zu), req. frame count(%zu)\n",
2918                   mFrameCount, mReqFrameCount);
2919     result.appendFormat("  notif. frame count(%u), req. notif. frame count(%u),"
2920             " req. notif. per buff(%u)\n",
2921              mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq);
2922     result.appendFormat("  latency (%d), selected device Id(%d), routed device Id(%d)\n",
2923                         mLatency, mSelectedDeviceId, mRoutedDeviceId);
2924     result.appendFormat("  output(%d) AF latency (%u) AF frame count(%zu) AF SampleRate(%u)\n",
2925                         mOutput, mAfLatency, mAfFrameCount, mAfSampleRate);
2926     ::write(fd, result.string(), result.size());
2927     return NO_ERROR;
2928 }
2929 
2930 uint32_t AudioTrack::getUnderrunCount() const
2931 {
2932     AutoMutex lock(mLock);
2933     return getUnderrunCount_l();
2934 }
2935 
2936 uint32_t AudioTrack::getUnderrunCount_l() const
2937 {
2938     return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2939 }
2940 
2941 uint32_t AudioTrack::getUnderrunFrames() const
2942 {
2943     AutoMutex lock(mLock);
2944     return mProxy->getUnderrunFrames();
2945 }
2946 
2947 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2948 {
2949 
2950     if (callback == 0) {
2951         ALOGW("%s(%d): adding NULL callback!", __func__, mPortId);
2952         return BAD_VALUE;
2953     }
2954     AutoMutex lock(mLock);
2955     if (mDeviceCallback.unsafe_get() == callback.get()) {
2956         ALOGW("%s(%d): adding same callback!", __func__, mPortId);
2957         return INVALID_OPERATION;
2958     }
2959     status_t status = NO_ERROR;
2960     if (mOutput != AUDIO_IO_HANDLE_NONE) {
2961         if (mDeviceCallback != 0) {
2962             ALOGW("%s(%d): callback already present!", __func__, mPortId);
2963             AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
2964         }
2965         status = AudioSystem::addAudioDeviceCallback(this, mOutput, mPortId);
2966     }
2967     mDeviceCallback = callback;
2968     return status;
2969 }
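
// Illustrative only: a minimal sketch (not part of this implementation) of a client-side
// AudioSystem::AudioDeviceCallback that logs routing changes.  The class name is
// hypothetical; change #if 0 to #if 1 only for experimentation.
#if 0
class ExampleDeviceCallback : public AudioSystem::AudioDeviceCallback {
public:
    void onAudioDeviceUpdate(audio_io_handle_t audioIo, audio_port_handle_t deviceId) override {
        ALOGD("example: output %d now routed to device %d", audioIo, deviceId);
    }
};
// Usage (assuming sp<AudioTrack> track):
//     sp<ExampleDeviceCallback> cb = new ExampleDeviceCallback();
//     track->addAudioDeviceCallback(cb);
#endif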
2970 
2971 status_t AudioTrack::removeAudioDeviceCallback(
2972         const sp<AudioSystem::AudioDeviceCallback>& callback)
2973 {
2974     if (callback == 0) {
2975         ALOGW("%s(%d): removing NULL callback!", __func__, mPortId);
2976         return BAD_VALUE;
2977     }
2978     AutoMutex lock(mLock);
2979     if (mDeviceCallback.unsafe_get() != callback.get()) {
2980         ALOGW("%s(%d): removing different callback!", __func__, mPortId);
2981         return INVALID_OPERATION;
2982     }
2983     mDeviceCallback.clear();
2984     if (mOutput != AUDIO_IO_HANDLE_NONE) {
2985         AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
2986     }
2987     return NO_ERROR;
2988 }
2989 
2990 
2991 void AudioTrack::onAudioDeviceUpdate(audio_io_handle_t audioIo,
2992                                  audio_port_handle_t deviceId)
2993 {
2994     sp<AudioSystem::AudioDeviceCallback> callback;
2995     {
2996         AutoMutex lock(mLock);
2997         if (audioIo != mOutput) {
2998             return;
2999         }
3000         callback = mDeviceCallback.promote();
3001         // Only update the routed device if the track is active; route changes due to other
3002         // use cases are irrelevant for this client.
3003         if (mState == STATE_ACTIVE) {
3004             mRoutedDeviceId = deviceId;
3005         }
3006     }
3007 
3008     if (callback.get() != nullptr) {
3009         callback->onAudioDeviceUpdate(mOutput, mRoutedDeviceId);
3010     }
3011 }
3012 
3013 status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
3014 {
3015     if (msec == nullptr ||
3016             (location != ExtendedTimestamp::LOCATION_SERVER
3017                     && location != ExtendedTimestamp::LOCATION_KERNEL)) {
3018         return BAD_VALUE;
3019     }
3020     AutoMutex lock(mLock);
3021     // Inclusive of offloaded and direct tracks.
3022     //
3023     // It would be possible, but is not enabled, to allow duration computation for
3024     // non-PCM audio_has_proportional_frames() formats because they currently drain
3025     // at a rate equivalent to the PCM sample rate * frame size.
3026     if (!isPurePcmData_l()) {
3027         return INVALID_OPERATION;
3028     }
3029     ExtendedTimestamp ets;
3030     if (getTimestamp_l(&ets) == OK
3031             && ets.mTimeNs[location] > 0) {
3032         int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
3033                 - ets.mPosition[location];
3034         if (diff < 0) {
3035             *msec = 0;
3036         } else {
3037             // ms is the playback time of the remaining frames
3038             int64_t ms = (int64_t)((double)diff * 1000 /
3039                     ((double)mSampleRate * mPlaybackRate.mSpeed));
3040             // clockdiff is the timestamp age (negative)
3041             int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
3042                     ets.mTimeNs[location]
3043                     + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
3044                     - systemTime(SYSTEM_TIME_MONOTONIC);
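            // Illustrative arithmetic (hypothetical values): diff = 2400 frames at 48 kHz
            // and speed 1.0 gives ms = 50; a timestamp that is 10 ms old gives
            // clockdiff ~= -10,000,000 ns, so the reported pending duration is ~40 ms.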
3045 
3046             //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
3047             static const int NANOS_PER_MILLIS = 1000000;
3048             *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
3049         }
3050         return NO_ERROR;
3051     }
3052     if (location != ExtendedTimestamp::LOCATION_SERVER) {
3053         return INVALID_OPERATION; // LOCATION_KERNEL is not available
3054     }
3055     // use server position directly (offloaded and direct arrive here)
3056     updateAndGetPosition_l();
3057     int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
3058     *msec = (diff <= 0) ? 0
3059             : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
3060     return NO_ERROR;
3061 }
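
// Illustrative only: a minimal sketch (not part of this implementation) of how a caller
// might poll pendingDuration() to wait for the pipeline to drain before rerouting or
// tearing down.  The polling policy is an assumption for illustration; usleep() would
// require <unistd.h>.  Change #if 0 to #if 1 only for experimentation.
#if 0
static void exampleWaitForDrain(const sp<AudioTrack>& track)
{
    int32_t msec = 0;
    while (track->pendingDuration(&msec, ExtendedTimestamp::LOCATION_SERVER) == NO_ERROR
            && msec > 0) {
        usleep(msec * 1000); // sleep roughly until the server position should have caught up.
    }
}
#endif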
3062 
3063 bool AudioTrack::hasStarted()
3064 {
3065     AutoMutex lock(mLock);
3066     switch (mState) {
3067     case STATE_STOPPED:
3068         if (isOffloadedOrDirect_l()) {
3069             // check if we have started in the past to return true.
3070             return mStartFromZeroUs > 0;
3071         }
3072         // A normal audio track may still be draining, so
3073         // check if the stream has ended.  This covers fasttrack position
3074         // instability and start/stop without any data written.
3075         if (mProxy->getStreamEndDone()) {
3076             return true;
3077         }
3078         FALLTHROUGH_INTENDED;
3079     case STATE_ACTIVE:
3080     case STATE_STOPPING:
3081         break;
3082     case STATE_PAUSED:
3083     case STATE_PAUSED_STOPPING:
3084     case STATE_FLUSHED:
3085         return false;  // we're not active
3086     default:
3087         LOG_ALWAYS_FATAL("%s(%d): Invalid mState in hasStarted(): %d", __func__, mPortId, mState);
3088         break;
3089     }
3090 
3091     // wait indicates whether we need to wait for a timestamp.
3092     // This is determined conservatively: if we encounter an unexpected error
3093     // then we will not wait.
3094     bool wait = false;
3095     if (isOffloadedOrDirect_l()) {
3096         AudioTimestamp ts;
3097         status_t status = getTimestamp_l(ts);
3098         if (status == WOULD_BLOCK) {
3099             wait = true;
3100         } else if (status == OK) {
3101             wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
3102         }
3103         ALOGV("%s(%d): hasStarted wait:%d  ts:%u  start position:%lld",
3104                 __func__, mPortId,
3105                 (int)wait,
3106                 ts.mPosition,
3107                 (long long)mStartTs.mPosition);
3108     } else {
3109         int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
3110         ExtendedTimestamp ets;
3111         status_t status = getTimestamp_l(&ets);
3112         if (status == WOULD_BLOCK) {  // no SERVER or KERNEL frame info in ets
3113             wait = true;
3114         } else if (status == OK) {
3115             for (location = ExtendedTimestamp::LOCATION_KERNEL;
3116                     location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
3117                 if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
3118                     continue;
3119                 }
3120                 wait = ets.mPosition[location] == 0
3121                         || ets.mPosition[location] == mStartEts.mPosition[location];
3122                 break;
3123             }
3124         }
3125         ALOGV("%s(%d): hasStarted wait:%d  ets:%lld  start position:%lld",
3126                 __func__, mPortId,
3127                 (int)wait,
3128                 (long long)ets.mPosition[location],
3129                 (long long)mStartEts.mPosition[location]);
3130     }
3131     return !wait;
3132 }
3133 
3134 // =========================================================================
3135 
3136 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
3137 {
3138     sp<AudioTrack> audioTrack = mAudioTrack.promote();
3139     if (audioTrack != 0) {
3140         AutoMutex lock(audioTrack->mLock);
3141         audioTrack->mProxy->binderDied();
3142     }
3143 }
3144 
3145 // =========================================================================
3146 
3147 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver)
3148     : Thread(true /* bCanCallJava */)  // binder recursion on restoreTrack_l() may call Java.
3149     , mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
3150       mIgnoreNextPausedInt(false)
3151 {
3152 }
3153 
3154 AudioTrack::AudioTrackThread::~AudioTrackThread()
3155 {
3156 }
3157 
3158 bool AudioTrack::AudioTrackThread::threadLoop()
3159 {
3160     {
3161         AutoMutex _l(mMyLock);
3162         if (mPaused) {
3163             // TODO check return value and handle or log
3164             mMyCond.wait(mMyLock);
3165             // caller will check for exitPending()
3166             return true;
3167         }
3168         if (mIgnoreNextPausedInt) {
3169             mIgnoreNextPausedInt = false;
3170             mPausedInt = false;
3171         }
3172         if (mPausedInt) {
3173             // TODO use futex instead of condition, for event flag "or"
3174             if (mPausedNs > 0) {
3175                 // TODO check return value and handle or log
3176                 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
3177             } else {
3178                 // TODO check return value and handle or log
3179                 mMyCond.wait(mMyLock);
3180             }
3181             mPausedInt = false;
3182             return true;
3183         }
3184     }
3185     if (exitPending()) {
3186         return false;
3187     }
3188     nsecs_t ns = mReceiver.processAudioBuffer();
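    // processAudioBuffer() returns a wait hint that the switch below interprets:
    //   0           - run the loop again immediately,
    //   NS_INACTIVE - pause internally until resume(),
    //   NS_NEVER    - exit the thread loop,
    //   NS_WHENEVER - event driven; sleep until wake() is called,
    //   other > 0   - sleep up to that many nanoseconds (wake() may shorten the wait).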
3189     switch (ns) {
3190     case 0:
3191         return true;
3192     case NS_INACTIVE:
3193         pauseInternal();
3194         return true;
3195     case NS_NEVER:
3196         return false;
3197     case NS_WHENEVER:
3198         // Event driven: call wake() when callback notification conditions change.
3199         ns = INT64_MAX;
3200         FALLTHROUGH_INTENDED;
3201     default:
3202         LOG_ALWAYS_FATAL_IF(ns < 0, "%s(%d): processAudioBuffer() returned %lld",
3203                 __func__, mReceiver.mPortId, (long long)ns);
3204         pauseInternal(ns);
3205         return true;
3206     }
3207 }
3208 
3209 void AudioTrack::AudioTrackThread::requestExit()
3210 {
3211     // must be in this order to avoid a race condition
3212     Thread::requestExit();
3213     resume();
3214 }
3215 
3216 void AudioTrack::AudioTrackThread::pause()
3217 {
3218     AutoMutex _l(mMyLock);
3219     mPaused = true;
3220 }
3221 
3222 void AudioTrack::AudioTrackThread::resume()
3223 {
3224     AutoMutex _l(mMyLock);
3225     mIgnoreNextPausedInt = true;
3226     if (mPaused || mPausedInt) {
3227         mPaused = false;
3228         mPausedInt = false;
3229         mMyCond.signal();
3230     }
3231 }
3232 
3233 void AudioTrack::AudioTrackThread::wake()
3234 {
3235     AutoMutex _l(mMyLock);
3236     if (!mPaused) {
3237         // wake() might be called while servicing a callback - ignore the next
3238         // pause time and call processAudioBuffer.
3239         mIgnoreNextPausedInt = true;
3240         if (mPausedInt && mPausedNs > 0) {
3241             // audio track is active and internally paused with timeout.
3242             mPausedInt = false;
3243             mMyCond.signal();
3244         }
3245     }
3246 }
3247 
3248 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3249 {
3250     AutoMutex _l(mMyLock);
3251     mPausedInt = true;
3252     mPausedNs = ns;
3253 }
3254 
3255 } // namespace android
3256