1/*
2**
3** Copyright 2012, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9** http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19#define LOG_TAG "AudioFlinger"
20//#define LOG_NDEBUG 0
21
22#include <math.h>
23#include <cutils/compiler.h>
24#include <utils/Log.h>
25
26#include <private/media/AudioTrackShared.h>
27
28#include <common_time/cc_helper.h>
29#include <common_time/local_clock.h>
30
31#include "AudioMixer.h"
32#include "AudioFlinger.h"
33#include "ServiceUtilities.h"
34
35// ----------------------------------------------------------------------------
36
37// Note: the following macro is used for extremely verbose logging messages. In
38// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
39// 0; but one side effect of this is to turn on all LOGVs as well. Some messages
40// are so verbose that we want to suppress them even when we have ALOG_ASSERT
41// turned on. Do not uncomment the #define below unless you really know what you
42// are doing and want to see all of the extremely verbose messages.
43//#define VERY_VERY_VERBOSE_LOGGING
44#ifdef VERY_VERY_VERBOSE_LOGGING
45#define ALOGVV ALOGV
46#else
47#define ALOGVV(a...) do { } while(0)
48#endif
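// Illustrative usage (not part of the original file): because ALOGVV expands to
// ALOGV only when VERY_VERY_VERBOSE_LOGGING is defined, and to a no-op otherwise,
// it can stay in per-buffer code paths with no cost in normal builds, e.g.:
//   ALOGVV("mixed %u frames for track name %d", frameCount, name);
// (frameCount and name are hypothetical locals, shown only for illustration.)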
49
50namespace android {
51
52// ----------------------------------------------------------------------------
53// TrackBase
54// ----------------------------------------------------------------------------
55
56// TrackBase constructor must be called with AudioFlinger::mLock held
57AudioFlinger::ThreadBase::TrackBase::TrackBase(
58 ThreadBase *thread,
59 const sp<Client>& client,
60 uint32_t sampleRate,
61 audio_format_t format,
62 audio_channel_mask_t channelMask,
63 size_t frameCount,
64 const sp<IMemory>& sharedBuffer,
65 int sessionId)
66 : RefBase(),
67 mThread(thread),
68 mClient(client),
69 mCblk(NULL),
70 // mBuffer
71 // mBufferEnd
72 mStepCount(0),
73 mState(IDLE),
74 mSampleRate(sampleRate),
75 mFormat(format),
76 mChannelMask(channelMask),
77 mChannelCount(popcount(channelMask)),
78 mFrameSize(audio_is_linear_pcm(format) ?
79 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
80 mFrameCount(frameCount),
81 mStepServerFailed(false),
82 mSessionId(sessionId)
83{
84 // client == 0 implies sharedBuffer == 0
85 ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
86
87 ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
88 sharedBuffer->size());
89
90 // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
91 size_t size = sizeof(audio_track_cblk_t);
92 size_t bufferSize = frameCount * mFrameSize;
93 if (sharedBuffer == 0) {
94 size += bufferSize;
95 }
96
97 if (client != 0) {
98 mCblkMemory = client->heap()->allocate(size);
99 if (mCblkMemory != 0) {
100 mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
101 // can't assume mCblk != NULL
102 } else {
103 ALOGE("not enough memory for AudioTrack size=%u", size);
104 client->heap()->dump("AudioTrack");
105 return;
106 }
107 } else {
108 mCblk = (audio_track_cblk_t *)(new uint8_t[size]);
109 // assume mCblk != NULL
110 }
111
112 // construct the shared structure in-place.
113 if (mCblk != NULL) {
114 new(mCblk) audio_track_cblk_t();
115 // clear all buffers
116 mCblk->frameCount_ = frameCount;
117 mCblk->sampleRate = sampleRate;
118// uncomment the following lines to quickly test 32-bit wraparound
119// mCblk->user = 0xffff0000;
120// mCblk->server = 0xffff0000;
121// mCblk->userBase = 0xffff0000;
122// mCblk->serverBase = 0xffff0000;
123 if (sharedBuffer == 0) {
124 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
125 memset(mBuffer, 0, bufferSize);
126 // Force underrun condition to avoid false underrun callback until first data is
127 // written to buffer (other flags are cleared)
128 mCblk->flags = CBLK_UNDERRUN;
129 } else {
130 mBuffer = sharedBuffer->pointer();
131 }
132 mBufferEnd = (uint8_t *)mBuffer + bufferSize;
133 }
134}
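// Worked example of the streaming-mode layout set up above (values illustrative):
// for 16-bit stereo PCM, mFrameSize = 2 channels * 2 bytes = 4 bytes, so with
// frameCount = 1024 a single shared-memory allocation of
//   sizeof(audio_track_cblk_t) + 1024 * 4
// bytes is made, the control block is placement-constructed at its start, and
//   mBuffer    = (char *)mCblk + sizeof(audio_track_cblk_t)
//   mBufferEnd = (uint8_t *)mBuffer + 4096
// so the audio data ring immediately follows the control block.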
135
136AudioFlinger::ThreadBase::TrackBase::~TrackBase()
137{
138 if (mCblk != NULL) {
139 if (mClient == 0) {
140 mCblk->~audio_track_cblk_t(); delete[] (uint8_t *)mCblk; // matches new uint8_t[] in the constructor
141 } else {
142 mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
143 }
144 }
145 mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
146 if (mClient != 0) {
147 // Client destructor must run with AudioFlinger mutex locked
148 Mutex::Autolock _l(mClient->audioFlinger()->mLock);
149 // If the client's reference count drops to zero, the associated destructor
150 // must run with AudioFlinger lock held. Thus the explicit clear() rather than
151 // relying on the automatic clear() at end of scope.
152 mClient.clear();
153 }
154}
155
156// AudioBufferProvider interface
157// getNextBuffer() = 0;
158// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
159void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
160{
161 buffer->raw = NULL;
162 mStepCount = buffer->frameCount;
163 // FIXME See note at getNextBuffer()
164 (void) step(); // ignore return value of step()
165 buffer->frameCount = 0;
166}
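// Sketch of how a consumer drives this AudioBufferProvider interface (the real
// callers are the mixer and record threads; 'provider', 'framesWanted' and 'pts'
// are hypothetical names used only for illustration):
//   AudioBufferProvider::Buffer buf;
//   buf.frameCount = framesWanted;                     // request size in frames
//   if (provider->getNextBuffer(&buf, pts) == NO_ERROR) {
//       // ... consume buf.frameCount frames starting at buf.raw ...
//       provider->releaseBuffer(&buf);                 // advances the server index via step()
//   }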
167
168bool AudioFlinger::ThreadBase::TrackBase::step() {
169 bool result;
170 audio_track_cblk_t* cblk = this->cblk();
171
172 result = cblk->stepServer(mStepCount, mFrameCount, isOut());
173 if (!result) {
174 ALOGV("stepServer failed acquiring cblk mutex");
175 mStepServerFailed = true;
176 }
177 return result;
178}
179
180void AudioFlinger::ThreadBase::TrackBase::reset() {
181 audio_track_cblk_t* cblk = this->cblk();
182
183 cblk->user = 0;
184 cblk->server = 0;
185 cblk->userBase = 0;
186 cblk->serverBase = 0;
187 mStepServerFailed = false;
188 ALOGV("TrackBase::reset");
189}
190
191uint32_t AudioFlinger::ThreadBase::TrackBase::sampleRate() const {
192 return mCblk->sampleRate;
193}
194
195void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
196 audio_track_cblk_t* cblk = this->cblk();
197 int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase) * mFrameSize;
198 int8_t *bufferEnd = bufferStart + frames * mFrameSize;
199
200 // Check validity of returned pointer in case the track control block has been corrupted.
201 ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd),
202 "TrackBase::getBuffer buffer out of range:\n"
203 " start: %p, end %p , mBuffer %p mBufferEnd %p\n"
204 " server %u, serverBase %u, user %u, userBase %u, frameSize %u",
205 bufferStart, bufferEnd, mBuffer, mBufferEnd,
206 cblk->server, cblk->serverBase, cblk->user, cblk->userBase, mFrameSize);
207
208 return bufferStart;
209}
210
211status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
212{
213 mSyncEvents.add(event);
214 return NO_ERROR;
215}
216
217// ----------------------------------------------------------------------------
218// Playback
219// ----------------------------------------------------------------------------
220
221AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
222 : BnAudioTrack(),
223 mTrack(track)
224{
225}
226
227AudioFlinger::TrackHandle::~TrackHandle() {
228 // Just stop the track on deletion; associated resources
229 // will be freed from the main thread once all pending buffers have
230 // been played. If the track is not in the active track list,
231 // we free everything now...
232 mTrack->destroy();
233}
234
235sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
236 return mTrack->getCblk();
237}
238
239status_t AudioFlinger::TrackHandle::start() {
240 return mTrack->start();
241}
242
243void AudioFlinger::TrackHandle::stop() {
244 mTrack->stop();
245}
246
247void AudioFlinger::TrackHandle::flush() {
248 mTrack->flush();
249}
250
251void AudioFlinger::TrackHandle::pause() {
252 mTrack->pause();
253}
254
255status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
256{
257 return mTrack->attachAuxEffect(EffectId);
258}
259
260status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
261 sp<IMemory>* buffer) {
262 if (!mTrack->isTimedTrack())
263 return INVALID_OPERATION;
264
265 PlaybackThread::TimedTrack* tt =
266 reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
267 return tt->allocateTimedBuffer(size, buffer);
268}
269
270status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
271 int64_t pts) {
272 if (!mTrack->isTimedTrack())
273 return INVALID_OPERATION;
274
275 PlaybackThread::TimedTrack* tt =
276 reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
277 return tt->queueTimedBuffer(buffer, pts);
278}
279
280status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
281 const LinearTransform& xform, int target) {
282
283 if (!mTrack->isTimedTrack())
284 return INVALID_OPERATION;
285
286 PlaybackThread::TimedTrack* tt =
287 reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
288 return tt->setMediaTimeTransform(
289 xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
290}
291
292status_t AudioFlinger::TrackHandle::onTransact(
293 uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
294{
295 return BnAudioTrack::onTransact(code, data, reply, flags);
296}
297
298// ----------------------------------------------------------------------------
299
300// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
301AudioFlinger::PlaybackThread::Track::Track(
302 PlaybackThread *thread,
303 const sp<Client>& client,
304 audio_stream_type_t streamType,
305 uint32_t sampleRate,
306 audio_format_t format,
307 audio_channel_mask_t channelMask,
308 size_t frameCount,
309 const sp<IMemory>& sharedBuffer,
310 int sessionId,
311 IAudioFlinger::track_flags_t flags)
312 : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
313 sessionId),
314 mFillingUpStatus(FS_INVALID),
315 // mRetryCount initialized later when needed
316 mSharedBuffer(sharedBuffer),
317 mStreamType(streamType),
318 mName(-1), // see note below
319 mMainBuffer(thread->mixBuffer()),
320 mAuxBuffer(NULL),
321 mAuxEffectId(0), mHasVolumeController(false),
322 mPresentationCompleteFrames(0),
323 mFlags(flags),
324 mFastIndex(-1),
325 mUnderrunCount(0),
326 mCachedVolume(1.0),
327 mIsInvalid(false)
328{
329 if (mCblk != NULL) {
330 // to avoid leaking a track name, do not allocate one unless there is an mCblk
331 mName = thread->getTrackName_l(channelMask, sessionId);
332 mCblk->mName = mName;
333 if (mName < 0) {
334 ALOGE("no more track names available");
335 return;
336 }
337 // only allocate a fast track index if we were able to allocate a normal track name
338 if (flags & IAudioFlinger::TRACK_FAST) {
339 ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
340 int i = __builtin_ctz(thread->mFastTrackAvailMask);
341 ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
342 // FIXME This is too eager. We allocate a fast track index before the
343 // fast track becomes active. Since fast tracks are a scarce resource,
344 // this means we are potentially denying other more important fast tracks from
345 // being created. It would be better to allocate the index dynamically.
346 mFastIndex = i;
347 mCblk->mName = i;
348 // Read the initial underruns because this field is never cleared by the fast mixer
349 mObservedUnderruns = thread->getFastTrackUnderruns(i);
350 thread->mFastTrackAvailMask &= ~(1 << i);
351 }
352 }
353 ALOGV("Track constructor name %d, calling pid %d", mName,
354 IPCThreadState::self()->getCallingPid());
355}
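// Illustration of the fast track slot allocation above (mask value is an example):
// mFastTrackAvailMask has one bit set per free slot, and __builtin_ctz() returns
// the index of the lowest set bit.  With slots 2 and 5 free the mask is 0x24, so
//   int i = __builtin_ctz(0x24);                 // i == 2, the lowest free slot
//   thread->mFastTrackAvailMask &= ~(1 << i);    // mask becomes 0x20, slot 2 in use
// Slot 0 is never handed out this way (hence the assert 0 < i); it carries the
// normal mixer's sub-mix.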
356
357AudioFlinger::PlaybackThread::Track::~Track()
358{
359 ALOGV("PlaybackThread::Track destructor");
360}
361
362void AudioFlinger::PlaybackThread::Track::destroy()
363{
364 // NOTE: destroyTrack_l() can remove a strong reference to this Track
365 // by removing it from the mTracks vector, so there is a risk that this Track's
366 // destructor is called. As the destructor needs to lock mLock,
367 // we must acquire a strong reference on this Track before locking mLock
368 // here so that the destructor is called only when exiting this function.
369 // On the other hand, as long as Track::destroy() is only called by
370 // TrackHandle destructor, the TrackHandle still holds a strong ref on
371 // this Track with its member mTrack.
372 sp<Track> keep(this);
373 { // scope for mLock
374 sp<ThreadBase> thread = mThread.promote();
375 if (thread != 0) {
376 if (!isOutputTrack()) {
377 if (mState == ACTIVE || mState == RESUMING) {
378 AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
379
380#ifdef ADD_BATTERY_DATA
381 // to track the speaker usage
382 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
383#endif
384 }
385 AudioSystem::releaseOutput(thread->id());
386 }
387 Mutex::Autolock _l(thread->mLock);
388 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
389 playbackThread->destroyTrack_l(this);
390 }
391 }
392}
393
394/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
395{
396 result.append(" Name Client Type Fmt Chn mask Session StpCnt fCount S F SRate "
397 "L dB R dB Server User Main buf Aux Buf Flags Underruns\n");
398}
399
400void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
401{
402 uint32_t vlr = mCblk->getVolumeLR();
403 if (isFastTrack()) {
404 sprintf(buffer, " F %2d", mFastIndex);
405 } else {
406 sprintf(buffer, " %4d", mName - AudioMixer::TRACK0);
407 }
408 track_state state = mState;
409 char stateChar;
410 switch (state) {
411 case IDLE:
412 stateChar = 'I';
413 break;
414 case TERMINATED:
415 stateChar = 'T';
416 break;
417 case STOPPING_1:
418 stateChar = 's';
419 break;
420 case STOPPING_2:
421 stateChar = '5';
422 break;
423 case STOPPED:
424 stateChar = 'S';
425 break;
426 case RESUMING:
427 stateChar = 'R';
428 break;
429 case ACTIVE:
430 stateChar = 'A';
431 break;
432 case PAUSING:
433 stateChar = 'p';
434 break;
435 case PAUSED:
436 stateChar = 'P';
437 break;
438 case FLUSHED:
439 stateChar = 'F';
440 break;
441 default:
442 stateChar = '?';
443 break;
444 }
445 char nowInUnderrun;
446 switch (mObservedUnderruns.mBitFields.mMostRecent) {
447 case UNDERRUN_FULL:
448 nowInUnderrun = ' ';
449 break;
450 case UNDERRUN_PARTIAL:
451 nowInUnderrun = '<';
452 break;
453 case UNDERRUN_EMPTY:
454 nowInUnderrun = '*';
455 break;
456 default:
457 nowInUnderrun = '?';
458 break;
459 }
460 snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %6u %1c %1d %5u %5.2g %5.2g "
461 "0x%08x 0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
462 (mClient == 0) ? getpid_cached : mClient->pid(),
463 mStreamType,
464 mFormat,
465 mChannelMask,
466 mSessionId,
467 mStepCount,
468 mFrameCount,
469 stateChar,
470 mFillingUpStatus,
471 mCblk->sampleRate,
472 20.0 * log10((vlr & 0xFFFF) / 4096.0),
473 20.0 * log10((vlr >> 16) / 4096.0),
474 mCblk->server,
475 mCblk->user,
476 (int)mMainBuffer,
477 (int)mAuxBuffer,
478 mCblk->flags,
479 mUnderrunCount,
480 nowInUnderrun);
481}
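// Worked example of the gain-to-dB conversion used in dump() above: the left and
// right gains are 4.12 fixed point packed into vlr, with 0x1000 (4096) meaning
// unity, so
//   vl = 4096  ->  20 * log10(4096 / 4096.0) =  0.00 dB
//   vl = 2048  ->  20 * log10(2048 / 4096.0) ~ -6.02 dB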
482
483// AudioBufferProvider interface
484status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
485 AudioBufferProvider::Buffer* buffer, int64_t pts)
486{
487 audio_track_cblk_t* cblk = this->cblk();
488 uint32_t framesReady;
489 uint32_t framesReq = buffer->frameCount;
490
491 // Check if last stepServer failed, try to step now
492 if (mStepServerFailed) {
493 // FIXME When called by fast mixer, this takes a mutex with tryLock().
494 // Since the fast mixer is higher priority than client callback thread,
495 // it does not result in priority inversion for client.
496 // But a non-blocking solution would be preferable to avoid
497 // fast mixer being unable to tryLock(), and
498 // to avoid the extra context switches if the client wakes up,
499 // discovers the mutex is locked, then has to wait for fast mixer to unlock.
500 if (!step()) goto getNextBuffer_exit;
501 ALOGV("stepServer recovered");
502 mStepServerFailed = false;
503 }
504
505 // FIXME Same as above
506 framesReady = cblk->framesReadyOut();
507
508 if (CC_LIKELY(framesReady)) {
509 uint32_t s = cblk->server;
510 uint32_t bufferEnd = cblk->serverBase + mFrameCount;
511
512 bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
513 if (framesReq > framesReady) {
514 framesReq = framesReady;
515 }
516 if (framesReq > bufferEnd - s) {
517 framesReq = bufferEnd - s;
518 }
519
520 buffer->raw = getBuffer(s, framesReq);
521 buffer->frameCount = framesReq;
522 return NO_ERROR;
523 }
524
525getNextBuffer_exit:
526 buffer->raw = NULL;
527 buffer->frameCount = 0;
528 ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
529 return NOT_ENOUGH_DATA;
530}
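// Worked example of the clamping in getNextBuffer() above (numbers illustrative):
// with mFrameCount = 1024, cblk->serverBase = 2048, cblk->server = 3000,
// framesReady = 200 and a request for 256 frames:
//   bufferEnd = 2048 + 1024           = 3072
//   framesReq = min(256, 200)         = 200   // never more than is ready
//   framesReq = min(200, 3072 - 3000) = 72    // never past the end of the ring
// so only 72 contiguous frames are returned this call; the caller gets the rest
// on a later call, after releaseBuffer()/step() advance the server index.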
531
532// Note that framesReady() takes a mutex on the control block using tryLock().
533// This could result in priority inversion if framesReady() is called by the normal mixer,
534// as the normal mixer thread runs at lower
535// priority than the client's callback thread: there is a short window within framesReady()
536// during which the normal mixer could be preempted, and the client callback would block.
537// Another problem can occur if framesReady() is called by the fast mixer:
538// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
539// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
540size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
541 return mCblk->framesReadyOut();
542}
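// A minimal sketch of the kind of non-blocking replacement the FIXME above asks
// for: a single-writer/single-reader index pair that lets framesReady() be
// computed from two atomic loads, with no mutex to invert on.  This is only an
// illustration (hypothetical type), not the actual AudioTrackShared control block;
// android_atomic_acquire_load() is from <cutils/atomic.h>.
//   struct NonBlockingFifoIndices {
//       volatile int32_t mRear;    // frames written; advanced only by the producer
//       volatile int32_t mFront;   // frames read; advanced only by the consumer
//       size_t framesReady() const {
//           return (uint32_t)(android_atomic_acquire_load(&mRear) -
//                             android_atomic_acquire_load(&mFront));
//       }
//   };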
543
544// Don't call for fast tracks; the framesReady() could result in priority inversion
545bool AudioFlinger::PlaybackThread::Track::isReady() const {
546 if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
547 return true;
548 }
549
550 if (framesReady() >= mFrameCount ||
551 (mCblk->flags & CBLK_FORCEREADY)) {
552 mFillingUpStatus = FS_FILLED;
553 android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
554 return true;
555 }
556 return false;
557}
558
559status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,
560 int triggerSession)
561{
562 status_t status = NO_ERROR;
563 ALOGV("start(%d), calling pid %d session %d",
564 mName, IPCThreadState::self()->getCallingPid(), mSessionId);
565
566 sp<ThreadBase> thread = mThread.promote();
567 if (thread != 0) {
568 Mutex::Autolock _l(thread->mLock);
569 track_state state = mState;
570 // here the track could be either new, or restarted
571 // in both cases "unstop" the track
572 if (mState == PAUSED) {
573 mState = TrackBase::RESUMING;
574 ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
575 } else {
576 mState = TrackBase::ACTIVE;
577 ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
578 }
579
580 if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
581 thread->mLock.unlock();
582 status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);
583 thread->mLock.lock();
584
585#ifdef ADD_BATTERY_DATA
586 // to track the speaker usage
587 if (status == NO_ERROR) {
588 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
589 }
590#endif
591 }
592 if (status == NO_ERROR) {
593 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
594 playbackThread->addTrack_l(this);
595 } else {
596 mState = state;
597 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
598 }
599 } else {
600 status = BAD_VALUE;
601 }
602 return status;
603}
604
605void AudioFlinger::PlaybackThread::Track::stop()
606{
607 ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
608 sp<ThreadBase> thread = mThread.promote();
609 if (thread != 0) {
610 Mutex::Autolock _l(thread->mLock);
611 track_state state = mState;
612 if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
613 // If the track is not active (PAUSED and buffers full), flush buffers
614 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
615 if (playbackThread->mActiveTracks.indexOf(this) < 0) {
616 reset();
617 mState = STOPPED;
618 } else if (!isFastTrack()) {
619 mState = STOPPED;
620 } else {
621 // prepareTracks_l() will set state to STOPPING_2 after next underrun,
622 // and then to STOPPED and reset() when presentation is complete
623 mState = STOPPING_1;
624 }
625 ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
626 playbackThread);
627 }
628 if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
629 thread->mLock.unlock();
630 AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
631 thread->mLock.lock();
632
633#ifdef ADD_BATTERY_DATA
634 // to track the speaker usage
635 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
636#endif
637 }
638 }
639}
640
641void AudioFlinger::PlaybackThread::Track::pause()
642{
643 ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
644 sp<ThreadBase> thread = mThread.promote();
645 if (thread != 0) {
646 Mutex::Autolock _l(thread->mLock);
647 if (mState == ACTIVE || mState == RESUMING) {
648 mState = PAUSING;
649 ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
650 if (!isOutputTrack()) {
651 thread->mLock.unlock();
652 AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
653 thread->mLock.lock();
654
655#ifdef ADD_BATTERY_DATA
656 // to track the speaker usage
657 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
658#endif
659 }
660 }
661 }
662}
663
664void AudioFlinger::PlaybackThread::Track::flush()
665{
666 ALOGV("flush(%d)", mName);
667 sp<ThreadBase> thread = mThread.promote();
668 if (thread != 0) {
669 Mutex::Autolock _l(thread->mLock);
670 if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED &&
671 mState != PAUSING && mState != IDLE && mState != FLUSHED) {
672 return;
673 }
674 // No point remaining in PAUSED state after a flush => go to
675 // FLUSHED state
676 mState = FLUSHED;
677 // do not reset the track if it is still in the process of being stopped or paused.
678 // this will be done by prepareTracks_l() when the track is stopped.
679 // prepareTracks_l() will see mState == FLUSHED, then
680 // remove from active track list, reset(), and trigger presentation complete
681 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
682 if (playbackThread->mActiveTracks.indexOf(this) < 0) {
683 reset();
684 }
685 }
686}
687
688void AudioFlinger::PlaybackThread::Track::reset()
689{
690 // Do not reset twice to avoid discarding data written just after a flush and before
691 // the audioflinger thread detects the track is stopped.
692 if (!mResetDone) {
693 TrackBase::reset();
694 // Force underrun condition to avoid false underrun callback until first data is
695 // written to buffer
696 android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
697 android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
698 mFillingUpStatus = FS_FILLING;
699 mResetDone = true;
700 if (mState == FLUSHED) {
701 mState = IDLE;
702 }
703 }
704}
705
706status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
707{
708 status_t status = DEAD_OBJECT;
709 sp<ThreadBase> thread = mThread.promote();
710 if (thread != 0) {
711 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
712 sp<AudioFlinger> af = mClient->audioFlinger();
713
714 Mutex::Autolock _l(af->mLock);
715
716 sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
717
718 if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
719 Mutex::Autolock _dl(playbackThread->mLock);
720 Mutex::Autolock _sl(srcThread->mLock);
721 sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
722 if (chain == 0) {
723 return INVALID_OPERATION;
724 }
725
726 sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
727 if (effect == 0) {
728 return INVALID_OPERATION;
729 }
730 srcThread->removeEffect_l(effect);
731 playbackThread->addEffect_l(effect);
732 // removeEffect_l() has stopped the effect if it was active so it must be restarted
733 if (effect->state() == EffectModule::ACTIVE ||
734 effect->state() == EffectModule::STOPPING) {
735 effect->start();
736 }
737
738 sp<EffectChain> dstChain = effect->chain().promote();
739 if (dstChain == 0) {
740 srcThread->addEffect_l(effect);
741 return INVALID_OPERATION;
742 }
743 AudioSystem::unregisterEffect(effect->id());
744 AudioSystem::registerEffect(&effect->desc(),
745 srcThread->id(),
746 dstChain->strategy(),
747 AUDIO_SESSION_OUTPUT_MIX,
748 effect->id());
749 }
750 status = playbackThread->attachAuxEffect(this, EffectId);
751 }
752 return status;
753}
754
755void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
756{
757 mAuxEffectId = EffectId;
758 mAuxBuffer = buffer;
759}
760
761bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
762 size_t audioHalFrames)
763{
764 // a track is considered presented when the total number of frames written to audio HAL
765 // corresponds to the number of frames written when presentationComplete() is called for the
766 // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
767 if (mPresentationCompleteFrames == 0) {
768 mPresentationCompleteFrames = framesWritten + audioHalFrames;
769 ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
770 mPresentationCompleteFrames, audioHalFrames);
771 }
772 if (framesWritten >= mPresentationCompleteFrames) {
773 ALOGV("presentationComplete() session %d complete: framesWritten %d",
774 mSessionId, framesWritten);
775 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
776 return true;
777 }
778 return false;
779}
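// Worked example for presentationComplete() above (numbers illustrative): on the
// first call, framesWritten = 10000 and audioHalFrames = 512 (the HAL buffer
// depth in frames), so mPresentationCompleteFrames becomes 10512.  The track is
// then reported complete on the first later call where framesWritten >= 10512,
// i.e. once the frames buffered at stop time have had time to drain through the HAL.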
780
781void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
782{
783 for (int i = 0; i < (int)mSyncEvents.size(); i++) {
784 if (mSyncEvents[i]->type() == type) {
785 mSyncEvents[i]->trigger();
786 mSyncEvents.removeAt(i);
787 i--;
788 }
789 }
790}
791
792// implement VolumeBufferProvider interface
793
794uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
795{
796 // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
797 ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
798 uint32_t vlr = mCblk->getVolumeLR();
799 uint32_t vl = vlr & 0xFFFF;
800 uint32_t vr = vlr >> 16;
801 // track volumes come from shared memory, so can't be trusted and must be clamped
802 if (vl > MAX_GAIN_INT) {
803 vl = MAX_GAIN_INT;
804 }
805 if (vr > MAX_GAIN_INT) {
806 vr = MAX_GAIN_INT;
807 }
808 // now apply the cached master volume and stream type volume;
809 // this is trusted but lacks any synchronization or barrier so may be stale
810 float v = mCachedVolume;
811 vl *= v;
812 vr *= v;
813 // re-combine into U4.16
814 vlr = (vr << 16) | (vl & 0xFFFF);
815 // FIXME look at mute, pause, and stop flags
816 return vlr;
817}
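// Worked example for getVolumeLR() above: with the client requesting unity gain
// on both channels, vlr = 0x10001000 so vl = vr = 0x1000 (4096).  If the cached
// master x stream volume is 0.5, then vl = vr = 2048 and the recombined result is
//   (2048 << 16) | 2048 = 0x08000800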
818
819status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
820{
821 if (mState == TERMINATED || mState == PAUSED ||
822 ((framesReady() == 0) && ((mSharedBuffer != 0) ||
823 (mState == STOPPED)))) {
824 ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
825 mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
826 event->cancel();
827 return INVALID_OPERATION;
828 }
829 (void) TrackBase::setSyncEvent(event);
830 return NO_ERROR;
831}
832
833bool AudioFlinger::PlaybackThread::Track::isOut() const
834{
835 return true;
836}
837
838void AudioFlinger::PlaybackThread::Track::invalidate()
839{
840 // FIXME should use proxy
841 android_atomic_or(CBLK_INVALID, &mCblk->flags);
842 mCblk->cv.signal();
843 mIsInvalid = true;
844}
845
846// ----------------------------------------------------------------------------
847
848sp<AudioFlinger::PlaybackThread::TimedTrack>
849AudioFlinger::PlaybackThread::TimedTrack::create(
850 PlaybackThread *thread,
851 const sp<Client>& client,
852 audio_stream_type_t streamType,
853 uint32_t sampleRate,
854 audio_format_t format,
855 audio_channel_mask_t channelMask,
856 size_t frameCount,
857 const sp<IMemory>& sharedBuffer,
858 int sessionId) {
859 if (!client->reserveTimedTrack())
860 return 0;
861
862 return new TimedTrack(
863 thread, client, streamType, sampleRate, format, channelMask, frameCount,
864 sharedBuffer, sessionId);
865}
866
867AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
868 PlaybackThread *thread,
869 const sp<Client>& client,
870 audio_stream_type_t streamType,
871 uint32_t sampleRate,
872 audio_format_t format,
873 audio_channel_mask_t channelMask,
874 size_t frameCount,
875 const sp<IMemory>& sharedBuffer,
876 int sessionId)
877 : Track(thread, client, streamType, sampleRate, format, channelMask,
878 frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED),
879 mQueueHeadInFlight(false),
880 mTrimQueueHeadOnRelease(false),
881 mFramesPendingInQueue(0),
882 mTimedSilenceBuffer(NULL),
883 mTimedSilenceBufferSize(0),
884 mTimedAudioOutputOnTime(false),
885 mMediaTimeTransformValid(false)
886{
887 LocalClock lc;
888 mLocalTimeFreq = lc.getLocalFreq();
889
890 mLocalTimeToSampleTransform.a_zero = 0;
891 mLocalTimeToSampleTransform.b_zero = 0;
892 mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
893 mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
894 LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
895 &mLocalTimeToSampleTransform.a_to_b_denom);
896
897 mMediaTimeToSampleTransform.a_zero = 0;
898 mMediaTimeToSampleTransform.b_zero = 0;
899 mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
900 mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
901 LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
902 &mMediaTimeToSampleTransform.a_to_b_denom);
903}
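// Worked example for the media-time transform set up above: media time is in
// microseconds, so at sampleRate = 48000 the ratio 48000/1000000 reduces to
// 6/125, and a 20 ms (20000 us) media-time interval maps to
//   20000 * 6 / 125 = 960 frames
// which is exactly 20 ms of audio at 48 kHz.  The local-time transform is built
// the same way, with the local clock frequency as the denominator.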
904
905AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
906 mClient->releaseTimedTrack();
907 delete [] mTimedSilenceBuffer;
908}
909
910status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
911 size_t size, sp<IMemory>* buffer) {
912
913 Mutex::Autolock _l(mTimedBufferQueueLock);
914
915 trimTimedBufferQueue_l();
916
917 // lazily initialize the shared memory heap for timed buffers
918 if (mTimedMemoryDealer == NULL) {
919 const int kTimedBufferHeapSize = 512 << 10;
920
921 mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
922 "AudioFlingerTimed");
923 if (mTimedMemoryDealer == NULL)
924 return NO_MEMORY;
925 }
926
927 sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
928 if (newBuffer == NULL) {
929 newBuffer = mTimedMemoryDealer->allocate(size);
930 if (newBuffer == NULL)
931 return NO_MEMORY;
932 }
933
934 *buffer = newBuffer;
935 return NO_ERROR;
936}
937
938// caller must hold mTimedBufferQueueLock
939void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
940 int64_t mediaTimeNow;
941 {
942 Mutex::Autolock mttLock(mMediaTimeTransformLock);
943 if (!mMediaTimeTransformValid)
944 return;
945
946 int64_t targetTimeNow;
947 status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
948 ? mCCHelper.getCommonTime(&targetTimeNow)
949 : mCCHelper.getLocalTime(&targetTimeNow);
950
951 if (OK != res)
952 return;
953
954 if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
955 &mediaTimeNow)) {
956 return;
957 }
958 }
959
960 size_t trimEnd;
961 for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
962 int64_t bufEnd;
963
964 if ((trimEnd + 1) < mTimedBufferQueue.size()) {
965 // We have a next buffer. Just use its PTS as the PTS of the frame
966 // following the last frame in this buffer. If the stream is sparse
967 // (i.e., there are deliberate gaps left in the stream which should be
968 // filled with silence by the TimedAudioTrack), then this can result
969 // in one extra buffer being left un-trimmed when it could have
970 // been. In general, this is not typical, and we would rather
971 // optimize away the TS calculation below for the more common case
972 // where PTSes are contiguous.
973 bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
974 } else {
975 // We have no next buffer. Compute the PTS of the frame following
976 // the last frame in this buffer by computing the duration of
977 // this buffer in media time units and adding it to the PTS of the
978 // buffer.
979 int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
980 / mFrameSize;
981
982 if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
983 &bufEnd)) {
984 ALOGE("Failed to convert frame count of %lld to media time"
985 " duration" " (scale factor %d/%u) in %s",
986 frameCount,
987 mMediaTimeToSampleTransform.a_to_b_numer,
988 mMediaTimeToSampleTransform.a_to_b_denom,
989 __PRETTY_FUNCTION__);
990 break;
991 }
992 bufEnd += mTimedBufferQueue[trimEnd].pts();
993 }
994
995 if (bufEnd > mediaTimeNow)
996 break;
997
998 // Is the buffer we want to use in the middle of a mix operation right
999 // now? If so, don't actually trim it. Just wait for the releaseBuffer
1000 // from the mixer which should be coming back shortly.
1001 if (!trimEnd && mQueueHeadInFlight) {
1002 mTrimQueueHeadOnRelease = true;
1003 }
1004 }
1005
1006 size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
1007 if (trimStart < trimEnd) {
1008 // Update the bookkeeping for framesReady()
1009 for (size_t i = trimStart; i < trimEnd; ++i) {
1010 updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
1011 }
1012
1013 // Now actually remove the buffers from the queue.
1014 mTimedBufferQueue.removeItemsAt(trimStart, trimEnd);
1015 }
1016}
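// Worked example of the trim pass above (PTS values illustrative, in media-time
// microseconds): with queued buffer PTSes {0, 20000, 40000, 60000} and
// mediaTimeNow = 45000, buffers 0 and 1 end before "now" (each buffer's end is
// taken as the next buffer's start PTS) and are trimmed, while buffer 2 ends at
// 60000 > 45000 so the loop stops there.  If buffer 0 were currently checked out
// by the mixer (mQueueHeadInFlight), it would instead be flagged via
// mTrimQueueHeadOnRelease and dropped later in releaseBuffer().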
1017
1018void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
1019 const char* logTag) {
1020 ALOG_ASSERT(mTimedBufferQueue.size() > 0,
1021 "%s called (reason \"%s\"), but timed buffer queue has no"
1022 " elements to trim.", __FUNCTION__, logTag);
1023
1024 updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
1025 mTimedBufferQueue.removeAt(0);
1026}
1027
1028void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
1029 const TimedBuffer& buf,
1030 const char* logTag) {
1031 uint32_t bufBytes = buf.buffer()->size();
1032 uint32_t consumedAlready = buf.position();
1033
1034 ALOG_ASSERT(consumedAlready <= bufBytes,
1035 "Bad bookkeeping while updating frames pending. Timed buffer is"
1036 " only %u bytes long, but claims to have consumed %u"
1037 " bytes. (update reason: \"%s\")",
1038 bufBytes, consumedAlready, logTag);
1039
1040 uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
1041 ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
1042 "Bad bookkeeping while updating frames pending. Should have at"
1043 " least %u queued frames, but we think we have only %u. (update"
1044 " reason: \"%s\")",
1045 bufFrames, mFramesPendingInQueue, logTag);
1046
1047 mFramesPendingInQueue -= bufFrames;
1048}
1049
1050status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
1051 const sp<IMemory>& buffer, int64_t pts) {
1052
1053 {
1054 Mutex::Autolock mttLock(mMediaTimeTransformLock);
1055 if (!mMediaTimeTransformValid)
1056 return INVALID_OPERATION;
1057 }
1058
1059 Mutex::Autolock _l(mTimedBufferQueueLock);
1060
1061 uint32_t bufFrames = buffer->size() / mFrameSize;
1062 mFramesPendingInQueue += bufFrames;
1063 mTimedBufferQueue.add(TimedBuffer(buffer, pts));
1064
1065 return NO_ERROR;
1066}
1067
1068status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
1069 const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
1070
1071 ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
1072 xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
1073 target);
1074
1075 if (!(target == TimedAudioTrack::LOCAL_TIME ||
1076 target == TimedAudioTrack::COMMON_TIME)) {
1077 return BAD_VALUE;
1078 }
1079
1080 Mutex::Autolock lock(mMediaTimeTransformLock);
1081 mMediaTimeTransform = xform;
1082 mMediaTimeTransformTarget = target;
1083 mMediaTimeTransformValid = true;
1084
1085 return NO_ERROR;
1086}
1087
1088#define min(a, b) ((a) < (b) ? (a) : (b))
1089
1090// implementation of getNextBuffer for tracks whose buffers have timestamps
1091status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
1092 AudioBufferProvider::Buffer* buffer, int64_t pts)
1093{
1094 if (pts == AudioBufferProvider::kInvalidPTS) {
1095 buffer->raw = NULL;
1096 buffer->frameCount = 0;
1097 mTimedAudioOutputOnTime = false;
1098 return INVALID_OPERATION;
1099 }
1100
1101 Mutex::Autolock _l(mTimedBufferQueueLock);
1102
1103 ALOG_ASSERT(!mQueueHeadInFlight,
1104 "getNextBuffer called without releaseBuffer!");
1105
1106 while (true) {
1107
1108 // if we have no timed buffers, then fail
1109 if (mTimedBufferQueue.isEmpty()) {
1110 buffer->raw = NULL;
1111 buffer->frameCount = 0;
1112 return NOT_ENOUGH_DATA;
1113 }
1114
1115 TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1116
1117 // calculate the PTS of the head of the timed buffer queue expressed in
1118 // local time
1119 int64_t headLocalPTS;
1120 {
1121 Mutex::Autolock mttLock(mMediaTimeTransformLock);
1122
1123 ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
1124
1125 if (mMediaTimeTransform.a_to_b_denom == 0) {
1126 // the transform represents a pause, so yield silence
1127 timedYieldSilence_l(buffer->frameCount, buffer);
1128 return NO_ERROR;
1129 }
1130
1131 int64_t transformedPTS;
1132 if (!mMediaTimeTransform.doForwardTransform(head.pts(),
1133 &transformedPTS)) {
1134 // the transform failed. this shouldn't happen, but if it does
1135 // then just drop this buffer
1136 ALOGW("timedGetNextBuffer transform failed");
1137 buffer->raw = NULL;
1138 buffer->frameCount = 0;
1139 trimTimedBufferQueueHead_l("getNextBuffer; no transform");
1140 return NO_ERROR;
1141 }
1142
1143 if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
1144 if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
1145 &headLocalPTS)) {
1146 buffer->raw = NULL;
1147 buffer->frameCount = 0;
1148 return INVALID_OPERATION;
1149 }
1150 } else {
1151 headLocalPTS = transformedPTS;
1152 }
1153 }
1154
1155 // adjust the head buffer's PTS to reflect the portion of the head buffer
1156 // that has already been consumed
1157 int64_t effectivePTS = headLocalPTS +
1158 ((head.position() / mFrameSize) * mLocalTimeFreq / sampleRate());
1159
1160 // Calculate the delta in samples between the head of the input buffer
1161 // queue and the start of the next output buffer that will be written.
1162 // If the transformation fails because of over or underflow, it means
1163 // that the sample's position in the output stream is so far out of
1164 // whack that it should just be dropped.
1165 int64_t sampleDelta;
1166 if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
1167 ALOGV("*** head buffer is too far from PTS: dropped buffer");
1168 trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
1169 " mix");
1170 continue;
1171 }
1172 if (!mLocalTimeToSampleTransform.doForwardTransform(
1173 (effectivePTS - pts) << 32, &sampleDelta)) {
1174 ALOGV("*** too late during sample rate transform: dropped buffer");
1175 trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
1176 continue;
1177 }
1178
1179 ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
1180 " sampleDelta=[%d.%08x]",
1181 head.pts(), head.position(), pts,
1182 static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
1183 + (sampleDelta >> 32)),
1184 static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
1185
1186 // if the delta between the ideal placement for the next input sample and
1187 // the current output position is within this threshold, then we will
1188 // concatenate the next input samples to the previous output
1189 const int64_t kSampleContinuityThreshold =
1190 (static_cast<int64_t>(sampleRate()) << 32) / 250;
1191
1192 // if this is the first buffer of audio that we're emitting from this track
1193 // then it should be almost exactly on time.
1194 const int64_t kSampleStartupThreshold = 1LL << 32;
1195
1196 if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
1197 (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
1198 // the next input is close enough to being on time, so concatenate it
1199 // with the last output
1200 timedYieldSamples_l(buffer);
1201
1202 ALOGVV("*** on time: head.pos=%d frameCount=%u",
1203 head.position(), buffer->frameCount);
1204 return NO_ERROR;
1205 }
1206
1207 // Looks like our output is not on time. Reset our on-time status.
1208 // Next time we mix samples from our input queue, they should be within
1209 // the StartupThreshold.
1210 mTimedAudioOutputOnTime = false;
1211 if (sampleDelta > 0) {
1212 // the gap between the current output position and the proper start of
1213 // the next input sample is too big, so fill it with silence
1214 uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
1215
1216 timedYieldSilence_l(framesUntilNextInput, buffer);
1217 ALOGV("*** silence: frameCount=%u", buffer->frameCount);
1218 return NO_ERROR;
1219 } else {
1220 // the next input sample is late
1221 uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
1222 size_t onTimeSamplePosition =
1223 head.position() + lateFrames * mFrameSize;
1224
1225 if (onTimeSamplePosition > head.buffer()->size()) {
1226 // all the remaining samples in the head are too late, so
1227 // drop it and move on
1228 ALOGV("*** too late: dropped buffer");
1229 trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
1230 continue;
1231 } else {
1232 // skip over the late samples
1233 head.setPosition(onTimeSamplePosition);
1234
1235 // yield the available samples
1236 timedYieldSamples_l(buffer);
1237
1238 ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
1239 return NO_ERROR;
1240 }
1241 }
1242 }
1243}
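// Notes on the fixed-point arithmetic in getNextBuffer() above (numbers are
// illustrative): sampleDelta is a signed Q32.32 frame offset (whole frames in
// the high 32 bits, fraction in the low 32).  kSampleContinuityThreshold is
// 1/250 s worth of frames, i.e. 48000 / 250 = 192 frames (4 ms) at 48 kHz, and
// kSampleStartupThreshold (1LL << 32) is exactly one frame.  The rounding step
//   (sampleDelta + 0x80000000) >> 32
// converts the delta to the nearest whole frame, so a gap of 2.5 frames rounds
// to 3 frames of inserted silence.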
1244
1245// Yield samples from the timed buffer queue head up to the given output
1246// buffer's capacity.
1247//
1248// Caller must hold mTimedBufferQueueLock
1249void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
1250 AudioBufferProvider::Buffer* buffer) {
1251
1252 const TimedBuffer& head = mTimedBufferQueue[0];
1253
1254 buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
1255 head.position());
1256
1257 uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
1258 mFrameSize);
1259 size_t framesRequested = buffer->frameCount;
1260 buffer->frameCount = min(framesLeftInHead, framesRequested);
1261
1262 mQueueHeadInFlight = true;
1263 mTimedAudioOutputOnTime = true;
1264}
1265
1266// Yield samples of silence up to the given output buffer's capacity
1267//
1268// Caller must hold mTimedBufferQueueLock
1269void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
1270 uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
1271
1272 // lazily allocate a buffer filled with silence
1273 if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
1274 delete [] mTimedSilenceBuffer;
1275 mTimedSilenceBufferSize = numFrames * mFrameSize;
1276 mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
1277 memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
1278 }
1279
1280 buffer->raw = mTimedSilenceBuffer;
1281 size_t framesRequested = buffer->frameCount;
1282 buffer->frameCount = min(numFrames, framesRequested);
1283
1284 mTimedAudioOutputOnTime = false;
1285}
1286
1287// AudioBufferProvider interface
1288void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
1289 AudioBufferProvider::Buffer* buffer) {
1290
1291 Mutex::Autolock _l(mTimedBufferQueueLock);
1292
1293 // If the buffer which was just released is part of the buffer at the head
1294 // of the queue, be sure to update the amount of the buffer which has been
1295 // consumed. If the buffer being returned is not part of the head of the
1296 // queue, it's either because the buffer is part of the silence buffer, or
1297 // because the head of the timed queue was trimmed after the mixer called
1298 // getNextBuffer but before the mixer called releaseBuffer.
1299 if (buffer->raw == mTimedSilenceBuffer) {
1300 ALOG_ASSERT(!mQueueHeadInFlight,
1301 "Queue head in flight during release of silence buffer!");
1302 goto done;
1303 }
1304
1305 ALOG_ASSERT(mQueueHeadInFlight,
1306 "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
1307 " head in flight.");
1308
1309 if (mTimedBufferQueue.size()) {
1310 TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1311
1312 void* start = head.buffer()->pointer();
1313 void* end = reinterpret_cast<void*>(
1314 reinterpret_cast<uint8_t*>(head.buffer()->pointer())
1315 + head.buffer()->size());
1316
1317 ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
1318 "released buffer not within the head of the timed buffer"
1319 " queue; qHead = [%p, %p], released buffer = %p",
1320 start, end, buffer->raw);
1321
1322 head.setPosition(head.position() +
1323 (buffer->frameCount * mFrameSize));
1324 mQueueHeadInFlight = false;
1325
1326 ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
1327 "Bad bookkeeping during releaseBuffer! Should have at"
1328 " least %u queued frames, but we think we have only %u",
1329 buffer->frameCount, mFramesPendingInQueue);
1330
1331 mFramesPendingInQueue -= buffer->frameCount;
1332
1333 if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
1334 || mTrimQueueHeadOnRelease) {
1335 trimTimedBufferQueueHead_l("releaseBuffer");
1336 mTrimQueueHeadOnRelease = false;
1337 }
1338 } else {
1339 LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
1340 " buffers in the timed buffer queue");
1341 }
1342
1343done:
1344 buffer->raw = 0;
1345 buffer->frameCount = 0;
1346}
1347
1348size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
1349 Mutex::Autolock _l(mTimedBufferQueueLock);
1350 return mFramesPendingInQueue;
1351}
1352
1353AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
1354 : mPTS(0), mPosition(0) {}
1355
1356AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
1357 const sp<IMemory>& buffer, int64_t pts)
1358 : mBuffer(buffer), mPTS(pts), mPosition(0) {}
1359
1360
1361// ----------------------------------------------------------------------------
1362
1363AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
1364 PlaybackThread *playbackThread,
1365 DuplicatingThread *sourceThread,
1366 uint32_t sampleRate,
1367 audio_format_t format,
1368 audio_channel_mask_t channelMask,
1369 size_t frameCount)
1370 : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
1371 NULL, 0, IAudioFlinger::TRACK_DEFAULT),
1372 mActive(false), mSourceThread(sourceThread), mBuffers(NULL)
1373{
1374
1375 if (mCblk != NULL) {
1376 mBuffers = (char*)mCblk + sizeof(audio_track_cblk_t);
1377 mOutBuffer.frameCount = 0;
1378 playbackThread->mTracks.add(this);
1379 ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, mBuffers %p, " \
1380 "mCblk->frameCount %d, mCblk->sampleRate %u, mChannelMask 0x%08x mBufferEnd %p",
1381 mCblk, mBuffer, mBuffers,
1382 mCblk->frameCount, mCblk->sampleRate, mChannelMask, mBufferEnd);
1383 } else {
1384 ALOGW("Error creating output track on thread %p", playbackThread);
1385 }
1386}
1387
1388AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
1389{
1390 clearBufferQueue();
1391}
1392
1393status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
1394 int triggerSession)
1395{
1396 status_t status = Track::start(event, triggerSession);
1397 if (status != NO_ERROR) {
1398 return status;
1399 }
1400
1401 mActive = true;
1402 mRetryCount = 127;
1403 return status;
1404}
1405
1406void AudioFlinger::PlaybackThread::OutputTrack::stop()
1407{
1408 Track::stop();
1409 clearBufferQueue();
1410 mOutBuffer.frameCount = 0;
1411 mActive = false;
1412}
1413
1414bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
1415{
1416 Buffer *pInBuffer;
1417 Buffer inBuffer;
1418 uint32_t channelCount = mChannelCount;
1419 bool outputBufferFull = false;
1420 inBuffer.frameCount = frames;
1421 inBuffer.i16 = data;
1422
1423 uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
1424
1425 if (!mActive && frames != 0) {
1426 start();
1427 sp<ThreadBase> thread = mThread.promote();
1428 if (thread != 0) {
1429 MixerThread *mixerThread = (MixerThread *)thread.get();
1430 if (mFrameCount > frames) {
1431 if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1432 uint32_t startFrames = (mFrameCount - frames);
1433 pInBuffer = new Buffer;
1434 pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
1435 pInBuffer->frameCount = startFrames;
1436 pInBuffer->i16 = pInBuffer->mBuffer;
1437 memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
1438 mBufferQueue.add(pInBuffer);
1439 } else {
1440 ALOGW ("OutputTrack::write() %p no more buffers in queue", this);
1441 }
1442 }
1443 }
1444 }
1445
1446 while (waitTimeLeftMs) {
1447 // First write pending buffers, then new data
1448 if (mBufferQueue.size()) {
1449 pInBuffer = mBufferQueue.itemAt(0);
1450 } else {
1451 pInBuffer = &inBuffer;
1452 }
1453
1454 if (pInBuffer->frameCount == 0) {
1455 break;
1456 }
1457
1458 if (mOutBuffer.frameCount == 0) {
1459 mOutBuffer.frameCount = pInBuffer->frameCount;
1460 nsecs_t startTime = systemTime();
1461 if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) {
1462 ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this,
1463 mThread.unsafe_get());
1464 outputBufferFull = true;
1465 break;
1466 }
1467 uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
1468 if (waitTimeLeftMs >= waitTimeMs) {
1469 waitTimeLeftMs -= waitTimeMs;
1470 } else {
1471 waitTimeLeftMs = 0;
1472 }
1473 }
1474
1475 uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
1476 pInBuffer->frameCount;
1477 memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
1478 mCblk->stepUserOut(outFrames, mFrameCount);
1479 pInBuffer->frameCount -= outFrames;
1480 pInBuffer->i16 += outFrames * channelCount;
1481 mOutBuffer.frameCount -= outFrames;
1482 mOutBuffer.i16 += outFrames * channelCount;
1483
1484 if (pInBuffer->frameCount == 0) {
1485 if (mBufferQueue.size()) {
1486 mBufferQueue.removeAt(0);
1487 delete [] pInBuffer->mBuffer;
1488 delete pInBuffer;
1489 ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
1490 mThread.unsafe_get(), mBufferQueue.size());
1491 } else {
1492 break;
1493 }
1494 }
1495 }
1496
1497 // If we could not write all frames, allocate a buffer and queue it for next time.
1498 if (inBuffer.frameCount) {
1499 sp<ThreadBase> thread = mThread.promote();
1500 if (thread != 0 && !thread->standby()) {
1501 if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1502 pInBuffer = new Buffer;
1503 pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
1504 pInBuffer->frameCount = inBuffer.frameCount;
1505 pInBuffer->i16 = pInBuffer->mBuffer;
1506 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
1507 sizeof(int16_t));
1508 mBufferQueue.add(pInBuffer);
1509 ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
1510 mThread.unsafe_get(), mBufferQueue.size());
1511 } else {
1512 ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
1513 this, mThread.unsafe_get());
1514 }
1515 }
1516 }
1517
1518 // Calling write() with a 0 length buffer means that no more data will be written:
1519 // If no more buffers are pending, fill output track buffer to make sure it is started
1520 // by output mixer.
1521 if (frames == 0 && mBufferQueue.size() == 0) {
1522 if (mCblk->user < mFrameCount) {
1523 frames = mFrameCount - mCblk->user;
1524 pInBuffer = new Buffer;
1525 pInBuffer->mBuffer = new int16_t[frames * channelCount];
1526 pInBuffer->frameCount = frames;
1527 pInBuffer->i16 = pInBuffer->mBuffer;
1528 memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
1529 mBufferQueue.add(pInBuffer);
1530 } else if (mActive) {
1531 stop();
1532 }
1533 }
1534
1535 return outputBufferFull;
1536}
1537
1538status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
1539 AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
1540{
1541 int active;
1542 status_t result;
1543 audio_track_cblk_t* cblk = mCblk;
1544 uint32_t framesReq = buffer->frameCount;
1545
1546 ALOGVV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server);
1547 buffer->frameCount = 0;
1548
1549 uint32_t framesAvail = cblk->framesAvailableOut(mFrameCount);
1550
1551
1552 if (framesAvail == 0) {
1553 Mutex::Autolock _l(cblk->lock);
1554 goto start_loop_here;
1555 while (framesAvail == 0) {
1556 active = mActive;
1557 if (CC_UNLIKELY(!active)) {
1558 ALOGV("Not active and NO_MORE_BUFFERS");
1559 return NO_MORE_BUFFERS;
1560 }
1561 result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
1562 if (result != NO_ERROR) {
1563 return NO_MORE_BUFFERS;
1564 }
1565 // read the server count again
1566 start_loop_here:
1567 framesAvail = cblk->framesAvailableOut_l(mFrameCount);
1568 }
1569 }
1570
1571// if (framesAvail < framesReq) {
1572// return NO_MORE_BUFFERS;
1573// }
1574
1575 if (framesReq > framesAvail) {
1576 framesReq = framesAvail;
1577 }
1578
1579 uint32_t u = cblk->user;
1580 uint32_t bufferEnd = cblk->userBase + mFrameCount;
1581
1582 if (framesReq > bufferEnd - u) {
1583 framesReq = bufferEnd - u;
1584 }
1585
1586 buffer->frameCount = framesReq;
1587 buffer->raw = cblk->buffer(mBuffers, mFrameSize, u);
1588 return NO_ERROR;
1589}
1590
1591
1592void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
1593{
1594 size_t size = mBufferQueue.size();
1595
1596 for (size_t i = 0; i < size; i++) {
1597 Buffer *pBuffer = mBufferQueue.itemAt(i);
1598 delete [] pBuffer->mBuffer;
1599 delete pBuffer;
1600 }
1601 mBufferQueue.clear();
1602}
1603
1604
1605// ----------------------------------------------------------------------------
1606// Record
1607// ----------------------------------------------------------------------------
1608
1609AudioFlinger::RecordHandle::RecordHandle(
1610 const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
1611 : BnAudioRecord(),
1612 mRecordTrack(recordTrack)
1613{
1614}
1615
1616AudioFlinger::RecordHandle::~RecordHandle() {
1617 stop_nonvirtual();
1618 mRecordTrack->destroy();
1619}
1620
1621sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
1622 return mRecordTrack->getCblk();
1623}
1624
1625status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
1626 int triggerSession) {
1627 ALOGV("RecordHandle::start()");
1628 return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
1629}
1630
1631void AudioFlinger::RecordHandle::stop() {
1632 stop_nonvirtual();
1633}
1634
1635void AudioFlinger::RecordHandle::stop_nonvirtual() {
1636 ALOGV("RecordHandle::stop()");
1637 mRecordTrack->stop();
1638}
1639
1640status_t AudioFlinger::RecordHandle::onTransact(
1641 uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
1642{
1643 return BnAudioRecord::onTransact(code, data, reply, flags);
1644}
1645
1646// ----------------------------------------------------------------------------
1647
1648// RecordTrack constructor must be called with AudioFlinger::mLock held
1649AudioFlinger::RecordThread::RecordTrack::RecordTrack(
1650 RecordThread *thread,
1651 const sp<Client>& client,
1652 uint32_t sampleRate,
1653 audio_format_t format,
1654 audio_channel_mask_t channelMask,
1655 size_t frameCount,
1656 int sessionId)
1657 : TrackBase(thread, client, sampleRate, format,
1658 channelMask, frameCount, 0 /*sharedBuffer*/, sessionId),
1659 mOverflow(false)
1660{
1661 ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
1662}
1663
1664AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
1665{
1666 ALOGV("%s", __func__);
1667}
1668
1669// AudioBufferProvider interface
1670status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
1671 int64_t pts)
1672{
1673 audio_track_cblk_t* cblk = this->cblk();
1674 uint32_t framesAvail;
1675 uint32_t framesReq = buffer->frameCount;
1676
1677 // Check if last stepServer failed, try to step now
1678 if (mStepServerFailed) {
1679 if (!step()) {
1680 goto getNextBuffer_exit;
1681 }
1682 ALOGV("stepServer recovered");
1683 mStepServerFailed = false;
1684 }
1685
1686 // FIXME lock is not actually held, so overrun is possible
1687 framesAvail = cblk->framesAvailableIn_l(mFrameCount);
1688
1689 if (CC_LIKELY(framesAvail)) {
1690 uint32_t s = cblk->server;
1691 uint32_t bufferEnd = cblk->serverBase + mFrameCount;
1692
1693 if (framesReq > framesAvail) {
1694 framesReq = framesAvail;
1695 }
1696 if (framesReq > bufferEnd - s) {
1697 framesReq = bufferEnd - s;
1698 }
1699
1700 buffer->raw = getBuffer(s, framesReq);
1701 buffer->frameCount = framesReq;
1702 return NO_ERROR;
1703 }
1704
1705getNextBuffer_exit:
1706 buffer->raw = NULL;
1707 buffer->frameCount = 0;
1708 return NOT_ENOUGH_DATA;
1709}
1710
1711status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
1712 int triggerSession)
1713{
1714 sp<ThreadBase> thread = mThread.promote();
1715 if (thread != 0) {
1716 RecordThread *recordThread = (RecordThread *)thread.get();
1717 return recordThread->start(this, event, triggerSession);
1718 } else {
1719 return BAD_VALUE;
1720 }
1721}
1722
1723void AudioFlinger::RecordThread::RecordTrack::stop()
1724{
1725 sp<ThreadBase> thread = mThread.promote();
1726 if (thread != 0) {
1727 RecordThread *recordThread = (RecordThread *)thread.get();
1728 recordThread->mLock.lock();
1729 bool doStop = recordThread->stop_l(this);
1730 if (doStop) {
1731 TrackBase::reset();
1732 // Force overrun condition to avoid false overrun callback until first data is
1733 // read from buffer
1734 android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
1735 }
1736 recordThread->mLock.unlock();
1737 if (doStop) {
1738 AudioSystem::stopInput(recordThread->id());
1739 }
1740 }
1741}
1742
1743void AudioFlinger::RecordThread::RecordTrack::destroy()
1744{
1745 // see comments at AudioFlinger::PlaybackThread::Track::destroy()
1746 sp<RecordTrack> keep(this);
1747 {
1748 sp<ThreadBase> thread = mThread.promote();
1749 if (thread != 0) {
1750 if (mState == ACTIVE || mState == RESUMING) {
1751 AudioSystem::stopInput(thread->id());
1752 }
1753 AudioSystem::releaseInput(thread->id());
1754 Mutex::Autolock _l(thread->mLock);
1755 RecordThread *recordThread = (RecordThread *) thread.get();
1756 recordThread->destroyTrack_l(this);
1757 }
1758 }
1759}
1760
1761
1762/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
1763{
1764 result.append(" Clien Fmt Chn mask Session Step S SRate Serv User FrameCount\n");
1765}
1766
1767void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
1768{
1769 snprintf(buffer, size, " %05d %03u 0x%08x %05d %04u %01d %05u %08x %08x %05d\n",
1770 (mClient == 0) ? getpid_cached : mClient->pid(),
1771 mFormat,
1772 mChannelMask,
1773 mSessionId,
1774 mStepCount,
1775 mState,
1776 mCblk->sampleRate,
1777 mCblk->server,
1778 mCblk->user,
1779 mFrameCount);
1780}
1781
1782bool AudioFlinger::RecordThread::RecordTrack::isOut() const
1783{
1784 return false;
1785}
1786
1787}; // namespace android