am bf5e2397: am 5baf2af5: more support for audio effect offload

* commit 'bf5e23979a03da96ce1d63126c480103232f174b':
  more support for audio effect offload
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 62f0c64..36430a3 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -60,7 +60,7 @@
         size_t      frameCount;     // number of sample frames corresponding to size;
                                     // on input it is the number of frames available,
                                     // on output is the number of frames actually drained
-                                    // (currently ignored, but will make the primary field in future)
+                                    // (currently ignored but will make the primary field in future)
 
         size_t      size;           // input/output in bytes == frameCount * frameSize
                                     // FIXME this is redundant with respect to frameCount,
@@ -444,7 +444,8 @@
                                                     // notification callback
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback
-    bool                    mRefreshRemaining;  // processAudioBuffer() should refresh next 2
+    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
+                                                    // mRemainingFrames and mRetryOnPartialBuffer
 
     // These are private to processAudioBuffer(), and are not protected by a lock
     uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 225ef76..b96b8a1 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -155,7 +155,8 @@
     class OutputDescriptor {
     public:
         OutputDescriptor()
-        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)  {}
+        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)
+            {}
 
         uint32_t samplingRate;
         audio_format_t format;
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 453c106..56bf49a 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -660,7 +660,6 @@
     size_t                  mReqFrameCount;         // frame count to request the next time a new
                                                     // IAudioTrack is needed
 
-
     // constant after constructor or set()
     audio_format_t          mFormat;                // as requested by client, not forced to 16-bit
     audio_stream_type_t     mStreamType;
@@ -699,7 +698,8 @@
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback,
                                                     // at initial source sample rate
-    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh next 2
+    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
+                                                    // mRemainingFrames and mRetryOnPartialBuffer
 
     // These are private to processAudioBuffer(), and are not protected by a lock
     uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h
index 2653b53..098aa69 100644
--- a/include/media/stagefright/SkipCutBuffer.h
+++ b/include/media/stagefright/SkipCutBuffer.h
@@ -47,6 +47,7 @@
  private:
     void write(const char *src, size_t num);
     size_t read(char *dst, size_t num);
+    int32_t mSkip;
     int32_t mFrontPadding;
     int32_t mBackPadding;
     int32_t mWriteHead;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index fe258ad..4b13887 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -48,7 +48,7 @@
 #define CBLK_STREAM_END_DONE 0x400 // set by server on render completion, cleared by client
 
 //EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation
-#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 //assuming upto a maximum of 20 seconds of offloaded
+#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 // assuming up to a maximum of 20 seconds of offloaded
 
 struct AudioTrackSharedStreaming {
     // similar to NBAIO MonoPipe
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index be818c6..22ad453 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -132,7 +132,7 @@
             lStatus = reply.readInt32();
             track = interface_cast<IAudioTrack>(reply.readStrongBinder());
         }
-        if (status) {
+        if (status != NULL) {
             *status = lStatus;
         }
         return track;
@@ -180,7 +180,7 @@
             lStatus = reply.readInt32();
             record = interface_cast<IAudioRecord>(reply.readStrongBinder());
         }
-        if (status) {
+        if (status != NULL) {
             *status = lStatus;
         }
         return record;
@@ -397,15 +397,25 @@
         audio_io_handle_t output = (audio_io_handle_t) reply.readInt32();
         ALOGV("openOutput() returned output, %d", output);
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices != NULL) *pDevices = devices;
+        if (pDevices != NULL) {
+            *pDevices = devices;
+        }
         samplingRate = reply.readInt32();
-        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = samplingRate;
+        }
         format = (audio_format_t) reply.readInt32();
-        if (pFormat != NULL) *pFormat = format;
+        if (pFormat != NULL) {
+            *pFormat = format;
+        }
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask != NULL) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) {
+            *pChannelMask = channelMask;
+        }
         latency = reply.readInt32();
-        if (pLatencyMs != NULL) *pLatencyMs = latency;
+        if (pLatencyMs != NULL) {
+            *pLatencyMs = latency;
+        }
         return output;
     }
 
@@ -469,13 +479,21 @@
         remote()->transact(OPEN_INPUT, data, &reply);
         audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices != NULL) *pDevices = devices;
+        if (pDevices != NULL) {
+            *pDevices = devices;
+        }
         samplingRate = reply.readInt32();
-        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = samplingRate;
+        }
         format = (audio_format_t) reply.readInt32();
-        if (pFormat != NULL) *pFormat = format;
+        if (pFormat != NULL) {
+            *pFormat = format;
+        }
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask != NULL) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) {
+            *pChannelMask = channelMask;
+        }
         return input;
     }
 
@@ -517,11 +535,11 @@
         status_t status = reply.readInt32();
         if (status == NO_ERROR) {
             uint32_t tmp = reply.readInt32();
-            if (halFrames) {
+            if (halFrames != NULL) {
                 *halFrames = tmp;
             }
             tmp = reply.readInt32();
-            if (dspFrames) {
+            if (dspFrames != NULL) {
                 *dspFrames = tmp;
             }
         }
@@ -639,7 +657,7 @@
 
         if (pDesc == NULL) {
             return effect;
-            if (status) {
+            if (status != NULL) {
                 *status = BAD_VALUE;
             }
         }
@@ -657,7 +675,7 @@
         } else {
             lStatus = reply.readInt32();
             int tmp = reply.readInt32();
-            if (id) {
+            if (id != NULL) {
                 *id = tmp;
             }
             tmp = reply.readInt32();
@@ -667,7 +685,7 @@
             effect = interface_cast<IEffect>(reply.readStrongBinder());
             reply.read(pDesc, sizeof(effect_descriptor_t));
         }
-        if (status) {
+        if (status != NULL) {
             *status = lStatus;
         }
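
The IAudioFlinger proxy changes above replace implicit pointer tests with explicit "!= NULL" comparisons before writing through optional out-parameters such as status, halFrames and dspFrames. A minimal, self-contained sketch of that pattern follows; the function and variable names are hypothetical and not part of the patch.

#include <cstdio>
#include <cstddef>

typedef int status_t;

// Stand-in for reading the remote status out of the Binder reply.
static status_t readStatusFromReply() { return 0; }

// Optional out-parameter: only dereferenced after an explicit NULL check,
// mirroring the proxy-side style adopted above.
static int openSomething(status_t *status) {
    status_t lStatus = readStatusFromReply();
    if (status != NULL) {
        *status = lStatus;
    }
    return 42;  // pretend handle
}

int main() {
    status_t s = -1;
    openSomething(&s);     // caller that wants the detailed status
    openSomething(NULL);   // caller that ignores it must not crash
    printf("status %d\n", s);
    return 0;
}
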
 
diff --git a/media/libmediaplayerservice/HDCP.cpp b/media/libmediaplayerservice/HDCP.cpp
index c2ac1a3..afe3936 100644
--- a/media/libmediaplayerservice/HDCP.cpp
+++ b/media/libmediaplayerservice/HDCP.cpp
@@ -107,11 +107,7 @@
         return NO_INIT;
     }
 
-    // TO-DO:
-    // Only support HDCP_CAPS_ENCRYPT (byte-array to byte-array) for now.
-    // use mHDCPModule->getCaps() when the HDCP libraries get updated.
-    //return mHDCPModule->getCaps();
-    return HDCPModule::HDCP_CAPS_ENCRYPT;
+    return mHDCPModule->getCaps();
 }
 
 status_t HDCP::encrypt(
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index 773854f..e2e6d79 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -25,7 +25,7 @@
 namespace android {
 
 SkipCutBuffer::SkipCutBuffer(int32_t skip, int32_t cut) {
-    mFrontPadding = skip;
+    mFrontPadding = mSkip = skip;
     mBackPadding = cut;
     mWriteHead = 0;
     mReadHead = 0;
@@ -94,6 +94,7 @@
 
 void SkipCutBuffer::clear() {
     mWriteHead = mReadHead = 0;
+    mFrontPadding = mSkip;
 }
 
 void SkipCutBuffer::write(const char *src, size_t num) {
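
The new mSkip field above caches the constructor's skip value so that clear() can restore mFrontPadding; before this change clear() only reset the read/write heads, so the skip was applied once per object lifetime rather than once per stream segment (for example across a flush or seek). A self-contained sketch of just that bookkeeping, using a hypothetical mini class rather than the real SkipCutBuffer API, is:

#include <cstdio>
#include <cstdint>
#include <cstddef>

// Hypothetical miniature of the skip bookkeeping only; the cut/back-padding
// side of the real class is omitted.
class MiniSkip {
public:
    explicit MiniSkip(int32_t skip) : mSkip(skip), mFrontPadding(skip) {}

    // Returns how many of 'num' incoming bytes survive the front padding.
    size_t consume(size_t num) {
        size_t dropped = (size_t)mFrontPadding < num ? (size_t)mFrontPadding : num;
        mFrontPadding -= (int32_t)dropped;
        return num - dropped;
    }

    // Mirrors SkipCutBuffer::clear() above: reset and re-arm the skip.
    void clear() { mFrontPadding = mSkip; }

private:
    int32_t mSkip;          // original skip, constant after construction
    int32_t mFrontPadding;  // bytes still to drop from the front
};

int main() {
    MiniSkip s(100);
    printf("%zu\n", s.consume(150));  // 50 bytes survive, padding exhausted
    s.clear();                        // e.g. after a seek
    printf("%zu\n", s.consume(150));  // padding applies again: 50 bytes survive
    return 0;
}
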
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 1b20cbb..c9b5d26 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -58,6 +58,8 @@
       mIsADTS(false),
       mInputBufferCount(0),
       mSignalledError(false),
+      mSawInputEos(false),
+      mSignalledOutputEos(false),
       mAnchorTimeUs(0),
       mNumSamplesOutput(0),
       mOutputPortSettingsChange(NONE) {
@@ -350,115 +352,83 @@
         return;
     }
 
-    while (!inQueue.empty() && !outQueue.empty()) {
-        BufferInfo *inInfo = *inQueue.begin();
-        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+    while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
+        BufferInfo *inInfo = NULL;
+        OMX_BUFFERHEADERTYPE *inHeader = NULL;
+        if (!inQueue.empty()) {
+            inInfo = *inQueue.begin();
+            inHeader = inInfo->mHeader;
+        }
 
         BufferInfo *outInfo = *outQueue.begin();
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+        outHeader->nFlags = 0;
 
-        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            inQueue.erase(inQueue.begin());
-            inInfo->mOwnedByUs = false;
-            notifyEmptyBufferDone(inHeader);
+        if (inHeader) {
+            if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                mSawInputEos = true;
+            }
 
-            if (mDecoderHasData) {
-                // flush out the decoder's delayed data by calling DecodeFrame
-                // one more time, with the AACDEC_FLUSH flag set
-                INT_PCM *outBuffer =
-                        reinterpret_cast<INT_PCM *>(
-                                outHeader->pBuffer + outHeader->nOffset);
+            if (inHeader->nOffset == 0 && inHeader->nFilledLen) {
+                mAnchorTimeUs = inHeader->nTimeStamp;
+                mNumSamplesOutput = 0;
+            }
 
-                AAC_DECODER_ERROR decoderErr =
-                    aacDecoder_DecodeFrame(mAACDecoder,
-                                           outBuffer,
-                                           outHeader->nAllocLen,
-                                           AACDEC_FLUSH);
-                mDecoderHasData = false;
+            if (mIsADTS) {
+                size_t adtsHeaderSize = 0;
+                // skip 30 bits, aac_frame_length follows.
+                // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
 
-                if (decoderErr != AAC_DEC_OK) {
+                const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;
+
+                bool signalError = false;
+                if (inHeader->nFilledLen < 7) {
+                    ALOGE("Audio data too short to contain even the ADTS header. "
+                          "Got %ld bytes.", inHeader->nFilledLen);
+                    hexdump(adtsHeader, inHeader->nFilledLen);
+                    signalError = true;
+                } else {
+                    bool protectionAbsent = (adtsHeader[1] & 1);
+
+                    unsigned aac_frame_length =
+                        ((adtsHeader[3] & 3) << 11)
+                        | (adtsHeader[4] << 3)
+                        | (adtsHeader[5] >> 5);
+
+                    if (inHeader->nFilledLen < aac_frame_length) {
+                        ALOGE("Not enough audio data for the complete frame. "
+                              "Got %ld bytes, frame size according to the ADTS "
+                              "header is %u bytes.",
+                              inHeader->nFilledLen, aac_frame_length);
+                        hexdump(adtsHeader, inHeader->nFilledLen);
+                        signalError = true;
+                    } else {
+                        adtsHeaderSize = (protectionAbsent ? 7 : 9);
+
+                        inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize;
+                        inBufferLength[0] = aac_frame_length - adtsHeaderSize;
+
+                        inHeader->nOffset += adtsHeaderSize;
+                        inHeader->nFilledLen -= adtsHeaderSize;
+                    }
+                }
+
+                if (signalError) {
                     mSignalledError = true;
 
-                    notify(OMX_EventError, OMX_ErrorUndefined, decoderErr,
+                    notify(OMX_EventError,
+                           OMX_ErrorStreamCorrupt,
+                           ERROR_MALFORMED,
                            NULL);
 
                     return;
                 }
-
-                outHeader->nFilledLen =
-                        mStreamInfo->frameSize
-                            * sizeof(int16_t)
-                            * mStreamInfo->numChannels;
             } else {
-                // we never submitted any data to the decoder, so there's nothing to flush out
-                outHeader->nFilledLen = 0;
-            }
-
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
-
-            outQueue.erase(outQueue.begin());
-            outInfo->mOwnedByUs = false;
-            notifyFillBufferDone(outHeader);
-            return;
-        }
-
-        if (inHeader->nOffset == 0) {
-            mAnchorTimeUs = inHeader->nTimeStamp;
-            mNumSamplesOutput = 0;
-        }
-
-        size_t adtsHeaderSize = 0;
-        if (mIsADTS) {
-            // skip 30 bits, aac_frame_length follows.
-            // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
-
-            const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;
-
-            bool signalError = false;
-            if (inHeader->nFilledLen < 7) {
-                ALOGE("Audio data too short to contain even the ADTS header. "
-                      "Got %ld bytes.", inHeader->nFilledLen);
-                hexdump(adtsHeader, inHeader->nFilledLen);
-                signalError = true;
-            } else {
-                bool protectionAbsent = (adtsHeader[1] & 1);
-
-                unsigned aac_frame_length =
-                    ((adtsHeader[3] & 3) << 11)
-                    | (adtsHeader[4] << 3)
-                    | (adtsHeader[5] >> 5);
-
-                if (inHeader->nFilledLen < aac_frame_length) {
-                    ALOGE("Not enough audio data for the complete frame. "
-                          "Got %ld bytes, frame size according to the ADTS "
-                          "header is %u bytes.",
-                          inHeader->nFilledLen, aac_frame_length);
-                    hexdump(adtsHeader, inHeader->nFilledLen);
-                    signalError = true;
-                } else {
-                    adtsHeaderSize = (protectionAbsent ? 7 : 9);
-
-                    inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize;
-                    inBufferLength[0] = aac_frame_length - adtsHeaderSize;
-
-                    inHeader->nOffset += adtsHeaderSize;
-                    inHeader->nFilledLen -= adtsHeaderSize;
-                }
-            }
-
-            if (signalError) {
-                mSignalledError = true;
-
-                notify(OMX_EventError,
-                       OMX_ErrorStreamCorrupt,
-                       ERROR_MALFORMED,
-                       NULL);
-
-                return;
+                inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
+                inBufferLength[0] = inHeader->nFilledLen;
             }
         } else {
-            inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
-            inBufferLength[0] = inHeader->nFilledLen;
+            inBufferLength[0] = 0;
         }
 
         // Fill and decode
@@ -471,50 +441,66 @@
         int prevNumChannels = mStreamInfo->numChannels;
 
         AAC_DECODER_ERROR decoderErr = AAC_DEC_NOT_ENOUGH_BITS;
-        while (bytesValid[0] > 0 && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+        while ((bytesValid[0] > 0 || mSawInputEos) && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+            mDecoderHasData |= (bytesValid[0] > 0);
             aacDecoder_Fill(mAACDecoder,
                             inBuffer,
                             inBufferLength,
                             bytesValid);
-            mDecoderHasData = true;
 
             decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
                                                 outBuffer,
                                                 outHeader->nAllocLen,
                                                 0 /* flags */);
-
             if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
-                ALOGW("Not enough bits, bytesValid %d", bytesValid[0]);
+                if (mSawInputEos && bytesValid[0] <= 0) {
+                    if (mDecoderHasData) {
+                        // flush out the decoder's delayed data by calling DecodeFrame
+                        // one more time, with the AACDEC_FLUSH flag set
+                        decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
+                                                            outBuffer,
+                                                            outHeader->nAllocLen,
+                                                            AACDEC_FLUSH);
+                        mDecoderHasData = false;
+                    }
+                    outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                    mSignalledOutputEos = true;
+                    break;
+                } else {
+                    ALOGW("Not enough bits, bytesValid %d", bytesValid[0]);
+                }
             }
         }
 
         size_t numOutBytes =
             mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
 
-        if (decoderErr == AAC_DEC_OK) {
-            UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
-            inHeader->nFilledLen -= inBufferUsedLength;
-            inHeader->nOffset += inBufferUsedLength;
-        } else {
-            ALOGW("AAC decoder returned error %d, substituting silence",
-                  decoderErr);
+        if (inHeader) {
+            if (decoderErr == AAC_DEC_OK) {
+                UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
+                inHeader->nFilledLen -= inBufferUsedLength;
+                inHeader->nOffset += inBufferUsedLength;
+            } else {
+                ALOGW("AAC decoder returned error %d, substituting silence",
+                      decoderErr);
 
-            memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes);
+                memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes);
 
-            // Discard input buffer.
-            inHeader->nFilledLen = 0;
+                // Discard input buffer.
+                inHeader->nFilledLen = 0;
 
-            aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
+                aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
 
-            // fall through
-        }
+                // fall through
+            }
 
-        if (inHeader->nFilledLen == 0) {
-            inInfo->mOwnedByUs = false;
-            inQueue.erase(inQueue.begin());
-            inInfo = NULL;
-            notifyEmptyBufferDone(inHeader);
-            inHeader = NULL;
+            if (inHeader->nFilledLen == 0) {
+                inInfo->mOwnedByUs = false;
+                inQueue.erase(inQueue.begin());
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+            }
         }
 
         /*
@@ -555,7 +541,6 @@
             // we've previously decoded valid data, in the latter case
             // (decode failed) we'll output a silent frame.
             outHeader->nFilledLen = numOutBytes;
-            outHeader->nFlags = 0;
 
             outHeader->nTimeStamp =
                 mAnchorTimeUs
@@ -606,6 +591,8 @@
     mStreamInfo->sampleRate = 0;
 
     mSignalledError = false;
+    mSawInputEos = false;
+    mSignalledOutputEos = false;
     mOutputPortSettingsChange = NONE;
 }
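
The SoftAAC2 rework above stops treating EOS as an early-return special case: the outer loop now also runs when the input queue is empty but input EOS has been seen, and when the decoder reports AAC_DEC_NOT_ENOUGH_BITS in that state it is called once more with AACDEC_FLUSH so its delayed samples are emitted before OMX_BUFFERFLAG_EOS is signalled. A minimal sketch of that control flow with a mock decoder follows (hypothetical names, not the FDK API); the real loop emits one output buffer per pass, while the mock drains everything in one go for brevity.

#include <cstdio>

enum DecErr { DEC_OK, DEC_NOT_ENOUGH_BITS };

// Mock decoder: holds back a few frames the way a real codec delays output.
struct MockDecoder {
    int delayed = 0;
    DecErr decode(bool flush, int availBytes) {
        if (availBytes > 0) { delayed++; return DEC_OK; }       // consumed input, produced a frame
        if (flush && delayed > 0) { delayed--; return DEC_OK; } // emitted a delayed frame
        return DEC_NOT_ENOUGH_BITS;
    }
};

int main() {
    int inputBuffers = 3;                 // pretend three coded frames arrive; the last carries EOS
    bool sawInputEos = false, signalledOutputEos = false;
    MockDecoder dec;

    while (inputBuffers > 0 || (sawInputEos && !signalledOutputEos)) {
        int avail = 0;
        if (inputBuffers > 0) {
            avail = 1;
            if (--inputBuffers == 0) {
                sawInputEos = true;       // corresponds to OMX_BUFFERFLAG_EOS on the input header
            }
        }

        DecErr err = dec.decode(false /*flush*/, avail);
        if (err == DEC_NOT_ENOUGH_BITS && sawInputEos && avail == 0) {
            // Drain the codec's delayed data, then tag the output with EOS.
            while (dec.decode(true /*flush*/, 0) == DEC_OK) {
                printf("drained delayed frame\n");
            }
            signalledOutputEos = true;
            printf("output EOS\n");
            break;
        }
        printf("decoded frame\n");
    }
    return 0;
}
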
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index 2d960ab..a7ea1e2 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -55,6 +55,8 @@
     bool mDecoderHasData;
     size_t mInputBufferCount;
     bool mSignalledError;
+    bool mSawInputEos;
+    bool mSignalledOutputEos;
     int64_t mAnchorTimeUs;
     int64_t mNumSamplesOutput;
 
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 7c382fb..877e3cb 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -49,6 +49,8 @@
       mNumChannels(2),
       mSamplingRate(44100),
       mSignalledError(false),
+      mSawInputEos(false),
+      mSignalledOutputEos(false),
       mOutputPortSettingsChange(NONE) {
     initPorts();
     initDecoder();
@@ -194,48 +196,36 @@
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
 
-    while (!inQueue.empty() && !outQueue.empty()) {
-        BufferInfo *inInfo = *inQueue.begin();
-        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+    while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
+        BufferInfo *inInfo = NULL;
+        OMX_BUFFERHEADERTYPE *inHeader = NULL;
+        if (!inQueue.empty()) {
+            inInfo = *inQueue.begin();
+            inHeader = inInfo->mHeader;
+        }
 
         BufferInfo *outInfo = *outQueue.begin();
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+        outHeader->nFlags = 0;
 
-        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            inQueue.erase(inQueue.begin());
-            inInfo->mOwnedByUs = false;
-            notifyEmptyBufferDone(inHeader);
-
-            if (!mIsFirst) {
-                // pad the end of the stream with 529 samples, since that many samples
-                // were trimmed off the beginning when decoding started
-                outHeader->nFilledLen =
-                    kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t);
-
-                memset(outHeader->pBuffer, 0, outHeader->nFilledLen);
-            } else {
-                // Since we never discarded frames from the start, we won't have
-                // to add any padding at the end either.
-                outHeader->nFilledLen = 0;
+        if (inHeader) {
+            if (inHeader->nOffset == 0 && inHeader->nFilledLen) {
+                mAnchorTimeUs = inHeader->nTimeStamp;
+                mNumFramesOutput = 0;
             }
 
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+            if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                mSawInputEos = true;
+            }
 
-            outQueue.erase(outQueue.begin());
-            outInfo->mOwnedByUs = false;
-            notifyFillBufferDone(outHeader);
-            return;
+            mConfig->pInputBuffer =
+                inHeader->pBuffer + inHeader->nOffset;
+
+            mConfig->inputBufferCurrentLength = inHeader->nFilledLen;
+        } else {
+            mConfig->pInputBuffer = NULL;
+            mConfig->inputBufferCurrentLength = 0;
         }
-
-        if (inHeader->nOffset == 0) {
-            mAnchorTimeUs = inHeader->nTimeStamp;
-            mNumFramesOutput = 0;
-        }
-
-        mConfig->pInputBuffer =
-            inHeader->pBuffer + inHeader->nOffset;
-
-        mConfig->inputBufferCurrentLength = inHeader->nFilledLen;
         mConfig->inputBufferMaxLength = 0;
         mConfig->inputBufferUsedLength = 0;
 
@@ -262,13 +252,28 @@
                 mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t);
             }
 
-            // This is recoverable, just ignore the current frame and
-            // play silence instead.
-            memset(outHeader->pBuffer,
-                   0,
-                   mConfig->outputFrameSize * sizeof(int16_t));
+            if (decoderErr == NO_ENOUGH_MAIN_DATA_ERROR && mSawInputEos) {
+                if (!mIsFirst) {
+                    // pad the end of the stream with 529 samples, since that many samples
+                    // were trimmed off the beginning when decoding started
+                    outHeader->nOffset = 0;
+                    outHeader->nFilledLen = kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t);
 
-            mConfig->inputBufferUsedLength = inHeader->nFilledLen;
+                    memset(outHeader->pBuffer, 0, outHeader->nFilledLen);
+                }
+                outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                mSignalledOutputEos = true;
+            } else {
+                // This is recoverable, just ignore the current frame and
+                // play silence instead.
+                memset(outHeader->pBuffer,
+                       0,
+                       mConfig->outputFrameSize * sizeof(int16_t));
+
+                if (inHeader) {
+                    mConfig->inputBufferUsedLength = inHeader->nFilledLen;
+                }
+            }
         } else if (mConfig->samplingRate != mSamplingRate
                 || mConfig->num_channels != mNumChannels) {
             mSamplingRate = mConfig->samplingRate;
@@ -289,7 +294,7 @@
 
             outHeader->nFilledLen =
                 mConfig->outputFrameSize * sizeof(int16_t) - outHeader->nOffset;
-        } else {
+        } else if (!mSignalledOutputEos) {
             outHeader->nOffset = 0;
             outHeader->nFilledLen = mConfig->outputFrameSize * sizeof(int16_t);
         }
@@ -298,23 +303,24 @@
             mAnchorTimeUs
                 + (mNumFramesOutput * 1000000ll) / mConfig->samplingRate;
 
-        outHeader->nFlags = 0;
+        if (inHeader) {
+            CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength);
 
-        CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength);
+            inHeader->nOffset += mConfig->inputBufferUsedLength;
+            inHeader->nFilledLen -= mConfig->inputBufferUsedLength;
 
-        inHeader->nOffset += mConfig->inputBufferUsedLength;
-        inHeader->nFilledLen -= mConfig->inputBufferUsedLength;
+
+            if (inHeader->nFilledLen == 0) {
+                inInfo->mOwnedByUs = false;
+                inQueue.erase(inQueue.begin());
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+            }
+        }
 
         mNumFramesOutput += mConfig->outputFrameSize / mNumChannels;
 
-        if (inHeader->nFilledLen == 0) {
-            inInfo->mOwnedByUs = false;
-            inQueue.erase(inQueue.begin());
-            inInfo = NULL;
-            notifyEmptyBufferDone(inHeader);
-            inHeader = NULL;
-        }
-
         outInfo->mOwnedByUs = false;
         outQueue.erase(outQueue.begin());
         outInfo = NULL;
@@ -362,6 +368,8 @@
     pvmp3_InitDecoder(mConfig, mDecoderBuf);
     mIsFirst = true;
     mSignalledError = false;
+    mSawInputEos = false;
+    mSignalledOutputEos = false;
     mOutputPortSettingsChange = NONE;
 }
 
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.h b/media/libstagefright/codecs/mp3dec/SoftMP3.h
index 4af91ea..f9e7b53 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.h
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.h
@@ -61,6 +61,8 @@
 
     bool mIsFirst;
     bool mSignalledError;
+    bool mSawInputEos;
+    bool mSignalledOutputEos;
 
     enum {
         NONE,
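
The SoftMP3 change above moves the end-of-stream padding into the decode-error path: once input EOS has been seen and the decoder reports NO_ENOUGH_MAIN_DATA_ERROR, the output buffer is filled with kPVMP3DecoderDelay (529) samples of silence per channel to balance the samples trimmed at the start of decoding, and then tagged with OMX_BUFFERFLAG_EOS. A small worked example of that padding size, assuming the decoder's default two channels from the constructor above:

#include <cstdio>
#include <cstdint>
#include <cstddef>

int main() {
    const int kPVMP3DecoderDelay = 529;   // samples trimmed at the start of decoding (see above)
    const int numChannels = 2;            // mNumChannels default in the constructor above
    size_t padBytes = kPVMP3DecoderDelay * numChannels * sizeof(int16_t);
    printf("%zu bytes of silence pad the end of the stream\n", padBytes);  // 2116
    return 0;
}
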
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 51bb958..515e4d3 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -54,6 +54,8 @@
       mAnchorTimeUs(0),
       mNumFramesOutput(0),
       mNumFramesLeftOnPage(-1),
+      mSawInputEos(false),
+      mSignalledOutputEos(false),
       mOutputPortSettingsChange(NONE) {
     initPorts();
     CHECK_EQ(initDecoder(), (status_t)OK);
@@ -290,48 +292,47 @@
         return;
     }
 
-    while (!inQueue.empty() && !outQueue.empty()) {
-        BufferInfo *inInfo = *inQueue.begin();
-        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+    while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
+        BufferInfo *inInfo = NULL;
+        OMX_BUFFERHEADERTYPE *inHeader = NULL;
+        if (!inQueue.empty()) {
+            inInfo = *inQueue.begin();
+            inHeader = inInfo->mHeader;
+        }
 
         BufferInfo *outInfo = *outQueue.begin();
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
 
-        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            inQueue.erase(inQueue.begin());
-            inInfo->mOwnedByUs = false;
-            notifyEmptyBufferDone(inHeader);
+        int32_t numPageSamples = 0;
 
-            outHeader->nFilledLen = 0;
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+        if (inHeader) {
+            if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                mSawInputEos = true;
+            }
 
-            outQueue.erase(outQueue.begin());
-            outInfo->mOwnedByUs = false;
-            notifyFillBufferDone(outHeader);
-            return;
+            if (inHeader->nFilledLen || !mSawInputEos) {
+                CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples));
+                memcpy(&numPageSamples,
+                       inHeader->pBuffer
+                        + inHeader->nOffset + inHeader->nFilledLen - 4,
+                       sizeof(numPageSamples));
+
+                if (inHeader->nOffset == 0) {
+                    mAnchorTimeUs = inHeader->nTimeStamp;
+                    mNumFramesOutput = 0;
+                }
+
+                inHeader->nFilledLen -= sizeof(numPageSamples);;
+            }
         }
 
-        int32_t numPageSamples;
-        CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples));
-        memcpy(&numPageSamples,
-               inHeader->pBuffer
-                + inHeader->nOffset + inHeader->nFilledLen - 4,
-               sizeof(numPageSamples));
-
         if (numPageSamples >= 0) {
             mNumFramesLeftOnPage = numPageSamples;
         }
 
-        if (inHeader->nOffset == 0) {
-            mAnchorTimeUs = inHeader->nTimeStamp;
-            mNumFramesOutput = 0;
-        }
-
-        inHeader->nFilledLen -= sizeof(numPageSamples);;
-
         ogg_buffer buf;
-        buf.data = inHeader->pBuffer + inHeader->nOffset;
-        buf.size = inHeader->nFilledLen;
+        buf.data = inHeader ? inHeader->pBuffer + inHeader->nOffset : NULL;
+        buf.size = inHeader ? inHeader->nFilledLen : 0;
         buf.refcount = 1;
         buf.ptr.owner = NULL;
 
@@ -351,6 +352,7 @@
 
         int numFrames = 0;
 
+        outHeader->nFlags = 0;
         int err = vorbis_dsp_synthesis(mState, &pack, 1);
         if (err != 0) {
             ALOGW("vorbis_dsp_synthesis returned %d", err);
@@ -370,13 +372,16 @@
                 ALOGV("discarding %d frames at end of page",
                      numFrames - mNumFramesLeftOnPage);
                 numFrames = mNumFramesLeftOnPage;
+                if (mSawInputEos) {
+                    outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                    mSignalledOutputEos = true;
+                }
             }
             mNumFramesLeftOnPage -= numFrames;
         }
 
         outHeader->nFilledLen = numFrames * sizeof(int16_t) * mVi->channels;
         outHeader->nOffset = 0;
-        outHeader->nFlags = 0;
 
         outHeader->nTimeStamp =
             mAnchorTimeUs
@@ -384,11 +389,13 @@
 
         mNumFramesOutput += numFrames;
 
-        inInfo->mOwnedByUs = false;
-        inQueue.erase(inQueue.begin());
-        inInfo = NULL;
-        notifyEmptyBufferDone(inHeader);
-        inHeader = NULL;
+        if (inHeader) {
+            inInfo->mOwnedByUs = false;
+            inQueue.erase(inQueue.begin());
+            inInfo = NULL;
+            notifyEmptyBufferDone(inHeader);
+            inHeader = NULL;
+        }
 
         outInfo->mOwnedByUs = false;
         outQueue.erase(outQueue.begin());
@@ -425,6 +432,8 @@
         mVi = NULL;
     }
 
+    mSawInputEos = false;
+    mSignalledOutputEos = false;
     mOutputPortSettingsChange = NONE;
 }
 
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
index cb628a0..1d00816 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
@@ -59,6 +59,8 @@
     int64_t mAnchorTimeUs;
     int64_t mNumFramesOutput;
     int32_t mNumFramesLeftOnPage;
+    bool mSawInputEos;
+    bool mSignalledOutputEos;
 
     enum {
         NONE,
diff --git a/media/libstagefright/timedtext/test/Android.mk b/media/libstagefright/timedtext/test/Android.mk
index a5e7ba2..9a9fde2 100644
--- a/media/libstagefright/timedtext/test/Android.mk
+++ b/media/libstagefright/timedtext/test/Android.mk
@@ -2,7 +2,6 @@
 
 # ================================================================
 # Unit tests for libstagefright_timedtext
-# See also /development/testrunner/test_defs.xml
 # ================================================================
 
 # ================================================================
@@ -18,10 +17,13 @@
 
 LOCAL_C_INCLUDES := \
     $(TOP)/external/expat/lib \
-    $(TOP)/frameworks/base/media/libstagefright/timedtext
+    $(TOP)/frameworks/av/media/libstagefright/timedtext
 
 LOCAL_SHARED_LIBRARIES := \
+    libbinder \
     libexpat \
-    libstagefright
+    libstagefright \
+    libstagefright_foundation \
+    libutils
 
 include $(BUILD_NATIVE_TEST)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 8fbac42..4d36bad 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -162,12 +162,15 @@
         (void) property_get("af.tee", value, "0");
         teeEnabled = atoi(value);
     }
-    if (teeEnabled & 1)
+    if (teeEnabled & 1) {
         mTeeSinkInputEnabled = true;
-    if (teeEnabled & 2)
+    }
+    if (teeEnabled & 2) {
         mTeeSinkOutputEnabled = true;
-    if (teeEnabled & 4)
+    }
+    if (teeEnabled & 4) {
         mTeeSinkTrackEnabled = true;
+    }
 #endif
 }
 
@@ -505,10 +508,12 @@
 
         track = thread->createTrack_l(client, streamType, sampleRate, format,
                 channelMask, frameCount, sharedBuffer, lSessionId, flags, tid, &lStatus);
+        // we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
 
         // move effect chain to this output thread if an effect on same session was waiting
         // for a track to be created
         if (lStatus == NO_ERROR && effectThread != NULL) {
+            // no risk of deadlock because AudioFlinger::mLock is held
             Mutex::Autolock _dl(thread->mLock);
             Mutex::Autolock _sl(effectThread->mLock);
             moveEffectChain_l(lSessionId, effectThread, thread, true);
@@ -528,7 +533,9 @@
                 }
             }
         }
+
     }
+
     if (lStatus == NO_ERROR) {
         // s for server's pid, n for normal mixer name, f for fast index
         name = String8::format("s:%d;n:%d;f:%d", getpid_cached, track->name() - AudioMixer::TRACK0,
@@ -542,9 +549,7 @@
     }
 
 Exit:
-    if (status != NULL) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return trackHandle;
 }
 
@@ -1278,6 +1283,7 @@
         recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
                                                   frameCount, lSessionId, flags, tid, &lStatus);
     }
+
     if (lStatus != NO_ERROR) {
         // remove local strong reference to Client before deleting the RecordTrack so that the
         // Client destructor is called by the TrackBase destructor with mLock held
@@ -1286,14 +1292,11 @@
         goto Exit;
     }
 
-    // return to handle to client
+    // return handle to client
     recordHandle = new RecordHandle(recordTrack);
-    lStatus = NO_ERROR;
 
 Exit:
-    if (status) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return recordHandle;
 }
 
@@ -1434,18 +1437,15 @@
                                            audio_output_flags_t flags,
                                            const audio_offload_info_t *offloadInfo)
 {
-    PlaybackThread *thread = NULL;
     struct audio_config config;
+    memset(&config, 0, sizeof(config));
     config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
     config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
     config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
-    if (offloadInfo) {
+    if (offloadInfo != NULL) {
         config.offload_info = *offloadInfo;
     }
 
-    audio_stream_out_t *outStream = NULL;
-    AudioHwDevice *outHwDev;
-
     ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
               module,
               (pDevices != NULL) ? *pDevices : 0,
@@ -1454,7 +1454,7 @@
               config.channel_mask,
               flags);
     ALOGV("openOutput(), offloadInfo %p version 0x%04x",
-          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version );
+          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version);
 
     if (pDevices == NULL || *pDevices == 0) {
         return 0;
@@ -1462,15 +1462,17 @@
 
     Mutex::Autolock _l(mLock);
 
-    outHwDev = findSuitableHwDev_l(module, *pDevices);
-    if (outHwDev == NULL)
+    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, *pDevices);
+    if (outHwDev == NULL) {
         return 0;
+    }
 
     audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
     audio_io_handle_t id = nextUniqueId();
 
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
 
+    audio_stream_out_t *outStream = NULL;
     status_t status = hwDevHal->open_output_stream(hwDevHal,
                                           id,
                                           *pDevices,
@@ -1490,6 +1492,7 @@
     if (status == NO_ERROR && outStream != NULL) {
         AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
 
+        PlaybackThread *thread;
         if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
             thread = new OffloadThread(this, output, id, *pDevices);
             ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
@@ -1657,18 +1660,15 @@
                                           audio_format_t *pFormat,
                                           audio_channel_mask_t *pChannelMask)
 {
-    status_t status;
-    RecordThread *thread = NULL;
     struct audio_config config;
+    memset(&config, 0, sizeof(config));
     config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
     config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
     config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
 
     uint32_t reqSamplingRate = config.sample_rate;
     audio_format_t reqFormat = config.format;
-    audio_channel_mask_t reqChannels = config.channel_mask;
-    audio_stream_in_t *inStream = NULL;
-    AudioHwDevice *inHwDev;
+    audio_channel_mask_t reqChannelMask = config.channel_mask;
 
     if (pDevices == NULL || *pDevices == 0) {
         return 0;
@@ -1676,14 +1676,16 @@
 
     Mutex::Autolock _l(mLock);
 
-    inHwDev = findSuitableHwDev_l(module, *pDevices);
-    if (inHwDev == NULL)
+    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, *pDevices);
+    if (inHwDev == NULL) {
         return 0;
+    }
 
     audio_hw_device_t *inHwHal = inHwDev->hwDevice();
     audio_io_handle_t id = nextUniqueId();
 
-    status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
+    audio_stream_in_t *inStream = NULL;
+    status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
                                         &inStream);
     ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, "
             "status %d",
@@ -1699,7 +1701,7 @@
     if (status == BAD_VALUE &&
         reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
         (config.sample_rate <= 2 * reqSamplingRate) &&
-        (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) {
+        (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannelMask) <= FCC_2)) {
         ALOGV("openInput() reopening with proposed sampling rate and channel mask");
         inStream = NULL;
         status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream);
@@ -1761,10 +1763,10 @@
         // Start record thread
         // RecordThread requires both input and output device indication to forward to audio
         // pre processing modules
-        thread = new RecordThread(this,
+        RecordThread *thread = new RecordThread(this,
                                   input,
                                   reqSamplingRate,
-                                  reqChannels,
+                                  reqChannelMask,
                                   id,
                                   primaryOutputDevice_l(),
                                   *pDevices
@@ -1781,7 +1783,7 @@
             *pFormat = config.format;
         }
         if (pChannelMask != NULL) {
-            *pChannelMask = reqChannels;
+            *pChannelMask = reqChannelMask;
         }
 
         // notify client processes of the new input creation
@@ -1927,7 +1929,7 @@
             }
         }
         if (!found) {
-            Mutex::Autolock _l (t->mLock);
+            Mutex::Autolock _l(t->mLock);
             // remove all effects from the chain
             while (ec->mEffects.size()) {
                 sp<EffectModule> effect = ec->mEffects[0];
@@ -2225,9 +2227,7 @@
     }
 
 Exit:
-    if (status != NULL) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return handle;
 }
 
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index b41d480..7f2f9ec 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -109,7 +109,7 @@
                                 pid_t tid,
                                 int *sessionId,
                                 String8& name,
-                                status_t *status);
+                                status_t *status /*non-NULL*/);
 
     virtual sp<IAudioRecord> openRecord(
                                 audio_io_handle_t input,
@@ -120,7 +120,7 @@
                                 IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
                                 int *sessionId,
-                                status_t *status);
+                                status_t *status /*non-NULL*/);
 
     virtual     uint32_t    sampleRate(audio_io_handle_t output) const;
     virtual     int         channelCount(audio_io_handle_t output) const;
@@ -209,7 +209,7 @@
                         int32_t priority,
                         audio_io_handle_t io,
                         int sessionId,
-                        status_t *status,
+                        status_t *status /*non-NULL*/,
                         int *id,
                         int *enabled);
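
In AudioFlinger.h the status out-parameters of createTrack(), openRecord() and createEffect() are now annotated /*non-NULL*/, and the matching Exit paths in AudioFlinger.cpp store through them unconditionally; tolerance for a NULL status pointer moves entirely to the Binder proxy side shown earlier. A minimal sketch of that documented precondition (names are hypothetical):

#include <cassert>
#include <cstddef>

typedef int status_t;

// Callee relies on the documented contract and writes the status unconditionally.
static int createSomething(status_t *status /*non-NULL*/) {
    assert(status != NULL);   // debug-only guard for the stated precondition
    *status = 0;
    return 42;                // pretend handle
}

int main() {
    status_t s = -1;
    int handle = createSomething(&s);
    return (handle == 42 && s == 0) ? 0 : 1;
}
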
 
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index df4e029..91aedbb 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -229,7 +229,7 @@
 
 void AudioMixer::invalidateState(uint32_t mask)
 {
-    if (mask) {
+    if (mask != 0) {
         mState.needsChanged |= mask;
         mState.hook = process__validate;
     }
@@ -709,7 +709,7 @@
 
     // select the processing hooks
     state->hook = process__nop;
-    if (countActiveTracks) {
+    if (countActiveTracks > 0) {
         if (resampling) {
             if (!state->outputTemp) {
                 state->outputTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -745,15 +745,14 @@
 
     // Now that the volume ramp has been done, set optimal state and
     // track hooks for subsequent mixer process
-    if (countActiveTracks) {
+    if (countActiveTracks > 0) {
         bool allMuted = true;
         uint32_t en = state->enabledTracks;
         while (en) {
             const int i = 31 - __builtin_clz(en);
             en &= ~(1<<i);
             track_t& t = state->tracks[i];
-            if (!t.doesResample() && t.volumeRL == 0)
-            {
+            if (!t.doesResample() && t.volumeRL == 0) {
                 t.needs |= NEEDS_MUTE_ENABLED;
                 t.hook = track__nop;
             } else {
@@ -1124,8 +1123,9 @@
         t.in = t.buffer.raw;
         // t.in == NULL can happen if the track was flushed just after having
         // been enabled for mixing.
-        if (t.in == NULL)
+        if (t.in == NULL) {
             enabledTracks &= ~(1<<i);
+        }
     }
 
     e0 = enabledTracks;
@@ -1162,7 +1162,7 @@
                 }
                 while (outFrames) {
                     size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
-                    if (inFrames) {
+                    if (inFrames > 0) {
                         t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames,
                                 state->resampleTemp, aux);
                         t.frameCount -= inFrames;
@@ -1445,8 +1445,9 @@
 int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
                                        int outputFrameIndex)
 {
-    if (AudioBufferProvider::kInvalidPTS == basePTS)
+    if (AudioBufferProvider::kInvalidPTS == basePTS) {
         return AudioBufferProvider::kInvalidPTS;
+    }
 
     return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate);
 }
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 4be292f..d4bf4eb 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -77,24 +77,28 @@
     mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
     /* instantiate the audio policy manager */
     rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
-    if (rc)
+    if (rc) {
         return;
+    }
 
     rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
     ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc));
-    if (rc)
+    if (rc) {
         return;
+    }
 
     rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this,
                                                &mpAudioPolicy);
     ALOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc));
-    if (rc)
+    if (rc) {
         return;
+    }
 
     rc = mpAudioPolicy->init_check(mpAudioPolicy);
     ALOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc));
-    if (rc)
+    if (rc) {
         return;
+    }
 
     ALOGI("Loaded audio policy from %s (%s)", module->name, module->id);
 
@@ -126,10 +130,12 @@
     }
     mInputs.clear();
 
-    if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL)
+    if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) {
         mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
-    if (mpAudioPolicyDev != NULL)
+    }
+    if (mpAudioPolicyDev != NULL) {
         audio_policy_dev_close(mpAudioPolicyDev);
+    }
 }
 
 status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
@@ -1113,11 +1119,13 @@
 int AudioPolicyService::startTone(audio_policy_tone_t tone,
                                   audio_stream_type_t stream)
 {
-    if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION)
+    if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) {
         ALOGE("startTone: illegal tone requested (%d)", tone);
-    if (stream != AUDIO_STREAM_VOICE_CALL)
+    }
+    if (stream != AUDIO_STREAM_VOICE_CALL) {
         ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
             tone);
+    }
     mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
                                           AUDIO_STREAM_VOICE_CALL);
     return 0;
@@ -1508,8 +1516,9 @@
 static int aps_close_output(void *service, audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
+    if (af == 0) {
         return PERMISSION_DENIED;
+    }
 
     return af->closeOutput(output);
 }
@@ -1572,8 +1581,9 @@
 static int aps_close_input(void *service, audio_io_handle_t input)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
+    if (af == 0) {
         return PERMISSION_DENIED;
+    }
 
     return af->closeInput(input);
 }
@@ -1582,8 +1592,9 @@
                                      audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
+    if (af == 0) {
         return PERMISSION_DENIED;
+    }
 
     return af->setStreamOutput(stream, output);
 }
@@ -1593,8 +1604,9 @@
                                 audio_io_handle_t dst_output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0)
+    if (af == 0) {
         return PERMISSION_DENIED;
+    }
 
     return af->moveEffects(session, src_output, dst_output);
 }
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index 2c3c719..323f1a4 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -339,8 +339,9 @@
             out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction);
             out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction);
             Advance(&inputIndex, &phaseFraction, phaseIncrement);
-            if (outputIndex == outputSampleCount)
+            if (outputIndex == outputSampleCount) {
                 break;
+            }
         }
 
         // process input samples
@@ -434,8 +435,9 @@
             out[outputIndex++] += vl * sample;
             out[outputIndex++] += vr * sample;
             Advance(&inputIndex, &phaseFraction, phaseIncrement);
-            if (outputIndex == outputSampleCount)
+            if (outputIndex == outputSampleCount) {
                 break;
+            }
         }
 
         // process input samples
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 18e59e9..1f9714b 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -66,8 +66,9 @@
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
         provider->getNextBuffer(&mBuffer, mPTS);
-        if (mBuffer.raw == NULL)
+        if (mBuffer.raw == NULL) {
             return;
+        }
         // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
     }
     int16_t *in = mBuffer.i16;
@@ -97,8 +98,9 @@
                 mBuffer.frameCount = inFrameCount;
                 provider->getNextBuffer(&mBuffer,
                                         calculateOutputPTS(outputIndex / 2));
-                if (mBuffer.raw == NULL)
+                if (mBuffer.raw == NULL) {
                     goto save_state;  // ugly, but efficient
+                }
                 in = mBuffer.i16;
                 // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
             }
@@ -132,8 +134,9 @@
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
         provider->getNextBuffer(&mBuffer, mPTS);
-        if (mBuffer.raw == NULL)
+        if (mBuffer.raw == NULL) {
             return;
+        }
         // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
     }
     int16_t *in = mBuffer.i16;
@@ -163,8 +166,9 @@
                 mBuffer.frameCount = inFrameCount;
                 provider->getNextBuffer(&mBuffer,
                                         calculateOutputPTS(outputIndex / 2));
-                if (mBuffer.raw == NULL)
+                if (mBuffer.raw == NULL) {
                     goto save_state;  // ugly, but efficient
+                }
                 // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
                 in = mBuffer.i16;
             }
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 0ca2107..c6e78a6 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -116,8 +116,9 @@
             continue;
         }
         // first non destroyed handle is considered in control
-        if (controlHandle == NULL)
+        if (controlHandle == NULL) {
             controlHandle = h;
+        }
         if (h->priority() <= priority) {
             break;
         }
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index f27ea17..7126e92 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -459,8 +459,9 @@
             }
 
             int64_t pts;
-            if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts)))
+            if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) {
                 pts = AudioBufferProvider::kInvalidPTS;
+            }
 
             // process() is CPU-bound
             mixer->process(pts);
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index f7ad6b1..be4b811 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -33,6 +33,7 @@
                                 int sessionId,
                                 IAudioFlinger::track_flags_t flags);
     virtual             ~Track();
+    virtual status_t    initCheck() const;
 
     static  void        appendDumpHeader(String8& result);
             void        dump(char* buffer, size_t size);
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index cd8f70c..a4e1810 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -58,5 +58,4 @@
     // releaseBuffer() not overridden
 
     bool                mOverflow;  // overflow on most recent attempt to fill client buffer
-    AudioRecordServerProxy* mAudioRecordServerProxy;
 };
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 7d0ecac..194d58e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -269,8 +269,8 @@
     :   Thread(false /*canCallJava*/),
         mType(type),
         mAudioFlinger(audioFlinger),
-        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and mFormat are
-        // set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
+        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize
+        // are set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
         mParamStatus(NO_ERROR),
         mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
         mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
@@ -296,6 +296,17 @@
     }
 }
 
+status_t AudioFlinger::ThreadBase::readyToRun()
+{
+    status_t status = initCheck();
+    if (status == NO_ERROR) {
+        ALOGI("AudioFlinger's thread %p ready to run", this);
+    } else {
+        ALOGE("No working audio driver found.");
+    }
+    return status;
+}
+
 void AudioFlinger::ThreadBase::exit()
 {
     ALOGV("ThreadBase::exit");
@@ -368,7 +379,13 @@
 
 void AudioFlinger::ThreadBase::processConfigEvents()
 {
-    mLock.lock();
+    Mutex::Autolock _l(mLock);
+    processConfigEvents_l();
+}
+
+// post condition: mConfigEvents.isEmpty()
+void AudioFlinger::ThreadBase::processConfigEvents_l()
+{
     while (!mConfigEvents.isEmpty()) {
         ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
         ConfigEvent *event = mConfigEvents[0];
@@ -376,32 +393,31 @@
         // release mLock before locking AudioFlinger mLock: lock order is always
         // AudioFlinger then ThreadBase to avoid cross deadlock
         mLock.unlock();
-        switch(event->type()) {
-            case CFG_EVENT_PRIO: {
-                PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
-                // FIXME Need to understand why this has be done asynchronously
-                int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(),
-                        true /*asynchronous*/);
-                if (err != 0) {
-                    ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; "
-                          "error %d",
-                          prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
-                }
-            } break;
-            case CFG_EVENT_IO: {
-                IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
-                mAudioFlinger->mLock.lock();
+        switch (event->type()) {
+        case CFG_EVENT_PRIO: {
+            PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
+            // FIXME Need to understand why this has to be done asynchronously
+            int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(),
+                    true /*asynchronous*/);
+            if (err != 0) {
+                ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
+                      prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
+            }
+        } break;
+        case CFG_EVENT_IO: {
+            IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
+            {
+                Mutex::Autolock _l(mAudioFlinger->mLock);
                 audioConfigChanged_l(ioEvent->event(), ioEvent->param());
-                mAudioFlinger->mLock.unlock();
-            } break;
-            default:
-                ALOGE("processConfigEvents() unknown event type %d", event->type());
-                break;
+            }
+        } break;
+        default:
+            ALOGE("processConfigEvents() unknown event type %d", event->type());
+            break;
         }
         delete event;
         mLock.lock();
     }
-    mLock.unlock();
 }
 
 void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args)
@@ -426,6 +442,8 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount);
     result.append(buffer);
+    snprintf(buffer, SIZE, "HAL buffer size: %u bytes\n", mBufferSize);
+    result.append(buffer);
     snprintf(buffer, SIZE, "Channel Count: %u\n", mChannelCount);
     result.append(buffer);
     snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask);
@@ -685,8 +703,7 @@
         int sessionId,
         effect_descriptor_t *desc,
         int *enabled,
-        status_t *status
-        )
+        status_t *status)
 {
     sp<EffectModule> effect;
     sp<EffectHandle> handle;
@@ -796,9 +813,7 @@
         handle.clear();
     }
 
-    if (status != NULL) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return handle;
 }
 
@@ -948,7 +963,7 @@
                                              type_t type)
     :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
         mNormalFrameCount(0), mMixBuffer(NULL),
-        mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+        mSuspended(0), mBytesWritten(0),
         // mStreamTypes[] initialized in constructor body
         mOutput(output),
         mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
@@ -1004,7 +1019,7 @@
 AudioFlinger::PlaybackThread::~PlaybackThread()
 {
     mAudioFlinger->unregisterWriter(mNBLogWriter);
-    delete [] mAllocMixBuffer;
+    delete[] mMixBuffer;
 }
 
 void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
@@ -1094,16 +1109,6 @@
 }
 
 // Thread virtuals
-status_t AudioFlinger::PlaybackThread::readyToRun()
-{
-    status_t status = initCheck();
-    if (status == NO_ERROR) {
-        ALOGI("AudioFlinger's thread %p ready to run", this);
-    } else {
-        ALOGE("No working audio driver found.");
-    }
-    return status;
-}
 
 void AudioFlinger::PlaybackThread::onFirstRef()
 {
@@ -1269,8 +1274,12 @@
             track = TimedTrack::create(this, client, streamType, sampleRate, format,
                     channelMask, frameCount, sharedBuffer, sessionId);
         }
-        if (track == 0 || track->getCblk() == NULL || track->name() < 0) {
-            lStatus = NO_MEMORY;
+
+        // new Track always returns non-NULL,
+        // but TimedTrack::create() is a factory that could fail by returning NULL
+        lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
+        if (lStatus != NO_ERROR) {
+            track.clear();
             goto Exit;
         }
 
@@ -1295,9 +1304,7 @@
     lStatus = NO_ERROR;
 
 Exit:
-    if (status) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return track;
 }
 
@@ -1582,7 +1589,8 @@
                 mFormat);
     }
     mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
-    mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
+    mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
+    mFrameCount = mBufferSize / mFrameSize;
     if (mFrameCount & 15) {
         ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames",
                 mFrameCount);
@@ -1638,11 +1646,11 @@
     ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount,
             mNormalFrameCount);
 
-    delete[] mAllocMixBuffer;
-    size_t align = (mFrameSize < sizeof(int16_t)) ? sizeof(int16_t) : mFrameSize;
-    mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1];
-    mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align);
-    memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize);
+    delete[] mMixBuffer;
+    size_t normalBufferSize = mNormalFrameCount * mFrameSize;
+    // For historical reasons mMixBuffer is int16_t[], but mFrameSize can be odd (such as 1)
+    mMixBuffer = new int16_t[(normalBufferSize + 1) >> 1];
+    memset(mMixBuffer, 0, normalBufferSize);
 
     // force reconfiguration of effect chains and engines to take new buffer size and audio
     // parameters into account
@@ -1776,7 +1784,7 @@
         const Vector< sp<Track> >& tracksToRemove)
 {
     size_t count = tracksToRemove.size();
-    if (count) {
+    if (count > 0) {
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
             if (!track->isOutputTrack()) {
@@ -2321,7 +2329,7 @@
 void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
 {
     size_t count = tracksToRemove.size();
-    if (count) {
+    if (count > 0) {
         for (size_t i=0 ; i<count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
             mActiveTracks.remove(track);
@@ -2696,7 +2704,7 @@
             sleepTime = idleSleepTime;
         }
     } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
-        memset (mMixBuffer, 0, mixBufferSize);
+        memset(mMixBuffer, 0, mixBufferSize);
         sleepTime = 0;
         ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED),
                 "anticipated start");
@@ -2922,7 +2930,7 @@
             // +1 for rounding and +1 for additional sample needed for interpolation
             desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1;
             // add frames already consumed but not yet released by the resampler
-            // because cblk->framesReady() will include these frames
+            // because mAudioTrackServerProxy->framesReady() will include these frames
             desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
             // the minimum track buffer size is normally twice the number of frames necessary
             // to fill one buffer and the resampler should not leave more than one buffer worth
@@ -3260,6 +3268,7 @@
             if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
                 status = BAD_VALUE;
             } else {
+                // no need to save value, since it's constant
                 reconfig = true;
             }
         }
@@ -3267,6 +3276,7 @@
             if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) {
                 status = BAD_VALUE;
             } else {
+                // no need to save value, since it's constant
                 reconfig = true;
             }
         }
@@ -4240,7 +4250,7 @@
                                          ) :
     ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD),
     mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
-    // mRsmpInIndex and mBufferSize set by readInputParameters()
+    // mRsmpInIndex set by readInputParameters()
     mReqChannelCount(popcount(channelMask)),
     mReqSampleRate(sampleRate)
     // mBytesRead is only meaningful while active, and so is cleared in start()
@@ -4268,18 +4278,9 @@
     run(mName, PRIORITY_URGENT_AUDIO);
 }
 
-status_t AudioFlinger::RecordThread::readyToRun()
-{
-    status_t status = initCheck();
-    ALOGW_IF(status != NO_ERROR,"RecordThread %p could not initialize", this);
-    return status;
-}
-
 bool AudioFlinger::RecordThread::threadLoop()
 {
     AudioBufferProvider::Buffer buffer;
-    sp<RecordTrack> activeTrack;
-    Vector< sp<EffectChain> > effectChains;
 
     nsecs_t lastWarning = 0;
 
@@ -4289,21 +4290,34 @@
     // used to verify we've read at least once before evaluating how many bytes were read
     bool readOnce = false;
 
-    // start recording
-    while (!exitPending()) {
+    // used to request a deferred sleep, to be executed later while mutex is unlocked
+    bool doSleep = false;
 
-        processConfigEvents();
+    // start recording
+    for (;;) {
+        sp<RecordTrack> activeTrack;
+        TrackBase::track_state activeTrackState;
+        Vector< sp<EffectChain> > effectChains;
+
+        // sleep with mutex unlocked
+        if (doSleep) {
+            doSleep = false;
+            usleep(kRecordThreadSleepUs);
+        }
 
         { // scope for mLock
             Mutex::Autolock _l(mLock);
-            checkForNewParameters_l();
-            if (mActiveTrack == 0 && mConfigEvents.isEmpty()) {
+            if (exitPending()) {
+                break;
+            }
+            processConfigEvents_l();
+            // return value 'reconfig' is currently unused
+            bool reconfig = checkForNewParameters_l();
+            // make a stable copy of mActiveTrack
+            activeTrack = mActiveTrack;
+            if (activeTrack == 0) {
                 standby();
-
-                if (exitPending()) {
-                    break;
-                }
-
+                // exitPending() can't become true here
                 releaseWakeLock_l();
                 ALOGV("RecordThread: loop stopping");
                 // go to sleep
@@ -4312,172 +4326,191 @@
                 acquireWakeLock_l();
                 continue;
             }
-            if (mActiveTrack != 0) {
-                if (mActiveTrack->isTerminated()) {
-                    removeTrack_l(mActiveTrack);
-                    mActiveTrack.clear();
-                } else if (mActiveTrack->mState == TrackBase::PAUSING) {
-                    standby();
+
+            if (activeTrack->isTerminated()) {
+                removeTrack_l(activeTrack);
+                mActiveTrack.clear();
+                continue;
+            }
+
+            activeTrackState = activeTrack->mState;
+            switch (activeTrackState) {
+            case TrackBase::PAUSING:
+                standby();
+                mActiveTrack.clear();
+                mStartStopCond.broadcast();
+                doSleep = true;
+                continue;
+
+            case TrackBase::RESUMING:
+                mStandby = false;
+                if (mReqChannelCount != activeTrack->channelCount()) {
                     mActiveTrack.clear();
                     mStartStopCond.broadcast();
-                } else if (mActiveTrack->mState == TrackBase::RESUMING) {
-                    if (mReqChannelCount != mActiveTrack->channelCount()) {
-                        mActiveTrack.clear();
-                        mStartStopCond.broadcast();
-                    } else if (readOnce) {
-                        // record start succeeds only if first read from audio input
-                        // succeeds
-                        if (mBytesRead >= 0) {
-                            mActiveTrack->mState = TrackBase::ACTIVE;
-                        } else {
-                            mActiveTrack.clear();
-                        }
-                        mStartStopCond.broadcast();
-                    }
-                    mStandby = false;
+                    continue;
                 }
+                if (readOnce) {
+                    mStartStopCond.broadcast();
+                    // record start succeeds only if first read from audio input succeeds
+                    if (mBytesRead < 0) {
+                        mActiveTrack.clear();
+                        continue;
+                    }
+                    activeTrack->mState = TrackBase::ACTIVE;
+                }
+                break;
+
+            case TrackBase::ACTIVE:
+                break;
+
+            case TrackBase::IDLE:
+                doSleep = true;
+                continue;
+
+            default:
+                LOG_FATAL("Unexpected activeTrackState %d", activeTrackState);
             }
+
             lockEffectChains_l(effectChains);
         }
 
-        if (mActiveTrack != 0) {
-            if (mActiveTrack->mState != TrackBase::ACTIVE &&
-                mActiveTrack->mState != TrackBase::RESUMING) {
-                unlockEffectChains(effectChains);
-                usleep(kRecordThreadSleepUs);
-                continue;
-            }
-            for (size_t i = 0; i < effectChains.size(); i ++) {
-                effectChains[i]->process_l();
-            }
+        // thread mutex is now unlocked, mActiveTrack unknown, activeTrack != 0, kept, immutable
+        // activeTrack->mState unknown, activeTrackState immutable and is ACTIVE or RESUMING
 
-            buffer.frameCount = mFrameCount;
-            status_t status = mActiveTrack->getNextBuffer(&buffer);
-            if (status == NO_ERROR) {
-                readOnce = true;
-                size_t framesOut = buffer.frameCount;
-                if (mResampler == NULL) {
-                    // no resampling
-                    while (framesOut) {
-                        size_t framesIn = mFrameCount - mRsmpInIndex;
-                        if (framesIn) {
-                            int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
-                            int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) *
-                                    mActiveTrack->mFrameSize;
-                            if (framesIn > framesOut)
-                                framesIn = framesOut;
-                            mRsmpInIndex += framesIn;
-                            framesOut -= framesIn;
-                            if (mChannelCount == mReqChannelCount) {
-                                memcpy(dst, src, framesIn * mFrameSize);
-                            } else {
-                                if (mChannelCount == 1) {
-                                    upmix_to_stereo_i16_from_mono_i16((int16_t *)dst,
-                                            (int16_t *)src, framesIn);
-                                } else {
-                                    downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
-                                            (int16_t *)src, framesIn);
-                                }
-                            }
-                        }
-                        if (framesOut && mFrameCount == mRsmpInIndex) {
-                            void *readInto;
-                            if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) {
-                                readInto = buffer.raw;
-                                framesOut = 0;
-                            } else {
-                                readInto = mRsmpInBuffer;
-                                mRsmpInIndex = 0;
-                            }
-                            mBytesRead = mInput->stream->read(mInput->stream, readInto,
-                                    mBufferSize);
-                            if (mBytesRead <= 0) {
-                                if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE))
-                                {
-                                    ALOGE("Error reading audio input");
-                                    // Force input into standby so that it tries to
-                                    // recover at next read attempt
-                                    inputStandBy();
-                                    usleep(kRecordThreadSleepUs);
-                                }
-                                mRsmpInIndex = mFrameCount;
-                                framesOut = 0;
-                                buffer.frameCount = 0;
-                            }
-#ifdef TEE_SINK
-                            else if (mTeeSink != 0) {
-                                (void) mTeeSink->write(readInto,
-                                        mBytesRead >> Format_frameBitShift(mTeeSink->format()));
-                            }
-#endif
-                        }
-                    }
-                } else {
-                    // resampling
-
-                    // resampler accumulates, but we only have one source track
-                    memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
-                    // alter output frame count as if we were expecting stereo samples
-                    if (mChannelCount == 1 && mReqChannelCount == 1) {
-                        framesOut >>= 1;
-                    }
-                    mResampler->resample(mRsmpOutBuffer, framesOut,
-                            this /* AudioBufferProvider* */);
-                    // ditherAndClamp() works as long as all buffers returned by
-                    // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true.
-                    if (mChannelCount == 2 && mReqChannelCount == 1) {
-                        // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t
-                        ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
-                        // the resampler always outputs stereo samples:
-                        // do post stereo to mono conversion
-                        downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer,
-                                framesOut);
-                    } else {
-                        ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
-                    }
-                    // now done with mRsmpOutBuffer
-
-                }
-                if (mFramestoDrop == 0) {
-                    mActiveTrack->releaseBuffer(&buffer);
-                } else {
-                    if (mFramestoDrop > 0) {
-                        mFramestoDrop -= buffer.frameCount;
-                        if (mFramestoDrop <= 0) {
-                            clearSyncStartEvent();
-                        }
-                    } else {
-                        mFramestoDrop += buffer.frameCount;
-                        if (mFramestoDrop >= 0 || mSyncStartEvent == 0 ||
-                                mSyncStartEvent->isCancelled()) {
-                            ALOGW("Synced record %s, session %d, trigger session %d",
-                                  (mFramestoDrop >= 0) ? "timed out" : "cancelled",
-                                  mActiveTrack->sessionId(),
-                                  (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0);
-                            clearSyncStartEvent();
-                        }
-                    }
-                }
-                mActiveTrack->clearOverflow();
-            }
-            // client isn't retrieving buffers fast enough
-            else {
-                if (!mActiveTrack->setOverflow()) {
-                    nsecs_t now = systemTime();
-                    if ((now - lastWarning) > kWarningThrottleNs) {
-                        ALOGW("RecordThread: buffer overflow");
-                        lastWarning = now;
-                    }
-                }
-                // Release the processor for a while before asking for a new buffer.
-                // This will give the application more chance to read from the buffer and
-                // clear the overflow.
-                usleep(kRecordThreadSleepUs);
-            }
+        for (size_t i = 0; i < effectChains.size(); i ++) {
+            // thread mutex is not locked, but effect chain is locked
+            effectChains[i]->process_l();
         }
+
+        buffer.frameCount = mFrameCount;
+        status_t status = activeTrack->getNextBuffer(&buffer);
+        if (status == NO_ERROR) {
+            readOnce = true;
+            size_t framesOut = buffer.frameCount;
+            if (mResampler == NULL) {
+                // no resampling
+                while (framesOut) {
+                    size_t framesIn = mFrameCount - mRsmpInIndex;
+                    if (framesIn > 0) {
+                        int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
+                        int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) *
+                                activeTrack->mFrameSize;
+                        if (framesIn > framesOut) {
+                            framesIn = framesOut;
+                        }
+                        mRsmpInIndex += framesIn;
+                        framesOut -= framesIn;
+                        if (mChannelCount == mReqChannelCount) {
+                            memcpy(dst, src, framesIn * mFrameSize);
+                        } else {
+                            if (mChannelCount == 1) {
+                                upmix_to_stereo_i16_from_mono_i16((int16_t *)dst,
+                                        (int16_t *)src, framesIn);
+                            } else {
+                                downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
+                                        (int16_t *)src, framesIn);
+                            }
+                        }
+                    }
+                    if (framesOut > 0 && mFrameCount == mRsmpInIndex) {
+                        void *readInto;
+                        if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) {
+                            readInto = buffer.raw;
+                            framesOut = 0;
+                        } else {
+                            readInto = mRsmpInBuffer;
+                            mRsmpInIndex = 0;
+                        }
+                        mBytesRead = mInput->stream->read(mInput->stream, readInto,
+                                mBufferSize);
+                        if (mBytesRead <= 0) {
+                            // TODO: verify that it's benign to use a stale track state
+                            if ((mBytesRead < 0) && (activeTrackState == TrackBase::ACTIVE))
+                            {
+                                ALOGE("Error reading audio input");
+                                // Force input into standby so that it tries to
+                                // recover at next read attempt
+                                inputStandBy();
+                                doSleep = true;
+                            }
+                            mRsmpInIndex = mFrameCount;
+                            framesOut = 0;
+                            buffer.frameCount = 0;
+                        }
+#ifdef TEE_SINK
+                        else if (mTeeSink != 0) {
+                            (void) mTeeSink->write(readInto,
+                                    mBytesRead >> Format_frameBitShift(mTeeSink->format()));
+                        }
+#endif
+                    }
+                }
+            } else {
+                // resampling
+
+                // resampler accumulates, but we only have one source track
+                memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
+                // alter output frame count as if we were expecting stereo samples
+                if (mChannelCount == 1 && mReqChannelCount == 1) {
+                    framesOut >>= 1;
+                }
+                mResampler->resample(mRsmpOutBuffer, framesOut,
+                        this /* AudioBufferProvider* */);
+                // ditherAndClamp() works as long as all buffers returned by
+                // activeTrack->getNextBuffer() are 32 bit aligned which should always be true.
+                if (mChannelCount == 2 && mReqChannelCount == 1) {
+                    // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t
+                    ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
+                    // the resampler always outputs stereo samples:
+                    // do post stereo to mono conversion
+                    downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer,
+                            framesOut);
+                } else {
+                    ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
+                }
+                // now done with mRsmpOutBuffer
+
+            }
+            if (mFramestoDrop == 0) {
+                activeTrack->releaseBuffer(&buffer);
+            } else {
+                if (mFramestoDrop > 0) {
+                    mFramestoDrop -= buffer.frameCount;
+                    if (mFramestoDrop <= 0) {
+                        clearSyncStartEvent();
+                    }
+                } else {
+                    mFramestoDrop += buffer.frameCount;
+                    if (mFramestoDrop >= 0 || mSyncStartEvent == 0 ||
+                            mSyncStartEvent->isCancelled()) {
+                        ALOGW("Synced record %s, session %d, trigger session %d",
+                              (mFramestoDrop >= 0) ? "timed out" : "cancelled",
+                              activeTrack->sessionId(),
+                              (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0);
+                        clearSyncStartEvent();
+                    }
+                }
+            }
+            activeTrack->clearOverflow();
+        }
+        // client isn't retrieving buffers fast enough
+        else {
+            if (!activeTrack->setOverflow()) {
+                nsecs_t now = systemTime();
+                if ((now - lastWarning) > kWarningThrottleNs) {
+                    ALOGW("RecordThread: buffer overflow");
+                    lastWarning = now;
+                }
+            }
+            // Release the processor for a while before asking for a new buffer.
+            // This will give the application more chance to read from the buffer and
+            // clear the overflow.
+            doSleep = true;
+        }
+
         // enable changes in effect chain
         unlockEffectChains(effectChains);
-        effectChains.clear();
+        // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end
     }
 
     standby();
@@ -4511,7 +4544,7 @@
     mInput->stream->common.standby(&mInput->stream->common);
 }
 
-sp<AudioFlinger::RecordThread::RecordTrack>  AudioFlinger::RecordThread::createRecordTrack_l(
+sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
         const sp<AudioFlinger::Client>& client,
         uint32_t sampleRate,
         audio_format_t format,
@@ -4590,8 +4623,9 @@
         track = new RecordTrack(this, client, sampleRate,
                       format, channelMask, frameCount, sessionId);
 
-        if (track->getCblk() == 0) {
-            lStatus = NO_MEMORY;
+        lStatus = track->initCheck();
+        if (lStatus != NO_ERROR) {
+            track.clear();
             goto Exit;
         }
         mTracks.add(track);
@@ -4612,9 +4646,7 @@
     lStatus = NO_ERROR;
 
 Exit:
-    if (status) {
-        *status = lStatus;
-    }
+    *status = lStatus;
     return track;
 }
 
@@ -4645,6 +4677,7 @@
     }
 
     {
+        // This section is a rendezvous between the binder thread executing start() and RecordThread
         AutoMutex lock(mLock);
         if (mActiveTrack != 0) {
             if (recordTrack != mActiveTrack.get()) {
@@ -4655,11 +4688,13 @@
             return status;
         }
 
+        // FIXME why? already set in constructor, 'STARTING_1' would be more accurate
         recordTrack->mState = TrackBase::IDLE;
         mActiveTrack = recordTrack;
         mLock.unlock();
         status_t status = AudioSystem::startInput(mId);
         mLock.lock();
+        // FIXME should verify that mActiveTrack is still == recordTrack
         if (status != NO_ERROR) {
             mActiveTrack.clear();
             clearSyncStartEvent();
@@ -4670,6 +4705,8 @@
         if (mResampler != NULL) {
             mResampler->reset();
         }
+        // FIXME hijacking a playback track state name which was intended for start after pause;
+        //       here 'STARTING_2' would be more accurate
         mActiveTrack->mState = TrackBase::RESUMING;
         // signal thread to start
         ALOGV("Signal record thread");
@@ -4680,6 +4717,7 @@
             status = INVALID_OPERATION;
             goto startError;
         }
+        // FIXME incorrect usage of wait: no explicit predicate or loop
         mStartStopCond.wait(mLock);
         if (mActiveTrack == 0) {
             ALOGV("Record failed to start");
@@ -4730,11 +4768,13 @@
     if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) {
         return false;
     }
+    // note that threadLoop may still be processing the track at this point [without lock]
     recordTrack->mState = TrackBase::PAUSING;
     // do not wait for mStartStopCond if exiting
     if (exitPending()) {
         return true;
     }
+    // FIXME incorrect usage of wait: no explicit predicate or loop
     mStartStopCond.wait(mLock);
     // if we have been restarted, recordTrack == mActiveTrack.get() here
     if (exitPending() || recordTrack != mActiveTrack.get()) {
@@ -4870,6 +4910,7 @@
                 // Force input into standby so that it tries to
                 // recover at next read attempt
                 inputStandBy();
+                // FIXME an awkward place to sleep, consider using doSleep when this is pulled up
                 usleep(kRecordThreadSleepUs);
             }
             buffer->raw = NULL;
@@ -4912,7 +4953,7 @@
         int value;
         audio_format_t reqFormat = mFormat;
         uint32_t reqSamplingRate = mReqSampleRate;
-        uint32_t reqChannelCount = mReqChannelCount;
+        audio_channel_mask_t reqChannelMask = audio_channel_in_mask_from_count(mReqChannelCount);
 
         if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
             reqSamplingRate = value;
@@ -4927,8 +4968,13 @@
             }
         }
         if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
-            reqChannelCount = popcount(value);
-            reconfig = true;
+            audio_channel_mask_t mask = (audio_channel_mask_t) value;
+            if (mask != AUDIO_CHANNEL_IN_MONO && mask != AUDIO_CHANNEL_IN_STEREO) {
+                status = BAD_VALUE;
+            } else {
+                reqChannelMask = mask;
+                reconfig = true;
+            }
         }
         if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
             // do not accept frame count changes if tracks are open as the track buffer
@@ -4977,6 +5023,7 @@
             }
             mAudioSource = (audio_source_t)value;
         }
+
         if (status == NO_ERROR) {
             status = mInput->stream->common.set_parameters(&mInput->stream->common,
                     keyValuePair.string());
@@ -4993,7 +5040,8 @@
                             <= (2 * reqSamplingRate)) &&
                     popcount(mInput->stream->common.get_channels(&mInput->stream->common))
                             <= FCC_2 &&
-                    (reqChannelCount <= FCC_2)) {
+                    (reqChannelMask == AUDIO_CHANNEL_IN_MONO ||
+                            reqChannelMask == AUDIO_CHANNEL_IN_STEREO)) {
                     status = NO_ERROR;
                 }
                 if (status == NO_ERROR) {
@@ -5070,8 +5118,7 @@
     mFrameCount = mBufferSize / mFrameSize;
     mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
 
-    if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2)
-    {
+    if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) {
         int channelCount;
         // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid
         // stereo to mono post process as the resampler always outputs stereo.
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 3fe470c..7999436 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -36,6 +36,8 @@
                 audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
     virtual             ~ThreadBase();
 
+    virtual status_t    readyToRun();
+
     void dumpBase(int fd, const Vector<String16>& args);
     void dumpEffectChains(int fd, const Vector<String16>& args);
 
@@ -141,6 +143,7 @@
                 void        sendIoConfigEvent_l(int event, int param = 0);
                 void        sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
                 void        processConfigEvents();
+                void        processConfigEvents_l();
 
                 // see note at declaration of mStandby, mOutDevice and mInDevice
                 bool        standby() const { return mStandby; }
@@ -156,7 +159,7 @@
                                     int sessionId,
                                     effect_descriptor_t *desc,
                                     int *enabled,
-                                    status_t *status);
+                                    status_t *status /*non-NULL*/);
                 void disconnectEffect(const sp< EffectModule>& effect,
                                       EffectHandle *handle,
                                       bool unpinIfLast);
@@ -270,6 +273,7 @@
                 uint32_t                mChannelCount;
                 size_t                  mFrameSize;
                 audio_format_t          mFormat;
+                size_t                  mBufferSize;       // HAL buffer size for read() or write()
 
                 // Parameter sequence by client: binder thread calling setParameters():
                 //  1. Lock mLock
@@ -353,7 +357,6 @@
                 void        dump(int fd, const Vector<String16>& args);
 
     // Thread virtuals
-    virtual     status_t    readyToRun();
     virtual     bool        threadLoop();
 
     // RefBase
@@ -419,7 +422,7 @@
                                 int sessionId,
                                 IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
-                                status_t *status);
+                                status_t *status /*non-NULL*/);
 
                 AudioStreamOut* getOutput() const;
                 AudioStreamOut* clearOutput();
@@ -471,7 +474,6 @@
     size_t                          mNormalFrameCount;  // normal mixer and effects
 
     int16_t*                        mMixBuffer;         // frame size aligned mix buffer
-    int8_t*                         mAllocMixBuffer;    // mixer buffer allocation address
 
     // suspend count, > 0 means suspended.  While suspended, the thread continues to pull from
     // tracks and mix, but doesn't write to HAL.  A2DP and SCO HAL implementations can't handle
@@ -854,12 +856,12 @@
 
     // Thread virtuals
     virtual bool        threadLoop();
-    virtual status_t    readyToRun();
 
     // RefBase
     virtual void        onFirstRef();
 
     virtual status_t    initCheck() const { return (mInput == NULL) ? NO_INIT : NO_ERROR; }
+
             sp<AudioFlinger::RecordThread::RecordTrack>  createRecordTrack_l(
                     const sp<AudioFlinger::Client>& client,
                     uint32_t sampleRate,
@@ -869,7 +871,7 @@
                     int sessionId,
                     IAudioFlinger::track_flags_t *flags,
                     pid_t tid,
-                    status_t *status);
+                    status_t *status /*non-NULL*/);
 
             status_t    start(RecordTrack* recordTrack,
                               AudioSystem::sync_event_t event,
@@ -912,13 +914,13 @@
             bool        hasFastRecorder() const { return false; }
 
 private:
-            void clearSyncStartEvent();
+            void    clearSyncStartEvent();
 
             // Enter standby if not already in standby, and set mStandby flag
-            void standby();
+            void    standby();
 
             // Call the HAL standby method unconditionally, and don't change mStandby flag
-            void inputStandBy();
+            void    inputStandBy();
 
             AudioStreamIn                       *mInput;
             SortedVector < sp<RecordTrack> >    mTracks;
@@ -933,7 +935,6 @@
             int32_t                             *mRsmpOutBuffer;
             int16_t                             *mRsmpInBuffer; // [mFrameCount * mChannelCount]
             size_t                              mRsmpInIndex;
-            size_t                              mBufferSize;    // stream buffer size for read()
             const uint32_t                      mReqChannelCount;
             const uint32_t                      mReqSampleRate;
             ssize_t                             mBytesRead;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 523e4b2..00a91b7 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -47,6 +47,7 @@
                                 int sessionId,
                                 bool isOut);
     virtual             ~TrackBase();
+    virtual status_t    initCheck() const { return getCblk() != 0 ? NO_ERROR : NO_MEMORY; }
 
     virtual status_t    start(AudioSystem::sync_event_t event,
                              int triggerSession) = 0;
@@ -76,15 +77,6 @@
 
     virtual uint32_t sampleRate() const { return mSampleRate; }
 
-    // Return a pointer to the start of a contiguous slice of the track buffer.
-    // Parameter 'offset' is the requested start position, expressed in
-    // monotonically increasing frame units relative to the track epoch.
-    // Parameter 'frames' is the requested length, also in frame units.
-    // Always returns non-NULL.  It is the caller's responsibility to
-    // verify that this will be successful; the result of calling this
-    // function with invalid 'offset' or 'frames' is undefined.
-    void* getBuffer(uint32_t offset, uint32_t frames) const;
-
     bool isStopped() const {
         return (mState == STOPPED || mState == FLUSHED);
     }
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 57aad1e..4055153 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -376,6 +376,15 @@
     }
 }
 
+status_t AudioFlinger::PlaybackThread::Track::initCheck() const
+{
+    status_t status = TrackBase::initCheck();
+    if (status == NO_ERROR && mName < 0) {
+        status = NO_MEMORY;
+    }
+    return status;
+}
+
 void AudioFlinger::PlaybackThread::Track::destroy()
 {
     // NOTE: destroyTrack_l() can remove a strong reference to this Track
@@ -999,15 +1008,17 @@
 
         mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
                                               "AudioFlingerTimed");
-        if (mTimedMemoryDealer == NULL)
+        if (mTimedMemoryDealer == NULL) {
             return NO_MEMORY;
+        }
     }
 
     sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
     if (newBuffer == NULL) {
         newBuffer = mTimedMemoryDealer->allocate(size);
-        if (newBuffer == NULL)
+        if (newBuffer == NULL) {
             return NO_MEMORY;
+        }
     }
 
     *buffer = newBuffer;
@@ -1716,9 +1727,7 @@
 {
     ALOGV("RecordTrack constructor");
     if (mCblk != NULL) {
-        mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
-                mFrameSize);
-        mServerProxy = mAudioRecordServerProxy;
+        mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize);
     }
 }