Merge "point to new repo hooks for mainline uploads" into rvc-dev
diff --git a/apex/testing/Android.bp b/apex/testing/Android.bp
index 376d3e4..a04ab3f 100644
--- a/apex/testing/Android.bp
+++ b/apex/testing/Android.bp
@@ -17,6 +17,7 @@
     manifest: "test_manifest.json",
     file_contexts: ":com.android.media-file_contexts",
     defaults: ["com.android.media-defaults"],
+    prebuilts: ["sdkinfo_45"],
     installable: false,
 }
 
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index 9cbfdb0..755051c 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -49,6 +49,9 @@
         }
         errorPhysicalCameraId = cameraId;
     }
+    parcel->readInt64(&lastCompletedRegularFrameNumber);
+    parcel->readInt64(&lastCompletedReprocessFrameNumber);
+    parcel->readInt64(&lastCompletedZslFrameNumber);
 
     return OK;
 }
@@ -76,6 +79,9 @@
     } else {
         parcel->writeBool(false);
     }
+    parcel->writeInt64(lastCompletedRegularFrameNumber);
+    parcel->writeInt64(lastCompletedReprocessFrameNumber);
+    parcel->writeInt64(lastCompletedZslFrameNumber);
 
     return OK;
 }
diff --git a/camera/include/camera/CaptureResult.h b/camera/include/camera/CaptureResult.h
index dc3d282..f163c1e 100644
--- a/camera/include/camera/CaptureResult.h
+++ b/camera/include/camera/CaptureResult.h
@@ -76,6 +76,34 @@
      */
     String16  errorPhysicalCameraId;
 
+    // The last completed frame numbers shouldn't be checked in onResultReceived() and notifyError()
+    // because output buffers could still arrive after onResultReceived() and
+    // notifyError(). Given this constraint, we check them on each
+    // onCaptureStarted(), and if there is no further onCaptureStarted(),
+    // we check onDeviceIdle() to clear out all pending frame numbers.
+
+    /**
+     * The latest regular request frameNumber for which all buffers and capture result have been
+     * returned or notified as a BUFFER_ERROR/RESULT_ERROR/REQUEST_ERROR. -1 if
+     * none has completed.
+     */
+    int64_t lastCompletedRegularFrameNumber;
+
+    /**
+     * The latest reprocess request frameNumber for which all buffers and capture result have been
+     * returned or notified as a BUFFER_ERROR/RESULT_ERROR/REQUEST_ERROR. -1 if
+     * none has completed.
+     */
+    int64_t lastCompletedReprocessFrameNumber;
+
+    /**
+     * The latest ZSL request frameNumber for which all buffers and capture result have been
+     * returned or notified as a BUFFER_ERROR/RESULT_ERROR/REQUEST_ERROR. -1 if
+     * none has completed.
+     */
+    int64_t lastCompletedZslFrameNumber;
+
+
     /**
      * Constructor initializes object as invalid by setting requestId to be -1.
      */
@@ -87,7 +115,10 @@
           frameNumber(0),
           partialResultCount(0),
           errorStreamId(-1),
-          errorPhysicalCameraId() {
+          errorPhysicalCameraId(),
+          lastCompletedRegularFrameNumber(-1),
+          lastCompletedReprocessFrameNumber(-1),
+          lastCompletedZslFrameNumber(-1) {
     }
 
     /**
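
Note: a minimal standalone sketch of the semantics documented in the new header comments
above, assuming nothing beyond those comments. The three counters start at -1 and only move
forward as requests of the matching type fully complete (all buffers plus the capture result,
or the corresponding error). The RequestType enum and the helper are hypothetical, for
illustration only.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    // Hypothetical mirror of the three new CaptureResultExtras fields.
    struct LastCompletedFrameNumbers {
        int64_t regular   = -1;  // -1 means no request of this type has fully completed yet
        int64_t reprocess = -1;
        int64_t zsl       = -1;
    };

    enum class RequestType { kRegular, kReprocess, kZsl };

    // Called once all buffers and the capture result (or a BUFFER_ERROR/RESULT_ERROR/
    // REQUEST_ERROR) for frameNumber have been returned.
    void markFullyCompleted(LastCompletedFrameNumbers& lc, RequestType type, int64_t frameNumber) {
        switch (type) {
            case RequestType::kRegular:   lc.regular   = std::max(lc.regular, frameNumber);   break;
            case RequestType::kReprocess: lc.reprocess = std::max(lc.reprocess, frameNumber); break;
            case RequestType::kZsl:       lc.zsl       = std::max(lc.zsl, frameNumber);       break;
        }
    }

    int main() {
        LastCompletedFrameNumbers lc;
        markFullyCompleted(lc, RequestType::kRegular, 10);
        markFullyCompleted(lc, RequestType::kRegular, 12);
        std::cout << lc.regular << " " << lc.reprocess << " " << lc.zsl << "\n";  // 12 -1 -1
        return 0;
    }
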
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 0d7180a..c15c5a5 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -1336,56 +1336,97 @@
 void
 CameraDevice::checkAndFireSequenceCompleteLocked() {
     int64_t completedFrameNumber = mFrameNumberTracker.getCompletedFrameNumber();
-    //std::map<int, int64_t> mSequenceLastFrameNumberMap;
     auto it = mSequenceLastFrameNumberMap.begin();
     while (it != mSequenceLastFrameNumberMap.end()) {
         int sequenceId = it->first;
-        int64_t lastFrameNumber = it->second;
-        bool seqCompleted = false;
-        bool hasCallback  = true;
+        int64_t lastFrameNumber = it->second.lastFrameNumber;
+        bool hasCallback = true;
+
+        if (mRemote == nullptr) {
+            ALOGW("Camera %s closed while checking sequence complete", getId());
+            return;
+        }
+        ALOGV("%s: seq %d's last frame number %" PRId64 ", completed %" PRId64,
+                __FUNCTION__, sequenceId, lastFrameNumber, completedFrameNumber);
+        if (!it->second.isSequenceCompleted) {
+            // Check if there is a callback for this sequence.
+            // This should not happen because we always register a callback (possibly with nullptr inside).
+            if (mSequenceCallbackMap.count(sequenceId) == 0) {
+                ALOGW("No callback found for sequenceId %d", sequenceId);
+                hasCallback = false;
+            }
+
+            if (lastFrameNumber <= completedFrameNumber) {
+                ALOGV("Mark sequenceId %d as sequence completed", sequenceId);
+                it->second.isSequenceCompleted = true;
+            }
+
+            if (it->second.isSequenceCompleted && hasCallback) {
+                auto cbIt = mSequenceCallbackMap.find(sequenceId);
+                CallbackHolder cbh = cbIt->second;
+
+                // send seq complete callback
+                sp<AMessage> msg = new AMessage(kWhatCaptureSeqEnd, mHandler);
+                msg->setPointer(kContextKey, cbh.mContext);
+                msg->setObject(kSessionSpKey, cbh.mSession);
+                msg->setPointer(kCallbackFpKey, (void*) cbh.mOnCaptureSequenceCompleted);
+                msg->setInt32(kSequenceIdKey, sequenceId);
+                msg->setInt64(kFrameNumberKey, lastFrameNumber);
+
+                // Clear the session sp before we send out the message.
+                // This guards against the rare case where the message is processed
+                // before cbh goes out of scope, which would cause us to call the session
+                // destructor while holding the device lock.
+                cbh.mSession.clear();
+                postSessionMsgAndCleanup(msg);
+            }
+        }
+
+        if (it->second.isSequenceCompleted && it->second.isInflightCompleted) {
+            if (mSequenceCallbackMap.find(sequenceId) != mSequenceCallbackMap.end()) {
+                mSequenceCallbackMap.erase(sequenceId);
+            }
+            it = mSequenceLastFrameNumberMap.erase(it);
+            ALOGV("%s: Remove holder for sequenceId %d", __FUNCTION__, sequenceId);
+        } else {
+            ++it;
+        }
+    }
+}
+
+void
+CameraDevice::removeCompletedCallbackHolderLocked(int64_t lastCompletedRegularFrameNumber) {
+    auto it = mSequenceLastFrameNumberMap.begin();
+    while (it != mSequenceLastFrameNumberMap.end()) {
+        int sequenceId = it->first;
+        int64_t lastFrameNumber = it->second.lastFrameNumber;
 
         if (mRemote == nullptr) {
             ALOGW("Camera %s closed while checking sequence complete", getId());
             return;
         }
 
-        // Check if there is callback for this sequence
-        // This should not happen because we always register callback (with nullptr inside)
-        if (mSequenceCallbackMap.count(sequenceId) == 0) {
-            ALOGW("No callback found for sequenceId %d", sequenceId);
-            hasCallback = false;
-        }
+        ALOGV("%s: seq %d's last frame number %" PRId64
+                ", completed inflight frame number %" PRId64,
+                __FUNCTION__, sequenceId, lastFrameNumber,
+                lastCompletedRegularFrameNumber);
+        if (lastFrameNumber <= lastCompletedRegularFrameNumber) {
+            if (it->second.isSequenceCompleted) {
+                // Check if there is a callback for this sequence.
+                // This should not happen because we always register a callback (possibly with nullptr inside).
+                if (mSequenceCallbackMap.count(sequenceId) == 0) {
+                    ALOGW("No callback found for sequenceId %d", sequenceId);
+                } else {
+                    mSequenceCallbackMap.erase(sequenceId);
+                }
 
-        if (lastFrameNumber <= completedFrameNumber) {
-            ALOGV("seq %d reached last frame %" PRId64 ", completed %" PRId64,
-                    sequenceId, lastFrameNumber, completedFrameNumber);
-            seqCompleted = true;
-        }
-
-        if (seqCompleted && hasCallback) {
-            // remove callback holder from callback map
-            auto cbIt = mSequenceCallbackMap.find(sequenceId);
-            CallbackHolder cbh = cbIt->second;
-            mSequenceCallbackMap.erase(cbIt);
-            // send seq complete callback
-            sp<AMessage> msg = new AMessage(kWhatCaptureSeqEnd, mHandler);
-            msg->setPointer(kContextKey, cbh.mContext);
-            msg->setObject(kSessionSpKey, cbh.mSession);
-            msg->setPointer(kCallbackFpKey, (void*) cbh.mOnCaptureSequenceCompleted);
-            msg->setInt32(kSequenceIdKey, sequenceId);
-            msg->setInt64(kFrameNumberKey, lastFrameNumber);
-
-            // Clear the session sp before we send out the message
-            // This will guarantee the rare case where the message is processed
-            // before cbh goes out of scope and causing we call the session
-            // destructor while holding device lock
-            cbh.mSession.clear();
-            postSessionMsgAndCleanup(msg);
-        }
-
-        // No need to track sequence complete if there is no callback registered
-        if (seqCompleted || !hasCallback) {
-            it = mSequenceLastFrameNumberMap.erase(it);
+                it = mSequenceLastFrameNumberMap.erase(it);
+                ALOGV("%s: Remove holder for sequenceId %d", __FUNCTION__, sequenceId);
+            } else {
+                ALOGV("Mark sequenceId %d as inflight completed", sequenceId);
+                it->second.isInflightCompleted = true;
+                ++it;
+            }
         } else {
             ++it;
         }
@@ -1480,6 +1521,9 @@
         return ret;
     }
 
+    dev->removeCompletedCallbackHolderLocked(
+             std::numeric_limits<int64_t>::max()/*lastCompletedRegularFrameNumber*/);
+
     if (dev->mIdle) {
         // Already in idle state. Possibly other thread did waitUntilIdle
         return ret;
@@ -1522,6 +1566,9 @@
         return ret;
     }
 
+    dev->removeCompletedCallbackHolderLocked(
+            resultExtras.lastCompletedRegularFrameNumber);
+
     int sequenceId = resultExtras.requestId;
     int32_t burstId = resultExtras.burstId;
 
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 6c2ceb3..d937865 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -267,8 +267,23 @@
     static const int REQUEST_ID_NONE = -1;
     int mRepeatingSequenceId = REQUEST_ID_NONE;
 
-    // sequence id -> last frame number map
-    std::map<int, int64_t> mSequenceLastFrameNumberMap;
+    // sequence id -> last frame number holder map
+    struct RequestLastFrameNumberHolder {
+        int64_t lastFrameNumber;
+        // Whether the current sequence is completed (all capture results have
+        // been generated). The entry may have this set to true but still remain
+        // in the map if not all inflight requests in the sequence have
+        // completed.
+        bool isSequenceCompleted = false;
+        // Whether all inflight requests in the sequence have completed (all
+        // capture results and buffers have been generated). The entry may have
+        // this set to true but still remain in the map if the capture results
+        // haven't been delivered to the app yet.
+        bool isInflightCompleted = false;
+        RequestLastFrameNumberHolder(int64_t lastFN) :
+                lastFrameNumber(lastFN) {}
+    };
+    std::map<int, RequestLastFrameNumberHolder> mSequenceLastFrameNumberMap;
 
     struct CallbackHolder {
         CallbackHolder(sp<ACameraCaptureSession>          session,
@@ -338,6 +353,7 @@
 
     void checkRepeatingSequenceCompleteLocked(const int sequenceId, const int64_t lastFrameNumber);
     void checkAndFireSequenceCompleteLocked();
+    void removeCompletedCallbackHolderLocked(int64_t lastCompletedRegularFrameNumber);
 
     // Misc variables
     int32_t mShadingMapSize[2];   // const after constructor
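
Note: a simplified model of how the two flags in RequestLastFrameNumberHolder interact, not
the NDK implementation itself. An entry is erased only once the sequence is completed on both
axes: capture results delivered (isSequenceCompleted, driven by
checkAndFireSequenceCompleteLocked) and inflight work drained (isInflightCompleted, driven by
removeCompletedCallbackHolderLocked), in whichever order those two observations happen.

    #include <cstdint>
    #include <iostream>
    #include <map>

    // Simplified stand-in for RequestLastFrameNumberHolder.
    struct Holder {
        int64_t lastFrameNumber;
        bool isSequenceCompleted = false;   // capture results delivered
        bool isInflightCompleted = false;   // all inflight buffers/results drained
    };

    // Roughly models checkAndFireSequenceCompleteLocked(): mark sequences whose results
    // are all in, then erase entries that are completed on both axes.
    void onResultsCompleted(std::map<int, Holder>& m, int64_t completedFrameNumber) {
        for (auto it = m.begin(); it != m.end();) {
            if (it->second.lastFrameNumber <= completedFrameNumber) {
                it->second.isSequenceCompleted = true;  // the completion callback fires here
            }
            if (it->second.isSequenceCompleted && it->second.isInflightCompleted) {
                it = m.erase(it);
            } else {
                ++it;
            }
        }
    }

    // Roughly models removeCompletedCallbackHolderLocked(): mark sequences whose inflight
    // work has drained, then erase entries that are completed on both axes.
    void onInflightCompleted(std::map<int, Holder>& m, int64_t lastCompletedFrameNumber) {
        for (auto it = m.begin(); it != m.end();) {
            if (it->second.lastFrameNumber <= lastCompletedFrameNumber) {
                if (it->second.isSequenceCompleted) {
                    it = m.erase(it);
                    continue;
                }
                it->second.isInflightCompleted = true;
            }
            ++it;
        }
    }

    int main() {
        std::map<int, Holder> m{{1, Holder{20}}, {2, Holder{35}}};
        onInflightCompleted(m, 20);  // seq 1: buffers drained, results not yet delivered
        onResultsCompleted(m, 20);   // seq 1: results delivered -> entry erased
        std::cout << m.size() << "\n";  // 1 (only seq 2 remains)
        return 0;
    }
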
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 8763c62..7d78571 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -6054,10 +6054,11 @@
      * </ul></p>
      *
      * <p>The accuracy of the frame timestamp synchronization determines the physical cameras'
-     * ability to start exposure at the same time. If the sensorSyncType is CALIBRATED,
-     * the physical camera sensors usually run in master-slave mode so that their shutter
-     * time is synchronized. For APPROXIMATE sensorSyncType, the camera sensors usually run in
-     * master-master mode, and there could be offset between their start of exposure.</p>
+     * ability to start exposure at the same time. If the sensorSyncType is CALIBRATED, the
+     * physical camera sensors usually run in leader/follower mode where one sensor generates a
+     * timing signal for the other, so that their shutter time is synchronized. For APPROXIMATE
+     * sensorSyncType, the camera sensors usually run in leader/leader mode, where both sensors
+     * use their own timing generator, and there could be offset between their start of exposure.</p>
      * <p>In both cases, all images generated for a particular capture request still carry the same
      * timestamps, so that they can be used to look up the matching frame number and
      * onCaptureStarted callback.</p>
@@ -8088,19 +8089,35 @@
      * <li>ACAMERA_LENS_POSE_REFERENCE</li>
      * <li>ACAMERA_LENS_DISTORTION</li>
      * </ul>
-     * <p>The field of view of all non-RAW physical streams must be the same or as close as
-     * possible to that of non-RAW logical streams. If the requested FOV is outside of the
-     * range supported by the physical camera, the physical stream for that physical camera
-     * will use either the maximum or minimum scaler crop region, depending on which one is
-     * closer to the requested FOV. For example, for a logical camera with wide-tele lens
-     * configuration where the wide lens is the default, if the logical camera's crop region
-     * is set to maximum, the physical stream for the tele lens will be configured to its
-     * maximum crop region. On the other hand, if the logical camera has a normal-wide lens
-     * configuration where the normal lens is the default, when the logical camera's crop
-     * region is set to maximum, the FOV of the logical streams will be that of the normal
-     * lens. The FOV of the physical streams for the wide lens will be the same as the
-     * logical stream, by making the crop region smaller than its active array size to
-     * compensate for the smaller focal length.</p>
+     * <p>The field of view of non-RAW physical streams must not be smaller than that of the
+     * non-RAW logical streams, or the maximum field-of-view of the physical camera,
+     * whichever is smaller. The application should check the physical capture result
+     * metadata for how the physical streams are cropped or zoomed. More specifically, given
+     * the physical camera result metadata, the effective horizontal field-of-view of the
+     * physical camera is:</p>
+     * <pre><code>fov = 2 * atan2(cropW * sensorW / (2 * zoomRatio * activeArrayW), focalLength)
+     * </code></pre>
+     * <p>where the equation parameters are the physical camera's crop region width, physical
+     * sensor width, zoom ratio, active array width, and focal length respectively. Typically
+     * the physical stream of active physical camera has the same field-of-view as the
+     * logical streams. However, the same may not be true for physical streams from
+     * non-active physical cameras. For example, if the logical camera has a wide-ultrawide
+     * configuration where the wide lens is the default, when the crop region is set to the
+     * logical camera's active array size, (and the zoom ratio set to 1.0 starting from
+     * Android 11), a physical stream for the ultrawide camera may prefer outputting images
+     * with larger field-of-view than that of the wide camera for better stereo matching
+     * margin or more robust motion tracking. At the same time, the physical non-RAW streams'
+     * field of view must not be smaller than the requested crop region and zoom ratio, as
+     * long as it's within the physical lens' capability. For example, for a logical camera
+     * with wide-tele lens configuration where the wide lens is the default, if the logical
+     * camera's crop region is set to maximum size, and zoom ratio set to 1.0, the physical
+     * stream for the tele lens will be configured to its maximum size crop region (no zoom).</p>
+     * <p><em>Deprecated:</em> Prior to Android 11, the field of view of all non-RAW physical streams
+     * cannot be larger than that of non-RAW logical streams. If the logical camera has a
+     * wide-ultrawide lens configuration where the wide lens is the default, when the logical
+     * camera's crop region is set to maximum size, the FOV of the physical streams for the
+     * ultrawide lens will be the same as the logical stream, by making the crop region
+     * smaller than its active array size to compensate for the smaller focal length.</p>
      * <p>Even if the underlying physical cameras have different RAW characteristics (such as
      * size or CFA pattern), a logical camera can still advertise RAW capability. In this
      * case, when the application configures a RAW stream, the camera device will make sure
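
Note: the field-of-view formula quoted above can be exercised directly. The numbers below are
hypothetical, chosen only to show the arithmetic: crop region width and active array width in
pixels, physical sensor width and focal length in millimeters.

    #include <cmath>
    #include <cstdio>

    // Effective horizontal field of view, per the formula in the docs above.
    static double horizontalFovRadians(double cropW, double sensorW, double zoomRatio,
                                       double activeArrayW, double focalLength) {
        return 2.0 * std::atan2(cropW * sensorW / (2.0 * zoomRatio * activeArrayW), focalLength);
    }

    int main() {
        const double kPi = 3.14159265358979323846;
        const double cropW = 4000.0;         // crop region width (pixels)
        const double activeArrayW = 4000.0;  // active array width (pixels)
        const double sensorW = 6.4;          // physical sensor width (mm)
        const double focalLength = 4.3;      // lens focal length (mm)

        double fov1x = horizontalFovRadians(cropW, sensorW, 1.0, activeArrayW, focalLength);
        double fov2x = horizontalFovRadians(cropW, sensorW, 2.0, activeArrayW, focalLength);
        std::printf("FOV at zoomRatio 1.0: %.1f deg, at 2.0: %.1f deg\n",
                    fov1x * 180.0 / kPi, fov2x * 180.0 / kPi);  // ~73.3 deg and ~40.8 deg
        return 0;
    }
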
diff --git a/media/codec2/components/aac/C2SoftAacDec.cpp b/media/codec2/components/aac/C2SoftAacDec.cpp
index f39620e..9884733 100644
--- a/media/codec2/components/aac/C2SoftAacDec.cpp
+++ b/media/codec2/components/aac/C2SoftAacDec.cpp
@@ -89,11 +89,18 @@
         addParameter(
                 DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
-                .withFields({C2F(mChannelCount, value).inRange(1, 8)})
+                .withFields({C2F(mChannelCount, value).inRange(1, MAX_CHANNEL_COUNT)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
+                DefineParam(mMaxChannelCount, C2_PARAMKEY_MAX_CHANNEL_COUNT)
+                .withDefault(new C2StreamMaxChannelCountInfo::input(0u, MAX_CHANNEL_COUNT))
+                .withFields({C2F(mMaxChannelCount, value).inRange(1, MAX_CHANNEL_COUNT)})
+                .withSetter(Setter<decltype(*mMaxChannelCount)>::StrictValueWithNoDeps)
+                .build());
+
+        addParameter(
                 DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
                 .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(8000, 960000)})
@@ -225,6 +232,7 @@
     int32_t getDrcAttenuationFactor() const { return mDrcAttenuationFactor->value * 127. + 0.5; }
     int32_t getDrcEffectType() const { return mDrcEffectType->value; }
     int32_t getDrcAlbumMode() const { return mDrcAlbumMode->value; }
+    u_int32_t getMaxChannelCount() const { return mMaxChannelCount->value; }
     int32_t getDrcOutputLoudness() const { return (mDrcOutputLoudness->value <= 0 ? -mDrcOutputLoudness->value * 4. + 0.5 : -1); }
 
 private:
@@ -241,6 +249,7 @@
     std::shared_ptr<C2StreamDrcAttenuationFactorTuning::input> mDrcAttenuationFactor;
     std::shared_ptr<C2StreamDrcEffectTypeTuning::input> mDrcEffectType;
     std::shared_ptr<C2StreamDrcAlbumModeTuning::input> mDrcAlbumMode;
+    std::shared_ptr<C2StreamMaxChannelCountInfo::input> mMaxChannelCount;
     std::shared_ptr<C2StreamDrcOutputLoudnessTuning::output> mDrcOutputLoudness;
     // TODO Add : C2StreamAacSbrModeTuning
 };
@@ -366,9 +375,10 @@
     ALOGV("AAC decoder using MPEG-D DRC album mode %d", albumMode);
     aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_ALBUM_MODE, albumMode);
 
-    // By default, the decoder creates a 5.1 channel downmix signal.
-    // For seven and eight channel input streams, enable 6.1 and 7.1 channel output
-    aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, -1);
+    // AAC_PCM_MAX_OUTPUT_CHANNELS
+    u_int32_t maxChannelCount = mIntf->getMaxChannelCount();
+    ALOGV("AAC decoder using maximum output channel count %d", maxChannelCount);
+    aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, maxChannelCount);
 
     return status;
 }
@@ -707,6 +717,11 @@
         ALOGV("AAC decoder using MPEG-D DRC album mode %d", albumMode);
         aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_ALBUM_MODE, albumMode);
 
+        // AAC_PCM_MAX_OUTPUT_CHANNELS
+        int32_t maxChannelCount = mIntf->getMaxChannelCount();
+        ALOGV("AAC decoder using maximum output channel count %d", maxChannelCount);
+        aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, maxChannelCount);
+
         mDrcWrap.update();
 
         UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
@@ -847,6 +862,51 @@
                     ALOGE("Getting output loudness failed");
                 }
             }
+
+            // Update the config with the values used for decoding:
+            //    album mode, target reference level, DRC effect type, DRC attenuation and boost
+            //    factor, DRC compression mode, encoder target level and max channel count.
+            // The input values are reported as-is since they were not modified by the decoder.
+
+            C2StreamDrcAttenuationFactorTuning::input currentAttenuationFactor(0u,
+                    (C2FloatValue) (attenuationFactor/127.));
+            work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(currentAttenuationFactor));
+
+            C2StreamDrcBoostFactorTuning::input currentBoostFactor(0u,
+                    (C2FloatValue) (boostFactor/127.));
+            work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(currentBoostFactor));
+
+            C2StreamDrcCompressionModeTuning::input currentCompressMode(0u,
+                    (C2Config::drc_compression_mode_t) compressMode);
+            work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(currentCompressMode));
+
+            C2StreamDrcEncodedTargetLevelTuning::input currentEncodedTargetLevel(0u,
+                    (C2FloatValue) (encTargetLevel*-0.25));
+            work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(currentEncodedTargetLevel));
+
+            C2StreamDrcAlbumModeTuning::input currentAlbumMode(0u,
+                    (C2Config::drc_album_mode_t) albumMode);
+            work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(currentAlbumMode));
+
+            C2StreamDrcTargetReferenceLevelTuning::input currentTargetRefLevel(0u,
+                    (float) (targetRefLevel*-0.25));
+            work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(currentTargetRefLevel));
+
+            C2StreamDrcEffectTypeTuning::input currentEffectype(0u,
+                    (C2Config::drc_effect_type_t) effectType);
+            work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(currentEffectype));
+
+            C2StreamMaxChannelCountInfo::input currentMaxChannelCnt(0u, maxChannelCount);
+            work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(currentMaxChannelCnt));
+
         } while (decoderErr == AAC_DEC_OK);
     }
 
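
Note: a minimal sketch of the value scaling involved when the decoder settings are reflected
back through the config updates above, assuming only what the surrounding code shows. The C2
parameters are floats (attenuation/boost in [0, 1], levels in dB), while the corresponding
decoder parameters are integers (steps of 1/127 and negative quarter-dB steps). The helper
names are made up; only the arithmetic mirrors the code.

    #include <cstdio>

    // C2 float in [0, 1] -> integer step in [0, 127], as in getDrcAttenuationFactor().
    static int toDecoderFactor(float c2Value) { return static_cast<int>(c2Value * 127.f + 0.5f); }

    // Integer step -> C2 float, as pushed into the config update.
    static float toC2Factor(int decValue) { return decValue / 127.f; }

    // Decoder target/reference level in -0.25 dB steps -> C2 level in dB.
    static float toC2LevelDb(int quarterDbSteps) { return quarterDbSteps * -0.25f; }

    int main() {
        int step = toDecoderFactor(0.6f);                                  // 76
        std::printf("0.6 -> %d -> %.3f\n", step, toC2Factor(step));        // round-trips to ~0.598
        std::printf("target level %d -> %.2f dB\n", 96, toC2LevelDb(96));  // -24.00 dB
        return 0;
    }
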
diff --git a/media/codec2/components/base/SimpleC2Interface.cpp b/media/codec2/components/base/SimpleC2Interface.cpp
index 5c019f3..29740d1 100644
--- a/media/codec2/components/base/SimpleC2Interface.cpp
+++ b/media/codec2/components/base/SimpleC2Interface.cpp
@@ -39,6 +39,16 @@
     setDerivedInstance(this);
 
     addParameter(
+            DefineParam(mApiFeatures, C2_PARAMKEY_API_FEATURES)
+            .withConstValue(new C2ApiFeaturesSetting(C2Config::api_feature_t(
+                    API_REFLECTION |
+                    API_VALUES |
+                    API_CURRENT_VALUES |
+                    API_DEPENDENCY |
+                    API_SAME_INPUT_BUFFER)))
+            .build());
+
+    addParameter(
             DefineParam(mName, C2_PARAMKEY_COMPONENT_NAME)
             .withConstValue(AllocSharedString<C2ComponentNameSetting>(name.c_str()))
             .build());
@@ -305,7 +315,6 @@
     Clients need to handle the following base params due to custom dependency.
 
     std::shared_ptr<C2ApiLevelSetting> mApiLevel;
-    std::shared_ptr<C2ApiFeaturesSetting> mApiFeatures;
     std::shared_ptr<C2ComponentAttributesSetting> mAttrib;
 
     std::shared_ptr<C2PortSuggestedBufferCountTuning::input> mSuggestedInputBufferCount;
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 74e105e..7e9090f 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -460,8 +460,8 @@
 
     const C2ConstGraphicBlock inBuffer =
         inputBuffer->data().graphicBlocks().front();
-    if (inBuffer.width() != mSize->width ||
-        inBuffer.height() != mSize->height) {
+    if (inBuffer.width() < mSize->width ||
+        inBuffer.height() < mSize->height) {
         ALOGE("unexpected Input buffer attributes %d(%d) x %d(%d)",
               inBuffer.width(), mSize->width, inBuffer.height(),
               mSize->height);
@@ -472,8 +472,8 @@
     bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
     vpx_image_t raw_frame;
     const C2PlanarLayout &layout = rView->layout();
-    uint32_t width = rView->width();
-    uint32_t height = rView->height();
+    uint32_t width = mSize->width;
+    uint32_t height = mSize->height;
     if (width > 0x8000 || height > 0x8000) {
         ALOGE("Image too big: %u x %u", width, height);
         work->result = C2_BAD_VALUE;
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
index a41c2dc..0251ec2 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
@@ -92,7 +92,10 @@
         for (size_t i = 0; i < updates.size(); ++i) {
             C2Param* param = updates[i].get();
             if (param->index() == C2StreamInitDataInfo::output::PARAM_TYPE) {
-                csd = true;
+                C2StreamInitDataInfo::output* csdBuffer =
+                        (C2StreamInitDataInfo::output*)(param);
+                size_t csdSize = csdBuffer->flexCount();
+                if (csdSize > 0) csd = true;
             } else if ((param->index() == C2StreamSampleRateInfo::output::PARAM_TYPE) ||
                        (param->index() == C2StreamChannelCountInfo::output::PARAM_TYPE) ||
                        (param->index() == C2StreamPictureSizeInfo::output::PARAM_TYPE)) {
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index 74088dd..12ed725 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -109,6 +109,7 @@
         mFramesReceived = 0;
         mTimestampUs = 0u;
         mWorkResult = C2_OK;
+        mReorderDepth = -1;
         mTimestampDevTest = false;
         mMd5Offset = 0;
         mMd5Enable = false;
@@ -211,34 +212,46 @@
         for (std::unique_ptr<C2Work>& work : workItems) {
             if (!work->worklets.empty()) {
                 // For decoder components current timestamp always exceeds
-                // previous timestamp
+                // previous timestamp if output is in display order
                 typedef std::unique_lock<std::mutex> ULock;
                 mWorkResult |= work->result;
                 bool codecConfig = ((work->worklets.front()->output.flags &
                                      C2FrameData::FLAG_CODEC_CONFIG) != 0);
                 if (!codecConfig && !work->worklets.front()->output.buffers.empty()) {
-                    EXPECT_GE((work->worklets.front()->output.ordinal.timestamp.peeku()),
-                              mTimestampUs);
-                    mTimestampUs = work->worklets.front()->output.ordinal.timestamp.peeku();
-
-                    ULock l(mQueueLock);
-                    if (mTimestampDevTest) {
-                        bool tsHit = false;
-                        std::list<uint64_t>::iterator it = mTimestampUslist.begin();
-                        while (it != mTimestampUslist.end()) {
-                            if (*it == mTimestampUs) {
-                                mTimestampUslist.erase(it);
-                                tsHit = true;
-                                break;
-                            }
-                            it++;
+                    if (mReorderDepth < 0) {
+                        C2PortReorderBufferDepthTuning::output reorderBufferDepth;
+                        mComponent->query({&reorderBufferDepth}, {}, C2_MAY_BLOCK,
+                                          nullptr);
+                        mReorderDepth = reorderBufferDepth.value;
+                        if (mReorderDepth > 0) {
+                            // TODO: Add validation for reordered output
+                            mTimestampDevTest = false;
                         }
-                        if (tsHit == false) {
-                            if (mTimestampUslist.empty() == false) {
-                                EXPECT_EQ(tsHit, true) << "TimeStamp not recognized";
-                            } else {
-                                std::cout << "[   INFO   ] Received non-zero "
-                                             "output / TimeStamp not recognized \n";
+                    }
+                    if (mTimestampDevTest) {
+                        EXPECT_GE((work->worklets.front()->output.ordinal.timestamp.peeku()),
+                                  mTimestampUs);
+                        mTimestampUs = work->worklets.front()->output.ordinal.timestamp.peeku();
+
+                        ULock l(mQueueLock);
+                        {
+                            bool tsHit = false;
+                            std::list<uint64_t>::iterator it = mTimestampUslist.begin();
+                            while (it != mTimestampUslist.end()) {
+                                if (*it == mTimestampUs) {
+                                    mTimestampUslist.erase(it);
+                                    tsHit = true;
+                                    break;
+                                }
+                                it++;
+                            }
+                            if (tsHit == false) {
+                                if (mTimestampUslist.empty() == false) {
+                                    EXPECT_EQ(tsHit, true) << "TimeStamp not recognized";
+                                } else {
+                                    std::cout << "[   INFO   ] Received non-zero "
+                                                 "output / TimeStamp not recognized \n";
+                                }
                             }
                         }
                     }
@@ -281,6 +294,7 @@
     standardComp mCompName;
 
     int32_t mWorkResult;
+    int32_t mReorderDepth;
     uint32_t mFramesReceived;
     C2BlockPool::local_id_t mBlockPoolId;
     std::shared_ptr<C2BlockPool> mLinearPool;
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index 9e425d2..ecaf3a8 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -510,12 +510,10 @@
         ASSERT_TRUE(false);
     }
 
-    if (!mCsd && (mCompName != vp8 && mCompName != vp9)) {
-        ASSERT_TRUE(false) << "CSD Buffer not received";
-    }
-
-    if (mCsd && (mCompName == vp8 || mCompName == vp9)) {
-        ASSERT_TRUE(false) << "CSD Buffer not expected";
+    if (mCompName == vp8 || mCompName == h263) {
+        ASSERT_FALSE(mCsd) << "CSD Buffer not expected";
+    } else if (mCompName != vp9) {
+        ASSERT_TRUE(mCsd) << "CSD Buffer not received";
     }
 
     if (mTimestampDevTest) EXPECT_EQ(mTimestampUslist.empty(), true);
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index a3fff35..6f8b1d4 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1955,11 +1955,98 @@
             inputSurface->getHalInterface()));
 }
 
-static void MaybeLogUnrecognizedName(const char *func, const std::string &name) {
-    thread_local std::set<std::string> sLogged{};
-    if (sLogged.insert(name).second) {
-        ALOGW("%s: Unrecognized interface name: %s", func, name.c_str());
+class IntfCache {
+public:
+    IntfCache() = default;
+
+    status_t init(const std::string &name) {
+        std::shared_ptr<Codec2Client::Interface> intf{
+            Codec2Client::CreateInterfaceByName(name.c_str())};
+        if (!intf) {
+            ALOGW("IntfCache [%s]: Unrecognized interface name", name.c_str());
+            mInitStatus = NO_INIT;
+            return NO_INIT;
+        }
+        const static C2StreamUsageTuning::input sUsage{0u /* stream id */};
+        mFields.push_back(C2FieldSupportedValuesQuery::Possible(
+                C2ParamField{&sUsage, &sUsage.value}));
+        c2_status_t err = intf->querySupportedValues(mFields, C2_MAY_BLOCK);
+        if (err != C2_OK) {
+            ALOGW("IntfCache [%s]: failed to query usage supported value (err=%d)",
+                    name.c_str(), err);
+            mFields[0].status = err;
+        }
+        std::vector<std::unique_ptr<C2Param>> params;
+        err = intf->query(
+                {&mApiFeatures},
+                {C2PortAllocatorsTuning::input::PARAM_TYPE},
+                C2_MAY_BLOCK,
+                &params);
+        if (err != C2_OK && err != C2_BAD_INDEX) {
+            ALOGW("IntfCache [%s]: failed to query api features (err=%d)",
+                    name.c_str(), err);
+        }
+        while (!params.empty()) {
+            C2Param *param = params.back().release();
+            params.pop_back();
+            if (!param) {
+                continue;
+            }
+            if (param->type() == C2PortAllocatorsTuning::input::PARAM_TYPE) {
+                mInputAllocators.reset(
+                        C2PortAllocatorsTuning::input::From(param));
+            }
+        }
+        mInitStatus = OK;
+        return OK;
     }
+
+    status_t initCheck() const { return mInitStatus; }
+
+    const C2FieldSupportedValuesQuery &getUsageSupportedValues() const {
+        CHECK_EQ(1u, mFields.size());
+        return mFields[0];
+    }
+
+    const C2ApiFeaturesSetting &getApiFeatures() const {
+        return mApiFeatures;
+    }
+
+    const C2PortAllocatorsTuning::input &getInputAllocators() const {
+        static std::unique_ptr<C2PortAllocatorsTuning::input> sInvalidated = []{
+            std::unique_ptr<C2PortAllocatorsTuning::input> param =
+                C2PortAllocatorsTuning::input::AllocUnique(0);
+            param->invalidate();
+            return param;
+        }();
+        return mInputAllocators ? *mInputAllocators : *sInvalidated;
+    }
+
+private:
+    status_t mInitStatus{NO_INIT};
+
+    std::vector<C2FieldSupportedValuesQuery> mFields;
+    C2ApiFeaturesSetting mApiFeatures;
+    std::unique_ptr<C2PortAllocatorsTuning::input> mInputAllocators;
+};
+
+static const IntfCache &GetIntfCache(const std::string &name) {
+    static IntfCache sNullIntfCache;
+    static std::mutex sMutex;
+    static std::map<std::string, IntfCache> sCache;
+    std::unique_lock<std::mutex> lock{sMutex};
+    auto it = sCache.find(name);
+    if (it == sCache.end()) {
+        lock.unlock();
+        IntfCache intfCache;
+        status_t err = intfCache.init(name);
+        if (err != OK) {
+            return sNullIntfCache;
+        }
+        lock.lock();
+        it = sCache.insert({name, std::move(intfCache)}).first;
+    }
+    return it->second;
 }
 
 static status_t GetCommonAllocatorIds(
@@ -1977,24 +2064,16 @@
     }
     bool firstIteration = true;
     for (const std::string &name : names) {
-        std::shared_ptr<Codec2Client::Interface> intf{
-            Codec2Client::CreateInterfaceByName(name.c_str())};
-        if (!intf) {
-            MaybeLogUnrecognizedName(__FUNCTION__, name);
+        const IntfCache &intfCache = GetIntfCache(name);
+        if (intfCache.initCheck() != OK) {
             continue;
         }
-        std::vector<std::unique_ptr<C2Param>> params;
-        c2_status_t err = intf->query(
-                {}, {C2PortAllocatorsTuning::input::PARAM_TYPE}, C2_MAY_BLOCK, &params);
+        const C2PortAllocatorsTuning::input &allocators = intfCache.getInputAllocators();
         if (firstIteration) {
             firstIteration = false;
-            if (err == C2_OK && params.size() == 1u) {
-                C2PortAllocatorsTuning::input *allocators =
-                    C2PortAllocatorsTuning::input::From(params[0].get());
-                if (allocators && allocators->flexCount() > 0) {
-                    ids->insert(allocators->m.values,
-                                allocators->m.values + allocators->flexCount());
-                }
+            if (allocators && allocators.flexCount() > 0) {
+                ids->insert(allocators.m.values,
+                            allocators.m.values + allocators.flexCount());
             }
             if (ids->empty()) {
                 // The component does not advertise allocators. Use default.
@@ -2003,24 +2082,20 @@
             continue;
         }
         bool filtered = false;
-        if (err == C2_OK && params.size() == 1u) {
-            C2PortAllocatorsTuning::input *allocators =
-                C2PortAllocatorsTuning::input::From(params[0].get());
-            if (allocators && allocators->flexCount() > 0) {
-                filtered = true;
-                for (auto it = ids->begin(); it != ids->end(); ) {
-                    bool found = false;
-                    for (size_t j = 0; j < allocators->flexCount(); ++j) {
-                        if (allocators->m.values[j] == *it) {
-                            found = true;
-                            break;
-                        }
+        if (allocators && allocators.flexCount() > 0) {
+            filtered = true;
+            for (auto it = ids->begin(); it != ids->end(); ) {
+                bool found = false;
+                for (size_t j = 0; j < allocators.flexCount(); ++j) {
+                    if (allocators.m.values[j] == *it) {
+                        found = true;
+                        break;
                     }
-                    if (found) {
-                        ++it;
-                    } else {
-                        it = ids->erase(it);
-                    }
+                }
+                if (found) {
+                    ++it;
+                } else {
+                    it = ids->erase(it);
                 }
             }
         }
@@ -2052,23 +2127,16 @@
     *minUsage = 0;
     *maxUsage = ~0ull;
     for (const std::string &name : names) {
-        std::shared_ptr<Codec2Client::Interface> intf{
-            Codec2Client::CreateInterfaceByName(name.c_str())};
-        if (!intf) {
-            MaybeLogUnrecognizedName(__FUNCTION__, name);
+        const IntfCache &intfCache = GetIntfCache(name);
+        if (intfCache.initCheck() != OK) {
             continue;
         }
-        std::vector<C2FieldSupportedValuesQuery> fields;
-        fields.push_back(C2FieldSupportedValuesQuery::Possible(
-                C2ParamField{&sUsage, &sUsage.value}));
-        c2_status_t err = intf->querySupportedValues(fields, C2_MAY_BLOCK);
-        if (err != C2_OK) {
+        const C2FieldSupportedValuesQuery &usageSupportedValues =
+            intfCache.getUsageSupportedValues();
+        if (usageSupportedValues.status != C2_OK) {
             continue;
         }
-        if (fields[0].status != C2_OK) {
-            continue;
-        }
-        const C2FieldSupportedValues &supported = fields[0].values;
+        const C2FieldSupportedValues &supported = usageSupportedValues.values;
         if (supported.type != C2FieldSupportedValues::FLAGS) {
             continue;
         }
@@ -2089,6 +2157,17 @@
 // static
 status_t CCodec::CanFetchLinearBlock(
         const std::vector<std::string> &names, const C2MemoryUsage &usage, bool *isCompatible) {
+    for (const std::string &name : names) {
+        const IntfCache &intfCache = GetIntfCache(name);
+        if (intfCache.initCheck() != OK) {
+            continue;
+        }
+        const C2ApiFeaturesSetting &features = intfCache.getApiFeatures();
+        if (features && !(features.value & API_SAME_INPUT_BUFFER)) {
+            *isCompatible = false;
+            return OK;
+        }
+    }
     uint64_t minUsage = usage.expected;
     uint64_t maxUsage = ~0ull;
     std::set<C2Allocator::id_t> allocators;
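
Note: GetIntfCache() above follows a common construct-outside-the-lock pattern: look up the
entry under the mutex, and if it is missing, drop the lock while doing the slow interface
queries, then re-take it to insert. A generic standalone sketch of that pattern follows; the
names are illustrative and not part of the codec framework.

    #include <iostream>
    #include <map>
    #include <mutex>
    #include <string>

    struct Entry {
        bool valid = false;
        std::string data;
    };

    // Stands in for IntfCache::init(), which may block on interface queries.
    static Entry buildEntry(const std::string& name) {
        return Entry{true, "features-for-" + name};
    }

    static const Entry& getCachedEntry(const std::string& name) {
        static const Entry sInvalidEntry;  // returned when construction fails
        static std::mutex sMutex;
        static std::map<std::string, Entry> sCache;

        std::unique_lock<std::mutex> lock{sMutex};
        auto it = sCache.find(name);
        if (it == sCache.end()) {
            lock.unlock();                 // do the slow work without the lock held
            Entry entry = buildEntry(name);
            if (!entry.valid) {
                return sInvalidEntry;
            }
            lock.lock();
            // If another thread inserted the same key meanwhile, insert() keeps the
            // existing entry and we simply reuse it.
            it = sCache.insert({name, std::move(entry)}).first;
        }
        return it->second;
    }

    int main() {
        std::cout << getCachedEntry("c2.android.aac.decoder").data << "\n";
        return 0;
    }
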
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 907aa39..0626c8d 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -18,6 +18,8 @@
 #define LOG_TAG "CCodecBufferChannel"
 #include <utils/Log.h>
 
+#include <algorithm>
+#include <list>
 #include <numeric>
 
 #include <C2AllocatorGralloc.h>
@@ -616,13 +618,14 @@
 }
 
 void CCodecBufferChannel::feedInputBufferIfAvailableInternal() {
-    if (mInputMetEos ||
-           mOutput.lock()->buffers->hasPending() ||
-           mPipelineWatcher.lock()->pipelineFull()) {
+    if (mInputMetEos || mPipelineWatcher.lock()->pipelineFull()) {
         return;
-    } else {
+    }
+    {
         Mutexed<Output>::Locked output(mOutput);
-        if (!output->buffers || output->buffers->numClientBuffers() >= output->numSlots) {
+        if (!output->buffers ||
+                output->buffers->hasPending() ||
+                output->buffers->numClientBuffers() >= output->numSlots) {
             return;
         }
     }
@@ -729,6 +732,9 @@
     std::shared_ptr<const C2StreamHdr10PlusInfo::output> hdr10PlusInfo =
         std::static_pointer_cast<const C2StreamHdr10PlusInfo::output>(
                 c2Buffer->getInfo(C2StreamHdr10PlusInfo::output::PARAM_TYPE));
+    if (hdr10PlusInfo && hdr10PlusInfo->flexCount() == 0) {
+        hdr10PlusInfo.reset();
+    }
 
     {
         Mutexed<OutputSurface>::Locked output(mOutputSurface);
@@ -780,7 +786,7 @@
                     .maxLuminance = hdrStaticInfo->mastering.maxLuminance,
                     .minLuminance = hdrStaticInfo->mastering.minLuminance,
                 };
-                hdr.validTypes = HdrMetadata::SMPTE2086;
+                hdr.validTypes |= HdrMetadata::SMPTE2086;
                 hdr.smpte2086 = smpte2086_meta;
             }
             // If the content light level fields are 0, do not use them, it
@@ -914,6 +920,12 @@
 
     if (inputFormat != nullptr) {
         bool graphic = (iStreamFormat.value == C2BufferData::GRAPHIC);
+        C2Config::api_feature_t apiFeatures = C2Config::api_feature_t(
+                API_REFLECTION |
+                API_VALUES |
+                API_CURRENT_VALUES |
+                API_DEPENDENCY |
+                API_SAME_INPUT_BUFFER);
         std::shared_ptr<C2BlockPool> pool;
         {
             Mutexed<BlockPools>::Locked pools(mBlockPools);
@@ -925,14 +937,15 @@
             // query C2PortAllocatorsTuning::input from component. If an allocator ID is obtained
             // from component, create the input block pool with given ID. Otherwise, use default IDs.
             std::vector<std::unique_ptr<C2Param>> params;
-            err = mComponent->query({ },
+            C2ApiFeaturesSetting featuresSetting{apiFeatures};
+            err = mComponent->query({ &featuresSetting },
                                     { C2PortAllocatorsTuning::input::PARAM_TYPE },
                                     C2_DONT_BLOCK,
                                     &params);
             if ((err != C2_OK && err != C2_BAD_INDEX) || params.size() != 1) {
                 ALOGD("[%s] Query input allocators returned %zu params => %s (%u)",
                         mName, params.size(), asString(err), err);
-            } else if (err == C2_OK && params.size() == 1) {
+            } else if (params.size() == 1) {
                 C2PortAllocatorsTuning::input *inputAllocators =
                     C2PortAllocatorsTuning::input::From(params[0].get());
                 if (inputAllocators && inputAllocators->flexCount() > 0) {
@@ -947,6 +960,9 @@
                     }
                 }
             }
+            if (featuresSetting) {
+                apiFeatures = featuresSetting.value;
+            }
 
             // TODO: use C2Component wrapper to associate this pool with ourselves
             if ((poolMask >> pools->inputAllocatorId) & 1) {
@@ -980,7 +996,10 @@
         input->numSlots = numInputSlots;
         input->extraBuffers.flush();
         input->numExtraSlots = 0u;
-        if (!buffersBoundToCodec) {
+        bool conforming = (apiFeatures & API_SAME_INPUT_BUFFER);
+        // For encrypted content, the framework decrypts the source buffer (ashmem) into
+        // C2Buffers, so even non-conforming codecs can process these.
+        if (!buffersBoundToCodec && (hasCryptoOrDescrambler() || conforming)) {
             input->buffers.reset(new SlotInputBuffers(mName));
         } else if (graphic) {
             if (mInputSurface) {
@@ -1242,62 +1261,98 @@
         return UNKNOWN_ERROR;
     }
     size_t numInputSlots = mInput.lock()->numSlots;
-    std::vector<sp<MediaCodecBuffer>> toBeQueued;
-    for (size_t i = 0; i < numInputSlots; ++i) {
+
+    struct ClientInputBuffer {
         size_t index;
         sp<MediaCodecBuffer> buffer;
-        {
-            Mutexed<Input>::Locked input(mInput);
-            if (!input->buffers->requestNewBuffer(&index, &buffer)) {
-                if (i == 0) {
-                    ALOGW("[%s] start: cannot allocate memory at all", mName);
-                    return NO_MEMORY;
-                } else {
-                    ALOGV("[%s] start: cannot allocate memory, only %zu buffers allocated",
-                            mName, i);
-                }
+        size_t capacity;
+    };
+    std::list<ClientInputBuffer> clientInputBuffers;
+
+    {
+        Mutexed<Input>::Locked input(mInput);
+        while (clientInputBuffers.size() < numInputSlots) {
+            ClientInputBuffer clientInputBuffer;
+            if (!input->buffers->requestNewBuffer(&clientInputBuffer.index,
+                                                  &clientInputBuffer.buffer)) {
                 break;
             }
+            clientInputBuffer.capacity = clientInputBuffer.buffer->capacity();
+            clientInputBuffers.emplace_back(std::move(clientInputBuffer));
         }
-        if (buffer) {
-            Mutexed<std::list<sp<ABuffer>>>::Locked configs(mFlushedConfigs);
-            ALOGV("[%s] input buffer %zu available", mName, index);
-            bool post = true;
-            if (!configs->empty()) {
+    }
+    if (clientInputBuffers.empty()) {
+        ALOGW("[%s] start: cannot allocate memory at all", mName);
+        return NO_MEMORY;
+    } else if (clientInputBuffers.size() < numInputSlots) {
+        ALOGD("[%s] start: cannot allocate memory for all slots, "
+              "only %zu buffers allocated",
+              mName, clientInputBuffers.size());
+    } else {
+        ALOGV("[%s] %zu initial input buffers available",
+              mName, clientInputBuffers.size());
+    }
+    // Sort input buffers by their capacities in increasing order.
+    clientInputBuffers.sort(
+            [](const ClientInputBuffer& a, const ClientInputBuffer& b) {
+                return a.capacity < b.capacity;
+            });
+
+    {
+        Mutexed<std::list<sp<ABuffer>>>::Locked configs(mFlushedConfigs);
+        if (!configs->empty()) {
+            while (!configs->empty()) {
                 sp<ABuffer> config = configs->front();
                 configs->pop_front();
-                if (buffer->capacity() >= config->size()) {
-                    memcpy(buffer->base(), config->data(), config->size());
-                    buffer->setRange(0, config->size());
-                    buffer->meta()->clear();
-                    buffer->meta()->setInt64("timeUs", 0);
-                    buffer->meta()->setInt32("csd", 1);
-                    post = false;
-                } else {
-                    ALOGD("[%s] buffer capacity too small for the config (%zu < %zu)",
-                            mName, buffer->capacity(), config->size());
+                // Find the smallest input buffer that can fit the config.
+                auto i = std::find_if(
+                        clientInputBuffers.begin(),
+                        clientInputBuffers.end(),
+                        [cfgSize = config->size()](const ClientInputBuffer& b) {
+                            return b.capacity >= cfgSize;
+                        });
+                if (i == clientInputBuffers.end()) {
+                    ALOGW("[%s] no input buffer large enough for the config "
+                          "(%zu bytes)",
+                          mName, config->size());
+                    return NO_MEMORY;
                 }
-            } else if (oStreamFormat.value == C2BufferData::LINEAR && i == 0
-                        && (!prepend || prepend.value == PREPEND_HEADER_TO_NONE)) {
-                // WORKAROUND: Some apps expect CSD available without queueing
-                //             any input. Queue an empty buffer to get the CSD.
-                buffer->setRange(0, 0);
+                sp<MediaCodecBuffer> buffer = i->buffer;
+                memcpy(buffer->base(), config->data(), config->size());
+                buffer->setRange(0, config->size());
                 buffer->meta()->clear();
                 buffer->meta()->setInt64("timeUs", 0);
-                post = false;
+                buffer->meta()->setInt32("csd", 1);
+                if (queueInputBufferInternal(buffer) != OK) {
+                    ALOGW("[%s] Error while queueing a flushed config",
+                          mName);
+                    return UNKNOWN_ERROR;
+                }
+                clientInputBuffers.erase(i);
             }
-            if (post) {
-                mCallback->onInputBufferAvailable(index, buffer);
-            } else {
-                toBeQueued.emplace_back(buffer);
+        } else if (oStreamFormat.value == C2BufferData::LINEAR &&
+                   (!prepend || prepend.value == PREPEND_HEADER_TO_NONE)) {
+            sp<MediaCodecBuffer> buffer = clientInputBuffers.front().buffer;
+            // WORKAROUND: Some apps expect CSD available without queueing
+            //             any input. Queue an empty buffer to get the CSD.
+            buffer->setRange(0, 0);
+            buffer->meta()->clear();
+            buffer->meta()->setInt64("timeUs", 0);
+            if (queueInputBufferInternal(buffer) != OK) {
+                ALOGW("[%s] Error while queueing an empty buffer to get CSD",
+                      mName);
+                return UNKNOWN_ERROR;
             }
+            clientInputBuffers.pop_front();
         }
     }
-    for (const sp<MediaCodecBuffer> &buffer : toBeQueued) {
-        if (queueInputBufferInternal(buffer) != OK) {
-            ALOGV("[%s] Error while queueing initial buffers", mName);
-        }
+
+    for (const ClientInputBuffer& clientInputBuffer: clientInputBuffers) {
+        mCallback->onInputBufferAvailable(
+                clientInputBuffer.index,
+                clientInputBuffer.buffer);
     }
+
     return OK;
 }
 
@@ -1732,7 +1787,7 @@
                     realloc(c2Buffer);
             output.unlock();
             mCCodecCallback->onOutputBuffersChanged();
-            return;
+            break;
         case OutputBuffers::RETRY:
             ALOGV("[%s] sendOutputBuffers: unable to register output buffer",
                   mName);
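
Note: the restructured start() path above gathers the available client input buffers, sorts
them by capacity, and then places each flushed codec config into the smallest buffer that can
hold it, handing the remaining buffers to the client. The stripped-down sketch below shows
just that best-fit matching step, with plain capacities standing in for MediaCodecBuffer and
ABuffer objects.

    #include <algorithm>
    #include <cstdio>
    #include <list>
    #include <vector>

    struct SlotBuffer {
        size_t index;
        size_t capacity;
    };

    int main() {
        // Buffers the codec has handed out, e.g. with capacities of 64K, 4K and 16K.
        std::list<SlotBuffer> buffers{{0, 65536}, {1, 4096}, {2, 16384}};
        // Flushed codec-config blobs that must be re-queued before new input.
        std::vector<size_t> configSizes{6000, 3000};

        // Sort by capacity so the find_if below picks the smallest buffer that fits.
        buffers.sort([](const SlotBuffer& a, const SlotBuffer& b) {
            return a.capacity < b.capacity;
        });

        for (size_t cfgSize : configSizes) {
            auto it = std::find_if(buffers.begin(), buffers.end(),
                                   [cfgSize](const SlotBuffer& b) { return b.capacity >= cfgSize; });
            if (it == buffers.end()) {
                std::printf("no input buffer large enough for %zu bytes\n", cfgSize);
                return 1;
            }
            std::printf("config of %zu bytes -> buffer %zu (capacity %zu)\n",
                        cfgSize, it->index, it->capacity);
            buffers.erase(it);  // this buffer carries the config, not handed to the app
        }
        // The remaining buffers would be reported via onInputBufferAvailable().
        std::printf("%zu buffers left for the client\n", buffers.size());
        return 0;
    }
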
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 79fa5ed..4520823 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -288,7 +288,7 @@
             requestStop();
         }
 
-        logBufferState();
+        logReleaseBufferState();
 
         setState(AAUDIO_STREAM_STATE_CLOSING);
         aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
@@ -783,6 +783,14 @@
         adjustedFrames = std::min(actualFrames, adjustedFrames);
     }
 
+    if (adjustedFrames != mBufferSizeInFrames) {
+        android::mediametrics::LogItem(mMetricsId)
+                .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
+                .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, adjustedFrames)
+                .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getXRunCount())
+                .record();
+    }
+
     mBufferSizeInFrames = adjustedFrames;
     ALOGV("%s(%d) returns %d", __func__, requestedFrames, adjustedFrames);
     return (aaudio_result_t) adjustedFrames;
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 0644368..bc973bd 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -116,9 +116,10 @@
     }
 }
 
-void AudioStream::logBufferState() {
+void AudioStream::logReleaseBufferState() {
     if (mMetricsId.size() > 0) {
         android::mediametrics::LogItem(mMetricsId)
+                .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_RELEASE)
                 .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, (int32_t) getBufferSize())
                 .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getXRunCount())
                 .record();
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 613a092..fb71c36 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -115,7 +115,7 @@
 
     // log to MediaMetrics
     virtual void logOpen();
-    void logBufferState();
+    void logReleaseBufferState();
 
     /**
      * Free any hardware or system resources from the open() call.
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 853c0db..6e5110f 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -292,7 +292,7 @@
     //  Then call it from here
     if (getState() != AAUDIO_STREAM_STATE_CLOSING) {
         mAudioRecord->removeAudioDeviceCallback(mDeviceCallback);
-        logBufferState();
+        logReleaseBufferState();
         mAudioRecord.clear();
         mFixedBlockWriter.close();
         return AudioStream::release_l();
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 1120f05..ea08361 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -254,7 +254,7 @@
 aaudio_result_t AudioStreamTrack::release_l() {
     if (getState() != AAUDIO_STREAM_STATE_CLOSING) {
         mAudioTrack->removeAudioDeviceCallback(mDeviceCallback);
-        logBufferState();
+        logReleaseBufferState();
         // TODO Investigate why clear() causes a hang in test_various.cpp
         // if I call close() from a data callback.
         // But the same thing in AudioRecord is OK!
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index a6e5f70..8935d57 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -233,6 +233,7 @@
     srcs: ["test_steal_exclusive.cpp"],
     shared_libs: [
         "libaaudio",
+        "liblog",
         "libbinder",
         "libcutils",
         "libutils",
diff --git a/media/libaaudio/tests/test_steal_exclusive.cpp b/media/libaaudio/tests/test_steal_exclusive.cpp
index 2a05910..05c560d 100644
--- a/media/libaaudio/tests/test_steal_exclusive.cpp
+++ b/media/libaaudio/tests/test_steal_exclusive.cpp
@@ -47,137 +47,271 @@
  */
 
 #include <atomic>
+#include <mutex>
 #include <stdio.h>
 #include <thread>
 #include <unistd.h>
 
+#include <android/log.h>
+
 #include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
 
 #define DEFAULT_TIMEOUT_NANOS  ((int64_t)1000000000)
 #define SOLO_DURATION_MSEC    2000
 #define DUET_DURATION_MSEC    8000
 #define SLEEP_DURATION_MSEC    500
 
+#define MODULE_NAME  "stealAudio"
+#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, MODULE_NAME, __VA_ARGS__)
+
 static const char * s_sharingModeToText(aaudio_sharing_mode_t mode) {
     return (mode == AAUDIO_SHARING_MODE_EXCLUSIVE) ? "EXCLUSIVE"
         : ((mode == AAUDIO_SHARING_MODE_SHARED)  ? "SHARED"
             : AAudio_convertResultToText(mode));
 }
 
+static const char * s_performanceModeToText(aaudio_performance_mode_t mode) {
+    return (mode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) ? "LOWLAT"
+        : ((mode == AAUDIO_PERFORMANCE_MODE_NONE)  ? "NONE"
+            : AAudio_convertResultToText(mode));
+}
+
+static aaudio_data_callback_result_t s_myDataCallbackProc(
+        AAudioStream * /* stream */,
+        void *userData,
+        void *audioData,
+        int32_t numFrames);
+
 static void s_myErrorCallbackProc(
         AAudioStream *stream,
         void *userData,
         aaudio_result_t error);
 
-struct AudioEngine {
-    AAudioStream        *stream = nullptr;
-    std::thread         *thread = nullptr;
-    aaudio_direction_t   direction = AAUDIO_DIRECTION_OUTPUT;
+class AudioEngine {
+public:
+
+    AudioEngine(const char *name) {
+        mName = name;
+    }
 
     // These counters are read and written by the callback and the main thread.
-    std::atomic<int32_t> framesRead{};
     std::atomic<int32_t> framesCalled{};
     std::atomic<int32_t> callbackCount{};
+    std::atomic<aaudio_sharing_mode_t> sharingMode{};
+    std::atomic<aaudio_performance_mode_t> performanceMode{};
+    std::atomic<bool> isMMap{false};
 
+    void setMaxRetries(int maxRetries) {
+        mMaxRetries = maxRetries;
+    }
+
+    void setOpenDelayMillis(int openDelayMillis) {
+        mOpenDelayMillis = openDelayMillis;
+    }
+
+    void restartStream() {
+        int retriesLeft = mMaxRetries;
+        aaudio_result_t result;
+        do {
+            closeAudioStream();
+            if (mOpenDelayMillis) usleep(mOpenDelayMillis * 1000);
+            openAudioStream(mDirection, mRequestedSharingMode);
+            // It is possible for the stream to be disconnected or stolen between the time
+            // it is opened and the time it is started. If that happens, try again.
+            // If it was stolen, the second attempt should succeed because there will already
+            // be a SHARED stream, which will not get stolen.
+            result = AAudioStream_requestStart(mStream);
+            printf("%s: AAudioStream_requestStart() returns %s\n",
+                    mName.c_str(),
+                    AAudio_convertResultToText(result));
+        } while (retriesLeft-- > 0 && result != AAUDIO_OK);
+    }
+
+    aaudio_data_callback_result_t onAudioReady(
+            void * /*audioData */,
+            int32_t numFrames) {
+        callbackCount++;
+        framesCalled += numFrames;
+        return AAUDIO_CALLBACK_RESULT_CONTINUE;
+    }
+
+    aaudio_result_t openAudioStream(aaudio_direction_t direction,
+            aaudio_sharing_mode_t requestedSharingMode) {
+        std::lock_guard<std::mutex> lock(mLock);
+
+        AAudioStreamBuilder *builder = nullptr;
+        mDirection = direction;
+        mRequestedSharingMode = requestedSharingMode;
+
+        // Use an AAudioStreamBuilder to contain requested parameters.
+        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
+        if (result != AAUDIO_OK) {
+            printf("AAudio_createStreamBuilder returned %s",
+                   AAudio_convertResultToText(result));
+            return result;
+        }
+
+        // Request stream properties.
+        AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_FLOAT);
+        AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+        AAudioStreamBuilder_setSharingMode(builder, mRequestedSharingMode);
+        AAudioStreamBuilder_setDirection(builder, direction);
+        AAudioStreamBuilder_setDataCallback(builder, s_myDataCallbackProc, this);
+        AAudioStreamBuilder_setErrorCallback(builder, s_myErrorCallbackProc, this);
+
+        // Create an AAudioStream using the Builder.
+        result = AAudioStreamBuilder_openStream(builder, &mStream);
+        AAudioStreamBuilder_delete(builder);
+        builder = nullptr;
+        if (result != AAUDIO_OK) {
+            printf("AAudioStreamBuilder_openStream returned %s",
+                   AAudio_convertResultToText(result));
+            return result;
+        }
+
+        // See what kind of stream we actually opened.
+        int32_t deviceId = AAudioStream_getDeviceId(mStream);
+        sharingMode = AAudioStream_getSharingMode(mStream);
+        performanceMode = AAudioStream_getPerformanceMode(mStream);
+        isMMap = AAudioStream_isMMapUsed(mStream);
+        printf("%s: opened: deviceId = %3d, sharingMode = %s, perf = %s, %s --------\n",
+               mName.c_str(),
+               deviceId,
+               s_sharingModeToText(sharingMode),
+               s_performanceModeToText(performanceMode),
+               (isMMap ? "MMAP" : "Legacy")
+               );
+
+        return result;
+    }
+
+    aaudio_result_t closeAudioStream() {
+        std::lock_guard<std::mutex> lock(mLock);
+        aaudio_result_t result = AAUDIO_OK;
+        if (mStream != nullptr) {
+            result = AAudioStream_close(mStream);
+            if (result != AAUDIO_OK) {
+                printf("AAudioStream_close returned %s\n",
+                       AAudio_convertResultToText(result));
+            }
+            mStream = nullptr;
+        }
+        return result;
+    }
+
+    /**
+     * @return 0 is OK, -1 for error
+     */
+    int checkEnginePositions() {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStream == nullptr) return 0;
+
+        const int64_t framesRead = AAudioStream_getFramesRead(mStream);
+        const int64_t framesWritten = AAudioStream_getFramesWritten(mStream);
+        const int32_t delta = (int32_t)(framesWritten - framesRead);
+        printf("%s: playing framesRead = %7d, framesWritten = %7d"
+               ", delta = %4d, framesCalled = %6d, callbackCount = %4d\n",
+               mName.c_str(),
+               (int32_t) framesRead,
+               (int32_t) framesWritten,
+               delta,
+               framesCalled.load(),
+               callbackCount.load()
+        );
+        if (delta > AAudioStream_getBufferCapacityInFrames(mStream)) {
+            printf("ERROR - delta > capacity\n");
+            return -1;
+        }
+        return 0;
+    }
+
+    aaudio_result_t start() {
+        std::lock_guard<std::mutex> lock(mLock);
+        reset();
+        if (mStream == nullptr) return 0;
+        return AAudioStream_requestStart(mStream);
+    }
+
+    aaudio_result_t stop() {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStream == nullptr) return 0;
+        return AAudioStream_requestStop(mStream);
+    }
+
+    bool hasAdvanced() {
+        std::lock_guard<std::mutex> lock(mLock);
+        if (mStream == nullptr) return false;
+        if (mDirection == AAUDIO_DIRECTION_OUTPUT) {
+            return AAudioStream_getFramesRead(mStream) > 0;
+        } else {
+            return AAudioStream_getFramesWritten(mStream) > 0;
+        }
+    }
+
+    aaudio_result_t verify() {
+        int errorCount = 0;
+        if (hasAdvanced()) {
+            printf("%s: stream is running => PASS\n", mName.c_str());
+        } else {
+            errorCount++;
+            printf("%s: stream should be running => FAIL!!\n", mName.c_str());
+        }
+
+        if (isMMap) {
+            printf("%s: data path is MMAP => PASS\n", mName.c_str());
+        } else {
+            errorCount++;
+            printf("%s: data path is Legacy! => FAIL\n", mName.c_str());
+        }
+
+        // Check for PASS/FAIL
+        if (sharingMode == AAUDIO_SHARING_MODE_SHARED) {
+            printf("%s: mode is SHARED => PASS\n", mName.c_str());
+        } else {
+            errorCount++;
+            printf("%s: modes is EXCLUSIVE => FAIL!!\n", mName.c_str());
+        }
+        return errorCount ? AAUDIO_ERROR_INVALID_FORMAT : AAUDIO_OK;
+    }
+
+private:
     void reset() {
-        framesRead.store(0);
         framesCalled.store(0);
         callbackCount.store(0);
     }
+
+    AAudioStream       *mStream = nullptr;
+    aaudio_direction_t  mDirection = AAUDIO_DIRECTION_OUTPUT;
+    aaudio_sharing_mode_t mRequestedSharingMode = AAUDIO_UNSPECIFIED;
+    std::mutex          mLock;
+    std::string         mName;
+    int                 mMaxRetries = 1;
+    int                 mOpenDelayMillis = 0;
 };
 
 // Callback function that fills the audio output buffer.
 static aaudio_data_callback_result_t s_myDataCallbackProc(
-        AAudioStream *stream,
+        AAudioStream * /* stream */,
         void *userData,
         void *audioData,
         int32_t numFrames
 ) {
-    (void) audioData;
-    (void) numFrames;
-    AudioEngine *engine = (struct AudioEngine *)userData;
-    engine->callbackCount++;
-
-    engine->framesRead = (int32_t)AAudioStream_getFramesRead(stream);
-    engine->framesCalled += numFrames;
-    return AAUDIO_CALLBACK_RESULT_CONTINUE;
-}
-
-static aaudio_result_t s_OpenAudioStream(struct AudioEngine *engine,
-                                         aaudio_direction_t direction) {
-    AAudioStreamBuilder *builder = nullptr;
-    engine->direction = direction;
-
-    // Use an AAudioStreamBuilder to contain requested parameters.
-    aaudio_result_t result = AAudio_createStreamBuilder(&builder);
-    if (result != AAUDIO_OK) {
-        printf("AAudio_createStreamBuilder returned %s",
-               AAudio_convertResultToText(result));
-        return result;
-    }
-
-    // Request stream properties.
-    AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_FLOAT);
-    AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
-    AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_EXCLUSIVE);
-    AAudioStreamBuilder_setDirection(builder, direction);
-    AAudioStreamBuilder_setDataCallback(builder, s_myDataCallbackProc, engine);
-    AAudioStreamBuilder_setErrorCallback(builder, s_myErrorCallbackProc, engine);
-
-    // Create an AAudioStream using the Builder.
-    result = AAudioStreamBuilder_openStream(builder, &engine->stream);
-    AAudioStreamBuilder_delete(builder);
-    builder = nullptr;
-    if (result != AAUDIO_OK) {
-        printf("AAudioStreamBuilder_openStream returned %s",
-               AAudio_convertResultToText(result));
-    }
-
-    // See see what kind of stream we actually opened.
-    int32_t deviceId = AAudioStream_getDeviceId(engine->stream);
-    aaudio_sharing_mode_t actualSharingMode = AAudioStream_getSharingMode(engine->stream);
-    printf("-------- opened: deviceId = %3d, actualSharingMode = %s\n",
-           deviceId,
-           s_sharingModeToText(actualSharingMode));
-
-    return result;
-}
-
-static aaudio_result_t s_CloseAudioStream(struct AudioEngine *engine) {
-    aaudio_result_t result = AAUDIO_OK;
-    if (engine->stream != nullptr) {
-        result = AAudioStream_close(engine->stream);
-        if (result != AAUDIO_OK) {
-            printf("AAudioStream_close returned %s\n",
-                   AAudio_convertResultToText(result));
-        }
-        engine->stream = nullptr;
-    }
-    return result;
+    AudioEngine *engine = (AudioEngine *)userData;
+    return engine->onAudioReady(audioData, numFrames);
 }
 
 static void s_myRestartStreamProc(void *userData) {
+    LOGI("%s() called", __func__);
     printf("%s() - restart in separate thread\n", __func__);
     AudioEngine *engine = (AudioEngine *) userData;
-    int retriesLeft = 1;
-    aaudio_result_t result;
-    do {
-        s_CloseAudioStream(engine);
-        s_OpenAudioStream(engine, engine->direction);
-        // It is possible for the stream to be disconnected, or stolen between the time
-        // it is opened and when it is started. If that happens then try again.
-        // If it was stolen then it should succeed the second time because there will already be
-        // a SHARED stream, which will not get stolen.
-        result = AAudioStream_requestStart(engine->stream);
-        printf("%s() - AAudioStream_requestStart() returns %s\n", __func__,
-                AAudio_convertResultToText(result));
-    } while (retriesLeft-- > 0 && result != AAUDIO_OK);
+    engine->restartStream();
 }
 
 static void s_myErrorCallbackProc(
         AAudioStream * /* stream */,
         void *userData,
         aaudio_result_t error) {
+    LOGI("%s() called", __func__);
     printf("%s() - error = %s\n", __func__, AAudio_convertResultToText(error));
     // Handle error on a separate thread.
     std::thread t(s_myRestartStreamProc, userData);
@@ -185,48 +319,28 @@
 }
 
 static void s_usage() {
-    printf("test_steal_exclusive [-i]\n");
+    printf("test_steal_exclusive [-i] [-r{maxRetries}] [-d{delay}] -s\n");
     printf("     -i direction INPUT, otherwise OUTPUT\n");
+    printf("     -d delay open by milliseconds, default = 0\n");
+    printf("     -r max retries in the error callback, default = 1\n");
+    printf("     -s try to open in SHARED mode\n");
 }
 
-/**
- * @return 0 is OK, -1 for error
- */
-static int s_checkEnginePositions(AudioEngine *engine) {
-    if (engine->stream == nullptr) return 0; // race condition with onError procs!
-
-    const int64_t framesRead = AAudioStream_getFramesRead(engine->stream);
-    const int64_t framesWritten = AAudioStream_getFramesWritten(engine->stream);
-    const int32_t delta = (int32_t)(framesWritten - framesRead);
-    printf("playing framesRead = %7d, framesWritten = %7d"
-           ", delta = %4d, framesCalled = %6d, callbackCount = %4d\n",
-           (int32_t) framesRead,
-           (int32_t) framesWritten,
-           delta,
-           engine->framesCalled.load(),
-           engine->callbackCount.load()
-    );
-    if (delta > AAudioStream_getBufferCapacityInFrames(engine->stream)) {
-        printf("ERROR - delta > capacity\n");
-        return -1;
-    }
-    return 0;
-}
-
-int main(int argc, char **argv) {
-    (void) argc;
-    (void) argv;
-    struct AudioEngine victim;
-    struct AudioEngine thief;
+int main(int argc, char ** argv) {
+    AudioEngine victim("victim");
+    AudioEngine thief("thief");
     aaudio_direction_t direction = AAUDIO_DIRECTION_OUTPUT;
     aaudio_result_t result = AAUDIO_OK;
     int errorCount = 0;
+    int maxRetries = 1;
+    int openDelayMillis = 0;
+    aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
     setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
 
-    printf("Test Stealing an EXCLUSIVE stream V1.0\n");
+    printf("Test interaction between streams V1.1\n");
     printf("\n");
 
     for (int i = 1; i < argc; i++) {
@@ -234,9 +348,18 @@
         if (arg[0] == '-') {
             char option = arg[1];
             switch (option) {
+                case 'd':
+                    openDelayMillis = atoi(&arg[2]);
+                    break;
                 case 'i':
                     direction = AAUDIO_DIRECTION_INPUT;
                     break;
+                case 'r':
+                    maxRetries = atoi(&arg[2]);
+                    break;
+                case 's':
+                    requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+                    break;
                 default:
                     s_usage();
                     exit(EXIT_FAILURE);
@@ -249,16 +372,35 @@
         }
     }
 
-    result = s_OpenAudioStream(&victim, direction);
+    victim.setOpenDelayMillis(openDelayMillis);
+    thief.setOpenDelayMillis(openDelayMillis);
+    victim.setMaxRetries(maxRetries);
+    thief.setMaxRetries(maxRetries);
+
+    result = victim.openAudioStream(direction, requestedSharingMode);
     if (result != AAUDIO_OK) {
         printf("s_OpenAudioStream victim returned %s\n",
                AAudio_convertResultToText(result));
         errorCount++;
     }
-    victim.reset();
+
+    if (victim.sharingMode == requestedSharingMode) {
+        printf("Victim modes is %s => OK\n", s_sharingModeToText(requestedSharingMode));
+    } else {
+        printf("Victim modes should be %s => test not valid!\n",
+                s_sharingModeToText(requestedSharingMode));
+        goto onerror;
+    }
+
+    if (victim.isMMap) {
+        printf("Victim data path is MMAP => OK\n");
+    } else {
+        printf("Victim data path is Legacy! => test not valid\n");
+        goto onerror;
+    }
 
     // Start stream.
-    result = AAudioStream_requestStart(victim.stream);
+    result = victim.start();
     printf("AAudioStream_requestStart(VICTIM) returned %d >>>>>>>>>>>>>>>>>>>>>>\n", result);
     if (result != AAUDIO_OK) {
         errorCount++;
@@ -267,77 +409,69 @@
     if (result == AAUDIO_OK) {
         const int watchLoops = SOLO_DURATION_MSEC / SLEEP_DURATION_MSEC;
         for (int i = watchLoops; i > 0; i--) {
-            errorCount += s_checkEnginePositions(&victim) ? 1 : 0;
+            errorCount += victim.checkEnginePositions() ? 1 : 0;
             usleep(SLEEP_DURATION_MSEC * 1000);
         }
     }
 
-    printf("Try to start the THIEF stream that may steal the VICTIM MMAP resource -----\n");
-    result = s_OpenAudioStream(&thief, direction);
+    printf("Trying to start the THIEF stream, which may steal the VICTIM MMAP resource -----\n");
+    result = thief.openAudioStream(direction, requestedSharingMode);
     if (result != AAUDIO_OK) {
         printf("s_OpenAudioStream victim returned %s\n",
                AAudio_convertResultToText(result));
         errorCount++;
     }
-    thief.reset();
 
     // Start stream.
-    result = AAudioStream_requestStart(thief.stream);
+    result = thief.start();
     printf("AAudioStream_requestStart(THIEF) returned %d >>>>>>>>>>>>>>>>>>>>>>\n", result);
     if (result != AAUDIO_OK) {
         errorCount++;
     }
-    printf("You might enjoy plugging in a headset now to see what happens...\n");
+
+    // Give stream time to advance.
+    usleep(SLEEP_DURATION_MSEC * 1000);
+
+    if (victim.verify()) {
+        errorCount++;
+        goto onerror;
+    }
+    if (thief.verify()) {
+        errorCount++;
+        goto onerror;
+    }
+
+    LOGI("Both streams running. Ask user to plug in headset. ====");
+    printf("\n====\nPlease PLUG IN A HEADSET now!\n====\n\n");
 
     if (result == AAUDIO_OK) {
         const int watchLoops = DUET_DURATION_MSEC / SLEEP_DURATION_MSEC;
         for (int i = watchLoops; i > 0; i--) {
-            printf("victim: ");
-            errorCount += s_checkEnginePositions(&victim) ? 1 : 0;
-            printf(" thief: ");
-            errorCount += s_checkEnginePositions(&thief) ? 1 : 0;
+            errorCount += victim.checkEnginePositions() ? 1 : 0;
+            errorCount += thief.checkEnginePositions() ? 1 : 0;
             usleep(SLEEP_DURATION_MSEC * 1000);
         }
     }
 
-    // Check for PASS/FAIL
-    aaudio_sharing_mode_t victimSharingMode = AAudioStream_getSharingMode(victim.stream);
-    aaudio_sharing_mode_t thiefSharingMode = AAudioStream_getSharingMode(thief.stream);
-    printf("victimSharingMode = %s, thiefSharingMode = %s, - ",
-           s_sharingModeToText(victimSharingMode),
-           s_sharingModeToText(thiefSharingMode));
-    if ((victimSharingMode == AAUDIO_SHARING_MODE_SHARED)
-            && (thiefSharingMode == AAUDIO_SHARING_MODE_SHARED)) {
-        printf("Both modes are SHARED => PASS\n");
-    } else {
-        errorCount++;
-        printf("Both modes should be SHARED => FAIL!!\n");
-    }
+    errorCount += victim.verify() ? 1 : 0;
+    errorCount += thief.verify() ? 1 : 0;
 
-    const int64_t victimFramesRead = AAudioStream_getFramesRead(victim.stream);
-    const int64_t thiefFramesRead = AAudioStream_getFramesRead(thief.stream);
-    printf("victimFramesRead = %d, thiefFramesRead = %d, - ",
-           (int)victimFramesRead, (int)thiefFramesRead);
-    if (victimFramesRead > 0 && thiefFramesRead > 0) {
-        printf("Both streams are running => PASS\n");
-    } else {
-        errorCount++;
-        printf("Both streams should be running => FAIL!!\n");
-    }
-
-    result = AAudioStream_requestStop(victim.stream);
+    result = victim.stop();
     printf("AAudioStream_requestStop() returned %d <<<<<<<<<<<<<<<<<<<<<\n", result);
     if (result != AAUDIO_OK) {
+        printf("stop result = %d = %s\n", result, AAudio_convertResultToText(result));
         errorCount++;
     }
-    result = AAudioStream_requestStop(thief.stream);
+    result = thief.stop();
     printf("AAudioStream_requestStop() returned %d <<<<<<<<<<<<<<<<<<<<<\n", result);
     if (result != AAUDIO_OK) {
+        printf("stop result = %d = %s\n", result, AAudio_convertResultToText(result));
         errorCount++;
     }
 
-    s_CloseAudioStream(&victim);
-    s_CloseAudioStream(&thief);
+onerror:
+    victim.closeAudioStream();
+    thief.closeAudioStream();
 
     printf("aaudio result = %d = %s\n", result, AAudio_convertResultToText(result));
     printf("test %s\n", errorCount ? "FAILED" : "PASSED");
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 7efa67c..df47def 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -742,8 +742,6 @@
     void *iMemPointer;
     audio_track_cblk_t* cblk;
     status_t status;
-    std::string flagsAsString;
-    std::string originalFlagsAsString;
 
     if (audioFlinger == 0) {
         ALOGE("%s(%d): Could not get audioflinger", __func__, mPortId);
@@ -922,15 +920,13 @@
     mDeathNotifier = new DeathNotifier(this);
     IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);
 
-    InputFlagConverter::toString(mFlags, flagsAsString);
-    InputFlagConverter::toString(mOrigFlags, originalFlagsAsString);
     mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(mPortId);
     mediametrics::LogItem(mMetricsId)
         .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE)
         .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
         // the following are immutable (at least until restore)
-        .set(AMEDIAMETRICS_PROP_FLAGS, flagsAsString.c_str())
-        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, originalFlagsAsString.c_str())
+        .set(AMEDIAMETRICS_PROP_FLAGS, toString(mFlags).c_str())
+        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
         .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
         .set(AMEDIAMETRICS_PROP_TRACKID, mPortId)
         .set(AMEDIAMETRICS_PROP_SOURCE, toString(mAttributes.source).c_str())
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 604d182..32129f0 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -787,8 +787,9 @@
             .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_STOP)
             .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
             .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
+            .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, (int32_t)mProxy->getBufferSizeInFrames())
+            .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getUnderrunCount_l())
             .record();
-        logBufferSizeUnderruns();
     });
 
     ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
@@ -1141,16 +1142,6 @@
     return NO_ERROR;
 }
 
-void AudioTrack::logBufferSizeUnderruns() {
-    LOG_ALWAYS_FATAL_IF(mMetricsId.size() == 0, "mMetricsId is empty!");
-    ALOGD("%s(), mMetricsId = %s", __func__, mMetricsId.c_str());
-    // FIXME THis hangs! Why?
-//    android::mediametrics::LogItem(mMetricsId)
-//            .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, (int32_t) getBufferSizeInFrames())
-//            .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getUnderrunCount())
-//            .record();
-}
-
 ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
 {
     AutoMutex lock(mLock);
@@ -1165,7 +1156,11 @@
     ssize_t originalBufferSize = mProxy->getBufferSizeInFrames();
     ssize_t finalBufferSize  = mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
     if (originalBufferSize != finalBufferSize) {
-        logBufferSizeUnderruns();
+        android::mediametrics::LogItem(mMetricsId)
+                .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
+                .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, (int32_t)mProxy->getBufferSizeInFrames())
+                .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t)getUnderrunCount_l())
+                .record();
     }
     return finalBufferSize;
 }
@@ -1718,16 +1713,12 @@
     // is the first log of the AudioTrack and must be present before
     // any AudioTrack client logs will be accepted.
 
-    std::string flagsAsString;
-    OutputFlagConverter::toString(mFlags, flagsAsString);
-    std::string originalFlagsAsString;
-    OutputFlagConverter::toString(mOrigFlags, originalFlagsAsString);
     mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(mPortId);
     mediametrics::LogItem(mMetricsId)
         .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE)
         // the following are immutable
-        .set(AMEDIAMETRICS_PROP_FLAGS, flagsAsString.c_str())
-        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, originalFlagsAsString.c_str())
+        .set(AMEDIAMETRICS_PROP_FLAGS, toString(mFlags).c_str())
+        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
         .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
         .set(AMEDIAMETRICS_PROP_TRACKID, mPortId) // dup from key
         .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 17af7d4..0dbd842 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -1274,8 +1274,6 @@
     std::string mMetricsId;  // GUARDED_BY(mLock), could change in createTrack_l().
     std::string mCallerName; // for example "aaudio"
 
-    void logBufferSizeUnderruns();
-
 private:
     class AudioTrackCallback : public media::BnAudioTrackCallback {
     public:
diff --git a/media/libaudioprocessing/AudioMixerBase.cpp b/media/libaudioprocessing/AudioMixerBase.cpp
index 64f91fe..a54e22f 100644
--- a/media/libaudioprocessing/AudioMixerBase.cpp
+++ b/media/libaudioprocessing/AudioMixerBase.cpp
@@ -1500,7 +1500,7 @@
     ALOGVV("track__Resample\n");
     mResampler->setSampleRate(sampleRate);
     const bool ramp = needsRamp();
-    if (MIXTYPE == MIXTYPE_MONOEXPAND || MIXTYPE == MIXTYPE_STEREOEXPAND
+    if (MIXTYPE == MIXTYPE_MONOEXPAND || MIXTYPE == MIXTYPE_STEREOEXPAND // custom volume handling
             || ramp || aux != NULL) {
         // if ramp:        resample with unity gain to temp buffer and scale/mix in 2nd step.
         // if aux != NULL: resample with unity gain to temp buffer then apply send level.
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index 2748182..80bd093 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -236,7 +236,8 @@
             || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
             || MIXTYPE == MIXTYPE_STEREOEXPAND);
     auto proc = [](auto& a, const auto& b) {
-        if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL) {
+        if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
+                || MIXTYPE == MIXTYPE_STEREOEXPAND) {
             a += b;
         } else {
             a = b;
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 4925ea4..3fd3fc3 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -33,10 +33,8 @@
     path: "aidl",
 }
 
-aidl_interface {
-    name: "resourcemanager_aidl_interface",
-    unstable: true,
-    local_include_dir: "aidl",
+filegroup {
+    name: "resourcemanager_aidl",
     srcs: [
         "aidl/android/media/IResourceManagerClient.aidl",
         "aidl/android/media/IResourceManagerService.aidl",
@@ -45,6 +43,16 @@
         "aidl/android/media/MediaResourceParcel.aidl",
         "aidl/android/media/MediaResourcePolicyParcel.aidl",
     ],
+    path: "aidl",
+}
+
+aidl_interface {
+    name: "resourcemanager_aidl_interface",
+    unstable: true,
+    local_include_dir: "aidl",
+    srcs: [
+        ":resourcemanager_aidl",
+    ],
 }
 
 cc_library_shared {
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index b916a78..84388c9 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -147,6 +147,7 @@
 #define AMEDIAMETRICS_PROP_THREADID       "threadId"       // int32 value io handle
 #define AMEDIAMETRICS_PROP_THROTTLEMS     "throttleMs"     // double
 #define AMEDIAMETRICS_PROP_TRACKID        "trackId"        // int32 port id of track/record
+#define AMEDIAMETRICS_PROP_TRAITS         "traits"         // string
 #define AMEDIAMETRICS_PROP_TYPE           "type"           // string (thread type)
 #define AMEDIAMETRICS_PROP_UNDERRUN       "underrun"       // int32
 #define AMEDIAMETRICS_PROP_UNDERRUNFRAMES "underrunFrames" // int64_t from Thread
@@ -175,10 +176,12 @@
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_OPEN       "open"
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_PAUSE      "pause"  // AudioTrack
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS "readParameters" // Thread
+#define AMEDIAMETRICS_PROP_EVENT_VALUE_RELEASE    "release"
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_RESTORE    "restore"
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_SETMODE    "setMode" // AudioFlinger
+#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE    "setBufferSize" // AudioTrack
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_SETPLAYBACKPARAM "setPlaybackParam" // AudioTrack
-#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOICEVOLUME "setVoiceVolume" // AudioFlinger
+#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOICEVOLUME   "setVoiceVolume" // AudioFlinger
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOLUME  "setVolume"  // AudioTrack
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_START      "start"  // AudioTrack, AudioRecord
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_STOP       "stop"   // AudioTrack, AudioRecord
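
The two new event values are emitted from the AAudio and AudioTrack call sites earlier in this change. Reduced to a sketch, a "setBufferSize" record looks like the helper below; the helper name and header paths are assumptions, while the LogItem chain itself mirrors the call sites above:

    #include <string>
    #include <media/MediaMetricsItem.h>      // header path assumed
    #include <MediaMetricsConstants.h>       // header path assumed

    // Illustrative helper: log a "setBufferSize" event against an existing
    // metrics key, mirroring the AudioTrack/AAudio call sites in this change.
    static void logSetBufferSize(const std::string &metricsId,
                                 int32_t bufferSizeFrames, int32_t underruns) {
        android::mediametrics::LogItem(metricsId)
                .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
                .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, bufferSizeFrames)
                .set(AMEDIAMETRICS_PROP_UNDERRUN, underruns)
                .record();
    }
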
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 0eaa503..e6bb2e1 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1159,7 +1159,7 @@
         readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs);
 
         if (mode != MediaPlayerSeekMode::SEEK_CLOSEST) {
-            seekTimeUs = actualTimeUs;
+            seekTimeUs = std::max<int64_t>(0, actualTimeUs);
         }
         mVideoLastDequeueTimeUs = actualTimeUs;
     }
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index fa13f32..88b15ae 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -22,11 +22,14 @@
 
 #include <C2Buffer.h>
 
+#include <Codec2BufferUtils.h>
+
 #include <android/hardware/cas/native/1.0/IDescrambler.h>
 #include <android/hardware/drm/1.0/types.h>
 #include <binder/MemoryDealer.h>
 #include <hidlmemory/FrameworkUtils.h>
 #include <media/openmax/OMX_Core.h>
+#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/AUtils.h>
 #include <media/stagefright/MediaCodec.h>
@@ -91,15 +94,27 @@
 }
 
 status_t ACodecBufferChannel::queueInputBuffer(const sp<MediaCodecBuffer> &buffer) {
-    if (mDealer != nullptr) {
-        return -ENOSYS;
-    }
     std::shared_ptr<const std::vector<const BufferInfo>> array(
             std::atomic_load(&mInputBuffers));
     BufferInfoIterator it = findClientBuffer(array, buffer);
     if (it == array->end()) {
         return -ENOENT;
     }
+    if (it->mClientBuffer != it->mCodecBuffer) {
+        // Copy metadata from client to codec buffer.
+        it->mCodecBuffer->meta()->clear();
+        int64_t timeUs;
+        CHECK(it->mClientBuffer->meta()->findInt64("timeUs", &timeUs));
+        it->mCodecBuffer->meta()->setInt64("timeUs", timeUs);
+        int32_t eos;
+        if (it->mClientBuffer->meta()->findInt32("eos", &eos)) {
+            it->mCodecBuffer->meta()->setInt32("eos", eos);
+        }
+        int32_t csd;
+        if (it->mClientBuffer->meta()->findInt32("csd", &csd)) {
+            it->mCodecBuffer->meta()->setInt32("csd", csd);
+        }
+    }
     ALOGV("queueInputBuffer #%d", it->mBufferId);
     sp<AMessage> msg = mInputBufferFilled->dup();
     msg->setObject("buffer", it->mCodecBuffer);
@@ -267,16 +282,30 @@
             }
             C2ConstLinearBlock block{c2Buffer->data().linearBlocks().front()};
             C2ReadView view{block.map().get()};
-            if (view.capacity() > buffer->capacity()) {
-                return -ENOSYS;
-            }
-            memcpy(buffer->base(), view.data(), view.capacity());
-            buffer->setRange(0, view.capacity());
+            size_t copyLength = std::min(size_t(view.capacity()), buffer->capacity());
+            ALOGV_IF(view.capacity() > buffer->capacity(),
+                    "view.capacity() = %zu, buffer->capacity() = %zu",
+                    view.capacity(), buffer->capacity());
+            memcpy(buffer->base(), view.data(), copyLength);
+            buffer->setRange(0, copyLength);
             break;
         }
         case C2BufferData::GRAPHIC: {
-            // TODO
-            return -ENOSYS;
+            sp<ABuffer> imageData;
+            if (!buffer->format()->findBuffer("image-data", &imageData)) {
+                return -ENOSYS;
+            }
+            if (c2Buffer->data().graphicBlocks().size() != 1u) {
+                return -ENOSYS;
+            }
+            C2ConstGraphicBlock block{c2Buffer->data().graphicBlocks().front()};
+            const C2GraphicView view{block.map().get()};
+            status_t err = ImageCopy(
+                    buffer->base(), (const MediaImage2 *)(imageData->base()), view);
+            if (err != OK) {
+                return err;
+            }
+            break;
         }
         case C2BufferData::LINEAR_CHUNKS:  [[fallthrough]];
         case C2BufferData::GRAPHIC_CHUNKS: [[fallthrough]];
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 3e49bae..3bccb7b 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -273,6 +273,7 @@
         "libutils",
         "libmedia_helper",
         "libsfplugin_ccodec",
+        "libsfplugin_ccodec_utils",
         "libstagefright_codecbase",
         "libstagefright_foundation",
         "libstagefright_omx_utils",
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 4c4d228..39423c7 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -945,10 +945,30 @@
     mInMemoryCache = NULL;
     mInMemoryCacheOffset = 0;
 
+    status_t err = OK;
+    int32_t is4bitTrackId = false;
+    if (param && param->findInt32(kKey4BitTrackIds, &is4bitTrackId) && is4bitTrackId) {
+        err = validateAllTracksId(true);
+    } else {
+        err = validateAllTracksId(false);
+    }
+    if (err != OK) {
+        return err;
+    }
 
     ALOGV("muxer starting: mHasMoovBox %d, mHasFileLevelMeta %d",
             mHasMoovBox, mHasFileLevelMeta);
 
+    err = startWriterThread();
+    if (err != OK) {
+        return err;
+    }
+
+    err = setupAndStartLooper();
+    if (err != OK) {
+        return err;
+    }
+
     writeFtypBox(param);
 
     mFreeBoxOffset = mOffset;
@@ -980,22 +1000,22 @@
     seekOrPostError(mFd, mMdatOffset, SEEK_SET);
     write("\x00\x00\x00\x01mdat????????", 16);
 
-    status_t err = startWriterThread();
-    if (err != OK) {
-        return err;
-    }
-
-    setupAndStartLooper();
-
-    int32_t is4bitTrackId = false;
-    if (param && param->findInt32(kKey4BitTrackIds, &is4bitTrackId) && is4bitTrackId) {
-        err = validateAllTracksId(true);
-    }
-    else {
-        err = validateAllTracksId(false);
-    }
-    if (err != OK) {
-        return err;
+    /* Confirm that the initial file atoms, ftyp and free, were written to the
+     * file properly by posting kWhatNoIOErrorSoFar to MP4WtrCtrlHlpLooper,
+     * which also handles write and seek errors. If a kWhatIOError occurred,
+     * there are two scenarios to handle:
+     * 1) If kWhatIOError was delivered and processed, MP4WtrCtrlHlpLooper
+     * has already stopped all threads gracefully and posting
+     * kWhatNoIOErrorSoFar will fail.
+     * 2) If kWhatIOError wasn't delivered or hasn't been processed yet,
+     * kWhatNoIOErrorSoFar should get posted successfully. Wait for the
+     * response from MP4WtrCtrlHlpLooper.
+     */
+    sp<AMessage> msg = new AMessage(kWhatNoIOErrorSoFar, mReflector);
+    sp<AMessage> response;
+    err = msg->postAndAwaitResponse(&response);
+    if (err != OK || !response->findInt32("err", &err) || err != OK) {
+        return ERROR_IO;
     }
 
     err = startTracks(param);
@@ -1025,13 +1045,16 @@
     }
 
     void *dummy;
-    status_t err = pthread_join(mThread, &dummy);
-    WARN_UNLESS(err == 0, "stopWriterThread pthread_join err: %d", err);
-
-    err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
+    status_t err = OK;
+    int retVal = pthread_join(mThread, &dummy);
+    if (retVal == 0) {
+        err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
+        ALOGD("WriterThread stopped. Status:%d", err);
+    } else {
+        ALOGE("stopWriterThread pthread_join status:%d", retVal);
+        err = UNKNOWN_ERROR;
+    }
     mWriterThreadStarted = false;
-    WARN_UNLESS(err == 0, "stopWriterThread pthread_join retVal: %d", err);
-    ALOGD("Writer thread stopped");
     return err;
 }
 
@@ -1089,23 +1112,26 @@
 
 status_t MPEG4Writer::release() {
     ALOGD("release()");
-    if (mPreAllocationEnabled) {
-        truncatePreAllocation();
+    status_t err = OK;
+    if (!truncatePreAllocation()) {
+        if (err == OK) { err = ERROR_IO; }
     }
-    int err = OK;
-    int retVal = fsync(mFd);
-    WARN_UNLESS(retVal == 0, "fsync err:%s(%d)", std::strerror(errno), errno);
-    err |= retVal;
-    retVal = close(mFd);
-    WARN_UNLESS(retVal == 0, "close err:%s(%d)", std::strerror(errno), errno);
-    err |= retVal;
+    if (fsync(mFd) != 0) {
+        ALOGW("(ignored)fsync err:%s(%d)", std::strerror(errno), errno);
+        // Don't bubble up fsync error, b/157291505.
+        // if (err == OK) { err = ERROR_IO; }
+    }
+    if (close(mFd) != 0) {
+        ALOGE("close err:%s(%d)", std::strerror(errno), errno);
+        if (err == OK) { err = ERROR_IO; }
+    }
     mFd = -1;
     if (mNextFd != -1) {
-        retVal = close(mNextFd);
+        if (close(mNextFd) != 0) {
+            ALOGE("close(mNextFd) error:%s(%d)", std::strerror(errno), errno);
+            if (err == OK) { err = ERROR_IO; }
+        }
         mNextFd = -1;
-        WARN_UNLESS(retVal == 0, "close mNextFd error:%s(%d)",
-                    std::strerror(errno), errno);
-        err |= retVal;
     }
     stopAndReleaseLooper();
     mInitCheck = NO_INIT;
@@ -1165,7 +1191,7 @@
     for (List<Track *>::iterator it = mTracks.begin();
         it != mTracks.end(); ++it) {
         status_t trackErr = (*it)->stop(stopSource);
-        WARN_UNLESS(trackErr == 0, "%s track stopped with an error",
+        WARN_UNLESS(trackErr == OK, "%s track stopped with an error",
                     (*it)->getTrackType());
         if (err == OK && trackErr != OK) {
             err = trackErr;
@@ -1254,7 +1280,11 @@
 
     CHECK(mBoxes.empty());
 
-    err = release();
+    status_t errRelease = release();
+    // Prioritize the error that occurred before release().
+    if (err == OK) {
+        err = errRelease;
+    }
     return err;
 }
 
@@ -1577,9 +1607,8 @@
 
     // Can't guarantee that file is usable or write would succeed anymore, hence signal to stop.
     sp<AMessage> msg = new AMessage(kWhatIOError, mReflector);
-    msg->setInt32("errno", errno);
-    status_t err = msg->post();
-    ALOGE("writeOrPostError post:%d", err);
+    msg->setInt32("err", ERROR_IO);
+    WARN_UNLESS(msg->post() == OK, "writeOrPostError:error posting ERROR_IO");
 }
 
 void MPEG4Writer::seekOrPostError(int fd, off64_t offset, int whence) {
@@ -1597,9 +1626,8 @@
 
     // Can't guarantee that file is usable or seek would succeed anymore, hence signal to stop.
     sp<AMessage> msg = new AMessage(kWhatIOError, mReflector);
-    msg->setInt32("errno", errno);
-    status_t err = msg->post();
-    ALOGE("seekOrPostError post:%d", err);
+    msg->setInt32("err", ERROR_IO);
+    WARN_UNLESS(msg->post() == OK, "seekOrPostError:error posting ERROR_IO");
 }
 
 void MPEG4Writer::beginBox(uint32_t id) {
@@ -1838,7 +1866,7 @@
     if (res == -1) {
         ALOGE("fallocate err:%s, %d, fd:%d", strerror(errno), errno, mFd);
         sp<AMessage> msg = new AMessage(kWhatFallocateError, mReflector);
-        msg->setInt32("errno", errno);
+        msg->setInt32("err", ERROR_IO);
         status_t err = msg->post();
         mFallocateErr = true;
         ALOGD("preAllocation post:%d", err);
@@ -1850,6 +1878,9 @@
 }
 
 bool MPEG4Writer::truncatePreAllocation() {
+    if (!mPreAllocationEnabled)
+        return true;
+
     bool status = true;
     off64_t endOffset = std::max(mMdatEndOffset, mOffset);
     /* if mPreAllocateFileEndOffset >= endOffset, then preallocation logic works good. (diff >= 0).
@@ -1861,6 +1892,10 @@
     if(ftruncate(mFd, endOffset) == -1) {
         ALOGE("ftruncate err:%s, %d, fd:%d", strerror(errno), errno, mFd);
         status = false;
+        /* No need to post and handle the error (stop & notify the client) as is done in
+         * preAllocate(), because ftruncate() is called only during release() and the error
+         * is reported from there, since this function returns false on any ftruncate() failure.
+         */
     }
     return status;
 }
@@ -2153,14 +2188,17 @@
     mElstTableEntries->add(htonl((((uint32_t)mediaRate) << 16) | (uint32_t)mediaRateFraction));
 }
 
-void MPEG4Writer::setupAndStartLooper() {
+status_t MPEG4Writer::setupAndStartLooper() {
+    status_t err = OK;
     if (mLooper == nullptr) {
         mLooper = new ALooper;
         mLooper->setName("MP4WtrCtrlHlpLooper");
-        mLooper->start();
+        err = mLooper->start();
         mReflector = new AHandlerReflector<MPEG4Writer>(this);
         mLooper->registerHandler(mReflector);
     }
+    ALOGD("MP4WtrCtrlHlpLooper Started");
+    return err;
 }
 
 void MPEG4Writer::stopAndReleaseLooper() {
@@ -2399,23 +2437,35 @@
         case kWhatIOError: {
             ALOGE("kWhatIOError");
             int32_t err;
-            CHECK(msg->findInt32("errno", &err));
+            CHECK(msg->findInt32("err", &err));
             // Stop tracks' threads and main writer thread.
             stop();
             notify(MEDIA_RECORDER_EVENT_ERROR, MEDIA_RECORDER_ERROR_UNKNOWN, err);
             break;
         }
-        // fallocate() failed, hence notify app about it and stop().
+        // fallocate() failed, hence stop() and notify app.
         case kWhatFallocateError: {
             ALOGE("kWhatFallocateError");
             int32_t err;
-            CHECK(msg->findInt32("errno", &err));
+            CHECK(msg->findInt32("err", &err));
             // Stop tracks' threads and main writer thread.
             stop();
             //TODO: introduce a suitable MEDIA_RECORDER_ERROR_* instead MEDIA_RECORDER_ERROR_UNKNOWN?
             notify(MEDIA_RECORDER_EVENT_ERROR, MEDIA_RECORDER_ERROR_UNKNOWN, err);
             break;
         }
+        /* Response to kWhatNoIOErrorSoFar would be OK always as of now.
+         * Responding with other options could be added later if required.
+         */
+        case kWhatNoIOErrorSoFar: {
+            ALOGD("kWhatNoIOErrorSoFar");
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", OK);
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            response->postReply(replyID);
+            break;
+        }
         default:
         TRESPASS();
     }
@@ -2778,11 +2828,16 @@
     mDone = true;
 
     void *dummy;
-    status_t err = pthread_join(mThread, &dummy);
-    WARN_UNLESS(err == 0, "track::stop: pthread_join status:%d", err);
-    status_t threadRetVal = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
-    WARN_UNLESS(threadRetVal == 0, "%s track stopped. Status :%d. %s source",
-                getTrackType(), err, stopSource ? "Stop" : "Not Stop");
+    status_t err = OK;
+    int retVal = pthread_join(mThread, &dummy);
+    if (retVal == 0) {
+        err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
+        ALOGD("%s track stopped. Status:%d. %s source",
+            getTrackType(), err, stopSource ? "Stop" : "Not Stop");
+    } else {
+        ALOGE("track::stop: pthread_join retVal:%d", retVal);
+        err = UNKNOWN_ERROR;
+    }
     mStarted = false;
     return err;
 }
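
The kWhatNoIOErrorSoFar check added in start() and its handler in onMessageReceived() form a standard AMessage request/response handshake. Side by side, stripped of the writer specifics and shown as fragments rather than a standalone compilable unit, the protocol is roughly:

    // Sender (MPEG4Writer::start, as added above): post and wait for a reply.
    // If an earlier kWhatIOError already stopped everything, the post or the
    // response fails and start() bails out with ERROR_IO.
    sp<AMessage> msg = new AMessage(kWhatNoIOErrorSoFar, mReflector);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err != OK || !response->findInt32("err", &err) || err != OK) {
        return ERROR_IO;
    }

    // Handler (MPEG4Writer::onMessageReceived, as added above): currently
    // always replies OK; other responses could be added later if required.
    case kWhatNoIOErrorSoFar: {
        sp<AMessage> response = new AMessage;
        response->setInt32("err", OK);
        sp<AReplyToken> replyID;
        CHECK(msg->senderAwaitsResponse(&replyID));
        response->postReply(replyID);
        break;
    }
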
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 7f5e762..5d17f97 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1482,9 +1482,9 @@
     return PostAndAwaitResponse(msg, &response);
 }
 
-status_t MediaCodec::releaseAsync() {
+status_t MediaCodec::releaseAsync(const sp<AMessage> &notify) {
     sp<AMessage> msg = new AMessage(kWhatRelease, this);
-    msg->setInt32("async", 1);
+    msg->setMessage("async", notify);
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
@@ -2496,6 +2496,18 @@
                         }
                         break;
                     }
+                    if (!mLeftover.empty()) {
+                        ssize_t index = dequeuePortBuffer(kPortIndexInput);
+                        CHECK_GE(index, 0);
+
+                        status_t err = handleLeftover(index);
+                        if (err != OK) {
+                            setStickyError(err);
+                            postActivityNotificationIfPossible();
+                            cancelPendingDequeueOperations();
+                        }
+                        break;
+                    }
 
                     if (mFlags & kFlagIsAsync) {
                         if (!mHaveInputSurface) {
@@ -2683,6 +2695,11 @@
                     if (mReplyID != nullptr) {
                         (new AMessage)->postReply(mReplyID);
                     }
+                    if (mAsyncReleaseCompleteNotification != nullptr) {
+                        flushMediametrics();
+                        mAsyncReleaseCompleteNotification->post();
+                        mAsyncReleaseCompleteNotification.clear();
+                    }
                     break;
                 }
 
@@ -3069,8 +3086,8 @@
                 break;
             }
 
-            int32_t async = 0;
-            if (msg->findInt32("async", &async) && async) {
+            sp<AMessage> asyncNotify;
+            if (msg->findMessage("async", &asyncNotify) && asyncNotify != nullptr) {
                 if (mSurface != NULL) {
                     if (!mReleaseSurface) {
                         mReleaseSurface.reset(new ReleaseSurface);
@@ -3102,10 +3119,11 @@
                 pushBlankBuffersToNativeWindow(mSurface.get());
             }
 
-            if (async) {
+            if (asyncNotify != nullptr) {
                 mResourceManagerProxy->markClientForPendingRemoval();
                 (new AMessage)->postReply(mReplyID);
                 mReplyID = 0;
+                mAsyncReleaseCompleteNotification = asyncNotify;
             }
 
             break;
@@ -3185,7 +3203,15 @@
                 break;
             }
 
-            status_t err = onQueueInputBuffer(msg);
+            status_t err = UNKNOWN_ERROR;
+            if (!mLeftover.empty()) {
+                mLeftover.push_back(msg);
+                size_t index;
+                msg->findSize("index", &index);
+                err = handleLeftover(index);
+            } else {
+                err = onQueueInputBuffer(msg);
+            }
 
             PostReplyWithError(replyID, err);
             break;
@@ -3472,8 +3498,8 @@
     sp<hardware::HidlMemory> memory;
     size_t offset = 0;
 
-    if ((mFlags & kFlagUseBlockModel) && mOwnerName.startsWith("codec2::")) {
-        if (mCrypto) {
+    if (mFlags & kFlagUseBlockModel) {
+        if (hasCryptoOrDescrambler()) {
             constexpr size_t kInitialDealerCapacity = 1048576;  // 1MB
             thread_local sp<MemoryDealer> sDealer = new MemoryDealer(
                     kInitialDealerCapacity, "CSD(1MB)");
@@ -3598,6 +3624,9 @@
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
     Mutex::Autolock al(mBufferLock);
 
+    if (portIndex == kPortIndexInput) {
+        mLeftover.clear();
+    }
     for (size_t i = 0; i < mPortBuffers[portIndex].size(); ++i) {
         BufferInfo *info = &mPortBuffers[portIndex][i];
 
@@ -3728,7 +3757,26 @@
             err = mBufferChannel->attachEncryptedBuffer(
                     memory, (mFlags & kFlagIsSecure), key, iv, mode, pattern,
                     offset, subSamples, numSubSamples, buffer);
+        } else {
+            err = UNKNOWN_ERROR;
         }
+
+        if (err == OK && !buffer->asC2Buffer()
+                && c2Buffer && c2Buffer->data().type() == C2BufferData::LINEAR) {
+            C2ConstLinearBlock block{c2Buffer->data().linearBlocks().front()};
+            if (block.size() > buffer->size()) {
+                C2ConstLinearBlock leftover = block.subBlock(
+                        block.offset() + buffer->size(), block.size() - buffer->size());
+                sp<WrapperObject<std::shared_ptr<C2Buffer>>> obj{
+                    new WrapperObject<std::shared_ptr<C2Buffer>>{
+                        C2Buffer::CreateLinearBuffer(leftover)}};
+                msg->setObject("c2buffer", obj);
+                mLeftover.push_front(msg);
+                // Not sending EOS if we have leftovers
+                flags &= ~BUFFER_FLAG_EOS;
+            }
+        }
+
         offset = buffer->offset();
         size = buffer->size();
         if (err != OK) {
@@ -3793,6 +3841,16 @@
     return err;
 }
 
+status_t MediaCodec::handleLeftover(size_t index) {
+    if (mLeftover.empty()) {
+        return OK;
+    }
+    sp<AMessage> msg = mLeftover.front();
+    mLeftover.pop_front();
+    msg->setSize("index", index);
+    return onQueueInputBuffer(msg);
+}
+
 //static
 size_t MediaCodec::CreateFramesRenderedMessage(
         const std::list<FrameRenderTracker::Info> &done, sp<AMessage> &msg) {
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index e048f07..a1fe57c 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -87,7 +87,8 @@
     enum {
         kWhatSwitch                  = 'swch',
         kWhatIOError                 = 'ioer',
-        kWhatFallocateError          = 'faer'
+        kWhatFallocateError          = 'faer',
+        kWhatNoIOErrorSoFar          = 'noie'
     };
 
     int  mFd;
@@ -231,7 +232,7 @@
     status_t stopWriterThread();
     static void *ThreadWrapper(void *me);
     void threadFunc();
-    void setupAndStartLooper();
+    status_t setupAndStartLooper();
     void stopAndReleaseLooper();
 
     // Buffer a single chunk to be written out later.
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 7f308c0..f7e6c27 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -139,7 +139,7 @@
     // object.
     status_t release();
 
-    status_t releaseAsync();
+    status_t releaseAsync(const sp<AMessage> &notify);
 
     status_t flush();
 
@@ -383,6 +383,7 @@
     sp<AMessage> mInputFormat;
     sp<AMessage> mCallback;
     sp<AMessage> mOnFrameRenderedNotification;
+    sp<AMessage> mAsyncReleaseCompleteNotification;
 
     sp<ResourceManagerServiceProxy> mResourceManagerProxy;
 
@@ -515,6 +516,9 @@
     class ReleaseSurface;
     std::unique_ptr<ReleaseSurface> mReleaseSurface;
 
+    std::list<sp<AMessage>> mLeftover;
+    status_t handleLeftover(size_t index);
+
     sp<BatteryChecker> mBatteryChecker;
 
     void statsBufferSent(int64_t presentationUs);
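
With the new signature, callers of releaseAsync() pass the completion notification explicitly and receive it back once the codec has fully released. A rough usage sketch follows; the caller class, message id, and looper setup are illustrative and not part of this change:

    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/foundation/AHandler.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    // Hypothetical caller that wants to know when the async release finished.
    struct MyCodecClient : public AHandler {
        enum { kWhatCodecReleased = 'crel' };            // illustrative id

        void releaseCodec(const sp<MediaCodec> &codec) {
            sp<AMessage> notify = new AMessage(kWhatCodecReleased, this);
            codec->releaseAsync(notify);                 // returns once release is queued
        }

        void onMessageReceived(const sp<AMessage> &msg) override {
            if (msg->what() == kWhatCodecReleased) {
                // Posted from MediaCodec via mAsyncReleaseCompleteNotification:
                // codec resources are now freed.
            }
        }
    };

    // The handler must be registered on a started ALooper, e.g.:
    //   sp<ALooper> looper = new ALooper;
    //   looper->start();
    //   sp<MyCodecClient> client = new MyCodecClient;
    //   looper->registerHandler(client);
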
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index d13cb8f..7e06096 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -164,6 +164,10 @@
         stream = outStream;
         mHalFormatHasProportionalFrames = audio_has_proportional_frames(config->format);
         status = stream->getFrameSize(&mHalFrameSize);
+        LOG_ALWAYS_FATAL_IF(status != OK, "Error retrieving frame size from HAL: %d", status);
+        LOG_ALWAYS_FATAL_IF(mHalFrameSize <= 0, "Error frame size was %zu but must be greater than"
+                " zero", mHalFrameSize);
+
     }
 
     return status;
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index dd84bf2..d6d6e25 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -154,7 +154,7 @@
         mReadBufferState = -1;
         dumpState->mFrameCount = frameCount;
     }
-
+    dumpState->mSilenced = current->mSilenceCapture;
 }
 
 void FastCapture::onWork()
@@ -208,6 +208,9 @@
             mReadBufferState = frameCount;
         }
         if (mReadBufferState > 0) {
+            if (current->mSilenceCapture) {
+                memset(mReadBuffer, 0, mReadBufferState * Format_frameSize(mFormat));
+            }
             ssize_t framesWritten = mPipeSink->write(mReadBuffer, mReadBufferState);
             audio_track_cblk_t* cblk = current->mCblk;
             if (fastPatchRecordBufferProvider != 0) {
diff --git a/services/audioflinger/FastCaptureDumpState.cpp b/services/audioflinger/FastCaptureDumpState.cpp
index 53eeba5..b8b3866 100644
--- a/services/audioflinger/FastCaptureDumpState.cpp
+++ b/services/audioflinger/FastCaptureDumpState.cpp
@@ -44,10 +44,11 @@
     double periodSec = (double) mFrameCount / mSampleRate;
     dprintf(fd, "  FastCapture command=%s readSequence=%u framesRead=%u\n"
                 "              readErrors=%u sampleRate=%u frameCount=%zu\n"
-                "              measuredWarmup=%.3g ms, warmupCycles=%u period=%.2f ms\n",
+                "              measuredWarmup=%.3g ms, warmupCycles=%u period=%.2f ms\n"
+                "              silenced: %s\n",
                 FastCaptureState::commandToString(mCommand), mReadSequence, mFramesRead,
                 mReadErrors, mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
-                periodSec * 1e3);
+                periodSec * 1e3, mSilenced ? "true" : "false");
 }
 
 }   // android
diff --git a/services/audioflinger/FastCaptureDumpState.h b/services/audioflinger/FastCaptureDumpState.h
index 6f9c4c3..a1b8706 100644
--- a/services/audioflinger/FastCaptureDumpState.h
+++ b/services/audioflinger/FastCaptureDumpState.h
@@ -35,6 +35,7 @@
     uint32_t mReadErrors;       // total number of read() errors
     uint32_t mSampleRate;
     size_t   mFrameCount;
+    bool     mSilenced = false; // capture is silenced
 };
 
 }   // android
diff --git a/services/audioflinger/FastCaptureState.h b/services/audioflinger/FastCaptureState.h
index d287232..f949275 100644
--- a/services/audioflinger/FastCaptureState.h
+++ b/services/audioflinger/FastCaptureState.h
@@ -41,6 +41,8 @@
     audio_format_t  mFastPatchRecordFormat = AUDIO_FORMAT_INVALID;
     AudioBufferProvider* mFastPatchRecordBufferProvider = nullptr;   // a reference to a patch
                                                                      // record in fast mode
+    bool            mSilenceCapture = false;    // request to silence capture for fast track.
+                                                // note: this also silences the normal mixer pipe
 
     // Extends FastThreadState::Command
     static const Command
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 4a4899f..5930117 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -6062,10 +6062,6 @@
     bool trackPaused = false;
     bool trackStopped = false;
 
-    if ((mType == DIRECT) && audio_is_linear_pcm(mFormat) && !usesHwAvSync()) {
-        return !mStandby;
-    }
-
     // do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack
     // after a timeout and we will enter standby then.
     if (mTracks.size() > 0) {
@@ -7089,6 +7085,8 @@
         // reference to a fast track which is about to be removed
         sp<RecordTrack> fastTrackToRemove;
 
+        bool silenceFastCapture = false;
+
         { // scope for mLock
             Mutex::Autolock _l(mLock);
 
@@ -7176,14 +7174,33 @@
                             __func__, activeTrackState, activeTrack->id(), size);
                 }
 
-                activeTracks.add(activeTrack);
-                i++;
-
                 if (activeTrack->isFastTrack()) {
                     ALOG_ASSERT(!mFastTrackAvail);
                     ALOG_ASSERT(fastTrack == 0);
+                    // If the active fast track is silenced, either:
+                    // 1) silence the whole capture from the fast capture buffer if this is
+                    //    the only active track, or
+                    // 2) invalidate this track: this causes the client to reconnect and
+                    //    possibly be invalidated again until it is unsilenced.
+                    if (activeTrack->isSilenced()) {
+                        if (size > 1) {
+                            activeTrack->invalidate();
+                            ALOG_ASSERT(fastTrackToRemove == 0);
+                            fastTrackToRemove = activeTrack;
+                            removeTrack_l(activeTrack);
+                            mActiveTracks.remove(activeTrack);
+                            size--;
+                            continue;
+                        } else {
+                            silenceFastCapture = true;
+                        }
+                    }
                     fastTrack = activeTrack;
                 }
+
+                activeTracks.add(activeTrack);
+                i++;
+
             }
 
             mActiveTracks.updatePowerState(this);
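
A simplified standalone model of the silenced fast-track handling above: a silenced fast track is either the only active track (in which case the whole fast capture is silenced) or one of several (in which case it is dropped so the client reconnects). Track and processActiveTracks are illustrative names, not AudioFlinger types.

```cpp
#include <cassert>
#include <vector>

// Illustrative stand-in for an active capture track.
struct Track {
    int id;
    bool fast;
    bool silenced;
};

// Returns true if the fast capture pipe itself should be silenced.
static bool processActiveTracks(std::vector<Track>& active) {
    bool silenceFastCapture = false;
    for (auto it = active.begin(); it != active.end();) {
        if (it->fast && it->silenced) {
            if (active.size() > 1) {
                it = active.erase(it);  // analogue of invalidate() + removeTrack_l()
                continue;
            }
            silenceFastCapture = true;
        }
        ++it;
    }
    return silenceFastCapture;
}

int main() {
    std::vector<Track> onlyOne = {{1, true, true}};
    assert(processActiveTracks(onlyOne));

    std::vector<Track> several = {{1, true, true}, {2, false, false}};
    assert(!processActiveTracks(several) && several.size() == 1);
    return 0;
}
```
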
@@ -7257,6 +7274,10 @@
                         AUDIO_FORMAT_INVALID : fastTrack->format();
                 didModify = true;
             }
+            if (state->mSilenceCapture != silenceFastCapture) {
+                state->mSilenceCapture = silenceFastCapture;
+                didModify = true;
+            }
             sq->end(didModify);
             if (didModify) {
                 sq->push(block);
@@ -7341,8 +7362,10 @@
 
         // Update server timestamp with server stats
         // systemTime() is optional if the hardware supports timestamps.
-        mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
-        mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = lastIoEndNs;
+        if (framesRead >= 0) {
+            mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
+            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = lastIoEndNs;
+        }
 
         // Update server timestamp with kernel stats
         if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
@@ -8409,13 +8432,14 @@
     }
     result = mInput->stream->getFrameSize(&mFrameSize);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
+    LOG_ALWAYS_FATAL_IF(mFrameSize <= 0, "Error frame size was %zu but must be greater than zero",
+            mFrameSize);
     result = mInput->stream->getBufferSize(&mBufferSize);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
     mFrameCount = mBufferSize / mFrameSize;
-    ALOGV("%p RecordThread params: mChannelCount=%u, mFormat=%#x, mFrameSize=%lld, "
-            "mBufferSize=%lld, mFrameCount=%lld",
-            this, mChannelCount, mFormat, (long long)mFrameSize, (long long)mBufferSize,
-            (long long)mFrameCount);
+    ALOGV("%p RecordThread params: mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
+            "mBufferSize=%zu, mFrameCount=%zu",
+            this, mChannelCount, mFormat, mFrameSize, mBufferSize, mFrameCount);
     // This is the formula for calculating the temporary buffer size.
     // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to
     // 1 full output buffer, regardless of the alignment of the available input.
@@ -8991,6 +9015,8 @@
     LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
     result = mHalStream->getFrameSize(&mFrameSize);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
+    LOG_ALWAYS_FATAL_IF(mFrameSize <= 0, "Error frame size was %zu but must be greater than zero",
+            mFrameSize);
     result = mHalStream->getBufferSize(&mBufferSize);
     LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
     mFrameCount = mBufferSize / mFrameSize;
diff --git a/services/audioflinger/TrackMetrics.h b/services/audioflinger/TrackMetrics.h
index 12bd341..af16448 100644
--- a/services/audioflinger/TrackMetrics.h
+++ b/services/audioflinger/TrackMetrics.h
@@ -68,6 +68,7 @@
     }
 
     void logConstructor(pid_t creatorPid, uid_t creatorUid,
+            const std::string& traits = {},
             audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT) const {
         // Once this item is logged by the server, the client can add properties.
         // no lock required, all local or const variables.
@@ -76,7 +77,8 @@
             .setUid(creatorUid)
             .set(AMEDIAMETRICS_PROP_ALLOWUID, (int32_t)creatorUid)
             .set(AMEDIAMETRICS_PROP_EVENT,
-                    AMEDIAMETRICS_PROP_PREFIX_SERVER AMEDIAMETRICS_PROP_EVENT_VALUE_CTOR);
+                    AMEDIAMETRICS_PROP_PREFIX_SERVER AMEDIAMETRICS_PROP_EVENT_VALUE_CTOR)
+            .set(AMEDIAMETRICS_PROP_TRAITS, traits);
         // log streamType from the service, since client doesn't know chosen streamType.
         if (streamType != AUDIO_STREAM_DEFAULT) {
             item.set(AMEDIAMETRICS_PROP_STREAMTYPE, toString(streamType).c_str());
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 9386a42..be2f687 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -602,7 +602,8 @@
     }
 
     // Once this item is logged by the server, the client can add properties.
-    mTrackMetrics.logConstructor(creatorPid, uid, streamType);
+    const char * const traits = sharedBuffer == 0 ? "" : "static";
+    mTrackMetrics.logConstructor(creatorPid, uid, traits, streamType);
 }
 
 AudioFlinger::PlaybackThread::Track::~Track()
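
A toy sketch of the extra "traits" property now recorded at track construction ("static" for shared-buffer tracks, empty otherwise). MetricsItem and logTrackConstructor are simplified stand-ins for the mediametrics item, shown only to illustrate the optional-traits argument.

```cpp
#include <iostream>
#include <map>
#include <string>

// Toy key/value item standing in for a metrics record.
struct MetricsItem {
    std::map<std::string, std::string> props;
    MetricsItem& set(const std::string& key, const std::string& value) {
        props[key] = value;
        return *this;
    }
};

// Analogue of logConstructor() gaining an optional traits argument.
static MetricsItem logTrackConstructor(int creatorPid, int creatorUid,
                                       const std::string& traits = {}) {
    MetricsItem item;
    item.set("pid", std::to_string(creatorPid))
        .set("uid", std::to_string(creatorUid))
        .set("event", "ctor")
        .set("traits", traits);  // empty for streaming tracks, "static" otherwise
    return item;
}

int main() {
    const bool usesSharedBuffer = true;  // i.e. sharedBuffer != 0 in the Track ctor
    MetricsItem item =
            logTrackConstructor(1000, 10042, usesSharedBuffer ? "static" : "");
    std::cout << "traits=" << item.props["traits"] << "\n";
    return 0;
}
```
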
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 0c5d1d0..923310c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -35,6 +35,7 @@
 
 namespace android {
 
+class AudioPolicyMix;
 class DeviceDescriptor;
 class HwAudioOutputDescriptor;
 class SwAudioOutputDescriptor;
@@ -90,11 +91,12 @@
                           product_strategy_t strategy, VolumeSource volumeSource,
                           audio_output_flags_t flags,
                           bool isPreferredDeviceForExclusiveUse,
-                          std::vector<wp<SwAudioOutputDescriptor>> secondaryOutputs) :
+                          std::vector<wp<SwAudioOutputDescriptor>> secondaryOutputs,
+                          wp<AudioPolicyMix> primaryMix) :
         ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId,
                          isPreferredDeviceForExclusiveUse),
         mStream(stream), mStrategy(strategy), mVolumeSource(volumeSource), mFlags(flags),
-        mSecondaryOutputs(std::move(secondaryOutputs)) {}
+        mSecondaryOutputs(std::move(secondaryOutputs)), mPrimaryMix(primaryMix) {}
     ~TrackClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
@@ -108,6 +110,9 @@
         return mSecondaryOutputs;
     };
     VolumeSource volumeSource() const { return mVolumeSource; }
+    const sp<AudioPolicyMix> getPrimaryMix() const {
+        return mPrimaryMix.promote();
+    };
 
     void setActive(bool active) override
     {
@@ -136,7 +141,7 @@
     const VolumeSource mVolumeSource;
     const audio_output_flags_t mFlags;
     const std::vector<wp<SwAudioOutputDescriptor>> mSecondaryOutputs;
-
+    const wp<AudioPolicyMix> mPrimaryMix;
     /**
      * required for duplicating thread, prevent from removing active client from an output
      * involved in a duplication.
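
The new mPrimaryMix member is a weak reference that is promoted on access, so a client descriptor never keeps an unregistered mix alive. A standalone analogue using std::weak_ptr/std::shared_ptr (Android's wp<>/sp<>::promote() behave similarly for this purpose):

```cpp
#include <cassert>
#include <memory>

// Stand-in for AudioPolicyMix; the client keeps only a weak reference.
struct PolicyMix { int id; };

class TrackClient {
public:
    explicit TrackClient(std::weak_ptr<PolicyMix> primaryMix)
        : mPrimaryMix(std::move(primaryMix)) {}

    // Analogue of getPrimaryMix(): promote the weak reference; null if the mix
    // has since been unregistered and destroyed.
    std::shared_ptr<PolicyMix> getPrimaryMix() const { return mPrimaryMix.lock(); }

private:
    std::weak_ptr<PolicyMix> mPrimaryMix;
};

int main() {
    auto mix = std::make_shared<PolicyMix>(PolicyMix{7});
    TrackClient client(mix);
    assert(client.getPrimaryMix() != nullptr);

    mix.reset();                                // mix unregistered
    assert(client.getPrimaryMix() == nullptr);  // promotion now fails
    return 0;
}
```
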
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index fc1a59f..b6de4be 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -28,7 +28,7 @@
 
 void AudioPolicyMix::dump(String8 *dst, int spaces, int index) const
 {
-    dst->appendFormat("%*sAudio Policy Mix %d:\n", spaces, "", index + 1);
+    dst->appendFormat("%*sAudio Policy Mix %d (%p):\n", spaces, "", index + 1, this);
     std::string mixTypeLiteral;
     if (!MixTypeConverter::toString(mMixType, mixTypeLiteral)) {
         ALOGE("%s: failed to convert mix type %d", __FUNCTION__, mMixType);
@@ -44,6 +44,9 @@
 
     dst->appendFormat("%*s- device address: %s\n", spaces, "", mDeviceAddress.string());
 
+    dst->appendFormat("%*s- output: %d\n", spaces, "",
+            mOutput == nullptr ? 0 : mOutput->mIoHandle);
+
     int indexCriterion = 0;
     for (const auto &criterion : mCriteria) {
         dst->appendFormat("%*s- Criterion %d: ", spaces + 2, "", indexCriterion++);
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 95822b9..afc4d01 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -23,6 +23,7 @@
 #include <TypeConverter.h>
 #include "AudioOutputDescriptor.h"
 #include "AudioPatch.h"
+#include "AudioPolicyMix.h"
 #include "ClientDescriptor.h"
 #include "DeviceDescriptor.h"
 #include "HwModule.h"
@@ -55,6 +56,12 @@
     ClientDescriptor::dump(dst, spaces, index);
     dst->appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
     dst->appendFormat("%*s- Refcount: %d\n", spaces, "", mActivityCount);
+    dst->appendFormat("%*s- DAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
+    dst->appendFormat("%*s- DAP Secondary Outputs:\n", spaces, "");
+    for (const auto& desc : mSecondaryOutputs) {
+        const sp<SwAudioOutputDescriptor> outputDesc = desc.promote();
+        dst->appendFormat("%*s  - %d\n", spaces, "",
+                outputDesc == nullptr ? 0 : outputDesc->mIoHandle);
+    }
 }
 
 std::string TrackClientDescriptor::toShortString() const
@@ -88,7 +95,7 @@
     TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
         {config.sample_rate, config.channel_mask, config.format}, AUDIO_PORT_HANDLE_NONE,
         stream, strategy, volumeSource, AUDIO_OUTPUT_FLAG_NONE, false,
-        {} /* Sources do not support secondary outputs*/), mSrcDevice(srcDevice)
+        {} /* Sources do not support secondary outputs*/, nullptr), mSrcDevice(srcDevice)
 {
 }
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 2a9a4c4..c5c13e9 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1102,14 +1102,15 @@
     };
     *portId = PolicyAudioPort::getNextUniqueId();
 
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
     sp<TrackClientDescriptor> clientDesc =
         new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
                                   sanitizedRequestedPortId, *stream,
                                   mEngine->getProductStrategyForAttributes(resultAttr),
                                   toVolumeSource(resultAttr),
                                   *flags, isRequestedDeviceForExclusiveUse,
-                                  std::move(weakSecondaryOutputDescs));
-    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
+                                  std::move(weakSecondaryOutputDescs),
+                                  outputDesc->mPolicyMix);
     outputDesc->addClient(clientDesc);
 
     ALOGV("%s() returns output %d requestedPortId %d selectedDeviceId %d for port ID %d", __func__,
@@ -2877,7 +2878,7 @@
 {
     ALOGV("registerPolicyMixes() %zu mix(es)", mixes.size());
     status_t res = NO_ERROR;
-
+    bool checkOutputs = false;
     sp<HwModule> rSubmixModule;
     // examine each mix's route type
     for (size_t i = 0; i < mixes.size(); i++) {
@@ -2996,11 +2997,16 @@
                         i, type, address.string());
                 res = INVALID_OPERATION;
                 break;
+            } else {
+                checkOutputs = true;
             }
         }
     }
     if (res != NO_ERROR) {
         unregisterPolicyMixes(mixes);
+    } else if (checkOutputs) {
+        checkForDeviceAndOutputChanges();
+        updateCallAndOutputRouting();
     }
     return res;
 }
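
A sketch of the batching pattern used by registerPolicyMixes() (and mirrored in unregisterPolicyMixes() below): each successfully handled mix sets a flag, routing is re-evaluated once after the whole loop, and a failure triggers a rollback. The types and function names here are illustrative stand-ins, not the AudioPolicyManager API.

```cpp
#include <iostream>
#include <string>
#include <vector>

struct Mix { std::string address; bool valid; };

// Register a batch of mixes; refresh routing once at the end if anything
// actually changed, and roll back if any registration failed.
static bool registerMixes(const std::vector<Mix>& mixes) {
    bool ok = true;
    bool checkOutputs = false;
    for (const Mix& mix : mixes) {
        if (!mix.valid) {        // e.g. bad route flags or a duplicate address
            ok = false;
            break;
        }
        checkOutputs = true;     // at least one mix changed the policy state
    }
    if (!ok) {
        std::cout << "rolling back partially registered mixes\n";
    } else if (checkOutputs) {
        // stand-in for checkForDeviceAndOutputChanges() + updateCallAndOutputRouting()
        std::cout << "re-evaluating outputs and routing once\n";
    }
    return ok;
}

int main() {
    registerMixes({{"remote_submix_0", true}, {"remote_submix_1", true}});
    registerMixes({{"remote_submix_2", false}});
    return 0;
}
```
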
@@ -3009,6 +3015,7 @@
 {
     ALOGV("unregisterPolicyMixes() num mixes %zu", mixes.size());
     status_t res = NO_ERROR;
+    bool checkOutputs = false;
     sp<HwModule> rSubmixModule;
     // examine each mix's route type
     for (const auto& mix : mixes) {
@@ -3049,9 +3056,15 @@
             if (mPolicyMixes.unregisterMix(mix) != NO_ERROR) {
                 res = INVALID_OPERATION;
                 continue;
+            } else {
+                checkOutputs = true;
             }
         }
     }
+    if (res == NO_ERROR && checkOutputs) {
+        checkForDeviceAndOutputChanges();
+        updateCallAndOutputRouting();
+    }
     return res;
 }
 
@@ -3820,7 +3833,11 @@
                     ALOGE("%s output not found for id %d", __func__, patch->sources[0].id);
                     return BAD_VALUE;
                 }
-                // Reset handle so that setOutputDevice will force new AF patch to reach the sink
+                if (patchDesc->getHandle() != outputDesc->getPatchHandle()) {
+                    // force SwOutput patch removal as the AF counterpart patch has already gone.
+                    ALOGV("%s reset patch handle on Output as different from SWBridge", __func__);
+                    removeAudioPatch(outputDesc->getPatchHandle());
+                }
                 outputDesc->setPatchHandle(AUDIO_PATCH_HANDLE_NONE);
                 setOutputDevices(outputDesc,
                                  getNewOutputDevices(outputDesc, true /*fromCache*/),
@@ -5222,32 +5239,38 @@
     SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevices(oldDevices, mPreviousOutputs);
     SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevices(newDevices, mOutputs);
 
-    // also take into account external policy-related changes: add all outputs which are
-    // associated with policies in the "before" and "after" output vectors
-    ALOGVV("%s(): policy related outputs", __func__);
-    bool hasDynamicPolicy = false;
-    for (size_t i = 0 ; i < mPreviousOutputs.size() ; i++) {
-        const sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueAt(i);
-        if (desc != 0 && desc->mPolicyMix != NULL) {
-            srcOutputs.add(desc->mIoHandle);
-            hasDynamicPolicy = true;
-            ALOGVV(" previous outputs: adding %d", desc->mIoHandle);
+    uint32_t maxLatency = 0;
+    bool invalidate = false;
+    // take into account dynamic audio policies related changes: if a client is now associated
+    // to a different policy mix than at creation time, invalidate corresponding stream
+    for (size_t i = 0; i < mPreviousOutputs.size() && !invalidate; i++) {
+        const sp<SwAudioOutputDescriptor>& desc = mPreviousOutputs.valueAt(i);
+        if (desc->isDuplicated()) {
+            continue;
         }
-    }
-    for (size_t i = 0 ; i < mOutputs.size() ; i++) {
-        const sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
-        if (desc != 0 && desc->mPolicyMix != NULL) {
-            dstOutputs.add(desc->mIoHandle);
-            hasDynamicPolicy = true;
-            ALOGVV(" new outputs: adding %d", desc->mIoHandle);
+        for (const sp<TrackClientDescriptor>& client : desc->getClientIterable()) {
+            if (mEngine->getProductStrategyForAttributes(client->attributes()) != psId) {
+                continue;
+            }
+            sp<AudioPolicyMix> primaryMix;
+            status_t status = mPolicyMixes.getOutputForAttr(client->attributes(), client->uid(),
+                    client->flags(), primaryMix, nullptr);
+            if (status != OK) {
+                continue;
+            }
+            if (client->getPrimaryMix() != primaryMix) {
+                invalidate = true;
+                if (desc->isStrategyActive(psId)) {
+                    maxLatency = desc->latency();
+                }
+                break;
+            }
         }
     }
 
-    if (srcOutputs != dstOutputs) {
+    if (srcOutputs != dstOutputs || invalidate) {
         // get maximum latency of all source outputs to determine the minimum mute time guaranteeing
         // audio from invalidated tracks will be rendered when unmuting
-        uint32_t maxLatency = 0;
-        bool invalidate = hasDynamicPolicy;
         for (audio_io_handle_t srcOut : srcOutputs) {
             sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
             if (desc == nullptr) continue;
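
A standalone model of the invalidation check added above: a client remembers the primary mix captured at creation, and if re-resolving its attributes now yields a different mix, its stream must be invalidated so it reconnects. The types and resolveMixNow helper are illustrative, not the policy manager API.

```cpp
#include <cassert>
#include <memory>
#include <vector>

struct PolicyMix { int id; };

struct Client {
    int strategy;
    std::weak_ptr<PolicyMix> primaryMixAtCreation;
};

// Stand-in for re-resolving a client's attributes against the registered mixes.
static std::shared_ptr<PolicyMix> resolveMixNow(
        const std::shared_ptr<PolicyMix>& currentMix) {
    return currentMix;
}

// A stream must be invalidated if any client of the strategy would now be
// routed to a different primary mix than the one captured at creation.
static bool needsInvalidate(const std::vector<Client>& clients, int strategyId,
                            const std::shared_ptr<PolicyMix>& currentMix) {
    for (const Client& c : clients) {
        if (c.strategy != strategyId) continue;
        if (c.primaryMixAtCreation.lock() != resolveMixNow(currentMix)) {
            return true;
        }
    }
    return false;
}

int main() {
    auto oldMix = std::make_shared<PolicyMix>(PolicyMix{1});
    auto newMix = std::make_shared<PolicyMix>(PolicyMix{2});
    std::vector<Client> clients = {{/*strategy*/ 3, oldMix}};

    assert(!needsInvalidate(clients, 3, oldMix));  // same mix: nothing to do
    assert(needsInvalidate(clients, 3, newMix));   // mix changed: invalidate
    return 0;
}
```
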
@@ -5747,15 +5770,6 @@
     DeviceVector filteredDevices = outputDesc->filterSupportedDevices(devices);
     DeviceVector prevDevices = outputDesc->devices();
 
-    // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
-    // output profile or if new device is not supported AND previous device(s) is(are) still
-    // available (otherwise reset device must be done on the output)
-    if (!devices.isEmpty() && filteredDevices.isEmpty() &&
-            !mAvailableOutputDevices.filter(prevDevices).empty()) {
-        ALOGV("%s: unsupported device %s for output", __func__, devices.toString().c_str());
-        return 0;
-    }
-
     ALOGV("setOutputDevices() prevDevice %s", prevDevices.toString().c_str());
 
     if (!filteredDevices.isEmpty()) {
@@ -5770,6 +5784,17 @@
         muteWaitMs = 0;
     }
 
+    // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
+    // output profile or if new device is not supported AND previous device(s) is(are) still
+    // available (otherwise reset device must be done on the output)
+    if (!devices.isEmpty() && filteredDevices.isEmpty() &&
+            !mAvailableOutputDevices.filter(prevDevices).empty()) {
+        ALOGV("%s: unsupported device %s for output", __func__, devices.toString().c_str());
+        // restore previous device after evaluating strategy mute state
+        outputDesc->setDevices(prevDevices);
+        return muteWaitMs;
+    }
+
     // Do not change the routing if:
     //      the requested device is AUDIO_DEVICE_NONE
     //      OR the requested device is the same as current device
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 9577160..34d07b6 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -593,7 +593,7 @@
     }
 
     // including successes gets very verbose
-    // but once we cut over to westworld, log them all.
+    // but once we cut over to statsd, log them all.
     if (status != NO_ERROR) {
 
         static constexpr char kAudioPolicy[] = "audiopolicy";
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index bac9430..af1e01d 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -496,9 +496,6 @@
         clientToDisconnect->notifyError(
                 hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
                 CaptureResultExtras{});
-        // Ensure not in binder RPC so client disconnect PID checks work correctly
-        LOG_ALWAYS_FATAL_IF(CameraThreadState::getCallingPid() != getpid(),
-                "onDeviceStatusChanged must be called from the camera service process!");
         clientToDisconnect->disconnect();
     }
 }
@@ -1377,7 +1374,12 @@
             Mutex::Autolock l(mLogLock);
             mEventLog.add(msg);
 
-            return -EBUSY;
+            auto current = mActiveClientManager.get(cameraId);
+            if (current != nullptr) {
+                return -EBUSY; // CAMERA_IN_USE
+            } else {
+                return -EUSERS; // MAX_CAMERAS_IN_USE
+            }
         }
 
         for (auto& i : evicted) {
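
A minimal sketch of the error split introduced here: -EBUSY (the same camera is already held by another client) versus -EUSERS (too many cameras open at once), each mapped to a distinct client-visible error. ClientError and mapOpenFailure are illustrative names, not the binder status API.

```cpp
#include <cerrno>
#include <iostream>

// Client-visible outcomes for a failed open, as used in this sketch.
enum class ClientError { CAMERA_IN_USE, MAX_CAMERAS_IN_USE, INVALID_OPERATION };

// -EBUSY: the same camera is already held by another (higher-priority) client.
// -EUSERS: the limit on concurrently open cameras has been reached.
static ClientError mapOpenFailure(int err) {
    switch (err) {
        case -EBUSY:  return ClientError::CAMERA_IN_USE;
        case -EUSERS: return ClientError::MAX_CAMERAS_IN_USE;
        default:      return ClientError::INVALID_OPERATION;
    }
}

int main() {
    std::cout << (mapOpenFailure(-EBUSY) == ClientError::CAMERA_IN_USE) << "\n";
    std::cout << (mapOpenFailure(-EUSERS) == ClientError::MAX_CAMERAS_IN_USE) << "\n";
    return 0;
}
```
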
@@ -1638,7 +1640,7 @@
                     cameraId.string(), clientName8.string(), clientPid);
         }
 
-        // Enforce client permissions and do basic sanity checks
+        // Enforce client permissions and do basic validity checks
         if(!(ret = validateConnectLocked(cameraId, clientName8,
                 /*inout*/clientUid, /*inout*/clientPid, /*out*/originalClientPid)).isOk()) {
             return ret;
@@ -1669,6 +1671,10 @@
                     return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
                             "Higher-priority client using camera, ID \"%s\" currently unavailable",
                             cameraId.string());
+                case -EUSERS:
+                    return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+                            "Too many cameras already open, cannot open camera \"%s\"",
+                            cameraId.string());
                 default:
                     return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
                             "Unexpected error %s (%d) opening camera \"%s\"",
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 20333d1..dbc863b 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -230,7 +230,7 @@
     previewFpsRange[1] = fastInfo.bestStillCaptureFpsRange[1];
 
     // PREVIEW_FRAME_RATE / SUPPORTED_PREVIEW_FRAME_RATES are deprecated, but
-    // still have to do something sane for them
+    // still have to do something reasonable for them
 
     // NOTE: Not scaled like FPS range values are.
     int previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 964c96a..9d3874f 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -205,7 +205,7 @@
     virtual void notifyRepeatingRequestError(long lastFrameNumber);
 
     // utility function to convert AIDL SessionConfiguration to HIDL
-    // streamConfiguration. Also checks for sanity of SessionConfiguration and
+    // streamConfiguration. Also checks for validity of SessionConfiguration and
     // returns a non-ok binder::Status if the passed in session configuration
     // isn't valid.
     static binder::Status
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 1a0881f..a63f402 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -620,7 +620,8 @@
         if (mPendingInputFrames.find(mAppSegmentFrameNumbers.front()) == mPendingInputFrames.end()) {
             ALOGE("%s: mPendingInputFrames doesn't contain frameNumber %" PRId64, __FUNCTION__,
                     mAppSegmentFrameNumbers.front());
-            mInputYuvBuffers.erase(it);
+            mInputAppSegmentBuffers.erase(it);
+            mAppSegmentFrameNumbers.pop();
             continue;
         }
 
@@ -664,6 +665,7 @@
             ALOGE("%s: mPendingInputFrames doesn't contain frameNumber %" PRId64, __FUNCTION__,
                     mMainImageFrameNumbers.front());
             mInputYuvBuffers.erase(it);
+            mMainImageFrameNumbers.pop();
             continue;
         }
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index b00a2d9..a898df9 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1043,8 +1043,9 @@
     }
     CaptureOutputStates states {
         mId,
-        mInFlightLock, mInFlightMap,
-        mOutputLock,  mResultQueue, mResultSignal,
+        mInFlightLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mInFlightMap, mOutputLock,  mResultQueue, mResultSignal,
         mNextShutterFrameNumber,
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
@@ -1100,8 +1101,9 @@
 
     CaptureOutputStates states {
         mId,
-        mInFlightLock, mInFlightMap,
-        mOutputLock,  mResultQueue, mResultSignal,
+        mInFlightLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mInFlightMap, mOutputLock,  mResultQueue, mResultSignal,
         mNextShutterFrameNumber,
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
@@ -1139,8 +1141,9 @@
 
     CaptureOutputStates states {
         mId,
-        mInFlightLock, mInFlightMap,
-        mOutputLock,  mResultQueue, mResultSignal,
+        mInFlightLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mInFlightMap, mOutputLock,  mResultQueue, mResultSignal,
         mNextShutterFrameNumber,
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
@@ -1777,13 +1780,6 @@
     mStatusChanged.broadcast();
 }
 
-void Camera3Device::pauseStateNotify(bool enable) {
-    Mutex::Autolock il(mInterfaceLock);
-    Mutex::Autolock l(mLock);
-
-    mPauseStateNotify = enable;
-}
-
 // Pause to reconfigure
 status_t Camera3Device::internalPauseAndWaitLocked(nsecs_t maxExpectedDuration) {
     if (mRequestThread.get() != nullptr) {
@@ -2359,7 +2355,7 @@
     return false;
 }
 
-bool Camera3Device::reconfigureCamera(const CameraMetadata& sessionParams) {
+bool Camera3Device::reconfigureCamera(const CameraMetadata& sessionParams, int clientStatusId) {
     ATRACE_CALL();
     bool ret = false;
 
@@ -2373,7 +2369,16 @@
         return true;
     }
 
-    auto rc = internalPauseAndWaitLocked(maxExpectedDuration);
+    status_t rc = NO_ERROR;
+    bool markClientActive = false;
+    if (mStatus == STATUS_ACTIVE) {
+        markClientActive = true;
+        mPauseStateNotify = true;
+        mStatusTracker->markComponentIdle(clientStatusId, Fence::NO_FENCE);
+
+        rc = internalPauseAndWaitLocked(maxExpectedDuration);
+    }
+
     if (rc == NO_ERROR) {
         mNeedConfig = true;
         rc = configureStreamsLocked(mOperatingMode, sessionParams, /*notifyRequestThread*/ false);
@@ -2401,6 +2406,10 @@
         ALOGE("%s: Failed to pause streaming: %d", __FUNCTION__, rc);
     }
 
+    if (markClientActive) {
+        mStatusTracker->markComponentActive(clientStatusId);
+    }
+
     return ret;
 }
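
A sketch of the reshaped reconfigure flow: the pause and the idle/active status-tracker bracketing now live inside reconfigure itself, happen only when the device is ACTIVE, and the component is always marked active again afterwards. StatusTracker and reconfigure here are simplified stand-ins.

```cpp
#include <iostream>

enum class Status { IDLE, ACTIVE };

struct StatusTracker {
    void markComponentIdle(int id)   { std::cout << "component " << id << " idle\n"; }
    void markComponentActive(int id) { std::cout << "component " << id << " active\n"; }
};

// Pause and mark the client's status component idle only when the device is
// ACTIVE, and always mark it active again once reconfiguration is done.
static bool reconfigure(Status& status, StatusTracker& tracker, int clientStatusId) {
    bool markClientActive = false;
    if (status == Status::ACTIVE) {
        markClientActive = true;
        tracker.markComponentIdle(clientStatusId);  // mask idle/active transitions
        status = Status::IDLE;                      // stand-in for internalPauseAndWaitLocked()
    }

    bool ok = true;  // stand-in for configureStreamsLocked() + resume

    if (markClientActive) {
        tracker.markComponentActive(clientStatusId);
    }
    return ok;
}

int main() {
    Status status = Status::ACTIVE;
    StatusTracker tracker;
    reconfigure(status, tracker, /*clientStatusId=*/3);
    return 0;
}
```
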
 
@@ -4277,22 +4286,11 @@
         }
 
         if (res == OK) {
-            sp<StatusTracker> statusTracker = mStatusTracker.promote();
-            if (statusTracker != 0) {
-                sp<Camera3Device> parent = mParent.promote();
-                if (parent != nullptr) {
-                    parent->pauseStateNotify(true);
-                }
-
-                statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
-
-                if (parent != nullptr) {
-                    mReconfigured |= parent->reconfigureCamera(mLatestSessionParams);
-                }
-
-                statusTracker->markComponentActive(mStatusId);
-                setPaused(false);
+            sp<Camera3Device> parent = mParent.promote();
+            if (parent != nullptr) {
+                mReconfigured |= parent->reconfigureCamera(mLatestSessionParams, mStatusId);
             }
+            setPaused(false);
 
             if (mNextRequests[0].captureRequest->mInputStream != nullptr) {
                 mNextRequests[0].captureRequest->mInputStream->restoreConfiguredState();
@@ -5906,11 +5904,13 @@
     //       though technically no other thread should be talking to Camera3Device at this point
     Camera3OfflineStates offlineStates(
             mTagMonitor, mVendorTagId, mUseHalBufManager, mNeedFixupMonochromeTags,
-            mUsePartialResult, mNumPartialResults, mNextResultFrameNumber,
-            mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-            mNextShutterFrameNumber, mNextReprocessShutterFrameNumber,
-            mNextZslStillShutterFrameNumber, mDeviceInfo, mPhysicalDeviceInfoMap,
-            mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers);
+            mUsePartialResult, mNumPartialResults, mLastCompletedRegularFrameNumber,
+            mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+            mNextResultFrameNumber, mNextReprocessResultFrameNumber,
+            mNextZslStillResultFrameNumber, mNextShutterFrameNumber,
+            mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
+            mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers,
+            mZoomRatioMappers, mRotateAndCropMappers);
 
     *session = new Camera3OfflineSession(mId, inputStream, offlineStreamSet,
             std::move(bufferRecords), offlineReqs, offlineStates, offlineSession);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index c059f55..408f1f9 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -638,17 +638,10 @@
                                             const SurfaceMap &surfaceMap);
 
     /**
-     * Pause state updates to the client application.  Needed to mask out idle/active
-     * transitions during internal reconfigure
-     */
-    void pauseStateNotify(bool enable);
-
-    /**
      * Internally re-configure camera device using new session parameters.
-     * This will get triggered by the request thread. Be sure to call
-     * pauseStateNotify(true) before going idle in the requesting location.
+     * This will get triggered by the request thread.
      */
-    bool reconfigureCamera(const CameraMetadata& sessionParams);
+    bool reconfigureCamera(const CameraMetadata& sessionParams, int clientStatusId);
 
     /**
      * Return true in case of any output or input abandoned streams,
@@ -1017,6 +1010,9 @@
     std::mutex                    mInFlightLock;
     camera3::InFlightRequestMap   mInFlightMap;
     nsecs_t                       mExpectedInflightDuration = 0;
+    int64_t                       mLastCompletedRegularFrameNumber = -1;
+    int64_t                       mLastCompletedReprocessFrameNumber = -1;
+    int64_t                       mLastCompletedZslFrameNumber = -1;
     // End of mInFlightLock protection scope
 
     int mInFlightStatusId; // const after initialize
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
index 5942868..95f9633 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
@@ -61,6 +61,9 @@
         mNeedFixupMonochromeTags(offlineStates.mNeedFixupMonochromeTags),
         mUsePartialResult(offlineStates.mUsePartialResult),
         mNumPartialResults(offlineStates.mNumPartialResults),
+        mLastCompletedRegularFrameNumber(offlineStates.mLastCompletedRegularFrameNumber),
+        mLastCompletedReprocessFrameNumber(offlineStates.mLastCompletedReprocessFrameNumber),
+        mLastCompletedZslFrameNumber(offlineStates.mLastCompletedZslFrameNumber),
         mNextResultFrameNumber(offlineStates.mNextResultFrameNumber),
         mNextReprocessResultFrameNumber(offlineStates.mNextReprocessResultFrameNumber),
         mNextZslStillResultFrameNumber(offlineStates.mNextZslStillResultFrameNumber),
@@ -247,8 +250,9 @@
 
     CaptureOutputStates states {
         mId,
-        mOfflineReqsLock, mOfflineReqs,
-        mOutputLock, mResultQueue, mResultSignal,
+        mOfflineReqsLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mOfflineReqs, mOutputLock, mResultQueue, mResultSignal,
         mNextShutterFrameNumber,
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
@@ -285,8 +289,9 @@
 
     CaptureOutputStates states {
         mId,
-        mOfflineReqsLock, mOfflineReqs,
-        mOutputLock, mResultQueue, mResultSignal,
+        mOfflineReqsLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mOfflineReqs, mOutputLock, mResultQueue, mResultSignal,
         mNextShutterFrameNumber,
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
@@ -318,8 +323,9 @@
 
     CaptureOutputStates states {
         mId,
-        mOfflineReqsLock, mOfflineReqs,
-        mOutputLock, mResultQueue, mResultSignal,
+        mOfflineReqsLock, mLastCompletedRegularFrameNumber,
+        mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
+        mOfflineReqs, mOutputLock, mResultQueue, mResultSignal,
         mNextShutterFrameNumber,
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.h b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
index 208f70d..c4c7a85 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.h
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
@@ -57,10 +57,11 @@
             const TagMonitor& tagMonitor, const metadata_vendor_id_t vendorTagId,
             const bool useHalBufManager, const bool needFixupMonochromeTags,
             const bool usePartialResult, const uint32_t numPartialResults,
-            const uint32_t nextResultFN, const uint32_t nextReprocResultFN,
-            const uint32_t nextZslResultFN,  const uint32_t nextShutterFN,
-            const uint32_t nextReprocShutterFN, const uint32_t nextZslShutterFN,
-            const CameraMetadata& deviceInfo,
+            const int64_t lastCompletedRegularFN, const int64_t lastCompletedReprocessFN,
+            const int64_t lastCompletedZslFN, const uint32_t nextResultFN,
+            const uint32_t nextReprocResultFN, const uint32_t nextZslResultFN,
+            const uint32_t nextShutterFN, const uint32_t nextReprocShutterFN,
+            const uint32_t nextZslShutterFN, const CameraMetadata& deviceInfo,
             const std::unordered_map<std::string, CameraMetadata>& physicalDeviceInfoMap,
             const std::unordered_map<std::string, camera3::DistortionMapper>& distortionMappers,
             const std::unordered_map<std::string, camera3::ZoomRatioMapper>& zoomRatioMappers,
@@ -69,6 +70,9 @@
             mTagMonitor(tagMonitor), mVendorTagId(vendorTagId),
             mUseHalBufManager(useHalBufManager), mNeedFixupMonochromeTags(needFixupMonochromeTags),
             mUsePartialResult(usePartialResult), mNumPartialResults(numPartialResults),
+            mLastCompletedRegularFrameNumber(lastCompletedRegularFN),
+            mLastCompletedReprocessFrameNumber(lastCompletedReprocessFN),
+            mLastCompletedZslFrameNumber(lastCompletedZslFN),
             mNextResultFrameNumber(nextResultFN),
             mNextReprocessResultFrameNumber(nextReprocResultFN),
             mNextZslStillResultFrameNumber(nextZslResultFN),
@@ -90,6 +94,15 @@
     const bool mUsePartialResult;
     const uint32_t mNumPartialResults;
 
+    // The last completed (buffers, result metadata, and error notify) regular
+    // request frame number
+    const int64_t mLastCompletedRegularFrameNumber;
+    // The last completed (buffers, result metadata, and error notify) reprocess
+    // request frame number
+    const int64_t mLastCompletedReprocessFrameNumber;
+    // The last completed (buffers, result metadata, and error notify) zsl
+    // request frame number
+    const int64_t mLastCompletedZslFrameNumber;
     // the minimal frame number of the next non-reprocess result
     const uint32_t mNextResultFrameNumber;
     // the minimal frame number of the next reprocess result
@@ -214,6 +227,12 @@
     std::mutex mOutputLock;
     std::list<CaptureResult> mResultQueue;
     std::condition_variable mResultSignal;
+    // the last completed frame number of regular requests
+    int64_t mLastCompletedRegularFrameNumber;
+    // the last completed frame number of reprocess requests
+    int64_t mLastCompletedReprocessFrameNumber;
+    // the last completed frame number of ZSL still capture requests
+    int64_t mLastCompletedZslFrameNumber;
     // the minimal frame number of the next non-reprocess result
     uint32_t mNextResultFrameNumber;
     // the minimal frame number of the next reprocess result
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 603f516..eea5ef1 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -405,8 +405,8 @@
     // In the case of a successful request:
     //      all input and output buffers, all result metadata, shutter callback
     //      arrived.
-    // In the case of a unsuccessful request:
-    //      all input and output buffers arrived.
+    // In the case of an unsuccessful request:
+    //      all input and output buffers, as well as request/result error notifications, arrived.
     if (request.numBuffersLeft == 0 &&
             (request.skipResultMetadata ||
             (request.haveResultMetadata && shutterTimestamp != 0))) {
@@ -434,7 +434,17 @@
             states.useHalBufManager, states.listener,
             request.pendingOutputBuffers.array(),
             request.pendingOutputBuffers.size(), 0, /*timestampIncreasing*/true,
-            request.outputSurfaces, request.resultExtras);
+            request.outputSurfaces, request.resultExtras,
+            request.errorBufStrategy);
+
+        // Note down the just completed frame number
+        if (request.hasInputBuffer) {
+            states.lastCompletedReprocessFrameNumber = frameNumber;
+        } else if (request.zslCapture) {
+            states.lastCompletedZslFrameNumber = frameNumber;
+        } else {
+            states.lastCompletedRegularFrameNumber = frameNumber;
+        }
 
         removeInFlightMapEntryLocked(states, idx);
         ALOGVV("%s: removed frame %d from InFlightMap", __FUNCTION__, frameNumber);
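
A standalone sketch of the "last completed frame number" bookkeeping added above: when an in-flight entry is removed, the frame number is recorded per request type (reprocess, ZSL still, regular), which notifyShutter() then forwards in CaptureResultExtras. LastCompleted and noteCompleted are illustrative names.

```cpp
#include <cassert>
#include <cstdint>

struct LastCompleted {
    int64_t regular = -1;
    int64_t reprocess = -1;
    int64_t zsl = -1;
};

// Record the frame number of a just-completed request in the slot matching its
// type: reprocess (has an input buffer), ZSL still capture, or regular.
static void noteCompleted(LastCompleted& state, int64_t frameNumber,
                          bool hasInputBuffer, bool zslCapture) {
    if (hasInputBuffer) {
        state.reprocess = frameNumber;
    } else if (zslCapture) {
        state.zsl = frameNumber;
    } else {
        state.regular = frameNumber;
    }
}

int main() {
    LastCompleted state;
    noteCompleted(state, 10, /*hasInputBuffer=*/false, /*zslCapture=*/false);
    noteCompleted(state, 11, /*hasInputBuffer=*/true,  /*zslCapture=*/false);
    noteCompleted(state, 12, /*hasInputBuffer=*/false, /*zslCapture=*/true);
    assert(state.regular == 10 && state.reprocess == 11 && state.zsl == 12);
    return 0;
}
```
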
@@ -487,10 +497,13 @@
         InFlightRequest &request = states.inflightMap.editValueAt(idx);
         ALOGVV("%s: got InFlightRequest requestId = %" PRId32
                 ", frameNumber = %" PRId64 ", burstId = %" PRId32
-                ", partialResultCount = %d, hasCallback = %d",
+                ", partialResultCount = %d/%d, hasCallback = %d, num_output_buffers %d"
+                ", usePartialResult = %d",
                 __FUNCTION__, request.resultExtras.requestId,
                 request.resultExtras.frameNumber, request.resultExtras.burstId,
-                result->partial_result, request.hasCallback);
+                result->partial_result, states.numPartialResults,
+                request.hasCallback, result->num_output_buffers,
+                states.usePartialResult);
         // Always update the partial count to the latest one if it's not 0
         // (buffers only). When framework aggregates adjacent partial results
         // into one, the latest partial count will be used.
@@ -555,6 +568,7 @@
                     request.collectedPartialResult);
             }
             request.haveResultMetadata = true;
+            request.errorBufStrategy = ERROR_BUF_RETURN_NOTIFY;
         }
 
         uint32_t numBuffersReturned = result->num_output_buffers;
@@ -581,18 +595,14 @@
             request.sensorTimestamp = entry.data.i64[0];
         }
 
-        // If shutter event isn't received yet, append the output buffers to
-        // the in-flight request. Otherwise, return the output buffers to
-        // streams.
-        if (shutterTimestamp == 0) {
-            request.pendingOutputBuffers.appendArray(result->output_buffers,
+        // Always append the output buffers to the in-flight request; only
+        // return the pending buffers once the shutter event has been received.
+        request.pendingOutputBuffers.appendArray(result->output_buffers,
                 result->num_output_buffers);
-        } else {
-            bool timestampIncreasing = !(request.zslCapture || request.hasInputBuffer);
-            returnOutputBuffers(states.useHalBufManager, states.listener,
-                result->output_buffers, result->num_output_buffers,
-                shutterTimestamp, timestampIncreasing,
-                request.outputSurfaces, request.resultExtras);
+        if (shutterTimestamp != 0) {
+            returnAndRemovePendingOutputBuffers(
+                states.useHalBufManager, states.listener,
+                request);
         }
 
         if (result->result != NULL && !isPartialResult) {
@@ -791,10 +801,26 @@
         const camera3_stream_buffer_t *outputBuffers, size_t numBuffers,
         nsecs_t timestamp, bool timestampIncreasing,
         const SurfaceMap& outputSurfaces,
-        const CaptureResultExtras &inResultExtras) {
+        const CaptureResultExtras &inResultExtras,
+        ERROR_BUF_STRATEGY errorBufStrategy) {
 
     for (size_t i = 0; i < numBuffers; i++)
     {
+        Camera3StreamInterface *stream = Camera3Stream::cast(outputBuffers[i].stream);
+        int streamId = stream->getId();
+
+        // Call notify(ERROR_BUFFER) if necessary.
+        if (outputBuffers[i].status == CAMERA3_BUFFER_STATUS_ERROR &&
+                errorBufStrategy == ERROR_BUF_RETURN_NOTIFY) {
+            if (listener != nullptr) {
+                CaptureResultExtras extras = inResultExtras;
+                extras.errorStreamId = streamId;
+                listener->notifyError(
+                        hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER,
+                        extras);
+            }
+        }
+
         if (outputBuffers[i].buffer == nullptr) {
             if (!useHalBufManager) {
                 // With HAL buffer management API, HAL sometimes will have to return buffers that
@@ -805,20 +831,23 @@
             continue;
         }
 
-        Camera3StreamInterface *stream = Camera3Stream::cast(outputBuffers[i].stream);
-        int streamId = stream->getId();
         const auto& it = outputSurfaces.find(streamId);
         status_t res = OK;
-        if (it != outputSurfaces.end()) {
-            res = stream->returnBuffer(
-                    outputBuffers[i], timestamp, timestampIncreasing, it->second,
-                    inResultExtras.frameNumber);
-        } else {
-            res = stream->returnBuffer(
-                    outputBuffers[i], timestamp, timestampIncreasing, std::vector<size_t> (),
-                    inResultExtras.frameNumber);
-        }
 
+        // Do not return the buffer if its status is STATUS_ERROR and the error
+        // buffer strategy is ERROR_BUF_CACHE.
+        if (outputBuffers[i].status != CAMERA3_BUFFER_STATUS_ERROR ||
+                errorBufStrategy != ERROR_BUF_CACHE) {
+            if (it != outputSurfaces.end()) {
+                res = stream->returnBuffer(
+                        outputBuffers[i], timestamp, timestampIncreasing, it->second,
+                        inResultExtras.frameNumber);
+            } else {
+                res = stream->returnBuffer(
+                        outputBuffers[i], timestamp, timestampIncreasing, std::vector<size_t> (),
+                        inResultExtras.frameNumber);
+            }
+        }
         // Note: stream may be deallocated at this point, if this buffer was
         // the last reference to it.
         if (res == NO_INIT || res == DEAD_OBJECT) {
@@ -848,6 +877,28 @@
     }
 }
 
+void returnAndRemovePendingOutputBuffers(bool useHalBufManager,
+        sp<NotificationListener> listener, InFlightRequest& request) {
+    bool timestampIncreasing = !(request.zslCapture || request.hasInputBuffer);
+    returnOutputBuffers(useHalBufManager, listener,
+            request.pendingOutputBuffers.array(),
+            request.pendingOutputBuffers.size(),
+            request.shutterTimestamp, timestampIncreasing,
+            request.outputSurfaces, request.resultExtras,
+            request.errorBufStrategy);
+
+    // Remove error buffers that are not cached.
+    for (auto iter = request.pendingOutputBuffers.begin();
+            iter != request.pendingOutputBuffers.end(); ) {
+        if (request.errorBufStrategy != ERROR_BUF_CACHE ||
+                iter->status != CAMERA3_BUFFER_STATUS_ERROR) {
+            iter = request.pendingOutputBuffers.erase(iter);
+        } else {
+            iter++;
+        }
+    }
+}
+
 void notifyShutter(CaptureOutputStates& states, const camera3_shutter_msg_t &msg) {
     ATRACE_CALL();
     ssize_t idx;
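
A sketch of the cleanup rule implemented by returnAndRemovePendingOutputBuffers() above: after returning, every pending buffer is erased except error buffers that the CACHE strategy keeps until the error has been reported. The enums and removeReturnedBuffers are simplified stand-ins for the real types.

```cpp
#include <cassert>
#include <iterator>
#include <vector>

enum class BufStatus { Ok, Error };
enum class ErrorBufStrategy { CACHE, RETURN, RETURN_NOTIFY };

// After returning buffers, erase everything from the pending list except error
// buffers that the CACHE strategy keeps until the error has been reported.
static void removeReturnedBuffers(std::vector<BufStatus>& pending,
                                  ErrorBufStrategy strategy) {
    for (auto it = pending.begin(); it != pending.end();) {
        const bool keepCached =
                strategy == ErrorBufStrategy::CACHE && *it == BufStatus::Error;
        it = keepCached ? std::next(it) : pending.erase(it);
    }
}

int main() {
    std::vector<BufStatus> pending = {BufStatus::Ok, BufStatus::Error, BufStatus::Ok};

    std::vector<BufStatus> cached = pending;
    removeReturnedBuffers(cached, ErrorBufStrategy::CACHE);
    assert(cached.size() == 1);   // only the error buffer stays cached

    removeReturnedBuffers(pending, ErrorBufStrategy::RETURN_NOTIFY);
    assert(pending.empty());      // nothing is kept once errors are reported
    return 0;
}
```
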
@@ -899,6 +950,12 @@
                     msg.frame_number, r.resultExtras.requestId, msg.timestamp);
                 // Call listener, if any
                 if (states.listener != nullptr) {
+                    r.resultExtras.lastCompletedRegularFrameNumber =
+                            states.lastCompletedRegularFrameNumber;
+                    r.resultExtras.lastCompletedReprocessFrameNumber =
+                            states.lastCompletedReprocessFrameNumber;
+                    r.resultExtras.lastCompletedZslFrameNumber =
+                            states.lastCompletedZslFrameNumber;
                     states.listener->notifyShutter(r.resultExtras, msg.timestamp);
                 }
                 // send pending result and buffers
@@ -908,13 +965,8 @@
                     r.hasInputBuffer, r.zslCapture && r.stillCapture,
                     r.rotateAndCropAuto, r.cameraIdsWithZoom, r.physicalMetadatas);
             }
-            bool timestampIncreasing = !(r.zslCapture || r.hasInputBuffer);
-            returnOutputBuffers(
-                    states.useHalBufManager, states.listener,
-                    r.pendingOutputBuffers.array(),
-                    r.pendingOutputBuffers.size(), r.shutterTimestamp, timestampIncreasing,
-                    r.outputSurfaces, r.resultExtras);
-            r.pendingOutputBuffers.clear();
+            returnAndRemovePendingOutputBuffers(
+                    states.useHalBufManager, states.listener, r);
 
             removeInFlightRequestIfReadyLocked(states, idx);
         }
@@ -968,7 +1020,6 @@
             break;
         case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
         case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
-        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
             {
                 std::lock_guard<std::mutex> l(states.inflightLock);
                 ssize_t idx = states.inflightMap.indexOfKey(msg.frame_number);
@@ -976,7 +1027,7 @@
                     InFlightRequest &r = states.inflightMap.editValueAt(idx);
                     r.requestStatus = msg.error_code;
                     resultExtras = r.resultExtras;
-                    bool logicalDeviceResultError = false;
+                    bool physicalDeviceResultError = false;
                     if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT ==
                             errorCode) {
                         if (physicalCameraId.size() > 0) {
@@ -990,23 +1041,22 @@
                             }
                             r.physicalCameraIds.erase(iter);
                             resultExtras.errorPhysicalCameraId = physicalCameraId;
-                        } else {
-                            logicalDeviceResultError = true;
+                            physicalDeviceResultError = true;
                         }
                     }
 
-                    if (logicalDeviceResultError
-                            ||  hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST ==
-                            errorCode) {
+                    if (!physicalDeviceResultError) {
                         r.skipResultMetadata = true;
-                    }
-                    if (logicalDeviceResultError) {
-                        // In case of missing result check whether the buffers
-                        // returned. If they returned, then remove inflight
-                        // request.
-                        // TODO: should we call this for ERROR_CAMERA_REQUEST as well?
-                        //       otherwise we are depending on HAL to send the buffers back after
-                        //       calling notifyError. Not sure if that's in the spec.
+                        if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT
+                                == errorCode) {
+                            r.errorBufStrategy = ERROR_BUF_RETURN_NOTIFY;
+                        } else {
+                            // errorCode is ERROR_CAMERA_REQUEST
+                            r.errorBufStrategy = ERROR_BUF_RETURN;
+                        }
+
+                        // Check whether all buffers have been returned; if so,
+                        // remove the in-flight request.
                         removeInFlightRequestIfReadyLocked(states, idx);
                     }
                 } else {
@@ -1024,6 +1074,10 @@
                         states.cameraId.string(), __FUNCTION__);
             }
             break;
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+            // Do not depend on the HAL's ERROR_CAMERA_BUFFER to send the buffer
+            // error callback to the app. Instead, derive it from the STATUS_ERROR
+            // of the returned image buffers.
+            break;
         default:
             // SET_ERR calls notifyError
             SET_ERR("Unknown error message from HAL: %d", msg.error_code);
@@ -1338,8 +1392,14 @@
                 request.pendingOutputBuffers.array(),
                 request.pendingOutputBuffers.size(), 0,
                 /*timestampIncreasing*/true, request.outputSurfaces,
-                request.resultExtras);
+                request.resultExtras, request.errorBufStrategy);
+            ALOGW("%s: Frame %d |  Timestamp: %" PRId64 ", metadata"
+                    " arrived: %s, buffers left: %d.\n", __FUNCTION__,
+                    states.inflightMap.keyAt(idx), request.shutterTimestamp,
+                    request.haveResultMetadata ? "true" : "false",
+                    request.numBuffersLeft);
         }
+
         states.inflightMap.clear();
         states.inflightIntf.onInflightMapFlushedLocked();
     }
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index fbb47f8..9946312 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -44,7 +44,9 @@
     /**
      * Helper methods shared between Camera3Device/Camera3OfflineSession for HAL callbacks
      */
-    // helper function to return the output buffers to output streams.
+
+    // helper function to return the output buffers to output streams. The
+    // function also optionally calls notify(ERROR_BUFFER).
     void returnOutputBuffers(
             bool useHalBufManager,
             sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
@@ -53,13 +55,25 @@
             // The following arguments are only meant for surface sharing use case
             const SurfaceMap& outputSurfaces = SurfaceMap{},
             // Used to send buffer error callback when failing to return buffer
-            const CaptureResultExtras &resultExtras = CaptureResultExtras{});
+            const CaptureResultExtras &resultExtras = CaptureResultExtras{},
+            ERROR_BUF_STRATEGY errorBufStrategy = ERROR_BUF_RETURN);
+
+    // helper function to return the output buffers to output streams, and
+    // remove the returned buffers from the inflight request's pending buffers
+    // vector.
+    void returnAndRemovePendingOutputBuffers(
+            bool useHalBufManager,
+            sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
+            InFlightRequest& request);
 
     // Camera3Device/Camera3OfflineSession internal states used in notify/processCaptureResult
     // callbacks
     struct CaptureOutputStates {
         const String8& cameraId;
         std::mutex& inflightLock;
+        int64_t& lastCompletedRegularFrameNumber;
+        int64_t& lastCompletedReprocessFrameNumber;
+        int64_t& lastCompletedZslFrameNumber;
         InFlightRequestMap& inflightMap; // end of inflightLock scope
         std::mutex& outputLock;
         std::list<CaptureResult>& resultQueue;
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index 424043b..da4f228 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -32,7 +32,18 @@
 
 namespace camera3 {
 
+typedef enum {
+    // Cache the buffers with STATUS_ERROR within InFlightRequest
+    ERROR_BUF_CACHE,
+    // Return the buffers with STATUS_ERROR to the buffer queue
+    ERROR_BUF_RETURN,
+    // Return the buffers with STATUS_ERROR to the buffer queue, and call
+    // notify(ERROR_BUFFER) as well
+    ERROR_BUF_RETURN_NOTIFY
+} ERROR_BUF_STRATEGY;
+
 struct InFlightRequest {
+
     // Set by notify() SHUTTER call.
     nsecs_t shutterTimestamp;
     // Set by process_capture_result().
@@ -43,6 +54,9 @@
     // Decremented by calls to process_capture_result with valid output
     // and input buffers
     int     numBuffersLeft;
+
+    // The inflight request is considered complete once all of its buffers have been
+    // returned and its capture result delivered (or the corresponding errors notified).
+
     CaptureResultExtras resultExtras;
     // If this request has any input buffer
     bool hasInputBuffer;
@@ -79,6 +93,10 @@
     // REQUEST/RESULT error.
     bool skipResultMetadata;
 
+    // Whether buffers with STATUS_ERROR should be cached as pending buffers, returned
+    // to the buffer queue, or returned to the buffer queue with notify(ERROR_BUFFER) as well.
+    ERROR_BUF_STRATEGY errorBufStrategy;
+
     // The physical camera ids being requested.
     std::set<String8> physicalCameraIds;
 
@@ -114,6 +132,7 @@
             hasCallback(true),
             maxExpectedDuration(kDefaultExpectedDuration),
             skipResultMetadata(false),
+            errorBufStrategy(ERROR_BUF_CACHE),
             stillCapture(false),
             zslCapture(false),
             rotateAndCropAuto(false) {
@@ -134,6 +153,7 @@
             hasCallback(hasAppCallback),
             maxExpectedDuration(maxDuration),
             skipResultMetadata(false),
+            errorBufStrategy(ERROR_BUF_CACHE),
             physicalCameraIds(physicalCameraIdSet),
             stillCapture(isStillCapture),
             zslCapture(isZslCapture),
diff --git a/services/camera/libcameraservice/tests/ClientManagerTest.cpp b/services/camera/libcameraservice/tests/ClientManagerTest.cpp
new file mode 100644
index 0000000..6a38427
--- /dev/null
+++ b/services/camera/libcameraservice/tests/ClientManagerTest.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "ClientManagerTest"
+
+#include "../utils/ClientManager.h"
+#include <gtest/gtest.h>
+
+using namespace android::resource_policy;
+
+struct TestClient {
+    TestClient(int id, int32_t cost, const std::set<int>& conflictingKeys, int32_t ownerId,
+            int32_t score, int32_t state, bool isVendorClient) :
+            mId(id), mCost(cost), mConflictingKeys(conflictingKeys),
+            mOwnerId(ownerId), mScore(score), mState(state), mIsVendorClient(isVendorClient) {};
+    int mId;
+    int32_t mCost;    // Int 0..100
+    std::set<int> mConflictingKeys;
+    int32_t mOwnerId; // PID
+    int32_t mScore;   // Priority
+    int32_t mState;   // Foreground/background etc
+    bool mIsVendorClient;
+};
+
+using TestClientDescriptor = ClientDescriptor<int, TestClient>;
+using TestDescriptorPtr = std::shared_ptr<TestClientDescriptor>;
+
+TestDescriptorPtr makeDescFromTestClient(const TestClient& tc) {
+    return std::make_shared<TestClientDescriptor>(/*ID*/tc.mId, tc, tc.mCost, tc.mConflictingKeys,
+            tc.mScore, tc.mOwnerId, tc.mState, tc.mIsVendorClient);
+}
+
+class TestClientManager : public ClientManager<int, TestClient> {
+public:
+    TestClientManager() {}
+    virtual ~TestClientManager() {}
+};
+
+
+// Test ClientManager behavior when there is only a single owner.
+// The expected behavior is that if one owner (application or vendor) tries
+// to open a second camera, it may or may not succeed, but the camera that is
+// already open should never be evicted.
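+//
+// The three scenarios below exercise the same-owner handling added to ClientManager.h
+// in this change: a conflicting open of a different device keeps the existing client,
+// a non-conflicting open admits both clients (the HAL may still reject on resource
+// cost), and re-opening the same device evicts the older client.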
+TEST(ClientManagerTest, SingleOwnerMultipleCamera) {
+
+    TestClientManager cm;
+    TestClient cam0Client(/*ID*/0, /*cost*/100, /*conflicts*/{1},
+            /*ownerId*/ 1000, /*score*/50, /*state*/ 1, /*isVendorClient*/ false);
+    auto cam0Desc = makeDescFromTestClient(cam0Client);
+    auto evicted = cm.addAndEvict(cam0Desc);
+    ASSERT_EQ(evicted.size(), 0u) << "Evicted list must be empty";
+
+    TestClient cam1Client(/*ID*/1, /*cost*/100, /*conflicts*/{0},
+            /*ownerId*/ 1000, /*score*/50, /*state*/ 1, /*isVendorClient*/ false);
+    auto cam1Desc = makeDescFromTestClient(cam1Client);
+
+    // 1. Check that with conflicting devices, the new client would be evicted.
+    auto wouldBeEvicted = cm.wouldEvict(cam1Desc);
+    ASSERT_EQ(wouldBeEvicted.size(), 1u) << "Evicted list length must be 1";
+    ASSERT_EQ(wouldBeEvicted[0]->getKey(), cam1Desc->getKey()) << "cam1 must be evicted";
+
+    cm.removeAll();
+
+    TestClient cam2Client(/*ID*/2, /*cost*/100, /*conflicts*/{},
+            /*ownerId*/ 1000, /*score*/50, /*state*/ 1, /*isVendorClient*/ false);
+    auto cam2Desc = makeDescFromTestClient(cam2Client);
+    evicted = cm.addAndEvict(cam2Desc);
+    ASSERT_EQ(evicted.size(), 0u) << "Evicted list must be empty";
+
+    TestClient cam3Client(/*ID*/3, /*cost*/100, /*conflicts*/{},
+            /*ownerId*/ 1000, /*score*/50, /*state*/ 1, /*isVendorClient*/ false);
+    auto cam3Desc = makeDescFromTestClient(cam3Client);
+
+    // 2. Check that without conflicting devices, the pre-existing client isn't evicted.
+    // In this case, the new client is granted, but could later be rejected by the HAL due to
+    // resource cost.
+    wouldBeEvicted = cm.wouldEvict(cam3Desc);
+    ASSERT_EQ(wouldBeEvicted.size(), 0u) << "Evicted list must be empty";
+
+    cm.removeAll();
+
+    evicted = cm.addAndEvict(cam0Desc);
+    ASSERT_EQ(evicted.size(), 0u) << "Evicted list must be empty";
+
+    TestClient cam0ClientNew(/*ID*/0, /*cost*/100, /*conflicts*/{1},
+            /*ownerId*/ 1000, /*score*/50, /*state*/ 1, /*isVendorClient*/ false);
+    auto cam0DescNew = makeDescFromTestClient(cam0ClientNew);
+    wouldBeEvicted = cm.wouldEvict(cam0DescNew);
+
+    // 3. Check that opening the same camera twice evicts the older client.
+    ASSERT_EQ(wouldBeEvicted.size(), 1u) << "Evicted list length must be 1";
+    ASSERT_EQ(wouldBeEvicted[0], cam0Desc) << "cam0 (old) must be evicted";
+}
+
diff --git a/services/camera/libcameraservice/utils/ClientManager.h b/services/camera/libcameraservice/utils/ClientManager.h
index 35d25bf..64be6c5 100644
--- a/services/camera/libcameraservice/utils/ClientManager.h
+++ b/services/camera/libcameraservice/utils/ClientManager.h
@@ -496,6 +496,20 @@
                 evictList.clear();
                 evictList.push_back(client);
                 return evictList;
+            } else if (conflicting && owner == curOwner) {
+                // A pre-existing conflicting client with the same owner exists.
+                // Opening the same device twice -> the most recent open wins.
+                // Otherwise let the existing client win, to avoid behavior differences
+                // caused by how the HAL advertises conflicting devices (which is hidden
+                // from the application).
+                if (curKey == key) {
+                    evictList.push_back(i);
+                    totalCost -= curCost;
+                } else {
+                    evictList.clear();
+                    evictList.push_back(client);
+                    return evictList;
+                }
             } else if (conflicting || ((totalCost > mMaxCost && curCost > 0) &&
                     (curPriority >= priority) &&
                     !(highestPriorityOwner == owner && owner == curOwner))) {
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index fb519d9..cfb9f17 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -32,7 +32,7 @@
 class SessionConfigurationUtils {
 public:
     // utility function to convert AIDL SessionConfiguration to HIDL
-    // streamConfiguration. Also checks for sanity of SessionConfiguration and
+    // streamConfiguration. Also checks for validity of SessionConfiguration and
     // returns a non-ok binder::Status if the passed in session configuration
     // isn't valid.
     static binder::Status
diff --git a/services/mediametrics/AnalyticsState.h b/services/mediametrics/AnalyticsState.h
index b648947..09c0b4c 100644
--- a/services/mediametrics/AnalyticsState.h
+++ b/services/mediametrics/AnalyticsState.h
@@ -93,7 +93,7 @@
         int32_t ll = lines;
 
         if (ll > 0) {
-            ss << "TransactionLog:\n";
+            ss << "TransactionLog: gc(" << mTransactionLog.getGarbageCollectionCount() << ")\n";
             --ll;
         }
         if (ll > 0) {
@@ -102,7 +102,7 @@
             ll -= l;
         }
         if (ll > 0) {
-            ss << "TimeMachine:\n";
+            ss << "TimeMachine: gc(" << mTimeMachine.getGarbageCollectionCount() << ")\n";
             --ll;
         }
         if (ll > 0) {
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index f819f1b..55de260 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -102,6 +102,7 @@
         "libutils",
     ],
     header_libs: [
+        "libaudioutils_headers",
         "libmediametrics_headers",
     ],
 
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 800f099..29801a4 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -31,30 +31,133 @@
 
 #define PROP_AUDIO_ANALYTICS_CLOUD_ENABLED "persist.audio.analytics.cloud.enabled"
 
-// Enable for testing of delivery to statsd
-//#define STATSD
+namespace android::mediametrics {
 
-// Transmit to statsd in integer or strings
-//#define USE_INT
+// Enable for testing of delivery to statsd. Caution: if this is enabled, all protos MUST exist.
+#define STATSD_ENABLE
 
-#ifdef USE_INT
-using short_enum_type_t = int32_t;
-using long_enum_type_t = int64_t;
-#define ENUM_EXTRACT(x) (x)
+#ifdef STATSD_ENABLE
+#define CONDITION(INT_VALUE) (INT_VALUE)  // allow value
 #else
-using short_enum_type_t = std::string;
-using long_enum_type_t = std::string;
-#define ENUM_EXTRACT(x) (x).c_str()
+#define CONDITION(INT_VALUE) (int(0))     // mask value since the proto may not be defined yet.
 #endif
 
-using android::base::DEBUG;
+// Maximum length of a device name.
+static constexpr size_t STATSD_DEVICE_NAME_MAX_LENGTH = 32;
 
-namespace android::mediametrics {
+// Transmit enums to statsd as integers or strings (this must match atoms.proto).
+static constexpr bool STATSD_USE_INT_FOR_ENUM = false;
+
+// Derive the enum types based on the integer/string selection above.
+using short_enum_type_t = std::conditional_t<STATSD_USE_INT_FOR_ENUM, int32_t, std::string>;
+using long_enum_type_t = std::conditional_t<STATSD_USE_INT_FOR_ENUM, int64_t, std::string>;
+
+// Helper: convert std::string to const char *; pass other types through unchanged.
+template <typename T>
+auto ENUM_EXTRACT(const T& x) {
+    if constexpr (std::is_same_v<std::decay_t<T>, std::string>) {
+        return x.c_str();
+    } else {
+        return x;
+    }
+}
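+
+// Illustrative example (not part of the original change): with STATSD_USE_INT_FOR_ENUM
+// set to false the *_enum_type_t aliases are std::string, so
+//     const auto flagsForStats = types::lookup<types::INPUT_FLAG, short_enum_type_t>(flags);
+//     ENUM_EXTRACT(flagsForStats);  // yields a const char * for stats_write()
+// whereas with the integer setting the value is passed through unchanged.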
+
+static constexpr const auto LOG_LEVEL = android::base::VERBOSE;
+
+static constexpr int PREVIOUS_STATE_EXPIRE_SEC = 60 * 60; // 1 hour.
+
+/*
+ * For logging purposes, we list all of the MediaMetrics atom fields,
+ * which can then be associated with consecutive arguments to the statsd write.
+ */
+
+static constexpr const char * const AudioRecordDeviceUsageFields[] = {
+    "mediametrics_audiorecorddeviceusage_reported", // proto number
+    "devices",
+    "device_names",
+    "device_time_nanos",
+    "encoding",
+    "frame_count",
+    "interval_count",
+    "sample_rate",
+    "flags",
+    "package_name",
+    "selected_device_id",
+    "caller",
+    "source",
+};
+
+static constexpr const char * const AudioThreadDeviceUsageFields[] = {
+    "mediametrics_audiothreaddeviceusage_reported",
+    "devices",
+    "device_names",
+    "device_time_nanos",
+    "encoding",
+    "frame_count",
+    "interval_count",
+    "sample_rate",
+    "flags",
+    "xruns",
+    "type",
+};
+
+static constexpr const char * const AudioTrackDeviceUsageFields[] = {
+    "mediametrics_audiotrackdeviceusage_reported",
+    "devices",
+    "device_names",
+    "device_time_nanos",
+    "encoding",
+    "frame_count",
+    "interval_count",
+    "sample_rate",
+    "flags",
+    "xruns",
+    "package_name",
+    "device_latency_millis",
+    "device_startup_millis",
+    "device_volume",
+    "selected_device_id",
+    "stream_type",
+    "usage",
+    "content_type",
+    "caller",
+    "traits",
+};
+
+static constexpr const char * const AudioDeviceConnectionFields[] = {
+    "mediametrics_audiodeviceconnection_reported",
+    "input_devices",
+    "output_devices",
+    "device_names",
+    "result",
+    "time_to_connect_millis",
+    "connection_count",
+};
+
+/**
+ * sendToStatsd is a helper method that sends the arguments to statsd
+ * and returns a pair { result, summary_string }.
+ */
+template <size_t N, typename ...Types>
+std::pair<int, std::string> sendToStatsd(const char * const (& fields)[N], Types ... args)
+{
+    int result = 0;
+    std::stringstream ss;
+
+#ifdef STATSD_ENABLE
+    result = android::util::stats_write(args...);
+    ss << "result:" << result;
+#endif
+    ss << " { ";
+    stringutils::fieldPrint(ss, fields, args...);
+    ss << "}";
+    return { result, ss.str() };
+}
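+
+// Usage sketch (illustrative; it mirrors the A2DP connection report later in this file):
+//
+//     const auto [ result, str ] = sendToStatsd(AudioDeviceConnectionFields,
+//             CONDITION(android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
+//             , ENUM_EXTRACT(inputDeviceBits)
+//             , ENUM_EXTRACT(outputDeviceBits)
+//             , mA2dpDeviceName.c_str()
+//             , types::DEVICE_CONNECTION_RESULT_SUCCESS
+//             , connectionTimeMs
+//             , /* connection_count */ 1);
+//
+// Each entry of AudioDeviceConnectionFields pairs positionally with one argument
+// (the first with the atom id), so str reads like
+// "result:0 { mediametrics_audiodeviceconnection_reported:<id> input_devices:... }".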
 
 AudioAnalytics::AudioAnalytics()
     : mDeliverStatistics(property_get_bool(PROP_AUDIO_ANALYTICS_CLOUD_ENABLED, true))
 {
-    SetMinimumLogSeverity(DEBUG); // for LOG().
+    SetMinimumLogSeverity(android::base::DEBUG); // for LOG().
     ALOGD("%s", __func__);
 
     // Add action to save AnalyticsState if audioserver is restarted.
@@ -72,6 +175,19 @@
                 // to end of full expression.
                 mAnalyticsState->clear();  // TODO: filter the analytics state.
                 // Perhaps report this.
+
+                // Set up a timer to expire the previous audio state to save space.
+                // Use the transaction log size as a cookie to see if it is the
+                // same as before.  A benign race is possible where a state is cleared early.
+                const size_t size = mPreviousAnalyticsState->transactionLog().size();
+                mTimedAction.postIn(
+                        std::chrono::seconds(PREVIOUS_STATE_EXPIRE_SEC), [this, size](){
+                    if (mPreviousAnalyticsState->transactionLog().size() == size) {
+                        ALOGD("expiring previous audio state after %d seconds.",
+                                PREVIOUS_STATE_EXPIRE_SEC);
+                        mPreviousAnalyticsState->clear();  // removes data from the state.
+                    }
+                });
             }));
 
     // Handle device use record statistics
@@ -220,11 +336,26 @@
         ll -= l;
     }
 
+    if (ll > 0) {
+        // Print the statsd atoms we sent out.
+        const std::string statsd = mStatsdLog.dumpToString("  " /* prefix */, ll - 1);
+        const size_t n = std::count(statsd.begin(), statsd.end(), '\n') + 1; // we control this.
+        if ((size_t)ll >= n) {
+            if (n == 1) {
+                ss << "Statsd atoms: empty or truncated\n";
+            } else {
+                ss << "Statsd atoms:\n" << statsd;
+            }
+            ll -= n;
+        }
+    }
+
     if (ll > 0 && prefix == nullptr) {
         auto [s, l] = mAudioPowerUsage.dump(ll);
         ss << s;
         ll -= l;
     }
+
     return { ss.str(), lines - ll };
 }
 
@@ -312,20 +443,25 @@
 
     // Get connected device name if from bluetooth.
     bool isBluetooth = false;
-    std::string deviceNames; // we only have one device name at this time.
+
+    std::string inputDeviceNames;  // not filled currently.
+    std::string outputDeviceNames;
     if (outputDevices.find("AUDIO_DEVICE_OUT_BLUETOOTH") != std::string::npos) {
         isBluetooth = true;
         mAudioAnalytics.mAnalyticsState->timeMachine().get(
-            "audio.device.bt_a2dp", AMEDIAMETRICS_PROP_NAME, &deviceNames);
+            "audio.device.bt_a2dp", AMEDIAMETRICS_PROP_NAME, &outputDeviceNames);
         // Remove | if present
-        stringutils::replace(deviceNames, "|", '?');
+        stringutils::replace(outputDeviceNames, "|", '?');
+        if (outputDeviceNames.size() > STATSD_DEVICE_NAME_MAX_LENGTH) {
+            outputDeviceNames.resize(STATSD_DEVICE_NAME_MAX_LENGTH); // truncate
+        }
     }
 
     switch (itemType) {
     case RECORD: {
         std::string callerName;
-        mAudioAnalytics.mAnalyticsState->timeMachine().get(
-                key, AMEDIAMETRICS_PROP_CALLERNAME, &callerName);
+        const bool clientCalled = mAudioAnalytics.mAnalyticsState->timeMachine().get(
+                key, AMEDIAMETRICS_PROP_CALLERNAME, &callerName) == OK;
 
         std::string packageName;
         int64_t versionCode = 0;
@@ -350,10 +486,10 @@
         const auto flagsForStats = types::lookup<types::INPUT_FLAG, short_enum_type_t>(flags);
         const auto sourceForStats = types::lookup<types::SOURCE_TYPE, short_enum_type_t>(source);
 
-        LOG(DEBUG) << "key:" << key
+        LOG(LOG_LEVEL) << "key:" << key
               << " id:" << id
               << " inputDevices:" << inputDevices << "(" << inputDeviceBits
-              << ") deviceNames:" << deviceNames
+              << ") inputDeviceNames:" << inputDeviceNames
               << " deviceTimeNs:" << deviceTimeNs
               << " encoding:" << encoding << "(" << encodingForStats
               << ") frameCount:" << frameCount
@@ -364,12 +500,12 @@
               << " selectedDeviceId:" << selectedDeviceId
               << " callerName:" << callerName << "(" << callerNameForStats
               << ") source:" << source << "(" << sourceForStats << ")";
-#ifdef STATSD
-        if (mAudioAnalytics.mDeliverStatistics) {
-            (void)android::util::stats_write(
-                    android::util::MEDIAMETRICS_AUDIORECORDDEVICEUSAGE_REPORTED
+        if (clientCalled  // only log if the client app called AudioRecord.
+                && mAudioAnalytics.mDeliverStatistics) {
+            const auto [ result, str ] = sendToStatsd(AudioRecordDeviceUsageFields,
+                    CONDITION(android::util::MEDIAMETRICS_AUDIORECORDDEVICEUSAGE_REPORTED)
                     , ENUM_EXTRACT(inputDeviceBits)
-                    , deviceNames.c_str()
+                    , inputDeviceNames.c_str()
                     , deviceTimeNs
                     , ENUM_EXTRACT(encodingForStats)
                     , frameCount
@@ -382,8 +518,9 @@
                     , ENUM_EXTRACT(callerNameForStats)
                     , ENUM_EXTRACT(sourceForStats)
                     );
+            ALOGV("%s: statsd %s", __func__, str.c_str());
+            mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
         }
-#endif
     } break;
     case THREAD: {
         std::string type;
@@ -400,11 +537,12 @@
                         : types::lookup<types::OUTPUT_FLAG, short_enum_type_t>(flags));
         const auto typeForStats = types::lookup<types::THREAD_TYPE, short_enum_type_t>(type);
 
-        LOG(DEBUG) << "key:" << key
+        LOG(LOG_LEVEL) << "key:" << key
               << " id:" << id
               << " inputDevices:" << inputDevices << "(" << inputDeviceBits
               << ") outputDevices:" << outputDevices << "(" << outputDeviceBits
-              << ") deviceNames:" << deviceNames
+              << ") inputDeviceNames:" << inputDeviceNames
+              << " outputDeviceNames:" << outputDeviceNames
               << " deviceTimeNs:" << deviceTimeNs
               << " encoding:" << encoding << "(" << encodingForStats
               << ") frameCount:" << frameCount
@@ -414,13 +552,11 @@
               << " flags:" << flags << "(" << flagsForStats
               << ") type:" << type << "(" << typeForStats
               << ")";
-#ifdef STATSD
         if (mAudioAnalytics.mDeliverStatistics) {
-            (void)android::util::stats_write(
-                android::util::MEDIAMETRICS_AUDIOTHREADDEVICEUSAGE_REPORTED
-                , ENUM_EXTRACT(inputDeviceBits)
-                , ENUM_EXTRACT(outputDeviceBits)
-                , deviceNames.c_str()
+            const auto [ result, str ] = sendToStatsd(AudioThreadDeviceUsageFields,
+                CONDITION(android::util::MEDIAMETRICS_AUDIOTHREADDEVICEUSAGE_REPORTED)
+                , isInput ? ENUM_EXTRACT(inputDeviceBits) : ENUM_EXTRACT(outputDeviceBits)
+                , isInput ? inputDeviceNames.c_str() : outputDeviceNames.c_str()
                 , deviceTimeNs
                 , ENUM_EXTRACT(encodingForStats)
                 , frameCount
@@ -430,13 +566,15 @@
                 , underrun
                 , ENUM_EXTRACT(typeForStats)
             );
+            ALOGV("%s: statsd %s", __func__, str.c_str());
+            mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
         }
-#endif
     } break;
     case TRACK: {
         std::string callerName;
-        mAudioAnalytics.mAnalyticsState->timeMachine().get(
-                key, AMEDIAMETRICS_PROP_CALLERNAME, &callerName);
+        const bool clientCalled = mAudioAnalytics.mAnalyticsState->timeMachine().get(
+                key, AMEDIAMETRICS_PROP_CALLERNAME, &callerName) == OK;
+
         std::string contentType;
         mAudioAnalytics.mAnalyticsState->timeMachine().get(
                 key, AMEDIAMETRICS_PROP_CONTENTTYPE, &contentType);
@@ -470,6 +608,9 @@
         std::string streamType;
         mAudioAnalytics.mAnalyticsState->timeMachine().get(
                 key, AMEDIAMETRICS_PROP_STREAMTYPE, &streamType);
+        std::string traits;
+        mAudioAnalytics.mAnalyticsState->timeMachine().get(
+                key, AMEDIAMETRICS_PROP_TRAITS, &traits);
         int32_t underrun = 0;
         mAudioAnalytics.mAnalyticsState->timeMachine().get(
                 key, AMEDIAMETRICS_PROP_UNDERRUN, &underrun);
@@ -485,12 +626,14 @@
         const auto flagsForStats = types::lookup<types::OUTPUT_FLAG, short_enum_type_t>(flags);
         const auto streamTypeForStats =
                 types::lookup<types::STREAM_TYPE, short_enum_type_t>(streamType);
+        const auto traitsForStats =
+                 types::lookup<types::TRACK_TRAITS, short_enum_type_t>(traits);
         const auto usageForStats = types::lookup<types::USAGE, short_enum_type_t>(usage);
 
-        LOG(DEBUG) << "key:" << key
+        LOG(LOG_LEVEL) << "key:" << key
               << " id:" << id
               << " outputDevices:" << outputDevices << "(" << outputDeviceBits
-              << ") deviceNames:" << deviceNames
+              << ") outputDeviceNames:" << outputDeviceNames
               << " deviceTimeNs:" << deviceTimeNs
               << " encoding:" << encoding << "(" << encodingForStats
               << ") frameCount:" << frameCount
@@ -508,14 +651,15 @@
               << " playbackSpeed:" << playbackSpeed
               << " selectedDeviceId:" << selectedDeviceId
               << " streamType:" << streamType << "(" << streamTypeForStats
+              << ") traits:" << traits << "(" << traitsForStats
               << ") usage:" << usage << "(" << usageForStats
               << ")";
-#ifdef STATSD
-        if (mAudioAnalytics.mDeliverStatistics) {
-            (void)android::util::stats_write(
-                    android::util::MEDIAMETRICS_AUDIOTRACKDEVICEUSAGE_REPORTED
+        if (clientCalled // only log if the client app called AudioTrack.
+                && mAudioAnalytics.mDeliverStatistics) {
+            const auto [ result, str ] = sendToStatsd(AudioTrackDeviceUsageFields,
+                    CONDITION(android::util::MEDIAMETRICS_AUDIOTRACKDEVICEUSAGE_REPORTED)
                     , ENUM_EXTRACT(outputDeviceBits)
-                    , deviceNames.c_str()
+                    , outputDeviceNames.c_str()
                     , deviceTimeNs
                     , ENUM_EXTRACT(encodingForStats)
                     , frameCount
@@ -523,7 +667,6 @@
                     , sampleRate
                     , ENUM_EXTRACT(flagsForStats)
                     , underrun
-
                     , packageName.c_str()
                     , (float)deviceLatencyMs
                     , (float)deviceStartupMs
@@ -533,9 +676,11 @@
                     , ENUM_EXTRACT(usageForStats)
                     , ENUM_EXTRACT(contentTypeForStats)
                     , ENUM_EXTRACT(callerNameForStats)
+                    , ENUM_EXTRACT(traitsForStats)
                     );
+            ALOGV("%s: statsd %s", __func__, str.c_str());
+            mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
         }
-#endif
         } break;
     }
 
@@ -592,23 +737,26 @@
         const auto outputDeviceBits = types::lookup<types::OUTPUT_DEVICE, long_enum_type_t>(
                 "AUDIO_DEVICE_OUT_BLUETOOTH_A2DP");
 
-        LOG(DEBUG) << "key:" << key
+        LOG(LOG_LEVEL) << "key:" << key
                 << " A2DP SUCCESS"
                 << " outputDevices:" << outputDeviceBits
+                << " deviceName:" << mA2dpDeviceName
                 << " connectionTimeMs:" <<  connectionTimeMs;
-#ifdef STATSD
         if (mAudioAnalytics.mDeliverStatistics) {
             const long_enum_type_t inputDeviceBits{};
-            (void)android::util::stats_write(
-                    android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED
+
+            const auto [ result, str ] = sendToStatsd(AudioDeviceConnectionFields,
+                    CONDITION(android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
                     , ENUM_EXTRACT(inputDeviceBits)
                     , ENUM_EXTRACT(outputDeviceBits)
+                    , mA2dpDeviceName.c_str()
                     , types::DEVICE_CONNECTION_RESULT_SUCCESS
                     , connectionTimeMs
                     , /* connection_count */ 1
                     );
+            ALOGV("%s: statsd %s", __func__, str.c_str());
+            mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
         }
-#endif
     }
 }
 
@@ -620,13 +768,17 @@
     std::string state;
     item->get(AMEDIAMETRICS_PROP_STATE, &state);
     if (state != "connected") return;
+
+    std::string name;
+    item->get(AMEDIAMETRICS_PROP_NAME, &name);
     {
         std::lock_guard l(mLock);
         mA2dpConnectionRequestNs = atNs;
         ++mA2dpConnectionRequests;
+        mA2dpDeviceName = name;
     }
-    ALOGD("(key=%s) a2dp connection request atNs:%lld",
-            key.c_str(), (long long)atNs);
+    ALOGD("(key=%s) a2dp connection name:%s request atNs:%lld",
+            key.c_str(), name.c_str(), (long long)atNs);
     // TODO: attempt to cancel a timed event, rather than let it expire.
     mAudioAnalytics.mTimedAction.postIn(std::chrono::seconds(5), [this](){ expire(); });
 }
@@ -635,29 +787,29 @@
     std::lock_guard l(mLock);
     if (mA2dpConnectionRequestNs == 0) return; // ignore (this was an internal connection).
 
-#ifdef STATSD
     const long_enum_type_t inputDeviceBits{};
-#endif
     const auto outputDeviceBits = types::lookup<types::OUTPUT_DEVICE, long_enum_type_t>(
             "AUDIO_DEVICE_OUT_BLUETOOTH_A2DP");
 
     if (mA2dpConnectionServiceNs == 0) {
         ++mA2dpConnectionJavaServiceCancels;  // service did not connect to A2DP
 
-        LOG(DEBUG) << "A2DP CANCEL"
-                << " outputDevices:" << outputDeviceBits;
-#ifdef STATSD
+        LOG(LOG_LEVEL) << "A2DP CANCEL"
+                << " outputDevices:" << outputDeviceBits
+                << " deviceName:" << mA2dpDeviceName;
         if (mAudioAnalytics.mDeliverStatistics) {
-            (void)android::util::stats_write(
-                    android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED
+            const auto [ result, str ] = sendToStatsd(AudioDeviceConnectionFields,
+                    CONDITION(android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
                     , ENUM_EXTRACT(inputDeviceBits)
                     , ENUM_EXTRACT(outputDeviceBits)
+                    , mA2dpDeviceName.c_str()
                     , types::DEVICE_CONNECTION_RESULT_JAVA_SERVICE_CANCEL
                     , /* connection_time_ms */ 0.f
                     , /* connection_count */ 1
                     );
+            ALOGV("%s: statsd %s", __func__, str.c_str());
+            mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
         }
-#endif
         return;
     }
 
@@ -668,20 +820,22 @@
     mA2dpConnectionServiceNs = 0;
     ++mA2dpConnectionUnknowns;  // connection result unknown
 
-    LOG(DEBUG) << "A2DP UNKNOWN"
-            << " outputDevices:" << outputDeviceBits;
-#ifdef STATSD
+    LOG(LOG_LEVEL) << "A2DP UNKNOWN"
+            << " outputDevices:" << outputDeviceBits
+            << " deviceName:" << mA2dpDeviceName;
     if (mAudioAnalytics.mDeliverStatistics) {
-        (void)android::util::stats_write(
-                android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED
+        const auto [ result, str ] = sendToStatsd(AudioDeviceConnectionFields,
+                CONDITION(android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED)
                 , ENUM_EXTRACT(inputDeviceBits)
                 , ENUM_EXTRACT(outputDeviceBits)
+                , mA2dpDeviceName.c_str()
                 , types::DEVICE_CONNECTION_RESULT_UNKNOWN
                 , /* connection_time_ms */ 0.f
                 , /* connection_count */ 1
                 );
+        ALOGV("%s: statsd %s", __func__, str.c_str());
+        mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
     }
-#endif
 }
 
 } // namespace android::mediametrics
diff --git a/services/mediametrics/AudioAnalytics.h b/services/mediametrics/AudioAnalytics.h
index 138ddcc..df097b1 100644
--- a/services/mediametrics/AudioAnalytics.h
+++ b/services/mediametrics/AudioAnalytics.h
@@ -17,6 +17,7 @@
 #pragma once
 
 #include <android-base/thread_annotations.h>
+#include <audio_utils/SimpleLog.h>
 #include "AnalyticsActions.h"
 #include "AnalyticsState.h"
 #include "AudioPowerUsage.h"
@@ -116,12 +117,14 @@
 
     // AnalyticsState is individually locked, and we use SharedPtrWrap
     // to allow safe access even if the shared pointer changes underneath.
-
+    // These wrappers always point to a valid state object.
     SharedPtrWrap<AnalyticsState> mAnalyticsState;
     SharedPtrWrap<AnalyticsState> mPreviousAnalyticsState;
 
     TimedAction mTimedAction; // locked internally
 
+    SimpleLog mStatsdLog{16 /* log lines */}; // locked internally
+
     // DeviceUse is a nested class which handles audio device usage accounting.
     // We define this class at the end to ensure prior variables all properly constructed.
     // TODO: Track / Thread interaction
@@ -173,6 +176,7 @@
         AudioAnalytics &mAudioAnalytics;
 
         mutable std::mutex mLock;
+        std::string mA2dpDeviceName;
         int64_t mA2dpConnectionRequestNs GUARDED_BY(mLock) = 0;  // Time for BT service request.
         int64_t mA2dpConnectionServiceNs GUARDED_BY(mLock) = 0;  // Time audio service agrees.
 
diff --git a/services/mediametrics/AudioPowerUsage.cpp b/services/mediametrics/AudioPowerUsage.cpp
index c441110..cca6b41 100644
--- a/services/mediametrics/AudioPowerUsage.cpp
+++ b/services/mediametrics/AudioPowerUsage.cpp
@@ -34,7 +34,7 @@
 #define PROP_AUDIO_METRICS_DISABLED "persist.media.audio_metrics.power_usage_disabled"
 #define AUDIO_METRICS_DISABLED_DEFAULT (false)
 
-// property to set how long to send audio power use metrics data to westworld, default is 24hrs
+// property to set how long to send audio power use metrics data to statsd, default is 24hrs
 #define PROP_AUDIO_METRICS_INTERVAL_HR "persist.media.audio_metrics.interval_hr"
 #define INTERVAL_HR_DEFAULT (24)
 
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index 2a2dbaf..aa44447 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -20,7 +20,30 @@
 
 namespace android::mediametrics::types {
 
-std::unordered_map<std::string, int64_t>& getAudioDeviceInMap() {
+const std::unordered_map<std::string, int32_t>& getAudioCallerNameMap() {
+    // DO NOT MODIFY VALUES (OK to add new ones).
+    // This may be found in frameworks/av/media/libmediametrics/include/MediaMetricsConstants.h
+    static std::unordered_map<std::string, int32_t> map{
+        {"unknown",       0},           // callerName not set
+        {"aaudio",        1},           // Native AAudio
+        {"java",          2},           // Java API layer
+        {"media",         3},           // libmedia (mediaplayer)
+        {"opensles",      4},           // Open SLES
+        {"rtp",           5},           // RTP communication
+        {"soundpool",     6},           // SoundPool
+        {"tonegenerator", 7},           // dial tones
+        // R values above.
+    };
+    return map;
+}
+
+// A map in case we need to return a flag for input devices.
+// This is 64 bits (and hence not the same as audio_device_t) because we need extra
+// bits to represent new devices.
+// NOT USED FOR R.  We do not use int64 flags.
+// This can be out of date for now, as it is unused even for string validation
+// (instead TypeConverter<InputDeviceTraits> is used).
+const std::unordered_map<std::string, int64_t>& getAudioDeviceInMap() {
     // DO NOT MODIFY VALUES (OK to add new ones).  This does NOT match audio_device_t.
     static std::unordered_map<std::string, int64_t> map{
         {"AUDIO_DEVICE_IN_COMMUNICATION",          1LL << 0},
@@ -57,7 +80,13 @@
     return map;
 }
 
-std::unordered_map<std::string, int64_t>& getAudioDeviceOutMap() {
+// A map in case we need to return a flag for output devices.
+// This is 64 bits (and hence not the same as audio_device_t) because we need extra
+// bits to represent new devices.
+// NOT USED FOR R.  We do not use int64 flags.
+// This can be out of date for now, as it is unused even for string validation
+// (instead TypeConverter<OutputDeviceTraits> is used).
+const std::unordered_map<std::string, int64_t>& getAudioDeviceOutMap() {
     // DO NOT MODIFY VALUES (OK to add new ones).  This does NOT match audio_device_t.
     static std::unordered_map<std::string, int64_t> map{
         {"AUDIO_DEVICE_OUT_EARPIECE",                  1LL << 0},
@@ -96,24 +125,7 @@
     return map;
 }
 
-std::unordered_map<std::string, int32_t>& getCallerNameMap() {
-    // DO NOT MODIFY VALUES (OK to add new ones).
-    // This may be found in frameworks/av/media/libmediametrics/include/MediaMetricsConstants.h
-    static std::unordered_map<std::string, int32_t> map{
-        {"aaudio",        0},           // Native AAudio
-        {"java",          1},           // Java API layer
-        {"media",         2},           // libmedia (mediaplayer)
-        {"opensles",      3},           // Open SLES
-        {"rtp",           4},           // RTP communication
-        {"soundpool",     5},           // SoundPool
-        {"tonegenerator", 6},           // dial tones
-        {"unknown",       7},           // callerName not set
-        // R values above.
-    };
-    return map;
-}
-
-std::unordered_map<std::string, int32_t>& getThreadTypeMap() {
+const std::unordered_map<std::string, int32_t>& getAudioThreadTypeMap() {
     // DO NOT MODIFY VALUES (OK to add new ones).
     // This may be found in frameworks/av/services/audioflinger/Threads.h
     static std::unordered_map<std::string, int32_t> map{
@@ -130,6 +142,15 @@
     return map;
 }
 
+const std::unordered_map<std::string, int32_t>& getAudioTrackTraitsMap() {
+    // DO NOT MODIFY VALUES (OK to add new ones).
+    static std::unordered_map<std::string, int32_t> map{
+        {"static",        (1 << 0)},  // A static track
+        // R values above.
+    };
+    return map;
+}
+
 // Helper: Create the corresponding int32 from string flags split with '|'.
 template <typename Traits>
 int32_t int32FromFlags(const std::string &flags)
@@ -163,6 +184,37 @@
     return sFlags;
 }
 
+template <typename M>
+std::string validateStringFromMap(const std::string &str, const M& map)
+{
+    if (str.empty()) return {};
+
+    const auto result = stringutils::split(str, "|");
+    std::stringstream ss;
+    for (const auto &s : result) {
+        if (map.count(s) > 0) {
+            if (ss.tellp() > 0) ss << "|";
+            ss << s;
+        }
+    }
+    return ss.str();
+}
+
+template <typename M>
+typename M::mapped_type flagsFromMap(const std::string &str, const M& map)
+{
+    if (str.empty()) return {};
+
+    const auto result = stringutils::split(str, "|");
+    typename M::mapped_type value{};
+    for (const auto &s : result) {
+        auto it = map.find(s);
+        if (it == map.end()) continue;
+        value |= it->second;
+    }
+    return value;
+}
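+
+// Examples (illustrative only, using the maps defined in this file):
+//     validateStringFromMap("static|bogus", getAudioTrackTraitsMap());  // -> "static"
+//     flagsFromMap("static", getAudioTrackTraitsMap());                 // -> 1 (1 << 0)
+//     flagsFromMap("bogus", getAudioDeviceInMap());                     // -> 0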
+
 template <>
 int32_t lookup<CONTENT_TYPE>(const std::string &contentType)
 {
@@ -178,7 +230,7 @@
 {
     AudioContentTraits::Type value;
     if (!TypeConverter<AudioContentTraits>::fromString(contentType, value)) {
-        return "UNKNOWN";
+        return "";
     }
     return contentType.c_str() + sizeof("AUDIO_CONTENT_TYPE");
 }
@@ -198,7 +250,7 @@
 {
     FormatTraits::Type value;
     if (!TypeConverter<FormatTraits>::fromString(encoding, value)) {
-        return "INVALID";
+        return "";
     }
     return encoding.c_str() + sizeof("AUDIO_FORMAT");
 }
@@ -242,7 +294,7 @@
 {
     SourceTraits::Type value;
     if (!TypeConverter<SourceTraits>::fromString(sourceType, value)) {
-        return "DEFAULT";
+        return "";
     }
     return sourceType.c_str() + sizeof("AUDIO_SOURCE");
 }
@@ -262,7 +314,7 @@
 {
     StreamTraits::Type value;
     if (!TypeConverter<StreamTraits>::fromString(streamType, value)) {
-        return "DEFAULT";
+        return "";
     }
     return streamType.c_str() + sizeof("AUDIO_STREAM");
 }
@@ -282,7 +334,7 @@
 {
     UsageTraits::Type value;
     if (!TypeConverter<UsageTraits>::fromString(usage, value)) {
-        return "UNKNOWN";
+        return "";
     }
     return usage.c_str() + sizeof("AUDIO_USAGE");
 }
@@ -290,54 +342,40 @@
 template <>
 int64_t lookup<INPUT_DEVICE>(const std::string &inputDevice)
 {
-    auto& map = getAudioDeviceInMap();
-    auto it = map.find(inputDevice);
-    if (it == map.end()) {
-        return 0;
-    }
-    return it->second;
+    // NOT USED FOR R.
+    // Returns a set of bits, each one representing a device in inputDevice.
+    // This is a 64 bit integer, not the same as audio_device_t.
+    return flagsFromMap(inputDevice, getAudioDeviceInMap());
 }
 
 template <>
 std::string lookup<INPUT_DEVICE>(const std::string &inputDevice)
 {
-    auto& map = getAudioDeviceInMap();
-    auto it = map.find(inputDevice);
-    if (it == map.end()) {
-        return "NONE";
-    }
-    return inputDevice.c_str() + sizeof("AUDIO_DEVICE_IN");
+    return stringFromFlags<InputDeviceTraits>(inputDevice, sizeof("AUDIO_DEVICE_IN"));
 }
 
 template <>
 int64_t lookup<OUTPUT_DEVICE>(const std::string &outputDevice)
 {
-    auto& map = getAudioDeviceOutMap();
-    auto it = map.find(outputDevice);
-    if (it == map.end()) {
-        return 0; // nothing
-    }
-    return it->second;
+    // NOT USED FOR R.
+    // Returns a set of bits, each one representing a device in outputDevice.
+    // This is a 64 bit integer, not the same as audio_device_t.
+    return flagsFromMap(outputDevice, getAudioDeviceOutMap());
 }
 
 template <>
 std::string lookup<OUTPUT_DEVICE>(const std::string &outputDevice)
 {
-    auto& map = getAudioDeviceOutMap();
-    auto it = map.find(outputDevice);
-    if (it == map.end()) {
-        return "NONE";
-    }
-    return outputDevice.c_str() + sizeof("AUDIO_DEVICE_OUT");
+    return stringFromFlags<OutputDeviceTraits>(outputDevice, sizeof("AUDIO_DEVICE_OUT"));
 }
 
 template <>
 int32_t lookup<CALLER_NAME>(const std::string &callerName)
 {
-    auto& map = getCallerNameMap();
+    auto& map = getAudioCallerNameMap();
     auto it = map.find(callerName);
     if (it == map.end()) {
-        return 7;      // return unknown
+        return 0;      // return unknown
     }
     return it->second;
 }
@@ -345,10 +383,10 @@
 template <>
 std::string lookup<CALLER_NAME>(const std::string &callerName)
 {
-    auto& map = getCallerNameMap();
+    auto& map = getAudioCallerNameMap();
     auto it = map.find(callerName);
     if (it == map.end()) {
-        return "unknown";
+        return "";
     }
     return callerName;
 }
@@ -356,7 +394,7 @@
 template <>
 int32_t lookup<THREAD_TYPE>(const std::string &threadType)
 {
-    auto& map = getThreadTypeMap();
+    auto& map = getAudioThreadTypeMap();
     auto it = map.find(threadType);
     if (it == map.end()) {
         return -1; // note this as an illegal thread value as we don't have unknown here.
@@ -367,10 +405,10 @@
 template <>
 std::string lookup<THREAD_TYPE>(const std::string &threadType)
 {
-    auto& map = getThreadTypeMap();
+    auto& map = getAudioThreadTypeMap();
     auto it = map.find(threadType);
     if (it == map.end()) {
-        return "UNKNOWN";
+        return "";
     }
     return threadType;
 }
@@ -380,4 +418,16 @@
     return threadType == "RECORD" || threadType == "MMAP_CAPTURE";
 }
 
+template <>
+std::string lookup<TRACK_TRAITS>(const std::string &traits)
+{
+    return validateStringFromMap(traits, getAudioTrackTraitsMap());
+}
+
+template <>
+int32_t lookup<TRACK_TRAITS>(const std::string &traits)
+{
+    return flagsFromMap(traits, getAudioTrackTraitsMap());
+}
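+
+// For instance (illustrative only): lookup<TRACK_TRAITS, std::string>("static") returns
+// "static" and lookup<TRACK_TRAITS, int32_t>("static") returns 1, while an unrecognized
+// traits string yields "" and 0 respectively.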
+
 } // namespace android::mediametrics::types
diff --git a/services/mediametrics/AudioTypes.h b/services/mediametrics/AudioTypes.h
index a094e6e..e1deeb1 100644
--- a/services/mediametrics/AudioTypes.h
+++ b/services/mediametrics/AudioTypes.h
@@ -21,17 +21,19 @@
 
 namespace android::mediametrics::types {
 
-// Helper methods that map mediametrics logged strings to
-// integer codes.
-std::unordered_map<std::string, int64_t>& getAudioDeviceInMap();
-std::unordered_map<std::string, int64_t>& getAudioDeviceOutMap();
-std::unordered_map<std::string, int32_t>& getCallerNameMap();
-std::unordered_map<std::string, int32_t>& getThreadTypeMap();
+// Helper methods that map mediametrics logged strings to integer codes.
+// In R we do not use the integer codes, but we can still use these maps
+// to validate that logged strings are known values.
+const std::unordered_map<std::string, int32_t>& getAudioCallerNameMap();
+const std::unordered_map<std::string, int64_t>& getAudioDeviceInMap();
+const std::unordered_map<std::string, int64_t>& getAudioDeviceOutMap();
+const std::unordered_map<std::string, int32_t>& getAudioThreadTypeMap();
+const std::unordered_map<std::string, int32_t>& getAudioTrackTraitsMap();
 
 // Enumeration for the device connection results.
 enum DeviceConnectionResult : int32_t {
-    DEVICE_CONNECTION_RESULT_UNKNOWN = 0,              // Success is unknown.
-    DEVICE_CONNECTION_RESULT_SUCCESS = 1,              // Audio delivered
+    DEVICE_CONNECTION_RESULT_SUCCESS = 0,              // Audio delivered
+    DEVICE_CONNECTION_RESULT_UNKNOWN = 1,              // Success is unknown.
     DEVICE_CONNECTION_RESULT_JAVA_SERVICE_CANCEL = 2,  // Canceled in Java service
     // Do not modify the constants above after R.  Adding new constants is fine.
 };
@@ -48,6 +50,7 @@
     SOURCE_TYPE,
     STREAM_TYPE,
     THREAD_TYPE,
+    TRACK_TRAITS,
     USAGE,
 };
 
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index d682fed..48e766e 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -185,7 +185,7 @@
     }
 
     if (!isTrusted || item->getTimestamp() == 0) {
-        // Westworld logs two times for events: ElapsedRealTimeNs (BOOTTIME) and
+        // Statsd logs two times for events: ElapsedRealTimeNs (BOOTTIME) and
         // WallClockTimeNs (REALTIME), but currently logs REALTIME to cloud.
         //
         // For consistency and correlation with other logging mechanisms
diff --git a/services/mediametrics/MediaMetricsService.h b/services/mediametrics/MediaMetricsService.h
index d152264..792b7f0 100644
--- a/services/mediametrics/MediaMetricsService.h
+++ b/services/mediametrics/MediaMetricsService.h
@@ -65,7 +65,7 @@
     static nsecs_t roundTime(nsecs_t timeNs);
 
     /**
-     * Returns true if we should use uid for package name when uploading to WestWorld.
+     * Returns true if we should use uid for package name when uploading to statsd.
      */
     static bool useUidForPackage(const std::string& package, const std::string& installer);
 
diff --git a/services/mediametrics/StringUtils.h b/services/mediametrics/StringUtils.h
index d878720..7a8bbee 100644
--- a/services/mediametrics/StringUtils.h
+++ b/services/mediametrics/StringUtils.h
@@ -22,6 +22,30 @@
 namespace android::mediametrics::stringutils {
 
 /**
+ * fieldPrint is a helper method that logs to a stringstream a sequence of
+ * field names (in a fixed-size array) together with a matching number of arguments.
+ *
+ * stringstream << field[0] << ":" << arg0 << " ";
+ * stringstream << field[1] << ":" << arg1 << " ";
+ * ...
+ * stringstream << field[N-1] << ":" << arg{N-1} << " ";
+ *
+ * The number of fields must exactly match the (variable) arguments.
+ *
+ * Example:
+ *
+ * const char * const fields[] = { "integer" };
+ * std::stringstream ss;
+ * fieldPrint(ss, fields, int(10));
+ */
+template <size_t N, typename... Targs>
+void fieldPrint(std::stringstream& ss, const char * const (& fields)[N], Targs... args) {
+    static_assert(N == sizeof...(args));          // guarantee #fields == #args
+    auto fptr = fields;                           // get a pointer to the base of fields array
+    ((ss << *fptr++ << ":" << args << " "), ...); // (fold expression), send to stringstream.
+}
+
+/**
  * Return string tokens from iterator, separated by spaces and reserved chars.
  */
 std::string tokenizer(std::string::const_iterator& it,
diff --git a/services/mediametrics/TimeMachine.h b/services/mediametrics/TimeMachine.h
index 00a44a4..ce579b3 100644
--- a/services/mediametrics/TimeMachine.h
+++ b/services/mediametrics/TimeMachine.h
@@ -220,10 +220,10 @@
 
     using History = std::map<std::string /* key */, std::shared_ptr<KeyHistory>>;
 
-    static inline constexpr size_t kTimeSequenceMaxElements = 100;
-    static inline constexpr size_t kKeyMaxProperties = 100;
-    static inline constexpr size_t kKeyLowWaterMark = 500;
-    static inline constexpr size_t kKeyHighWaterMark = 1000;
+    static inline constexpr size_t kTimeSequenceMaxElements = 50;
+    static inline constexpr size_t kKeyMaxProperties = 50;
+    static inline constexpr size_t kKeyLowWaterMark = 400;
+    static inline constexpr size_t kKeyHighWaterMark = 500;
 
     // Estimated max data space usage is 3KB * kKeyHighWaterMark.
 
@@ -255,6 +255,7 @@
         {
             std::lock_guard lock2(other.mLock);
             mHistory = other.mHistory;
+            mGarbageCollectionCount = other.mGarbageCollectionCount.load();
         }
 
         // Now that we safely have our own shared pointers, let's dup them
@@ -420,6 +421,7 @@
     void clear() {
         std::lock_guard lock(mLock);
         mHistory.clear();
+        mGarbageCollectionCount = 0;
     }
 
     /**
@@ -453,6 +455,10 @@
         return { ss.str(), lines - ll };
     }
 
+    size_t getGarbageCollectionCount() const {
+        return mGarbageCollectionCount;
+    }
+
 private:
 
     // Obtains the lock for a KeyHistory.
@@ -496,8 +502,6 @@
         // TODO: something better than this for garbage collection.
         if (mHistory.size() < mKeyHighWaterMark) return false;
 
-        ALOGD("%s: garbage collection", __func__);
-
         // erase everything explicitly expired.
         std::multimap<int64_t, std::string> accessList;
         // use a stale vector with precise type to avoid type erasure overhead in garbage
@@ -534,12 +538,16 @@
         ALOGD("%s(%zu, %zu): key size:%zu",
                 __func__, mKeyLowWaterMark, mKeyHighWaterMark,
                 mHistory.size());
+
+        ++mGarbageCollectionCount;
         return true;
     }
 
     const size_t mKeyLowWaterMark = kKeyLowWaterMark;
     const size_t mKeyHighWaterMark = kKeyHighWaterMark;
 
+    std::atomic<size_t> mGarbageCollectionCount{};
+
     /**
      * Locking Strategy
      *
diff --git a/services/mediametrics/TransactionLog.h b/services/mediametrics/TransactionLog.h
index 8a22826..0ca4639 100644
--- a/services/mediametrics/TransactionLog.h
+++ b/services/mediametrics/TransactionLog.h
@@ -43,9 +43,9 @@
     // Transaction Log between the Low Water Mark and the High Water Mark.
 
     // low water mark
-    static inline constexpr size_t kLogItemsLowWater = 5000;
+    static inline constexpr size_t kLogItemsLowWater = 1700;
     // high water mark
-    static inline constexpr size_t kLogItemsHighWater = 10000;
+    static inline constexpr size_t kLogItemsHighWater = 2000;
 
     // Estimated max data usage is 1KB * kLogItemsHighWater.
 
@@ -79,6 +79,7 @@
         std::lock_guard lock2(other.mLock);
         mLog = other.mLog;
         mItemMap = other.mItemMap;
+        mGarbageCollectionCount = other.mGarbageCollectionCount.load();
 
         return *this;
     }
@@ -181,6 +182,11 @@
         std::lock_guard lock(mLock);
         mLog.clear();
         mItemMap.clear();
+        mGarbageCollectionCount = 0;
+    }
+
+    size_t getGarbageCollectionCount() const {
+        return mGarbageCollectionCount;
     }
 
 private:
@@ -216,8 +222,6 @@
     bool gc(std::vector<std::any>& garbage) REQUIRES(mLock) {
         if (mLog.size() < mHighWaterMark) return false;
 
-        ALOGD("%s: garbage collection", __func__);
-
         auto eraseEnd = mLog.begin();
         size_t toRemove = mLog.size() - mLowWaterMark;
         // remove at least those elements.
@@ -265,6 +269,7 @@
         ALOGD("%s(%zu, %zu): log size:%zu item map size:%zu, item map items:%zu",
                 __func__, mLowWaterMark, mHighWaterMark,
                 mLog.size(), mItemMap.size(), itemMapCount);
+        ++mGarbageCollectionCount;
         return true;
     }
 
@@ -287,6 +292,8 @@
     const size_t mLowWaterMark = kLogItemsLowWater;
     const size_t mHighWaterMark = kLogItemsHighWater;
 
+    std::atomic<size_t> mGarbageCollectionCount{};
+
     mutable std::mutex mLock;
 
     MapTimeItem mLog GUARDED_BY(mLock);
diff --git a/services/mediametrics/statsd_drm.cpp b/services/mediametrics/statsd_drm.cpp
index 89d6f8f..ac58929 100644
--- a/services/mediametrics/statsd_drm.cpp
+++ b/services/mediametrics/statsd_drm.cpp
@@ -50,30 +50,26 @@
     int64_t pkgVersionCode = item->getPkgVersionCode();
     int64_t mediaApexVersion = 0;
 
-    char *vendor = nullptr;
-    (void) item->getCString("vendor", &vendor);
-    char *description = nullptr;
-    (void) item->getCString("description", &description);
-    char *serialized_metrics = nullptr;
-    (void) item->getCString("serialized_metrics", &serialized_metrics);
+    std::string vendor;
+    (void) item->getString("vendor", &vendor);
+    std::string description;
+    (void) item->getString("description", &description);
+    std::string serialized_metrics;
+    (void) item->getString("serialized_metrics", &serialized_metrics);
 
     if (enabled_statsd) {
-        android::util::BytesField bf_serialized(serialized_metrics ? serialized_metrics : nullptr,
-                                                serialized_metrics ? strlen(serialized_metrics)
-                                                                   : 0);
+        android::util::BytesField bf_serialized(serialized_metrics.c_str(),
+                                                serialized_metrics.size());
         android::util::stats_write(android::util::MEDIAMETRICS_MEDIADRM_REPORTED,
                                    timestamp, pkgName.c_str(), pkgVersionCode,
                                    mediaApexVersion,
-                                   vendor, description,
+                                   vendor.c_str(),
+                                   description.c_str(),
                                    bf_serialized);
     } else {
-        ALOGV("NOT sending: mediadrm private data (len=%zu)",
-              serialized_metrics ? strlen(serialized_metrics) : 0);
+        ALOGV("NOT sending: mediadrm private data (len=%zu)", serialized_metrics.size());
     }
 
-    free(vendor);
-    free(description);
-    free(serialized_metrics);
     return true;
 }
 
@@ -87,23 +83,20 @@
     int64_t pkgVersionCode = item->getPkgVersionCode();
     int64_t mediaApexVersion = 0;
 
-    char *serialized_metrics = nullptr;
-    (void) item->getCString("serialized_metrics", &serialized_metrics);
+    std::string serialized_metrics;
+    (void) item->getString("serialized_metrics", &serialized_metrics);
 
     if (enabled_statsd) {
-        android::util::BytesField bf_serialized(serialized_metrics ? serialized_metrics : nullptr,
-                                                serialized_metrics ? strlen(serialized_metrics)
-                                                                   : 0);
+        android::util::BytesField bf_serialized(serialized_metrics.c_str(),
+                                                serialized_metrics.size());
         android::util::stats_write(android::util::MEDIAMETRICS_DRM_WIDEVINE_REPORTED,
                                    timestamp, pkgName.c_str(), pkgVersionCode,
                                    mediaApexVersion,
                                    bf_serialized);
     } else {
-        ALOGV("NOT sending: widevine private data (len=%zu)",
-              serialized_metrics ? strlen(serialized_metrics) : 0);
+        ALOGV("NOT sending: widevine private data (len=%zu)", serialized_metrics.size());
     }
 
-    free(serialized_metrics);
     return true;
 }
 
@@ -123,14 +116,14 @@
     int64_t pkgVersionCode = item->getPkgVersionCode();
     int64_t mediaApexVersion = 0;
 
-    char *plugin_id = nullptr;
-    (void) item->getCString("plugin_id", &plugin_id);
-    char *description = nullptr;
-    (void) item->getCString("description", &description);
+    std::string plugin_id;
+    (void) item->getString("plugin_id", &plugin_id);
+    std::string description;
+    (void) item->getString("description", &description);
     int32_t method_id = -1;
     (void) item->getInt32("method_id", &method_id);
-    char *mime_types = nullptr;
-    (void) item->getCString("mime_types", &mime_types);
+    std::string mime_types;
+    (void) item->getString("mime_types", &mime_types);
 
     // Corresponds to the 13 APIs tracked in the MediametricsDrmManagerReported statsd proto
     // Please see also DrmManager::kMethodIdMap
@@ -141,16 +134,15 @@
 
     android::util::stats_write(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED,
                                timestamp, pkgName.c_str(), pkgVersionCode, mediaApexVersion,
-                               plugin_id, description, method_id, mime_types,
+                               plugin_id.c_str(), description.c_str(),
+                               method_id, mime_types.c_str(),
                                methodCounts[0], methodCounts[1], methodCounts[2],
                                methodCounts[3], methodCounts[4], methodCounts[5],
                                methodCounts[6], methodCounts[7], methodCounts[8],
                                methodCounts[9], methodCounts[10], methodCounts[11],
                                methodCounts[12]);
 
-    free(plugin_id);
-    free(description);
-    free(mime_types);
     return true;
 }
+
 } // namespace android
diff --git a/services/mediametrics/tests/Android.bp b/services/mediametrics/tests/Android.bp
index bdeda30..c2e0759 100644
--- a/services/mediametrics/tests/Android.bp
+++ b/services/mediametrics/tests/Android.bp
@@ -21,6 +21,10 @@
         "libutils",
     ],
 
+    header_libs: [
+        "libaudioutils_headers",
+    ],
+
     srcs: [
         "mediametrics_tests.cpp",
     ],
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index 7da6306..478355b 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -18,12 +18,16 @@
 #include <utils/Log.h>
 
 #include "MediaMetricsService.h"
-#include "StringUtils.h"
 
 #include <stdio.h>
+#include <unordered_set>
 
 #include <gtest/gtest.h>
 #include <media/MediaMetricsItem.h>
+#include <system/audio.h>
+
+#include "AudioTypes.h"
+#include "StringUtils.h"
 
 using namespace android;
 
@@ -36,6 +40,15 @@
     return count;
 }
 
+template <typename M>
+ssize_t countDuplicates(const M& map) {
+    std::unordered_set<typename M::mapped_type> s;
+    for (const auto &m : map) {
+        s.emplace(m.second);
+    }
+    return map.size() - s.size();
+}
+
 TEST(mediametrics_tests, startsWith) {
   std::string s("test");
   ASSERT_EQ(true, android::mediametrics::startsWith(s, "te"));
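
As a quick illustration of the countDuplicates() helper added in the hunk above: it returns the number of map entries whose value is already used by an earlier entry (map size minus the count of distinct values). A small self-contained sketch with toy data, not the real audio-type tables:

#include <cassert>
#include <map>
#include <string>
#include <sys/types.h>
#include <unordered_set>

// Same shape as the test helper: size of the map minus the number of distinct values.
template <typename M>
ssize_t countDuplicates(const M& map) {
    std::unordered_set<typename M::mapped_type> s;
    for (const auto& m : map) {
        s.emplace(m.second);
    }
    return map.size() - s.size();
}

int main() {
    std::map<std::string, int> noDups{{"a", 1}, {"b", 2}, {"c", 3}};
    std::map<std::string, int> oneDup{{"a", 1}, {"b", 1}, {"c", 3}};  // "a" and "b" collide

    assert(countDuplicates(noDups) == 0);
    assert(countDuplicates(oneDup) == 1);
    return 0;
}
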
@@ -804,7 +817,7 @@
 
   // TODO: Verify contents of AudioAnalytics.
   // Currently there is no getter API in AudioAnalytics besides dump.
-  ASSERT_EQ(10, audioAnalytics.dump(1000).second /* lines */);
+  ASSERT_EQ(11, audioAnalytics.dump(1000).second /* lines */);
 
   ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
   // untrusted entities can add to an existing key
@@ -840,7 +853,7 @@
 
   // TODO: Verify contents of AudioAnalytics.
   // Currently there is no getter API in AudioAnalytics besides dump.
-  ASSERT_EQ(10, audioAnalytics.dump(1000).second /* lines */);
+  ASSERT_EQ(11, audioAnalytics.dump(1000).second /* lines */);
 
   ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
   // untrusted entities can add to an existing key
@@ -926,6 +939,132 @@
     ASSERT_EQ((size_t)1, timedAction.size());
 }
 
+// Ensure we don't introduce unexpected duplicates into our maps.
+TEST(mediametrics_tests, audio_types_tables) {
+    using namespace android::mediametrics::types;
+
+    ASSERT_EQ(0, countDuplicates(getAudioCallerNameMap()));
+    ASSERT_EQ(2, countDuplicates(getAudioDeviceInMap()));  // has dups
+    ASSERT_EQ(1, countDuplicates(getAudioDeviceOutMap())); // has dups
+    ASSERT_EQ(0, countDuplicates(getAudioThreadTypeMap()));
+    ASSERT_EQ(0, countDuplicates(getAudioTrackTraitsMap()));
+}
+
+// Check our string validation (before logging to statsd).
+// This variant checks the logged, possibly shortened string.
+TEST(mediametrics_tests, audio_types_string) {
+    using namespace android::mediametrics::types;
+
+    ASSERT_EQ("java", (lookup<CALLER_NAME, std::string>)("java"));
+    ASSERT_EQ("", (lookup<CALLER_NAME, std::string>)("random"));
+
+    ASSERT_EQ("SPEECH", (lookup<CONTENT_TYPE, std::string>)("AUDIO_CONTENT_TYPE_SPEECH"));
+    ASSERT_EQ("", (lookup<CONTENT_TYPE, std::string>)("random"));
+
+    ASSERT_EQ("FLAC", (lookup<ENCODING, std::string>)("AUDIO_FORMAT_FLAC"));
+    ASSERT_EQ("", (lookup<ENCODING, std::string>)("random"));
+
+    ASSERT_EQ("USB_DEVICE", (lookup<INPUT_DEVICE, std::string>)("AUDIO_DEVICE_IN_USB_DEVICE"));
+    ASSERT_EQ("BUILTIN_MIC|WIRED_HEADSET", (lookup<INPUT_DEVICE, std::string>)(
+            "AUDIO_DEVICE_IN_BUILTIN_MIC|AUDIO_DEVICE_IN_WIRED_HEADSET"));
+    ASSERT_EQ("", (lookup<INPUT_DEVICE, std::string>)("random"));
+
+    ASSERT_EQ("RAW", (lookup<INPUT_FLAG, std::string>)("AUDIO_INPUT_FLAG_RAW"));
+    ASSERT_EQ("HW_AV_SYNC|VOIP_TX", (lookup<INPUT_FLAG, std::string>)(
+            "AUDIO_INPUT_FLAG_HW_AV_SYNC|AUDIO_INPUT_FLAG_VOIP_TX"));
+    ASSERT_EQ("", (lookup<INPUT_FLAG, std::string>)("random"));
+
+    ASSERT_EQ("BLUETOOTH_SCO_CARKIT",
+            (lookup<OUTPUT_DEVICE, std::string>)("AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT"));
+    ASSERT_EQ("SPEAKER|HDMI", (lookup<OUTPUT_DEVICE, std::string>)(
+            "AUDIO_DEVICE_OUT_SPEAKER|AUDIO_DEVICE_OUT_HDMI"));
+    ASSERT_EQ("", (lookup<OUTPUT_DEVICE, std::string>)("random"));
+
+    ASSERT_EQ("PRIMARY", (lookup<OUTPUT_FLAG, std::string>)("AUDIO_OUTPUT_FLAG_PRIMARY"));
+    ASSERT_EQ("DEEP_BUFFER|NON_BLOCKING", (lookup<OUTPUT_FLAG, std::string>)(
+            "AUDIO_OUTPUT_FLAG_DEEP_BUFFER|AUDIO_OUTPUT_FLAG_NON_BLOCKING"));
+    ASSERT_EQ("", (lookup<OUTPUT_FLAG, std::string>)("random"));
+
+    ASSERT_EQ("MIC", (lookup<SOURCE_TYPE, std::string>)("AUDIO_SOURCE_MIC"));
+    ASSERT_EQ("", (lookup<SOURCE_TYPE, std::string>)("random"));
+
+    ASSERT_EQ("TTS", (lookup<STREAM_TYPE, std::string>)("AUDIO_STREAM_TTS"));
+    ASSERT_EQ("", (lookup<STREAM_TYPE, std::string>)("random"));
+
+    ASSERT_EQ("DIRECT", (lookup<THREAD_TYPE, std::string>)("DIRECT"));
+    ASSERT_EQ("", (lookup<THREAD_TYPE, std::string>)("random"));
+
+    ASSERT_EQ("static", (lookup<TRACK_TRAITS, std::string>)("static"));
+    ASSERT_EQ("", (lookup<TRACK_TRAITS, std::string>)("random"));
+
+    ASSERT_EQ("VOICE_COMMUNICATION",
+            (lookup<USAGE, std::string>)("AUDIO_USAGE_VOICE_COMMUNICATION"));
+    ASSERT_EQ("", (lookup<USAGE, std::string>)("random"));
+}
+
+// Check our string validation (before logging to statsd).
+// This variant checks integral value logging.
+TEST(mediametrics_tests, audio_types_integer) {
+    using namespace android::mediametrics::types;
+
+    ASSERT_EQ(2, (lookup<CALLER_NAME, int32_t>)("java"));
+    ASSERT_EQ(0, (lookup<CALLER_NAME, int32_t>)("random")); // 0 == unknown
+
+    ASSERT_EQ((int32_t)AUDIO_CONTENT_TYPE_SPEECH,
+            (lookup<CONTENT_TYPE, int32_t>)("AUDIO_CONTENT_TYPE_SPEECH"));
+    ASSERT_EQ((int32_t)AUDIO_CONTENT_TYPE_UNKNOWN, (lookup<CONTENT_TYPE, int32_t>)("random"));
+
+    ASSERT_EQ((int32_t)AUDIO_FORMAT_FLAC, (lookup<ENCODING, int32_t>)("AUDIO_FORMAT_FLAC"));
+    ASSERT_EQ((int32_t)AUDIO_FORMAT_INVALID, (lookup<ENCODING, int32_t>)("random"));
+
+    ASSERT_EQ(getAudioDeviceInMap().at("AUDIO_DEVICE_IN_USB_DEVICE"),
+            (lookup<INPUT_DEVICE, int64_t>)("AUDIO_DEVICE_IN_USB_DEVICE"));
+    ASSERT_EQ(getAudioDeviceInMap().at("AUDIO_DEVICE_IN_BUILTIN_MIC")
+            | getAudioDeviceInMap().at("AUDIO_DEVICE_IN_WIRED_HEADSET"),
+            (lookup<INPUT_DEVICE, int64_t>)(
+            "AUDIO_DEVICE_IN_BUILTIN_MIC|AUDIO_DEVICE_IN_WIRED_HEADSET"));
+    ASSERT_EQ(0, (lookup<INPUT_DEVICE, int64_t>)("random"));
+
+    ASSERT_EQ((int32_t)AUDIO_INPUT_FLAG_RAW,
+            (lookup<INPUT_FLAG, int32_t>)("AUDIO_INPUT_FLAG_RAW"));
+    ASSERT_EQ((int32_t)AUDIO_INPUT_FLAG_HW_AV_SYNC
+            | (int32_t)AUDIO_INPUT_FLAG_VOIP_TX,
+            (lookup<INPUT_FLAG, int32_t>)(
+            "AUDIO_INPUT_FLAG_HW_AV_SYNC|AUDIO_INPUT_FLAG_VOIP_TX"));
+    ASSERT_EQ(0, (lookup<INPUT_FLAG, int32_t>)("random"));
+
+    ASSERT_EQ(getAudioDeviceOutMap().at("AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT"),
+            (lookup<OUTPUT_DEVICE, int64_t>)("AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT"));
+    ASSERT_EQ(getAudioDeviceOutMap().at("AUDIO_DEVICE_OUT_SPEAKER")
+            | getAudioDeviceOutMap().at("AUDIO_DEVICE_OUT_HDMI"),
+            (lookup<OUTPUT_DEVICE, int64_t>)(
+            "AUDIO_DEVICE_OUT_SPEAKER|AUDIO_DEVICE_OUT_HDMI"));
+    ASSERT_EQ(0, (lookup<OUTPUT_DEVICE, int64_t>)("random"));
+
+    ASSERT_EQ((int32_t)AUDIO_OUTPUT_FLAG_PRIMARY,
+            (lookup<OUTPUT_FLAG, int32_t>)("AUDIO_OUTPUT_FLAG_PRIMARY"));
+    ASSERT_EQ((int32_t)AUDIO_OUTPUT_FLAG_DEEP_BUFFER | (int32_t)AUDIO_OUTPUT_FLAG_NON_BLOCKING,
+            (lookup<OUTPUT_FLAG, int32_t>)(
+            "AUDIO_OUTPUT_FLAG_DEEP_BUFFER|AUDIO_OUTPUT_FLAG_NON_BLOCKING"));
+    ASSERT_EQ(0, (lookup<OUTPUT_FLAG, int32_t>)("random"));
+
+    ASSERT_EQ((int32_t)AUDIO_SOURCE_MIC, (lookup<SOURCE_TYPE, int32_t>)("AUDIO_SOURCE_MIC"));
+    ASSERT_EQ((int32_t)AUDIO_SOURCE_DEFAULT, (lookup<SOURCE_TYPE, int32_t>)("random"));
+
+    ASSERT_EQ((int32_t)AUDIO_STREAM_TTS, (lookup<STREAM_TYPE, int32_t>)("AUDIO_STREAM_TTS"));
+    ASSERT_EQ((int32_t)AUDIO_STREAM_DEFAULT, (lookup<STREAM_TYPE, int32_t>)("random"));
+
+    ASSERT_EQ(1, (lookup<THREAD_TYPE, int32_t>)("DIRECT"));
+    ASSERT_EQ(-1, (lookup<THREAD_TYPE, int32_t>)("random"));
+
+    ASSERT_EQ(getAudioTrackTraitsMap().at("static"), (lookup<TRACK_TRAITS, int32_t>)("static"));
+    ASSERT_EQ(0, (lookup<TRACK_TRAITS, int32_t>)("random"));
+
+    ASSERT_EQ((int32_t)AUDIO_USAGE_VOICE_COMMUNICATION,
+            (lookup<USAGE, int32_t>)("AUDIO_USAGE_VOICE_COMMUNICATION"));
+    ASSERT_EQ((int32_t)AUDIO_USAGE_UNKNOWN, (lookup<USAGE, int32_t>)("random"));
+}
+
 #if 0
 // Stress test code for garbage collection, you need to enable AID_SHELL as trusted to run
 // in MediaMetricsService.cpp.
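
The new audio_types_string and audio_types_integer tests exercise a table lookup that maps a logged string to either a sanitized string or an integral code, falling back to an "unknown" value for unrecognized input. The toy function below sketches only that general shape as an assumption; the real android::mediametrics::types::lookup<> is tag-templated and also handles '|'-separated flag strings, which this version does not:

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

// Hypothetical sketch of the table-lookup idea the tests exercise.
// "java" -> 2 matches the test expectation; the other values are illustrative only.
int32_t lookupCallerName(const std::string& name) {
    static const std::map<std::string, int32_t> kMap{
        {"aaudio", 1},
        {"java", 2},
        {"media", 3},
    };
    auto it = kMap.find(name);
    return it == kMap.end() ? 0 /* unknown */ : it->second;
}

int main() {
    assert(lookupCallerName("java") == 2);
    assert(lookupCallerName("random") == 0);  // unrecognized strings map to "unknown"
    return 0;
}
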
diff --git a/services/oboeservice/AAudioClientTracker.cpp b/services/oboeservice/AAudioClientTracker.cpp
index 6e14434..9d9ca63 100644
--- a/services/oboeservice/AAudioClientTracker.cpp
+++ b/services/oboeservice/AAudioClientTracker.cpp
@@ -106,18 +106,9 @@
 
 aaudio_result_t
 AAudioClientTracker::registerClientStream(pid_t pid, sp<AAudioServiceStreamBase> serviceStream) {
-    aaudio_result_t result = AAUDIO_OK;
     ALOGV("registerClientStream(%d,)\n", pid);
     std::lock_guard<std::mutex> lock(mLock);
-    sp<NotificationClient> notificationClient = mNotificationClients[pid];
-    if (notificationClient == 0) {
-        // This will get called the first time the audio server registers an internal stream.
-        ALOGV("registerClientStream(%d,) unrecognized pid\n", pid);
-        notificationClient = new NotificationClient(pid, nullptr);
-        mNotificationClients[pid] = notificationClient;
-    }
-    notificationClient->registerClientStream(serviceStream);
-    return result;
+    return getNotificationClient_l(pid)->registerClientStream(serviceStream);
 }
 
 // Find the tracker for this process and remove it.
@@ -136,6 +127,33 @@
     return AAUDIO_OK;
 }
 
+void AAudioClientTracker::setExclusiveEnabled(pid_t pid, bool enabled) {
+    ALOGD("%s(%d, %d)\n", __func__, pid, enabled);
+    std::lock_guard<std::mutex> lock(mLock);
+    getNotificationClient_l(pid)->setExclusiveEnabled(enabled);
+}
+
+bool AAudioClientTracker::isExclusiveEnabled(pid_t pid) {
+    std::lock_guard<std::mutex> lock(mLock);
+    return getNotificationClient_l(pid)->isExclusiveEnabled();
+}
+
+sp<AAudioClientTracker::NotificationClient>
+        AAudioClientTracker::getNotificationClient_l(pid_t pid) {
+    sp<NotificationClient> notificationClient = mNotificationClients[pid];
+    if (notificationClient == nullptr) {
+        // This will get called the first time the audio server uses this PID.
+        ALOGV("%s(%d,) unrecognized PID\n", __func__, pid);
+        notificationClient = new AAudioClientTracker::NotificationClient(pid, nullptr);
+        mNotificationClients[pid] = notificationClient;
+    }
+    return notificationClient;
+}
+
+// =======================================
+// AAudioClientTracker::NotificationClient
+// =======================================
+
 AAudioClientTracker::NotificationClient::NotificationClient(pid_t pid, const sp<IBinder>& binder)
         : mProcessId(pid), mBinder(binder) {
 }
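
The new getNotificationClient_l() relies on std::map::operator[] default-constructing a null sp<> on first access, which the helper then fills in exactly once. A minimal sketch of that lazy-creation pattern with toy types (shared_ptr stands in for sp<>, and Client for the much richer NotificationClient):

#include <cassert>
#include <map>
#include <memory>

// Toy stand-in for the tracker's per-PID record; the real NotificationClient
// also holds a binder for death notifications and a set of streams.
struct Client {
    int pid;
    bool exclusiveEnabled = true;
};

// Lazy creation: operator[] inserts a default (null) entry on first use,
// which we then populate, so every PID gets exactly one record.
std::shared_ptr<Client> getClient(std::map<int, std::shared_ptr<Client>>& clients, int pid) {
    std::shared_ptr<Client>& client = clients[pid];
    if (client == nullptr) {
        client = std::make_shared<Client>(Client{pid});
    }
    return client;
}

int main() {
    std::map<int, std::shared_ptr<Client>> clients;
    auto a = getClient(clients, 42);
    auto b = getClient(clients, 42);
    assert(a == b);              // second lookup returns the same record
    assert(clients.size() == 1); // only one entry was created
    return 0;
}
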
diff --git a/services/oboeservice/AAudioClientTracker.h b/services/oboeservice/AAudioClientTracker.h
index 00ff467..943b809 100644
--- a/services/oboeservice/AAudioClientTracker.h
+++ b/services/oboeservice/AAudioClientTracker.h
@@ -58,6 +58,15 @@
     aaudio_result_t unregisterClientStream(pid_t pid,
                                            android::sp<AAudioServiceStreamBase> serviceStream);
 
+    /**
+     * Specify whether a process is allowed to create an EXCLUSIVE MMAP stream.
+     * @param pid process ID of the client app
+     * @param enabled true to allow this process to create EXCLUSIVE MMAP streams
+     */
+    void setExclusiveEnabled(pid_t pid, bool enabled);
+
+    bool isExclusiveEnabled(pid_t pid);
+
     android::AAudioService *getAAudioService() const {
         return mAAudioService;
     }
@@ -84,17 +93,29 @@
 
         aaudio_result_t unregisterClientStream(android::sp<AAudioServiceStreamBase> serviceStream);
 
+        void setExclusiveEnabled(bool enabled) {
+            mExclusiveEnabled = enabled;
+        }
+
+        bool isExclusiveEnabled() {
+            return mExclusiveEnabled;
+        }
+
         // IBinder::DeathRecipient
         virtual     void    binderDied(const android::wp<IBinder>& who);
 
-    protected:
+    private:
         mutable std::mutex                              mLock;
         const pid_t                                     mProcessId;
         std::set<android::sp<AAudioServiceStreamBase>>  mStreams;
         // hold onto binder to receive death notifications
         android::sp<IBinder>                            mBinder;
+        bool                                            mExclusiveEnabled = true;
     };
 
+    // This must be called under mLock
+    android::sp<NotificationClient> getNotificationClient_l(pid_t pid);
+
     mutable std::mutex                               mLock;
     std::map<pid_t, android::sp<NotificationClient>> mNotificationClients;
     android::AAudioService                          *mAAudioService = nullptr;
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index c9bf72f..9f34153 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -25,6 +25,7 @@
 #include <sstream>
 #include <utility/AAudioUtilities.h>
 
+#include "AAudioClientTracker.h"
 #include "AAudioEndpointManager.h"
 #include "AAudioServiceEndpointShared.h"
 #include "AAudioServiceEndpointMMAP.h"
@@ -174,7 +175,15 @@
                 && !request.isSharingModeMatchRequired()) { // app did not request a shared stream
             ALOGD("%s() endpoint in EXCLUSIVE use. Steal it!", __func__);
             mExclusiveStolenCount++;
-            endpointToSteal = endpoint;
+            // Prevent this process from getting another EXCLUSIVE stream.
+            // This avoids two clients colliding after a DISCONNECTION
+            // when they both try to open an exclusive stream at the same time.
+            // That can result in a stream getting disconnected between the OPEN
+            // and START calls, which would hurt app compatibility.
+            // An app can avoid this situation by closing its streams when
+            // it is paused.
+            AAudioClientTracker::getInstance().setExclusiveEnabled(request.getProcessId(), false);
+            endpointToSteal = endpoint; // return it to caller
         }
         return nullptr;
     } else {
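
For context on the app-side behavior this policy interacts with: a client that asks for an EXCLUSIVE stream may be granted SHARED instead (for example once its PID has been marked not exclusive-enabled), so a well-behaved app checks what it was actually given. A minimal client-side sketch, assuming the standard AAudio NDK C API, with error handling abbreviated:

#include <aaudio/AAudio.h>
#include <cstdio>

int main() {
    AAudioStreamBuilder* builder = nullptr;
    if (AAudio_createStreamBuilder(&builder) != AAUDIO_OK) return 1;

    // Ask for EXCLUSIVE but do not require it; the service may grant SHARED.
    AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_EXCLUSIVE);

    AAudioStream* stream = nullptr;
    aaudio_result_t result = AAudioStreamBuilder_openStream(builder, &stream);
    AAudioStreamBuilder_delete(builder);
    if (result != AAUDIO_OK) return 1;

    // Check what was actually granted before assuming EXCLUSIVE MMAP behavior.
    if (AAudioStream_getSharingMode(stream) != AAUDIO_SHARING_MODE_EXCLUSIVE) {
        std::printf("Granted a SHARED stream instead of EXCLUSIVE\n");
    }

    // Closing streams when the app pauses releases the endpoint, so other
    // clients never need to steal it (which disables EXCLUSIVE for this PID).
    AAudioStream_close(stream);
    return 0;
}
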
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 82b12d6..22cdb35 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -117,7 +117,8 @@
         return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
     }
 
-    if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE) {
+    if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE
+        && AAudioClientTracker::getInstance().isExclusiveEnabled(request.getProcessId())) {
         // only trust audioserver for in service indication
         bool inService = false;
         if (isCallerInService()) {
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 15cbd82..ceefe93 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -95,9 +95,16 @@
         mRegisteredStreams.swap(streamsDisconnected);
     }
     mConnected.store(false);
+    // We need to stop all the streams before we disconnect them.
+    // Otherwise there is a race condition where the first disconnected app
+    // tries to reopen a stream as MMAP but is blocked by the second stream,
+    // which hasn't stopped yet. Then the first app ends up with a Legacy stream.
     for (const auto &stream : streamsDisconnected) {
-        ALOGD("%s() - stop and disconnect port %d", __func__, stream->getPortHandle());
+        ALOGD("%s() - stop(), port = %d", __func__, stream->getPortHandle());
         stream->stop();
+    }
+    for (const auto &stream : streamsDisconnected) {
+        ALOGD("%s() - disconnect(), port = %d", __func__, stream->getPortHandle());
         stream->disconnect();
     }
     return streamsDisconnected;
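
The reordering above stops every stream before disconnecting any of them, so no client observes a peer that is disconnected but still running. A generic sketch of that two-phase teardown shape, with toy types, to make the ordering explicit:

#include <cstdio>
#include <vector>

// Toy stream with the two operations the endpoint teardown cares about.
struct Stream {
    int port;
    void stop()       { std::printf("stop port %d\n", port); }
    void disconnect() { std::printf("disconnect port %d\n", port); }
};

// Two-phase teardown: stop *all* streams first, then disconnect them.
void disconnectAll(std::vector<Stream>& streams) {
    for (auto& s : streams) s.stop();        // phase 1: everything idle
    for (auto& s : streams) s.disconnect();  // phase 2: tear down
}

int main() {
    std::vector<Stream> streams{{1}, {2}};
    disconnectAll(streams);
    return 0;
}
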