Merge "android.media.codec-aconfig-cc: Set double_loadable to true" into main am: 20024657cd am: a77d2bd563

Original change: https://android-review.googlesource.com/c/platform/frameworks/av/+/2980860

Change-Id: I39e4897fb8d51b18e2dba39161d3379b1a800a2c
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/camera/Android.bp b/camera/Android.bp
index 22f1633..d0f8e7e 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 package {
+    default_team: "trendy_team_camera_framework",
     default_applicable_licenses: ["frameworks_av_camera_license"],
 }
 
@@ -46,6 +47,7 @@
 aconfig_declarations {
     name: "camera_platform_flags",
     package: "com.android.internal.camera.flags",
+    container: "system",
     srcs: ["camera_platform.aconfig"],
 }
 
@@ -64,6 +66,7 @@
     name: "camera_headers",
     export_include_dirs: ["include"],
 }
+
 cc_library {
     name: "libcamera_client",
 
@@ -119,10 +122,14 @@
         "frameworks/native/include/media/openmax",
     ],
     export_include_dirs: [
-         "include",
-         "include/camera"
+        "include",
+        "include/camera",
     ],
-    export_shared_lib_headers: ["libcamera_metadata", "libnativewindow", "libgui"],
+    export_shared_lib_headers: [
+        "libcamera_metadata",
+        "libnativewindow",
+        "libgui",
+    ],
 
     cflags: [
         "-Werror",
@@ -152,7 +159,7 @@
 
     export_include_dirs: [
         "include",
-        "include/camera"
+        "include/camera",
     ],
 }
 
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 2e808d1..424923a 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -880,7 +880,7 @@
     return OK;
 }
 
-metadata_vendor_id_t CameraMetadata::getVendorId() {
+metadata_vendor_id_t CameraMetadata::getVendorId() const {
     return get_camera_metadata_vendor_id(mBuffer);
 }
 
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index fb26f83..c12a1a1 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -466,7 +466,7 @@
 
 int VendorTagDescriptorCache::getTagType(uint32_t tag,
         metadata_vendor_id_t id) const {
-    int ret = 0;
+    int ret = -1;
     auto desc = mVendorMap.find(id);
     if (desc != mVendorMap.end()) {
         ret = desc->second->getTagType(tag);
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 0eeeb7f..4bea896 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -278,14 +278,28 @@
     CameraMetadataNative createDefaultRequest(@utf8InCpp String cameraId, int templateId);
 
     /**
-      * Check whether a particular session configuration with optional session parameters
-      * has camera device support.
-      *
-      * @param cameraId The camera id to query session configuration on
-      * @param sessionConfiguration Specific session configuration to be verified.
-      * @return true  - in case the stream combination is supported.
-      *         false - in case there is no device support.
-      */
+     * Check whether a particular session configuration with optional session parameters
+     * has camera device support.
+     *
+     * @param cameraId The camera id to query session configuration for
+     * @param sessionConfiguration Specific session configuration to be verified.
+     * @return true  - in case the stream combination is supported.
+     *         false - in case there is no device support.
+     */
     boolean isSessionConfigurationWithParametersSupported(@utf8InCpp String cameraId,
             in SessionConfiguration sessionConfiguration);
+
+    /**
+     * Get the camera characteristics for a particular session configuration for
+     * the given camera device.
+     *
+     * @param cameraId ID of the device for which the session characteristics must be fetched.
+     * @param sessionConfiguration session configuration for which the characteristics
+     * must be fetched.
+     * @return - characteristics associated with the given session.
+     */
+    CameraMetadataNative getSessionCharacteristics(@utf8InCpp String cameraId,
+                int targetSdkVersion,
+                boolean overrideToPortrait,
+                in SessionConfiguration sessionConfiguration);
 }
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
index 843e0d4..8e1fcc0 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -99,15 +99,6 @@
       */
     boolean isSessionConfigurationSupported(in SessionConfiguration sessionConfiguration);
 
-    /**
-     * Get the camera characteristics for a particular session configuration
-     *
-     * @param sessionConfiguration Specific session configuration for which the characteristics
-     * are fetched.
-     * @return - characteristics associated with the given session.
-     */
-    CameraMetadataNative getSessionCharacteristics(in SessionConfiguration sessionConfiguration);
-
     void deleteStream(int streamId);
 
     /**
diff --git a/camera/camera_platform.aconfig b/camera/camera_platform.aconfig
index 5d2a263..1f50570 100644
--- a/camera/camera_platform.aconfig
+++ b/camera/camera_platform.aconfig
@@ -1,4 +1,5 @@
 package: "com.android.internal.camera.flags"
+container: "system"
 
 flag {
      namespace: "camera_platform"
@@ -23,6 +24,13 @@
 
 flag {
      namespace: "camera_platform"
+     name: "watch_foreground_changes"
+     description: "Request AppOps to notify changes in the foreground status of the client"
+     bug: "290086710"
+}
+
+flag {
+     namespace: "camera_platform"
      name: "log_ultrawide_usage"
      description: "Enable measuring how much usage there is for ultrawide-angle cameras"
      bug: "300515796"
@@ -76,3 +84,52 @@
      description: "Enable creating MultiResolutionImageReader with usage flag configuration"
      bug: "301588215"
 }
+
+flag {
+     namespace: "camera_platform"
+     name: "use_ro_board_api_level_for_vndk_version"
+     description: "Enable using ro.board.api_level instead of ro.vndk.version to get VNDK version"
+     bug: "312315580"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "camera_extensions_characteristics_get"
+     description: "Enable get extension specific camera characteristics API"
+     bug: "280649914"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "delay_lazy_hal_instantiation"
+     description: "Only trigger lazy HAL instantiation when the HAL is needed for an operation."
+     bug: "319735068"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "return_buffers_outside_locks"
+     description: "Enable returning graphics buffers to buffer queues without holding the in-flight mutex"
+     bug: "315526878"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "camera_device_setup"
+     description: "Create an intermediate Camera Device class for limited CameraDevice access."
+     bug: "320741775"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "camera_privacy_allowlist"
+     description: "Allowlisting to exempt safety-relevant cameras from privacy control for automotive devices"
+     bug: "282814430"
+}
+
+flag {
+     namespace: "camera_platform"
+     name: "extension_10_bit"
+     description: "Enables 10-bit support in the camera extensions."
+     bug: "316375635"
+}
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index 13b705c..6862cb1 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 package {
+    default_team: "trendy_team_camera_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_camera_license"
diff --git a/camera/include/camera/CameraMetadata.h b/camera/include/camera/CameraMetadata.h
index c56ee6d..2903dfb 100644
--- a/camera/include/camera/CameraMetadata.h
+++ b/camera/include/camera/CameraMetadata.h
@@ -245,7 +245,7 @@
     /**
      * Return the current vendor tag id associated with this metadata.
      */
-    metadata_vendor_id_t getVendorId();
+    metadata_vendor_id_t getVendorId() const;
 
   private:
     camera_metadata_t *mBuffer;
diff --git a/camera/ndk/Android.bp b/camera/ndk/Android.bp
index d4dd546..421469a 100644
--- a/camera/ndk/Android.bp
+++ b/camera/ndk/Android.bp
@@ -17,6 +17,7 @@
 // frameworks/av/include.
 
 package {
+    default_team: "trendy_team_camera_framework",
     default_applicable_licenses: ["frameworks_av_camera_ndk_license"],
 }
 
@@ -154,8 +155,8 @@
         "libcamera_metadata",
         "libmediandk",
         "android.frameworks.cameraservice.common-V1-ndk",
-        "android.frameworks.cameraservice.device-V1-ndk",
-        "android.frameworks.cameraservice.service-V1-ndk",
+        "android.frameworks.cameraservice.device-V2-ndk",
+        "android.frameworks.cameraservice.service-V2-ndk",
     ],
     static_libs: [
         "android.hardware.camera.common@1.0-helper",
@@ -188,7 +189,6 @@
     ],
     static_libs: [
         "android.hardware.camera.common@1.0-helper",
-        "android.hidl.token@1.0",
     ],
     cflags: [
         "-D__ANDROID_VNDK__",
diff --git a/camera/ndk/NdkCameraCaptureSession.cpp b/camera/ndk/NdkCameraCaptureSession.cpp
index 4387cc6..92de1e4 100644
--- a/camera/ndk/NdkCameraCaptureSession.cpp
+++ b/camera/ndk/NdkCameraCaptureSession.cpp
@@ -213,7 +213,7 @@
 EXPORT
 camera_status_t ACameraCaptureSession_prepareWindow(
         ACameraCaptureSession* session,
-        ACameraWindowType *window) {
+        ANativeWindow *window) {
     ATRACE_CALL();
     if (session == nullptr || window == nullptr) {
         ALOGE("%s: Error: session %p / window %p is null", __FUNCTION__, session, window);
diff --git a/camera/ndk/NdkCameraDevice.cpp b/camera/ndk/NdkCameraDevice.cpp
index 8211671..f2ec573 100644
--- a/camera/ndk/NdkCameraDevice.cpp
+++ b/camera/ndk/NdkCameraDevice.cpp
@@ -124,7 +124,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionOutput_create(
-        ACameraWindowType* window, /*out*/ACaptureSessionOutput** out) {
+        ANativeWindow* window, /*out*/ACaptureSessionOutput** out) {
     ATRACE_CALL();
     if (window == nullptr || out == nullptr) {
         ALOGE("%s: Error: bad argument. window %p, out %p",
@@ -137,7 +137,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionSharedOutput_create(
-        ACameraWindowType* window, /*out*/ACaptureSessionOutput** out) {
+        ANativeWindow* window, /*out*/ACaptureSessionOutput** out) {
     ATRACE_CALL();
     if (window == nullptr || out == nullptr) {
         ALOGE("%s: Error: bad argument. window %p, out %p",
@@ -150,7 +150,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionPhysicalOutput_create(
-        ACameraWindowType* window, const char* physicalId,
+        ANativeWindow* window, const char* physicalId,
         /*out*/ACaptureSessionOutput** out) {
     ATRACE_CALL();
     if (window == nullptr || physicalId == nullptr || out == nullptr) {
@@ -164,7 +164,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionSharedOutput_add(ACaptureSessionOutput *out,
-        ACameraWindowType* window) {
+        ANativeWindow* window) {
     ATRACE_CALL();
     if ((window == nullptr) || (out == nullptr)) {
         ALOGE("%s: Error: bad argument. window %p, out %p",
@@ -190,7 +190,7 @@
 
 EXPORT
 camera_status_t ACaptureSessionSharedOutput_remove(ACaptureSessionOutput *out,
-        ACameraWindowType* window) {
+        ANativeWindow* window) {
     ATRACE_CALL();
     if ((window == nullptr) || (out == nullptr)) {
         ALOGE("%s: Error: bad argument. window %p, out %p",
diff --git a/camera/ndk/NdkCaptureRequest.cpp b/camera/ndk/NdkCaptureRequest.cpp
index 87de4a9..b851a1d 100644
--- a/camera/ndk/NdkCaptureRequest.cpp
+++ b/camera/ndk/NdkCaptureRequest.cpp
@@ -27,7 +27,7 @@
 
 EXPORT
 camera_status_t ACameraOutputTarget_create(
-        ACameraWindowType* window, ACameraOutputTarget** out) {
+        ANativeWindow* window, ACameraOutputTarget** out) {
     ATRACE_CALL();
     if (window == nullptr) {
         ALOGE("%s: Error: input window is null", __FUNCTION__);
diff --git a/camera/ndk/impl/ACameraCaptureSession.cpp b/camera/ndk/impl/ACameraCaptureSession.cpp
index 73439c7..449c0b4 100644
--- a/camera/ndk/impl/ACameraCaptureSession.cpp
+++ b/camera/ndk/impl/ACameraCaptureSession.cpp
@@ -146,7 +146,7 @@
     return ret;
 }
 
-camera_status_t ACameraCaptureSession::prepare(ACameraWindowType* window) {
+camera_status_t ACameraCaptureSession::prepare(ANativeWindow* window) {
 #ifdef __ANDROID_VNDK__
     std::shared_ptr<acam::CameraDevice> dev = getDevicePtr();
 #else
diff --git a/camera/ndk/impl/ACameraCaptureSession.h b/camera/ndk/impl/ACameraCaptureSession.h
index 88135ba..0d7a2c1 100644
--- a/camera/ndk/impl/ACameraCaptureSession.h
+++ b/camera/ndk/impl/ACameraCaptureSession.h
@@ -23,14 +23,14 @@
 
 #ifdef __ANDROID_VNDK__
 #include "ndk_vendor/impl/ACameraDevice.h"
-#include "ndk_vendor/impl/ACameraCaptureSessionVendor.h"
 #else
 #include "ACameraDevice.h"
+#endif
 
 using namespace android;
 
 struct ACaptureSessionOutput {
-    explicit ACaptureSessionOutput(ACameraWindowType* window, bool isShared = false,
+    explicit ACaptureSessionOutput(ANativeWindow* window, bool isShared = false,
             const char* physicalCameraId = "") :
             mWindow(window), mIsShared(isShared), mPhysicalCameraId(physicalCameraId) {};
 
@@ -47,28 +47,27 @@
         return mWindow > other.mWindow;
     }
 
-    inline bool isWindowEqual(ACameraWindowType* window) const {
+    inline bool isWindowEqual(ANativeWindow* window) const {
         return mWindow == window;
     }
 
     // returns true if the window was successfully added, false otherwise.
-    inline bool addSharedWindow(ACameraWindowType* window) {
+    inline bool addSharedWindow(ANativeWindow* window) {
         auto ret = mSharedWindows.insert(window);
         return ret.second;
     }
 
     // returns the number of elements removed.
-    inline size_t removeSharedWindow(ACameraWindowType* window) {
+    inline size_t removeSharedWindow(ANativeWindow* window) {
         return mSharedWindows.erase(window);
     }
 
-    ACameraWindowType* mWindow;
-    std::set<ACameraWindowType *> mSharedWindows;
+    ANativeWindow* mWindow;
+    std::set<ANativeWindow*> mSharedWindows;
     bool           mIsShared;
     int            mRotation = CAMERA3_STREAM_ROTATION_0;
     std::string mPhysicalCameraId;
 };
-#endif
 
 struct ACaptureSessionOutputContainer {
     std::set<ACaptureSessionOutput> mOutputs;
@@ -147,7 +146,7 @@
         mPreparedCb.context = context;
         mPreparedCb.onWindowPrepared = cb;
     }
-    camera_status_t prepare(ACameraWindowType *window);
+    camera_status_t prepare(ANativeWindow *window);
 
     ACameraDevice* getDevice();
 
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 97d65b0..1fa71f4 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -341,7 +341,7 @@
     return ACAMERA_OK;
 }
 
-camera_status_t CameraDevice::prepareLocked(ACameraWindowType *window) {
+camera_status_t CameraDevice::prepareLocked(ANativeWindow *window) {
     camera_status_t ret = checkCameraClosedOrErrorLocked();
     if (ret != ACAMERA_OK) {
         return ret;
@@ -1097,7 +1097,7 @@
                     if (onWindowPrepared == nullptr) {
                         return;
                     }
-                    ACameraWindowType* anw;
+                    ANativeWindow* anw;
                     found = msg->findPointer(kAnwKey, (void**) &anw);
                     if (!found) {
                         ALOGE("%s: Cannot find ANativeWindow: %d!", __FUNCTION__, __LINE__);
@@ -1823,7 +1823,7 @@
         return ret;
     }
     // We've found the window corresponding to the surface id.
-    ACameraWindowType *window = it->second.first;
+    ANativeWindow *window = it->second.first;
     sp<AMessage> msg = new AMessage(kWhatPreparedCb, dev->mHandler);
     msg->setPointer(kContextKey, session->mPreparedCb.context);
     msg->setPointer(kAnwKey, window);
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 4658d18..2b9f327 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -151,7 +151,7 @@
 
     camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
 
-    camera_status_t prepareLocked(ACameraWindowType *window);
+    camera_status_t prepareLocked(ANativeWindow *window);
 
     camera_status_t allocateCaptureRequest(
             const ACaptureRequest* request, sp<CaptureRequest>& outReq);
diff --git a/camera/ndk/impl/ACaptureRequest.h b/camera/ndk/impl/ACaptureRequest.h
index 2ffcafe..118c2a5 100644
--- a/camera/ndk/impl/ACaptureRequest.h
+++ b/camera/ndk/impl/ACaptureRequest.h
@@ -22,11 +22,8 @@
 
 using namespace android;
 
-#ifdef __ANDROID_VNDK__
-#include "ndk_vendor/impl/ACaptureRequestVendor.h"
-#else
 struct ACameraOutputTarget {
-    explicit ACameraOutputTarget(ACameraWindowType* window) : mWindow(window) {};
+    explicit ACameraOutputTarget(ANativeWindow* window) : mWindow(window) {};
 
     bool operator == (const ACameraOutputTarget& other) const {
         return mWindow == other.mWindow;
@@ -41,9 +38,8 @@
         return mWindow > other.mWindow;
     }
 
-    ACameraWindowType* mWindow;
+    ANativeWindow* mWindow;
 };
-#endif
 
 struct ACameraOutputTargets {
     std::set<ACameraOutputTarget> mOutputs;
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 099c5c5..cf6b970 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -124,7 +124,7 @@
  */
 typedef void (*ACameraCaptureSession_prepareCallback)(
         void *context,
-        ACameraWindowType *window,
+        ANativeWindow *window,
         ACameraCaptureSession *session);
 
 /// Enum for describing error reason in {@link ACameraCaptureFailure}
@@ -276,7 +276,7 @@
  */
 typedef void (*ACameraCaptureSession_captureCallback_bufferLost)(
         void* context, ACameraCaptureSession* session,
-        ACaptureRequest* request, ACameraWindowType* window, int64_t frameNumber);
+        ACaptureRequest* request, ANativeWindow* window, int64_t frameNumber);
 
 /**
  * ACaptureCaptureSession_captureCallbacks structure used in
@@ -1088,7 +1088,7 @@
  * and no pre-allocation is done.</p>
  *
  * @param session the {@link ACameraCaptureSession} that needs to prepare output buffers.
- * @param window the {@link ACameraWindowType} for which the output buffers need to be prepared.
+ * @param window the {@link ANativeWindow} for which the output buffers need to be prepared.
  *
  * @return <ul><li>
  *             {@link ACAMERA_OK} if the method succeeds</li>
@@ -1102,7 +1102,7 @@
  */
 camera_status_t ACameraCaptureSession_prepareWindow(
     ACameraCaptureSession* session,
-    ACameraWindowType *window) __INTRODUCED_IN(34);
+    ANativeWindow *window) __INTRODUCED_IN(34);
 __END_DECLS
 
 #endif /* _NDK_CAMERA_CAPTURE_SESSION_H */
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index de10eb3..fbd0ee1 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -364,7 +364,7 @@
  * @see ACaptureSessionOutputContainer_add
  */
 camera_status_t ACaptureSessionOutput_create(
-        ACameraWindowType* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(24);
+        ANativeWindow* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(24);
 
 /**
  * Free a ACaptureSessionOutput object.
@@ -705,7 +705,7 @@
  * @see ACaptureSessionOutputContainer_add
  */
 camera_status_t ACaptureSessionSharedOutput_create(
-        ACameraWindowType* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(28);
+        ANativeWindow* anw, /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(28);
 
 /**
  * Add a native window to shared ACaptureSessionOutput.
@@ -723,7 +723,7 @@
  *             ACaptureSessionOutput.</li></ul>
  */
 camera_status_t ACaptureSessionSharedOutput_add(ACaptureSessionOutput *output,
-        ACameraWindowType *anw) __INTRODUCED_IN(28);
+        ANativeWindow *anw) __INTRODUCED_IN(28);
 
 /**
  * Remove a native window from shared ACaptureSessionOutput.
@@ -739,7 +739,7 @@
  *             ACaptureSessionOutput.</li></ul>
  */
 camera_status_t ACaptureSessionSharedOutput_remove(ACaptureSessionOutput *output,
-        ACameraWindowType* anw) __INTRODUCED_IN(28);
+        ANativeWindow* anw) __INTRODUCED_IN(28);
 
 /**
  * Create a new camera capture session similar to {@link ACameraDevice_createCaptureSession}. This
@@ -797,7 +797,7 @@
  * @see ACaptureSessionOutputContainer_add
  */
 camera_status_t ACaptureSessionPhysicalOutput_create(
-        ACameraWindowType* anw, const char* physicalId,
+        ANativeWindow* anw, const char* physicalId,
         /*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(29);
 
 /**
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 2c68cef..1ed17a3 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -76,6 +76,7 @@
     ACAMERA_AUTOMOTIVE_LENS,
     ACAMERA_EXTENSION,
     ACAMERA_JPEGR,
+    ACAMERA_EFV,
     ACAMERA_SECTION_COUNT,
 
     ACAMERA_VENDOR = 0x8000
@@ -123,6 +124,7 @@
     ACAMERA_AUTOMOTIVE_LENS_START  = ACAMERA_AUTOMOTIVE_LENS   << 16,
     ACAMERA_EXTENSION_START        = ACAMERA_EXTENSION         << 16,
     ACAMERA_JPEGR_START            = ACAMERA_JPEGR             << 16,
+    ACAMERA_EFV_START              = ACAMERA_EFV               << 16,
     ACAMERA_VENDOR_START           = ACAMERA_VENDOR            << 16
 } acamera_metadata_section_start_t;
 
@@ -4705,18 +4707,21 @@
      * </ul>
      * <p>should be interpreted in the effective after raw crop field-of-view coordinate system.
      * In this coordinate system,
-     * {preCorrectionActiveArraySize.left, preCorrectionActiveArraySize.top} corresponds to the
+     * {ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.left,
+     *  ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.top} corresponds to
      * the top left corner of the cropped RAW frame and
-     * {preCorrectionActiveArraySize.right, preCorrectionActiveArraySize.bottom} corresponds to
+     * {ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.right,
+     *  ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.bottom} corresponds to
      * the bottom right corner. Client applications must use the values of the keys
      * in the CaptureResult metadata if present.</p>
-     * <p>Crop regions (android.scaler.CropRegion), AE/AWB/AF regions and face coordinates still
+     * <p>Crop regions (ACAMERA_SCALER_CROP_REGION), AE/AWB/AF regions and face coordinates still
      * use the ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE coordinate system as usual.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
      * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
      * @see ACAMERA_LENS_POSE_ROTATION
      * @see ACAMERA_LENS_POSE_TRANSLATION
+     * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_STATISTICS_HOT_PIXEL_MAP
@@ -11524,6 +11529,7 @@
 
 
 
+
 __END_DECLS
 
 #endif /* _NDK_CAMERA_METADATA_TAGS_H */
diff --git a/camera/ndk/include/camera/NdkCameraWindowType.h b/camera/ndk/include/camera/NdkCameraWindowType.h
index 0838fba..2217528 100644
--- a/camera/ndk/include/camera/NdkCameraWindowType.h
+++ b/camera/ndk/include/camera/NdkCameraWindowType.h
@@ -41,14 +41,11 @@
  * camera2 NDK. This enables us to share the api definition headers and avoid
  * code duplication (since the VNDK variant doesn't use ANativeWindow unlike the
  * NDK variant).
+ * @deprecated No longer needed. Both NDK and VNDK use ANativeWindow now.
+ *             Use ANativeWindow directly.
  */
-#ifdef __ANDROID_VNDK__
-#include <cutils/native_handle.h>
-typedef const native_handle_t ACameraWindowType;
-#else
 #include <android/native_window.h>
 typedef ANativeWindow ACameraWindowType;
-#endif
 
 /** @} */
 
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index dc18544..5ccb510 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -99,7 +99,7 @@
  *
  * @see ACaptureRequest_addTarget
  */
-camera_status_t ACameraOutputTarget_create(ACameraWindowType* window,
+camera_status_t ACameraOutputTarget_create(ANativeWindow* window,
         ACameraOutputTarget** output) __INTRODUCED_IN(24);
 
 /**
diff --git a/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h b/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
deleted file mode 100644
index 45098c3..0000000
--- a/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "utils.h"
-
-#include <android/binder_auto_utils.h>
-#include <string>
-#include <set>
-
-using ::android::acam::utils::native_handle_ptr_wrapper;
-
-struct ACaptureSessionOutput {
-    explicit ACaptureSessionOutput(const native_handle_t* window, bool isShared = false,
-            const char* physicalCameraId = "") :
-            mWindow(window), mIsShared(isShared), mPhysicalCameraId(physicalCameraId) {};
-
-    bool operator == (const ACaptureSessionOutput& other) const {
-        return (mWindow == other.mWindow);
-    }
-
-    bool operator != (const ACaptureSessionOutput& other) const {
-        return mWindow != other.mWindow;
-    }
-
-    bool operator < (const ACaptureSessionOutput& other) const {
-        return mWindow < other.mWindow;
-    }
-
-    bool operator > (const ACaptureSessionOutput& other) const {
-        return mWindow > other.mWindow;
-    }
-
-    inline bool isWindowEqual(ACameraWindowType* window) const {
-        return mWindow == native_handle_ptr_wrapper(window);
-    }
-
-    // returns true if the window was successfully added, false otherwise.
-    inline bool addSharedWindow(ACameraWindowType* window) {
-        auto ret = mSharedWindows.insert(window);
-        return ret.second;
-    }
-
-    // returns the number of elements removed.
-    inline size_t removeSharedWindow(ACameraWindowType* window) {
-        return mSharedWindows.erase(window);
-    }
-
-    native_handle_ptr_wrapper mWindow;
-    std::set<native_handle_ptr_wrapper> mSharedWindows;
-    bool           mIsShared;
-    int            mRotation = CAMERA3_STREAM_ROTATION_0;
-    std::string mPhysicalCameraId;
-};
-
-
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index 87102e4..3325da6 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -26,7 +26,7 @@
 #include <aidl/android/frameworks/cameraservice/device/CameraMetadata.h>
 #include <aidl/android/frameworks/cameraservice/device/OutputConfiguration.h>
 #include <aidl/android/frameworks/cameraservice/device/SessionConfiguration.h>
-#include <aidlcommonsupport/NativeHandle.h>
+#include <android/native_window_aidl.h>
 #include <inttypes.h>
 #include <map>
 #include <utility>
@@ -59,6 +59,7 @@
 using AidlCameraMetadata = ::aidl::android::frameworks::cameraservice::device::CameraMetadata;
 using ::aidl::android::frameworks::cameraservice::device::OutputConfiguration;
 using ::aidl::android::frameworks::cameraservice::device::SessionConfiguration;
+using ::aidl::android::view::Surface;
 using ::ndk::ScopedAStatus;
 
 // Static member definitions
@@ -231,8 +232,9 @@
         OutputConfiguration& outputStream = sessionConfig.outputStreams[index];
         outputStream.rotation = utils::convertToAidl(output.mRotation);
         outputStream.windowGroupId = -1;
-        outputStream.windowHandles.resize(output.mSharedWindows.size() + 1);
-        outputStream.windowHandles[0] = std::move(dupToAidl(output.mWindow));
+        auto& surfaces = outputStream.surfaces;
+        surfaces.reserve(output.mSharedWindows.size() + 1);
+        surfaces.emplace_back(output.mWindow);
         outputStream.physicalCameraId = output.mPhysicalCameraId;
         index++;
     }
@@ -298,12 +300,12 @@
 
     OutputConfiguration outConfig;
     outConfig.rotation = utils::convertToAidl(output->mRotation);
-    outConfig.windowHandles.resize(output->mSharedWindows.size() + 1);
-    outConfig.windowHandles[0] = std::move(dupToAidl(output->mWindow));
+    auto& surfaces = outConfig.surfaces;
+    surfaces.reserve(output->mSharedWindows.size() + 1);
+    surfaces.emplace_back(output->mWindow);
     outConfig.physicalCameraId = output->mPhysicalCameraId;
-    int i = 1;
     for (auto& anw : output->mSharedWindows) {
-        outConfig.windowHandles[i++] = std::move(dupToAidl(anw));
+        surfaces.emplace_back(anw);
     }
 
     auto remoteRet = mRemote->updateOutputConfiguration(streamId,
@@ -340,7 +342,7 @@
     return ACAMERA_OK;
 }
 
-camera_status_t CameraDevice::prepareLocked(ACameraWindowType *window) {
+camera_status_t CameraDevice::prepareLocked(ANativeWindow *window) {
     camera_status_t ret = checkCameraClosedOrErrorLocked();
     if (ret != ACAMERA_OK) {
         return ret;
@@ -387,18 +389,19 @@
     std::vector<int32_t> requestSurfaceIdxList;
 
     for (auto& outputTarget : request->targets->mOutputs) {
-        native_handle_ptr_wrapper anw = outputTarget.mWindow;
+        ANativeWindow *anw = outputTarget.mWindow;
         bool found = false;
         req->mSurfaceList.push_back(anw);
         // lookup stream/surface ID
         for (const auto& kvPair : mConfiguredOutputs) {
             int streamId = kvPair.first;
             const OutputConfiguration& outConfig = kvPair.second.second;
-            const auto& windowHandles = outConfig.windowHandles;
-            for (int surfaceId = 0; surfaceId < (int) windowHandles.size(); surfaceId++) {
+            const auto& surfaces = outConfig.surfaces;
+            for (int surfaceId = 0; surfaceId < (int) surfaces.size(); surfaceId++) {
                 // If two window handles point to the same native window,
                 // they have the same surfaces.
-                if (utils::isWindowNativeHandleEqual(anw, windowHandles[surfaceId])) {
+                auto& surface = surfaces[surfaceId];
+                if (anw == surface.get()) {
                     found = true;
                     requestStreamIdxList.push_back(streamId);
                     requestSurfaceIdxList.push_back(surfaceId);
@@ -410,7 +413,7 @@
             }
         }
         if (!found) {
-            ALOGE("Unconfigured output target %p in capture request!", anw.mWindow);
+            ALOGE("Unconfigured output target %p in capture request!", anw);
             return ACAMERA_ERROR_INVALID_PARAMETER;
         }
     }
@@ -470,7 +473,7 @@
     }
     pRequest->targets = new ACameraOutputTargets();
     for (size_t i = 0; i < req->mSurfaceList.size(); i++) {
-        native_handle_ptr_wrapper anw = req->mSurfaceList[i];
+        ANativeWindow *anw = req->mSurfaceList[i];
         ACameraOutputTarget outputTarget(anw);
         pRequest->targets->mOutputs.insert(std::move(outputTarget));
     }
@@ -637,20 +640,21 @@
         return ret;
     }
 
-    std::map<native_handle_ptr_wrapper, OutputConfiguration> handleToConfig;
+    std::map<ANativeWindow *, OutputConfiguration> windowToConfig;
     for (const auto& outConfig : outputs->mOutputs) {
-        native_handle_ptr_wrapper anw = outConfig.mWindow;
+        ANativeWindow *anw = outConfig.mWindow;
         OutputConfiguration outConfigInsert;
         outConfigInsert.rotation = utils::convertToAidl(outConfig.mRotation);
         outConfigInsert.windowGroupId = -1;
-        outConfigInsert.windowHandles.resize(outConfig.mSharedWindows.size() + 1);
-        outConfigInsert.windowHandles[0] = std::move(dupToAidl(anw));
+        auto& surfaces = outConfigInsert.surfaces;
+        surfaces.reserve(outConfig.mSharedWindows.size() + 1);
+        surfaces.emplace_back(anw);
         outConfigInsert.physicalCameraId = outConfig.mPhysicalCameraId;
-        handleToConfig.insert({anw, std::move(outConfigInsert)});
+        windowToConfig.insert({anw, std::move(outConfigInsert)});
     }
 
-    std::set<native_handle_ptr_wrapper> addSet;
-    for (auto& kvPair : handleToConfig) {
+    std::set<ANativeWindow *> addSet;
+    for (auto& kvPair : windowToConfig) {
         addSet.insert(kvPair.first);
     }
 
@@ -663,8 +667,8 @@
         auto& anw = outputPair.first;
         auto& configuredOutput = outputPair.second;
 
-        auto itr = handleToConfig.find(anw);
-        if (itr != handleToConfig.end() && (itr->second) == configuredOutput) {
+        auto itr = windowToConfig.find(anw);
+        if (itr != windowToConfig.end() && (itr->second) == configuredOutput) {
             deleteList.push_back(streamId);
         } else {
             addSet.erase(anw);
@@ -714,13 +718,13 @@
     // add new streams
     for (const auto &anw : addSet) {
         int32_t streamId;
-        auto itr = handleToConfig.find(anw);
+        auto itr = windowToConfig.find(anw);
         remoteRet = mRemote->createStream(itr->second, &streamId);
         CHECK_TRANSACTION_AND_RET(remoteRet, "createStream()")
         mConfiguredOutputs.insert(std::make_pair(streamId,
                                                  std::make_pair(anw,
                                                                 std::move(itr->second))));
-        handleToConfig.erase(itr);
+        windowToConfig.erase(itr);
     }
 
     AidlCameraMetadata aidlParams;
@@ -867,9 +871,9 @@
         // Get the surfaces corresponding to the error stream id, go through
         // them and try to match the surfaces in the corresponding
         // CaptureRequest.
-        const auto& errorWindowHandles =
-                outputPairIt->second.second.windowHandles;
-        for (const auto& errorWindowHandle : errorWindowHandles) {
+        const auto& errorSurfaces =
+                outputPairIt->second.second.surfaces;
+        for (const auto& errorSurface : errorSurfaces) {
             for (const auto &requestStreamAndWindowId :
                         request->mCaptureRequest.streamAndWindowIds) {
                 // Go through the surfaces in the capture request and see which
@@ -884,12 +888,11 @@
                     return;
                 }
 
-                const auto &requestWindowHandles =
-                        requestSurfacePairIt->second.second.windowHandles;
+                const auto &requestSurfaces = requestSurfacePairIt->second.second.surfaces;
+                auto& requestSurface = requestSurfaces[requestWindowId];
 
-                if (requestWindowHandles[requestWindowId] == errorWindowHandle) {
-                    const native_handle_t* anw = makeFromAidl(
-                            requestWindowHandles[requestWindowId]);
+                if (requestSurface == errorSurface) {
+                    const ANativeWindow *anw = requestSurface.get();
                     ALOGV("Camera %s Lost output buffer for ANW %p frame %" PRId64,
                             getId(), anw, frameNumber);
 
@@ -1085,7 +1088,7 @@
                     if (onWindowPrepared == nullptr) {
                         return;
                     }
-                    native_handle_t* anw;
+                    ANativeWindow* anw;
                     found = msg->findPointer(kAnwKey, (void**) &anw);
                     if (!found) {
                         ALOGE("%s: Cannot find ANativeWindow: %d!", __FUNCTION__, __LINE__);
@@ -1342,10 +1345,10 @@
                         return;
                     }
 
-                    native_handle_t* anw;
+                    ANativeWindow* anw;
                     found = msg->findPointer(kAnwKey, (void**) &anw);
                     if (!found) {
-                        ALOGE("%s: Cannot find native_handle_t!", __FUNCTION__);
+                        ALOGE("%s: Cannot find ANativeWindow!", __FUNCTION__);
                         return;
                     }
 
@@ -1359,7 +1362,6 @@
                     ACaptureRequest* request = allocateACaptureRequest(requestSp, id_cstr);
                     (*onBufferLost)(context, session.get(), request, anw, frameNumber);
                     freeACaptureRequest(request);
-                    native_handle_delete(anw); // clean up anw as it was copied from AIDL
                     break;
                 }
             }
@@ -1842,7 +1844,7 @@
         return ScopedAStatus::ok();
     }
     // We've found the window corresponding to the surface id.
-    const native_handle_t *anw = it->second.first.mWindow;
+    const ANativeWindow *anw = it->second.first;
     sp<AMessage> msg = new AMessage(kWhatPreparedCb, dev->mHandler);
     msg->setPointer(kContextKey, session->mPreparedCb.context);
     msg->setPointer(kAnwKey, (void *)anw);
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 6e0c772..b771d47 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -66,7 +66,6 @@
 using ::aidl::android::frameworks::cameraservice::service::CameraStatusAndId;
 using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
 using ::android::AidlMessageQueue;
-using ::android::acam::utils::native_handle_ptr_wrapper;
 
 
 using ResultMetadataQueue = AidlMessageQueue<int8_t, SynchronizedReadWrite>;
@@ -197,7 +196,7 @@
 
     camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
 
-    camera_status_t prepareLocked(ACameraWindowType *window);
+    camera_status_t prepareLocked(ANativeWindow *window);
 
     // Since this writes to ICameraDeviceUser's fmq, clients must take care that:
     //   a) This function is called serially.
@@ -236,7 +235,7 @@
 
     // stream id -> pair of (ACameraWindowType* from application, OutputConfiguration used for
     // camera service)
-    std::map<int, std::pair<native_handle_ptr_wrapper, OutputConfiguration>> mConfiguredOutputs;
+    std::map<int, std::pair<ANativeWindow *, OutputConfiguration>> mConfiguredOutputs;
 
     // TODO: maybe a bool will suffice for synchronous implementation?
     std::atomic_bool mClosing;
diff --git a/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h b/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h
deleted file mode 100644
index fcb7e34..0000000
--- a/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "utils.h"
-
-using ::android::acam::utils::native_handle_ptr_wrapper;
-struct ACameraOutputTarget {
-    explicit ACameraOutputTarget(const native_handle_t* window) : mWindow(window) {};
-
-    bool operator == (const ACameraOutputTarget& other) const {
-        return mWindow == other.mWindow;
-    }
-    bool operator != (const ACameraOutputTarget& other) const {
-        return mWindow != other.mWindow;
-    }
-    bool operator < (const ACameraOutputTarget& other) const {
-        return mWindow < other.mWindow;
-    }
-    bool operator > (const ACameraOutputTarget& other) const {
-        return mWindow > other.mWindow;
-    }
-
-    native_handle_ptr_wrapper mWindow;
-};
diff --git a/camera/ndk/ndk_vendor/impl/utils.cpp b/camera/ndk/ndk_vendor/impl/utils.cpp
index 73a527b..3971c73 100644
--- a/camera/ndk/ndk_vendor/impl/utils.cpp
+++ b/camera/ndk/ndk_vendor/impl/utils.cpp
@@ -18,7 +18,6 @@
 
 #include "utils.h"
 
-#include <aidlcommonsupport/NativeHandle.h>
 #include <utils/Log.h>
 
 namespace android {
@@ -138,51 +137,6 @@
     return ret;
 }
 
-bool isWindowNativeHandleEqual(const native_handle_t *nh1, const native_handle_t *nh2) {
-    if (nh1->numFds !=0 || nh2->numFds !=0) {
-        ALOGE("Invalid window native handles being compared");
-        return false;
-    }
-    if (nh1->version != nh2->version || nh1->numFds != nh2->numFds ||
-        nh1->numInts != nh2->numInts) {
-        return false;
-    }
-    for (int i = 0; i < nh1->numInts; i++) {
-        if(nh1->data[i] != nh2->data[i]) {
-            return false;
-        }
-    }
-    return true;
-}
-
-bool isWindowNativeHandleEqual(const native_handle_t *nh1,
-                               const aidl::android::hardware::common::NativeHandle& nh2) {
-    native_handle_t* tempNh = makeFromAidl(nh2);
-    bool equal = isWindowNativeHandleEqual(nh1, tempNh);
-    native_handle_delete(tempNh);
-    return equal;
-}
-
-bool isWindowNativeHandleLessThan(const native_handle_t *nh1, const native_handle_t *nh2) {
-    if (isWindowNativeHandleEqual(nh1, nh2)) {
-        return false;
-    }
-    if (nh1->numInts != nh2->numInts) {
-        return nh1->numInts < nh2->numInts;
-    }
-
-    for (int i = 0; i < nh1->numInts; i++) {
-        if (nh1->data[i] != nh2->data[i]) {
-            return nh1->data[i] < nh2->data[i];
-        }
-    }
-    return false;
-}
-
-bool isWindowNativeHandleGreaterThan(const native_handle_t *nh1, const native_handle_t *nh2) {
-    return !isWindowNativeHandleLessThan(nh1, nh2) && !isWindowNativeHandleEqual(nh1, nh2);
-}
-
 } // namespace utils
 } // namespace acam
 } // namespace android
diff --git a/camera/ndk/ndk_vendor/impl/utils.h b/camera/ndk/ndk_vendor/impl/utils.h
index 7ad74ad..d0dd2fc 100644
--- a/camera/ndk/ndk_vendor/impl/utils.h
+++ b/camera/ndk/ndk_vendor/impl/utils.h
@@ -38,53 +38,14 @@
 using ::aidl::android::frameworks::cameraservice::device::OutputConfiguration;
 using ::aidl::android::frameworks::cameraservice::device::PhysicalCameraSettings;
 using ::aidl::android::frameworks::cameraservice::device::TemplateId;
-using ::aidl::android::hardware::common::NativeHandle;
 using ::android::hardware::camera::common::V1_0::helper::CameraMetadata;
 using AidlCameraMetadata = ::aidl::android::frameworks::cameraservice::device::CameraMetadata;
 using AidlCaptureRequest = ::aidl::android::frameworks::cameraservice::device::CaptureRequest;
 
-bool isWindowNativeHandleEqual(const native_handle_t *nh1, const native_handle_t *nh2);
-
-bool isWindowNativeHandleEqual(const native_handle_t* nh1, const NativeHandle& nh2);
-
-bool isWindowNativeHandleLessThan(const native_handle_t *nh1, const native_handle_t *nh2);
-
-// Convenience wrapper over isWindowNativeHandleLessThan and isWindowNativeHandleEqual
-bool isWindowNativeHandleGreaterThan(const native_handle_t *nh1, const native_handle_t *nh2);
-
-// Utility class so the native_handle_t can be compared with  its contents instead
-// of just raw pointer comparisons.
-struct native_handle_ptr_wrapper {
-    const native_handle_t *mWindow = nullptr;
-
-    native_handle_ptr_wrapper(const native_handle_t *nh) : mWindow(nh) { }
-
-    native_handle_ptr_wrapper() = default;
-
-    operator const native_handle_t *() const { return mWindow; }
-
-    bool operator ==(const native_handle_ptr_wrapper other) const {
-        return isWindowNativeHandleEqual(mWindow, other.mWindow);
-    }
-
-    bool operator != (const native_handle_ptr_wrapper& other) const {
-        return !isWindowNativeHandleEqual(mWindow, other.mWindow);
-    }
-
-    bool operator < (const native_handle_ptr_wrapper& other) const {
-        return isWindowNativeHandleLessThan(mWindow, other.mWindow);
-    }
-
-    bool operator > (const native_handle_ptr_wrapper& other) const {
-        return !isWindowNativeHandleGreaterThan(mWindow, other.mWindow);
-    }
-
-};
-
 // Utility class so that CaptureRequest can be stored by sp<>
 struct CaptureRequest: public RefBase {
   AidlCaptureRequest mCaptureRequest;
-  std::vector<native_handle_ptr_wrapper> mSurfaceList;
+  std::vector<ANativeWindow *> mSurfaceList;
   // Physical camera settings metadata is stored here, as the capture request
   // might not contain it. That's since, fmq might have consumed it.
   std::vector<PhysicalCameraSettings> mPhysicalCameraSettings;
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 74c6cad..0259359 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -31,8 +31,6 @@
 #include <stdio.h>
 
 #include <android/log.h>
-#include <android/hidl/manager/1.2/IServiceManager.h>
-#include <android/hidl/token/1.0/ITokenManager.h>
 #include <camera/NdkCameraError.h>
 #include <camera/NdkCameraManager.h>
 #include <camera/NdkCameraDevice.h>
@@ -40,7 +38,6 @@
 #include <hidl/ServiceManagement.h>
 #include <media/NdkImage.h>
 #include <media/NdkImageReader.h>
-#include <cutils/native_handle.h>
 #include <VendorTagDescriptor.h>
 
 namespace {
@@ -53,9 +50,7 @@
 static constexpr int kTestImageFormat = AIMAGE_FORMAT_YUV_420_888;
 
 using android::hardware::camera::common::V1_0::helper::VendorTagDescriptorCache;
-using android::hidl::manager::V1_0::IServiceManager;
-using android::hidl::token::V1_0::ITokenManager;
-using ConfiguredWindows = std::set<const native_handle_t *>;
+using ConfiguredWindows = std::set<ANativeWindow*>;
 
 class CameraHelper {
    public:
@@ -65,11 +60,11 @@
 
     struct PhysicalImgReaderInfo {
         const char* physicalCameraId;
-        const native_handle_t* anw;
+        ANativeWindow* anw;
     };
 
     // Retaining the error code in case the caller needs to analyze it.
-    std::variant<int, ConfiguredWindows> initCamera(const native_handle_t* imgReaderAnw,
+    std::variant<int, ConfiguredWindows> initCamera(ANativeWindow* imgReaderAnw,
             const std::vector<PhysicalImgReaderInfo>& physicalImgReaders,
             bool usePhysicalSettings, bool prepareWindows = false) {
         ConfiguredWindows configuredWindows;
@@ -109,7 +104,7 @@
         }
         configuredWindows.insert(mImgReaderAnw);
         std::vector<const char*> idPointerList;
-        std::set<const native_handle_t*> physicalStreamMap;
+        std::set<ANativeWindow*> physicalStreamMap;
         for (auto& physicalStream : physicalImgReaders) {
             ACaptureSessionOutput* sessionOutput = nullptr;
             ret = ACaptureSessionPhysicalOutput_create(physicalStream.anw,
@@ -301,7 +296,7 @@
 
 
    private:
-    static void onPreparedCb(void* obj, ACameraWindowType *anw, ACameraCaptureSession *session) {
+    static void onPreparedCb(void* obj, ANativeWindow *anw, ACameraCaptureSession *session) {
         CameraHelper* thiz = reinterpret_cast<CameraHelper*>(obj);
         thiz->handlePrepared(anw, session);
     }
@@ -317,7 +312,7 @@
         return ret;
     }
 
-    void handlePrepared(ACameraWindowType *anw, ACameraCaptureSession *session) {
+    void handlePrepared(ANativeWindow *anw, ACameraCaptureSession *session) {
         // Reduce the pending prepared count of anw by 1. If count is  0, remove the key.
         std::lock_guard<std::mutex> lock(mMutex);
         if (session != mSession) {
@@ -334,7 +329,7 @@
             mPendingPreparedCbs.erase(anw);
         }
     }
-    void incPendingPrepared(ACameraWindowType *anw) {
+    void incPendingPrepared(ANativeWindow *anw) {
         std::lock_guard<std::mutex> lock(mMutex);
         if ((mPendingPreparedCbs.find(anw) == mPendingPreparedCbs.end())) {
             mPendingPreparedCbs[anw] = 1;
@@ -344,13 +339,13 @@
     }
 
     // ANW -> pending prepared callbacks
-    std::unordered_map<ACameraWindowType *, int> mPendingPreparedCbs;
+    std::unordered_map<ANativeWindow*, int> mPendingPreparedCbs;
     ACameraDevice_StateCallbacks mDeviceCb{this, nullptr, nullptr};
     ACameraCaptureSession_stateCallbacks mSessionCb{ this, nullptr, nullptr, nullptr};
 
     ACameraCaptureSession_prepareCallback mPreparedCb = &onPreparedCb;
 
-    const native_handle_t* mImgReaderAnw = nullptr;  // not owned by us.
+    ANativeWindow* mImgReaderAnw = nullptr;  // not owned by us.
 
     // Camera device
     ACameraDevice* mDevice = nullptr;
@@ -484,7 +479,7 @@
     ~ImageReaderTestCase() {
         if (mImgReaderAnw) {
             AImageReader_delete(mImgReader);
-            // No need to call native_handle_t_release on imageReaderAnw
+            // No need to call ANativeWindow_release on mImgReaderAnw.
         }
     }
 
@@ -514,17 +509,18 @@
             return ret;
         }
 
-        ret = AImageReader_getWindowNativeHandle(mImgReader, &mImgReaderAnw);
+
+        ret = AImageReader_getWindow(mImgReader, &mImgReaderAnw);
         if (ret != AMEDIA_OK || mImgReaderAnw == nullptr) {
-            ALOGE("Failed to get native_handle_t from AImageReader, ret=%d, mImgReaderAnw=%p.", ret,
-                  mImgReaderAnw);
+            ALOGE("Failed to get ANativeWindow* from AImageReader, ret=%d, mImgReader=%p.", ret,
+                  mImgReader);
             return -1;
         }
 
         return 0;
     }
 
-    const native_handle_t* getNativeWindow() { return mImgReaderAnw; }
+    ANativeWindow* getNativeWindow() { return mImgReaderAnw; }
 
     int getAcquiredImageCount() {
         std::lock_guard<std::mutex> lock(mMutex);
@@ -657,7 +653,7 @@
     int mAcquiredImageCount{0};
 
     AImageReader* mImgReader = nullptr;
-    native_handle_t* mImgReaderAnw = nullptr;
+    ANativeWindow* mImgReaderAnw = nullptr;
 
     AImageReader_ImageListener mReaderAvailableCb{this, onImageAvailable};
     AImageReader_BufferRemovedListener mReaderDetachedCb{this, onBufferRemoved};
@@ -985,20 +981,12 @@
 
 
 
-TEST_F(AImageReaderVendorTest, CreateWindowNativeHandle) {
-    auto transport = android::hardware::defaultServiceManager()->getTransport(ITokenManager::descriptor, "default");
-    if (transport.isOk() && transport == IServiceManager::Transport::EMPTY) {
-        GTEST_SKIP() << "This device no longer supports AImageReader_getWindowNativeHandle";
-    }
+TEST_F(AImageReaderVendorTest, CreateANativeWindow) {
     testBasicTakePictures(/*prepareSurfaces*/ false);
     testBasicTakePictures(/*prepareSurfaces*/ true);
 }
 
 TEST_F(AImageReaderVendorTest, LogicalCameraPhysicalStream) {
-    auto transport = android::hardware::defaultServiceManager()->getTransport(ITokenManager::descriptor, "default");
-    if (transport.isOk() && transport == IServiceManager::Transport::EMPTY) {
-        GTEST_SKIP() << "This device no longer supports AImageReader_getWindowNativeHandle";
-    }
     for (auto & v2 : {true, false}) {
         testLogicalCameraPhysicalStream(false/*usePhysicalSettings*/, v2);
         testLogicalCameraPhysicalStream(true/*usePhysicalSettings*/, v2);
diff --git a/camera/tests/Android.bp b/camera/tests/Android.bp
index 65b8b41..9aaac6a 100644
--- a/camera/tests/Android.bp
+++ b/camera/tests/Android.bp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 package {
+    default_team: "trendy_team_camera_framework",
     // See: http://go/android-license-faq
     default_applicable_licenses: [
         "frameworks_av_camera_license",
diff --git a/camera/tests/fuzzer/Android.bp b/camera/tests/fuzzer/Android.bp
index b74b7a1..bd97c39 100644
--- a/camera/tests/fuzzer/Android.bp
+++ b/camera/tests/fuzzer/Android.bp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 package {
+    default_team: "trendy_team_camera_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_camera_license"
diff --git a/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp b/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp
index 07efc20..8371905 100644
--- a/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_Parameters_fuzzer.cpp
@@ -16,14 +16,19 @@
 
 #include <CameraParameters.h>
 #include <CameraParameters2.h>
+#include <camera/StringUtils.h>
 #include <fcntl.h>
 #include <fuzzer/FuzzedDataProvider.h>
 #include <utils/String16.h>
 #include <camera/StringUtils.h>
 
+#include <functional>
+
 using namespace std;
 using namespace android;
 
+constexpr int8_t kMaxBytes = 20;
+
 string kValidFormats[] = {
         CameraParameters::PIXEL_FORMAT_YUV422SP,      CameraParameters::PIXEL_FORMAT_YUV420SP,
         CameraParameters::PIXEL_FORMAT_YUV422I,       CameraParameters::PIXEL_FORMAT_YUV420P,
@@ -34,26 +39,22 @@
 class CameraParametersFuzzer {
   public:
     void process(const uint8_t* data, size_t size);
-    ~CameraParametersFuzzer() {
-        delete mCameraParameters;
-        delete mCameraParameters2;
-    }
 
   private:
     void invokeCameraParameters();
     template <class type>
-    void initCameraParameters(type** obj);
+    void initCameraParameters(unique_ptr<type>& obj);
     template <class type>
-    void cameraParametersCommon(type* obj);
-    CameraParameters* mCameraParameters = nullptr;
-    CameraParameters2* mCameraParameters2 = nullptr;
+    void callCameraParametersAPIs(unique_ptr<type>& obj);
+    unique_ptr<CameraParameters> mCameraParameters;
+    unique_ptr<CameraParameters2> mCameraParameters2;
     FuzzedDataProvider* mFDP = nullptr;
 };
 
 template <class type>
-void CameraParametersFuzzer::initCameraParameters(type** obj) {
+void CameraParametersFuzzer::initCameraParameters(unique_ptr<type>& obj) {
     if (mFDP->ConsumeBool()) {
-        *obj = new type();
+        obj = make_unique<type>();
     } else {
         string params;
         if (mFDP->ConsumeBool()) {
@@ -61,94 +62,176 @@
             int32_t height = mFDP->ConsumeIntegral<int32_t>();
             int32_t minFps = mFDP->ConsumeIntegral<int32_t>();
             int32_t maxFps = mFDP->ConsumeIntegral<int32_t>();
-            params = CameraParameters::KEY_SUPPORTED_VIDEO_SIZES;
+            params = mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                         : CameraParameters::KEY_SUPPORTED_VIDEO_SIZES;
             params += '=' + to_string(width) + 'x' + to_string(height) + ';';
             if (mFDP->ConsumeBool()) {
-                params += CameraParameters::KEY_PREVIEW_FPS_RANGE;
+                params += mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                              : CameraParameters::KEY_PREVIEW_FPS_RANGE;
                 params += '=' + to_string(minFps) + ',' + to_string(maxFps) + ';';
             }
             if (mFDP->ConsumeBool()) {
-                params += CameraParameters::KEY_SUPPORTED_PICTURE_SIZES;
+                params += mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                              : CameraParameters::KEY_SUPPORTED_PICTURE_SIZES;
                 params += '=' + to_string(width) + 'x' + to_string(height) + ';';
             }
             if (mFDP->ConsumeBool()) {
-                params += CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS;
-                params += '=' + mFDP->PickValueInArray(kValidFormats) + ';';
+                params += mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                              : CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS;
+                params += '=' +
+                          (mFDP->ConsumeBool() ? mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()
+                                               : mFDP->PickValueInArray(kValidFormats)) + ';';
             }
         } else {
-            params = mFDP->ConsumeRandomLengthString();
+            params = mFDP->ConsumeRandomLengthString(kMaxBytes);
         }
-        *obj = new type(toString8(params));
+        obj = make_unique<type>(toString8(params));
     }
 }
 
 template <class type>
-void CameraParametersFuzzer::cameraParametersCommon(type* obj) {
-    Vector<Size> supportedPreviewSizes;
-    obj->getSupportedPreviewSizes(supportedPreviewSizes);
-    int32_t previewWidth = mFDP->ConsumeIntegral<int32_t>();
-    int32_t previewHeight = mFDP->ConsumeIntegral<int32_t>();
-    obj->setPreviewSize(previewWidth, previewHeight);
-    obj->getPreviewSize(&previewWidth, &previewHeight);
-
+void CameraParametersFuzzer::callCameraParametersAPIs(unique_ptr<type>& obj) {
     Vector<Size> supportedVideoSizes;
-    obj->getSupportedVideoSizes(supportedVideoSizes);
-    if (supportedVideoSizes.size() != 0) {
-        int32_t videoWidth, videoHeight, preferredVideoWidth, preferredVideoHeight;
-        if (mFDP->ConsumeBool()) {
-            int32_t idx = mFDP->ConsumeIntegralInRange<int32_t>(0, supportedVideoSizes.size() - 1);
-            obj->setVideoSize(supportedVideoSizes[idx].width, supportedVideoSizes[idx].height);
-        } else {
-            videoWidth = mFDP->ConsumeIntegral<int32_t>();
-            videoHeight = mFDP->ConsumeIntegral<int32_t>();
-            obj->setVideoSize(videoWidth, videoHeight);
-        }
-        obj->getVideoSize(&videoWidth, &videoHeight);
-        obj->getPreferredPreviewSizeForVideo(&preferredVideoWidth, &preferredVideoHeight);
-    }
-
-    int32_t fps = mFDP->ConsumeIntegral<int32_t>();
-    obj->setPreviewFrameRate(fps);
-    obj->getPreviewFrameRate();
-    string previewFormat = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFormats)
-                                               : mFDP->ConsumeRandomLengthString();
-    obj->setPreviewFormat(previewFormat.c_str());
-
-    int32_t pictureWidth = mFDP->ConsumeIntegral<int32_t>();
-    int32_t pictureHeight = mFDP->ConsumeIntegral<int32_t>();
-    Vector<Size> supportedPictureSizes;
-    obj->setPictureSize(pictureWidth, pictureHeight);
-    obj->getPictureSize(&pictureWidth, &pictureHeight);
-    obj->getSupportedPictureSizes(supportedPictureSizes);
-    string pictureFormat = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFormats)
-                                               : mFDP->ConsumeRandomLengthString();
-    obj->setPictureFormat(pictureFormat.c_str());
-    obj->getPictureFormat();
-
-    if (mFDP->ConsumeBool()) {
-        obj->dump();
-    } else {
-        int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
-        Vector<String16> args = {};
-        obj->dump(fd, args);
-        close(fd);
+    while (mFDP->remaining_bytes()) {
+        auto callCameraUtilsAPIs = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() {
+                    Vector<Size> supportedPreviewSizes;
+                    obj->getSupportedPreviewSizes(supportedPreviewSizes);
+                },
+                [&]() {
+                    int32_t previewWidth = mFDP->ConsumeIntegral<int32_t>();
+                    int32_t previewHeight = mFDP->ConsumeIntegral<int32_t>();
+                    obj->setPreviewSize(previewWidth, previewHeight);
+                },
+                [&]() {
+                    int32_t previewWidth, previewHeight;
+                    obj->getPreviewSize(&previewWidth, &previewHeight);
+                },
+                [&]() { obj->getSupportedVideoSizes(supportedVideoSizes); },
+                [&]() {
+                    int32_t videoWidth, videoHeight;
+                    if (supportedVideoSizes.size()) {
+                        int32_t idx = mFDP->ConsumeIntegralInRange<int32_t>(
+                                0, supportedVideoSizes.size() - 1);
+                        videoWidth = mFDP->ConsumeBool() ? supportedVideoSizes[idx].width
+                                                         : mFDP->ConsumeIntegral<int32_t>();
+                        videoHeight = mFDP->ConsumeBool() ? supportedVideoSizes[idx].height
+                                                          : mFDP->ConsumeIntegral<int32_t>();
+                        obj->setVideoSize(videoWidth, videoHeight);
+                    }
+                },
+                [&]() {
+                    int32_t videoWidth, videoHeight;
+                    obj->getVideoSize(&videoWidth, &videoHeight);
+                },
+                [&]() {
+                    int32_t preferredVideoWidth, preferredVideoHeight;
+                    obj->getPreferredPreviewSizeForVideo(&preferredVideoWidth,
+                                                         &preferredVideoHeight);
+                },
+                [&]() {
+                    int32_t fps = mFDP->ConsumeIntegral<int32_t>();
+                    obj->setPreviewFrameRate(fps);
+                },
+                [&]() { obj->getPreviewFrameRate(); },
+                [&]() {
+                    string previewFormat = mFDP->ConsumeBool()
+                                                   ? mFDP->PickValueInArray(kValidFormats)
+                                                   : mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    obj->setPreviewFormat(previewFormat.c_str());
+                },
+                [&]() {
+                    int32_t pictureWidth = mFDP->ConsumeIntegral<int32_t>();
+                    int32_t pictureHeight = mFDP->ConsumeIntegral<int32_t>();
+                    obj->setPictureSize(pictureWidth, pictureHeight);
+                },
+                [&]() {
+                    int32_t pictureWidth, pictureHeight;
+                    obj->getPictureSize(&pictureWidth, &pictureHeight);
+                },
+                [&]() {
+                    Vector<Size> supportedPictureSizes;
+                    obj->getSupportedPictureSizes(supportedPictureSizes);
+                },
+                [&]() {
+                    string pictureFormat = mFDP->ConsumeBool()
+                                                   ? mFDP->PickValueInArray(kValidFormats)
+                                                   : mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    obj->setPictureFormat(pictureFormat.c_str());
+                },
+                [&]() { obj->getPictureFormat(); },
+                [&]() {
+                    if (mFDP->ConsumeBool()) {
+                        obj->dump();
+                    } else {
+                        int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+                        Vector<String16> args = {};
+                        obj->dump(fd, args);
+                        close(fd);
+                    }
+                },
+                [&]() { obj->flatten(); },
+                [&]() {
+                    string key = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    float value = mFDP->ConsumeFloatingPoint<float>();
+                    obj->setFloat(key.c_str(), value);
+                },
+                [&]() {
+                    string key = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    obj->getFloat(key.c_str());
+                },
+                [&]() { obj->getPreviewFormat(); },
+                [&]() {
+                    string key = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                    obj->remove(key.c_str());
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters>) {
+                        string format = mFDP->ConsumeBool()
+                                                ? mFDP->ConsumeRandomLengthString(kMaxBytes)
+                                                : mFDP->PickValueInArray(kValidFormats);
+                        mCameraParameters->previewFormatToEnum(format.c_str());
+                    }
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters>) {
+                        mCameraParameters->isEmpty();
+                    }
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters>) {
+                        Vector<int32_t> formats;
+                        mCameraParameters->getSupportedPreviewFormats(formats);
+                    }
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters2>) {
+                        string key1 = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                        string key2 = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                        int32_t order;
+                        mCameraParameters2->compareSetOrder(key1.c_str(), key2.c_str(), &order);
+                    }
+                },
+                [&]() {
+                    if (std::is_same_v<type, CameraParameters2>) {
+                        int32_t minFps = mFDP->ConsumeIntegral<int32_t>();
+                        int32_t maxFps = mFDP->ConsumeIntegral<int32_t>();
+                        mCameraParameters2->setPreviewFpsRange(minFps, maxFps);
+                    }
+                },
+        });
+        callCameraUtilsAPIs();
     }
 }
 
 void CameraParametersFuzzer::invokeCameraParameters() {
-    initCameraParameters<CameraParameters>(&mCameraParameters);
-    cameraParametersCommon<CameraParameters>(mCameraParameters);
-    initCameraParameters<CameraParameters2>(&mCameraParameters2);
-    cameraParametersCommon<CameraParameters2>(mCameraParameters2);
-
-    int32_t minFPS, maxFPS;
-    mCameraParameters->getPreviewFpsRange(&minFPS, &maxFPS);
-    string format = mFDP->ConsumeBool() ? mFDP->PickValueInArray(kValidFormats)
-                                        : mFDP->ConsumeRandomLengthString();
-    mCameraParameters->previewFormatToEnum(format.c_str());
-    mCameraParameters->isEmpty();
-    Vector<int32_t> formats;
-    mCameraParameters->getSupportedPreviewFormats(formats);
+    if (mFDP->ConsumeBool()) {
+        initCameraParameters<CameraParameters>(mCameraParameters);
+        callCameraParametersAPIs(mCameraParameters);
+    } else {
+        initCameraParameters<CameraParameters2>(mCameraParameters2);
+        callCameraParametersAPIs(mCameraParameters2);
+    }
 }
 
 void CameraParametersFuzzer::process(const uint8_t* data, size_t size) {
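The rewritten fuzzer above (and the ones that follow) all use the same FuzzedDataProvider idiom: while input bytes remain, pick one lambda out of an array and invoke it, so the APIs are exercised in a data-driven order rather than a fixed sequence. A minimal, self-contained sketch of that idiom, using only the public FuzzedDataProvider API with purely illustrative names, could look like this:

    // Sketch of the lambda-dispatch fuzzing loop; all names here are illustrative.
    #include <fuzzer/FuzzedDataProvider.h>

    #include <functional>
    #include <string>

    constexpr size_t kMaxBytes = 20;  // bound string sizes, as the fuzzers above do

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
        FuzzedDataProvider fdp(data, size);
        std::string lastString;
        while (fdp.remaining_bytes()) {
            // Each iteration consumes input to choose exactly one operation.
            auto invokeApi = fdp.PickValueInArray<const std::function<void()>>({
                    [&]() { lastString = fdp.ConsumeRandomLengthString(kMaxBytes); },
                    [&]() { (void)fdp.ConsumeIntegral<int32_t>(); },
                    [&]() { (void)fdp.ConsumeBool(); },
            });
            invokeApi();
        }
        return 0;
    }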
diff --git a/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp b/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp
index 494ec1b..5ad9530 100644
--- a/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_c2CaptureRequest_fuzzer.cpp
@@ -44,7 +44,7 @@
     }
 
     for (size_t idx = 0; idx < physicalCameraSettingsSize; ++idx) {
-        string id = fdp.ConsumeRandomLengthString();
+        string id = fdp.ConsumeRandomLengthString(kMaxBytes);
         if (fdp.ConsumeBool()) {
             parcelCamCaptureReq.writeString16(toString16(id));
         }
@@ -120,7 +120,11 @@
         }
     }
 
-    invokeReadWriteParcelsp<CaptureRequest>(captureRequest);
+    if (fdp.ConsumeBool()) {
+        invokeReadWriteParcelsp<CaptureRequest>(captureRequest);
+    } else {
+        invokeNewReadWriteParcelsp<CaptureRequest>(captureRequest, fdp);
+    }
     invokeReadWriteNullParcelsp<CaptureRequest>(captureRequest);
     parcelCamCaptureReq.setDataPosition(0);
     captureRequest->readFromParcel(&parcelCamCaptureReq);
diff --git a/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp b/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp
index 2fe9a94..7046075 100644
--- a/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_c2OutputConfiguration_fuzzer.cpp
@@ -26,85 +26,122 @@
 using namespace android;
 using namespace android::hardware::camera2::params;
 
+constexpr int8_t kMaxLoopIterations = 100;
 constexpr int32_t kSizeMin = 0;
 constexpr int32_t kSizeMax = 1000;
 
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
-    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+class C2OutputConfigurationFuzzer {
+  public:
+    void process(const uint8_t* data, size_t size);
 
-    OutputConfiguration* outputConfiguration = nullptr;
+  private:
+    void invokeC2OutputConfigFuzzer();
+    unique_ptr<OutputConfiguration> getC2OutputConfig();
+    sp<IGraphicBufferProducer> createIGraphicBufferProducer();
+    FuzzedDataProvider* mFDP = nullptr;
+};
 
-    if (fdp.ConsumeBool()) {
-        outputConfiguration = new OutputConfiguration();
+sp<IGraphicBufferProducer> C2OutputConfigurationFuzzer::createIGraphicBufferProducer() {
+    sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+    sp<SurfaceControl> surfaceControl = composerClient->createSurface(
+            static_cast<String8>(mFDP->ConsumeRandomLengthString(kMaxBytes).c_str()) /* name */,
+            mFDP->ConsumeIntegral<uint32_t>() /* width */,
+            mFDP->ConsumeIntegral<uint32_t>() /* height */,
+            mFDP->ConsumeIntegral<int32_t>() /* format */,
+            mFDP->ConsumeIntegral<int32_t>() /* flags */);
+    if (surfaceControl) {
+        sp<Surface> surface = surfaceControl->getSurface();
+        return surface->getIGraphicBufferProducer();
     } else {
-        int32_t rotation = fdp.ConsumeIntegral<int32_t>();
-        string physicalCameraId = fdp.ConsumeRandomLengthString();
-        int32_t surfaceSetID = fdp.ConsumeIntegral<int32_t>();
-        bool isShared = fdp.ConsumeBool();
-
-        if (fdp.ConsumeBool()) {
-            sp<IGraphicBufferProducer> iGBP = nullptr;
-            sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
-            sp<SurfaceControl> surfaceControl = composerClient->createSurface(
-                    static_cast<String8>(fdp.ConsumeRandomLengthString().c_str()) /* name */,
-                    fdp.ConsumeIntegral<uint32_t>() /* width */,
-                    fdp.ConsumeIntegral<uint32_t>() /* height */,
-                    fdp.ConsumeIntegral<int32_t>() /* format */,
-                    fdp.ConsumeIntegral<int32_t>() /* flags */);
-            if (surfaceControl) {
-                sp<Surface> surface = surfaceControl->getSurface();
-                iGBP = surface->getIGraphicBufferProducer();
-            }
-            outputConfiguration = new OutputConfiguration(iGBP, rotation, physicalCameraId,
-                                                          surfaceSetID, isShared);
-            iGBP.clear();
-            composerClient.clear();
-            surfaceControl.clear();
-        } else {
-            size_t iGBPSize = fdp.ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
-            vector<sp<IGraphicBufferProducer>> iGBPs;
-            for (size_t idx = 0; idx < iGBPSize; ++idx) {
-                sp<IGraphicBufferProducer> iGBP = nullptr;
-                sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
-                sp<SurfaceControl> surfaceControl = composerClient->createSurface(
-                        static_cast<String8>(fdp.ConsumeRandomLengthString().c_str()) /* name */,
-                        fdp.ConsumeIntegral<uint32_t>() /* width */,
-                        fdp.ConsumeIntegral<uint32_t>() /* height */,
-                        fdp.ConsumeIntegral<int32_t>() /* format */,
-                        fdp.ConsumeIntegral<int32_t>() /* flags */);
-                if (surfaceControl) {
-                    sp<Surface> surface = surfaceControl->getSurface();
-                    iGBP = surface->getIGraphicBufferProducer();
-                    iGBPs.push_back(iGBP);
-                }
-                iGBP.clear();
-                composerClient.clear();
-                surfaceControl.clear();
-            }
-            outputConfiguration = new OutputConfiguration(iGBPs, rotation, physicalCameraId,
-                                                          surfaceSetID, isShared);
-        }
+        sp<IGraphicBufferProducer> gbp;
+        return gbp;
     }
+}
 
-    outputConfiguration->getRotation();
-    outputConfiguration->getSurfaceSetID();
-    outputConfiguration->getSurfaceType();
-    outputConfiguration->getWidth();
-    outputConfiguration->getHeight();
-    outputConfiguration->isDeferred();
-    outputConfiguration->isShared();
-    outputConfiguration->getPhysicalCameraId();
+unique_ptr<OutputConfiguration> C2OutputConfigurationFuzzer::getC2OutputConfig() {
+    unique_ptr<OutputConfiguration> outputConfiguration = nullptr;
+    auto selectOutputConfigurationConstructor =
+            mFDP->PickValueInArray<const std::function<void()>>({
+                    [&]() { outputConfiguration = make_unique<OutputConfiguration>(); },
 
-    OutputConfiguration outputConfiguration2;
-    outputConfiguration->gbpsEqual(outputConfiguration2);
-    outputConfiguration->sensorPixelModesUsedEqual(outputConfiguration2);
-    outputConfiguration->gbpsLessThan(outputConfiguration2);
-    outputConfiguration->sensorPixelModesUsedLessThan(outputConfiguration2);
-    outputConfiguration->getGraphicBufferProducers();
-    sp<IGraphicBufferProducer> gbp;
-    outputConfiguration->addGraphicProducer(gbp);
-    invokeReadWriteNullParcel<OutputConfiguration>(outputConfiguration);
-    invokeReadWriteParcel<OutputConfiguration>(outputConfiguration);
-    delete outputConfiguration;
+                    [&]() {
+                        int32_t rotation = mFDP->ConsumeIntegral<int32_t>();
+                        string physicalCameraId = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                        int32_t surfaceSetID = mFDP->ConsumeIntegral<int32_t>();
+                        bool isShared = mFDP->ConsumeBool();
+                        sp<IGraphicBufferProducer> iGBP = createIGraphicBufferProducer();
+                        outputConfiguration = make_unique<OutputConfiguration>(
+                                iGBP, rotation, physicalCameraId, surfaceSetID, isShared);
+                    },
+
+                    [&]() {
+                        int32_t rotation = mFDP->ConsumeIntegral<int32_t>();
+                        string physicalCameraId = mFDP->ConsumeRandomLengthString(kMaxBytes);
+                        int32_t surfaceSetID = mFDP->ConsumeIntegral<int32_t>();
+                        bool isShared = mFDP->ConsumeBool();
+                        size_t iGBPSize = mFDP->ConsumeIntegralInRange<size_t>(kSizeMin, kSizeMax);
+                        vector<sp<IGraphicBufferProducer>> iGBPs;
+                        for (size_t idx = 0; idx < iGBPSize; ++idx) {
+                            sp<IGraphicBufferProducer> iGBP = createIGraphicBufferProducer();
+                            iGBPs.push_back(iGBP);
+                        }
+                        outputConfiguration = make_unique<OutputConfiguration>(
+                                iGBPs, rotation, physicalCameraId, surfaceSetID, isShared);
+                    },
+            });
+    selectOutputConfigurationConstructor();
+    return outputConfiguration;
+}
+
+void C2OutputConfigurationFuzzer::invokeC2OutputConfigFuzzer() {
+    unique_ptr<OutputConfiguration> outputConfiguration = getC2OutputConfig();
+    int8_t count = kMaxLoopIterations;
+    while (--count > 0) {
+        unique_ptr<OutputConfiguration> outputConfiguration2 = getC2OutputConfig();
+        auto callC2OutputConfAPIs = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() { outputConfiguration->getRotation(); },
+                [&]() { outputConfiguration->getSurfaceSetID(); },
+                [&]() { outputConfiguration->getSurfaceType(); },
+                [&]() { outputConfiguration->getWidth(); },
+                [&]() { outputConfiguration->getHeight(); },
+                [&]() { outputConfiguration->isDeferred(); },
+                [&]() { outputConfiguration->isShared(); },
+                [&]() { outputConfiguration->getPhysicalCameraId(); },
+                [&]() { outputConfiguration->gbpsEqual(*outputConfiguration2); },
+                [&]() { outputConfiguration->sensorPixelModesUsedEqual(*outputConfiguration2); },
+                [&]() { outputConfiguration->gbpsLessThan(*outputConfiguration2); },
+                [&]() { outputConfiguration->sensorPixelModesUsedLessThan(*outputConfiguration2); },
+                [&]() { outputConfiguration->getGraphicBufferProducers(); },
+                [&]() {
+                    sp<IGraphicBufferProducer> gbp = createIGraphicBufferProducer();
+                    outputConfiguration->addGraphicProducer(gbp);
+                },
+                [&]() { outputConfiguration->isMultiResolution(); },
+                [&]() { outputConfiguration->getColorSpace(); },
+                [&]() { outputConfiguration->getStreamUseCase(); },
+                [&]() { outputConfiguration->getTimestampBase(); },
+                [&]() { outputConfiguration->getMirrorMode(); },
+                [&]() { outputConfiguration->useReadoutTimestamp(); },
+        });
+        callC2OutputConfAPIs();
+    }
+    // Keep the invokeReadWrite() APIs outside the while loop to avoid possible OOM.
+    invokeReadWriteNullParcel<OutputConfiguration>(outputConfiguration.get());
+    if (mFDP->ConsumeBool()) {
+        invokeReadWriteParcel<OutputConfiguration>(outputConfiguration.get());
+    } else {
+        invokeNewReadWriteParcel<OutputConfiguration>(outputConfiguration.get(), *mFDP);
+    }
+}
+
+void C2OutputConfigurationFuzzer::process(const uint8_t* data, size_t size) {
+    mFDP = new FuzzedDataProvider(data, size);
+    invokeC2OutputConfigFuzzer();
+    delete mFDP;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    C2OutputConfigurationFuzzer c2OutputConfigurationFuzzer;
+    c2OutputConfigurationFuzzer.process(data, size);
     return 0;
 }
diff --git a/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp b/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp
index dc40b0f..c588f11 100644
--- a/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_c2SubmitInfo_fuzzer.cpp
@@ -27,6 +27,10 @@
     SubmitInfo submitInfo;
     submitInfo.mRequestId = fdp.ConsumeIntegral<int32_t>();
     submitInfo.mLastFrameNumber = fdp.ConsumeIntegral<int64_t>();
-    invokeReadWriteParcel<SubmitInfo>(&submitInfo);
+    if (fdp.ConsumeBool()) {
+        invokeReadWriteParcel<SubmitInfo>(&submitInfo);
+    } else {
+        invokeNewReadWriteParcel<SubmitInfo>(&submitInfo, fdp);
+    }
     return 0;
 }
diff --git a/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp b/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp
index e14d9ce..3131f1d 100644
--- a/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp
+++ b/camera/tests/fuzzer/camera_vendorTagDescriptor_fuzzer.cpp
@@ -29,6 +29,8 @@
 constexpr int32_t kRangeMin = 0;
 constexpr int32_t kRangeMax = 1000;
 constexpr int32_t kVendorTagDescriptorId = -1;
+constexpr int8_t kMinLoopIterations = 1;
+constexpr int8_t kMaxLoopIterations = 50;
 
 extern "C" {
 
@@ -95,39 +97,63 @@
     initVendorTagDescriptor();
 
     sp<VendorTagDescriptor> vdesc = new VendorTagDescriptor();
-    vdesc->copyFrom(*mVendorTagDescriptor);
-    VendorTagDescriptor::setAsGlobalVendorTagDescriptor(mVendorTagDescriptor);
-    VendorTagDescriptor::getGlobalVendorTagDescriptor();
 
-    int32_t tagCount = mVendorTagDescriptor->getTagCount();
-    if (tagCount > 0) {
-        uint32_t tagArray[tagCount];
-        mVendorTagDescriptor->getTagArray(tagArray);
-        uint32_t tag;
-        for (int32_t i = 0; i < tagCount; ++i) {
-            tag = tagArray[i];
-            get_local_camera_metadata_section_name_vendor_id(tag, kVendorTagDescriptorId);
-            get_local_camera_metadata_tag_name_vendor_id(tag, kVendorTagDescriptorId);
-            get_local_camera_metadata_tag_type_vendor_id(tag, kVendorTagDescriptorId);
-            mVendorTagDescriptor->getSectionIndex(tag);
-        }
-        mVendorTagDescriptor->getAllSectionNames();
+    int8_t count = mFDP->ConsumeIntegralInRange<int8_t>(kMinLoopIterations, kMaxLoopIterations);
+    while (--count > 0) {
+        auto callVendorTagDescriptor = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() {
+                    int32_t tagCount = mVendorTagDescriptor->getTagCount();
+                    if (tagCount > 0) {
+                        uint32_t tagArray[tagCount];
+                        mVendorTagDescriptor->getTagArray(tagArray);
+                        uint32_t tag;
+                        for (int32_t i = 0; i < tagCount; ++i) {
+                            tag = tagArray[i];
+                            get_local_camera_metadata_section_name_vendor_id(
+                                    tag, kVendorTagDescriptorId);
+                            get_local_camera_metadata_tag_name_vendor_id(tag,
+                                                                         kVendorTagDescriptorId);
+                            get_local_camera_metadata_tag_type_vendor_id(tag,
+                                                                         kVendorTagDescriptorId);
+                            mVendorTagDescriptor->getSectionIndex(tag);
+                        }
+                    }
+                },
+                [&]() {
+                    if (mVendorTagDescriptor->getTagCount() > 0) {
+                        mVendorTagDescriptor->getAllSectionNames();
+                    }
+                },
+                [&]() { vdesc->copyFrom(*mVendorTagDescriptor); },
+                [&]() {
+                    VendorTagDescriptor::setAsGlobalVendorTagDescriptor(mVendorTagDescriptor);
+                },
+                [&]() { VendorTagDescriptor::getGlobalVendorTagDescriptor(); },
+                [&]() {
+                    String8 name((mFDP->ConsumeRandomLengthString()).c_str());
+                    String8 section((mFDP->ConsumeRandomLengthString()).c_str());
+                    uint32_t lookupTag;
+                    mVendorTagDescriptor->lookupTag(name, section, &lookupTag);
+                },
+                [&]() {
+                    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+                    int32_t verbosity = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
+                    int32_t indentation =
+                            mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
+                    mVendorTagDescriptor->dump(fd, verbosity, indentation);
+                    close(fd);
+                },
+        });
+        callVendorTagDescriptor();
     }
 
-    String8 name((mFDP->ConsumeRandomLengthString()).c_str());
-    String8 section((mFDP->ConsumeRandomLengthString()).c_str());
-    uint32_t lookupTag;
-    mVendorTagDescriptor->lookupTag(name, section, &lookupTag);
-
-    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
-    int32_t verbosity = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
-    int32_t indentation = mFDP->ConsumeIntegralInRange<int32_t>(kRangeMin, kRangeMax);
-    mVendorTagDescriptor->dump(fd, verbosity, indentation);
-
-    invokeReadWriteParcelsp<VendorTagDescriptor>(mVendorTagDescriptor);
+    // Keep the invokeReadWrite() APIs outside the while loop to avoid possible OOM.
+    if (mFDP->ConsumeBool()) {
+        invokeReadWriteParcelsp<VendorTagDescriptor>(mVendorTagDescriptor);
+    } else {
+        invokeNewReadWriteParcelsp<VendorTagDescriptor>(mVendorTagDescriptor, *mFDP);
+    }
     VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-    vdesc.clear();
-    close(fd);
 }
 
 void VendorTagDescriptorFuzzer::invokeVendorTagDescriptorCache() {
@@ -135,36 +161,52 @@
     uint64_t id = mFDP->ConsumeIntegral<uint64_t>();
     initVendorTagDescriptor();
 
-    mVendorTagDescriptorCache->addVendorDescriptor(id, mVendorTagDescriptor);
-    VendorTagDescriptorCache::setAsGlobalVendorTagCache(mVendorTagDescriptorCache);
-    VendorTagDescriptorCache::getGlobalVendorTagCache();
-    sp<VendorTagDescriptor> tagDesc;
-    mVendorTagDescriptorCache->getVendorTagDescriptor(id, &tagDesc);
-
-    int32_t tagCount = mVendorTagDescriptorCache->getTagCount(id);
-    if (tagCount > 0) {
-        uint32_t tagArray[tagCount];
-        mVendorTagDescriptorCache->getTagArray(tagArray, id);
-        uint32_t tag;
-        for (int32_t i = 0; i < tagCount; ++i) {
-            tag = tagArray[i];
-            get_local_camera_metadata_section_name_vendor_id(tag, id);
-            get_local_camera_metadata_tag_name_vendor_id(tag, id);
-            get_local_camera_metadata_tag_type_vendor_id(tag, id);
-        }
+    int8_t count = mFDP->ConsumeIntegralInRange<int8_t>(kMinLoopIterations, kMaxLoopIterations);
+    while (--count > 0) {
+        auto callVendorTagDescriptorCache = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() { mVendorTagDescriptorCache->addVendorDescriptor(id, mVendorTagDescriptor); },
+                [&]() {
+                    VendorTagDescriptorCache::setAsGlobalVendorTagCache(mVendorTagDescriptorCache);
+                },
+                [&]() { VendorTagDescriptorCache::getGlobalVendorTagCache(); },
+                [&]() {
+                    sp<VendorTagDescriptor> tagDesc;
+                    mVendorTagDescriptorCache->getVendorTagDescriptor(id, &tagDesc);
+                },
+                [&]() {
+                    int32_t tagCount = mVendorTagDescriptorCache->getTagCount(id);
+                    if (tagCount > 0) {
+                        uint32_t tagArray[tagCount];
+                        mVendorTagDescriptorCache->getTagArray(tagArray, id);
+                        uint32_t tag;
+                        for (int32_t i = 0; i < tagCount; ++i) {
+                            tag = tagArray[i];
+                            get_local_camera_metadata_section_name_vendor_id(tag, id);
+                            get_local_camera_metadata_tag_name_vendor_id(tag, id);
+                            get_local_camera_metadata_tag_type_vendor_id(tag, id);
+                        }
+                    }
+                },
+                [&]() {
+                    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
+                    int32_t verbosity = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
+                    int32_t indentation = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
+                    mVendorTagDescriptorCache->dump(fd, verbosity, indentation);
+                    close(fd);
+                },
+                [&]() { VendorTagDescriptorCache::isVendorCachePresent(id); },
+                [&]() { mVendorTagDescriptorCache->getVendorIdsAndTagDescriptors(); },
+        });
+        callVendorTagDescriptorCache();
     }
 
-    int32_t fd = open("/dev/null", O_CLOEXEC | O_RDWR | O_CREAT);
-    int32_t verbosity = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
-    int32_t indentation = mFDP->ConsumeIntegralInRange<int>(kRangeMin, kRangeMax);
-    mVendorTagDescriptorCache->dump(fd, verbosity, indentation);
-
-    invokeReadWriteParcelsp<VendorTagDescriptorCache>(mVendorTagDescriptorCache);
-    VendorTagDescriptorCache::isVendorCachePresent(id);
-    mVendorTagDescriptorCache->getVendorIdsAndTagDescriptors();
+    // Keep the invokeReadWrite() APIs outside the while loop to avoid possible OOM.
+    if (mFDP->ConsumeBool()) {
+        invokeReadWriteParcelsp<VendorTagDescriptorCache>(mVendorTagDescriptorCache);
+    } else {
+        invokeNewReadWriteParcelsp<VendorTagDescriptorCache>(mVendorTagDescriptorCache, *mFDP);
+    }
     mVendorTagDescriptorCache->clearGlobalVendorTagCache();
-    tagDesc.clear();
-    close(fd);
 }
 
 void VendorTagDescriptorFuzzer::invokeVendorTagErrorConditions() {
@@ -177,26 +219,39 @@
         VendorTagDescriptor::createDescriptorFromOps(/*vOps*/ NULL, vDesc);
     } else {
         VendorTagDescriptor::createDescriptorFromOps(&vOps, vDesc);
-        int32_t tagCount = vDesc->getTagCount();
-        uint32_t badTag = mFDP->ConsumeIntegral<uint32_t>();
-        uint32_t badTagArray[tagCount + 1];
-        vDesc->getTagArray(badTagArray);
-        vDesc->getSectionName(badTag);
-        vDesc->getTagName(badTag);
-        vDesc->getTagType(badTag);
-        VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-        VendorTagDescriptor::getGlobalVendorTagDescriptor();
-        VendorTagDescriptor::setAsGlobalVendorTagDescriptor(vDesc);
+
+        int8_t count = mFDP->ConsumeIntegralInRange<int8_t>(kMinLoopIterations, kMaxLoopIterations);
+        while (--count > 0) {
+            int32_t tagCount = vDesc->getTagCount();
+            uint32_t badTag = mFDP->ConsumeIntegral<uint32_t>();
+            uint32_t badTagArray[tagCount + 1];
+            auto callVendorTagErrorConditions =
+                    mFDP->PickValueInArray<const std::function<void()>>({
+                            [&]() { vDesc->getTagArray(badTagArray); },
+                            [&]() { vDesc->getSectionName(badTag); },
+                            [&]() { vDesc->getTagName(badTag); },
+                            [&]() { vDesc->getTagType(badTag); },
+                            [&]() { VendorTagDescriptor::clearGlobalVendorTagDescriptor(); },
+                            [&]() { VendorTagDescriptor::getGlobalVendorTagDescriptor(); },
+                            [&]() { VendorTagDescriptor::setAsGlobalVendorTagDescriptor(vDesc); },
+                    });
+            callVendorTagErrorConditions();
+        }
         invokeReadWriteNullParcelsp<VendorTagDescriptor>(vDesc);
-        vDesc.clear();
     }
+    vDesc.clear();
 }
 
 void VendorTagDescriptorFuzzer::process(const uint8_t* data, size_t size) {
     mFDP = new FuzzedDataProvider(data, size);
-    invokeVendorTagDescriptor();
-    invokeVendorTagDescriptorCache();
-    invokeVendorTagErrorConditions();
+    while (mFDP->remaining_bytes()) {
+        auto invokeVendorTagDescriptorFuzzer = mFDP->PickValueInArray<const std::function<void()>>({
+                [&]() { invokeVendorTagDescriptor(); },
+                [&]() { invokeVendorTagDescriptorCache(); },
+                [&]() { invokeVendorTagErrorConditions(); },
+        });
+        invokeVendorTagDescriptorFuzzer();
+    }
     delete mFDP;
 }
 
diff --git a/media/audio/aconfig/Android.bp b/media/audio/aconfig/Android.bp
index b1d4ad4..6d21e97 100644
--- a/media/audio/aconfig/Android.bp
+++ b/media/audio/aconfig/Android.bp
@@ -8,18 +8,21 @@
 aconfig_declarations {
     name: "com.android.media.audioserver-aconfig",
     package: "com.android.media.audioserver",
+    container: "system",
     srcs: ["audioserver.aconfig"],
 }
 
 aconfig_declarations {
     name: "com.android.media.audio-aconfig",
     package: "com.android.media.audio",
+    container: "system",
     srcs: ["audio.aconfig"],
 }
 
 aconfig_declarations {
     name: "com.android.media.aaudio-aconfig",
     package: "com.android.media.aaudio",
+    container: "system",
     srcs: ["aaudio.aconfig"],
 }
 
@@ -43,6 +46,18 @@
     name: "com.android.media.audio-aconfig-cc",
     aconfig_declarations: "com.android.media.audio-aconfig",
     defaults: ["audio-aconfig-cc-defaults"],
+    double_loadable: true,
+    host_supported: true,
+    product_available: true,
+    vendor_available: true,
+    // TODO(b/316909431) native_bridge_supported: true,
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media",
+        "com.android.media.swcodec",
+        "com.android.btservices",
+    ],
+    min_sdk_version: "29",
 }
 
 cc_aconfig_library {
@@ -56,6 +71,12 @@
     aconfig_declarations: "com.android.media.audio-aconfig",
 }
 
+// For CTS usage
+java_aconfig_library {
+    name: "com.android.media.audioserver-aconfig-java",
+    aconfig_declarations: "com.android.media.audioserver-aconfig",
+}
+
 // Framework available flags to follow
 // Care must be taken to avoid namespace conflicts.
 // These flags are accessible outside of the platform! Limit usage to @FlaggedApi wherever possible
@@ -63,22 +84,25 @@
 aconfig_declarations {
     name: "android.media.audio-aconfig",
     package: "android.media.audio",
+    container: "system",
     srcs: ["audio_framework.aconfig"],
-    visibility: ["//visibility:private"],
+    visibility: ["//frameworks/base/api"],
 }
 
 aconfig_declarations {
     name: "android.media.audiopolicy-aconfig",
     package: "android.media.audiopolicy",
+    container: "system",
     srcs: ["audiopolicy_framework.aconfig"],
-    visibility: ["//visibility:private"],
+    visibility: ["//frameworks/base/api"],
 }
 
 aconfig_declarations {
     name: "android.media.midi-aconfig",
     package: "android.media.midi",
+    container: "system",
     srcs: ["midi_flags.aconfig"],
-    visibility: ["//visibility:private"],
+    visibility: ["//frameworks/base/api"],
 }
 
 java_aconfig_library {
@@ -91,6 +115,11 @@
     name: "android.media.audiopolicy-aconfig-java",
     aconfig_declarations: "android.media.audiopolicy-aconfig",
     defaults: ["framework-minus-apex-aconfig-java-defaults"],
+    min_sdk_version: "VanillaIceCream",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.car.framework",
+    ],
 }
 
 java_aconfig_library {
@@ -99,6 +128,12 @@
     defaults: ["framework-minus-apex-aconfig-java-defaults"],
 }
 
+cc_aconfig_library {
+    name: "android.media.audiopolicy-aconfig-cc",
+    aconfig_declarations: "android.media.audiopolicy-aconfig",
+    defaults: ["audio-aconfig-cc-defaults"],
+}
+
 aconfig_declarations_group {
     name: "audio-framework-aconfig",
     java_aconfig_libraries: [
diff --git a/media/audio/aconfig/aaudio.aconfig b/media/audio/aconfig/aaudio.aconfig
index 7196525..c160109 100644
--- a/media/audio/aconfig/aaudio.aconfig
+++ b/media/audio/aconfig/aaudio.aconfig
@@ -3,6 +3,7 @@
 # Please add flags in alphabetical order.
 
 package: "com.android.media.aaudio"
+container: "system"
 
 flag {
     name: "sample_rate_conversion"
diff --git a/media/audio/aconfig/audio.aconfig b/media/audio/aconfig/audio.aconfig
index 73cb8ca..cdbadc2 100644
--- a/media/audio/aconfig/audio.aconfig
+++ b/media/audio/aconfig/audio.aconfig
@@ -3,6 +3,7 @@
 # Please add flags in alphabetical order.
 
 package: "com.android.media.audio"
+container: "system"
 
 flag {
     name: "alarm_min_volume_zero"
@@ -12,6 +13,13 @@
 }
 
 flag {
+    name: "as_device_connection_failure"
+    namespace: "media_audio"
+    description: "AudioService handles device connection failures."
+    bug: "326597760"
+}
+
+flag {
     name: "bluetooth_mac_address_anonymization"
     namespace: "media_audio"
     description:
@@ -37,8 +45,30 @@
 }
 
 flag {
+    name: "ringer_mode_affects_alarm"
+    namespace: "media_audio"
+    description:
+        "Support a configuration where ringer mode affects alarm stream"
+    bug: "312456558"
+}
+
+flag {
     name: "spatializer_offload"
     namespace: "media_audio"
     description: "Enable spatializer offload"
     bug: "307842941"
 }
+
+flag {
+    name: "stereo_spatialization"
+    namespace: "media_audio"
+    description: "Enable stereo channel mask for spatialization."
+    bug: "303920722"
+}
+
+flag {
+    name: "volume_refactoring"
+    namespace: "media_audio"
+    description: "Refactor the audio volume internal architecture logic"
+    bug: "324152869"
+}
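These flag declarations are consumed through the generated libraries wired up in the Android.bp above (for C++, "com.android.media.audio-aconfig-cc"). As a rough sketch only, assuming aconfig's usual C++ codegen in which the generated header and namespace mirror the package name with one accessor per flag (those exact names are an assumption here, not something this change defines), a guarded call site would look roughly like:

    // Hypothetical call site guarded by the "stereo_spatialization" flag declared above.
    // The header and namespace names follow the usual package-derived convention and
    // are assumptions, not taken from this change.
    #include <com_android_media_audio.h>  // assumed header generated from the package name

    namespace audio_flags = com::android::media::audio;

    void configureSpatializerChannelMask() {
        if (audio_flags::stereo_spatialization()) {
            // New behavior: allow stereo channel masks for spatialization.
        } else {
            // Existing behavior.
        }
    }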
diff --git a/media/audio/aconfig/audio_framework.aconfig b/media/audio/aconfig/audio_framework.aconfig
index 294e67d..cfdf1ab 100644
--- a/media/audio/aconfig/audio_framework.aconfig
+++ b/media/audio/aconfig/audio_framework.aconfig
@@ -21,6 +21,31 @@
     bug: "302323921"
 }
 
+flag {
+    name: "feature_spatial_audio_headtracking_low_latency"
+    namespace: "media_audio"
+    description: "Define feature for low latency headtracking for SA"
+    bug: "324291076"
+}
+
+flag {
+    name: "focus_exclusive_with_recording"
+    namespace: "media_audio"
+    description:
+        "Audio focus GAIN_TRANSIENT_EXCLUSIVE only mutes"
+        "notifications when the focus owner is also recording"
+    bug: "316414750"
+}
+
+flag {
+    name: "foreground_audio_control"
+    namespace: "media_audio"
+    description:
+        "Audio focus gain requires FGS or delegation to "
+        "take effect"
+    bug: "296232417"
+}
+
 # TODO remove
 flag {
     name: "focus_freeze_test_api"
@@ -42,5 +67,35 @@
 Enable the API for providing loudness metadata and CTA-2075 \
 support."
     bug: "298463873"
+    is_exported: true
 }
 
+flag {
+    name: "mute_background_audio"
+    namespace: "media_audio"
+    description: "mute audio playing in background"
+    bug: "296232417"
+}
+
+flag {
+    name: "sco_managed_by_audio"
+    namespace: "media_audio"
+    description: "\
+Enable new implementation of headset profile device connection and\
+SCO audio activation."
+    bug: "265057196"
+}
+
+flag {
+    name: "supported_device_types_api"
+    namespace: "media_audio"
+    description: "Surface new API method AudioManager.getSupportedDeviceTypes()"
+    bug: "307537538"
+}
+
+flag {
+    name: "volume_ringer_api_hardening"
+    namespace: "media_audio"
+    description: "Limit access to volume and ringer SDK APIs in AudioManager"
+    bug: "296232417"
+}
diff --git a/media/audio/aconfig/audiopolicy_framework.aconfig b/media/audio/aconfig/audiopolicy_framework.aconfig
index 833730a..72a1e6c 100644
--- a/media/audio/aconfig/audiopolicy_framework.aconfig
+++ b/media/audio/aconfig/audiopolicy_framework.aconfig
@@ -4,6 +4,31 @@
 # Please add flags in alphabetical order.
 
 package: "android.media.audiopolicy"
+container: "system"
+
+flag {
+    name: "audio_mix_ownership"
+    namespace: "media_audio"
+    description: "Improves ownership model of AudioMixes and the relationship between AudioPolicy and AudioMix."
+    bug: "309080867"
+    is_fixed_read_only: true
+}
+
+flag {
+    name: "audio_mix_policy_ordering"
+    namespace: "media_audio"
+    description: "Orders AudioMixes per registered AudioPolicy."
+    bug: "309080867"
+    is_fixed_read_only: true
+}
+
+flag {
+    name: "audio_mix_test_api"
+    namespace: "media_audio"
+    description: "Enable new Test APIs that provide access to registered AudioMixes on system server and native side."
+    bug: "309080867"
+    is_fixed_read_only: true
+}
 
 flag {
     name: "audio_policy_update_mixing_rules_api"
@@ -11,3 +36,25 @@
     description: "Enable AudioPolicy.updateMixingRules API for hot-swapping audio mixing rules."
     bug: "293874525"
 }
+
+flag {
+    name: "enable_fade_manager_configuration"
+    namespace: "media_audio"
+    description: "Enable Fade Manager Configuration support to determine fade properties"
+    bug: "307354764"
+}
+
+flag {
+    name: "multi_zone_audio"
+    namespace: "media_audio"
+    description: "Enable multi-zone audio support in audio product strategies."
+    bug: "316643994"
+}
+
+flag {
+    name: "record_audio_device_aware_permission"
+    namespace: "media_audio"
+    description: "Enable device-aware permission handling for RECORD_AUDIO permission"
+    bug: "291737188"
+    is_fixed_read_only: true
+}
diff --git a/media/audio/aconfig/audioserver.aconfig b/media/audio/aconfig/audioserver.aconfig
index 21ea1a2..5c6504f 100644
--- a/media/audio/aconfig/audioserver.aconfig
+++ b/media/audio/aconfig/audioserver.aconfig
@@ -3,6 +3,7 @@
 # Please add flags in alphabetical order.
 
 package: "com.android.media.audioserver"
+container: "system"
 
 flag {
     name: "direct_track_reprioritization"
diff --git a/media/audio/aconfig/midi_flags.aconfig b/media/audio/aconfig/midi_flags.aconfig
index ff9238a..efb643f 100644
--- a/media/audio/aconfig/midi_flags.aconfig
+++ b/media/audio/aconfig/midi_flags.aconfig
@@ -4,6 +4,7 @@
 # Please add flags in alphabetical order.
 
 package: "android.media.midi"
+container: "system"
 
 flag {
     name: "virtual_ump"
diff --git a/media/audioserver/Android.bp b/media/audioserver/Android.bp
index 2030dc7..479e13a 100644
--- a/media/audioserver/Android.bp
+++ b/media/audioserver/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -71,7 +72,6 @@
         "frameworks/av/services/medialog",
         "frameworks/av/services/oboeservice", // TODO oboeservice is the old folder name for aaudioservice. It will be changed.
 
-
     ],
 
     init_rc: ["audioserver.rc"],
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 2a33048..0384e2e 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -1049,7 +1049,7 @@
             memcpy(wView.data(), encoded_packet->data.frame.buf, encoded_packet->data.frame.sz);
             ++mNumInputFrames;
 
-            ALOGD("bytes generated %zu", encoded_packet->data.frame.sz);
+            ALOGV("bytes generated %zu", encoded_packet->data.frame.sz);
             uint32_t flags = 0;
             if (eos) {
                 flags |= C2FrameData::FLAG_END_OF_STREAM;
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/Android.bp b/media/codec2/hal/hidl/1.0/vts/functional/Android.bp
index 2054fe6..ccdde5e 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/Android.bp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/Android.bp
@@ -15,6 +15,7 @@
 //
 
 package {
+    default_team: "trendy_team_media_codec_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/audio/Android.bp b/media/codec2/hal/hidl/1.0/vts/functional/audio/Android.bp
index 624aad2..2b1bca0 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/audio/Android.bp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/audio/Android.bp
@@ -15,6 +15,7 @@
  */
 
 package {
+    default_team: "trendy_team_media_codec_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/common/Android.bp b/media/codec2/hal/hidl/1.0/vts/functional/common/Android.bp
index 0f07077..564de47 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/common/Android.bp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/common/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_codec_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/component/Android.bp b/media/codec2/hal/hidl/1.0/vts/functional/component/Android.bp
index cc019da..0640f02 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/component/Android.bp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/component/Android.bp
@@ -15,6 +15,7 @@
  */
 
 package {
+    default_team: "trendy_team_media_codec_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/master/Android.bp b/media/codec2/hal/hidl/1.0/vts/functional/master/Android.bp
index 40f5201..5e52fde 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/master/Android.bp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/master/Android.bp
@@ -15,6 +15,7 @@
  */
 
 package {
+    default_team: "trendy_team_media_codec_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/video/Android.bp b/media/codec2/hal/hidl/1.0/vts/functional/video/Android.bp
index ecc4f9d..d04c2f6 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/video/Android.bp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/video/Android.bp
@@ -15,6 +15,7 @@
  */
 
 package {
+    default_team: "trendy_team_media_codec_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/libaaudio/examples/Android.bp b/media/libaaudio/examples/Android.bp
index e2c1878..aa3ae5e 100644
--- a/media/libaaudio/examples/Android.bp
+++ b/media/libaaudio/examples/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/libaaudio/examples/input_monitor/Android.bp b/media/libaaudio/examples/input_monitor/Android.bp
index 72adfd7..52a5914 100644
--- a/media/libaaudio/examples/input_monitor/Android.bp
+++ b/media/libaaudio/examples/input_monitor/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -11,7 +12,10 @@
     name: "input_monitor",
     gtest: false,
     srcs: ["src/input_monitor.cpp"],
-    cflags: ["-Wall", "-Werror"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
     shared_libs: ["libaaudio"],
     header_libs: ["libaaudio_example_utils"],
 }
@@ -20,7 +24,10 @@
     name: "input_monitor_callback",
     gtest: false,
     srcs: ["src/input_monitor_callback.cpp"],
-    cflags: ["-Wall", "-Werror"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
     shared_libs: ["libaaudio"],
     header_libs: ["libaaudio_example_utils"],
 }
diff --git a/media/libaaudio/examples/loopback/Android.bp b/media/libaaudio/examples/loopback/Android.bp
index b18aeec..6552113 100644
--- a/media/libaaudio/examples/loopback/Android.bp
+++ b/media/libaaudio/examples/loopback/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -11,13 +12,16 @@
     name: "aaudio_loopback",
     gtest: false,
     srcs: ["src/loopback.cpp"],
-    cflags: ["-Wall", "-Werror"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
     static_libs: ["libsndfile"],
     include_dirs: ["external/oboe/apps/OboeTester/app/src/main/cpp"],
     shared_libs: [
         "libaaudio",
         "libaudioutils",
-        "liblog"
-        ],
+        "liblog",
+    ],
     header_libs: ["libaaudio_example_utils"],
 }
diff --git a/media/libaaudio/examples/write_sine/Android.bp b/media/libaaudio/examples/write_sine/Android.bp
index 1c7e0f1..fe78112 100644
--- a/media/libaaudio/examples/write_sine/Android.bp
+++ b/media/libaaudio/examples/write_sine/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -10,7 +11,10 @@
 cc_test {
     name: "write_sine",
     srcs: ["src/write_sine.cpp"],
-    cflags: ["-Wall", "-Werror"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
     shared_libs: ["libaaudio"],
     header_libs: ["libaaudio_example_utils"],
 }
@@ -18,7 +22,10 @@
 cc_test {
     name: "write_sine_callback",
     srcs: ["src/write_sine_callback.cpp"],
-    cflags: ["-Wall", "-Werror"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
     shared_libs: ["libaaudio"],
     header_libs: ["libaaudio_example_utils"],
 }
diff --git a/media/libaaudio/fuzzer/Android.bp b/media/libaaudio/fuzzer/Android.bp
index 46c4148..6d94f38 100644
--- a/media/libaaudio/fuzzer/Android.bp
+++ b/media/libaaudio/fuzzer/Android.bp
@@ -15,6 +15,7 @@
  */
 
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index c3b32e6..9d9b574 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -2011,10 +2011,20 @@
         __INTRODUCED_IN(28);
 
 /**
- * Passes back the time at which a particular frame was presented.
+ * Returns the time at which a particular frame was played on a speaker or headset,
+ * or was recorded on a microphone.
+ *
  * This can be used to synchronize audio with video or MIDI.
  * It can also be used to align a recorded stream with a playback stream.
  *
+ * The framePosition is an index into the stream of audio data.
+ * The first frame played or recorded is at framePosition 0.
+ *
+ * These framePositions are the same units that you get from AAudioStream_getFramesRead()
+ * or AAudioStream_getFramesWritten().
+ * A "frame" is a set of audio sample values that are played simultaneously.
+ * For example, a stereo stream has two samples in a frame, left and right.
+ *
  * Timestamps are only valid when the stream is in {@link #AAUDIO_STREAM_STATE_STARTED}.
  * {@link #AAUDIO_ERROR_INVALID_STATE} will be returned if the stream is not started.
  * Note that because requestStart() is asynchronous, timestamps will not be valid until
@@ -2030,8 +2040,8 @@
  *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @param clockid CLOCK_MONOTONIC or CLOCK_BOOTTIME
- * @param framePosition pointer to a variable to receive the position
- * @param timeNanoseconds pointer to a variable to receive the time
+ * @param[out] framePosition pointer to a variable to receive the position
+ * @param[out] timeNanoseconds pointer to a variable to receive the time
  * @return {@link #AAUDIO_OK} or a negative error
  */
 AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* _Nonnull stream,
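Because the expanded comment ties framePosition to AAudioStream_getFramesWritten() and AAudioStream_getFramesRead(), a short sketch of the usual application, estimating output latency for a started playback stream, may help; this is an illustrative sketch (the helper names are made up), not part of the header:

    // Sketch: estimate output latency from an AAudio presentation timestamp.
    // Assumes "stream" is an already-started output stream.
    #include <aaudio/AAudio.h>
    #include <time.h>

    static int64_t nowNanos(clockid_t clockId) {
        struct timespec ts;
        clock_gettime(clockId, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    // Returns an estimated latency in nanoseconds, or a negative AAudio error code.
    static int64_t estimateOutputLatencyNanos(AAudioStream *stream) {
        int64_t presentedFrame = 0;
        int64_t presentedTimeNanos = 0;
        aaudio_result_t result = AAudioStream_getTimestamp(
                stream, CLOCK_MONOTONIC, &presentedFrame, &presentedTimeNanos);
        if (result != AAUDIO_OK) {
            return result;  // e.g. AAUDIO_ERROR_INVALID_STATE before the stream has started
        }
        // framePosition and getFramesWritten() are in the same units (frames),
        // so frames not yet presented convert directly to time.
        int64_t framesWritten = AAudioStream_getFramesWritten(stream);
        int32_t sampleRate = AAudioStream_getSampleRate(stream);
        int64_t framesPending = framesWritten - presentedFrame;
        int64_t presentTimeOfLastWrite =
                presentedTimeNanos + (framesPending * 1000000000LL) / sampleRate;
        return presentTimeOfLastWrite - nowNanos(CLOCK_MONOTONIC);
    }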
diff --git a/media/libaaudio/include/aaudio/AAudioTesting.h b/media/libaaudio/include/aaudio/AAudioTesting.h
index 01d97b6..d67ec70 100644
--- a/media/libaaudio/include/aaudio/AAudioTesting.h
+++ b/media/libaaudio/include/aaudio/AAudioTesting.h
@@ -49,12 +49,6 @@
 };
 typedef int32_t aaudio_policy_t;
 
-// Internal error codes. Only used by the framework.
-enum {
-    AAUDIO_INTERNAL_ERROR_BASE = -1000,
-    AAUDIO_ERROR_STANDBY,
-};
-
 /**
  * Control whether AAudioStreamBuilder_openStream() will use the new MMAP data path
  * or the older "Legacy" data path.
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index fcb376c..d2cb265 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -128,7 +129,7 @@
     tidy_checks_as_errors: tidy_errors,
     tidy_flags: [
         "-format-style=file",
-    ]
+    ],
 }
 
 cc_library {
@@ -250,7 +251,7 @@
     tidy_checks_as_errors: tidy_errors,
     tidy_flags: [
         "-format-style=file",
-    ]
+    ],
 }
 
 aidl_interface {
@@ -274,8 +275,7 @@
         "shared-file-region-aidl",
         "framework-permission-aidl",
     ],
-    backend:
-    {
+    backend: {
         java: {
             sdk_version: "module_current",
         },
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
index e1d517e..0c55fca 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.h
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -72,6 +72,12 @@
      */
     int32_t pull(void *destination, int32_t targetFramesToRead);
 
+    // Reset the entire graph so that volume ramps start at their
+    // target value and sample rate converters start with no phase offset.
+    void reset() {
+        mSink->pullReset();
+    }
+
     /**
      * Set numFramesToWrite frames from the source into the flowgraph.
      * Then, attempt to read targetFramesToRead from the flowgraph.
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 52925d9..7648e25 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -199,6 +199,7 @@
         if (getSampleRate() != getDeviceSampleRate()) {
             ALOGD("%s - skipping sample rate converter. SR = %d, Device SR = %d", __func__,
                     getSampleRate(), getDeviceSampleRate());
+            result = AAUDIO_ERROR_INVALID_RATE;
             goto error;
         }
     }
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 5bac2ca..5d4c3d4 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -100,6 +100,10 @@
 }
 
 void AudioStreamInternalPlay::prepareBuffersForStart() {
+    // Reset volume ramps to avoid a starting noise.
+    // This is done here instead of in AudioStreamInternal so that
+    // it will be easier to backport.
+    mFlowGraph.reset();
     // Prevent stale data from being played.
     mAudioEndpoint->eraseDataMemory();
 }
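
A hedged illustration of what the reset avoids; the class below is hypothetical and not the real flowgraph node. A ramp that resumes from its previous gain would fade in audibly, whereas snapping the current gain to the target on reset lets the first pulled frames start at full volume:

    // Hypothetical ramp node, for illustration only.
    class VolumeRamp {
    public:
        void setTargetVolume(float target) { mTarget = target; }

        // Called when the stream (re)starts: skip the ramp so playback does not
        // fade in from whatever gain was left over from the previous start/stop.
        void reset() { mCurrent = mTarget; }

        void process(float *buffer, int numFrames) {
            for (int i = 0; i < numFrames; ++i) {
                mCurrent += (mTarget - mCurrent) * kCoefficient;  // step toward target
                buffer[i] *= mCurrent;
            }
        }

    private:
        static constexpr float kCoefficient = 0.01f;
        float mTarget = 1.0f;
        float mCurrent = 1.0f;
    };
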
diff --git a/media/libaaudio/src/core/AudioGlobal.h b/media/libaaudio/src/core/AudioGlobal.h
index 6c22744..8af49b4 100644
--- a/media/libaaudio/src/core/AudioGlobal.h
+++ b/media/libaaudio/src/core/AudioGlobal.h
@@ -22,6 +22,14 @@
 
 namespace aaudio {
 
+// Internal error codes. Only used by the framework.
+enum {
+    AAUDIO_INTERNAL_ERROR_BASE = -1000,
+    AAUDIO_ERROR_STANDBY,
+    AAUDIO_ERROR_ALREADY_CLOSED,
+
+};
+
 aaudio_policy_t AudioGlobal_getMMapPolicy();
 aaudio_result_t AudioGlobal_setMMapPolicy(aaudio_policy_t policy);
 
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 59fdabc..d729047 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -69,16 +69,24 @@
     audio_channel_mask_t channelMask =
             AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), false /*isInput*/);
 
+    // Set flags based on selected parameters.
     audio_output_flags_t flags;
     aaudio_performance_mode_t perfMode = getPerformanceMode();
     switch(perfMode) {
-        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY: {
             // Bypass the normal mixer and go straight to the FAST mixer.
-            // If the app asks for a sessionId then it means they want to use effects.
-            // So don't use RAW flag.
-            flags = (audio_output_flags_t) ((requestedSessionId == AAUDIO_SESSION_ID_NONE)
-                    ? (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW)
-                    : (AUDIO_OUTPUT_FLAG_FAST));
+            // Some Usages need RAW mode so they can get the lowest possible latency.
+            // Other Usages should avoid RAW because it can interfere with
+            // dual sink routing or other features.
+            bool usageBenefitsFromRaw = getUsage() == AAUDIO_USAGE_GAME ||
+                    getUsage() == AAUDIO_USAGE_MEDIA;
+            // If an app does not ask for a sessionId then there will be no effects.
+            // So we can use the RAW flag.
+            flags = (audio_output_flags_t) (((requestedSessionId == AAUDIO_SESSION_ID_NONE)
+                                             && usageBenefitsFromRaw)
+                                            ? (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW)
+                                            : (AUDIO_OUTPUT_FLAG_FAST));
+        }
             break;
 
         case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
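
From the application side, the combination that now selects FAST|RAW is roughly: low-latency performance mode, a GAME or MEDIA usage, and no explicit session id. A sketch using the public AAudio builder API, with format, channel and callback configuration omitted:

    #include <aaudio/AAudio.h>

    // Sketch: request the low-latency output path for a game stream.
    AAudioStream *openLowLatencyGameStream() {
        AAudioStreamBuilder *builder = nullptr;
        if (AAudio_createStreamBuilder(&builder) != AAUDIO_OK) return nullptr;

        AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
        AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
        AAudioStreamBuilder_setUsage(builder, AAUDIO_USAGE_GAME);
        // Not calling AAudioStreamBuilder_setSessionId() leaves the session at
        // AAUDIO_SESSION_ID_NONE, which is one of the conditions for RAW above.

        AAudioStream *stream = nullptr;
        AAudioStreamBuilder_openStream(builder, &stream);  // may leave stream == nullptr
        AAudioStreamBuilder_delete(builder);
        return stream;
    }
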
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index d59afef..5ec8276 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -183,9 +184,9 @@
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_full_queue.cpp"],
     shared_libs: [
-		"libaaudio",
-		"liblog"
-	],
+        "libaaudio",
+        "liblog",
+    ],
 }
 
 cc_test {
@@ -248,3 +249,30 @@
     srcs: ["test_idle_disconnected_shared_stream.cpp"],
     shared_libs: ["libaaudio"],
 }
+
+cc_test {
+    name: "test_multiple_close_simultaneously",
+    defaults: [
+        "latest_android_media_audio_common_types_cpp_shared",
+        "libaaudio_tests_defaults",
+    ],
+    srcs: ["test_multiple_close_simultaneously.cpp"],
+    shared_libs: [
+        "aaudio-aidl-cpp",
+        "framework-permission-aidl-cpp",
+        "libaaudio",
+        "libbinder",
+        "liblog",
+        "libutils",
+    ],
+    // This test runs for 1 minute to ensure that no crash happens.
+    // Set the timeout to 2 minutes to allow the test to complete.
+    test_options: {
+        test_runner_options: [
+            {
+                name: "native-test-timeout",
+                value: "2m",
+            },
+        ],
+    },
+}
diff --git a/media/libaaudio/tests/test_multiple_close_simultaneously.cpp b/media/libaaudio/tests/test_multiple_close_simultaneously.cpp
new file mode 100644
index 0000000..f6351b6
--- /dev/null
+++ b/media/libaaudio/tests/test_multiple_close_simultaneously.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "test_multiple_close_simultaneously"
+
+#include <chrono>
+#include <condition_variable>
+#include <shared_mutex>
+#include <string>
+#include <thread>
+
+#include <gtest/gtest.h>
+
+#include <binder/IBinder.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+
+#include <aaudio/AAudio.h>
+#include <aaudio/IAAudioService.h>
+#include <aaudio/StreamRequest.h>
+#include <aaudio/StreamParameters.h>
+
+using namespace android;
+using namespace aaudio;
+
+#define AAUDIO_SERVICE_NAME "media.aaudio"
+
+static constexpr int THREAD_NUM = 2;
+static constexpr auto TEST_DURATION = std::chrono::minutes(1);
+
+static std::string sError;
+static bool sTestPassed = true;
+
+struct Signal {
+    std::atomic_int value{0};
+    std::shared_mutex lock;
+    std::condition_variable_any cv;
+};
+
+class AAudioServiceDeathRecipient : public IBinder::DeathRecipient {
+public:
+    void binderDied(const wp<IBinder>& who __unused) override {
+        sError = "AAudioService is dead";
+        ALOGE("%s", sError.c_str());
+        sTestPassed = false;
+    }
+};
+
+sp<IAAudioService> getAAudioService(const sp<IBinder::DeathRecipient>& recipient) {
+    auto sm = defaultServiceManager();
+    if (sm == nullptr) {
+        sError = "Cannot get service manager";
+        ALOGE("%s", sError.c_str());
+        return nullptr;
+    }
+    sp<IBinder> binder = sm->waitForService(String16(AAUDIO_SERVICE_NAME));
+    if (binder == nullptr) {
+        sError = "Cannot get aaudio service";
+        ALOGE("%s", sError.c_str());
+        return nullptr;
+    }
+    if (binder->linkToDeath(recipient) != NO_ERROR) {
+        sError = "Cannot link to binder death";
+        ALOGE("%s", sError.c_str());
+        return nullptr;
+    }
+    return interface_cast<IAAudioService>(binder);
+}
+
+void openAndMultipleClose(const sp<IAAudioService>& aaudioService) {
+    auto start = std::chrono::system_clock::now();
+    bool hasFailedOpening = false;
+    while (sTestPassed && std::chrono::system_clock::now() - start < TEST_DURATION) {
+        StreamRequest inRequest;
+        StreamParameters outParams;
+        int32_t handle = 0;
+        inRequest.attributionSource.uid = getuid();
+        inRequest.attributionSource.pid = getpid();
+        inRequest.attributionSource.token = sp<BBinder>::make();
+        auto status = aaudioService->openStream(inRequest, &outParams, &handle);
+        if (!status.isOk()) {
+            sError = "Cannot open stream, it can be caused by service death";
+            ALOGE("%s", sError.c_str());
+            sTestPassed = false;
+            break;
+        }
+        if (handle <= 0) {
+            sError = "Cannot get stream handle after open, returned handle"
+                    + std::to_string(handle);
+            ALOGE("%s", sError.c_str());
+            sTestPassed = false;
+            break;
+        }
+        hasFailedOpening = false;
+
+        Signal isReady;
+        Signal startWork;
+        Signal isCompleted;
+        std::unique_lock readyLock(isReady.lock);
+        std::unique_lock completedLock(isCompleted.lock);
+        for (int i = 0; i < THREAD_NUM; ++i) {
+            std::thread closeStream([aaudioService, handle, &isReady, &startWork, &isCompleted] {
+                isReady.value++;
+                isReady.cv.notify_one();
+                {
+                    std::shared_lock<std::shared_mutex> _l(startWork.lock);
+                    startWork.cv.wait(_l, [&startWork] { return startWork.value.load() == 1; });
+                }
+                int32_t result;
+                aaudioService->closeStream(handle, &result);
+                isCompleted.value++;
+                isCompleted.cv.notify_one();
+            });
+            closeStream.detach();
+        }
+        isReady.cv.wait(readyLock, [&isReady] { return isReady.value == THREAD_NUM; });
+        {
+            std::unique_lock startWorkLock(startWork.lock);
+            startWork.value.store(1);
+        }
+        startWork.cv.notify_all();
+        isCompleted.cv.wait_for(completedLock,
+                                std::chrono::milliseconds(1000),
+                                [&isCompleted] { return isCompleted.value == THREAD_NUM; });
+        if (isCompleted.value != THREAD_NUM) {
+            sError = "Close is not completed within 1 second";
+            ALOGE("%s", sError.c_str());
+            sTestPassed = false;
+            break;
+        }
+    }
+}
+
+TEST(test_multiple_close_simultaneously, open_multiple_close) {
+    const auto recipient = sp<AAudioServiceDeathRecipient>::make();
+    auto aaudioService = getAAudioService(recipient);
+    ASSERT_NE(nullptr, aaudioService) << sError;
+    openAndMultipleClose(aaudioService);
+    ASSERT_TRUE(sTestPassed) << sError;
+}
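
The core of the test above is a gate that releases several workers at the same instant so that closeStream() races against itself. A stripped-down sketch of just that pattern; the detached threads and the 1-second completion deadline of the real test are omitted:

    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Hold numThreads workers at a gate, then release them together so the
    // work (e.g. closing the same stream handle) races as hard as possible.
    void runSimultaneously(int numThreads, const std::function<void()>& work) {
        std::mutex mutex;
        std::condition_variable cv;
        bool go = false;

        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i) {
            threads.emplace_back([&] {
                std::unique_lock lock(mutex);
                cv.wait(lock, [&] { return go; });
                lock.unlock();
                work();
            });
        }
        {
            std::lock_guard lock(mutex);
            go = true;
        }
        cv.notify_all();
        for (auto& t : threads) t.join();
    }
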
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 51a679b..90910a1 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -52,7 +53,7 @@
         "AudioPolicy.cpp",
         "AudioProductStrategy.cpp",
         "AudioVolumeGroup.cpp",
-        "PolicyAidlConversion.cpp"
+        "PolicyAidlConversion.cpp",
     ],
     defaults: [
         "latest_android_media_audio_common_types_cpp_export_shared",
@@ -121,6 +122,7 @@
         "latest_android_media_audio_common_types_cpp_shared",
     ],
     shared_libs: [
+        "android.media.audiopolicy-aconfig-cc",
         "audioclient-types-aidl-cpp",
         "audioflinger-aidl-cpp",
         "audiopolicy-aidl-cpp",
@@ -185,6 +187,7 @@
         "-Wall",
         "-Werror",
         "-Wno-error=deprecated-declarations",
+        "-Wthread-safety",
     ],
     sanitize: {
         misc_undefined: [
@@ -331,6 +334,7 @@
         },
     },
 }
+
 aidl_interface {
     name: "audiopolicy-types-aidl",
     unstable: true,
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
index d9fd58c..1417182 100644
--- a/media/libaudioclient/AudioProductStrategy.cpp
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -60,9 +60,13 @@
 }
 
 // Keep in sync with android/media/audiopolicy/AudioProductStrategy#attributeMatches
-int AudioProductStrategy::attributesMatchesScore(const audio_attributes_t refAttributes,
-                                                 const audio_attributes_t clientAttritubes)
+int AudioProductStrategy::attributesMatchesScore(audio_attributes_t refAttributes,
+                                                 audio_attributes_t clientAttritubes)
 {
+    refAttributes.flags = static_cast<audio_flags_mask_t>(
+            refAttributes.flags & AUDIO_FLAGS_AFFECT_STRATEGY_SELECTION);
+    clientAttritubes.flags = static_cast<audio_flags_mask_t>(
+            clientAttritubes.flags & AUDIO_FLAGS_AFFECT_STRATEGY_SELECTION);
     if (refAttributes == clientAttritubes) {
         return MATCH_EQUALS;
     }
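
The masking above means that only flags in AUDIO_FLAGS_AFFECT_STRATEGY_SELECTION take part in the comparison, so two attributes that differ only in a non-strategy flag can still match. A small illustration, assuming the usual libaudio header (system/audio.h) for the types; AUDIO_FLAG_LOW_LATENCY is just an example flag, and whether it belongs to the mask is defined by the framework headers rather than assumed here:

    #include <system/audio.h>

    // Returns true when the two attributes carry the same strategy-relevant flags.
    bool sameStrategyFlags(const audio_attributes_t& ref, const audio_attributes_t& client) {
        const auto refFlags = static_cast<audio_flags_mask_t>(
                ref.flags & AUDIO_FLAGS_AFFECT_STRATEGY_SELECTION);
        const auto clientFlags = static_cast<audio_flags_mask_t>(
                client.flags & AUDIO_FLAGS_AFFECT_STRATEGY_SELECTION);
        // e.g. if AUDIO_FLAG_LOW_LATENCY is not in the mask, a client that sets it
        // still compares equal here and can reach MATCH_EQUALS in attributesMatchesScore().
        return refFlags == clientFlags;
    }
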
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 5bfdd5f..d1b1849 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -22,6 +22,7 @@
 #include <android/media/IAudioPolicyService.h>
 #include <android/media/AudioMixUpdate.h>
 #include <android/media/BnCaptureStateListener.h>
+#include <android_media_audiopolicy.h>
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
 #include <binder/IPCThreadState.h>
@@ -44,6 +45,8 @@
 
 // ----------------------------------------------------------------------------
 
+namespace audio_flags = android::media::audiopolicy;
+
 namespace android {
 using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
@@ -62,115 +65,184 @@
 using media::audio::common::AudioUsage;
 using media::audio::common::Int;
 
-// client singleton for AudioFlinger binder interface
-Mutex AudioSystem::gLock;
-Mutex AudioSystem::gLockErrorCallbacks;
-Mutex AudioSystem::gLockAPS;
-sp<IAudioFlinger> AudioSystem::gAudioFlinger;
-sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
-std::set<audio_error_callback> AudioSystem::gAudioErrorCallbacks;
+std::mutex AudioSystem::gMutex;
 dynamic_policy_callback AudioSystem::gDynPolicyCallback = NULL;
 record_config_callback AudioSystem::gRecordConfigCallback = NULL;
 routing_callback AudioSystem::gRoutingCallback = NULL;
 vol_range_init_req_callback AudioSystem::gVolRangeInitReqCallback = NULL;
 
-// Required to be held while calling into gSoundTriggerCaptureStateListener.
-class CaptureStateListenerImpl;
+std::mutex AudioSystem::gApsCallbackMutex;
+std::mutex AudioSystem::gErrorCallbacksMutex;
+std::set<audio_error_callback> AudioSystem::gAudioErrorCallbacks;
 
-Mutex gSoundTriggerCaptureStateListenerLock;
-sp<CaptureStateListenerImpl> gSoundTriggerCaptureStateListener = nullptr;
+std::mutex AudioSystem::gSoundTriggerMutex;
+sp<CaptureStateListenerImpl> AudioSystem::gSoundTriggerCaptureStateListener;
 
-// Binder for the AudioFlinger service that's passed to this client process from the system server.
+// Sets the Binder for the AudioFlinger service, passed to this client process
+// from the system server.
 // This allows specific isolated processes to access the audio system. Currently used only for the
 // HotwordDetectionService.
-static sp<IBinder> gAudioFlingerBinder = nullptr;
+template <typename ServiceInterface, typename Client, typename AidlInterface,
+        typename ServiceTraits>
+class ServiceHandler {
+public:
+    sp<ServiceInterface> getService(bool canStartThreadPool = true)
+            EXCLUDES(mMutex) NO_THREAD_SAFETY_ANALYSIS {  // std::unique_lock
+        sp<ServiceInterface> service;
+        sp<Client> client;
 
-void AudioSystem::setAudioFlingerBinder(const sp<IBinder>& audioFlinger) {
-    if (audioFlinger->getInterfaceDescriptor() != media::IAudioFlingerService::descriptor) {
-        ALOGE("setAudioFlingerBinder: received a binder of type %s",
-              String8(audioFlinger->getInterfaceDescriptor()).c_str());
-        return;
-    }
-    Mutex::Autolock _l(gLock);
-    if (gAudioFlinger != nullptr) {
-        ALOGW("setAudioFlingerBinder: ignoring; AudioFlinger connection already established.");
-        return;
-    }
-    gAudioFlingerBinder = audioFlinger;
-}
-
-static sp<IAudioFlinger> gLocalAudioFlinger; // set if we are local.
-
-status_t AudioSystem::setLocalAudioFlinger(const sp<IAudioFlinger>& af) {
-    Mutex::Autolock _l(gLock);
-    if (gAudioFlinger != nullptr) return INVALID_OPERATION;
-    gLocalAudioFlinger = af;
-    return OK;
-}
-
-// establish binder interface to AudioFlinger service
-const sp<IAudioFlinger> AudioSystem::getAudioFlingerImpl(bool canStartThreadPool = true) {
-    sp<IAudioFlinger> af;
-    sp<AudioFlingerClient> afc;
-    bool reportNoError = false;
-    {
-        Mutex::Autolock _l(gLock);
-        if (gAudioFlinger != nullptr) {
-            return gAudioFlinger;
+        bool reportNoError = false;
+        {
+            std::lock_guard _l(mMutex);
+            if (mService != nullptr) {
+                return mService;
+            }
         }
 
-        if (gAudioFlingerClient == nullptr) {
-            gAudioFlingerClient = sp<AudioFlingerClient>::make();
+        std::unique_lock ul_only1thread(mSingleGetter);
+        std::unique_lock ul(mMutex);
+        if (mService != nullptr) {
+            return mService;
+        }
+        if (mClient == nullptr) {
+            mClient = sp<Client>::make();
         } else {
             reportNoError = true;
         }
+        while (true) {
+            mService = mLocalService;
+            if (mService != nullptr) break;
 
-        if (gLocalAudioFlinger != nullptr) {
-            gAudioFlinger = gLocalAudioFlinger;
-        } else {
-            sp<IBinder> binder;
-            if (gAudioFlingerBinder != nullptr) {
-                binder = gAudioFlingerBinder;
-            } else {
-                sp<IServiceManager> sm = defaultServiceManager();
-                binder = sm->waitForService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
+            sp<IBinder> binder = mBinder;
+            if (binder == nullptr) {
+                sp<IServiceManager> sm = defaultServiceManager();
+                binder = sm->checkService(String16(ServiceTraits::SERVICE_NAME));
                 if (binder == nullptr) {
-                    return nullptr;
+                    ALOGD("%s: waiting for %s", __func__, ServiceTraits::SERVICE_NAME);
+
+                    // If the condition variable is present, setLocalService() and
+                    // setBinder() are allowed to use it to notify us.
+                    if (mCvGetter == nullptr) {
+                        mCvGetter = std::make_shared<std::condition_variable>();
+                    }
+                    mCvGetter->wait_for(ul, std::chrono::seconds(1));
+                    continue;
                 }
             }
-            binder->linkToDeath(gAudioFlingerClient);
-            const auto afs = interface_cast<media::IAudioFlingerService>(binder);
-            LOG_ALWAYS_FATAL_IF(afs == nullptr);
-            gAudioFlinger = sp<AudioFlingerClientAdapter>::make(afs);
+            binder->linkToDeath(mClient);
+            auto aidlInterface = interface_cast<AidlInterface>(binder);
+            LOG_ALWAYS_FATAL_IF(aidlInterface == nullptr);
+            if constexpr (std::is_same_v<ServiceInterface, AidlInterface>) {
+                mService = std::move(aidlInterface);
+            } else /* constexpr */ {
+                mService = ServiceTraits::createServiceAdapter(aidlInterface);
+            }
+            break;
         }
-        afc = gAudioFlingerClient;
-        af = gAudioFlinger;
-        // Make sure callbacks can be received by gAudioFlingerClient
-        if(canStartThreadPool) {
+        if (mCvGetter) mCvGetter.reset();  // remove condition variable.
+        client = mClient;
+        service = mService;
+        // Make sure callbacks can be received by the client
+        if (canStartThreadPool) {
             ProcessState::self()->startThreadPool();
         }
+        ul.unlock();
+        ul_only1thread.unlock();
+        ServiceTraits::onServiceCreate(service, client);
+        if (reportNoError) AudioSystem::reportError(NO_ERROR);
+        return service;
     }
-    const int64_t token = IPCThreadState::self()->clearCallingIdentity();
-    af->registerClient(afc);
-    IPCThreadState::self()->restoreCallingIdentity(token);
-    if (reportNoError) reportError(NO_ERROR);
-    return af;
+
+    status_t setLocalService(const sp<ServiceInterface>& service) EXCLUDES(mMutex) {
+        std::lock_guard _l(mMutex);
+        // we allow clearing once set, but not a double non-null set.
+        if (mService != nullptr && service != nullptr) return INVALID_OPERATION;
+        mLocalService = service;
+        if (mCvGetter) mCvGetter->notify_one();
+        return OK;
+    }
+
+    sp<Client> getClient() EXCLUDES(mMutex) {
+        const auto service = getService();
+        if (service == nullptr) return nullptr;
+        std::lock_guard _l(mMutex);
+        return mClient;
+    }
+
+    void setBinder(const sp<IBinder>& binder) EXCLUDES(mMutex) {
+        std::lock_guard _l(mMutex);
+        if (mService != nullptr) {
+            ALOGW("%s: ignoring; %s connection already established.",
+                    __func__, ServiceTraits::SERVICE_NAME);
+            return;
+        }
+        mBinder = binder;
+        if (mCvGetter) mCvGetter->notify_one();
+    }
+
+    void clearService() EXCLUDES(mMutex) {
+        std::lock_guard _l(mMutex);
+        mService.clear();
+        if (mClient) ServiceTraits::onClearService(mClient);
+    }
+
+private:
+    std::mutex mSingleGetter;
+    std::mutex mMutex;
+    std::shared_ptr<std::condition_variable> mCvGetter GUARDED_BY(mMutex);
+    sp<IBinder> mBinder GUARDED_BY(mMutex);
+    sp<ServiceInterface> mLocalService GUARDED_BY(mMutex);
+    sp<ServiceInterface> mService GUARDED_BY(mMutex);
+    sp<Client> mClient GUARDED_BY(mMutex);
+};
+
+struct AudioFlingerTraits {
+    static void onServiceCreate(
+            const sp<IAudioFlinger>& af, const sp<AudioSystem::AudioFlingerClient>& afc) {
+        const int64_t token = IPCThreadState::self()->clearCallingIdentity();
+        af->registerClient(afc);
+        IPCThreadState::self()->restoreCallingIdentity(token);
+    }
+
+    static sp<IAudioFlinger> createServiceAdapter(
+            const sp<media::IAudioFlingerService>& aidlInterface) {
+        return sp<AudioFlingerClientAdapter>::make(aidlInterface);
+    }
+
+    static void onClearService(const sp<AudioSystem::AudioFlingerClient>& afc) {
+        afc->clearIoCache();
+    }
+
+    static constexpr const char* SERVICE_NAME = IAudioFlinger::DEFAULT_SERVICE_NAME;
+};
+
+[[clang::no_destroy]] static constinit ServiceHandler<IAudioFlinger,
+        AudioSystem::AudioFlingerClient, media::IAudioFlingerService,
+        AudioFlingerTraits> gAudioFlingerServiceHandler;
+
+sp<IAudioFlinger> AudioSystem::get_audio_flinger() {
+    return gAudioFlingerServiceHandler.getService();
 }
 
-const sp<IAudioFlinger> AudioSystem:: get_audio_flinger() {
-    return getAudioFlingerImpl();
+sp<IAudioFlinger> AudioSystem::get_audio_flinger_for_fuzzer() {
+    return gAudioFlingerServiceHandler.getService(false /* canStartThreadPool */);
 }
 
-const sp<IAudioFlinger> AudioSystem:: get_audio_flinger_for_fuzzer() {
-    return getAudioFlingerImpl(false);
+sp<AudioSystem::AudioFlingerClient> AudioSystem::getAudioFlingerClient() {
+    return gAudioFlingerServiceHandler.getClient();
 }
 
-const sp<AudioSystem::AudioFlingerClient> AudioSystem::getAudioFlingerClient() {
-    // calling get_audio_flinger() will initialize gAudioFlingerClient if needed
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
-    if (af == 0) return 0;
-    Mutex::Autolock _l(gLock);
-    return gAudioFlingerClient;
+void AudioSystem::setAudioFlingerBinder(const sp<IBinder>& audioFlinger) {
+    if (audioFlinger->getInterfaceDescriptor() != media::IAudioFlingerService::descriptor) {
+        ALOGE("%s: received a binder of type %s",
+                __func__, String8(audioFlinger->getInterfaceDescriptor()).c_str());
+        return;
+    }
+    gAudioFlingerServiceHandler.setBinder(audioFlinger);
+}
+
+status_t AudioSystem::setLocalAudioFlinger(const sp<IAudioFlinger>& af) {
+    return gAudioFlingerServiceHandler.setLocalService(af);
 }
 
 sp<AudioIoDescriptor> AudioSystem::getIoDescriptor(audio_io_handle_t ioHandle) {
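
For orientation, a new user of the ServiceHandler template above only has to supply a traits struct and a handler instance. The names below (IMyService, MyServiceClient, "media.my_service") are hypothetical and only illustrate the shape:

    // Hypothetical traits: the client type must be an IBinder::DeathRecipient, and
    // because ServiceInterface == AidlInterface here, no createServiceAdapter() is
    // needed (the "if constexpr" branch in getService() moves the interface directly).
    struct MyServiceTraits {
        static void onServiceCreate(const sp<IMyService>& service,
                                    const sp<MyServiceClient>& client) {
            service->registerClient(client);  // whatever registration the service expects
        }
        static void onClearService(const sp<MyServiceClient>& /*client*/) {}
        static constexpr const char* SERVICE_NAME = "media.my_service";
    };

    [[clang::no_destroy]] static constinit ServiceHandler<IMyService, MyServiceClient,
            IMyService, MyServiceTraits> gMyServiceHandler;

    // Callers then simply do:
    //     sp<IMyService> service = gMyServiceHandler.getService();
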
@@ -192,41 +264,41 @@
 // FIXME Declare in binder opcode order, similarly to IAudioFlinger.h and IAudioFlinger.cpp
 
 status_t AudioSystem::muteMicrophone(bool state) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setMicMute(state);
 }
 
 status_t AudioSystem::isMicrophoneMuted(bool* state) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     *state = af->getMicMute();
     return NO_ERROR;
 }
 
 status_t AudioSystem::setMasterVolume(float value) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     af->setMasterVolume(value);
     return NO_ERROR;
 }
 
 status_t AudioSystem::setMasterMute(bool mute) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     af->setMasterMute(mute);
     return NO_ERROR;
 }
 
 status_t AudioSystem::getMasterVolume(float* volume) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     *volume = af->masterVolume();
     return NO_ERROR;
 }
 
 status_t AudioSystem::getMasterMute(bool* mute) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     *mute = af->masterMute();
     return NO_ERROR;
@@ -235,7 +307,7 @@
 status_t AudioSystem::setStreamVolume(audio_stream_type_t stream, float value,
                                       audio_io_handle_t output) {
     if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     af->setStreamVolume(stream, value, output);
     return NO_ERROR;
@@ -243,7 +315,7 @@
 
 status_t AudioSystem::setStreamMute(audio_stream_type_t stream, bool mute) {
     if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     af->setStreamMute(stream, mute);
     return NO_ERROR;
@@ -252,7 +324,7 @@
 status_t AudioSystem::getStreamVolume(audio_stream_type_t stream, float* volume,
                                       audio_io_handle_t output) {
     if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     *volume = af->streamVolume(stream, output);
     return NO_ERROR;
@@ -260,7 +332,7 @@
 
 status_t AudioSystem::getStreamMute(audio_stream_type_t stream, bool* mute) {
     if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     *mute = af->streamMute(stream);
     return NO_ERROR;
@@ -268,25 +340,25 @@
 
 status_t AudioSystem::setMode(audio_mode_t mode) {
     if (uint32_t(mode) >= AUDIO_MODE_CNT) return BAD_VALUE;
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setMode(mode);
 }
 
 status_t AudioSystem::setSimulateDeviceConnections(bool enabled) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setSimulateDeviceConnections(enabled);
 }
 
 status_t AudioSystem::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setParameters(ioHandle, keyValuePairs);
 }
 
 String8 AudioSystem::getParameters(audio_io_handle_t ioHandle, const String8& keys) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     String8 result = String8("");
     if (af == 0) return result;
 
@@ -305,23 +377,23 @@
 // convert volume steps to natural log scale
 
 // change this value to change volume scaling
-static const float dBPerStep = 0.5f;
+constexpr float kdbPerStep = 0.5f;
 // shouldn't need to touch these
-static const float dBConvert = -dBPerStep * 2.302585093f / 20.0f;
-static const float dBConvertInverse = 1.0f / dBConvert;
+constexpr float kdBConvert = -kdbPerStep * 2.302585093f / 20.0f;
+constexpr float kdBConvertInverse = 1.0f / kdBConvert;
 
 float AudioSystem::linearToLog(int volume) {
-    // float v = volume ? exp(float(100 - volume) * dBConvert) : 0;
+    // float v = volume ? exp(float(100 - volume) * kdBConvert) : 0;
     // ALOGD("linearToLog(%d)=%f", volume, v);
     // return v;
-    return volume ? exp(float(100 - volume) * dBConvert) : 0;
+    return volume ? exp(float(100 - volume) * kdBConvert) : 0;
 }
 
 int AudioSystem::logToLinear(float volume) {
-    // int v = volume ? 100 - int(dBConvertInverse * log(volume) + 0.5) : 0;
+    // int v = volume ? 100 - int(kdBConvertInverse * log(volume) + 0.5) : 0;
     // ALOGD("logTolinear(%d)=%f", v, volume);
     // return v;
-    return volume ? 100 - int(dBConvertInverse * log(volume) + 0.5) : 0;
+    return volume ? 100 - int(kdBConvertInverse * log(volume) + 0.5) : 0;
 }
 
 /* static */ size_t AudioSystem::calculateMinFrameCount(
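
A quick sanity check of the constants above: kdBConvert is -(ln 10)/40, so linearToLog(v) equals 10^(-(100 - v)/40), giving roughly a 50 dB range over 100 half-dB steps, and the integer round trip is exact. A standalone sketch of that check:

    #include <cassert>
    #include <cmath>

    // Re-derivation outside AudioSystem, only to check the round trip of the constants.
    constexpr float kdbPerStep = 0.5f;
    constexpr float kdBConvert = -kdbPerStep * 2.302585093f / 20.0f;   // -(ln 10)/40
    constexpr float kdBConvertInverse = 1.0f / kdBConvert;

    static float linearToLog(int volume) {
        return volume ? std::exp(float(100 - volume) * kdBConvert) : 0;  // 10^(-(100-v)/40)
    }

    static int logToLinear(float volume) {
        return volume ? 100 - int(kdBConvertInverse * std::log(volume) + 0.5f) : 0;
    }

    int main() {
        assert(linearToLog(100) == 1.0f);                       // full volume, 0 dB
        assert(std::fabs(linearToLog(80) - 0.3162f) < 1e-3f);   // 20 steps = -10 dB
        for (int v = 0; v <= 100; ++v) {
            assert(logToLinear(linearToLog(v)) == v);           // exact integer round trip
        }
        return 0;
    }
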
@@ -366,7 +438,7 @@
 
 status_t AudioSystem::getSamplingRate(audio_io_handle_t ioHandle,
                                       uint32_t* samplingRate) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     sp<AudioIoDescriptor> desc = getIoDescriptor(ioHandle);
     if (desc == 0) {
@@ -401,7 +473,7 @@
 
 status_t AudioSystem::getFrameCount(audio_io_handle_t ioHandle,
                                     size_t* frameCount) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     sp<AudioIoDescriptor> desc = getIoDescriptor(ioHandle);
     if (desc == 0) {
@@ -436,7 +508,7 @@
 
 status_t AudioSystem::getLatency(audio_io_handle_t output,
                                  uint32_t* latency) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
     if (outputDesc == 0) {
@@ -460,21 +532,21 @@
 }
 
 status_t AudioSystem::setVoiceVolume(float value) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setVoiceVolume(value);
 }
 
 status_t AudioSystem::getRenderPosition(audio_io_handle_t output, uint32_t* halFrames,
                                         uint32_t* dspFrames) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
 
     return af->getRenderPosition(halFrames, dspFrames, output);
 }
 
 uint32_t AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     uint32_t result = 0;
     if (af == 0) return result;
     if (ioHandle == AUDIO_IO_HANDLE_NONE) return result;
@@ -485,46 +557,46 @@
 
 audio_unique_id_t AudioSystem::newAudioUniqueId(audio_unique_id_use_t use) {
     // Must not use AF as IDs will re-roll on audioserver restart, b/130369529.
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return AUDIO_UNIQUE_ID_ALLOCATE;
     return af->newAudioUniqueId(use);
 }
 
 void AudioSystem::acquireAudioSessionId(audio_session_t audioSession, pid_t pid, uid_t uid) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af != 0) {
         af->acquireAudioSessionId(audioSession, pid, uid);
     }
 }
 
 void AudioSystem::releaseAudioSessionId(audio_session_t audioSession, pid_t pid) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af != 0) {
         af->releaseAudioSessionId(audioSession, pid);
     }
 }
 
 audio_hw_sync_t AudioSystem::getAudioHwSyncForSession(audio_session_t sessionId) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return AUDIO_HW_SYNC_INVALID;
     return af->getAudioHwSyncForSession(sessionId);
 }
 
 status_t AudioSystem::systemReady() {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return NO_INIT;
     return af->systemReady();
 }
 
 status_t AudioSystem::audioPolicyReady() {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return NO_INIT;
     return af->audioPolicyReady();
 }
 
 status_t AudioSystem::getFrameCountHAL(audio_io_handle_t ioHandle,
                                        size_t* frameCount) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     sp<AudioIoDescriptor> desc = getIoDescriptor(ioHandle);
     if (desc == 0) {
@@ -546,7 +618,7 @@
 
 
 void AudioSystem::AudioFlingerClient::clearIoCache() {
-    Mutex::Autolock _l(mLock);
+    std::lock_guard _l(mMutex);
     mIoDescriptors.clear();
     mInBuffSize = 0;
     mInSamplingRate = 0;
@@ -555,14 +627,7 @@
 }
 
 void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who __unused) {
-    {
-        Mutex::Autolock _l(AudioSystem::gLock);
-        AudioSystem::gAudioFlinger.clear();
-    }
-
-    // clear output handles and stream to output map caches
-    clearIoCache();
-
+    gAudioFlingerServiceHandler.clearService();
     reportError(DEAD_OBJECT);
 
     ALOGW("AudioFlinger server died!");
@@ -584,7 +649,7 @@
     audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
     std::vector<sp<AudioDeviceCallback>> callbacksToCall;
     {
-        Mutex::Autolock _l(mLock);
+        std::lock_guard _l(mMutex);
         auto callbacks = std::map<audio_port_handle_t, wp<AudioDeviceCallback>>();
 
         switch (event) {
@@ -592,13 +657,10 @@
             case AUDIO_OUTPUT_REGISTERED:
             case AUDIO_INPUT_OPENED:
             case AUDIO_INPUT_REGISTERED: {
-                sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
-                if (oldDesc == 0) {
-                    mIoDescriptors.add(ioDesc->getIoHandle(), ioDesc);
-                } else {
+                if (sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle())) {
                     deviceId = oldDesc->getDeviceId();
-                    mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
                 }
+                mIoDescriptors[ioDesc->getIoHandle()] = ioDesc;
 
                 if (ioDesc->getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
                     deviceId = ioDesc->getDeviceId();
@@ -627,7 +689,7 @@
                 ALOGV("ioConfigChanged() %s %d closed",
                       event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->getIoHandle());
 
-                mIoDescriptors.removeItem(ioDesc->getIoHandle());
+                mIoDescriptors.erase(ioDesc->getIoHandle());
                 mAudioDeviceCallbacks.erase(ioDesc->getIoHandle());
             }
                 break;
@@ -643,7 +705,7 @@
                 }
 
                 deviceId = oldDesc->getDeviceId();
-                mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
+                mIoDescriptors[ioDesc->getIoHandle()] = ioDesc;
 
                 if (deviceId != ioDesc->getDeviceId()) {
                     deviceId = ioDesc->getDeviceId();
@@ -689,8 +751,8 @@
         }
     }
 
-    // Callbacks must be called without mLock held. May lead to dead lock if calling for
-    // example getRoutedDevice that updates the device and tries to acquire mLock.
+    // Callbacks must be called without mMutex held. May lead to dead lock if calling for
+    // example getRoutedDevice that updates the device and tries to acquire mMutex.
     for (auto cb  : callbacksToCall) {
         // If callbacksToCall is not empty, it implies ioDesc->getIoHandle() and deviceId are valid
         cb->onAudioDeviceUpdate(ioDesc->getIoHandle(), deviceId);
@@ -709,7 +771,7 @@
 
     std::vector<sp<SupportedLatencyModesCallback>> callbacks;
     {
-        Mutex::Autolock _l(mLock);
+        std::lock_guard _l(mMutex);
         for (auto callback : mSupportedLatencyModesCallbacks) {
             if (auto ref = callback.promote(); ref != nullptr) {
                 callbacks.push_back(ref);
@@ -726,11 +788,11 @@
 status_t AudioSystem::AudioFlingerClient::getInputBufferSize(
         uint32_t sampleRate, audio_format_t format,
         audio_channel_mask_t channelMask, size_t* buffSize) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) {
         return PERMISSION_DENIED;
     }
-    Mutex::Autolock _l(mLock);
+    std::lock_guard _l(mMutex);
     // Do we have a stale mInBuffSize or are we requesting the input buffer size for new values
     if ((mInBuffSize == 0) || (sampleRate != mInSamplingRate) || (format != mInFormat)
         || (channelMask != mInChannelMask)) {
@@ -756,16 +818,15 @@
 
 sp<AudioIoDescriptor>
 AudioSystem::AudioFlingerClient::getIoDescriptor_l(audio_io_handle_t ioHandle) {
-    sp<AudioIoDescriptor> desc;
-    ssize_t index = mIoDescriptors.indexOfKey(ioHandle);
-    if (index >= 0) {
-        desc = mIoDescriptors.valueAt(index);
+    if (const auto it = mIoDescriptors.find(ioHandle);
+        it != mIoDescriptors.end()) {
+        return it->second;
     }
-    return desc;
+    return {};
 }
 
 sp<AudioIoDescriptor> AudioSystem::AudioFlingerClient::getIoDescriptor(audio_io_handle_t ioHandle) {
-    Mutex::Autolock _l(mLock);
+    std::lock_guard _l(mMutex);
     return getIoDescriptor_l(ioHandle);
 }
 
@@ -773,7 +834,7 @@
         const wp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo,
         audio_port_handle_t portId) {
     ALOGV("%s audioIo %d portId %d", __func__, audioIo, portId);
-    Mutex::Autolock _l(mLock);
+    std::lock_guard _l(mMutex);
     auto& callbacks = mAudioDeviceCallbacks.emplace(
             audioIo,
             std::map<audio_port_handle_t, wp<AudioDeviceCallback>>()).first->second;
@@ -788,7 +849,7 @@
         const wp<AudioDeviceCallback>& callback __unused, audio_io_handle_t audioIo,
         audio_port_handle_t portId) {
     ALOGV("%s audioIo %d portId %d", __func__, audioIo, portId);
-    Mutex::Autolock _l(mLock);
+    std::lock_guard _l(mMutex);
     auto it = mAudioDeviceCallbacks.find(audioIo);
     if (it == mAudioDeviceCallbacks.end()) {
         return INVALID_OPERATION;
@@ -804,7 +865,7 @@
 
 status_t AudioSystem::AudioFlingerClient::addSupportedLatencyModesCallback(
         const sp<SupportedLatencyModesCallback>& callback) {
-    Mutex::Autolock _l(mLock);
+    std::lock_guard _l(mMutex);
     if (std::find(mSupportedLatencyModesCallbacks.begin(),
                   mSupportedLatencyModesCallbacks.end(),
                   callback) != mSupportedLatencyModesCallbacks.end()) {
@@ -816,7 +877,7 @@
 
 status_t AudioSystem::AudioFlingerClient::removeSupportedLatencyModesCallback(
         const sp<SupportedLatencyModesCallback>& callback) {
-    Mutex::Autolock _l(mLock);
+    std::lock_guard _l(mMutex);
     auto it = std::find(mSupportedLatencyModesCallbacks.begin(),
                                  mSupportedLatencyModesCallbacks.end(),
                                  callback);
@@ -828,93 +889,78 @@
 }
 
 /* static */ uintptr_t AudioSystem::addErrorCallback(audio_error_callback cb) {
-    Mutex::Autolock _l(gLockErrorCallbacks);
+    std::lock_guard _l(gErrorCallbacksMutex);
     gAudioErrorCallbacks.insert(cb);
     return reinterpret_cast<uintptr_t>(cb);
 }
 
 /* static */ void AudioSystem::removeErrorCallback(uintptr_t cb) {
-    Mutex::Autolock _l(gLockErrorCallbacks);
+    std::lock_guard _l(gErrorCallbacksMutex);
     gAudioErrorCallbacks.erase(reinterpret_cast<audio_error_callback>(cb));
 }
 
 /* static */ void AudioSystem::reportError(status_t err) {
-    Mutex::Autolock _l(gLockErrorCallbacks);
+    std::lock_guard _l(gErrorCallbacksMutex);
     for (auto callback : gAudioErrorCallbacks) {
         callback(err);
     }
 }
 
 /*static*/ void AudioSystem::setDynPolicyCallback(dynamic_policy_callback cb) {
-    Mutex::Autolock _l(gLock);
+    std::lock_guard _l(gMutex);
     gDynPolicyCallback = cb;
 }
 
 /*static*/ void AudioSystem::setRecordConfigCallback(record_config_callback cb) {
-    Mutex::Autolock _l(gLock);
+    std::lock_guard _l(gMutex);
     gRecordConfigCallback = cb;
 }
 
 /*static*/ void AudioSystem::setRoutingCallback(routing_callback cb) {
-    Mutex::Autolock _l(gLock);
+    std::lock_guard _l(gMutex);
     gRoutingCallback = cb;
 }
 
 /*static*/ void AudioSystem::setVolInitReqCallback(vol_range_init_req_callback cb) {
-    Mutex::Autolock _l(gLock);
+    std::lock_guard _l(gMutex);
     gVolRangeInitReqCallback = cb;
 }
 
-// client singleton for AudioPolicyService binder interface
-// protected by gLockAPS
-sp<IAudioPolicyService> AudioSystem::gAudioPolicyService;
-sp<AudioSystem::AudioPolicyServiceClient> AudioSystem::gAudioPolicyServiceClient;
-
-
-// establish binder interface to AudioPolicy service
-const sp<IAudioPolicyService> AudioSystem::get_audio_policy_service() {
-    sp<IAudioPolicyService> ap;
-    sp<AudioPolicyServiceClient> apc;
-    {
-        Mutex::Autolock _l(gLockAPS);
-        if (gAudioPolicyService == 0) {
-            sp<IServiceManager> sm = defaultServiceManager();
-            sp<IBinder> binder = sm->waitForService(String16("media.audio_policy"));
-            if (binder == nullptr) {
-                return nullptr;
-            }
-            if (gAudioPolicyServiceClient == NULL) {
-                gAudioPolicyServiceClient = new AudioPolicyServiceClient();
-            }
-            binder->linkToDeath(gAudioPolicyServiceClient);
-            gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
-            LOG_ALWAYS_FATAL_IF(gAudioPolicyService == 0);
-            apc = gAudioPolicyServiceClient;
-            // Make sure callbacks can be received by gAudioPolicyServiceClient
-            ProcessState::self()->startThreadPool();
-        }
-        ap = gAudioPolicyService;
-    }
-    if (apc != 0) {
-        int64_t token = IPCThreadState::self()->clearCallingIdentity();
+struct AudioPolicyTraits {
+    static void onServiceCreate(const sp<IAudioPolicyService>& ap,
+            const sp<AudioSystem::AudioPolicyServiceClient>& apc) {
+        const int64_t token = IPCThreadState::self()->clearCallingIdentity();
         ap->registerClient(apc);
         ap->setAudioPortCallbacksEnabled(apc->isAudioPortCbEnabled());
         ap->setAudioVolumeGroupCallbacksEnabled(apc->isAudioVolumeGroupCbEnabled());
         IPCThreadState::self()->restoreCallingIdentity(token);
     }
 
-    return ap;
+    static void onClearService(const sp<AudioSystem::AudioPolicyServiceClient>&) {}
+
+    static constexpr const char *SERVICE_NAME = "media.audio_policy";
+};
+
+[[clang::no_destroy]] static constinit ServiceHandler<IAudioPolicyService,
+        AudioSystem::AudioPolicyServiceClient, IAudioPolicyService,
+        AudioPolicyTraits> gAudioPolicyServiceHandler;
+
+status_t AudioSystem::setLocalAudioPolicyService(const sp<IAudioPolicyService>& aps) {
+    return gAudioPolicyServiceHandler.setLocalService(aps);
+}
+
+sp<IAudioPolicyService> AudioSystem::get_audio_policy_service() {
+    return gAudioPolicyServiceHandler.getService();
 }
 
 void AudioSystem::clearAudioPolicyService() {
-    Mutex::Autolock _l(gLockAPS);
-    gAudioPolicyService.clear();
+    gAudioPolicyServiceHandler.clearService();
 }
 
 // ---------------------------------------------------------------------------
 
 void AudioSystem::onNewAudioModulesAvailable() {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return;
     aps->onNewAudioModulesAvailable();
 }
@@ -922,7 +968,7 @@
 status_t AudioSystem::setDeviceConnectionState(audio_policy_dev_state_t state,
                                                const android::media::audio::common::AudioPort& port,
                                                audio_format_t encodedFormat) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
 
     if (aps == 0) return PERMISSION_DENIED;
 
@@ -937,7 +983,7 @@
 
 audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
                                                                const char* device_address) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
 
     auto result = [&]() -> ConversionResult<audio_policy_dev_state_t> {
@@ -957,7 +1003,7 @@
                                                const char* device_address,
                                                const char* device_name,
                                                audio_format_t encodedFormat) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     const char* address = "";
     const char* name = "";
 
@@ -980,7 +1026,7 @@
 
 status_t AudioSystem::setPhoneState(audio_mode_t state, uid_t uid) {
     if (uint32_t(state) >= AUDIO_MODE_CNT) return BAD_VALUE;
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     return statusTFromBinderStatus(aps->setPhoneState(
@@ -990,7 +1036,7 @@
 
 status_t
 AudioSystem::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     return statusTFromBinderStatus(
@@ -1003,7 +1049,7 @@
 }
 
 audio_policy_forced_cfg_t AudioSystem::getForceUse(audio_policy_force_use_t usage) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return AUDIO_POLICY_FORCE_NONE;
 
     auto result = [&]() -> ConversionResult<audio_policy_forced_cfg_t> {
@@ -1020,7 +1066,7 @@
 
 
 audio_io_handle_t AudioSystem::getOutput(audio_stream_type_t stream) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return AUDIO_IO_HANDLE_NONE;
 
     auto result = [&]() -> ConversionResult<audio_io_handle_t> {
@@ -1068,7 +1114,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return NO_INIT;
 
     media::audio::common::AudioAttributes attrAidl = VALUE_OR_RETURN_STATUS(
@@ -1117,7 +1163,7 @@
 }
 
 status_t AudioSystem::startOutput(audio_port_handle_t portId) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t portIdAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(portId));
@@ -1125,7 +1171,7 @@
 }
 
 status_t AudioSystem::stopOutput(audio_port_handle_t portId) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t portIdAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(portId));
@@ -1133,7 +1179,7 @@
 }
 
 void AudioSystem::releaseOutput(audio_port_handle_t portId) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return;
 
     auto status = [&]() -> status_t {
@@ -1173,7 +1219,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return NO_INIT;
 
     media::audio::common::AudioAttributes attrAidl = VALUE_OR_RETURN_STATUS(
@@ -1207,7 +1253,7 @@
 }
 
 status_t AudioSystem::startInput(audio_port_handle_t portId) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t portIdAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(portId));
@@ -1215,7 +1261,7 @@
 }
 
 status_t AudioSystem::stopInput(audio_port_handle_t portId) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t portIdAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(portId));
@@ -1223,7 +1269,7 @@
 }
 
 void AudioSystem::releaseInput(audio_port_handle_t portId) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return;
 
     auto status = [&]() -> status_t {
@@ -1240,7 +1286,7 @@
 status_t AudioSystem::initStreamVolume(audio_stream_type_t stream,
                                        int indexMin,
                                        int indexMax) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
@@ -1261,7 +1307,7 @@
 status_t AudioSystem::setStreamVolumeIndex(audio_stream_type_t stream,
                                            int index,
                                            audio_devices_t device) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
@@ -1276,7 +1322,7 @@
 status_t AudioSystem::getStreamVolumeIndex(audio_stream_type_t stream,
                                            int* index,
                                            audio_devices_t device) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
@@ -1295,7 +1341,7 @@
 status_t AudioSystem::setVolumeIndexForAttributes(const audio_attributes_t& attr,
                                                   int index,
                                                   audio_devices_t device) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::audio::common::AudioAttributes attrAidl = VALUE_OR_RETURN_STATUS(
@@ -1310,7 +1356,7 @@
 status_t AudioSystem::getVolumeIndexForAttributes(const audio_attributes_t& attr,
                                                   int& index,
                                                   audio_devices_t device) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::audio::common::AudioAttributes attrAidl = VALUE_OR_RETURN_STATUS(
@@ -1325,7 +1371,7 @@
 }
 
 status_t AudioSystem::getMaxVolumeIndexForAttributes(const audio_attributes_t& attr, int& index) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::audio::common::AudioAttributes attrAidl = VALUE_OR_RETURN_STATUS(
@@ -1338,7 +1384,7 @@
 }
 
 status_t AudioSystem::getMinVolumeIndexForAttributes(const audio_attributes_t& attr, int& index) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::audio::common::AudioAttributes attrAidl = VALUE_OR_RETURN_STATUS(
@@ -1351,7 +1397,7 @@
 }
 
 product_strategy_t AudioSystem::getStrategyForStream(audio_stream_type_t stream) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PRODUCT_STRATEGY_NONE;
 
     auto result = [&]() -> ConversionResult<product_strategy_t> {
@@ -1371,7 +1417,7 @@
     if (devices == nullptr) {
         return BAD_VALUE;
     }
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::audio::common::AudioAttributes aaAidl = VALUE_OR_RETURN_STATUS(
@@ -1387,7 +1433,7 @@
 }
 
 audio_io_handle_t AudioSystem::getOutputForEffect(const effect_descriptor_t* desc) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     // FIXME change return type to status_t, and return PERMISSION_DENIED here
     if (aps == 0) return AUDIO_IO_HANDLE_NONE;
 
@@ -1408,7 +1454,7 @@
                                      product_strategy_t strategy,
                                      audio_session_t session,
                                      int id) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::EffectDescriptor descAidl = VALUE_OR_RETURN_STATUS(
@@ -1422,7 +1468,7 @@
 }
 
 status_t AudioSystem::unregisterEffect(int id) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t idAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(id));
@@ -1431,7 +1477,7 @@
 }
 
 status_t AudioSystem::setEffectEnabled(int id, bool enabled) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t idAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(id));
@@ -1440,7 +1486,7 @@
 }
 
 status_t AudioSystem::moveEffectsToIo(const std::vector<int>& ids, audio_io_handle_t io) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     std::vector<int32_t> idsAidl = VALUE_OR_RETURN_STATUS(
@@ -1450,7 +1496,7 @@
 }
 
 status_t AudioSystem::isStreamActive(audio_stream_type_t stream, bool* state, uint32_t inPastMs) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
@@ -1464,7 +1510,7 @@
 
 status_t AudioSystem::isStreamActiveRemotely(audio_stream_type_t stream, bool* state,
                                              uint32_t inPastMs) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
@@ -1477,7 +1523,7 @@
 }
 
 status_t AudioSystem::isSourceActive(audio_source_t stream, bool* state) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     if (state == NULL) return BAD_VALUE;
 
@@ -1489,19 +1535,19 @@
 }
 
 uint32_t AudioSystem::getPrimaryOutputSamplingRate() {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return 0;
     return af->getPrimaryOutputSamplingRate();
 }
 
 size_t AudioSystem::getPrimaryOutputFrameCount() {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return 0;
     return af->getPrimaryOutputFrameCount();
 }
 
 status_t AudioSystem::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setLowRamDevice(isLowRamDevice, totalMemory);
 }
@@ -1509,18 +1555,12 @@
 void AudioSystem::clearAudioConfigCache() {
     // called by restoreTrack_l(), which needs new IAudioFlinger and IAudioPolicyService instances
     ALOGV("clearAudioConfigCache()");
-    {
-        Mutex::Autolock _l(gLock);
-        if (gAudioFlingerClient != 0) {
-            gAudioFlingerClient->clearIoCache();
-        }
-        gAudioFlinger.clear();
-    }
+    gAudioFlingerServiceHandler.clearService();
     clearAudioPolicyService();
 }
 
 status_t AudioSystem::setSupportedSystemUsages(const std::vector<audio_usage_t>& systemUsages) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == nullptr) return PERMISSION_DENIED;
 
     std::vector<AudioUsage> systemUsagesAidl = VALUE_OR_RETURN_STATUS(
@@ -1530,7 +1570,7 @@
 }
 
 status_t AudioSystem::setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t capturePolicy) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == nullptr) return PERMISSION_DENIED;
 
     int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
@@ -1541,7 +1581,7 @@
 
 audio_offload_mode_t AudioSystem::getOffloadSupport(const audio_offload_info_t& info) {
     ALOGV("%s", __func__);
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return AUDIO_OFFLOAD_NOT_SUPPORTED;
 
     auto result = [&]() -> ConversionResult<audio_offload_mode_t> {
@@ -1566,7 +1606,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::AudioPortRole roleAidl = VALUE_OR_RETURN_STATUS(
@@ -1590,7 +1630,7 @@
 status_t AudioSystem::listDeclaredDevicePorts(media::AudioPortRole role,
                                               std::vector<media::AudioPortFw>* result) {
     if (result == nullptr) return BAD_VALUE;
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(aps->listDeclaredDevicePorts(role, result)));
     return OK;
@@ -1600,7 +1640,7 @@
     if (port == nullptr) {
         return BAD_VALUE;
     }
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::AudioPortFw portAidl;
@@ -1616,7 +1656,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::AudioPatchFw patchAidl = VALUE_OR_RETURN_STATUS(
@@ -1629,7 +1669,7 @@
 }
 
 status_t AudioSystem::releaseAudioPatch(audio_patch_handle_t handle) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t handleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_handle_t_int32_t(handle));
@@ -1644,7 +1684,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
 
@@ -1667,7 +1707,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::AudioPortConfigFw configAidl = VALUE_OR_RETURN_STATUS(
@@ -1676,14 +1716,13 @@
 }
 
 status_t AudioSystem::addAudioPortCallback(const sp<AudioPortCallback>& callback) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
+    const auto apc = gAudioPolicyServiceHandler.getClient();
+    if (apc == nullptr) return NO_INIT;
 
-    Mutex::Autolock _l(gLockAPS);
-    if (gAudioPolicyServiceClient == 0) {
-        return NO_INIT;
-    }
-    int ret = gAudioPolicyServiceClient->addAudioPortCallback(callback);
+    std::lock_guard _l(gApsCallbackMutex);
+    const int ret = apc->addAudioPortCallback(callback);
     if (ret == 1) {
         aps->setAudioPortCallbacksEnabled(true);
     }
@@ -1692,14 +1731,13 @@
 
 /*static*/
 status_t AudioSystem::removeAudioPortCallback(const sp<AudioPortCallback>& callback) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
+    const auto apc = gAudioPolicyServiceHandler.getClient();
+    if (apc == nullptr) return NO_INIT;
 
-    Mutex::Autolock _l(gLockAPS);
-    if (gAudioPolicyServiceClient == 0) {
-        return NO_INIT;
-    }
-    int ret = gAudioPolicyServiceClient->removeAudioPortCallback(callback);
+    std::lock_guard _l(gApsCallbackMutex);
+    const int ret = apc->removeAudioPortCallback(callback);
     if (ret == 0) {
         aps->setAudioPortCallbacksEnabled(false);
     }
@@ -1707,14 +1745,13 @@
 }
 
 status_t AudioSystem::addAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
+    const auto apc = gAudioPolicyServiceHandler.getClient();
+    if (apc == nullptr) return NO_INIT;
 
-    Mutex::Autolock _l(gLockAPS);
-    if (gAudioPolicyServiceClient == 0) {
-        return NO_INIT;
-    }
-    int ret = gAudioPolicyServiceClient->addAudioVolumeGroupCallback(callback);
+    std::lock_guard _l(gApsCallbackMutex);
+    const int ret = apc->addAudioVolumeGroupCallback(callback);
     if (ret == 1) {
         aps->setAudioVolumeGroupCallbacksEnabled(true);
     }
@@ -1722,14 +1759,13 @@
 }
 
 status_t AudioSystem::removeAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
+    const auto apc = gAudioPolicyServiceHandler.getClient();
+    if (apc == nullptr) return NO_INIT;
 
-    Mutex::Autolock _l(gLockAPS);
-    if (gAudioPolicyServiceClient == 0) {
-        return NO_INIT;
-    }
-    int ret = gAudioPolicyServiceClient->removeAudioVolumeGroupCallback(callback);
+    std::lock_guard _l(gApsCallbackMutex);
+    const int ret = apc->removeAudioVolumeGroupCallback(callback);
     if (ret == 0) {
         aps->setAudioVolumeGroupCallbacksEnabled(false);
     }
@@ -1745,7 +1781,7 @@
     }
     status_t status = afc->addAudioDeviceCallback(callback, audioIo, portId);
     if (status == NO_ERROR) {
-        const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+        const sp<IAudioFlinger> af = get_audio_flinger();
         if (af != 0) {
             af->registerClient(afc);
         }
@@ -1782,7 +1818,7 @@
 }
 
 audio_port_handle_t AudioSystem::getDeviceIdForIo(audio_io_handle_t audioIo) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     const sp<AudioIoDescriptor> desc = getIoDescriptor(audioIo);
     if (desc == 0) {
@@ -1797,7 +1833,7 @@
     if (session == nullptr || ioHandle == nullptr || device == nullptr) {
         return BAD_VALUE;
     }
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::SoundTriggerSession retAidl;
@@ -1811,7 +1847,7 @@
 }
 
 status_t AudioSystem::releaseSoundTriggerSession(audio_session_t session) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
@@ -1819,7 +1855,7 @@
 }
 
 audio_mode_t AudioSystem::getPhoneState() {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return AUDIO_MODE_INVALID;
 
     auto result = [&]() -> ConversionResult<audio_mode_t> {
@@ -1832,7 +1868,7 @@
 }
 
 status_t AudioSystem::registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     size_t mixesSize = std::min(mixes.size(), size_t{MAX_MIXES_PER_POLICY});
@@ -1843,10 +1879,29 @@
     return statusTFromBinderStatus(aps->registerPolicyMixes(mixesAidl, registration));
 }
 
+status_t AudioSystem::getRegisteredPolicyMixes(std::vector<AudioMix>& mixes) {
+    if (!audio_flags::audio_mix_test_api()) {
+        return INVALID_OPERATION;
+    }
+
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
+    if (aps == nullptr) return PERMISSION_DENIED;
+
+    std::vector<::android::media::AudioMix> aidlMixes;
+    Status status = aps->getRegisteredPolicyMixes(&aidlMixes);
+
+    for (const auto& aidlMix : aidlMixes) {
+        AudioMix mix = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioMix(aidlMix));
+        mixes.push_back(mix);
+    }
+
+    return statusTFromBinderStatus(status);
+}
+
 status_t AudioSystem::updatePolicyMixes(
         const std::vector<std::pair<AudioMix, std::vector<AudioMixMatchCriterion>>>&
                 mixesWithUpdates) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     std::vector<media::AudioMixUpdate> updatesAidl;
@@ -1865,7 +1920,7 @@
 }
 
 status_t AudioSystem::setUidDeviceAffinities(uid_t uid, const AudioDeviceTypeAddrVector& devices) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
@@ -1876,7 +1931,7 @@
 }
 
 status_t AudioSystem::removeUidDeviceAffinities(uid_t uid) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
@@ -1885,7 +1940,7 @@
 
 status_t AudioSystem::setUserIdDeviceAffinities(int userId,
                                                 const AudioDeviceTypeAddrVector& devices) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t userIdAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(userId));
@@ -1897,7 +1952,7 @@
 }
 
 status_t AudioSystem::removeUserIdDeviceAffinities(int userId) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     int32_t userIdAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(userId));
     return statusTFromBinderStatus(aps->removeUserIdDeviceAffinities(userIdAidl));
@@ -1909,7 +1964,7 @@
     if (source == nullptr || attributes == nullptr || portId == nullptr) {
         return BAD_VALUE;
     }
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::AudioPortConfigFw sourceAidl = VALUE_OR_RETURN_STATUS(
@@ -1924,7 +1979,7 @@
 }
 
 status_t AudioSystem::stopAudioSource(audio_port_handle_t portId) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t portIdAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(portId));
@@ -1932,7 +1987,7 @@
 }
 
 status_t AudioSystem::setMasterMono(bool mono) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     return statusTFromBinderStatus(aps->setMasterMono(mono));
 }
@@ -1941,26 +1996,26 @@
     if (mono == nullptr) {
         return BAD_VALUE;
     }
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     return statusTFromBinderStatus(aps->getMasterMono(mono));
 }
 
 status_t AudioSystem::setMasterBalance(float balance) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setMasterBalance(balance);
 }
 
 status_t AudioSystem::getMasterBalance(float* balance) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->getMasterBalance(balance);
 }
 
 float
 AudioSystem::getStreamVolumeDB(audio_stream_type_t stream, int index, audio_devices_t device) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return NAN;
 
     auto result = [&]() -> ConversionResult<float> {
@@ -1978,13 +2033,13 @@
 }
 
 status_t AudioSystem::getMicrophones(std::vector<media::MicrophoneInfoFw>* microphones) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->getMicrophones(microphones);
 }
 
 status_t AudioSystem::setAudioHalPids(const std::vector<pid_t>& pids) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) return PERMISSION_DENIED;
     return af->setAudioHalPids(pids);
 }
@@ -1998,7 +2053,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     Int numSurroundFormatsAidl;
     numSurroundFormatsAidl.value =
@@ -2025,7 +2080,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     Int numSurroundFormatsAidl;
     numSurroundFormatsAidl.value =
@@ -2043,7 +2098,7 @@
 }
 
 status_t AudioSystem::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
@@ -2053,7 +2108,7 @@
 }
 
 status_t AudioSystem::setAssistantServicesUids(const std::vector<uid_t>& uids) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     std::vector<int32_t> uidsAidl = VALUE_OR_RETURN_STATUS(
@@ -2062,7 +2117,7 @@
 }
 
 status_t AudioSystem::setActiveAssistantServicesUids(const std::vector<uid_t>& activeUids) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     std::vector<int32_t> activeUidsAidl = VALUE_OR_RETURN_STATUS(
@@ -2071,7 +2126,7 @@
 }
 
 status_t AudioSystem::setA11yServicesUids(const std::vector<uid_t>& uids) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     std::vector<int32_t> uidsAidl = VALUE_OR_RETURN_STATUS(
@@ -2080,7 +2135,7 @@
 }
 
 status_t AudioSystem::setCurrentImeUid(uid_t uid) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
@@ -2088,7 +2143,7 @@
 }
 
 bool AudioSystem::isHapticPlaybackSupported() {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return false;
 
     auto result = [&]() -> ConversionResult<bool> {
@@ -2101,7 +2156,7 @@
 }
 
 bool AudioSystem::isUltrasoundSupported() {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return false;
 
     auto result = [&]() -> ConversionResult<bool> {
@@ -2119,7 +2174,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     std::vector<AudioFormatDescription> formatsAidl;
@@ -2135,7 +2190,7 @@
 }
 
 status_t AudioSystem::listAudioProductStrategies(AudioProductStrategyVector& strategies) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     std::vector<media::AudioProductStrategy> strategiesAidl;
@@ -2197,7 +2252,7 @@
 status_t AudioSystem::getProductStrategyFromAudioAttributes(const audio_attributes_t& aa,
                                                             product_strategy_t& productStrategy,
                                                             bool fallbackOnDefault) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::audio::common::AudioAttributes aaAidl = VALUE_OR_RETURN_STATUS(
@@ -2213,7 +2268,7 @@
 }
 
 status_t AudioSystem::listAudioVolumeGroups(AudioVolumeGroupVector& groups) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     std::vector<media::AudioVolumeGroup> groupsAidl;
@@ -2227,7 +2282,7 @@
 status_t AudioSystem::getVolumeGroupFromAudioAttributes(const audio_attributes_t &aa,
                                                         volume_group_t& volumeGroup,
                                                         bool fallbackOnDefault) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     media::audio::common::AudioAttributes aaAidl = VALUE_OR_RETURN_STATUS(
@@ -2240,13 +2295,13 @@
 }
 
 status_t AudioSystem::setRttEnabled(bool enabled) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
     return statusTFromBinderStatus(aps->setRttEnabled(enabled));
 }
 
 bool AudioSystem::isCallScreenModeSupported() {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) return false;
 
     auto result = [&]() -> ConversionResult<bool> {
@@ -2261,7 +2316,7 @@
 status_t AudioSystem::setDevicesRoleForStrategy(product_strategy_t strategy,
                                                 device_role_t role,
                                                 const AudioDeviceTypeAddrVector& devices) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2278,7 +2333,7 @@
 status_t AudioSystem::removeDevicesRoleForStrategy(product_strategy_t strategy,
                                                    device_role_t role,
                                                    const AudioDeviceTypeAddrVector& devices) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2294,7 +2349,7 @@
 
 status_t
 AudioSystem::clearDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2307,7 +2362,7 @@
 status_t AudioSystem::getDevicesForRoleAndStrategy(product_strategy_t strategy,
                                                    device_role_t role,
                                                    AudioDeviceTypeAddrVector& devices) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2325,7 +2380,7 @@
 status_t AudioSystem::setDevicesRoleForCapturePreset(audio_source_t audioSource,
                                                      device_role_t role,
                                                      const AudioDeviceTypeAddrVector& devices) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2343,7 +2398,7 @@
 status_t AudioSystem::addDevicesRoleForCapturePreset(audio_source_t audioSource,
                                                      device_role_t role,
                                                      const AudioDeviceTypeAddrVector& devices) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2359,7 +2414,7 @@
 
 status_t AudioSystem::removeDevicesRoleForCapturePreset(
         audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector& devices) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2375,7 +2430,7 @@
 
 status_t AudioSystem::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
                                                        device_role_t role) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2389,7 +2444,7 @@
 status_t AudioSystem::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
                                                         device_role_t role,
                                                         AudioDeviceTypeAddrVector& devices) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2407,7 +2462,7 @@
 
 status_t AudioSystem::getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
                                           sp<media::ISpatializer>* spatializer) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (spatializer == nullptr) {
         return BAD_VALUE;
     }
@@ -2426,7 +2481,7 @@
                                     const audio_config_t *config,
                                     const AudioDeviceTypeAddrVector &devices,
                                     bool *canBeSpatialized) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (canBeSpatialized == nullptr) {
         return BAD_VALUE;
     }
@@ -2450,7 +2505,7 @@
 
 status_t AudioSystem::getSoundDoseInterface(const sp<media::ISoundDoseCallback>& callback,
                                             sp<media::ISoundDose>* soundDose) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2469,7 +2524,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2493,7 +2548,7 @@
         return BAD_VALUE;
     }
 
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
@@ -2512,7 +2567,7 @@
 
 status_t AudioSystem::setRequestedLatencyMode(
             audio_io_handle_t output, audio_latency_mode_t mode) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2521,7 +2576,7 @@
 
 status_t AudioSystem::getSupportedLatencyModes(audio_io_handle_t output,
         std::vector<audio_latency_mode_t>* modes) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2529,7 +2584,7 @@
 }
 
 status_t AudioSystem::setBluetoothVariableLatencyEnabled(bool enabled) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2538,7 +2593,7 @@
 
 status_t AudioSystem::isBluetoothVariableLatencyEnabled(
         bool *enabled) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2547,7 +2602,7 @@
 
 status_t AudioSystem::supportsBluetoothVariableLatency(
         bool *support) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2555,7 +2610,7 @@
 }
 
 status_t AudioSystem::getAudioPolicyConfig(media::AudioPolicyConfig *config) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2583,15 +2638,15 @@
     }
 
     binder::Status setCaptureState(bool active) override {
-        Mutex::Autolock _l(gSoundTriggerCaptureStateListenerLock);
+        std::lock_guard _l(AudioSystem::gSoundTriggerMutex);
         mListener->onStateChanged(active);
         return binder::Status::ok();
     }
 
     void binderDied(const wp<IBinder>&) override {
-        Mutex::Autolock _l(gSoundTriggerCaptureStateListenerLock);
+        std::lock_guard _l(AudioSystem::gSoundTriggerMutex);
         mListener->onServiceDied();
-        gSoundTriggerCaptureStateListener = nullptr;
+        AudioSystem::gSoundTriggerCaptureStateListener = nullptr;
     }
 
 private:
@@ -2604,13 +2659,12 @@
         const sp<CaptureStateListener>& listener) {
     LOG_ALWAYS_FATAL_IF(listener == nullptr);
 
-    const sp<IAudioPolicyService>& aps =
-            AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
 
-    Mutex::Autolock _l(gSoundTriggerCaptureStateListenerLock);
+    std::lock_guard _l(AudioSystem::gSoundTriggerMutex);
     gSoundTriggerCaptureStateListener = new CaptureStateListenerImpl(aps, listener);
     gSoundTriggerCaptureStateListener->init();
 
@@ -2619,7 +2673,7 @@
 
 status_t AudioSystem::setVibratorInfos(
         const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2628,7 +2682,7 @@
 
 status_t AudioSystem::getMmapPolicyInfo(
         AudioMMapPolicyType policyType, std::vector<AudioMMapPolicyInfo> *policyInfos) {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2636,7 +2690,7 @@
 }
 
 int32_t AudioSystem::getAAudioMixerBurstCount() {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2644,7 +2698,7 @@
 }
 
 int32_t AudioSystem::getAAudioHardwareBurstMinUsec() {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    const sp<IAudioFlinger> af = get_audio_flinger();
     if (af == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2653,7 +2707,7 @@
 
 status_t AudioSystem::getSupportedMixerAttributes(
         audio_port_handle_t portId, std::vector<audio_mixer_attributes_t> *mixerAttrs) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2673,7 +2727,7 @@
                                                   audio_port_handle_t portId,
                                                   uid_t uid,
                                                   const audio_mixer_attributes_t *mixerAttr) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2693,7 +2747,7 @@
         const audio_attributes_t *attr,
         audio_port_handle_t portId,
         std::optional<audio_mixer_attributes_t> *mixerAttr) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2716,7 +2770,7 @@
 status_t AudioSystem::clearPreferredMixerAttributes(const audio_attributes_t *attr,
                                                     audio_port_handle_t portId,
                                                     uid_t uid) {
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    const sp<IAudioPolicyService> aps = get_audio_policy_service();
     if (aps == nullptr) {
         return PERMISSION_DENIED;
     }
@@ -2733,45 +2787,28 @@
 
 int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
         const sp<AudioPortCallback>& callback) {
-    Mutex::Autolock _l(mLock);
-    for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
-        if (mAudioPortCallbacks[i] == callback) {
-            return -1;
-        }
-    }
-    mAudioPortCallbacks.add(callback);
-    return mAudioPortCallbacks.size();
+    std::lock_guard _l(mMutex);
+    return mAudioPortCallbacks.insert(callback).second ? mAudioPortCallbacks.size() : -1;
 }
 
 int AudioSystem::AudioPolicyServiceClient::removeAudioPortCallback(
         const sp<AudioPortCallback>& callback) {
-    Mutex::Autolock _l(mLock);
-    size_t i;
-    for (i = 0; i < mAudioPortCallbacks.size(); i++) {
-        if (mAudioPortCallbacks[i] == callback) {
-            break;
-        }
-    }
-    if (i == mAudioPortCallbacks.size()) {
-        return -1;
-    }
-    mAudioPortCallbacks.removeAt(i);
-    return mAudioPortCallbacks.size();
+    std::lock_guard _l(mMutex);
+    return mAudioPortCallbacks.erase(callback) > 0 ? mAudioPortCallbacks.size() : -1;
 }
 
-
 Status AudioSystem::AudioPolicyServiceClient::onAudioPortListUpdate() {
-    Mutex::Autolock _l(mLock);
-    for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
-        mAudioPortCallbacks[i]->onAudioPortListUpdate();
+    std::lock_guard _l(mMutex);
+    for (const auto& callback : mAudioPortCallbacks) {
+        callback->onAudioPortListUpdate();
     }
     return Status::ok();
 }
 
 Status AudioSystem::AudioPolicyServiceClient::onAudioPatchListUpdate() {
-    Mutex::Autolock _l(mLock);
-    for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
-        mAudioPortCallbacks[i]->onAudioPatchListUpdate();
+    std::lock_guard _l(mMutex);
+    for (const auto& callback : mAudioPortCallbacks) {
+        callback->onAudioPatchListUpdate();
     }
     return Status::ok();
 }
@@ -2779,30 +2816,16 @@
 // ----------------------------------------------------------------------------
 int AudioSystem::AudioPolicyServiceClient::addAudioVolumeGroupCallback(
         const sp<AudioVolumeGroupCallback>& callback) {
-    Mutex::Autolock _l(mLock);
-    for (size_t i = 0; i < mAudioVolumeGroupCallback.size(); i++) {
-        if (mAudioVolumeGroupCallback[i] == callback) {
-            return -1;
-        }
-    }
-    mAudioVolumeGroupCallback.add(callback);
-    return mAudioVolumeGroupCallback.size();
+    std::lock_guard _l(mMutex);
+    return mAudioVolumeGroupCallbacks.insert(callback).second
+            ? mAudioVolumeGroupCallbacks.size() : -1;
 }
 
 int AudioSystem::AudioPolicyServiceClient::removeAudioVolumeGroupCallback(
         const sp<AudioVolumeGroupCallback>& callback) {
-    Mutex::Autolock _l(mLock);
-    size_t i;
-    for (i = 0; i < mAudioVolumeGroupCallback.size(); i++) {
-        if (mAudioVolumeGroupCallback[i] == callback) {
-            break;
-        }
-    }
-    if (i == mAudioVolumeGroupCallback.size()) {
-        return -1;
-    }
-    mAudioVolumeGroupCallback.removeAt(i);
-    return mAudioVolumeGroupCallback.size();
+    std::lock_guard _l(mMutex);
+    return mAudioVolumeGroupCallbacks.erase(callback) > 0
+            ? mAudioVolumeGroupCallbacks.size() : -1;
 }
 
 Status AudioSystem::AudioPolicyServiceClient::onAudioVolumeGroupChanged(int32_t group,
@@ -2811,9 +2834,9 @@
             aidl2legacy_int32_t_volume_group_t(group));
     int flagsLegacy = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(flags));
 
-    Mutex::Autolock _l(mLock);
-    for (size_t i = 0; i < mAudioVolumeGroupCallback.size(); i++) {
-        mAudioVolumeGroupCallback[i]->onAudioVolumeGroupChanged(groupLegacy, flagsLegacy);
+    std::lock_guard _l(mMutex);
+    for (const auto& callback : mAudioVolumeGroupCallbacks) {
+        callback->onAudioVolumeGroupChanged(groupLegacy, flagsLegacy);
     }
     return Status::ok();
 }
@@ -2827,7 +2850,7 @@
     int stateLegacy = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(state));
     dynamic_policy_callback cb = NULL;
     {
-        Mutex::Autolock _l(AudioSystem::gLock);
+        std::lock_guard _l(AudioSystem::gMutex);
         cb = gDynPolicyCallback;
     }
 
@@ -2848,7 +2871,7 @@
         AudioSource source) {
     record_config_callback cb = NULL;
     {
-        Mutex::Autolock _l(AudioSystem::gLock);
+        std::lock_guard _l(AudioSystem::gMutex);
         cb = gRecordConfigCallback;
     }
 
@@ -2881,7 +2904,7 @@
 Status AudioSystem::AudioPolicyServiceClient::onRoutingUpdated() {
     routing_callback cb = NULL;
     {
-        Mutex::Autolock _l(AudioSystem::gLock);
+        std::lock_guard _l(AudioSystem::gMutex);
         cb = gRoutingCallback;
     }
 
@@ -2894,7 +2917,7 @@
 Status AudioSystem::AudioPolicyServiceClient::onVolumeRangeInitRequest() {
     vol_range_init_req_callback cb = NULL;
     {
-        Mutex::Autolock _l(AudioSystem::gLock);
+        std::lock_guard _l(AudioSystem::gMutex);
         cb = gVolRangeInitReqCallback;
     }
 
@@ -2906,12 +2929,12 @@
 
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused) {
     {
-        Mutex::Autolock _l(mLock);
-        for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
-            mAudioPortCallbacks[i]->onServiceDied();
+        std::lock_guard _l(mMutex);
+        for (const auto& callback : mAudioPortCallbacks) {
+            callback->onServiceDied();
         }
-        for (size_t i = 0; i < mAudioVolumeGroupCallback.size(); i++) {
-            mAudioVolumeGroupCallback[i]->onServiceDied();
+        for (const auto& callback : mAudioVolumeGroupCallbacks) {
+            callback->onServiceDied();
         }
     }
     AudioSystem::clearAudioPolicyService();
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 2afe80c..98a1fde 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -2873,7 +2873,9 @@
     if (!forceRestore &&
         (isOffloadedOrDirect_l() || mDoNotReconnect)) {
         // FIXME re-creation of offloaded and direct tracks is not yet implemented;
-        // reconsider enabling for linear PCM encodings when position can be preserved.
+        // Disabled since (1) timestamp correction is not implemented for non-PCM and
+        // (2) we pre-empt existing direct tracks on resource constraints, so these tracks
+        // shouldn't reconnect.
         result = DEAD_OBJECT;
         return result;
     }
diff --git a/media/libaudioclient/PolicyAidlConversion.cpp b/media/libaudioclient/PolicyAidlConversion.cpp
index 60b08fa..a71bb18 100644
--- a/media/libaudioclient/PolicyAidlConversion.cpp
+++ b/media/libaudioclient/PolicyAidlConversion.cpp
@@ -242,6 +242,7 @@
     legacy.mCbFlags = VALUE_OR_RETURN(aidl2legacy_AudioMixCallbackFlag_uint32_t_mask(aidl.cbFlags));
     legacy.mAllowPrivilegedMediaPlaybackCapture = aidl.allowPrivilegedMediaPlaybackCapture;
     legacy.mVoiceCommunicationCaptureAllowed = aidl.voiceCommunicationCaptureAllowed;
+    legacy.mToken = aidl.mToken;
     return legacy;
 }
 
@@ -265,6 +266,7 @@
     aidl.cbFlags = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixCallbackFlag_mask(legacy.mCbFlags));
     aidl.allowPrivilegedMediaPlaybackCapture = legacy.mAllowPrivilegedMediaPlaybackCapture;
     aidl.voiceCommunicationCaptureAllowed = legacy.mVoiceCommunicationCaptureAllowed;
+    aidl.mToken = legacy.mToken;
     return aidl;
 }
 
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index 9c4ccb8..e213f08 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -872,6 +872,18 @@
                         { .duration = 0 , .waveFreq = { 0 }, 0, 0}},
           .repeatCnt = 3,
           .repeatSegment = 0 },                              // TONE_NZ_CALL_WAITING
+        { .segments = { { .duration = 500, .waveFreq = { 425, 0 }, 0, 0 },
+                        { .duration = 250, .waveFreq = { 0 }, 0, 0 },
+                        { .duration = 0 , .waveFreq = { 0 }, 0, 0}},
+          .repeatCnt = ToneGenerator::TONEGEN_INF,
+          .repeatSegment = 0 },                             // TONE_MY_CONGESTION
+        { .segments = { { .duration = 400, .waveFreq = { 425, 0 }, 0, 0 },
+                        { .duration = 200, .waveFreq = { 0 }, 0, 0 },
+                        { .duration = 400, .waveFreq = { 425, 0 }, 0, 0 },
+                        { .duration = 2000, .waveFreq = { 0 }, 0, 0},
+                        { .duration = 0, .waveFreq = { 0 }, 0, 0}},
+          .repeatCnt = ToneGenerator::TONEGEN_INF,
+          .repeatSegment = 0 }                              // TONE_MY_RINGTONE
 };
 
 // Used by ToneGenerator::getToneForRegion() to convert user specified supervisory tone type
@@ -976,6 +988,16 @@
             TONE_SUP_ERROR,               // TONE_SUP_ERROR
             TONE_NZ_CALL_WAITING,         // TONE_SUP_CALL_WAITING
             TONE_GB_RINGTONE              // TONE_SUP_RINGTONE
+        },
+        {   // MALAYSIA
+            TONE_SUP_DIAL,                // TONE_SUP_DIAL
+            TONE_SUP_BUSY,                // TONE_SUP_BUSY
+            TONE_MY_CONGESTION,           // TONE_SUP_CONGESTION
+            TONE_SUP_RADIO_ACK,           // TONE_SUP_RADIO_ACK
+            TONE_SUP_RADIO_NOTAVAIL,      // TONE_SUP_RADIO_NOTAVAIL
+            TONE_SUP_ERROR,               // TONE_SUP_ERROR
+            TONE_SUP_CALL_WAITING,        // TONE_SUP_CALL_WAITING
+            TONE_MY_RINGTONE              // TONE_SUP_RINGTONE
         }
 };
 
@@ -1055,6 +1077,8 @@
         mRegion = TAIWAN;
     } else if (strstr(value, "nz") != NULL) {
         mRegion = NZ;
+    } else if (strstr(value, "my") != NULL) {
+        mRegion = MY;
     } else {
         mRegion = CEPT;
     }
diff --git a/media/libaudioclient/aidl/android/media/AudioMix.aidl b/media/libaudioclient/aidl/android/media/AudioMix.aidl
index 88b0450..f0c561c 100644
--- a/media/libaudioclient/aidl/android/media/AudioMix.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMix.aidl
@@ -39,4 +39,6 @@
     boolean allowPrivilegedMediaPlaybackCapture;
     /** Indicates if the caller can capture voice communication output */
     boolean voiceCommunicationCaptureAllowed;
+    /** Identifies the owner of the AudioPolicy that this AudioMix belongs to */
+    IBinder mToken;
 }
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 52c8da0..633493c 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -263,6 +263,8 @@
 
     void registerPolicyMixes(in AudioMix[] mixes, boolean registration);
 
+    List<AudioMix> getRegisteredPolicyMixes();
+
     void updatePolicyMixes(in AudioMixUpdate[] updates);
 
     void setUidDeviceAffinities(int /* uid_t */ uid, in AudioDevice[] devices);
diff --git a/media/libaudioclient/aidl/fuzzer/Android.bp b/media/libaudioclient/aidl/fuzzer/Android.bp
index 6093933..02c5a3f 100644
--- a/media/libaudioclient/aidl/fuzzer/Android.bp
+++ b/media/libaudioclient/aidl/fuzzer/Android.bp
@@ -1,4 +1,8 @@
+package {
+    default_team: "trendy_team_media_framework_audio",
+}
+
 /*
  * Copyright (C) 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -71,7 +75,7 @@
         "libbinder_headers",
         "libmedia_headers",
     ],
-     fuzz_config: {
+    fuzz_config: {
         cc: [
             "android-media-fuzzing-reports@google.com",
         ],
@@ -90,6 +94,6 @@
     srcs: ["audioflinger_aidl_fuzzer.cpp"],
     defaults: [
         "libaudioclient_aidl_fuzzer_defaults",
-        "service_fuzzer_defaults"
+        "service_fuzzer_defaults",
     ],
 }
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
index fd3b0a8..f2ad91c 100644
--- a/media/libaudioclient/fuzzer/Android.bp
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -15,6 +15,7 @@
  */
 
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
index ec35e93..9e4ae54 100644
--- a/media/libaudioclient/include/media/AudioPolicy.h
+++ b/media/libaudioclient/include/media/AudioPolicy.h
@@ -18,6 +18,7 @@
 #ifndef ANDROID_AUDIO_POLICY_H
 #define ANDROID_AUDIO_POLICY_H
 
+#include <binder/IBinder.h>
 #include <binder/Parcel.h>
 #include <media/AudioDeviceTypeAddr.h>
 #include <system/audio.h>
@@ -127,6 +128,7 @@
     audio_devices_t mDeviceType;
     String8         mDeviceAddress;
     uint32_t        mCbFlags; // flags indicating which callbacks to use, see kCbFlag*
+    sp<IBinder>     mToken;
     /** Ignore the AUDIO_FLAG_NO_MEDIA_PROJECTION */
     bool            mAllowPrivilegedMediaPlaybackCapture = false;
     /** Indicates if the caller can capture voice communication output */
diff --git a/media/libaudioclient/include/media/AudioProductStrategy.h b/media/libaudioclient/include/media/AudioProductStrategy.h
index fcbb019..2505b11 100644
--- a/media/libaudioclient/include/media/AudioProductStrategy.h
+++ b/media/libaudioclient/include/media/AudioProductStrategy.h
@@ -58,11 +58,11 @@
      * @return {@code INVALID_SCORE} if not matching, {@code MATCH_ON_DEFAULT_SCORE} if matching
      * to default strategy, non zero positive score if matching a strategy.
      */
-    static int attributesMatchesScore(const audio_attributes_t refAttributes,
-                                      const audio_attributes_t clientAttritubes);
+    static int attributesMatchesScore(audio_attributes_t refAttributes,
+                                      audio_attributes_t clientAttritubes);
 
-    static bool attributesMatches(const audio_attributes_t refAttributes,
-                                      const audio_attributes_t clientAttritubes) {
+    static bool attributesMatches(audio_attributes_t refAttributes,
+                                  audio_attributes_t clientAttritubes) {
         return attributesMatchesScore(refAttributes, clientAttritubes) > 0;
     }
 
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index a1f7941..338534d 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -19,6 +19,7 @@
 
 #include <sys/types.h>
 
+#include <mutex>
 #include <set>
 #include <vector>
 
@@ -86,6 +87,7 @@
 typedef void (*routing_callback)();
 typedef void (*vol_range_init_req_callback)();
 
+class CaptureStateListenerImpl;
 class IAudioFlinger;
 class String8;
 
@@ -95,6 +97,13 @@
 
 class AudioSystem
 {
+    friend class AudioFlingerClient;
+    friend class AudioPolicyServiceClient;
+    friend class CaptureStateListenerImpl;
+    template <typename ServiceInterface, typename Client, typename AidlInterface,
+            typename ServiceTraits>
+    friend class ServiceHandler;
+
 public:
 
     // FIXME Declare in binder opcode order, similarly to IAudioFlinger.h and IAudioFlinger.cpp
@@ -177,8 +186,8 @@
     static status_t setLocalAudioFlinger(const sp<IAudioFlinger>& af);
 
     // helper function to obtain AudioFlinger service handle
-    static const sp<IAudioFlinger> get_audio_flinger();
-    static const sp<IAudioFlinger> get_audio_flinger_for_fuzzer();
+    static sp<IAudioFlinger> get_audio_flinger();
+    static sp<IAudioFlinger> get_audio_flinger_for_fuzzer();
 
     static float linearToLog(int volume);
     static int logToLinear(float volume);
@@ -402,7 +411,12 @@
     // and output configuration cache (gOutputs)
     static void clearAudioConfigCache();
 
-    static const sp<media::IAudioPolicyService> get_audio_policy_service();
+    // Sets a local AudioPolicyService interface to be used by AudioSystem.
+    // This is used by audioserver main() to allow client object initialization
+    // before exposing any interfaces to ServiceManager.
+    static status_t setLocalAudioPolicyService(const sp<media::IAudioPolicyService>& aps);
+
+    static sp<media::IAudioPolicyService> get_audio_policy_service();
     static void clearAudioPolicyService();
 
     // helpers for android.media.AudioManager.getProperty(), see description there for meaning
@@ -462,6 +476,8 @@
 
     static status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
 
+    static status_t getRegisteredPolicyMixes(std::vector<AudioMix>& mixes);
+
     static status_t updatePolicyMixes(
         const std::vector<
                 std::pair<AudioMix, std::vector<AudioMixMatchCriterion>>>& mixesWithUpdates);
@@ -774,23 +790,18 @@
 
     static int32_t getAAudioHardwareBurstMinUsec();
 
-private:
-
     class AudioFlingerClient: public IBinder::DeathRecipient, public media::BnAudioFlingerClient
     {
     public:
-        AudioFlingerClient() :
-            mInBuffSize(0), mInSamplingRate(0),
-            mInFormat(AUDIO_FORMAT_DEFAULT), mInChannelMask(AUDIO_CHANNEL_NONE) {
-        }
+        AudioFlingerClient() = default;
 
-        void clearIoCache();
+        void clearIoCache() EXCLUDES(mMutex);
         status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
-                                    audio_channel_mask_t channelMask, size_t* buffSize);
-        sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
+                audio_channel_mask_t channelMask, size_t* buffSize) EXCLUDES(mMutex);
+        sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle) EXCLUDES(mMutex);
 
         // DeathRecipient
-        virtual void binderDied(const wp<IBinder>& who);
+        void binderDied(const wp<IBinder>& who) final;
 
         // IAudioFlingerClient
 
@@ -798,61 +809,71 @@
         // values for output/input parameters up-to-date in client process
         binder::Status ioConfigChanged(
                 media::AudioIoConfigEvent event,
-                const media::AudioIoDescriptor& ioDesc) override;
+                const media::AudioIoDescriptor& ioDesc) final EXCLUDES(mMutex);
 
         binder::Status onSupportedLatencyModesChanged(
                 int output,
-                const std::vector<media::audio::common::AudioLatencyMode>& latencyModes) override;
+                const std::vector<media::audio::common::AudioLatencyMode>& latencyModes)
+                final EXCLUDES(mMutex);
 
         status_t addAudioDeviceCallback(const wp<AudioDeviceCallback>& callback,
-                                               audio_io_handle_t audioIo,
-                                               audio_port_handle_t portId);
+                audio_io_handle_t audioIo, audio_port_handle_t portId) EXCLUDES(mMutex);
         status_t removeAudioDeviceCallback(const wp<AudioDeviceCallback>& callback,
-                                           audio_io_handle_t audioIo,
-                                           audio_port_handle_t portId);
+                audio_io_handle_t audioIo, audio_port_handle_t portId) EXCLUDES(mMutex);
 
         status_t addSupportedLatencyModesCallback(
-                        const sp<SupportedLatencyModesCallback>& callback);
+                const sp<SupportedLatencyModesCallback>& callback) EXCLUDES(mMutex);
         status_t removeSupportedLatencyModesCallback(
-                        const sp<SupportedLatencyModesCallback>& callback);
+                const sp<SupportedLatencyModesCallback>& callback) EXCLUDES(mMutex);
 
-        audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
+        audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo) EXCLUDES(mMutex);
 
     private:
-        Mutex                               mLock;
-        DefaultKeyedVector<audio_io_handle_t, sp<AudioIoDescriptor> >   mIoDescriptors;
+        mutable std::mutex mMutex;
+        std::map<audio_io_handle_t, sp<AudioIoDescriptor>> mIoDescriptors GUARDED_BY(mMutex);
 
         std::map<audio_io_handle_t, std::map<audio_port_handle_t, wp<AudioDeviceCallback>>>
-                mAudioDeviceCallbacks;
+                mAudioDeviceCallbacks GUARDED_BY(mMutex);
 
         std::vector<wp<SupportedLatencyModesCallback>>
-                mSupportedLatencyModesCallbacks GUARDED_BY(mLock);
+                mSupportedLatencyModesCallbacks GUARDED_BY(mMutex);
 
         // cached values for recording getInputBufferSize() queries
-        size_t                              mInBuffSize;    // zero indicates cache is invalid
-        uint32_t                            mInSamplingRate;
-        audio_format_t                      mInFormat;
-        audio_channel_mask_t                mInChannelMask;
-        sp<AudioIoDescriptor> getIoDescriptor_l(audio_io_handle_t ioHandle);
+        size_t mInBuffSize GUARDED_BY(mMutex) = 0; // zero indicates cache is invalid
+        uint32_t mInSamplingRate GUARDED_BY(mMutex) = 0;
+        audio_format_t mInFormat GUARDED_BY(mMutex) = AUDIO_FORMAT_DEFAULT;
+        audio_channel_mask_t mInChannelMask GUARDED_BY(mMutex) = AUDIO_CHANNEL_NONE;
+
+        sp<AudioIoDescriptor> getIoDescriptor_l(audio_io_handle_t ioHandle) REQUIRES(mMutex);
     };
 
     class AudioPolicyServiceClient: public IBinder::DeathRecipient,
-                                    public media::BnAudioPolicyServiceClient
-    {
+                                    public media::BnAudioPolicyServiceClient {
     public:
-        AudioPolicyServiceClient() {
+        AudioPolicyServiceClient() = default;
+
+        int addAudioPortCallback(const sp<AudioPortCallback>& callback) EXCLUDES(mMutex);
+
+        int removeAudioPortCallback(const sp<AudioPortCallback>& callback) EXCLUDES(mMutex);
+
+        bool isAudioPortCbEnabled() const EXCLUDES(mMutex) {
+            std::lock_guard _l(mMutex);
+            return !mAudioPortCallbacks.empty();
         }
 
-        int addAudioPortCallback(const sp<AudioPortCallback>& callback);
-        int removeAudioPortCallback(const sp<AudioPortCallback>& callback);
-        bool isAudioPortCbEnabled() const { return (mAudioPortCallbacks.size() != 0); }
+        int addAudioVolumeGroupCallback(
+                const sp<AudioVolumeGroupCallback>& callback) EXCLUDES(mMutex);
 
-        int addAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
-        int removeAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
-        bool isAudioVolumeGroupCbEnabled() const { return (mAudioVolumeGroupCallback.size() != 0); }
+        int removeAudioVolumeGroupCallback(
+                const sp<AudioVolumeGroupCallback>& callback) EXCLUDES(mMutex);
+
+        bool isAudioVolumeGroupCbEnabled() const EXCLUDES(mMutex) {
+            std::lock_guard _l(mMutex);
+            return !mAudioVolumeGroupCallbacks.empty();
+        }
 
         // DeathRecipient
-        virtual void binderDied(const wp<IBinder>& who);
+        void binderDied(const wp<IBinder>& who) final;
 
         // IAudioPolicyServiceClient
         binder::Status onAudioVolumeGroupChanged(int32_t group, int32_t flags) override;
@@ -873,43 +894,36 @@
         binder::Status onVolumeRangeInitRequest();
 
     private:
-        Mutex                               mLock;
-        Vector <sp <AudioPortCallback> >    mAudioPortCallbacks;
-        Vector <sp <AudioVolumeGroupCallback> > mAudioVolumeGroupCallback;
+        mutable std::mutex mMutex;
+        std::set<sp<AudioPortCallback>> mAudioPortCallbacks GUARDED_BY(mMutex);
+        std::set<sp<AudioVolumeGroupCallback>> mAudioVolumeGroupCallbacks GUARDED_BY(mMutex);
     };
 
+private:
+
     static audio_io_handle_t getOutput(audio_stream_type_t stream);
-    static const sp<AudioFlingerClient> getAudioFlingerClient();
+    static sp<AudioFlingerClient> getAudioFlingerClient();
     static sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
-    static const sp<IAudioFlinger> getAudioFlingerImpl(bool canStartThreadPool);
 
     // Invokes all registered error callbacks with the given error code.
     static void reportError(status_t err);
 
-    static sp<AudioFlingerClient> gAudioFlingerClient;
-    static sp<AudioPolicyServiceClient> gAudioPolicyServiceClient;
-    friend class AudioFlingerClient;
-    friend class AudioPolicyServiceClient;
+    [[clang::no_destroy]] static std::mutex gMutex;
+    static dynamic_policy_callback gDynPolicyCallback GUARDED_BY(gMutex);
+    static record_config_callback gRecordConfigCallback GUARDED_BY(gMutex);
+    static routing_callback gRoutingCallback GUARDED_BY(gMutex);
+    static vol_range_init_req_callback gVolRangeInitReqCallback GUARDED_BY(gMutex);
 
-    static Mutex gLock;      // protects gAudioFlinger
-    static Mutex gLockErrorCallbacks;      // protects gAudioErrorCallbacks
-    static Mutex gLockAPS;   // protects gAudioPolicyService and gAudioPolicyServiceClient
-    static sp<IAudioFlinger> gAudioFlinger;
-    static std::set<audio_error_callback> gAudioErrorCallbacks;
-    static dynamic_policy_callback gDynPolicyCallback;
-    static record_config_callback gRecordConfigCallback;
-    static routing_callback gRoutingCallback;
-    static vol_range_init_req_callback gVolRangeInitReqCallback;
+    [[clang::no_destroy]] static std::mutex gApsCallbackMutex;
+    [[clang::no_destroy]] static std::mutex gErrorCallbacksMutex;
+    [[clang::no_destroy]] static std::set<audio_error_callback> gAudioErrorCallbacks
+            GUARDED_BY(gErrorCallbacksMutex);
 
-    static size_t gInBuffSize;
-    // previous parameters for recording buffer size queries
-    static uint32_t gPrevInSamplingRate;
-    static audio_format_t gPrevInFormat;
-    static audio_channel_mask_t gPrevInChannelMask;
-
-    static sp<media::IAudioPolicyService> gAudioPolicyService;
+    [[clang::no_destroy]] static std::mutex gSoundTriggerMutex;
+    [[clang::no_destroy]] static sp<CaptureStateListenerImpl> gSoundTriggerCaptureStateListener
+            GUARDED_BY(gSoundTriggerMutex);
 };
 
-};  // namespace android
+}  // namespace android
 
 #endif  /*ANDROID_AUDIOSYSTEM_H_*/
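The AudioSystem.h changes above replace the legacy Mutex/gLock pattern with std::mutex plus clang thread-safety annotations, and mark the remaining process-wide statics [[clang::no_destroy]]. As a reference for how those annotations fit together, here is a minimal sketch (not part of the change; the Cache class and its members are illustrative) using the same EXCLUDES/REQUIRES/GUARDED_BY macros from <android-base/thread_annotations.h> that the tree already uses:

    #include <map>
    #include <mutex>

    #include <android-base/thread_annotations.h>

    class Cache {
    public:
        // Takes mMutex itself, so callers must not already hold it.
        void set(int key, int value) EXCLUDES(mMutex) {
            std::lock_guard _l(mMutex);
            mValues[key] = value;
        }

        int get(int key) const EXCLUDES(mMutex) {
            std::lock_guard _l(mMutex);
            return getLocked(key);
        }

    private:
        // "_l"-style helper: the caller must already hold mMutex, and the
        // analysis rejects any call site that does not.
        int getLocked(int key) const REQUIRES(mMutex) {
            const auto it = mValues.find(key);
            return it != mValues.end() ? it->second : -1;
        }

        mutable std::mutex mMutex;
        std::map<int, int> mValues GUARDED_BY(mMutex);
    };

Building such a class with -Wthread-safety turns a forgotten lock into a compile error rather than a data race.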
diff --git a/media/libaudioclient/include/media/ToneGenerator.h b/media/libaudioclient/include/media/ToneGenerator.h
index 46e9501..3e515fc 100644
--- a/media/libaudioclient/include/media/ToneGenerator.h
+++ b/media/libaudioclient/include/media/ToneGenerator.h
@@ -225,11 +225,14 @@
         TONE_INDIA_CONGESTION,      // Congestion tone: 400 Hz, 250ms ON, 250ms OFF...
         TONE_INDIA_CALL_WAITING,    // Call waiting tone: 400 Hz, tone repeated in a 0.2s on, 0.1s off, 0.2s on, 7.5s off pattern.
         TONE_INDIA_RINGTONE,        // Ring tone: 400 Hz tone modulated with 25Hz, 0.4 on 0.2 off 0.4 on 2..0 off
-         // TAIWAN supervisory tones
+        // TAIWAN supervisory tones
         TONE_TW_RINGTONE,           // Ring Tone: 440 Hz + 480 Hz repeated with pattern 1s on, 3s off.
-         // NEW ZEALAND supervisory tones
+        // NEW ZEALAND supervisory tones
         TONE_NZ_CALL_WAITING,       // Call waiting tone: 400 Hz,  0.2s ON, 3s OFF,
                                     //        0.2s ON, 3s OFF, 0.2s ON, 3s OFF, 0.2s ON
+        // MALAYSIA supervisory tones
+        TONE_MY_CONGESTION,         // Congestion tone: 425 Hz, 500ms ON, 250ms OFF...
+        TONE_MY_RINGTONE,           // Ring tone: 425 Hz, 400ms ON 200ms OFF 400ms ON 2s OFF..
         NUM_ALTERNATE_TONES
     };
 
@@ -244,6 +247,7 @@
         INDIA,
         TAIWAN,
         NZ,
+        MY,
         CEPT,
         NUM_REGIONS
     };
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 913bbb4..b667c8d 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/libaudioclient/tests/audiorouting_tests.cpp b/media/libaudioclient/tests/audiorouting_tests.cpp
index 8f76f9b..3b2285e 100644
--- a/media/libaudioclient/tests/audiorouting_tests.cpp
+++ b/media/libaudioclient/tests/audiorouting_tests.cpp
@@ -17,6 +17,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioRoutingTest"
 
+#include <string.h>
+
+#include <binder/Binder.h>
 #include <binder/ProcessState.h>
 #include <cutils/properties.h>
 #include <gtest/gtest.h>
@@ -149,6 +152,7 @@
         config.sample_rate = 48000;
         AudioMix mix(criteria, mixType, config, mixFlag, String8{mAddress.c_str()}, 0);
         mix.mDeviceType = deviceType;
+        mix.mToken = sp<BBinder>::make();
         mMixes.push(mix);
         if (OK == AudioSystem::registerPolicyMixes(mMixes, true)) {
             mPolicyMixRegistered = true;
diff --git a/media/libaudiohal/impl/EffectHalAidl.cpp b/media/libaudiohal/impl/EffectHalAidl.cpp
index ebda86a..b1b1dfe 100644
--- a/media/libaudiohal/impl/EffectHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectHalAidl.cpp
@@ -53,6 +53,7 @@
 #include "effectsAidlConversion/AidlConversionVisualizer.h"
 
 using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::CommandId;
 using ::aidl::android::hardware::audio::effect::Descriptor;
 using ::aidl::android::hardware::audio::effect::IEffect;
 using ::aidl::android::hardware::audio::effect::IFactory;
@@ -285,6 +286,7 @@
 
 status_t EffectHalAidl::close() {
     TIME_CHECK();
+    mEffect->command(CommandId::STOP);
     return statusTFromBinderStatus(mEffect->close());
 }
 
diff --git a/media/libaudiohal/impl/EffectProxy.cpp b/media/libaudiohal/impl/EffectProxy.cpp
index d73a36c..d440ef8 100644
--- a/media/libaudiohal/impl/EffectProxy.cpp
+++ b/media/libaudiohal/impl/EffectProxy.cpp
@@ -156,6 +156,7 @@
 }
 
 ndk::ScopedAStatus EffectProxy::close() {
+    command(CommandId::STOP);
     return runWithAllSubEffects([&](std::shared_ptr<IEffect>& effect) {
         return effect->close();
     });
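Both EffectHalAidl::close() and EffectProxy::close() now send CommandId::STOP before closing, so an effect instance that is still processing gets stopped first instead of being torn down mid-stream. A minimal sketch of that teardown order against the same AIDL IEffect interface (the shutdownEffect helper and the exact NDK-backend include path are illustrative assumptions, not APIs added by this change):

    #include <memory>

    #include <aidl/android/hardware/audio/effect/IEffect.h>

    using ::aidl::android::hardware::audio::effect::CommandId;
    using ::aidl::android::hardware::audio::effect::IEffect;

    ndk::ScopedAStatus shutdownEffect(const std::shared_ptr<IEffect>& effect) {
        // Ignore the STOP result: even if the effect was never started we
        // still want to attempt the close below.
        (void)effect->command(CommandId::STOP);
        return effect->close();
    }

The explicit STOP means the HAL never sees close() arrive while it still considers the effect running.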
diff --git a/media/libaudiohal/tests/Android.bp b/media/libaudiohal/tests/Android.bp
index 1a54500..b9af0bf 100644
--- a/media/libaudiohal/tests/Android.bp
+++ b/media/libaudiohal/tests/Android.bp
@@ -17,6 +17,7 @@
 // frameworks/av/include.
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     default_applicable_licenses: ["frameworks_av_license"],
 }
 
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 57b860d..f9ae2d4 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -441,10 +441,10 @@
                 track->prepareForAdjustChannels(mFrameCount);
             }
             } break;
-        case HAPTIC_INTENSITY: {
-            const os::HapticScale hapticIntensity = static_cast<os::HapticScale>(valueInt);
-            if (track->mHapticIntensity != hapticIntensity) {
-                track->mHapticIntensity = hapticIntensity;
+        case HAPTIC_SCALE: {
+            const os::HapticScale hapticScale = *reinterpret_cast<os::HapticScale*>(value);
+            if (track->mHapticScale != hapticScale) {
+                track->mHapticScale = hapticScale;
             }
             } break;
         case HAPTIC_MAX_AMPLITUDE: {
@@ -585,7 +585,7 @@
     t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
     // haptic
     t->mHapticPlaybackEnabled = false;
-    t->mHapticIntensity = os::HapticScale::NONE;
+    t->mHapticScale = {/*level=*/os::HapticLevel::NONE };
     t->mHapticMaxAmplitude = NAN;
     t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
     t->mMixerHapticChannelCount = 0;
@@ -636,7 +636,7 @@
                 switch (t->mMixerFormat) {
                 // Mixer format should be AUDIO_FORMAT_PCM_FLOAT.
                 case AUDIO_FORMAT_PCM_FLOAT: {
-                    os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity,
+                    os::scaleHapticData((float*) buffer, sampleCount, t->mHapticScale,
                                         t->mHapticMaxAmplitude);
                 } break;
                 default:
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
index b39fb92..f558fd5 100644
--- a/media/libaudioprocessing/include/media/AudioMixer.h
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -49,7 +49,7 @@
         DOWNMIX_TYPE    = 0x4004,
         // for haptic
         HAPTIC_ENABLED  = 0x4007, // Set haptic data from this track should be played or not.
-        HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
+        HAPTIC_SCALE = 0x4008, // Set the scale to play haptic data.
         HAPTIC_MAX_AMPLITUDE = 0x4009, // Set the max amplitude allowed for haptic data.
         // for target TIMESTRETCH
         PLAYBACK_RATE   = 0x4300, // Configure timestretch on this track name;
@@ -141,7 +141,7 @@
 
         // Haptic
         bool                 mHapticPlaybackEnabled;
-        os::HapticScale      mHapticIntensity;
+        os::HapticScale      mHapticScale;
         float                mHapticMaxAmplitude;
         audio_channel_mask_t mHapticChannelMask;
         uint32_t             mHapticChannelCount;
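HAPTIC_INTENSITY used to travel through the mixer's integer parameter (valueInt), whereas HAPTIC_SCALE passes a whole os::HapticScale object through the void* value, hence the reinterpret_cast in the AudioMixer.cpp handler above. A hedged sketch of the caller side, assuming the usual AudioMixer::setParameter(name, target, param, value) entry point, the TRACK target selector, and the vibrator ExternalVibrationUtils header for os::HapticScale (the helper itself is illustrative):

    #include <media/AudioMixer.h>
    #include <vibrator/ExternalVibrationUtils.h>

    namespace android {

    // Illustrative helper: push a HapticScale to one mixer track. The object
    // only needs to outlive the setParameter() call, which copies it.
    void setTrackHapticScale(AudioMixer* mixer, int trackName, os::HapticScale scale) {
        mixer->setParameter(trackName, AudioMixer::TRACK,
                            AudioMixer::HAPTIC_SCALE, &scale);
    }

    }  // namespace android

Passing a struct instead of a bare int is presumably what lets the scale carry more than a level without adding yet another mixer parameter.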
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
index ad402db..a33bf55 100644
--- a/media/libaudioprocessing/tests/Android.bp
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -1,6 +1,7 @@
 // Build the unit tests for libaudioprocessing
 
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/libaudioprocessing/tests/fuzzer/Android.bp b/media/libaudioprocessing/tests/fuzzer/Android.bp
index 8fb6fff..b96ec6b 100644
--- a/media/libaudioprocessing/tests/fuzzer/Android.bp
+++ b/media/libaudioprocessing/tests/fuzzer/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -8,23 +9,23 @@
 }
 
 cc_fuzz {
-  name: "libaudioprocessing_resampler_fuzzer",
-  srcs: [
-    "libaudioprocessing_resampler_fuzzer.cpp",
-  ],
-  defaults: ["libaudioprocessing_test_defaults"],
-  static_libs: [
-    "libsndfile",
-  ],
+    name: "libaudioprocessing_resampler_fuzzer",
+    srcs: [
+        "libaudioprocessing_resampler_fuzzer.cpp",
+    ],
+    defaults: ["libaudioprocessing_test_defaults"],
+    static_libs: [
+        "libsndfile",
+    ],
 }
 
 cc_fuzz {
-  name: "libaudioprocessing_record_buffer_converter_fuzzer",
-  srcs: [
-    "libaudioprocessing_record_buffer_converter_fuzzer.cpp",
-  ],
-  defaults: ["libaudioprocessing_test_defaults"],
-  static_libs: [
-    "libsndfile",
-  ],
+    name: "libaudioprocessing_record_buffer_converter_fuzzer",
+    srcs: [
+        "libaudioprocessing_record_buffer_converter_fuzzer.cpp",
+    ],
+    defaults: ["libaudioprocessing_test_defaults"],
+    static_libs: [
+        "libsndfile",
+    ],
 }
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index b56872c..0b25327 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -1,5 +1,6 @@
 // Multichannel downmix effect library
 package {
+    default_team: "trendy_team_media_framework_audio",
     default_applicable_licenses: [
         "frameworks_av_media_libeffects_downmix_license",
     ],
@@ -60,7 +61,7 @@
     ],
     header_libs: [
         "libaudioeffects",
-        "libhardware_headers"
+        "libhardware_headers",
     ],
     shared_libs: [
         "libaudioutils",
diff --git a/media/libeffects/downmix/benchmark/Android.bp b/media/libeffects/downmix/benchmark/Android.bp
index 10f14e2..5b62a0c 100644
--- a/media/libeffects/downmix/benchmark/Android.bp
+++ b/media/libeffects/downmix/benchmark/Android.bp
@@ -1,5 +1,6 @@
 // Build testbench for downmix module.
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_media_libeffects_downmix_license"
diff --git a/media/libeffects/downmix/tests/Android.bp b/media/libeffects/downmix/tests/Android.bp
index 392a6fa..77d8f83 100644
--- a/media/libeffects/downmix/tests/Android.bp
+++ b/media/libeffects/downmix/tests/Android.bp
@@ -1,5 +1,6 @@
 // Build testbench for downmix module.
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_media_libeffects_downmix_license"
@@ -14,7 +15,7 @@
 //
 // Use "atest downmix_tests" to run.
 cc_test {
-    name:"downmix_tests",
+    name: "downmix_tests",
     gtest: true,
     host_supported: true,
     vendor: true,
@@ -45,7 +46,7 @@
 // test application and outputs then compares files in a local directory
 // on device (/data/local/tmp/downmixtest/).
 cc_test {
-    name:"downmixtest",
+    name: "downmixtest",
     host_supported: false,
     proprietary: true,
 
diff --git a/media/libeffects/factory/Android.bp b/media/libeffects/factory/Android.bp
index d94093e..ad5188f 100644
--- a/media/libeffects/factory/Android.bp
+++ b/media/libeffects/factory/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -20,10 +21,10 @@
     name: "libeffects",
     vendor: true,
     srcs: [
-         "EffectsFactory.c",
-         "EffectsConfigLoader.c",
-         "EffectsFactoryState.c",
-         "EffectsXmlConfigLoader.cpp",
+        "EffectsFactory.c",
+        "EffectsConfigLoader.c",
+        "EffectsFactoryState.c",
+        "EffectsXmlConfigLoader.cpp",
     ],
 
     shared_libs: [
@@ -34,7 +35,7 @@
     ],
     cflags: ["-fvisibility=hidden"],
 
-    local_include_dirs:["include/media"],
+    local_include_dirs: ["include/media"],
 
     header_libs: [
         "libaudioeffects",
@@ -61,5 +62,8 @@
         "libeffectsconfig",
         "libeffects",
     ],
-    local_include_dirs:[".", "include"],
+    local_include_dirs: [
+        ".",
+        "include",
+    ],
 }
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index a8892d8..5d9886c 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -145,7 +145,7 @@
     memset(context->param.hapticChannelSource, 0, sizeof(context->param.hapticChannelSource));
     context->param.hapticChannelCount = 0;
     context->param.audioChannelCount = 0;
-    context->param.maxHapticIntensity = os::HapticScale::MUTE;
+    context->param.maxHapticIntensity = os::HapticLevel::MUTE;
 
     context->param.resonantFrequency = DEFAULT_RESONANT_FREQUENCY;
     context->param.bpfQ = 1.0f;
@@ -316,9 +316,10 @@
             return -EINVAL;
         }
         int id = *(int *) value;
-        os::HapticScale hapticIntensity = static_cast<os::HapticScale>(*((int *) value + 1));
+        os::HapticLevel hapticIntensity =
+                static_cast<os::HapticLevel>(*((int *) value + 1));
         ALOGD("Setting haptic intensity as %d", static_cast<int>(hapticIntensity));
-        if (hapticIntensity == os::HapticScale::MUTE) {
+        if (hapticIntensity == os::HapticLevel::MUTE) {
             context->param.id2Intensity.erase(id);
         } else {
             context->param.id2Intensity.emplace(id, hapticIntensity);
@@ -478,7 +479,7 @@
         return -ENODATA;
     }
 
-    if (context->param.maxHapticIntensity == os::HapticScale::MUTE) {
+    if (context->param.maxHapticIntensity == os::HapticLevel::MUTE) {
         // Haptic channels are muted, not need to generate haptic data.
         return 0;
     }
@@ -504,8 +505,9 @@
     float* hapticOutBuffer = HapticGenerator_runProcessingChain(
             context->processingChain, context->inputBuffer.data(),
             context->outputBuffer.data(), inBuffer->frameCount);
-    os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity,
-                        context->param.maxHapticAmplitude);
+    os::scaleHapticData(hapticOutBuffer, hapticSampleCount,
+                        {/*level=*/context->param.maxHapticIntensity},
+                        context->param.maxHapticAmplitude);
 
     // For haptic data, the haptic playback thread will copy the data from effect input buffer,
     // which contains haptic data at the end of the buffer, directly to sink buffer.
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
index 85e961f..f122c0a 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.h
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -49,8 +49,8 @@
     uint32_t hapticChannelCount;
 
     // A map from track id to haptic intensity.
-    std::map<int, os::HapticScale> id2Intensity;
-    os::HapticScale maxHapticIntensity; // max intensity will be used to scale haptic data.
+    std::map<int, os::HapticLevel> id2Intensity;
+    os::HapticLevel maxHapticIntensity; // max intensity will be used to scale haptic data.
     float maxHapticAmplitude; // max amplitude will be used to limit haptic data absolute values.
 
     float resonantFrequency;
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
index e671543..5c38d17 100644
--- a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
@@ -174,7 +174,7 @@
             runProcessingChain(mInputBuffer.data(), mOutputBuffer.data(), mFrameCount);
     ::android::os::scaleHapticData(
             hapticOutBuffer, hapticSampleCount,
-            static_cast<::android::os::HapticScale>(mParams.mMaxVibratorScale),
+            {/*level=*/static_cast<::android::os::HapticLevel>(mParams.mMaxVibratorScale) },
             mParams.mVibratorInfo.qFactor);
 
     // For haptic data, the haptic playback thread will copy the data from effect input
diff --git a/media/libeffects/lvm/benchmarks/Android.bp b/media/libeffects/lvm/benchmarks/Android.bp
index c21c5f2..8036983 100644
--- a/media/libeffects/lvm/benchmarks/Android.bp
+++ b/media/libeffects/lvm/benchmarks/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 7998879..c1a77f0 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -1,5 +1,6 @@
 // Music bundle
 package {
+    default_team: "trendy_team_media_framework_audio",
     default_applicable_licenses: [
         "frameworks_av_media_libeffects_lvm_lib_license",
     ],
diff --git a/media/libeffects/lvm/tests/Android.bp b/media/libeffects/lvm/tests/Android.bp
index 0568fbd..c32e91e 100644
--- a/media/libeffects/lvm/tests/Android.bp
+++ b/media/libeffects/lvm/tests/Android.bp
@@ -1,6 +1,7 @@
 // Build the unit tests for effects
 
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -12,7 +13,7 @@
 cc_test {
     name: "EffectReverbTest",
     defaults: [
-      "libeffects-test-defaults",
+        "libeffects-test-defaults",
     ],
     srcs: [
         "EffectReverbTest.cpp",
@@ -29,7 +30,7 @@
 cc_test {
     name: "EffectBundleTest",
     defaults: [
-      "libeffects-test-defaults",
+        "libeffects-test-defaults",
     ],
     srcs: [
         "EffectBundleTest.cpp",
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index 62837b9..781aad6 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -1,5 +1,6 @@
 // music bundle wrapper
 package {
+    default_team: "trendy_team_media_framework_audio",
     default_applicable_licenses: [
         "frameworks_av_media_libeffects_lvm_wrapper_license",
     ],
diff --git a/media/libeffects/preprocessing/Android.bp b/media/libeffects/preprocessing/Android.bp
index 994b061..d658536 100644
--- a/media/libeffects/preprocessing/Android.bp
+++ b/media/libeffects/preprocessing/Android.bp
@@ -1,5 +1,6 @@
 // audio preprocessing wrapper
 package {
+    default_team: "trendy_team_media_framework_audio",
     default_applicable_licenses: [
         "frameworks_av_media_libeffects_preprocessing_license",
     ],
diff --git a/media/libeffects/preprocessing/benchmarks/Android.bp b/media/libeffects/preprocessing/benchmarks/Android.bp
index fbbcab4..ca99bf8 100644
--- a/media/libeffects/preprocessing/benchmarks/Android.bp
+++ b/media/libeffects/preprocessing/benchmarks/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_media_libeffects_preprocessing_license"
diff --git a/media/libeffects/preprocessing/tests/Android.bp b/media/libeffects/preprocessing/tests/Android.bp
index d80b135..ad8d84d 100644
--- a/media/libeffects/preprocessing/tests/Android.bp
+++ b/media/libeffects/preprocessing/tests/Android.bp
@@ -1,5 +1,6 @@
 // audio preprocessing unit test
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_media_libeffects_preprocessing_license"
diff --git a/media/libeffects/spatializer/benchmarks/Android.bp b/media/libeffects/spatializer/benchmarks/Android.bp
index ab7e468..2d07a9b 100644
--- a/media/libeffects/spatializer/benchmarks/Android.bp
+++ b/media/libeffects/spatializer/benchmarks/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/libeffects/spatializer/tests/Android.bp b/media/libeffects/spatializer/tests/Android.bp
index 704e873..ddfcff3 100644
--- a/media/libeffects/spatializer/tests/Android.bp
+++ b/media/libeffects/spatializer/tests/Android.bp
@@ -1,6 +1,7 @@
 // Build the unit tests for spatializer effect
 
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -12,7 +13,7 @@
 cc_test {
     name: "SpatializerTest",
     defaults: [
-      "libeffects-test-defaults",
+        "libeffects-test-defaults",
     ],
     host_supported: false,
     srcs: [
diff --git a/media/libeffects/visualizer/aidl/Visualizer.cpp b/media/libeffects/visualizer/aidl/Visualizer.cpp
index fa651a6..9b1bac6 100644
--- a/media/libeffects/visualizer/aidl/Visualizer.cpp
+++ b/media/libeffects/visualizer/aidl/Visualizer.cpp
@@ -59,7 +59,8 @@
 const std::string VisualizerImpl::kEffectName = "Visualizer";
 const std::vector<Range::VisualizerRange> VisualizerImpl::kRanges = {
         MAKE_RANGE(Visualizer, latencyMs, 0, VisualizerContext::kMaxLatencyMs),
-        MAKE_RANGE(Visualizer, captureSamples, 0, VisualizerContext::kMaxCaptureBufSize),
+        MAKE_RANGE(Visualizer, captureSamples, VisualizerContext::kMinCaptureBufSize,
+                   VisualizerContext::kMaxCaptureBufSize),
         /* get only parameters, set invalid range (min > max) to indicate not support set */
         MAKE_RANGE(Visualizer, measurement, Visualizer::Measurement({.rms = 1, .peak = 1}),
                    Visualizer::Measurement({.rms = 0, .peak = 0})),
diff --git a/media/libeffects/visualizer/aidl/VisualizerContext.cpp b/media/libeffects/visualizer/aidl/VisualizerContext.cpp
index 5d2bb3a..c763b1a 100644
--- a/media/libeffects/visualizer/aidl/VisualizerContext.cpp
+++ b/media/libeffects/visualizer/aidl/VisualizerContext.cpp
@@ -93,7 +93,7 @@
     mCaptureSamples = samples;
     return RetCode::SUCCESS;
 }
-int VisualizerContext::getCaptureSamples() {
+int32_t VisualizerContext::getCaptureSamples() {
     std::lock_guard lg(mMutex);
     return mCaptureSamples;
 }
diff --git a/media/libeffects/visualizer/aidl/VisualizerContext.h b/media/libeffects/visualizer/aidl/VisualizerContext.h
index 958035f..b03e038 100644
--- a/media/libeffects/visualizer/aidl/VisualizerContext.h
+++ b/media/libeffects/visualizer/aidl/VisualizerContext.h
@@ -18,6 +18,7 @@
 
 #include <android-base/thread_annotations.h>
 #include <audio_effects/effect_dynamicsprocessing.h>
+#include <system/audio_effects/effect_visualizer.h>
 
 #include "effect-impl/EffectContext.h"
 
@@ -25,8 +26,11 @@
 
 class VisualizerContext final : public EffectContext {
   public:
-    static const uint32_t kMaxCaptureBufSize = 65536;
-    static const uint32_t kMaxLatencyMs = 3000;  // 3 seconds of latency for audio pipeline
+    // Need to align the min/max capture size to VISUALIZER_CAPTURE_SIZE_MIN and
+    // VISUALIZER_CAPTURE_SIZE_MAX because of a limitation in audio_utils fixedfft.
+    static constexpr int32_t kMinCaptureBufSize = VISUALIZER_CAPTURE_SIZE_MIN;
+    static constexpr int32_t kMaxCaptureBufSize = VISUALIZER_CAPTURE_SIZE_MAX;
+    static constexpr uint32_t kMaxLatencyMs = 3000;  // 3 seconds of latency for audio pipeline
 
     VisualizerContext(int statusDepth, const Parameter::Common& common);
     ~VisualizerContext();
@@ -38,8 +42,8 @@
     // keep all parameters and reset buffer.
     void reset();
 
-    RetCode setCaptureSamples(int captureSize);
-    int getCaptureSamples();
+    RetCode setCaptureSamples(int32_t captureSize);
+    int32_t getCaptureSamples();
     RetCode setMeasurementMode(Visualizer::MeasurementMode mode);
     Visualizer::MeasurementMode getMeasurementMode();
     RetCode setScalingMode(Visualizer::ScalingMode mode);
@@ -86,7 +90,7 @@
     // capture buf with 8 bits mono PCM samples
     std::array<uint8_t, kMaxCaptureBufSize> mCaptureBuf GUARDED_BY(mMutex);
     uint32_t mDownstreamLatency GUARDED_BY(mMutex) = 0;
-    uint32_t mCaptureSamples GUARDED_BY(mMutex) = kMaxCaptureBufSize;
+    int32_t mCaptureSamples GUARDED_BY(mMutex) = kMaxCaptureBufSize;
 
     // to avoid recomputing it every time a buffer is processed
     uint8_t mChannelCount GUARDED_BY(mMutex) = 0;
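With both ends of the capture-size range now tied to the effect_visualizer.h limits, out-of-range requests are rejected by the MAKE_RANGE entry in Visualizer.cpp before they reach the context. A small sketch of the same bounds check in isolation (isValidCaptureSize is illustrative and not the code path the AIDL effect actually uses):

    #include <cstdint>

    #include <system/audio_effects/effect_visualizer.h>

    // Illustrative only: the bounds that kMin/kMaxCaptureBufSize encode.
    bool isValidCaptureSize(int32_t samples) {
        return samples >= VISUALIZER_CAPTURE_SIZE_MIN &&
               samples <= VISUALIZER_CAPTURE_SIZE_MAX;
    }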
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
index 9955862..70a242d 100644
--- a/media/libheadtracking/Android.bp
+++ b/media/libheadtracking/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -11,18 +12,18 @@
     name: "libheadtracking",
     host_supported: true,
     srcs: [
-      "HeadTrackingProcessor.cpp",
-      "ModeSelector.cpp",
-      "Pose.cpp",
-      "PoseBias.cpp",
-      "PoseDriftCompensator.cpp",
-      "PosePredictor.cpp",
-      "PoseRateLimiter.cpp",
-      "QuaternionUtil.cpp",
-      "ScreenHeadFusion.cpp",
-      "StillnessDetector.cpp",
-      "Twist.cpp",
-      "VectorRecorder.cpp",
+        "HeadTrackingProcessor.cpp",
+        "ModeSelector.cpp",
+        "Pose.cpp",
+        "PoseBias.cpp",
+        "PoseDriftCompensator.cpp",
+        "PosePredictor.cpp",
+        "PoseRateLimiter.cpp",
+        "QuaternionUtil.cpp",
+        "ScreenHeadFusion.cpp",
+        "StillnessDetector.cpp",
+        "Twist.cpp",
+        "VectorRecorder.cpp",
     ],
     shared_libs: [
         "libaudioutils",
@@ -51,7 +52,7 @@
 cc_library {
     name: "libheadtracking-binding",
     srcs: [
-      "SensorPoseProvider.cpp",
+        "SensorPoseProvider.cpp",
     ],
     shared_libs: [
         "libbase",
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 085a7e4..ee4075f 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -32,6 +32,7 @@
 #include <private/media/VideoFrame.h>
 #include <utils/Log.h>
 #include <utils/RefBase.h>
+#include <algorithm>
 #include <vector>
 
 HeifDecoder* createHeifDecoder() {
@@ -42,7 +43,10 @@
 
 void initFrameInfo(HeifFrameInfo *info, const VideoFrame *videoFrame) {
     info->mWidth = videoFrame->mDisplayWidth;
-    info->mHeight = videoFrame->mDisplayHeight;
+    // Number of scanlines is mDisplayHeight. Clamp it to mHeight to guard
+    // against malformed streams claiming that mDisplayHeight is greater than
+    // mHeight.
+    info->mHeight = std::min(videoFrame->mDisplayHeight, videoFrame->mHeight);
     info->mRotationAngle = videoFrame->mRotationAngle;
     info->mBytesPerPixel = videoFrame->mBytesPerPixel;
     info->mDurationUs = videoFrame->mDurationUs;
@@ -746,7 +750,9 @@
                    (videoFrame->mRowBytes * (mCurScanline + videoFrame->mDisplayTop)) +
                    (videoFrame->mBytesPerPixel * videoFrame->mDisplayLeft);
     mCurScanline++;
-    memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mDisplayWidth);
+    // Do not try to copy more than |videoFrame->mWidth| pixels.
+    uint32_t width = std::min(videoFrame->mDisplayWidth, videoFrame->mWidth);
+    memcpy(dst, src, videoFrame->mBytesPerPixel * width);
     return true;
 }
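Both HeifDecoderImpl fixes apply the same rule: never trust the display rectangle advertised by the stream beyond the coded buffer dimensions. In isolation the guard is a pair of clamps (the FrameDims struct below is an illustrative stand-in for the relevant VideoFrame fields):

    #include <algorithm>
    #include <cstdint>

    // Illustrative stand-in for the VideoFrame fields used above.
    struct FrameDims {
        uint32_t mWidth;          // coded (allocated) width
        uint32_t mHeight;         // coded (allocated) height
        uint32_t mDisplayWidth;   // width claimed by the stream
        uint32_t mDisplayHeight;  // height claimed by the stream
    };

    // Copy width and scanline count that stay safe for malformed streams.
    uint32_t clampedCopyWidth(const FrameDims& f) {
        return std::min(f.mDisplayWidth, f.mWidth);
    }

    uint32_t clampedScanlineCount(const FrameDims& f) {
        return std::min(f.mDisplayHeight, f.mHeight);
    }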
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 89348a4..3ab32f0 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -111,9 +111,12 @@
 // To collect the encoder usage for the battery app
 static void addBatteryData(uint32_t params) {
     sp<IBinder> binder =
-        defaultServiceManager()->getService(String16("media.player"));
+        defaultServiceManager()->waitForService(String16("media.player"));
     sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
-    CHECK(service.get() != NULL);
+    if (service.get() == nullptr) {
+        ALOGE("%s: Failed to get media.player service", __func__);
+        return;
+    }
 
     service->addBatteryData(params);
 }
@@ -1453,29 +1456,44 @@
 }
 
 status_t StagefrightRecorder::setupAACRecording() {
-    // FIXME:
-    // Add support for OUTPUT_FORMAT_AAC_ADIF
-    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_AAC_ADTS);
+    // TODO(b/324512842): Add support for OUTPUT_FORMAT_AAC_ADIF
+    if (mOutputFormat != OUTPUT_FORMAT_AAC_ADTS) {
+        ALOGE("Invalid output format %d used for AAC recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
-    CHECK(mAudioEncoder == AUDIO_ENCODER_AAC ||
-          mAudioEncoder == AUDIO_ENCODER_HE_AAC ||
-          mAudioEncoder == AUDIO_ENCODER_AAC_ELD);
-    CHECK(mAudioSource != AUDIO_SOURCE_CNT);
+    if (mAudioEncoder != AUDIO_ENCODER_AAC
+            && mAudioEncoder != AUDIO_ENCODER_HE_AAC
+            && mAudioEncoder != AUDIO_ENCODER_AAC_ELD) {
+        ALOGE("Invalid encoder %d used for AAC recording", mAudioEncoder);
+        return BAD_VALUE;
+    }
+
+    if (mAudioSource == AUDIO_SOURCE_CNT) {
+        ALOGE("Audio source hasn't been set correctly");
+        return BAD_VALUE;
+    }
 
     mWriter = new AACWriter(mOutputFd);
     return setupRawAudioRecording();
 }
 
 status_t StagefrightRecorder::setupOggRecording() {
-    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_OGG);
+    if (mOutputFormat != OUTPUT_FORMAT_OGG) {
+        ALOGE("Invalid output format %d used for OGG recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
     mWriter = new OggWriter(mOutputFd);
     return setupRawAudioRecording();
 }
 
 status_t StagefrightRecorder::setupAMRRecording() {
-    CHECK(mOutputFormat == OUTPUT_FORMAT_AMR_NB ||
-          mOutputFormat == OUTPUT_FORMAT_AMR_WB);
+    if (mOutputFormat != OUTPUT_FORMAT_AMR_NB
+            && mOutputFormat != OUTPUT_FORMAT_AMR_WB) {
+        ALOGE("Invalid output format %d used for AMR recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
     if (mOutputFormat == OUTPUT_FORMAT_AMR_NB) {
         if (mAudioEncoder != AUDIO_ENCODER_DEFAULT &&
@@ -1528,7 +1546,10 @@
 }
 
 status_t StagefrightRecorder::setupRTPRecording() {
-    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_RTP_AVP);
+    if (mOutputFormat != OUTPUT_FORMAT_RTP_AVP) {
+        ALOGE("Invalid output format %d used for RTP recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
     if ((mAudioSource != AUDIO_SOURCE_CNT
                 && mVideoSource != VIDEO_SOURCE_LIST_END)
@@ -1571,7 +1592,10 @@
 }
 
 status_t StagefrightRecorder::setupMPEG2TSRecording() {
-    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_MPEG2TS);
+    if (mOutputFormat != OUTPUT_FORMAT_MPEG2TS) {
+        ALOGE("Invalid output format %d used for MPEG2TS recording", mOutputFormat);
+        return BAD_VALUE;
+    }
 
     sp<MediaWriter> writer = new MPEG2TSWriter(mOutputFd);
 
diff --git a/media/libmediaplayerservice/fuzzer/Android.bp b/media/libmediaplayerservice/fuzzer/Android.bp
index 507da29..74b0a85 100644
--- a/media/libmediaplayerservice/fuzzer/Android.bp
+++ b/media/libmediaplayerservice/fuzzer/Android.bp
@@ -110,6 +110,17 @@
         "libresourcemanagerservice",
         "libmediametricsservice",
         "mediametricsservice-aidl-cpp",
+        "libcameraservice",
+        "android.hardware.camera.common@1.0",
+        "android.hardware.camera.provider@2.4",
+        "android.hardware.camera.provider@2.5",
+        "android.hardware.camera.provider@2.6",
+        "android.hardware.camera.provider@2.7",
+        "android.hardware.camera.provider-V3-ndk",
+        "android.hardware.camera.device@1.0",
+        "android.hardware.camera.device@3.2",
+        "android.hardware.camera.device@3.4",
+        "libaudiohal@7.0",
     ],
     header_libs: [
         "libaudiohal_headers",
diff --git a/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
index fdac1a1..2518c21 100644
--- a/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
+++ b/media/libmediaplayerservice/fuzzer/mediarecorder_fuzzer.cpp
@@ -15,22 +15,22 @@
  *
  */
 
-#include <media/stagefright/foundation/AString.h>
-#include "fuzzer/FuzzedDataProvider.h"
-
 #include <AudioFlinger.h>
 #include <MediaPlayerService.h>
 #include <ResourceManagerService.h>
-#include <fakeservicemanager/FakeServiceManager.h>
 #include <StagefrightRecorder.h>
 #include <camera/Camera.h>
 #include <camera/android/hardware/ICamera.h>
+#include <fakeservicemanager/FakeServiceManager.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
 #include <media/stagefright/PersistentSurface.h>
+#include <media/stagefright/foundation/AString.h>
 #include <mediametricsservice/MediaMetricsService.h>
 #include <thread>
+#include "CameraService.h"
+#include "fuzzer/FuzzedDataProvider.h"
 
 using namespace std;
 using namespace android;
@@ -46,32 +46,27 @@
     AUDIO_SOURCE_VOICE_RECOGNITION, AUDIO_SOURCE_VOICE_COMMUNICATION,
     AUDIO_SOURCE_REMOTE_SUBMIX,     AUDIO_SOURCE_UNPROCESSED,
     AUDIO_SOURCE_VOICE_PERFORMANCE, AUDIO_SOURCE_ECHO_REFERENCE,
-    AUDIO_SOURCE_FM_TUNER,          AUDIO_SOURCE_HOTWORD};
+    AUDIO_SOURCE_FM_TUNER,          AUDIO_SOURCE_HOTWORD,
+    AUDIO_SOURCE_ULTRASOUND};
+
+constexpr output_format kOutputFormat[] = {
+        OUTPUT_FORMAT_DEFAULT,        OUTPUT_FORMAT_THREE_GPP,
+        OUTPUT_FORMAT_MPEG_4,         OUTPUT_FORMAT_AUDIO_ONLY_START,
+        OUTPUT_FORMAT_RAW_AMR,        OUTPUT_FORMAT_AMR_NB,
+        OUTPUT_FORMAT_AMR_WB,         OUTPUT_FORMAT_AAC_ADTS,
+        OUTPUT_FORMAT_AUDIO_ONLY_END, OUTPUT_FORMAT_RTP_AVP,
+        OUTPUT_FORMAT_MPEG2TS,        OUTPUT_FORMAT_WEBM,
+        OUTPUT_FORMAT_HEIF,           OUTPUT_FORMAT_OGG,
+        OUTPUT_FORMAT_LIST_END};
+
+constexpr video_encoder kVideoEncoder[] = {
+        VIDEO_ENCODER_DEFAULT,      VIDEO_ENCODER_H263, VIDEO_ENCODER_H264,
+        VIDEO_ENCODER_MPEG_4_SP,    VIDEO_ENCODER_VP8,  VIDEO_ENCODER_HEVC,
+        VIDEO_ENCODER_DOLBY_VISION, VIDEO_ENCODER_AV1,  VIDEO_ENCODER_LIST_END};
 
 constexpr audio_microphone_direction_t kSupportedMicrophoneDirections[] = {
     MIC_DIRECTION_UNSPECIFIED, MIC_DIRECTION_FRONT, MIC_DIRECTION_BACK, MIC_DIRECTION_EXTERNAL};
 
-struct RecordingConfig {
-    output_format outputFormat;
-    audio_encoder audioEncoder;
-    video_encoder videoEncoder;
-};
-
-const struct RecordingConfig kRecordingConfigList[] = {
-    {OUTPUT_FORMAT_AMR_NB, AUDIO_ENCODER_AMR_NB, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_AMR_WB, AUDIO_ENCODER_AMR_WB, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_HE_AAC, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_AAC_ADTS, AUDIO_ENCODER_AAC_ELD, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_OGG, AUDIO_ENCODER_OPUS, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_RTP_AVP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_DEFAULT},
-    {OUTPUT_FORMAT_MPEG2TS, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
-    {OUTPUT_FORMAT_WEBM, AUDIO_ENCODER_VORBIS, VIDEO_ENCODER_VP8},
-    {OUTPUT_FORMAT_THREE_GPP, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
-    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_AAC, VIDEO_ENCODER_H264},
-    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_MPEG_4_SP},
-    {OUTPUT_FORMAT_MPEG_4, AUDIO_ENCODER_DEFAULT, VIDEO_ENCODER_HEVC}};
-
 const string kParametersList[] = {"max-duration",
                                   "max-filesize",
                                   "interleave-duration-us",
@@ -104,14 +99,16 @@
                                   "rtp-param-ext-cvo-degrees",
                                   "video-param-request-i-frame",
                                   "rtp-param-set-socket-dscp",
-                                  "rtp-param-set-socket-network"};
+                                  "rtp-param-set-socket-network",
+                                  "rtp-param-set-socket-ecn",
+                                  "rtp-param-remote-ip",
+                                  "rtp-param-set-socket-network",
+                                  "log-session-id"};
 
-constexpr int32_t kMaxSleepTimeInMs = 100;
-constexpr int32_t kMinSleepTimeInMs = 0;
 constexpr int32_t kMinVideoSize = 2;
 constexpr int32_t kMaxVideoSize = 8192;
-constexpr int32_t kNumRecordMin = 1;
-constexpr int32_t kNumRecordMax = 10;
+const char kOutputFile[] = "OutputFile";
+const char kNextOutputFile[] = "NextOutputFile";
 
 class TestAudioDeviceCallback : public AudioSystem::AudioDeviceCallback {
    public:
@@ -194,8 +191,7 @@
     int32_t max;
     mStfRecorder->getMaxAmplitude(&max);
 
-    int32_t deviceId = mFdp.ConsumeIntegral<int32_t>();
-    mStfRecorder->setInputDevice(deviceId);
+    int32_t deviceId;
     mStfRecorder->getRoutedDeviceId(&deviceId);
 
     vector<android::media::MicrophoneInfoFw> activeMicrophones{};
@@ -213,101 +209,189 @@
     sp<IGraphicBufferProducer> buffer = mStfRecorder->querySurfaceMediaSource();
 }
 
-void MediaRecorderClientFuzzer::dumpInfo() {
-    int32_t dumpFd = memfd_create("DumpFile", MFD_ALLOW_SEALING);
-    Vector<String16> args;
-    args.push_back(String16(mFdp.ConsumeRandomLengthString().c_str()));
-    mStfRecorder->dump(dumpFd, args);
-    close(dumpFd);
-}
-
-void MediaRecorderClientFuzzer::setConfig() {
-    mStfRecorder->setOutputFile(mMediaRecorderOutputFd);
-    mStfRecorder->setAudioSource(mFdp.PickValueInArray(kSupportedAudioSources));
-    mStfRecorder->setVideoSource(mFdp.PickValueInArray(kSupportedVideoSources));
-    mStfRecorder->setPreferredMicrophoneDirection(
-        mFdp.PickValueInArray(kSupportedMicrophoneDirections));
-    mStfRecorder->setPrivacySensitive(mFdp.ConsumeBool());
-    bool isPrivacySensitive;
-    mStfRecorder->isPrivacySensitive(&isPrivacySensitive);
-    mStfRecorder->setVideoSize(mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize),
-                               mFdp.ConsumeIntegralInRange<int32_t>(kMinVideoSize, kMaxVideoSize));
-    mStfRecorder->setVideoFrameRate(mFdp.ConsumeIntegral<int32_t>());
-    mStfRecorder->enableAudioDeviceCallback(mFdp.ConsumeBool());
-    mStfRecorder->setPreferredMicrophoneFieldDimension(mFdp.ConsumeFloatingPoint<float>());
-    mStfRecorder->setClientName(String16(mFdp.ConsumeRandomLengthString().c_str()));
-
-    int32_t Idx = mFdp.ConsumeIntegralInRange<int32_t>(0, size(kRecordingConfigList) - 1);
-    mStfRecorder->setOutputFormat(kRecordingConfigList[Idx].outputFormat);
-    mStfRecorder->setAudioEncoder(kRecordingConfigList[Idx].audioEncoder);
-    mStfRecorder->setVideoEncoder(kRecordingConfigList[Idx].videoEncoder);
-
-    int32_t nextOutputFd = memfd_create("NextOutputFile", MFD_ALLOW_SEALING);
-    mStfRecorder->setNextOutputFile(nextOutputFd);
-    close(nextOutputFd);
-
-    for (Idx = 0; Idx < size(kParametersList); ++Idx) {
-        if (mFdp.ConsumeBool()) {
-            int32_t value = mFdp.ConsumeIntegral<int32_t>();
-            mStfRecorder->setParameters(
-                String8((kParametersList[Idx] + "=" + to_string(value)).c_str()));
-        }
+template <typename FuncWrapper>
+void callMediaAPI(FuncWrapper funcWrapper, FuzzedDataProvider* fdp) {
+    if (fdp->ConsumeBool()) {
+        funcWrapper();
     }
 }
 
-MediaRecorderClientFuzzer::MediaRecorderClientFuzzer(const uint8_t *data, size_t size)
-    : mFdp(data, size), mMediaRecorderOutputFd(memfd_create("OutputFile", MFD_ALLOW_SEALING)) {
+void MediaRecorderClientFuzzer::setConfig() {
+    callMediaAPI(
+            [this]() {
+                mSurfaceControl = mComposerClient.createSurface(
+                        String8(mFdp.ConsumeRandomLengthString().c_str()) /* name */,
+                        mFdp.ConsumeIntegral<uint32_t>() /* width */,
+                        mFdp.ConsumeIntegral<uint32_t>() /* height */,
+                        mFdp.ConsumeIntegral<int32_t>() /* pixel-format */,
+                        mFdp.ConsumeIntegral<int32_t>() /* flags */);
+                if (mSurfaceControl) {
+                    mSurface = mSurfaceControl->getSurface();
+                    mStfRecorder->setPreviewSurface(mSurface->getIGraphicBufferProducer());
+                }
+            },
+            &mFdp);
+
+    callMediaAPI([this]() { mStfRecorder->setInputDevice(mFdp.ConsumeIntegral<int32_t>()); },
+                 &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                sp<TestMediaRecorderClient> listener = sp<TestMediaRecorderClient>::make();
+                mStfRecorder->setListener(listener);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                sp<TestCamera> testCamera = sp<TestCamera>::make();
+                sp<Camera> camera = Camera::create(testCamera);
+                mStfRecorder->setCamera(camera->remote(), camera->getRecordingProxy());
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                sp<PersistentSurface> persistentSurface = sp<PersistentSurface>::make();
+                mStfRecorder->setInputSurface(persistentSurface);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                sp<TestAudioDeviceCallback> callback = sp<TestAudioDeviceCallback>::make();
+                mStfRecorder->setAudioDeviceCallback(callback);
+                mStfRecorder->setOutputFile(mMediaRecorderOutputFd);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setAudioSource(mFdp.PickValueInArray(kSupportedAudioSources));
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setVideoSource(mFdp.PickValueInArray(kSupportedVideoSources));
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setPreferredMicrophoneDirection(
+                        mFdp.PickValueInArray(kSupportedMicrophoneDirections));
+            },
+            &mFdp);
+
+    callMediaAPI([this]() { mStfRecorder->setPrivacySensitive(mFdp.ConsumeBool()); }, &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                bool isPrivacySensitive;
+                mStfRecorder->isPrivacySensitive(&isPrivacySensitive);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setVideoSize(mFdp.ConsumeIntegralInRange<int32_t>(
+                                                   kMinVideoSize, kMaxVideoSize) /* width */,
+                                           mFdp.ConsumeIntegralInRange<int32_t>(
+                                                   kMinVideoSize, kMaxVideoSize) /* height */);
+            },
+            &mFdp);
+
+    callMediaAPI([this]() { mStfRecorder->setVideoFrameRate(mFdp.ConsumeIntegral<int32_t>()); },
+                 &mFdp);
+
+    callMediaAPI([this]() { mStfRecorder->enableAudioDeviceCallback(mFdp.ConsumeBool()); }, &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setPreferredMicrophoneFieldDimension(
+                        mFdp.ConsumeFloatingPoint<float>());
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                mStfRecorder->setClientName(String16(mFdp.ConsumeRandomLengthString().c_str()));
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                output_format OutputFormat = mFdp.PickValueInArray(kOutputFormat);
+                audio_encoder AudioEncoderFormat =
+                        (audio_encoder)mFdp.ConsumeIntegralInRange<int32_t>(AUDIO_ENCODER_DEFAULT,
+                                                                            AUDIO_ENCODER_LIST_END);
+                video_encoder VideoEncoderFormat = mFdp.PickValueInArray(kVideoEncoder);
+                if (OutputFormat == OUTPUT_FORMAT_AMR_NB) {
+                    AudioEncoderFormat =
+                            mFdp.ConsumeBool() ? AUDIO_ENCODER_DEFAULT : AUDIO_ENCODER_AMR_NB;
+                } else if (OutputFormat == OUTPUT_FORMAT_AMR_WB) {
+                    AudioEncoderFormat = AUDIO_ENCODER_AMR_WB;
+                } else if (OutputFormat == OUTPUT_FORMAT_AAC_ADIF ||
+                           OutputFormat == OUTPUT_FORMAT_AAC_ADTS ||
+                           OutputFormat == OUTPUT_FORMAT_MPEG2TS) {
+                    AudioEncoderFormat = (audio_encoder)mFdp.ConsumeIntegralInRange<int32_t>(
+                            AUDIO_ENCODER_AAC, AUDIO_ENCODER_AAC_ELD);
+                    if (OutputFormat == OUTPUT_FORMAT_MPEG2TS) {
+                        VideoEncoderFormat = VIDEO_ENCODER_H264;
+                    }
+                }
+                mStfRecorder->setOutputFormat(OutputFormat);
+                mStfRecorder->setAudioEncoder(AudioEncoderFormat);
+                mStfRecorder->setVideoEncoder(VideoEncoderFormat);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                int32_t nextOutputFd = memfd_create(kNextOutputFile, MFD_ALLOW_SEALING);
+                mStfRecorder->setNextOutputFile(nextOutputFd);
+                close(nextOutputFd);
+            },
+            &mFdp);
+
+    callMediaAPI(
+            [this]() {
+                for (int32_t idx = 0; idx < size(kParametersList); ++idx) {
+                    if (mFdp.ConsumeBool()) {
+                        int32_t value = mFdp.ConsumeIntegral<int32_t>();
+                        mStfRecorder->setParameters(
+                                String8((kParametersList[idx] + "=" + to_string(value)).c_str()));
+                    }
+                }
+            },
+            &mFdp);
+}
+
+MediaRecorderClientFuzzer::MediaRecorderClientFuzzer(const uint8_t* data, size_t size)
+    : mFdp(data, size), mMediaRecorderOutputFd(memfd_create(kOutputFile, MFD_ALLOW_SEALING)) {
     AttributionSourceState attributionSource;
     attributionSource.packageName = mFdp.ConsumeRandomLengthString().c_str();
     attributionSource.token = sp<BBinder>::make();
     mStfRecorder = make_unique<StagefrightRecorder>(attributionSource);
-
-    mSurfaceControl = mComposerClient.createSurface(
-        String8(mFdp.ConsumeRandomLengthString().c_str()), mFdp.ConsumeIntegral<uint32_t>(),
-        mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<int32_t>(),
-        mFdp.ConsumeIntegral<int32_t>());
-    if (mSurfaceControl) {
-        mSurface = mSurfaceControl->getSurface();
-        mStfRecorder->setPreviewSurface(mSurface->getIGraphicBufferProducer());
-    }
-
-    sp<TestMediaRecorderClient> listener = sp<TestMediaRecorderClient>::make();
-    mStfRecorder->setListener(listener);
-
-    sp<TestCamera> testCamera = sp<TestCamera>::make();
-    sp<Camera> camera = Camera::create(testCamera);
-    mStfRecorder->setCamera(camera->remote(), camera->getRecordingProxy());
-
-    sp<PersistentSurface> persistentSurface = sp<PersistentSurface>::make();
-    mStfRecorder->setInputSurface(persistentSurface);
-
-    sp<TestAudioDeviceCallback> callback = sp<TestAudioDeviceCallback>::make();
-    mStfRecorder->setAudioDeviceCallback(callback);
 }
 
 void MediaRecorderClientFuzzer::process() {
-    setConfig();
-
     mStfRecorder->init();
     mStfRecorder->prepare();
-    size_t numRecord = mFdp.ConsumeIntegralInRange<size_t>(kNumRecordMin, kNumRecordMax);
-    for (size_t Idx = 0; Idx < numRecord; ++Idx) {
-        mStfRecorder->start();
-        this_thread::sleep_for(chrono::milliseconds(
-            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
-        mStfRecorder->pause();
-        this_thread::sleep_for(chrono::milliseconds(
-            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
-        mStfRecorder->resume();
-        this_thread::sleep_for(chrono::milliseconds(
-            mFdp.ConsumeIntegralInRange<int32_t>(kMinSleepTimeInMs, kMaxSleepTimeInMs)));
-        mStfRecorder->stop();
+    while (mFdp.remaining_bytes()) {
+        auto invokeMediaRecorderApi = mFdp.PickValueInArray<const std::function<void()>>({
+                [&]() { setConfig(); },
+                [&]() { mStfRecorder->start(); },
+                [&]() { mStfRecorder->pause(); },
+                [&]() { mStfRecorder->resume(); },
+                [&]() { mStfRecorder->stop(); },
+                [&]() { getConfig(); },
+                [&]() { mStfRecorder->close(); },
+                [&]() { mStfRecorder->reset(); },
+        });
+        invokeMediaRecorderApi();
     }
-    dumpInfo();
-    getConfig();
-
-    mStfRecorder->close();
-    mStfRecorder->reset();
 }
 
 extern "C" int LLVMFuzzerInitialize(int /* *argc */, char /* ***argv */) {
@@ -320,6 +404,7 @@
     MediaPlayerService::instantiate();
     AudioFlinger::instantiate();
     ResourceManagerService::instantiate();
+    CameraService::instantiate();
     fakeServiceManager->addService(String16(MediaMetricsService::kServiceName),
                                     new MediaMetricsService());
     return 0;
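
Note: the reworked process() loop above follows a common libFuzzer idiom: wrap each API call in a lambda, let FuzzedDataProvider pick one per iteration, and keep going until the input bytes run out, so the fuzzer input drives the call ordering. A minimal standalone sketch of that idiom follows; the Recorder type and the fuzzer entry point body here are illustrative stand-ins, not part of this change or of any Android API.

#include <fuzzer/FuzzedDataProvider.h>

#include <cstddef>
#include <cstdint>
#include <functional>

// Stand-in target; in the patch above this role is played by StagefrightRecorder.
struct Recorder {
    void start() {}
    void pause() {}
    void resume() {}
    void stop() {}
};

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    FuzzedDataProvider fdp(data, size);
    Recorder recorder;
    while (fdp.remaining_bytes()) {
        // Pick one API per iteration; the call sequence is derived from the input bytes.
        auto invokeApi = fdp.PickValueInArray<const std::function<void()>>({
                [&]() { recorder.start(); },
                [&]() { recorder.pause(); },
                [&]() { recorder.resume(); },
                [&]() { recorder.stop(); },
        });
        invokeApi();
    }
    return 0;
}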
diff --git a/media/libshmem/Android.bp b/media/libshmem/Android.bp
index 6e48078..486a34f 100644
--- a/media/libshmem/Android.bp
+++ b/media/libshmem/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
index 19f9549..2341af1 100644
--- a/media/mediaserver/Android.bp
+++ b/media/mediaserver/Android.bp
@@ -1,4 +1,3 @@
-
 package {
     default_applicable_licenses: ["frameworks_av_media_mediaserver_license"],
 }
@@ -86,7 +85,16 @@
         "-Wall",
     ],
 
-    vintf_fragments: ["manifest_media_c2_software.xml"],
+    // AIDL is only used when release_aidl_use_unfrozen is true
+    // because the swcodec mainline module is a prebuilt from an
+    // Android U branch otherwise.
+    // TODO(b/327508501)
+    vintf_fragments: ["manifest_media_c2_software_hidl.xml"],
+    product_variables: {
+        release_aidl_use_unfrozen: {
+            vintf_fragments: ["manifest_media_c2_software_aidl.xml"],
+        },
+    },
 
     soong_config_variables: {
         TARGET_DYNAMIC_64_32_MEDIASERVER: {
diff --git a/media/mediaserver/manifest_media_c2_software_aidl.xml b/media/mediaserver/manifest_media_c2_software_aidl.xml
new file mode 100644
index 0000000..e6bcafa
--- /dev/null
+++ b/media/mediaserver/manifest_media_c2_software_aidl.xml
@@ -0,0 +1,7 @@
+<manifest version="1.0" type="framework">
+    <hal format="aidl">
+        <name>android.hardware.media.c2</name>
+        <version>1</version>
+        <fqname>IComponentStore/software</fqname>
+    </hal>
+</manifest>
diff --git a/media/mediaserver/manifest_media_c2_software.xml b/media/mediaserver/manifest_media_c2_software_hidl.xml
similarity index 68%
rename from media/mediaserver/manifest_media_c2_software.xml
rename to media/mediaserver/manifest_media_c2_software_hidl.xml
index 31dfafb..69a27be 100644
--- a/media/mediaserver/manifest_media_c2_software.xml
+++ b/media/mediaserver/manifest_media_c2_software_hidl.xml
@@ -8,9 +8,4 @@
             <instance>software</instance>
         </interface>
     </hal>
-    <hal format="aidl">
-        <name>android.hardware.media.c2</name>
-        <version>1</version>
-        <fqname>IComponentStore/software</fqname>
-    </hal>
 </manifest>
diff --git a/media/module/codecs/mp3dec/src/pvmp3_framedecoder.cpp b/media/module/codecs/mp3dec/src/pvmp3_framedecoder.cpp
index e8fea73..fb9f1e9 100644
--- a/media/module/codecs/mp3dec/src/pvmp3_framedecoder.cpp
+++ b/media/module/codecs/mp3dec/src/pvmp3_framedecoder.cpp
@@ -310,26 +310,31 @@
 }
 
 // Check if the input is valid by checking if it contains a sync word
-static bool isInputValid(uint8 *buf, uint32 inSize)
+static ERROR_CODE validate_input(uint8 *buf, uint32 inSize)
 {
-    // Buffer needs to contain at least 4 bytes which is the size of
-    // the header
-    if (inSize < 4) return false;
+    /*
+     * Verify that at least the header is complete
+     * Note that SYNC_WORD_LNGTH is in units of bits, but inSize is in units of bytes.
+     */
+    if (inSize < ((SYNC_WORD_LNGTH + 21) >> 3))
+    {
+        return NO_ENOUGH_MAIN_DATA_ERROR;
+    }
 
     size_t totalInSize = 0;
     size_t frameSize = 0;
     while (totalInSize <= (inSize - 4)) {
         if (!parseHeader(U32_AT(buf + totalInSize), &frameSize)) {
-            return false;
+            return SYNCH_LOST_ERROR;
         }
         // Buffer needs to be large enough to include complete frame
         if ((frameSize > inSize) || (totalInSize > (inSize - frameSize))) {
-            return false;
+            return SYNCH_LOST_ERROR;
         }
         totalInSize += frameSize;
     }
 
-    return true;
+    return NO_DECODING_ERROR;
 }
 
 ERROR_CODE pvmp3_framedecoder(tPVMP3DecoderExternal *pExt,
@@ -348,10 +353,11 @@
     mp3Header info_data;
     mp3Header *info = &info_data;
 
-    if (!isInputValid(pExt->pInputBuffer, pExt->inputBufferCurrentLength))
+    errorCode = validate_input(pExt->pInputBuffer, pExt->inputBufferCurrentLength);
+    if (errorCode != NO_DECODING_ERROR)
     {
         pExt->outputFrameSize = 0;
-        return SYNCH_LOST_ERROR;
+        return errorCode;
     }
 
     pVars->inputStream.pBuffer  = pExt->pInputBuffer;
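
Note: validate_input() above walks the buffer frame by frame: parse a 4-byte header, derive the frame size, check that the whole frame fits, then advance. A simplified standalone sketch of the same walk is shown below; it returns a plain bool and uses a stubbed parse_frame_size() in place of parseHeader() and the decoder's ERROR_CODE values, so it is not a faithful MP3 parser.

#include <cstddef>
#include <cstdint>

// Stub for illustration only: accept any header carrying the 11-bit sync word
// and pretend every frame is 417 bytes (a common size for 128 kbps, 44.1 kHz).
static size_t parse_frame_size(uint32_t header) {
    return ((header >> 21) == 0x7FF) ? 417 : 0;
}

// True only if the buffer starts with a chain of complete frames.
static bool frames_are_complete(const uint8_t* buf, size_t inSize) {
    if (inSize < 4) return false;  // need at least one full header
    size_t total = 0;
    while (total <= inSize - 4) {
        const uint32_t header = (uint32_t(buf[total]) << 24) | (uint32_t(buf[total + 1]) << 16) |
                                (uint32_t(buf[total + 2]) << 8) | uint32_t(buf[total + 3]);
        const size_t frameSize = parse_frame_size(header);
        if (frameSize == 0) return false;  // lost sync
        if (frameSize > inSize || total > inSize - frameSize) {
            return false;                  // truncated final frame
        }
        total += frameSize;
    }
    return true;
}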
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 2946398..c4f2808 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -27,7 +27,6 @@
 #include <media/AidlConversionUtil.h>
 #include <android/content/AttributionSourceState.h>
 
-#include <com_android_media_audio.h>
 #include <iterator>
 #include <algorithm>
 #include <pwd.h>
@@ -388,10 +387,6 @@
  */
 bool mustAnonymizeBluetoothAddress(
         const AttributionSourceState& attributionSource, const String16& caller) {
-    if (!com::android::media::audio::bluetooth_mac_address_anonymization()) {
-        return false;
-    }
-
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
     if (isAudioServerOrSystemServerUid(uid)) {
         return false;
diff --git a/media/utils/fuzzers/Android.bp b/media/utils/fuzzers/Android.bp
index bd9a462..0a047c1 100644
--- a/media/utils/fuzzers/Android.bp
+++ b/media/utils/fuzzers/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/media/utils/tests/Android.bp b/media/utils/tests/Android.bp
index 3fdc6eb..a68569a 100644
--- a/media/utils/tests/Android.bp
+++ b/media/utils/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index afd28e5..129541f 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -145,6 +145,7 @@
         "audioflinger-aidl-cpp",
         "audioclient-types-aidl-cpp",
         "av-types-aidl-cpp",
+        "com.android.media.audio-aconfig-cc",
         "effect-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "libactivitymanager_aidl",
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index c5424a2..725e5a6 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -311,7 +311,8 @@
     }
 
     mPatchPanel = IAfPatchPanel::create(sp<IAfPatchPanelCallback>::fromExisting(this));
-    mMelReporter = sp<MelReporter>::make(sp<IAfMelReporterCallback>::fromExisting(this));
+    mMelReporter = sp<MelReporter>::make(sp<IAfMelReporterCallback>::fromExisting(this),
+                                         mPatchPanel);
 }
 
 status_t AudioFlinger::setAudioHalPids(const std::vector<pid_t>& pids) {
@@ -2175,30 +2176,24 @@
     sp<IAfThreadBase> thread;
 
     for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
-        if (mPlaybackThreads.valueAt(i)->getEffect(sessionId, effectId) != 0) {
-            ALOG_ASSERT(thread == 0);
-            thread = mPlaybackThreads.valueAt(i);
+        thread = mPlaybackThreads.valueAt(i);
+        if (thread->getEffect(sessionId, effectId) != 0) {
+            return thread;
         }
     }
-    if (thread != nullptr) {
-        return thread;
-    }
     for (size_t i = 0; i < mRecordThreads.size(); i++) {
-        if (mRecordThreads.valueAt(i)->getEffect(sessionId, effectId) != 0) {
-            ALOG_ASSERT(thread == 0);
-            thread = mRecordThreads.valueAt(i);
+        thread = mRecordThreads.valueAt(i);
+        if (thread->getEffect(sessionId, effectId) != 0) {
+            return thread;
         }
     }
-    if (thread != nullptr) {
-        return thread;
-    }
     for (size_t i = 0; i < mMmapThreads.size(); i++) {
-        if (mMmapThreads.valueAt(i)->getEffect(sessionId, effectId) != 0) {
-            ALOG_ASSERT(thread == 0);
-            thread = mMmapThreads.valueAt(i);
+        thread = mMmapThreads.valueAt(i);
+        if (thread->getEffect(sessionId, effectId) != 0) {
+            return thread;
         }
     }
-    return thread;
+    return nullptr;
 }
 
 // ----------------------------------------------------------------------------
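
Note: the getEffectThread_l() rewrite above drops the accumulate-then-assert pattern in favor of returning as soon as any thread group yields a match. One compact way to express the same early-return search is sketched below over plain std::vector containers, with a hypothetical Thread type standing in for the playback/record/mmap thread collections.

#include <vector>

struct Thread {
    int effectId = 0;
    bool hasEffect(int id) const { return effectId == id; }
};

// Return the first thread owning the effect, searching the collections in order;
// nullptr if none does.
const Thread* findEffectThread(const std::vector<Thread>& playback,
                               const std::vector<Thread>& record,
                               const std::vector<Thread>& mmap, int effectId) {
    for (const auto* group : {&playback, &record, &mmap}) {
        for (const Thread& t : *group) {
            if (t.hasEffect(effectId)) return &t;
        }
    }
    return nullptr;
}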
@@ -2551,6 +2546,7 @@
         bool mm;
         if (OK == dev->getMasterMute(&mm)) {
             mMasterMute = mm;
+            ALOGI_IF(mMasterMute, "%s: applying mute from HAL %s", __func__, name);
         }
     }
 
@@ -4149,7 +4145,7 @@
         }
 
         // Only audio policy service can create a spatializer effect
-        if ((memcmp(&descOut.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0) &&
+        if (IAfEffectModule::isSpatializer(&descOut.type) &&
             (callingUid != AID_AUDIOSERVER || currentPid != getpid())) {
             ALOGW("%s: attempt to create a spatializer effect from uid/pid %d/%d",
                     __func__, callingUid, currentPid);
@@ -4391,11 +4387,12 @@
 
     sp<IAfThreadBase> thread = getEffectThread_l(sessionId, effectId);
     if (thread == nullptr) {
-      return;
+        return;
     }
     audio_utils::lock_guard _sl(thread->mutex());
-    sp<IAfEffectModule> effect = thread->getEffect_l(sessionId, effectId);
-    thread->setEffectSuspended_l(&effect->desc().type, suspended, sessionId);
+    if (const auto& effect = thread->getEffect_l(sessionId, effectId)) {
+        thread->setEffectSuspended_l(&effect->desc().type, suspended, sessionId);
+    }
 }
 
 
@@ -4487,7 +4484,7 @@
             if (effect->state() == IAfEffectModule::ACTIVE ||
                     effect->state() == IAfEffectModule::STOPPING) {
                 ++started;
-                effect->start();
+                effect->start_l();
             }
         }
         dstChain->mutex().unlock();
@@ -4590,7 +4587,7 @@
         // removeEffect_l() has stopped the effect if it was active so it must be restarted
         if (effect->state() == IAfEffectModule::ACTIVE ||
             effect->state() == IAfEffectModule::STOPPING) {
-            effect->start();
+            effect->start_l();
         }
     }
 
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index 201d147..feae97e 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -143,7 +143,7 @@
         if (lStatus == NO_ERROR) {
             lStatus = effect->addHandle(handle.get());
             if (lStatus == NO_ERROR) {
-                lStatus = effect->init(patches);
+                lStatus = effect->init_l(patches);
                 if (lStatus == NAME_NOT_FOUND) {
                     lStatus = NO_ERROR;
                 }
diff --git a/services/audioflinger/DeviceEffectManager.h b/services/audioflinger/DeviceEffectManager.h
index 7045c8b..287d838 100644
--- a/services/audioflinger/DeviceEffectManager.h
+++ b/services/audioflinger/DeviceEffectManager.h
@@ -139,7 +139,7 @@
     // check if effects should be suspended or restored when a given effect is enable or disabled
     void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect __unused,
                           bool enabled __unused, bool threadLocked __unused) final {}
-    void resetVolume() final {}
+    void resetVolume_l() final REQUIRES(audio_utils::EffectChain_Mutex) {}
     product_strategy_t strategy() const final { return static_cast<product_strategy_t>(0); }
     int32_t activeTrackCnt() const final { return 0; }
     void onEffectEnable(const sp<IAfEffectBase>& effect __unused) final {}
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 73a89e5..b270813 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -570,10 +570,10 @@
     : EffectBase(callback, desc, id, sessionId, pinned),
       // clear mConfig to ensure consistent initial value of buffer framecount
       // in case buffers are associated by setInBuffer() or setOutBuffer()
-      // prior to configure().
+      // prior to configure_l().
       mConfig{{}, {}},
       mStatus(NO_INIT),
-      mMaxDisableWaitCnt(1), // set by configure(), should be >= 1
+      mMaxDisableWaitCnt(1), // set by configure_l(), should be >= 1
       mDisableWaitCnt(0),    // set by process() and updateState()
       mOffloaded(false),
       mIsOutput(false)
@@ -588,13 +588,13 @@
     if (mStatus != NO_ERROR) {
         return;
     }
-    lStatus = init();
+    lStatus = init_l();
     if (lStatus < 0) {
         mStatus = lStatus;
         goto Error;
     }
 
-    setOffloaded(callback->isOffload(), callback->io());
+    setOffloaded_l(callback->isOffload(), callback->io());
     ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface.get());
 
     return;
@@ -616,7 +616,7 @@
 
 }
 
-bool EffectModule::updateState() {
+bool EffectModule::updateState_l() {
     audio_utils::lock_guard _l(mutex());
 
     bool started = false;
@@ -632,7 +632,7 @@
                    0,
                    mConfig.inputCfg.buffer.frameCount*sizeof(int32_t));
         }
-        if (start_l() == NO_ERROR) {
+        if (start_ll() == NO_ERROR) {
             mState = ACTIVE;
             started = true;
         } else {
@@ -641,8 +641,8 @@
         break;
     case STOPPING:
         // volume control for offload and direct threads must take effect immediately.
-        if (stop_l() == NO_ERROR
-            && !(isVolumeControl() && isOffloadedOrDirect())) {
+        if (stop_ll() == NO_ERROR
+            && !(isVolumeControl() && isOffloadedOrDirect_l())) {
             mDisableWaitCnt = mMaxDisableWaitCnt;
         } else {
             mDisableWaitCnt = 1; // will cause immediate transition to IDLE
@@ -836,9 +836,9 @@
     mEffectInterface->command(EFFECT_CMD_RESET, 0, NULL, &replySize, &reply);
 }
 
-status_t EffectModule::configure()
+status_t EffectModule::configure_l()
 {
-    ALOGVV("configure() started");
+    ALOGVV("%s started", __func__);
     status_t status;
     uint32_t size;
     audio_channel_mask_t channelMask;
@@ -879,7 +879,7 @@
     mConfig.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
 
     // Don't use sample rate for thread if effect isn't offloadable.
-    if (callback->isOffloadOrDirect() && !isOffloaded()) {
+    if (callback->isOffloadOrDirect() && !isOffloaded_l()) {
         mConfig.inputCfg.samplingRate = DEFAULT_OUTPUT_SAMPLE_RATE;
         ALOGV("Overriding effect input as 48kHz");
     } else {
@@ -909,9 +909,9 @@
     mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
     mIsOutput = callback->isOutput();
 
-    ALOGV("configure() %p chain %p buffer %p framecount %zu",
-          this, callback->chain().promote().get(),
-          mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
+    ALOGV("%s %p chain %p buffer %p framecount %zu", __func__, this,
+          callback->chain().promote().get(), mConfig.inputCfg.buffer.raw,
+          mConfig.inputCfg.buffer.frameCount);
 
     status_t cmdStatus;
     size = sizeof(int);
@@ -1012,11 +1012,11 @@
 exit:
     // TODO: consider clearing mConfig on error.
     mStatus = status;
-    ALOGVV("configure ended");
+    ALOGVV("%s ended", __func__);
     return status;
 }
 
-status_t EffectModule::init()
+status_t EffectModule::init_l()
 {
     audio_utils::lock_guard _l(mutex());
     if (mEffectInterface == 0) {
@@ -1048,21 +1048,21 @@
     }
 }
 
-// start() must be called with PlaybackThread::mutex() or EffectChain::mutex() held
-status_t EffectModule::start()
+// start_l() must be called with EffectChain::mutex() held
+status_t EffectModule::start_l()
 {
     status_t status;
     {
         audio_utils::lock_guard _l(mutex());
-        status = start_l();
+        status = start_ll();
     }
     if (status == NO_ERROR) {
-        getCallback()->resetVolume();
+        getCallback()->resetVolume_l();
     }
     return status;
 }
 
-status_t EffectModule::start_l()
+status_t EffectModule::start_ll()
 {
     if (mEffectInterface == 0) {
         return NO_INIT;
@@ -1086,13 +1086,13 @@
     return status;
 }
 
-status_t EffectModule::stop()
+status_t EffectModule::stop_l()
 {
     audio_utils::lock_guard _l(mutex());
-    return stop_l();
+    return stop_ll();
 }
 
-status_t EffectModule::stop_l()
+status_t EffectModule::stop_ll()
 {
     if (mEffectInterface == 0) {
         return NO_INIT;
@@ -1103,11 +1103,11 @@
     status_t cmdStatus = NO_ERROR;
     uint32_t size = sizeof(status_t);
 
-    if (isVolumeControl() && isOffloadedOrDirect()) {
+    if (isVolumeControl() && isOffloadedOrDirect_l()) {
         // We have the EffectChain and EffectModule lock, permit a reentrant call to setVolume:
         // resetVolume_l --> setVolume_l --> EffectModule::setVolume
         mSetVolumeReentrantTid = gettid();
-        getCallback()->resetVolume();
+        getCallback()->resetVolume_l();
         mSetVolumeReentrantTid = INVALID_PID;
     }
 
@@ -1162,7 +1162,7 @@
                      std::vector<uint8_t>* reply)
 {
     audio_utils::lock_guard _l(mutex());
-    ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface.get());
+    ALOGVV("%s, cmdCode: %d, mEffectInterface: %p", __func__, cmdCode, mEffectInterface.get());
 
     if (mState == DESTROYED || mEffectInterface == 0) {
         return NO_INIT;
@@ -1258,20 +1258,20 @@
     }
 }
 
-bool EffectModule::isOffloadedOrDirect() const
+bool EffectModule::isOffloadedOrDirect_l() const
 {
     return getCallback()->isOffloadOrDirect();
 }
 
-bool EffectModule::isVolumeControlEnabled() const
+bool EffectModule::isVolumeControlEnabled_l() const
 {
-    return (isVolumeControl() && (isOffloadedOrDirect() ? isEnabled() : isProcessEnabled()));
+    return (isVolumeControl() && (isOffloadedOrDirect_l() ? isEnabled() : isProcessEnabled()));
 }
 
 void EffectModule::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
     ALOGVV("setInBuffer %p",(&buffer));
 
-    // mConfig.inputCfg.buffer.frameCount may be zero if configure() is not called yet.
+    // mConfig.inputCfg.buffer.frameCount may be zero if configure_l() is not called yet.
     if (buffer != 0) {
         mConfig.inputCfg.buffer.raw = buffer->audioBuffer()->raw;
         buffer->setFrameCount(mConfig.inputCfg.buffer.frameCount);
@@ -1317,7 +1317,7 @@
 void EffectModule::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
     ALOGVV("setOutBuffer %p",(&buffer));
 
-    // mConfig.outputCfg.buffer.frameCount may be zero if configure() is not called yet.
+    // mConfig.outputCfg.buffer.frameCount may be zero if configure_l() is not called yet.
     if (buffer != 0) {
         mConfig.outputCfg.buffer.raw = buffer->audioBuffer()->raw;
         buffer->setFrameCount(mConfig.outputCfg.buffer.frameCount);
@@ -1356,8 +1356,7 @@
     }
 }
 
-status_t EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
-{
+status_t EffectModule::setVolume(uint32_t* left, uint32_t* right, bool controller) {
     AutoLockReentrant _l(mutex(), mSetVolumeReentrantTid);
     if (mStatus != NO_ERROR) {
         return mStatus;
@@ -1480,7 +1479,7 @@
     return status;
 }
 
-status_t EffectModule::setOffloaded(bool offloaded, audio_io_handle_t io)
+status_t EffectModule::setOffloaded_l(bool offloaded, audio_io_handle_t io)
 {
     audio_utils::lock_guard _l(mutex());
     if (mStatus != NO_ERROR) {
@@ -1509,11 +1508,11 @@
         }
         mOffloaded = false;
     }
-    ALOGV("setOffloaded() offloaded %d io %d status %d", offloaded, io, status);
+    ALOGV("%s offloaded %d io %d status %d", __func__, offloaded, io, status);
     return status;
 }
 
-bool EffectModule::isOffloaded() const
+bool EffectModule::isOffloaded_l() const
 {
     audio_utils::lock_guard _l(mutex());
     return mOffloaded;
@@ -1528,8 +1527,16 @@
     return IAfEffectModule::isHapticGenerator(&mDescriptor.type);
 }
 
-status_t EffectModule::setHapticIntensity(int id, os::HapticScale intensity)
-{
+/*static*/
+bool IAfEffectModule::isSpatializer(const effect_uuid_t *type) {
+    return memcmp(type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0;
+}
+
+bool EffectModule::isSpatializer() const {
+    return IAfEffectModule::isSpatializer(&mDescriptor.type);
+}
+
+status_t EffectModule::setHapticScale_l(int id, os::HapticScale hapticScale) {
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
@@ -1544,7 +1551,7 @@
     param->vsize = sizeof(int32_t) * 2;
     *(int32_t*)param->data = HG_PARAM_HAPTIC_INTENSITY;
     *((int32_t*)param->data + 1) = id;
-    *((int32_t*)param->data + 2) = static_cast<int32_t>(intensity);
+    *((int32_t*)param->data + 2) = static_cast<int32_t>(hapticScale.getLevel());
     std::vector<uint8_t> response;
     status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
     if (status == NO_ERROR) {
@@ -1554,8 +1561,7 @@
     return status;
 }
 
-status_t EffectModule::setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo)
-{
+status_t EffectModule::setVibratorInfo_l(const media::AudioVibratorInfo& vibratorInfo) {
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
@@ -1584,8 +1590,8 @@
     return status;
 }
 
-status_t EffectModule::getConfigs(
-        audio_config_base_t* inputCfg, audio_config_base_t* outputCfg, bool* isOutput) const {
+status_t EffectModule::getConfigs_l(audio_config_base_t* inputCfg, audio_config_base_t* outputCfg,
+                                    bool* isOutput) const {
     audio_utils::lock_guard _l(mutex());
     if (mConfig.inputCfg.mask == 0 || mConfig.outputCfg.mask == 0) {
         return NO_INIT;
@@ -1600,6 +1606,35 @@
     return NO_ERROR;
 }
 
+status_t EffectModule::sendMetadata_ll(const std::vector<playback_track_metadata_v7_t>& metadata) {
+    if (mStatus != NO_ERROR) {
+        return mStatus;
+    }
+    // TODO b/307368176: send all metadata to effects if requested by the implementation.
+    // For now only send channel mask to Spatializer.
+    if (!isSpatializer()) {
+        return INVALID_OPERATION;
+    }
+
+    std::vector<uint8_t> request(
+            sizeof(effect_param_t) + sizeof(int32_t) + metadata.size() * sizeof(uint32_t));
+    effect_param_t *param = (effect_param_t*) request.data();
+    param->psize = sizeof(int32_t);
+    param->vsize = metadata.size() * sizeof(uint32_t);
+    *(int32_t*)param->data = SPATIALIZER_PARAM_INPUT_CHANNEL_MASK;
+    uint32_t* channelMasks = reinterpret_cast<uint32_t*>(param->data + sizeof(int32_t));
+    for (auto m : metadata) {
+        *channelMasks++ = m.channel_mask;
+    }
+    std::vector<uint8_t> response;
+    status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
+    if (status == NO_ERROR) {
+        LOG_ALWAYS_FATAL_IF(response.size() != sizeof(status_t));
+        status = *reinterpret_cast<const status_t*>(response.data());
+    }
+    return status;
+}
+
 static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
     std::stringstream ss;
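
Note: sendMetadata_ll() above (like setHapticScale_l() and setVibratorInfo_l() earlier in this file) drives EFFECT_CMD_SET_PARAM with the same wire layout: a parameter header carrying psize and vsize, then the parameter id, then the value payload. A standalone sketch of that packing follows; ParamHeader is a deliberately simplified stand-in for the real effect_param_t (which also carries a status field), and the parameter id is passed in rather than naming any real constant.

#include <cstdint>
#include <cstring>
#include <vector>

// Simplified stand-in for the effect_param_t wire header.
struct ParamHeader {
    uint32_t psize;  // size of the parameter id block, in bytes
    uint32_t vsize;  // size of the value block, in bytes
};

// Pack "param id + one uint32 value per channel mask", mirroring the code above.
std::vector<uint8_t> packChannelMasks(int32_t paramId, const std::vector<uint32_t>& masks) {
    std::vector<uint8_t> request(sizeof(ParamHeader) + sizeof(int32_t) +
                                 masks.size() * sizeof(uint32_t));
    auto* header = reinterpret_cast<ParamHeader*>(request.data());
    header->psize = sizeof(int32_t);
    header->vsize = masks.size() * sizeof(uint32_t);
    uint8_t* payload = request.data() + sizeof(ParamHeader);
    std::memcpy(payload, &paramId, sizeof(paramId));
    std::memcpy(payload + sizeof(paramId), masks.data(), masks.size() * sizeof(uint32_t));
    return request;
}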
 
@@ -1910,7 +1945,7 @@
     audio_config_base_t inputCfg = AUDIO_CONFIG_BASE_INITIALIZER;
     audio_config_base_t outputCfg = AUDIO_CONFIG_BASE_INITIALIZER;
     bool isOutput;
-    status_t status = effectModule->getConfigs(&inputCfg, &outputCfg, &isOutput);
+    status_t status = effectModule->getConfigs_l(&inputCfg, &outputCfg, &isOutput);
     if (status == NO_ERROR) {
         constexpr bool isInput = false; // effects always use 'OUT' channel masks.
         _config->inputCfg = VALUE_OR_RETURN_STATUS_AS_OUT(
@@ -2176,7 +2211,7 @@
     return 0;
 }
 
-std::vector<int> EffectChain::getEffectIds() const
+std::vector<int> EffectChain::getEffectIds_l() const
 {
     std::vector<int> ids;
     audio_utils::lock_guard _l(mutex());
@@ -2206,8 +2241,7 @@
 }
 
 // Must be called with EffectChain::mutex() locked
-void EffectChain::process_l()
-{
+void EffectChain::process_l() {
     // never process effects when:
     // - on an OFFLOAD thread
     // - no more tracks are on the session and the effect tail has been rendered
@@ -2250,7 +2284,7 @@
     }
     bool doResetVolume = false;
     for (size_t i = 0; i < size; i++) {
-        doResetVolume = mEffects[i]->updateState() || doResetVolume;
+        doResetVolume = mEffects[i]->updateState_l() || doResetVolume;
     }
     if (doResetVolume) {
         resetVolume_l();
@@ -2304,14 +2338,14 @@
                 numSamples * sizeof(float), &halBuffer);
         if (result != OK) return result;
 
-        effect->configure();
+        effect->configure_l();
 
         effect->setInBuffer(halBuffer);
         // auxiliary effects output samples to chain input buffer for further processing
         // by insert effects
         effect->setOutBuffer(mInBuffer);
     } else {
-        ssize_t idx_insert = getInsertIndex(desc);
+        ssize_t idx_insert = getInsertIndex_ll(desc);
         if (idx_insert < 0) {
             return INVALID_OPERATION;
         }
@@ -2319,7 +2353,7 @@
         size_t previousSize = mEffects.size();
         mEffects.insertAt(effect, idx_insert);
 
-        effect->configure();
+        effect->configure_l();
 
         // - By default:
         //   All effects read samples from chain input buffer.
@@ -2334,9 +2368,9 @@
             effect->setOutBuffer(mOutBuffer);
             if (idx_insert == 0) {
                 if (previousSize != 0) {
-                    mEffects[1]->configure();
+                    mEffects[1]->configure_l();
                     mEffects[1]->setInBuffer(mOutBuffer);
-                    mEffects[1]->updateAccessMode();      // reconfig if neeeded.
+                    mEffects[1]->updateAccessMode_l();  // reconfig if needed.
                 }
                 effect->setInBuffer(mInBuffer);
             } else {
@@ -2346,9 +2380,9 @@
             effect->setInBuffer(mInBuffer);
             if (idx_insert == static_cast<ssize_t>(previousSize)) {
                 if (idx_insert != 0) {
-                    mEffects[idx_insert-1]->configure();
+                    mEffects[idx_insert-1]->configure_l();
                     mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
-                    mEffects[idx_insert - 1]->updateAccessMode();      // reconfig if neeeded.
+                    mEffects[idx_insert - 1]->updateAccessMode_l();  // reconfig if needed.
                 }
                 effect->setOutBuffer(mOutBuffer);
             } else {
@@ -2358,21 +2392,21 @@
         ALOGV("%s effect %p, added in chain %p at rank %zu",
                 __func__, effect.get(), this, idx_insert);
     }
-    effect->configure();
+    effect->configure_l();
 
     return NO_ERROR;
 }
 
 std::optional<size_t> EffectChain::findVolumeControl_l(size_t from, size_t to) const {
     for (size_t i = std::min(to, mEffects.size()); i > from; i--) {
-        if (mEffects[i - 1]->isVolumeControlEnabled()) {
+        if (mEffects[i - 1]->isVolumeControlEnabled_l()) {
             return i - 1;
         }
     }
     return std::nullopt;
 }
 
-ssize_t EffectChain::getInsertIndex(const effect_descriptor_t& desc) {
+ssize_t EffectChain::getInsertIndex_ll(const effect_descriptor_t& desc) {
     // Insert effects are inserted at the end of mEffects vector as they are processed
     //  after track and auxiliary effects.
     // Insert effect order as a function of indicated preference:
@@ -2387,7 +2421,7 @@
     // already present
     // Spatializer or Downmixer effects are inserted in first position because
     // they adapt the channel count for all other effects in the chain
-    if ((memcmp(&desc.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0)
+    if (IAfEffectModule::isSpatializer(&desc.type)
             || (memcmp(&desc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0)) {
         return 0;
     }
@@ -2460,7 +2494,7 @@
             // the middle of a read from audio HAL
             if (mEffects[i]->state() == EffectModule::ACTIVE ||
                     mEffects[i]->state() == EffectModule::STOPPING) {
-                mEffects[i]->stop();
+                mEffects[i]->stop_l();
             }
             if (release) {
                 mEffects[i]->release_l();
@@ -2468,9 +2502,9 @@
 
             if (type != EFFECT_FLAG_TYPE_AUXILIARY) {
                 if (i == size - 1 && i != 0) {
-                    mEffects[i - 1]->configure();
+                    mEffects[i - 1]->configure_l();
                     mEffects[i - 1]->setOutBuffer(mOutBuffer);
-                    mEffects[i - 1]->updateAccessMode();      // reconfig if neeeded.
+                    mEffects[i - 1]->updateAccessMode_l();      // reconfig if needed.
                 }
             }
             mEffects.removeAt(i);
@@ -2479,9 +2513,9 @@
             // is updated if needed (can switch from HAL channel mask to mixer channel mask)
             if (type != EFFECT_FLAG_TYPE_AUXILIARY // TODO(b/284522658) breaks for aux FX, why?
                     && i == 0 && size > 1) {
-                mEffects[0]->configure();
+                mEffects[0]->configure_l();
                 mEffects[0]->setInBuffer(mInBuffer);
-                mEffects[0]->updateAccessMode();      // reconfig if neeeded.
+                mEffects[0]->updateAccessMode_l();      // reconfig if needed.
             }
 
             ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
@@ -2531,14 +2565,19 @@
 
 bool EffectChain::hasVolumeControlEnabled_l() const {
     for (const auto &effect : mEffects) {
-        if (effect->isVolumeControlEnabled()) return true;
+        if (effect->isVolumeControlEnabled_l()) return true;
     }
     return false;
 }
 
-// setVolume_l() must be called with IAfThreadBase::mutex() or EffectChain::mutex() held
-bool EffectChain::setVolume_l(uint32_t *left, uint32_t *right, bool force)
-{
+// setVolume() must be called without EffectChain::mutex()
+bool EffectChain::setVolume(uint32_t* left, uint32_t* right, bool force) {
+    audio_utils::lock_guard _l(mutex());
+    return setVolume_l(left, right, force);
+}
+
+// setVolume_l() must be called with EffectChain::mutex() held
+bool EffectChain::setVolume_l(uint32_t* left, uint32_t* right, bool force) {
     uint32_t newLeft = *left;
     uint32_t newRight = *right;
     const size_t size = mEffects.size();
@@ -2613,7 +2652,7 @@
     return volumeControlIndex.has_value();
 }
 
-// resetVolume_l() must be called with IAfThreadBase::mutex() or EffectChain::mutex() held
+// resetVolume_l() must be called with EffectChain::mutex() held
 void EffectChain::resetVolume_l()
 {
     if ((mLeftVolume != UINT_MAX) && (mRightVolume != UINT_MAX)) {
@@ -2635,15 +2674,15 @@
     return false;
 }
 
-void EffectChain::setHapticIntensity_l(int id, os::HapticScale intensity)
+void EffectChain::setHapticScale_l(int id, os::HapticScale hapticScale)
 {
     audio_utils::lock_guard _l(mutex());
     for (size_t i = 0; i < mEffects.size(); ++i) {
-        mEffects[i]->setHapticIntensity(id, intensity);
+        mEffects[i]->setHapticScale_l(id, hapticScale);
     }
 }
 
-void EffectChain::syncHalEffectsState()
+void EffectChain::syncHalEffectsState_l()
 {
     audio_utils::lock_guard _l(mutex());
     for (size_t i = 0; i < mEffects.size(); i++) {
@@ -2712,7 +2751,7 @@
         }
 
         if (desc->mRefCount++ == 0) {
-            sp<IAfEffectModule> effect = getEffectIfEnabled(type);
+            sp<IAfEffectModule> effect = getEffectIfEnabled_l(type);
             if (effect != 0) {
                 desc->mEffect = effect;
                 effect->setSuspended(true);
@@ -2765,7 +2804,7 @@
         }
         if (desc->mRefCount++ == 0) {
             Vector< sp<IAfEffectModule> > effects;
-            getSuspendEligibleEffects(effects);
+            getSuspendEligibleEffects_l(effects);
             for (size_t i = 0; i < effects.size(); i++) {
                 setEffectSuspended_l(&effects[i]->desc().type, true);
             }
@@ -2806,8 +2845,7 @@
 #endif //OPENSL_ES_H_
 
 /* static */
-bool EffectChain::isEffectEligibleForBtNrecSuspend(const effect_uuid_t *type)
-{
+bool EffectChain::isEffectEligibleForBtNrecSuspend_l(const effect_uuid_t* type) {
     // Only NS and AEC are suspended when BtNRec is off
     if ((memcmp(type, FX_IID_AEC, sizeof(effect_uuid_t)) == 0) ||
         (memcmp(type, FX_IID_NS, sizeof(effect_uuid_t)) == 0)) {
@@ -2816,7 +2854,7 @@
     return false;
 }
 
-bool EffectChain::isEffectEligibleForSuspend(const effect_descriptor_t& desc)
+bool EffectChain::isEffectEligibleForSuspend_l(const effect_descriptor_t& desc)
 {
     // auxiliary effects and visualizer are never suspended on output mix
     if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) &&
@@ -2829,26 +2867,24 @@
     return true;
 }
 
-void EffectChain::getSuspendEligibleEffects(
+void EffectChain::getSuspendEligibleEffects_l(
         Vector< sp<IAfEffectModule> > &effects)
 {
     effects.clear();
     for (size_t i = 0; i < mEffects.size(); i++) {
-        if (isEffectEligibleForSuspend(mEffects[i]->desc())) {
+        if (isEffectEligibleForSuspend_l(mEffects[i]->desc())) {
             effects.add(mEffects[i]);
         }
     }
 }
 
-sp<IAfEffectModule> EffectChain::getEffectIfEnabled(const effect_uuid_t *type)
+sp<IAfEffectModule> EffectChain::getEffectIfEnabled_l(const effect_uuid_t *type)
 {
     sp<IAfEffectModule> effect = getEffectFromType_l(type);
     return effect != 0 && effect->isEnabled() ? effect : 0;
 }
 
-void EffectChain::checkSuspendOnEffectEnabled(const sp<IAfEffectModule>& effect,
-                                                            bool enabled)
-{
+void EffectChain::checkSuspendOnEffectEnabled_l(const sp<IAfEffectModule>& effect, bool enabled) {
     ssize_t index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
     if (enabled) {
         if (index < 0) {
@@ -2857,18 +2893,17 @@
             if (index < 0) {
                 return;
             }
-            if (!isEffectEligibleForSuspend(effect->desc())) {
+            if (!isEffectEligibleForSuspend_l(effect->desc())) {
                 return;
             }
             setEffectSuspended_l(&effect->desc().type, enabled);
             index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
             if (index < 0) {
-                ALOGW("checkSuspendOnEffectEnabled() Fx should be suspended here!");
+                ALOGW("%s Fx should be suspended here!", __func__);
                 return;
             }
         }
-        ALOGV("checkSuspendOnEffectEnabled() enable suspending fx %08x",
-            effect->desc().type.timeLow);
+        ALOGV("%s enable suspending fx %08x", __func__, effect->desc().type.timeLow);
         sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
         // if effect is requested to suspended but was not yet enabled, suspend it now.
         if (desc->mEffect == 0) {
@@ -2880,8 +2915,7 @@
         if (index < 0) {
             return;
         }
-        ALOGV("checkSuspendOnEffectEnabled() disable restoring fx %08x",
-            effect->desc().type.timeLow);
+        ALOGV("%s disable restoring fx %08x", __func__, effect->desc().type.timeLow);
         sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
         desc->mEffect.clear();
         effect->setSuspended(false);
@@ -2983,6 +3017,20 @@
     return true;
 }
 
+// sendMetadata_l() must be called with thread->mutex() held
+void EffectChain::sendMetadata_l(const std::vector<playback_track_metadata_v7_t>& allMetadata,
+        const std::optional<const std::vector<playback_track_metadata_v7_t>> spatializedMetadata) {
+    audio_utils::lock_guard _l(mutex());
+    for (const auto& effect : mEffects) {
+        if (spatializedMetadata.has_value()
+                && IAfEffectModule::isSpatializer(&effect->desc().type)) {
+            effect->sendMetadata_ll(spatializedMetadata.value());
+        } else {
+            effect->sendMetadata_ll(allMetadata);
+        }
+    }
+}
+
 // EffectCallbackInterface implementation
 status_t EffectChain::EffectCallback::createEffectHal(
         const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t deviceId,
@@ -3196,8 +3244,9 @@
     t->setVolumeForOutput_l(left, right);
 }
 
-void EffectChain::EffectCallback::checkSuspendOnEffectEnabled(
-        const sp<IAfEffectBase>& effect, bool enabled, bool threadLocked) {
+void EffectChain::EffectCallback::checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect,
+                                                              bool enabled, bool threadLocked)
+        NO_THREAD_SAFETY_ANALYSIS {
     const sp<IAfThreadBase> t = thread().promote();
     if (t == nullptr) {
         return;
@@ -3209,7 +3258,7 @@
         return;
     }
     // in EffectChain context, an EffectBase is always from an EffectModule so static cast is safe
-    c->checkSuspendOnEffectEnabled(effect->asEffectModule(), enabled);
+    c->checkSuspendOnEffectEnabled_l(effect->asEffectModule(), enabled);
 }
 
 void EffectChain::EffectCallback::onEffectEnable(const sp<IAfEffectBase>& effect) {
@@ -3241,7 +3290,7 @@
     return true;
 }
 
-void EffectChain::EffectCallback::resetVolume() {
+void EffectChain::EffectCallback::resetVolume_l() {
     sp<IAfEffectChain> c = chain().promote();
     if (c == nullptr) {
         return;
@@ -3302,7 +3351,7 @@
     return status;
 }
 
-status_t DeviceEffectProxy::init(
+status_t DeviceEffectProxy::init_l(
         const std::map <audio_patch_handle_t, IAfPatchPanel::Patch>& patches) {
 //For all audio patches
 //If src or sink device match
@@ -3406,7 +3455,7 @@
             } else {
                 mHalEffect->setDevices({mDevice});
             }
-            mHalEffect->configure();
+            mHalEffect->configure_l();
         }
         *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
                                    mNotifyFramesProcessed);
@@ -3695,7 +3744,7 @@
     if (effect == nullptr) {
         return;
     }
-    effect->start();
+    effect->start_l();
 }
 
 void DeviceEffectProxy::ProxyCallback::onEffectDisable(
@@ -3704,7 +3753,7 @@
     if (effect == nullptr) {
         return;
     }
-    effect->stop();
+    effect->stop_l();
 }
 
 } // namespace android
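
Note: most of the renames in this file encode locking requirements in the method name: an _l suffix means the caller must already hold the relevant mutex, _ll means two locks must be held, and the wrapper one level up takes the next lock itself (start_l() locks the module and calls start_ll(), stop_l() does the same for stop_ll()). A toy sketch of that convention, with hypothetical names and a single std::mutex standing in for the audio_utils mutexes:

#include <mutex>

class Module {
public:
    // start_l(): the caller must already hold the chain-level lock; this method
    // takes the module's own lock and delegates to start_ll().
    int start_l() {
        std::lock_guard<std::mutex> guard(mModuleMutex);
        return start_ll();
    }

private:
    // start_ll(): both the chain lock (held by the caller of start_l()) and
    // mModuleMutex (taken in start_l()) are held when this runs.
    int start_ll() {
        mStarted = true;
        return 0;
    }

    std::mutex mModuleMutex;
    bool mStarted = false;
};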
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 9208c88..46c44a6 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -78,11 +78,11 @@
                         { return (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK)
                             == EFFECT_FLAG_VOLUME_MONITOR; }
 
-    status_t setEnabled(bool enabled, bool fromHandle) override;
-    status_t setEnabled_l(bool enabled) final;
+    status_t setEnabled(bool enabled, bool fromHandle) override EXCLUDES_EffectBase_Mutex;
+    status_t setEnabled_l(bool enabled) final REQUIRES(audio_utils::EffectBase_Mutex);
     bool isEnabled() const final;
-    void setSuspended(bool suspended) final;
-    bool suspended() const final;
+    void setSuspended(bool suspended) final EXCLUDES_EffectBase_Mutex;
+    bool suspended() const final EXCLUDES_EffectBase_Mutex;
 
     status_t command(int32_t __unused,
                              const std::vector<uint8_t>& __unused,
@@ -99,36 +99,40 @@
         return mCallback.load();
     }
 
-    status_t addHandle(IAfEffectHandle *handle) final;
-    ssize_t disconnectHandle(IAfEffectHandle *handle, bool unpinIfLast) final;
-    ssize_t removeHandle(IAfEffectHandle *handle) final;
-    ssize_t removeHandle_l(IAfEffectHandle *handle) final;
-    IAfEffectHandle* controlHandle_l() final;
-    bool purgeHandles() final;
+    status_t addHandle(IAfEffectHandle* handle) final EXCLUDES_EffectBase_Mutex;
+    ssize_t disconnectHandle(IAfEffectHandle* handle,
+                             bool unpinIfLast) final EXCLUDES_EffectBase_Mutex;
+    ssize_t removeHandle(IAfEffectHandle* handle) final EXCLUDES_EffectBase_Mutex;
+    ssize_t removeHandle_l(IAfEffectHandle* handle) final REQUIRES(audio_utils::EffectBase_Mutex);
+    IAfEffectHandle* controlHandle_l() final REQUIRES(audio_utils::EffectBase_Mutex);
+    bool purgeHandles() final EXCLUDES_EffectBase_Mutex;
 
-    void             checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) final;
+    void checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) final;
 
-    bool             isPinned() const final { return mPinned; }
-    void             unPin() final { mPinned = false; }
+    bool isPinned() const final { return mPinned; }
+    void unPin() final { mPinned = false; }
 
-    audio_utils::mutex& mutex() const final { return mMutex; }
+    audio_utils::mutex& mutex() const final
+            RETURN_CAPABILITY(android::audio_utils::EffectBase_Mutex) {
+        return mMutex;
+    }
 
-    status_t         updatePolicyState() final;
+    status_t updatePolicyState() final EXCLUDES_EffectBase_Mutex;
 
     sp<IAfEffectModule> asEffectModule() override { return nullptr; }
     sp<IAfDeviceEffectProxy> asDeviceEffectProxy() override { return nullptr; }
 
-    void             dump(int fd, const Vector<String16>& args) const override;
+    void dump(int fd, const Vector<String16>& args) const override;
 
 protected:
-    bool             isInternal_l() const {
-                         for (auto handle : mHandles) {
-                            if (handle->client() != nullptr) {
-                                return false;
-                            }
-                         }
-                         return true;
-                     }
+    bool isInternal_l() const REQUIRES(audio_utils::EffectBase_Mutex) {
+        for (auto handle : mHandles) {
+            if (handle->client() != nullptr) {
+                return false;
+            }
+        }
+        return true;
+    }
 
     bool             mPinned = false;
 
@@ -150,7 +154,10 @@
     // Audio policy effect state management
     // Mutex protecting transactions with audio policy manager as mutex() cannot
     // be held to avoid cross deadlocks with audio policy mutex
-    audio_utils::mutex& policyMutex() const { return mPolicyMutex; }
+    audio_utils::mutex& policyMutex() const
+            RETURN_CAPABILITY(android::audio_utils::EffectBase_PolicyMutex) {
+        return mPolicyMutex;
+    }
     mutable audio_utils::mutex mPolicyMutex{audio_utils::MutexOrder::kEffectBase_PolicyMutex};
     // Effect is registered in APM or not
     bool                      mPolicyRegistered = false;
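
Note: the REQUIRES / EXCLUDES_* / RETURN_CAPABILITY annotations being added throughout this header come from clang's thread-safety analysis (the audio_utils macros wrap the underlying attributes): RETURN_CAPABILITY documents which capability a mutex() accessor hands back, and REQUIRES makes the analyzer check that callers hold it. A minimal sketch using the raw clang attributes directly, with hypothetical class and member names:

// Compile with clang and -Wthread-safety to see the analysis in action.
#include <mutex>

#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES_CAP(x) __attribute__((requires_capability(x)))
#define ACQUIRE() __attribute__((acquire_capability()))
#define RELEASE() __attribute__((release_capability()))

class CAPABILITY("mutex") Mutex {
public:
    void lock() ACQUIRE() { mImpl.lock(); }
    void unlock() RELEASE() { mImpl.unlock(); }
private:
    std::mutex mImpl;
};

class Effect {
public:
    // Callers must hold mMutex; the analyzer flags any call site that does not.
    void configure_l() REQUIRES_CAP(mMutex) { mConfigured = true; }

    void configure() {
        mMutex.lock();
        configure_l();   // accepted: mMutex is held here
        mMutex.unlock();
    }

private:
    Mutex mMutex;
    bool mConfigured GUARDED_BY(mMutex) = false;
};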
@@ -175,25 +182,23 @@
                     int id,
                     audio_session_t sessionId,
                     bool pinned,
-                    audio_port_handle_t deviceId);
-    ~EffectModule() override;
+                    audio_port_handle_t deviceId) REQUIRES(audio_utils::EffectChain_Mutex);
+    ~EffectModule() override REQUIRES(audio_utils::EffectChain_Mutex);
 
-    void process() final;
-    bool updateState() final;
-    status_t command(int32_t cmdCode,
-                     const std::vector<uint8_t>& cmdData,
-                     int32_t maxReplySize,
-                     std::vector<uint8_t>* reply) final;
+    void process() final EXCLUDES_EffectBase_Mutex;
+    bool updateState_l() final REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
+    status_t command(int32_t cmdCode, const std::vector<uint8_t>& cmdData, int32_t maxReplySize,
+                     std::vector<uint8_t>* reply) final EXCLUDES_EffectBase_Mutex;
 
-    void reset_l() final;
-    status_t configure() final;
-    status_t init() final;
+    void reset_l() final REQUIRES(audio_utils::EffectBase_Mutex);
+    status_t configure_l() final REQUIRES(audio_utils::EffectChain_Mutex);
+    status_t init_l() final REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
     uint32_t status() const final {
         return mStatus;
     }
     bool isProcessEnabled() const final;
-    bool isOffloadedOrDirect() const final;
-    bool isVolumeControlEnabled() const final;
+    bool isOffloadedOrDirect_l() const final REQUIRES(audio_utils::EffectChain_Mutex);
+    bool isVolumeControlEnabled_l() const final REQUIRES(audio_utils::EffectChain_Mutex);
     void setInBuffer(const sp<EffectBufferHalInterface>& buffer) final;
     int16_t *inBuffer() const final {
         return mInBuffer != 0 ? reinterpret_cast<int16_t*>(mInBuffer->ptr()) : NULL;
@@ -203,34 +208,42 @@
         return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
     }
     // Updates the access mode if it is out of date.  May issue a new effect configure.
-    void updateAccessMode() final {
-                    if (requiredEffectBufferAccessMode() != mConfig.outputCfg.accessMode) {
-                        configure();
-                    }
-                }
-    status_t setDevices(const AudioDeviceTypeAddrVector &devices) final;
-    status_t setInputDevice(const AudioDeviceTypeAddr &device) final;
+    void updateAccessMode_l() final REQUIRES(audio_utils::EffectChain_Mutex) {
+        if (requiredEffectBufferAccessMode() != mConfig.outputCfg.accessMode) {
+            configure_l();
+        }
+    }
+    status_t setDevices(const AudioDeviceTypeAddrVector& devices) final EXCLUDES_EffectBase_Mutex;
+    status_t setInputDevice(const AudioDeviceTypeAddr& device) final EXCLUDES_EffectBase_Mutex;
     status_t setVolume(uint32_t *left, uint32_t *right, bool controller) final;
-    status_t setMode(audio_mode_t mode) final;
-    status_t setAudioSource(audio_source_t source) final;
-    status_t start() final;
-    status_t stop() final;
+    status_t setMode(audio_mode_t mode) final EXCLUDES_EffectBase_Mutex;
+    status_t setAudioSource(audio_source_t source) final EXCLUDES_EffectBase_Mutex;
+    status_t start_l() final REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
+    status_t stop_l() final REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
 
-    status_t setOffloaded(bool offloaded, audio_io_handle_t io) final;
-    bool isOffloaded() const final;
-    void addEffectToHal_l() final;
-    void release_l() final;
+    status_t setOffloaded_l(bool offloaded, audio_io_handle_t io) final
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
+    bool isOffloaded_l() const final
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
+    void addEffectToHal_l() final REQUIRES(audio_utils::EffectChain_Mutex);
+    void release_l() final REQUIRES(audio_utils::EffectChain_Mutex);
 
     sp<IAfEffectModule> asEffectModule() final { return this; }
 
     bool isHapticGenerator() const final;
+    bool isSpatializer() const final;
 
-    status_t setHapticIntensity(int id, os::HapticScale intensity) final;
-    status_t setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo) final;
+    status_t setHapticScale_l(int id, os::HapticScale hapticScale) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex;
+    status_t setVibratorInfo_l(const media::AudioVibratorInfo& vibratorInfo) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex;
+    status_t sendMetadata_ll(const std::vector<playback_track_metadata_v7_t>& metadata) final
+            REQUIRES(audio_utils::ThreadBase_Mutex,
+                     audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex;
 
-    status_t getConfigs(audio_config_base_t* inputCfg,
-                                audio_config_base_t* outputCfg,
-                                bool* isOutput) const final;
+    status_t getConfigs_l(audio_config_base_t* inputCfg, audio_config_base_t* outputCfg,
+                          bool* isOutput) const final
+            REQUIRES(audio_utils::EffectHandle_Mutex) EXCLUDES_EffectBase_Mutex;
 
     void dump(int fd, const Vector<String16>& args) const final;
 
@@ -241,9 +254,9 @@
 
     DISALLOW_COPY_AND_ASSIGN(EffectModule);
 
-    status_t start_l();
-    status_t stop_l();
-    status_t removeEffectFromHal_l();
+    status_t start_ll() REQUIRES(audio_utils::EffectChain_Mutex, audio_utils::EffectBase_Mutex);
+    status_t stop_ll() REQUIRES(audio_utils::EffectChain_Mutex, audio_utils::EffectBase_Mutex);
+    status_t removeEffectFromHal_l() REQUIRES(audio_utils::EffectChain_Mutex);
     status_t sendSetAudioDevicesCommand(const AudioDeviceTypeAddrVector &devices, uint32_t cmdCode);
     effect_buffer_access_e requiredEffectBufferAccessMode() const {
         return mConfig.inputCfg.buffer.raw == mConfig.outputCfg.buffer.raw
@@ -366,7 +379,9 @@
 private:
     DISALLOW_COPY_AND_ASSIGN(EffectHandle);
 
-    audio_utils::mutex& mutex() const { return mMutex; }
+    audio_utils::mutex& mutex() const RETURN_CAPABILITY(android::audio_utils::EffectHandle_Mutex) {
+        return mMutex;
+    }
     // protects IEffect method calls
     mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kEffectHandle_Mutex};
     const wp<IAfEffectBase> mEffect;               // pointer to controlled EffectModule
@@ -399,34 +414,43 @@
 public:
     EffectChain(const sp<IAfThreadBase>& thread, audio_session_t sessionId);
 
-    void process_l() final;
+    void process_l() final REQUIRES(audio_utils::EffectChain_Mutex);
 
-    audio_utils::mutex& mutex() const final { return mMutex; }
+    audio_utils::mutex& mutex() const final RETURN_CAPABILITY(audio_utils::EffectChain_Mutex) {
+        return mMutex;
+    }
 
-    status_t createEffect_l(sp<IAfEffectModule>& effect,
-                            effect_descriptor_t *desc,
-                            int id,
-                            audio_session_t sessionId,
-                            bool pinned) final;
-    status_t addEffect_l(const sp<IAfEffectModule>& handle) final;
-    status_t addEffect_ll(const sp<IAfEffectModule>& handle) final;
-    size_t removeEffect_l(const sp<IAfEffectModule>& handle, bool release = false) final;
+    status_t createEffect_l(sp<IAfEffectModule>& effect, effect_descriptor_t* desc, int id,
+                            audio_session_t sessionId, bool pinned) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
+    status_t addEffect_l(const sp<IAfEffectModule>& handle) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
+    status_t addEffect_ll(const sp<IAfEffectModule>& handle) final
+            REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex);
+    size_t removeEffect_l(const sp<IAfEffectModule>& handle, bool release = false) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
 
     audio_session_t sessionId() const final { return mSessionId; }
     void setSessionId(audio_session_t sessionId) final { mSessionId = sessionId; }
 
-    sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor) const final;
-    sp<IAfEffectModule> getEffectFromId_l(int id) const final;
-    sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t *type) const final;
-    std::vector<int> getEffectIds() const final;
+    sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t* descriptor) const final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    sp<IAfEffectModule> getEffectFromId_l(int id) const final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t* type) const final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    std::vector<int> getEffectIds_l() const final REQUIRES(audio_utils::ThreadBase_Mutex);
     // FIXME use float to improve the dynamic range
 
-    bool setVolume_l(uint32_t *left, uint32_t *right, bool force = false) final;
-    void resetVolume_l() final;
-    void setDevices_l(const AudioDeviceTypeAddrVector &devices) final;
-    void setInputDevice_l(const AudioDeviceTypeAddr &device) final;
-    void setMode_l(audio_mode_t mode) final;
-    void setAudioSource_l(audio_source_t source) final;
+    bool setVolume(uint32_t* left, uint32_t* right,
+                   bool force = false) final EXCLUDES_EffectChain_Mutex;
+    void resetVolume_l() final REQUIRES(audio_utils::EffectChain_Mutex);
+    void setDevices_l(const AudioDeviceTypeAddrVector& devices) final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    void setInputDevice_l(const AudioDeviceTypeAddr& device) final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
+    void setMode_l(audio_mode_t mode) final REQUIRES(audio_utils::ThreadBase_Mutex);
+    void setAudioSource_l(audio_source_t source) final REQUIRES(audio_utils::ThreadBase_Mutex);
 
     void setInBuffer(const sp<EffectBufferHalInterface>& buffer) final {
         mInBuffer = buffer;
@@ -457,21 +481,22 @@
 
     // suspend or restore effects of the specified type. The number of suspend requests is counted
     // and restore occurs once all suspend requests are cancelled.
-    void setEffectSuspended_l(const effect_uuid_t *type,
-                              bool suspend) final;
+    void setEffectSuspended_l(const effect_uuid_t* type, bool suspend) final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
     // suspend all eligible effects
-    void setEffectSuspendedAll_l(bool suspend) final;
+    void setEffectSuspendedAll_l(bool suspend) final REQUIRES(audio_utils::ThreadBase_Mutex);
     // check if effects should be suspended or restored when a given effect is enable or disabled
-    void checkSuspendOnEffectEnabled(
-            const sp<IAfEffectModule>& effect, bool enabled) final;
+    void checkSuspendOnEffectEnabled_l(const sp<IAfEffectModule>& effect, bool enabled) final
+            REQUIRES(audio_utils::ThreadBase_Mutex);
 
-    void clearInputBuffer() final;
+    void clearInputBuffer() final EXCLUDES_EffectChain_Mutex;
 
     // At least one non offloadable effect in the chain is enabled
-    bool isNonOffloadableEnabled() const final;
-    bool isNonOffloadableEnabled_l() const final;
+    bool isNonOffloadableEnabled() const final EXCLUDES_EffectChain_Mutex;
+    bool isNonOffloadableEnabled_l() const final REQUIRES(audio_utils::EffectChain_Mutex);
 
-    void syncHalEffectsState() final;
+    void syncHalEffectsState_l()
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex final;
 
     // flags is an ORed set of audio_output_flags_t which is updated on return.
     void checkOutputFlagCompatibility(audio_output_flags_t *flags) const final;
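
Note: the suspend bookkeeping described above is reference counted: each setEffectSuspended_l() request for a given type bumps a counter, and the effect is only restored once every request has been cancelled. The core of that counting, reduced to a map of per-type counters with hypothetical suspend/restore callbacks:

#include <cstdint>
#include <functional>
#include <map>
#include <utility>

class SuspendRegistry {
public:
    SuspendRegistry(std::function<void(uint32_t)> suspend, std::function<void(uint32_t)> restore)
        : mSuspend(std::move(suspend)), mRestore(std::move(restore)) {}

    // Count requests per effect type; only the 0 -> 1 transition actually suspends,
    // and only the last cancellation restores.
    void setSuspended(uint32_t type, bool suspend) {
        int& count = mCounts[type];
        if (suspend) {
            if (count++ == 0) mSuspend(type);
        } else if (count > 0 && --count == 0) {
            mRestore(type);
            mCounts.erase(type);
        }
    }

private:
    std::function<void(uint32_t)> mSuspend;
    std::function<void(uint32_t)> mRestore;
    std::map<uint32_t, int> mCounts;
};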
@@ -490,12 +515,13 @@
 
     // isCompatibleWithThread_l() must be called with thread->mutex() held
     bool isCompatibleWithThread_l(const sp<IAfThreadBase>& thread) const final
-            REQUIRES(audio_utils::ThreadBase_Mutex);
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
 
     // Requires either IAfThreadBase::mutex() or EffectChain::mutex() held
     bool containsHapticGeneratingEffect_l() final;
 
-    void setHapticIntensity_l(int id, os::HapticScale intensity) final;
+    void setHapticScale_l(int id, os::HapticScale hapticScale) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex;
 
     sp<EffectCallbackInterface> effectCallback() const final { return mEffectCallback; }
 
@@ -513,9 +539,15 @@
         return mEffects[index];
     }
 
-    void setThread(const sp<IAfThreadBase>& thread) final;
+    void sendMetadata_l(const std::vector<playback_track_metadata_v7_t>& allMetadata,
+        const std::optional<const std::vector<playback_track_metadata_v7_t>> spatializedMetadata)
+            final REQUIRES(audio_utils::ThreadBase_Mutex);
 
-private:
+    void setThread(const sp<IAfThreadBase>& thread) final EXCLUDES_EffectChain_Mutex;
+
+  private:
+    bool setVolume_l(uint32_t* left, uint32_t* right, bool force = false)
+            REQUIRES(audio_utils::EffectChain_Mutex);
 
     // For transaction consistency, please consider holding the EffectChain lock before
     // calling the EffectChain::EffectCallback methods, excepting
@@ -562,9 +594,10 @@
         void setVolumeForOutput(float left, float right) const override;
 
         // check if effects should be suspended/restored when a given effect is enable/disabled
-        void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect,
-                              bool enabled, bool threadLocked) override;
-        void resetVolume() override;
+        void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect, bool enabled,
+                                         bool threadLocked) override;
+        void resetVolume_l() override
+                REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex);
         product_strategy_t strategy() const override;
         int32_t activeTrackCnt() const override;
         void onEffectEnable(const sp<IAfEffectBase>& effect) override;
@@ -604,27 +637,34 @@
 
     // get a list of effect modules to suspend when an effect of the type
     // passed is enabled.
-    void  getSuspendEligibleEffects(Vector<sp<IAfEffectModule>> &effects);
+    void getSuspendEligibleEffects_l(Vector<sp<IAfEffectModule>>& effects)
+            REQUIRES(audio_utils::ThreadBase_Mutex);
 
     // get an effect module if it is currently enable
-    sp<IAfEffectModule> getEffectIfEnabled(const effect_uuid_t *type);
+    sp<IAfEffectModule> getEffectIfEnabled_l(const effect_uuid_t* type)
+            REQUIRES(audio_utils::ThreadBase_Mutex);
     // true if the effect whose descriptor is passed can be suspended
     // OEMs can modify the rules implemented in this method to exclude specific effect
     // types or implementations from the suspend/restore mechanism.
-    bool isEffectEligibleForSuspend(const effect_descriptor_t& desc);
+    bool isEffectEligibleForSuspend_l(const effect_descriptor_t& desc)
+            REQUIRES(audio_utils::ThreadBase_Mutex);
 
-    static bool isEffectEligibleForBtNrecSuspend(const effect_uuid_t *type);
+    static bool isEffectEligibleForBtNrecSuspend_l(const effect_uuid_t* type)
+            REQUIRES(audio_utils::ThreadBase_Mutex);
 
-    void clearInputBuffer_l();
+    void clearInputBuffer_l() REQUIRES(audio_utils::EffectChain_Mutex);
 
     // true if any effect module within the chain has volume control
-    bool hasVolumeControlEnabled_l() const;
+    bool hasVolumeControlEnabled_l() const REQUIRES(audio_utils::EffectChain_Mutex);
 
-    void setVolumeForOutput_l(uint32_t left, uint32_t right);
+    void setVolumeForOutput_l(uint32_t left, uint32_t right)
+            REQUIRES(audio_utils::EffectChain_Mutex);
 
-    ssize_t getInsertIndex(const effect_descriptor_t& desc);
+    ssize_t getInsertIndex_ll(const effect_descriptor_t& desc)
+            REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex);
 
-    std::optional<size_t> findVolumeControl_l(size_t from, size_t to) const;
+    std::optional<size_t> findVolumeControl_l(size_t from, size_t to) const
+            REQUIRES(audio_utils::EffectChain_Mutex);
 
     // mutex protecting effect list
     mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kEffectChain_Mutex};
@@ -668,11 +708,11 @@
     status_t setEnabled(bool enabled, bool fromHandle) final;
     sp<IAfDeviceEffectProxy> asDeviceEffectProxy() final { return this; }
 
-    status_t init(const std::map<audio_patch_handle_t,
-            IAfPatchPanel::Patch>& patches) final;
+    status_t init_l(const std::map<audio_patch_handle_t, IAfPatchPanel::Patch>& patches) final
+            REQUIRES(audio_utils::DeviceEffectManager_Mutex) EXCLUDES_EffectBase_Mutex;
 
     status_t onCreatePatch(audio_patch_handle_t patchHandle,
-            const IAfPatchPanel::Patch& patch) final;
+                           const IAfPatchPanel::Patch& patch) final;
 
     status_t onUpdatePatch(audio_patch_handle_t oldPatchHandle, audio_patch_handle_t newPatchHandle,
            const IAfPatchPanel::Patch& patch) final;
@@ -690,10 +730,8 @@
     audio_channel_mask_t channelMask() const final;
     uint32_t channelCount() const final;
 
-    status_t command(int32_t cmdCode,
-                     const std::vector<uint8_t>& cmdData,
-                     int32_t maxReplySize,
-                     std::vector<uint8_t>* reply) final;
+    status_t command(int32_t cmdCode, const std::vector<uint8_t>& cmdData, int32_t maxReplySize,
+                     std::vector<uint8_t>* reply) final EXCLUDES_DeviceEffectProxy_ProxyMutex;
 
     void dump2(int fd, int spaces) const final;
 
@@ -739,7 +777,7 @@
 
         void checkSuspendOnEffectEnabled(const sp<IAfEffectBase>& effect __unused,
                               bool enabled __unused, bool threadLocked __unused) override {}
-        void resetVolume() override {}
+        void resetVolume_l() override REQUIRES(audio_utils::EffectChain_Mutex) {}
         product_strategy_t strategy() const override  { return static_cast<product_strategy_t>(0); }
         int32_t activeTrackCnt() const override { return 0; }
         void onEffectEnable(const sp<IAfEffectBase>& effect __unused) override;
@@ -759,13 +797,16 @@
     };
 
     status_t checkPort(const IAfPatchPanel::Patch& patch,
-            const struct audio_port_config *port, sp<IAfEffectHandle> *handle);
+            const struct audio_port_config* port, sp<IAfEffectHandle>* handle);
 
     const AudioDeviceTypeAddr mDevice;
     const sp<DeviceEffectManagerCallback> mManagerCallback;
     const sp<ProxyCallback> mMyCallback;
 
-    audio_utils::mutex& proxyMutex() const { return mProxyMutex; }
+    audio_utils::mutex& proxyMutex() const
+            RETURN_CAPABILITY(android::audio_utils::DeviceEffectProxy_ProxyMutex) {
+        return mProxyMutex;
+    }
     mutable audio_utils::mutex mProxyMutex{
             audio_utils::MutexOrder::kDeviceEffectProxy_ProxyMutex};
     std::map<audio_patch_handle_t, sp<IAfEffectHandle>> mEffectHandles; // protected by mProxyMutex
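
The REQUIRES / EXCLUDES_* / ACQUIRE / RELEASE / RETURN_CAPABILITY annotations this change adds throughout Effects.h and IAfEffect.h are Clang -Wthread-safety capability attributes; by convention here, a "_l" suffix marks a method that must be called with the named mutex already held, and "_ll" requires two. A minimal, self-contained sketch of the pattern (the macro spellings and class names below are illustrative, not the audio_utils wrappers):

    // Minimal sketch of the Clang -Wthread-safety pattern used above; the macro
    // spellings and class names are illustrative, not the audio_utils wrappers.
    #include <mutex>

    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define GUARDED_BY(x)        __attribute__((guarded_by(x)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define EXCLUDES(...)        __attribute__((locks_excluded(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
    #define RETURN_CAPABILITY(x) __attribute__((lock_returned(x)))

    class CAPABILITY("mutex") Mutex {
    public:
        void lock() ACQUIRE() { mStd.lock(); }
        void unlock() RELEASE() { mStd.unlock(); }
    private:
        std::mutex mStd;
    };

    class Chain {
    public:
        // The accessor advertises which capability it returns, so contracts can be
        // written as REQUIRES(chain.mutex()) at call sites.
        Mutex& mutex() const RETURN_CAPABILITY(mMutex) { return mMutex; }

        // "_l" convention: the caller must already hold mMutex.
        void process_l() REQUIRES(mMutex) { ++mFrames; }

        // Non-"_l" variant: must be called without mMutex held; it takes the lock itself.
        void process() EXCLUDES(mMutex) {
            mutex().lock();
            process_l();
            mutex().unlock();
        }
    private:
        mutable Mutex mMutex;
        int mFrames GUARDED_BY(mMutex) = 0;
    };

Compiling such a sketch with clang++ -Wthread-safety flags any call to process_l() made without the mutex held, which is the guarantee these annotations add to the effect classes.
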
diff --git a/services/audioflinger/IAfEffect.h b/services/audioflinger/IAfEffect.h
index 8c5bc4b..fd4dd62 100644
--- a/services/audioflinger/IAfEffect.h
+++ b/services/audioflinger/IAfEffect.h
@@ -80,7 +80,7 @@
     // Methods usually implemented with help from EffectChain: pay attention to mutex locking order
     virtual product_strategy_t strategy() const = 0;
     virtual int32_t activeTrackCnt() const = 0;
-    virtual void resetVolume() = 0;
+    virtual void resetVolume_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
     virtual wp<IAfEffectChain> chain() const = 0;
     virtual bool isAudioPolicyReady() const = 0;
 };
@@ -106,43 +106,45 @@
     virtual bool isOffloadable() const = 0;
     virtual bool isImplementationSoftware() const = 0;
     virtual bool isProcessImplemented() const = 0;
-    virtual bool isVolumeControl() const = 0;
+    virtual bool isVolumeControl() const REQUIRES(audio_utils::EffectChain_Mutex) = 0;
     virtual bool isVolumeMonitor() const = 0;
     virtual bool isEnabled() const = 0;
     virtual bool isPinned() const = 0;
     virtual void unPin() = 0;
-    virtual status_t updatePolicyState() = 0;
-    virtual bool purgeHandles() = 0;
+    virtual status_t updatePolicyState() EXCLUDES_EffectBase_Mutex = 0;
+    virtual bool purgeHandles() EXCLUDES_EffectBase_Mutex = 0;
     virtual void checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) = 0;
 
     // mCallback is atomic so this can be lock-free.
     virtual void setCallback(const sp<EffectCallbackInterface>& callback) = 0;
     virtual sp<EffectCallbackInterface> getCallback() const = 0;
 
-    virtual status_t addHandle(IAfEffectHandle *handle) = 0;
-    virtual ssize_t removeHandle(IAfEffectHandle *handle) = 0;
+    virtual status_t addHandle(IAfEffectHandle* handle) EXCLUDES_EffectBase_Mutex = 0;
+    virtual ssize_t removeHandle(IAfEffectHandle* handle) EXCLUDES_EffectBase_Mutex = 0;
 
     virtual sp<IAfEffectModule> asEffectModule() = 0;
     virtual sp<IAfDeviceEffectProxy> asDeviceEffectProxy() = 0;
 
-    virtual status_t command(int32_t cmdCode,
-            const std::vector<uint8_t>& cmdData,
-            int32_t maxReplySize,
-            std::vector<uint8_t>* reply) = 0;
+    virtual status_t command(int32_t cmdCode, const std::vector<uint8_t>& cmdData,
+                             int32_t maxReplySize, std::vector<uint8_t>* reply)
+            EXCLUDES(audio_utils::EffectBase_Mutex) = 0;
 
     virtual void dump(int fd, const Vector<String16>& args) const = 0;
 
 private:
-    virtual status_t setEnabled(bool enabled, bool fromHandle) = 0;
-    virtual status_t setEnabled_l(bool enabled) = 0;
-    virtual void setSuspended(bool suspended) = 0;
-    virtual bool suspended() const = 0;
+    virtual status_t setEnabled(bool enabled, bool fromHandle) EXCLUDES_EffectBase_Mutex = 0;
+    virtual status_t setEnabled_l(bool enabled) REQUIRES(audio_utils::EffectBase_Mutex) = 0;
+    virtual void setSuspended(bool suspended) EXCLUDES_EffectBase_Mutex = 0;
+    virtual bool suspended() const EXCLUDES_EffectBase_Mutex = 0;
 
-    virtual ssize_t disconnectHandle(IAfEffectHandle *handle, bool unpinIfLast) = 0;
-    virtual ssize_t removeHandle_l(IAfEffectHandle *handle) = 0;
-    virtual IAfEffectHandle* controlHandle_l() = 0;
+    virtual ssize_t disconnectHandle(IAfEffectHandle* handle,
+                                     bool unpinIfLast) EXCLUDES_EffectBase_Mutex = 0;
+    virtual ssize_t removeHandle_l(IAfEffectHandle* handle)
+            REQUIRES(audio_utils::EffectBase_Mutex) = 0;
+    virtual IAfEffectHandle* controlHandle_l() REQUIRES(audio_utils::EffectBase_Mutex) = 0;
 
-    virtual audio_utils::mutex& mutex() const = 0;
+    virtual audio_utils::mutex& mutex() const
+            RETURN_CAPABILITY(android::audio_utils::EffectBase_Mutex) = 0;
 };
 
 class IAfEffectModule : public virtual IAfEffectBase {
@@ -162,41 +164,51 @@
     virtual status_t setDevices(const AudioDeviceTypeAddrVector &devices) = 0;
     virtual status_t setInputDevice(const AudioDeviceTypeAddr &device) = 0;
     virtual status_t setVolume(uint32_t *left, uint32_t *right, bool controller) = 0;
-    virtual status_t setOffloaded(bool offloaded, audio_io_handle_t io) = 0;
-    virtual bool isOffloaded() const = 0;
+    virtual status_t setOffloaded_l(bool offloaded, audio_io_handle_t io) = 0;
+    virtual bool isOffloaded_l() const = 0;
 
     virtual status_t setAudioSource(audio_source_t source) = 0;
     virtual status_t setMode(audio_mode_t mode) = 0;
 
-    virtual status_t start() = 0;
-    virtual status_t getConfigs(audio_config_base_t* inputCfg,
-            audio_config_base_t* outputCfg,
-            bool* isOutput) const = 0;
+    virtual status_t start_l() = 0;
+    virtual status_t getConfigs_l(audio_config_base_t* inputCfg, audio_config_base_t* outputCfg,
+                                  bool* isOutput) const
+            REQUIRES(audio_utils::EffectHandle_Mutex) EXCLUDES_EffectBase_Mutex = 0;
 
     static bool isHapticGenerator(const effect_uuid_t* type);
     virtual bool isHapticGenerator() const = 0;
-    virtual status_t setHapticIntensity(int id, os::HapticScale intensity) = 0;
-    virtual status_t setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo) = 0;
+    static bool isSpatializer(const effect_uuid_t* type);
+    virtual bool isSpatializer() const = 0;
+
+    virtual status_t setHapticScale_l(int id, os::HapticScale hapticScale)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex = 0;
+    virtual status_t setVibratorInfo_l(const media::AudioVibratorInfo& vibratorInfo)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectBase_Mutex = 0;
+    virtual status_t sendMetadata_ll(const std::vector<playback_track_metadata_v7_t>& metadata)
+            REQUIRES(audio_utils::ThreadBase_Mutex,
+                     audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
 
 private:
     virtual void process() = 0;
-    virtual bool updateState() = 0;
-    virtual void reset_l() = 0;
-    virtual status_t configure() = 0;
-    virtual status_t init() = 0;
+    virtual bool updateState_l()
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
+    virtual void reset_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
+    virtual status_t configure_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
+    virtual status_t init_l()
+            REQUIRES(audio_utils::EffectChain_Mutex) EXCLUDES_EffectBase_Mutex = 0;
     virtual uint32_t status() const = 0;
     virtual bool isProcessEnabled() const = 0;
-    virtual bool isOffloadedOrDirect() const = 0;
-    virtual bool isVolumeControlEnabled() const = 0;
+    virtual bool isOffloadedOrDirect_l() const REQUIRES(audio_utils::EffectChain_Mutex) = 0;
+    virtual bool isVolumeControlEnabled_l() const REQUIRES(audio_utils::EffectChain_Mutex) = 0;
 
     virtual void setInBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
     virtual void setOutBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
     virtual int16_t *outBuffer() const = 0;
 
     // Updates the access mode if it is out of date.  May issue a new effect configure.
-    virtual void updateAccessMode() = 0;
+    virtual void updateAccessMode_l() = 0;
 
-    virtual status_t stop() = 0;
+    virtual status_t stop_l() = 0;
     virtual void addEffectToHal_l() = 0;
     virtual void release_l() = 0;
 };
@@ -216,33 +228,41 @@
     // a session is stopped or removed to allow effect tail to be rendered
     static constexpr int kProcessTailDurationMs = 1000;
 
-    virtual void process_l() = 0;
+    virtual void process_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
 
-    virtual audio_utils::mutex& mutex() const = 0;
+    virtual audio_utils::mutex& mutex() const RETURN_CAPABILITY(audio_utils::EffectChain_Mutex) = 0;
 
-    virtual status_t createEffect_l(sp<IAfEffectModule>& effect,
-                            effect_descriptor_t *desc,
-                            int id,
-                            audio_session_t sessionId,
-                            bool pinned) = 0;
+    virtual status_t createEffect_l(sp<IAfEffectModule>& effect, effect_descriptor_t* desc, int id,
+                                    audio_session_t sessionId, bool pinned)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
 
-    virtual status_t addEffect_l(const sp<IAfEffectModule>& handle) = 0;
-    virtual status_t addEffect_ll(const sp<IAfEffectModule>& handle) = 0;
-    virtual size_t removeEffect_l(const sp<IAfEffectModule>& handle, bool release = false) = 0;
+    virtual status_t addEffect_l(const sp<IAfEffectModule>& handle)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
+    virtual status_t addEffect_ll(const sp<IAfEffectModule>& handle)
+            REQUIRES(audio_utils::ThreadBase_Mutex, audio_utils::EffectChain_Mutex) = 0;
+    virtual size_t removeEffect_l(const sp<IAfEffectModule>& handle,
+                                  bool release = false) EXCLUDES_EffectChain_Mutex = 0;
 
     virtual audio_session_t sessionId() const = 0;
     virtual void setSessionId(audio_session_t sessionId) = 0;
 
-    virtual sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor) const = 0;
-    virtual sp<IAfEffectModule> getEffectFromId_l(int id) const = 0;
-    virtual sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t *type) const = 0;
-    virtual std::vector<int> getEffectIds() const = 0;
-    virtual bool setVolume_l(uint32_t *left, uint32_t *right, bool force = false) = 0;
-    virtual void resetVolume_l() = 0;
-    virtual void setDevices_l(const AudioDeviceTypeAddrVector &devices) = 0;
-    virtual void setInputDevice_l(const AudioDeviceTypeAddr &device) = 0;
-    virtual void setMode_l(audio_mode_t mode) = 0;
-    virtual void setAudioSource_l(audio_source_t source) = 0;
+    virtual sp<IAfEffectModule> getEffectFromDesc_l(effect_descriptor_t* descriptor) const
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual sp<IAfEffectModule> getEffectFromId_l(int id) const
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual sp<IAfEffectModule> getEffectFromType_l(const effect_uuid_t* type) const
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual std::vector<int> getEffectIds_l() const = 0;
+    virtual bool setVolume(uint32_t* left, uint32_t* right,
+                           bool force = false) EXCLUDES_EffectChain_Mutex = 0;
+    virtual void resetVolume_l() REQUIRES(audio_utils::EffectChain_Mutex) = 0;
+    virtual void setDevices_l(const AudioDeviceTypeAddrVector& devices)
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual void setInputDevice_l(const AudioDeviceTypeAddr& device)
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual void setMode_l(audio_mode_t mode) REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+    virtual void setAudioSource_l(audio_source_t source)
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
 
     virtual void setInBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
     virtual float *inBuffer() const = 0;
@@ -262,20 +282,21 @@
 
     // suspend or restore effects of the specified type. The number of suspend requests is counted
     // and restore occurs once all suspend requests are cancelled.
-    virtual void setEffectSuspended_l(
-            const effect_uuid_t *type, bool suspend) = 0;
+    virtual void setEffectSuspended_l(const effect_uuid_t* type, bool suspend) = 0;
     // suspend all eligible effects
     virtual void setEffectSuspendedAll_l(bool suspend) = 0;
     // check if effects should be suspended or restored when a given effect is enable or disabled
-    virtual void checkSuspendOnEffectEnabled(const sp<IAfEffectModule>& effect, bool enabled) = 0;
+    virtual void checkSuspendOnEffectEnabled_l(const sp<IAfEffectModule>& effect, bool enabled)
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
 
-    virtual void clearInputBuffer() = 0;
+    virtual void clearInputBuffer() EXCLUDES_EffectChain_Mutex = 0;
 
     // At least one non offloadable effect in the chain is enabled
-    virtual bool isNonOffloadableEnabled() const = 0;
-    virtual bool isNonOffloadableEnabled_l() const = 0;
+    virtual bool isNonOffloadableEnabled() const EXCLUDES_EffectChain_Mutex = 0;
+    virtual bool isNonOffloadableEnabled_l() const REQUIRES(audio_utils::EffectChain_Mutex) = 0;
 
-    virtual void syncHalEffectsState() = 0;
+    virtual void syncHalEffectsState_l()
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
 
     // flags is an ORed set of audio_output_flags_t which is updated on return.
     virtual void checkOutputFlagCompatibility(audio_output_flags_t *flags) const = 0;
@@ -293,22 +314,28 @@
     virtual bool isBitPerfectCompatible() const = 0;
 
     // isCompatibleWithThread_l() must be called with thread->mLock held
-    virtual bool isCompatibleWithThread_l(const sp<IAfThreadBase>& thread) const = 0;
+    virtual bool isCompatibleWithThread_l(const sp<IAfThreadBase>& thread) const
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
 
     virtual bool containsHapticGeneratingEffect_l() = 0;
 
-    virtual void setHapticIntensity_l(int id, os::HapticScale intensity) = 0;
+    virtual void setHapticScale_l(int id, os::HapticScale hapticScale)
+            REQUIRES(audio_utils::ThreadBase_Mutex) EXCLUDES_EffectChain_Mutex = 0;
 
     virtual sp<EffectCallbackInterface> effectCallback() const = 0;
 
     virtual wp<IAfThreadBase> thread() const = 0;
-    virtual void setThread(const sp<IAfThreadBase>& thread) = 0;
+    virtual void setThread(const sp<IAfThreadBase>& thread) EXCLUDES_EffectChain_Mutex = 0;
 
     virtual bool isFirstEffect(int id) const = 0;
 
     virtual size_t numberOfEffects() const = 0;
     virtual sp<IAfEffectModule> getEffectModule(size_t index) const = 0;
 
+    // sendMetadata_l() must be called with thread->mLock held
+    virtual void sendMetadata_l(const std::vector<playback_track_metadata_v7_t>& allMetadata,
+        const std::optional<const std::vector<playback_track_metadata_v7_t>> spatializedMetadata)
+            REQUIRES(audio_utils::ThreadBase_Mutex) = 0;
+
     virtual void dump(int fd, const Vector<String16>& args) const = 0;
 };
 
@@ -352,9 +379,8 @@
                 const sp<DeviceEffectManagerCallback>& callback,
                 effect_descriptor_t *desc, int id, bool notifyFramesProcessed);
 
-    virtual status_t init(
-            const std::map<audio_patch_handle_t,
-            IAfPatchPanel::Patch>& patches) = 0;
+    virtual status_t init_l(const std::map<audio_patch_handle_t, IAfPatchPanel::Patch>& patches)
+            REQUIRES(audio_utils::DeviceEffectManager_Mutex) EXCLUDES_EffectBase_Mutex = 0;
     virtual const AudioDeviceTypeAddr& device() const = 0;
 
     virtual status_t onCreatePatch(
diff --git a/services/audioflinger/IAfThread.h b/services/audioflinger/IAfThread.h
index 7084be9..d701288 100644
--- a/services/audioflinger/IAfThread.h
+++ b/services/audioflinger/IAfThread.h
@@ -279,7 +279,7 @@
     // integrity of the chains during the process.
     // Also sets the parameter 'effectChains' to current value of mEffectChains.
     virtual void lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains)
-            REQUIRES(mutex()) = 0;
+            REQUIRES(mutex()) EXCLUDES_EffectChain_Mutex = 0;
     // unlock effect chains after process
     virtual void unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains)
             EXCLUDES_ThreadBase_Mutex = 0;
@@ -386,6 +386,12 @@
             const effect_uuid_t* type, bool suspend, audio_session_t sessionId)
             REQUIRES(mutex()) = 0;
 
+    // Wait while the Thread is busy.  This ensures that the Thread is not in the
+    // middle of releasing Tracks, during which the Thread mutex may be temporarily
+    // unlocked.  Some Track methods use this to avoid races.
+    virtual void waitWhileThreadBusy_l(audio_utils::unique_lock& ul)
+            REQUIRES(mutex()) = 0;
     // Dynamic cast to derived interface
     virtual sp<IAfDirectOutputThread> asIAfDirectOutputThread() { return nullptr; }
     virtual sp<IAfDuplicatingThread> asIAfDuplicatingThread() { return nullptr; }
diff --git a/services/audioflinger/IAfTrack.h b/services/audioflinger/IAfTrack.h
index 2302e13..8ed44c6 100644
--- a/services/audioflinger/IAfTrack.h
+++ b/services/audioflinger/IAfTrack.h
@@ -18,6 +18,7 @@
 
 #include <android/media/BnAudioRecord.h>
 #include <android/media/BnAudioTrack.h>
+#include <audio_utils/mutex.h>
 #include <audiomanager/IAudioManager.h>
 #include <binder/IMemory.h>
 #include <fastpath/FastMixerDumpState.h>
@@ -338,12 +339,12 @@
     /** Set haptic playback of the track is enabled or not, should be
      * set after query or get callback from vibrator service */
     virtual void setHapticPlaybackEnabled(bool hapticPlaybackEnabled) = 0;
-    /** Return at what intensity to play haptics, used in mixer. */
-    virtual os::HapticScale getHapticIntensity() const = 0;
+    /** Return the haptics scale, used in mixer. */
+    virtual os::HapticScale getHapticScale() const = 0;
     /** Return the maximum amplitude allowed for haptics data, used in mixer. */
     virtual float getHapticMaxAmplitude() const = 0;
-    /** Set intensity of haptic playback, should be set after querying vibrator service. */
-    virtual void setHapticIntensity(os::HapticScale hapticIntensity) = 0;
+    /** Set scale for haptic playback, should be set after querying vibrator service. */
+    virtual void setHapticScale(os::HapticScale hapticScale) = 0;
     /** Set maximum amplitude allowed for haptic data, should be set after querying
      *  vibrator service.
      */
@@ -351,7 +352,8 @@
     virtual sp<os::ExternalVibration> getExternalVibration() const = 0;
 
     // This function should be called with holding thread lock.
-    virtual void updateTeePatches_l() = 0;
+    virtual void updateTeePatches_l() REQUIRES(audio_utils::ThreadBase_Mutex)
+            EXCLUDES_BELOW_ThreadBase_Mutex = 0;
 
     // Argument teePatchesToUpdate is by value, use std::move to optimize.
     virtual void setTeePatchesToUpdate_l(TeePatches teePatchesToUpdate) = 0;
diff --git a/services/audioflinger/MelReporter.cpp b/services/audioflinger/MelReporter.cpp
index 41c5096..1d38306 100644
--- a/services/audioflinger/MelReporter.cpp
+++ b/services/audioflinger/MelReporter.cpp
@@ -307,6 +307,22 @@
 
 }
 
+void MelReporter::applyAllAudioPatches() {
+    ALOGV("%s", __func__);
+
+    std::vector<IAfPatchPanel::Patch> patchesCopy;
+    {
+        audio_utils::lock_guard _laf(mAfMelReporterCallback->mutex());
+        for (const auto& patch : mAfPatchPanel->patches_l()) {
+            patchesCopy.emplace_back(patch.second);
+        }
+    }
+
+    for (const auto& patch : patchesCopy) {
+        onCreateAudioPatch(patch.mHalHandle, patch);
+    }
+}
+
 std::optional<audio_patch_handle_t> MelReporter::activePatchStreamHandle_l(
         audio_io_handle_t streamHandle) {
     for(const auto& patchIt : mActiveMelPatches) {
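
applyAllAudioPatches() above follows a copy-under-lock, dispatch-without-lock shape: the patch map is snapshotted while holding the callback mutex, and onCreateAudioPatch() is then replayed on the copy with no lock held, so the per-patch work cannot violate the AudioFlinger/MelReporter mutex order. A generic sketch of the same shape, with hypothetical names:

    // Snapshot a shared map under its lock, then dispatch per-entry work with the
    // lock released. Names and types are hypothetical, for illustration only.
    #include <map>
    #include <mutex>
    #include <vector>

    struct Patch { int halHandle = 0; };

    class PatchReplayer {
    public:
        void applyAll() {
            std::vector<Patch> copy;
            {
                std::lock_guard<std::mutex> guard(mMutex);  // hold only while copying
                for (const auto& entry : mPatches) {
                    copy.push_back(entry.second);
                }
            }
            for (const auto& patch : copy) {                // no lock held here
                onCreatePatch(patch.halHandle, patch);      // may take other locks
            }
        }
    private:
        void onCreatePatch(int /*handle*/, const Patch& /*patch*/) { /* ... */ }
        std::mutex mMutex;
        std::map<int, Patch> mPatches;                      // guarded by mMutex
    };
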
diff --git a/services/audioflinger/MelReporter.h b/services/audioflinger/MelReporter.h
index 235dd11..0aeb225 100644
--- a/services/audioflinger/MelReporter.h
+++ b/services/audioflinger/MelReporter.h
@@ -27,8 +27,6 @@
 
 namespace android {
 
-constexpr static int kMaxTimestampDeltaInSec = 120;
-
 class IAfMelReporterCallback : public virtual RefBase {
 public:
     virtual audio_utils::mutex& mutex() const
@@ -45,8 +43,10 @@
 class MelReporter : public PatchCommandThread::PatchCommandListener,
                     public IMelReporterCallback {
 public:
-    explicit MelReporter(const sp<IAfMelReporterCallback>& afMelReporterCallback)
-        : mAfMelReporterCallback(afMelReporterCallback) {}
+    MelReporter(const sp<IAfMelReporterCallback>& afMelReporterCallback,
+                const sp<IAfPatchPanel>& afPatchPanel)
+        : mAfMelReporterCallback(afMelReporterCallback),
+          mAfPatchPanel(afPatchPanel) {}
 
     void onFirstRef() override;
 
@@ -80,9 +80,10 @@
 
     // IMelReporterCallback methods
     void stopMelComputationForDeviceId(audio_port_handle_t deviceId) final
-            EXCLUDES_MelReporter_Mutex;
+            EXCLUDES_AudioFlinger_Mutex EXCLUDES_MelReporter_Mutex;
     void startMelComputationForDeviceId(audio_port_handle_t deviceId) final
-            EXCLUDES_MelReporter_Mutex;
+            EXCLUDES_AudioFlinger_Mutex EXCLUDES_MelReporter_Mutex;
+    void applyAllAudioPatches() final EXCLUDES_AudioFlinger_Mutex EXCLUDES_MelReporter_Mutex;
 
     // PatchCommandListener methods
     void onCreateAudioPatch(audio_patch_handle_t handle,
@@ -131,6 +132,7 @@
     bool useHalSoundDoseInterface_l() REQUIRES(mutex());
 
     const sp<IAfMelReporterCallback> mAfMelReporterCallback;
+    const sp<IAfPatchPanel> mAfPatchPanel;
 
     /* const */ sp<SoundDoseManager> mSoundDoseManager;  // set onFirstRef
 
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index b4cb805..6c22e21 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -174,15 +174,15 @@
     void setHapticPlaybackEnabled(bool hapticPlaybackEnabled) final {
                 mHapticPlaybackEnabled = hapticPlaybackEnabled;
             }
-            /** Return at what intensity to play haptics, used in mixer. */
-    os::HapticScale getHapticIntensity() const final { return mHapticIntensity; }
+            /** Return the haptics scale, used in mixer. */
+    os::HapticScale getHapticScale() const final { return mHapticScale; }
             /** Return the maximum amplitude allowed for haptics data, used in mixer. */
     float getHapticMaxAmplitude() const final { return mHapticMaxAmplitude; }
             /** Set intensity of haptic playback, should be set after querying vibrator service. */
-    void setHapticIntensity(os::HapticScale hapticIntensity) final {
-                if (os::isValidHapticScale(hapticIntensity)) {
-                    mHapticIntensity = hapticIntensity;
-                    setHapticPlaybackEnabled(mHapticIntensity != os::HapticScale::MUTE);
+    void setHapticScale(os::HapticScale hapticScale) final {
+                if (os::isValidHapticScale(hapticScale)) {
+                    mHapticScale = hapticScale;
+                    setHapticPlaybackEnabled(!mHapticScale.isScaleMute());
                 }
             }
             /** Set maximum amplitude allowed for haptic data, should be set after querying
@@ -194,7 +194,8 @@
     sp<os::ExternalVibration> getExternalVibration() const final { return mExternalVibration; }
 
             // This function should be called with holding thread lock.
-    void updateTeePatches_l() final;
+    void updateTeePatches_l() final REQUIRES(audio_utils::ThreadBase_Mutex)
+            EXCLUDES_BELOW_ThreadBase_Mutex;
     void setTeePatchesToUpdate_l(TeePatches teePatchesToUpdate) final;
 
     void tallyUnderrunFrames(size_t frames) final {
@@ -328,8 +329,8 @@
     sp<OpPlayAudioMonitor>  mOpPlayAudioMonitor;
 
     bool                mHapticPlaybackEnabled = false; // indicates haptic playback enabled or not
-    // intensity to play haptic data
-    os::HapticScale mHapticIntensity = os::HapticScale::MUTE;
+    // scale to play haptic data
+    os::HapticScale mHapticScale = os::HapticScale::mute();
     // max amplitude allowed for haptic data
     float mHapticMaxAmplitude = NAN;
     class AudioVibrationController : public os::BnExternalVibrationController {
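
The haptics changes in this file replace the bare os::HapticScale enum (mHapticIntensity, HapticScale::MUTE) with a richer HapticScale value carrying a level plus scaling factors (os::HapticScale::mute(), isScaleMute()). The exact libvibrator API is not shown in this patch; a hypothetical sketch of such a type, for orientation only:

    // Hypothetical sketch of a HapticScale-like value type: a level plus a scale
    // factor, replacing the previous bare enum. Not the actual os::HapticScale API.
    enum class HapticLevel { MUTE, VERY_LOW, LOW, NONE, HIGH, VERY_HIGH };

    class HapticScale {
    public:
        static HapticScale mute() { return HapticScale(HapticLevel::MUTE); }
        static HapticScale none() { return HapticScale(HapticLevel::NONE); }
        HapticScale(HapticLevel level = HapticLevel::NONE, float scaleFactor = 1.0f)
            : mLevel(level), mScaleFactor(scaleFactor) {}
        HapticLevel getLevel() const { return mLevel; }
        float getScaleFactor() const { return mScaleFactor; }
        bool isScaleMute() const { return mLevel == HapticLevel::MUTE; }
    private:
        HapticLevel mLevel;
        float mScaleFactor;  // room for adaptive haptics scaling (see b/324559333 below)
    };
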
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index d61621a..d1a09a4 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -47,6 +47,7 @@
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/PersistableBundle.h>
+#include <com_android_media_audio.h>
 #include <cutils/bitops.h>
 #include <cutils/properties.h>
 #include <fastpath/AutoPark.h>
@@ -222,6 +223,8 @@
 static const int kPriorityAudioApp = 2;
 static const int kPriorityFastMixer = 3;
 static const int kPriorityFastCapture = 3;
+// Request real-time priority for PlaybackThread in ARC
+static const int kPriorityPlaybackThreadArc = 1;
 
 // IAudioFlinger::createTrack() has an in/out parameter 'pFrameCount' for the total size of the
 // track buffer in shared memory.  Zero on input means to use a default value.  For fast tracks,
@@ -721,8 +724,9 @@
     {
         audio_utils::unique_lock _l(event->mutex());
         while (event->mWaitStatus) {
-            if (event->mCondition.wait_for(_l, std::chrono::nanoseconds(kConfigEventTimeoutNs))
-                        == std::cv_status::timeout) {
+            if (event->mCondition.wait_for(
+                    _l, std::chrono::nanoseconds(kConfigEventTimeoutNs), getTid())
+                            == std::cv_status::timeout) {
                 event->mStatus = TIMED_OUT;
                 event->mWaitStatus = false;
             }
@@ -1483,7 +1487,7 @@
         return BAD_VALUE;
     }
 
-    if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+    if (IAfEffectModule::isSpatializer(&desc->type)
             && mType != SPATIALIZER) {
         ALOGW("%s: attempt to create a spatializer effect on a thread of type %d",
                 __func__, mType);
@@ -1571,7 +1575,7 @@
             return BAD_VALUE;
         } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
             // only post processing , downmixer or spatializer effects on output stage session
-            if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+            if (IAfEffectModule::isSpatializer(&desc->type)
                     || memcmp(&desc->type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
                 break;
             }
@@ -1690,7 +1694,7 @@
                     std::move(mAfThreadCallback->getDefaultVibratorInfo_l());
             if (defaultVibratorInfo) {
                 // Only set the vibrator info when it is a valid one.
-                effect->setVibratorInfo(*defaultVibratorInfo);
+                effect->setVibratorInfo_l(*defaultVibratorInfo);
             }
         }
         // create effect handle and connect it to effect module
@@ -1792,7 +1796,7 @@
 std::vector<int> ThreadBase::getEffectIds_l(audio_session_t sessionId) const
 {
     sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
-    return chain != nullptr ? chain->getEffectIds() : std::vector<int>{};
+    return chain != nullptr ? chain->getEffectIds_l() : std::vector<int>{};
 }
 
 // PlaybackThread::addEffect_ll() must be called with AudioFlinger::mutex() and
@@ -1824,7 +1828,7 @@
         return BAD_VALUE;
     }
 
-    effect->setOffloaded(mType == OFFLOAD, mId);
+    effect->setOffloaded_l(mType == OFFLOAD, mId);
 
     status_t status = chain->addEffect_l(effect);
     if (status != NO_ERROR) {
@@ -1861,22 +1865,20 @@
     }
 }
 
-void ThreadBase::lockEffectChains_l(
-        Vector<sp<IAfEffectChain>>& effectChains)
-NO_THREAD_SAFETY_ANALYSIS  // calls EffectChain::lock()
+void ThreadBase::lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains)
+        NO_THREAD_SAFETY_ANALYSIS  // calls EffectChain::lock()
 {
     effectChains = mEffectChains;
-    for (size_t i = 0; i < mEffectChains.size(); i++) {
-        mEffectChains[i]->mutex().lock();
+    for (const auto& effectChain : effectChains) {
+        effectChain->mutex().lock();
     }
 }
 
-void ThreadBase::unlockEffectChains(
-        const Vector<sp<IAfEffectChain>>& effectChains)
-NO_THREAD_SAFETY_ANALYSIS  // calls EffectChain::unlock()
+void ThreadBase::unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains)
+        NO_THREAD_SAFETY_ANALYSIS  // calls EffectChain::unlock()
 {
-    for (size_t i = 0; i < effectChains.size(); i++) {
-        effectChains[i]->mutex().unlock();
+    for (const auto& effectChain : effectChains) {
+        effectChain->mutex().unlock();
     }
 }
 
@@ -2849,6 +2851,8 @@
         // effectively get the latency it requested.
         if (track->isExternalTrack()) {
             IAfTrackBase::track_state state = track->state();
+            // Because the track is not on the ActiveTracks list at this point,
+            // only the TrackHandle will be adding the track.
             mutex().unlock();
             status = AudioSystem::startOutput(track->portId());
             mutex().lock();
@@ -2897,7 +2901,7 @@
             // Unlock due to VibratorService will lock for this call and will
             // call Tracks.mute/unmute which also require thread's lock.
             mutex().unlock();
-            const os::HapticScale intensity = afutils::onExternalVibrationStart(
+            const os::HapticScale hapticScale = afutils::onExternalVibrationStart(
                     track->getExternalVibration());
             std::optional<media::AudioVibratorInfo> vibratorInfo;
             {
@@ -2907,7 +2911,7 @@
                 vibratorInfo = std::move(mAfThreadCallback->getDefaultVibratorInfo_l());
             }
             mutex().lock();
-            track->setHapticIntensity(intensity);
+            track->setHapticScale(hapticScale);
             if (vibratorInfo) {
                 track->setHapticMaxAmplitude(vibratorInfo->maxAmplitude);
             }
@@ -2923,13 +2927,19 @@
 
             // Set haptic intensity for effect
             if (chain != nullptr) {
-                chain->setHapticIntensity_l(track->id(), intensity);
+                // TODO(b/324559333): Add adaptive haptics scaling support for the HapticGenerator.
+                chain->setHapticScale_l(track->id(), hapticScale);
             }
         }
 
         track->setResetDone(false);
         track->resetPresentationComplete();
+
+        // Do not release the ThreadBase mutex after the track is added to mActiveTracks unless
+        // all key changes are complete.  It is possible that the threadLoop will begin
+        // processing the added track immediately after the ThreadBase mutex is released.
         mActiveTracks.add(track);
+
         if (chain != 0) {
             ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
                     track->sessionId());
@@ -3311,10 +3321,48 @@
         return {}; // nothing to do
     }
     StreamOutHalInterface::SourceMetadata metadata;
-    auto backInserter = std::back_inserter(metadata.tracks);
-    for (const sp<IAfTrack>& track : mActiveTracks) {
-        // No track is invalid as this is called after prepareTrack_l in the same critical section
-        track->copyMetadataTo(backInserter);
+    static const bool stereo_spatialization_property =
+            property_get_bool("ro.audio.stereo_spatialization_enabled", false);
+    const bool stereo_spatialization_enabled =
+            stereo_spatialization_property && com_android_media_audio_stereo_spatialization();
+    if (stereo_spatialization_enabled) {
+        std::map<audio_session_t, std::vector<playback_track_metadata_v7_t>> allSessionsMetadata;
+        for (const sp<IAfTrack>& track : mActiveTracks) {
+            std::vector<playback_track_metadata_v7_t>& sessionMetadata =
+                    allSessionsMetadata[track->sessionId()];
+            auto backInserter = std::back_inserter(sessionMetadata);
+            // No track is invalid as this is called after prepareTrack_l in the same
+            // critical section
+            track->copyMetadataTo(backInserter);
+        }
+        std::vector<playback_track_metadata_v7_t> spatializedTracksMetaData;
+        for (const auto& [session, sessionTrackMetadata] : allSessionsMetadata) {
+            metadata.tracks.insert(metadata.tracks.end(),
+                    sessionTrackMetadata.begin(), sessionTrackMetadata.end());
+            if (auto chain = getEffectChain_l(session) ; chain != nullptr) {
+                chain->sendMetadata_l(sessionTrackMetadata, {});
+            }
+            if ((hasAudioSession_l(session) & IAfThreadBase::SPATIALIZED_SESSION) != 0) {
+                spatializedTracksMetaData.insert(spatializedTracksMetaData.end(),
+                        sessionTrackMetadata.begin(), sessionTrackMetadata.end());
+            }
+        }
+        if (auto chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX); chain != nullptr) {
+            chain->sendMetadata_l(metadata.tracks, {});
+        }
+        if (auto chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE); chain != nullptr) {
+            chain->sendMetadata_l(metadata.tracks, spatializedTracksMetaData);
+        }
+        if (auto chain = getEffectChain_l(AUDIO_SESSION_DEVICE); chain != nullptr) {
+            chain->sendMetadata_l(metadata.tracks, {});
+        }
+    } else {
+        auto backInserter = std::back_inserter(metadata.tracks);
+        for (const sp<IAfTrack>& track : mActiveTracks) {
+            // No track is invalid as this is called after prepareTrack_l in the same
+            // critical section
+            track->copyMetadataTo(backInserter);
+        }
     }
     sendMetadataToBackend_l(metadata);
     MetadataUpdate change;
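
The stereo-spatialization branch above groups the active tracks' metadata by audio session, hands each session's slice to that session's effect chain, forwards the union to the OUTPUT_MIX and DEVICE chains, and forwards the spatialized sessions' tracks alongside the full list to the OUTPUT_STAGE chain. A condensed sketch of the grouping step, using plain standard types and hypothetical names:

    // Condensed sketch of the per-session grouping done above, with plain standard
    // types and hypothetical names; the real code works on playback_track_metadata_v7_t.
    #include <map>
    #include <vector>

    struct TrackMetadata {
        int sessionId = 0;
        bool spatialized = false;
        // ... usage, content type, gain, etc.
    };

    static void groupBySession(const std::vector<TrackMetadata>& activeTracks,
                               std::vector<TrackMetadata>& allTracks,
                               std::vector<TrackMetadata>& spatializedTracks) {
        std::map<int, std::vector<TrackMetadata>> bySession;
        for (const auto& md : activeTracks) {
            bySession[md.sessionId].push_back(md);
        }
        for (const auto& [session, sessionMd] : bySession) {
            // Every session contributes to the full list (OUTPUT_MIX / DEVICE chains).
            allTracks.insert(allTracks.end(), sessionMd.begin(), sessionMd.end());
            // Sessions with spatialized content also feed the OUTPUT_STAGE chain.
            bool anySpatialized = false;
            for (const auto& md : sessionMd) anySpatialized = anySpatialized || md.spatialized;
            if (anySpatialized) {
                spatializedTracks.insert(spatializedTracks.end(),
                                         sessionMd.begin(), sessionMd.end());
            }
        }
    }
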
@@ -3909,6 +3957,27 @@
                 stream()->setHalThreadPriority(priorityBoost);
             }
         }
+    } else if (property_get_bool("ro.boot.container", false /* default_value */)) {
+        // In ARC experiments (b/73091832), the latency when using the CFS scheduler at any
+        // priority is not low enough for the PlaybackThread to process audio data in time.
+        // We request the lowest real-time priority, SCHED_FIFO=1, for the PlaybackThread in ARC.
+        // ro.boot.container is true only on ARC.
+        const pid_t tid = getTid();
+        if (tid == -1) {
+            ALOGW("%s: Cannot update PlaybackThread priority for ARC, no tid", __func__);
+        } else {
+            const status_t status = requestPriority(getpid(),
+                                                    tid,
+                                                    kPriorityPlaybackThreadArc,
+                                                    false /* isForApp */,
+                                                    true /* asynchronous */);
+            if (status != OK) {
+                ALOGW("%s: Cannot update PlaybackThread priority for ARC, status %d", __func__,
+                        status);
+            } else {
+                stream()->setHalThreadPriority(kPriorityPlaybackThreadArc);
+            }
+        }
     }
 
     Vector<sp<IAfTrack>> tracksToRemove;
@@ -4120,6 +4189,30 @@
 
             metadataUpdate = updateMetadata_l();
 
+            // Acquire a local copy of active tracks with lock (release w/o lock).
+            //
+            // Control methods on the track acquire the ThreadBase lock (e.g. start()
+            // stop(), pause(), etc.), but the threadLoop is entitled to call audio
+            // data / buffer methods on tracks from activeTracks without the ThreadBase lock.
+            activeTracks.insert(activeTracks.end(), mActiveTracks.begin(), mActiveTracks.end());
+
+            setHalLatencyMode_l();
+
+            // updateTeePatches_l will acquire the ThreadBase_Mutex of other threads,
+            // so this is done before we lock our effect chains.
+            for (const auto& track : mActiveTracks) {
+                track->updateTeePatches_l();
+            }
+
+            // signal actual start of output stream when the render position reported by
+            // the kernel starts moving.
+            if (!mHalStarted && ((isSuspended() && (mBytesWritten != 0)) || (!mStandby
+                    && (mKernelPositionOnStandby
+                            != mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL])))) {
+                mHalStarted = true;
+                mWaitHalStartCV.notify_all();
+            }
+
             // prevent any changes in effect chain list and in each effect chain
             // during mixing and effect process as the audio buffers could be deleted
             // or modified if an effect is created or deleted
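
The reordering above is driven by lock ordering: updateTeePatches_l() may take another thread's ThreadBase_Mutex, so it has to run before this thread locks its effect chains, because ThreadBase_Mutex is ordered above EffectChain_Mutex. A small illustration of why a fixed acquisition order matters (illustrative mutexes, not the audio_utils types):

    // Illustration of the ordering constraint: all code must take the coarse
    // "thread" lock before the finer "chain" lock, or two paths can deadlock.
    #include <mutex>

    std::mutex gThreadMutex;  // coarse lock, analogous to ThreadBase_Mutex
    std::mutex gChainMutex;   // finer lock, analogous to EffectChain_Mutex

    void processWithCorrectOrder() {
        std::lock_guard<std::mutex> t(gThreadMutex);  // 1) thread-level work first
        std::lock_guard<std::mutex> c(gChainMutex);   // 2) then lock the chain
        // ... mix and run effects ...
    }

    void safeWhenBothAreNeededAtOnce() {
        // If some path really must take both in one step, std::scoped_lock acquires
        // them deadlock-free regardless of the textual order of its arguments.
        std::scoped_lock both(gThreadMutex, gChainMutex);
        // ...
    }
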
@@ -4147,28 +4240,6 @@
                     }
                 }
             }
-
-            // Acquire a local copy of active tracks with lock (release w/o lock).
-            //
-            // Control methods on the track acquire the ThreadBase lock (e.g. start()
-            // stop(), pause(), etc.), but the threadLoop is entitled to call audio
-            // data / buffer methods on tracks from activeTracks without the ThreadBase lock.
-            activeTracks.insert(activeTracks.end(), mActiveTracks.begin(), mActiveTracks.end());
-
-            setHalLatencyMode_l();
-
-            for (const auto &track : mActiveTracks ) {
-                track->updateTeePatches_l();
-            }
-
-            // signal actual start of output stream when the render position reported by the kernel
-            // starts moving.
-            if (!mHalStarted && ((isSuspended() && (mBytesWritten != 0)) || (!mStandby
-                    && (mKernelPositionOnStandby
-                            != mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL])))) {
-                mHalStarted = true;
-                mWaitHalStartCV.notify_all();
-            }
         } // mutex() scope ends
 
         if (mBytesRemaining == 0) {
@@ -4704,8 +4775,12 @@
 void PlaybackThread::removeTracks_l(const Vector<sp<IAfTrack>>& tracksToRemove)
 NO_THREAD_SAFETY_ANALYSIS  // release and re-acquire mutex()
 {
+    if (tracksToRemove.empty()) return;
+
+    // Block all incoming TrackHandle requests until we are finished with the release.
+    setThreadBusy_l(true);
+
     for (const auto& track : tracksToRemove) {
-        mActiveTracks.remove(track);
         ALOGV("%s(%d): removing track on session %d", __func__, track->id(), track->sessionId());
         sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
         if (chain != 0) {
@@ -4713,17 +4788,16 @@
                     __func__, track->id(), chain.get(), track->sessionId());
             chain->decActiveTrackCnt();
         }
+
         // If an external client track, inform APM we're no longer active, and remove if needed.
-        // We do this under lock so that the state is consistent if the Track is destroyed.
+        // Since the track is active, we do it here instead of TrackBase::destroy().
         if (track->isExternalTrack()) {
+            mutex().unlock();
             AudioSystem::stopOutput(track->portId());
             if (track->isTerminated()) {
                 AudioSystem::releaseOutput(track->portId());
             }
-        }
-        if (track->isTerminated()) {
-            // remove from our tracks vector
-            removeTrack_l(track);
+            mutex().lock();
         }
         if (mHapticChannelCount > 0 &&
                 ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
@@ -4737,10 +4811,27 @@
             // When the track is stop, set the haptic intensity as MUTE
             // for the HapticGenerator effect.
             if (chain != nullptr) {
-                chain->setHapticIntensity_l(track->id(), os::HapticScale::MUTE);
+                chain->setHapticScale_l(track->id(), os::HapticScale::mute());
             }
         }
+
+        // Under lock, the track is removed from the active tracks list.
+        //
+        // Once the track is no longer active, the TrackHandle may directly
+        // modify it as the threadLoop() is no longer responsible for its maintenance.
+        // Do not modify the track from threadLoop after the mutex is unlocked
+        // if it is not active.
+        mActiveTracks.remove(track);
+
+        if (track->isTerminated()) {
+            // remove from our tracks vector
+            removeTrack_l(track);
+        }
     }
+
+    // Allow incoming TrackHandle requests.  We still hold the mutex,
+    // so pending TrackHandle requests will occur after we unlock it.
+    setThreadBusy_l(false);
 }
 
 status_t PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
@@ -5080,7 +5171,7 @@
                                                     // audio to FastMixer
         fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
         fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
-        fastTrack->mHapticIntensity = os::HapticScale::NONE;
+        fastTrack->mHapticScale = {/*level=*/os::HapticLevel::NONE };
         fastTrack->mHapticMaxAmplitude = NAN;
         fastTrack->mGeneration++;
         state->mFastTracksGen++;
@@ -5439,7 +5530,7 @@
     sp<IAfEffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
     if (chain != 0) {
         uint32_t v = (uint32_t)(masterVolume * (1 << 24));
-        chain->setVolume_l(&v, &v);
+        chain->setVolume(&v, &v);
         masterVolume = (float)((v + (1 << 23)) >> 24);
         chain.clear();
     }
@@ -5638,7 +5729,7 @@
                     fastTrack->mChannelMask = track->channelMask();
                     fastTrack->mFormat = track->format();
                     fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
-                    fastTrack->mHapticIntensity = track->getHapticIntensity();
+                    fastTrack->mHapticScale = track->getHapticScale();
                     fastTrack->mHapticMaxAmplitude = track->getHapticMaxAmplitude();
                     fastTrack->mGeneration++;
                     state->mTrackMask |= 1 << j;
@@ -5774,7 +5865,7 @@
 
             mixedTracks++;
 
-            // track->mainBuffer() != mSinkBuffer or mMixerBuffer means
+            // track->mainBuffer() != mSinkBuffer and != mMixerBuffer means
             // there is an effect chain connected to the track
             chain.clear();
             if (track->mainBuffer() != mSinkBuffer &&
@@ -5878,7 +5969,7 @@
             track->setFinalVolume(vrf, vlf);
 
             // Delegate volume control to effect in track effect chain if needed
-            if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
+            if (chain != 0 && chain->setVolume(&vl, &vr)) {
                 // Do not ramp volume if volume is controlled by effect
                 param = AudioMixer::VOLUME;
                 // Update remaining floating point volume levels
@@ -5999,10 +6090,11 @@
                 trackId,
                 AudioMixer::TRACK,
                 AudioMixer::HAPTIC_ENABLED, (void *)(uintptr_t)track->getHapticPlaybackEnabled());
+            const os::HapticScale hapticScale = track->getHapticScale();
             mAudioMixer->setParameter(
-                trackId,
-                AudioMixer::TRACK,
-                AudioMixer::HAPTIC_INTENSITY, (void *)(uintptr_t)track->getHapticIntensity());
+                    trackId,
+                    AudioMixer::TRACK,
+                    AudioMixer::HAPTIC_SCALE, (void *)&hapticScale);
             const float hapticMaxAmplitude = track->getHapticMaxAmplitude();
             mAudioMixer->setParameter(
                 trackId,
@@ -6618,8 +6710,8 @@
                 // Convert volumes from float to 8.24
                 uint32_t vl = (uint32_t)(left * (1 << 24));
                 uint32_t vr = (uint32_t)(right * (1 << 24));
-                // Direct/Offload effect chains set output volume in setVolume_l().
-                (void)mEffectChains[0]->setVolume_l(&vl, &vr);
+                // Direct/Offload effect chains set output volume in setVolume().
+                (void)mEffectChains[0]->setVolume(&vl, &vr);
             } else {
                 // otherwise we directly set the volume.
                 setVolumeForOutput_l(left, right);
@@ -7813,16 +7905,12 @@
         //   (mRequestedLatencyMode = AUDIO_LATENCY_MODE_LOW)
         //      AND
         // - At least one active track is spatialized
-        bool hasSpatializedActiveTrack = false;
         for (const auto& track : mActiveTracks) {
             if (track->isSpatialized()) {
-                hasSpatializedActiveTrack = true;
+                latencyMode = mRequestedLatencyMode;
                 break;
             }
         }
-        if (hasSpatializedActiveTrack && mRequestedLatencyMode == AUDIO_LATENCY_MODE_LOW) {
-            latencyMode = AUDIO_LATENCY_MODE_LOW;
-        }
     }
 
     if (latencyMode != mSetLatencyMode) {
@@ -7836,7 +7924,7 @@
 }
 
 status_t SpatializerThread::setRequestedLatencyMode(audio_latency_mode_t mode) {
-    if (mode != AUDIO_LATENCY_MODE_LOW && mode != AUDIO_LATENCY_MODE_FREE) {
+    if (mode < 0 || mode >= AUDIO_LATENCY_MODE_CNT) {
         return BAD_VALUE;
     }
     audio_utils::lock_guard _l(mutex());
@@ -9686,7 +9774,7 @@
 
     // make sure enabled pre processing effects state is communicated to the HAL as we
     // just moved them to a new input stream.
-    chain->syncHalEffectsState();
+    chain->syncHalEffectsState_l();
 
     mEffectChains.add(chain);
 
@@ -10577,6 +10665,16 @@
         }
     }
 
+    // For mmap streams, once the routing has changed, they will be disconnected. It should be
+    // okay to notify the client early, before the new patch is created.
+    if (mDeviceId != deviceId) {
+        if (const sp<MmapStreamCallback> callback = mCallback.promote()) {
+            // The aaudioservice handles the routing changed event asynchronously,
+            // so it is safe to hold the lock here.
+            callback->onRoutingChanged(deviceId);
+        }
+    }
+
     if (mAudioHwDev->supportsAudioPatches()) {
         status = mHalDevice->createAudioPatch(patch->num_sources, patch->sources, patch->num_sinks,
                                               patch->sinks, handle);
@@ -10602,12 +10700,6 @@
             sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
             mInDeviceTypeAddr = sourceDeviceTypeAddr;
         }
-        sp<MmapStreamCallback> callback = mCallback.promote();
-        if (mDeviceId != deviceId && callback != 0) {
-            mutex().unlock();
-            callback->onRoutingChanged(deviceId);
-            mutex().lock();
-        }
         mPatch = *patch;
         mDeviceId = deviceId;
     }
@@ -10671,7 +10763,7 @@
     chain->setThread(this);
     chain->setInBuffer(nullptr);
     chain->setOutBuffer(nullptr);
-    chain->syncHalEffectsState();
+    chain->syncHalEffectsState_l();
 
     mEffectChains.add(chain);
     checkSuspendOnAddEffectChain_l(chain);
@@ -10759,22 +10851,19 @@
 
 void MmapThread::checkInvalidTracks_l()
 {
-    sp<MmapStreamCallback> callback;
     for (const sp<IAfMmapTrack>& track : mActiveTracks) {
         if (track->isInvalid()) {
-            callback = mCallback.promote();
-            if (callback == nullptr &&  mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
+            if (const sp<MmapStreamCallback> callback = mCallback.promote()) {
+                // The aaudioservice handles the routing changed event asynchronously,
+                // so it is safe to hold the lock here.
+                callback->onRoutingChanged(AUDIO_PORT_HANDLE_NONE);
+            } else if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
                 ALOGW("Could not notify MMAP stream tear down: no onRoutingChanged callback!");
                 mNoCallbackWarningCount++;
             }
             break;
         }
     }
-    if (callback != 0) {
-        mutex().unlock();
-        callback->onRoutingChanged(AUDIO_PORT_HANDLE_NONE);
-        mutex().lock();
-    }
 }
 
 void MmapThread::dumpInternals_l(int fd, const Vector<String16>& /* args */)
@@ -10964,7 +11053,7 @@
         // only one effect chain can be present on DirectOutputThread, so if
         // there is one, the track is connected to it
         if (!mEffectChains.isEmpty()) {
-            mEffectChains[0]->setVolume_l(&vol, &vol);
+            mEffectChains[0]->setVolume(&vol, &vol);
             volume = (float)vol / (1 << 24);
         }
         // Try to use HW volume control and fall back to SW control if not implemented
@@ -11033,7 +11122,7 @@
             char *endptr;
             unsigned long ul = strtoul(value, &endptr, 0);
             if (*endptr == '\0' && ul != 0) {
-                ALOGD("Silence is golden");
+                ALOGW("%s: mute from ro.audio.silent. Silence is golden", __func__);
                 // The setprop command will not allow a property to be changed after
                 // the first time it is set, so we don't have to worry about un-muting.
                 setMasterMute_l(true);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 21134a2..86e1894 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -436,9 +436,11 @@
                 // ThreadBase mutex before processing the mixer and effects. This guarantees the
                 // integrity of the chains during the process.
                 // Also sets the parameter 'effectChains' to current value of mEffectChains.
-    void lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains) final REQUIRES(mutex());
+    void lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains) final
+            REQUIRES(audio_utils::ThreadBase_Mutex) ACQUIRE(audio_utils::EffectChain_Mutex);
                 // unlock effect chains after process
-    void unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains) final;
+    void unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains) final
+            RELEASE(audio_utils::EffectChain_Mutex);
                 // get a copy of mEffectChains vector
     Vector<sp<IAfEffectChain>> getEffectChains_l() const final REQUIRES(mutex()) {
         return mEffectChains;
@@ -599,6 +601,35 @@
                 // check if some effects must be suspended when an effect chain is added
     void checkSuspendOnAddEffectChain_l(const sp<IAfEffectChain>& chain) REQUIRES(mutex());
 
+    /**
+     * waitWhileThreadBusy_l() serves as a mutex gate, which does not allow
+     * progress beyond the method while the PlaybackThread is busy (see setThreadBusy_l()).
+     * During the wait, the ThreadBase_Mutex is temporarily unlocked.
+     *
+     * This implementation uses a condition variable.  Alternative ways to gate
+     * callers could use a second mutex (e.g. entry based on scoped_lock(mutex, gating_mutex)),
+     * but those have less flexibility and more lock order issues.
+     *
+     * Current callers are Track::destroy(), Track::start(), Track::stop(), Track::pause(),
+     * and Track::flush(), which block this way; the primary caller is TrackHandle
+     * with no other mutexes held.
+     *
+     * Special tracks like PatchTrack and OutputTrack may also hold another thread's
+     * ThreadBase_Mutex during this time.  No other mutex is held.
+     */
+
+    void waitWhileThreadBusy_l(audio_utils::unique_lock& ul) final REQUIRES(mutex()) {
+        // the wait returns immediately if the predicate is satisfied.
+        mThreadBusyCv.wait(ul, [&]{ return mThreadBusy == false;});
+    }
+
+    void setThreadBusy_l(bool busy) REQUIRES(mutex()) {
+        if (busy == mThreadBusy) return;
+        mThreadBusy = busy;
+        if (busy == true) return;  // no need to wake threads if we become busy.
+        mThreadBusyCv.notify_all();
+    }
+
                 // sends the metadata of the active tracks to the HAL
                 struct MetadataUpdate {
                     std::vector<playback_track_metadata_v7_t> playbackMetadataUpdate;
@@ -641,6 +672,13 @@
                 ThreadMetrics           mThreadMetrics;
                 const bool              mIsOut;
 
+    // mThreadBusy is checked under the ThreadBase_Mutex to ensure that
+    // TrackHandle operations do not proceed while the ThreadBase is busy
+    // with the track.  mThreadBusy is only true if the track is active.
+    //
+    bool mThreadBusy = false; // GUARDED_BY(ThreadBase_Mutex) but read in lambda.
+    audio_utils::condition_variable mThreadBusyCv;
+
                 // updated by PlaybackThread::readOutputParameters_l() or
                 // RecordThread::readInputParameters_l()
                 uint32_t                mSampleRate;
@@ -839,7 +877,7 @@
 
                 SimpleLog mLocalLog;  // locked internally
 
-private:
+    private:
     void dumpBase_l(int fd, const Vector<String16>& args) REQUIRES(mutex());
     void dumpEffectChains_l(int fd, const Vector<String16>& args) REQUIRES(mutex());
 };
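
A minimal sketch of the thread-busy gate introduced above, expressed with plain std::
primitives instead of the audio_utils wrappers (illustrative only, not part of the patch;
the class name BusyGate and the caller-held mutex are assumptions for a self-contained
example):

    #include <condition_variable>
    #include <mutex>

    class BusyGate {
    public:
        // Called by TrackHandle-style operations; 'lock' must already own the mutex.
        // Returns immediately if not busy; otherwise the mutex is released while waiting.
        void waitWhileBusy(std::unique_lock<std::mutex>& lock) {
            mCv.wait(lock, [this] { return !mBusy; });
        }

        // Called by the owning thread with the same mutex held.
        void setBusy(bool busy) {
            if (busy == mBusy) return;
            mBusy = busy;
            if (!busy) mCv.notify_all();  // wake waiters only when leaving the busy state
        }

    private:
        bool mBusy = false;               // guarded by the caller-held mutex
        std::condition_variable mCv;
    };
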
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index f18e69b..77abaf6 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -890,12 +890,17 @@
         bool wasActive = false;
         const sp<IAfThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            audio_utils::lock_guard _l(thread->mutex());
+            audio_utils::unique_lock ul(thread->mutex());
+            thread->waitWhileThreadBusy_l(ul);
+
             auto* const playbackThread = thread->asIAfPlaybackThread().get();
             wasActive = playbackThread->destroyTrack_l(this);
             forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
         }
         if (isExternalTrack() && !wasActive) {
+            // If the track is not active, the TrackHandle is responsible for
+            // releasing the port id, not the ThreadBase::threadLoop().
+            // At this point, there is no concurrency issue as the track is going away.
             AudioSystem::releaseOutput(mPortId);
         }
     }
@@ -1187,7 +1192,9 @@
                 return PERMISSION_DENIED;
             }
         }
-        audio_utils::lock_guard _lth(thread->mutex());
+        audio_utils::unique_lock ul(thread->mutex());
+        thread->waitWhileThreadBusy_l(ul);
+
         track_state state = mState;
         // here the track could be either new, or restarted
         // in both cases "unstop" the track
@@ -1312,7 +1319,9 @@
     ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
     const sp<IAfThreadBase> thread = mThread.promote();
     if (thread != 0) {
-        audio_utils::lock_guard _l(thread->mutex());
+        audio_utils::unique_lock ul(thread->mutex());
+        thread->waitWhileThreadBusy_l(ul);
+
         track_state state = mState;
         if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
             // If the track is not active (PAUSED and buffers full), flush buffers
@@ -1347,7 +1356,9 @@
     ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
     const sp<IAfThreadBase> thread = mThread.promote();
     if (thread != 0) {
-        audio_utils::lock_guard _l(thread->mutex());
+        audio_utils::unique_lock ul(thread->mutex());
+        thread->waitWhileThreadBusy_l(ul);
+
         auto* const playbackThread = thread->asIAfPlaybackThread().get();
         switch (mState) {
         case STOPPING_1:
@@ -1384,7 +1395,9 @@
     ALOGV("%s(%d)", __func__, mId);
     const sp<IAfThreadBase> thread = mThread.promote();
     if (thread != 0) {
-        audio_utils::lock_guard _l(thread->mutex());
+        audio_utils::unique_lock ul(thread->mutex());
+        thread->waitWhileThreadBusy_l(ul);
+
         auto* const playbackThread = thread->asIAfPlaybackThread().get();
 
         // Flush the ring buffer now if the track is not active in the PlaybackThread.
@@ -1661,7 +1674,7 @@
 
     if (result == OK) {
         ALOGI("%s(%d): processed mute state for port ID %d from %d to %d", __func__, id(), mPortId,
-              int(muteState), int(mMuteState));
+                static_cast<int>(mMuteState), static_cast<int>(muteState));
         mMuteState = muteState;
     } else {
         ALOGW("%s(%d): cannot process mute state for port ID %d, status error %d", __func__, id(),
@@ -3541,6 +3554,8 @@
     }
 
     if (result == OK) {
+        ALOGI("%s(%d): processed mute state for port ID %d from %d to %d", __func__, id(), mPortId,
+                static_cast<int>(mMuteState), static_cast<int>(muteState));
         mMuteState = muteState;
     } else {
         ALOGW("%s(%d): cannot process mute state for port ID %d, status error %d",
diff --git a/services/audioflinger/afutils/Vibrator.cpp b/services/audioflinger/afutils/Vibrator.cpp
index 25fcc6a..7c99ca9 100644
--- a/services/audioflinger/afutils/Vibrator.cpp
+++ b/services/audioflinger/afutils/Vibrator.cpp
@@ -20,6 +20,7 @@
 
 #include "Vibrator.h"
 
+#include <android/os/ExternalVibrationScale.h>
 #include <android/os/IExternalVibratorService.h>
 #include <binder/IServiceManager.h>
 #include <utils/Log.h>
@@ -44,12 +45,17 @@
 }
 
 os::HapticScale onExternalVibrationStart(const sp<os::ExternalVibration>& externalVibration) {
+    if (externalVibration->getAudioAttributes().flags & AUDIO_FLAG_MUTE_HAPTIC) {
+        ALOGD("%s, mute haptic according to audio attributes flag", __func__);
+        return os::HapticScale::mute();
+    }
     const sp<os::IExternalVibratorService> evs = getExternalVibratorService();
     if (evs != nullptr) {
-        int32_t ret;
+
+        os::ExternalVibrationScale ret;
         binder::Status status = evs->onExternalVibrationStart(*externalVibration, &ret);
         if (status.isOk()) {
-            ALOGD("%s, start external vibration with intensity as %d", __func__, ret);
+            ALOGD("%s, start external vibration with intensity as %d", __func__, ret.scaleLevel);
             return os::ExternalVibration::externalVibrationScaleToHapticScale(ret);
         }
     }
@@ -57,7 +63,7 @@
             __func__,
             evs == nullptr ? "external vibration service not found"
                            : "error when querying intensity");
-    return os::HapticScale::MUTE;
+    return os::HapticScale::mute();
 }
 
 void onExternalVibrationStop(const sp<os::ExternalVibration>& externalVibration) {
diff --git a/services/audioflinger/fastpath/FastMixer.cpp b/services/audioflinger/fastpath/FastMixer.cpp
index e0a15c1..1d41b3f 100644
--- a/services/audioflinger/fastpath/FastMixer.cpp
+++ b/services/audioflinger/fastpath/FastMixer.cpp
@@ -178,8 +178,8 @@
                 (void *)(uintptr_t)mSinkChannelMask);
         mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
                 (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
-        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_INTENSITY,
-                (void *)(uintptr_t)fastTrack->mHapticIntensity);
+        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_SCALE,
+                (void *)(&(fastTrack->mHapticScale)));
         mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_MAX_AMPLITUDE,
                 (void *)(&(fastTrack->mHapticMaxAmplitude)));
 
diff --git a/services/audioflinger/fastpath/FastMixerState.h b/services/audioflinger/fastpath/FastMixerState.h
index 8ab6d25..0a56f92 100644
--- a/services/audioflinger/fastpath/FastMixerState.h
+++ b/services/audioflinger/fastpath/FastMixerState.h
@@ -54,7 +54,7 @@
     audio_format_t          mFormat = AUDIO_FORMAT_INVALID;         // track format
     int                     mGeneration = 0;     // increment when any field is assigned
     bool                    mHapticPlaybackEnabled = false; // haptic playback is enabled or not
-    os::HapticScale         mHapticIntensity = os::HapticScale::MUTE; // intensity of haptic data
+    os::HapticScale         mHapticScale = os::HapticScale::mute(); // scale of haptic data
     float                   mHapticMaxAmplitude = NAN; // max amplitude allowed for haptic data
 };
 
diff --git a/services/audioflinger/sounddose/SoundDoseManager.cpp b/services/audioflinger/sounddose/SoundDoseManager.cpp
index 1ff08dc..3b764d1 100644
--- a/services/audioflinger/sounddose/SoundDoseManager.cpp
+++ b/services/audioflinger/sounddose/SoundDoseManager.cpp
@@ -21,10 +21,12 @@
 #include "SoundDoseManager.h"
 
 #include "android/media/SoundDoseRecord.h"
+#include <algorithm>
 #include <android-base/stringprintf.h>
-#include <media/AidlConversionCppNdk.h>
 #include <cinttypes>
 #include <ctime>
+#include <functional>
+#include <media/AidlConversionCppNdk.h>
 #include <utils/Log.h>
 
 namespace android {
@@ -46,6 +48,8 @@
     return now_ts.tv_sec;
 }
 
+constexpr float kDefaultRs2LowerBound = 80.f;  // dBA
+
 }  // namespace
 
 sp<audio_utils::MelProcessor> SoundDoseManager::getOrCreateProcessorForDevice(
@@ -53,7 +57,7 @@
         size_t channelCount, audio_format_t format) {
     const std::lock_guard _l(mLock);
 
-    if (mHalSoundDose.size() > 0 && mEnabledCsd) {
+    if (!mUseFrameworkMel && mHalSoundDose.size() > 0 && mEnabledCsd) {
         ALOGD("%s: using HAL MEL computation, no MelProcessor needed.", __func__);
         return nullptr;
     }
@@ -143,7 +147,7 @@
     ALOGV("%s", __func__);
     const std::lock_guard _l(mLock);
 
-    if (mHalSoundDose.size() > 0) {
+    if (!mUseFrameworkMel && mHalSoundDose.size() > 0) {
         bool success = true;
         for (auto& halSoundDose : mHalSoundDose) {
             // using the HAL sound dose interface
@@ -187,6 +191,21 @@
     }
 }
 
+float SoundDoseManager::getAttenuationForDeviceId(audio_port_handle_t id) const {
+    float attenuation = 0.f;
+
+    const std::lock_guard _l(mLock);
+    const auto deviceTypeIt = mActiveDeviceTypes.find(id);
+    if (deviceTypeIt != mActiveDeviceTypes.end()) {
+        auto attenuationIt = mMelAttenuationDB.find(deviceTypeIt->second);
+        if (attenuationIt != mMelAttenuationDB.end()) {
+            attenuation = attenuationIt->second;
+        }
+    }
+
+    return attenuation;
+}
+
 audio_port_handle_t SoundDoseManager::getIdForAudioDevice(const AudioDevice& audioDevice) const {
     if (isComputeCsdForcedOnAllDevices()) {
         // If CSD is forced on all devices return random port id. Used only in testing.
@@ -212,6 +231,15 @@
         ALOGI("%s: could not find port id for device %s", __func__, adt.toString().c_str());
         return AUDIO_PORT_HANDLE_NONE;
     }
+
+    if (audio_is_ble_out_device(type) || audio_is_a2dp_device(type)) {
+        const auto btDeviceIt = mBluetoothDevicesWithCsd.find(std::make_pair(address, type));
+        if (btDeviceIt == mBluetoothDevicesWithCsd.end() || !btDeviceIt->second) {
+            ALOGI("%s: bt device %s does not support sound dose", __func__,
+                  adt.toString().c_str());
+            return AUDIO_PORT_HANDLE_NONE;
+        }
+    }
     return deviceIt->second;
 }
 
@@ -260,7 +288,11 @@
                 in_audioDevice.address.toString().c_str());
         return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT);
     }
-    soundDoseManager->onMomentaryExposure(in_currentDbA, id);
+
+    float attenuation = soundDoseManager->getAttenuationForDeviceId(id);
+    ALOGV("%s: attenuating received momentary exposure with %f dB", __func__, attenuation);
+    // TODO: remove the attenuation once HAL MELs are guaranteed to be attenuated
+    soundDoseManager->onMomentaryExposure(in_currentDbA - attenuation, id);
 
     return ndk::ScopedAStatus::ok();
 }
@@ -289,9 +321,10 @@
                 in_audioDevice.address.toString().c_str());
         return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT);
     }
+
     // TODO: introduce timestamp in onNewMelValues callback
-    soundDoseManager->onNewMelValues(in_melRecord.melValues, 0,
-                                     in_melRecord.melValues.size(), id);
+    soundDoseManager->onNewMelValues(in_melRecord.melValues, 0, in_melRecord.melValues.size(),
+                                     id, /*attenuated=*/false);
 
     return ndk::ScopedAStatus::ok();
 }
@@ -549,9 +582,6 @@
 }
 
 void SoundDoseManager::setUseFrameworkMel(bool useFrameworkMel) {
-    // invalidate any HAL sound dose interface used
-    resetHalSoundDoseInterfaces();
-
     const std::lock_guard _l(mLock);
     mUseFrameworkMel = useFrameworkMel;
 }
@@ -562,8 +592,19 @@
 }
 
 void SoundDoseManager::setComputeCsdOnAllDevices(bool computeCsdOnAllDevices) {
-    const std::lock_guard _l(mLock);
-    mComputeCsdOnAllDevices = computeCsdOnAllDevices;
+    bool changed = false;
+    {
+        const std::lock_guard _l(mLock);
+        if (mHalSoundDose.size() != 0) {
+            // when using the HAL path we cannot force it to deliver values for all devices
+            changed = mUseFrameworkMel != computeCsdOnAllDevices;
+            mUseFrameworkMel = computeCsdOnAllDevices;
+        }
+        mComputeCsdOnAllDevices = computeCsdOnAllDevices;
+    }
+    if (changed && computeCsdOnAllDevices) {
+        mMelReporterCallback->applyAllAudioPatches();
+    }
 }
 
 bool SoundDoseManager::isComputeCsdForcedOnAllDevices() const {
@@ -582,7 +623,7 @@
 
 bool SoundDoseManager::useHalSoundDose() const {
     const std::lock_guard _l(mLock);
-    return mHalSoundDose.size() > 0;
+    return !mUseFrameworkMel && mHalSoundDose.size() > 0;
 }
 
 void SoundDoseManager::resetSoundDose() {
@@ -604,26 +645,68 @@
 }
 
 void SoundDoseManager::onNewMelValues(const std::vector<float>& mels, size_t offset, size_t length,
-                                      audio_port_handle_t deviceId) const {
+                                      audio_port_handle_t deviceId, bool attenuated) const {
     ALOGV("%s", __func__);
 
-
     sp<media::ISoundDoseCallback> soundDoseCallback;
     std::vector<audio_utils::CsdRecord> records;
     float currentCsd;
+
+    // TODO: delete this case once HAL MELs are guaranteed to be attenuated
+    float attenuation = attenuated ? 0.0f : getAttenuationForDeviceId(deviceId);
+
     {
         const std::lock_guard _l(mLock);
         if (!mEnabledCsd) {
             return;
         }
 
-
         const int64_t timestampSec = getMonotonicSecond();
 
-        // only for internal callbacks
-        records = mMelAggregator->aggregateAndAddNewMelRecord(audio_utils::MelRecord(
-                deviceId, std::vector<float>(mels.begin() + offset, mels.begin() + offset + length),
-                timestampSec - length));
+        if (attenuated) {
+            records = mMelAggregator->aggregateAndAddNewMelRecord(audio_utils::MelRecord(
+                    deviceId,
+                    std::vector<float>(mels.begin() + offset, mels.begin() + offset + length),
+                    timestampSec - length));
+        } else {
+            ALOGV("%s: attenuating received values with %f dB", __func__, attenuation);
+
+            // Extract all intervals whose values remain >= the RS2 lower bound (80 dBA)
+            // after the attenuation is applied
+            size_t start = offset;
+            size_t stop = offset;
+            for (; stop < mels.size() && stop < offset + length; ++stop) {
+                if (mels[stop] - attenuation < kDefaultRs2LowerBound) {
+                    if (start < stop) {
+                        std::vector<float> attMel(stop-start, -attenuation);
+                        // attMel[i] = mels[i] - attenuation, i in [start, stop)
+                        std::transform(mels.begin() + start, mels.begin() + stop, attMel.begin(),
+                                       attMel.begin(), std::plus<float>());
+                        std::vector<audio_utils::CsdRecord> newRec =
+                                mMelAggregator->aggregateAndAddNewMelRecord(
+                                        audio_utils::MelRecord(deviceId,
+                                                               attMel,
+                                                               timestampSec - length + start -
+                                                               offset));
+                        std::copy(newRec.begin(), newRec.end(), std::back_inserter(records));
+                    }
+                    start = stop+1;
+                }
+            }
+            if (start < stop) {
+                std::vector<float> attMel(stop-start, -attenuation);
+                // attMel[i] = mels[i] - attenuation, i in [start, stop)
+                std::transform(mels.begin() + start, mels.begin() + stop, attMel.begin(),
+                               attMel.begin(), std::plus<float>());
+                std::vector<audio_utils::CsdRecord> newRec =
+                        mMelAggregator->aggregateAndAddNewMelRecord(
+                                audio_utils::MelRecord(deviceId,
+                                                       attMel,
+                                                       timestampSec - length + start -
+                                                       offset));
+                std::copy(newRec.begin(), newRec.end(), std::back_inserter(records));
+            }
+        }
 
         currentCsd = mMelAggregator->getCsd();
     }
@@ -658,6 +741,10 @@
         if (!mEnabledCsd) {
             return;
         }
+
+        if (currentMel < mRs2UpperBound) {
+            return;
+        }
     }
 
     auto soundDoseCallback = getSoundDoseCallback();
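
A minimal sketch (illustrative only, not part of the patch) of the splitting step above:
unattenuated MELs are reduced by the per-device attenuation and only contiguous runs that
stay at or above the RS2 lower bound (80 dBA) are kept. The helper name splitAttenuatedMels
and the Run struct are hypothetical; the real code feeds each run into
MelAggregator::aggregateAndAddNewMelRecord() with a timestamp adjusted for the run's offset.

    #include <cstddef>
    #include <vector>

    struct Run {
        size_t begin;               // index range [begin, end) in the original vector
        size_t end;
        std::vector<float> values;  // attenuated MELs for the run
    };

    std::vector<Run> splitAttenuatedMels(const std::vector<float>& mels, float attenuationDb,
                                         float rs2LowerBoundDb = 80.f) {
        std::vector<Run> runs;
        size_t start = 0;
        for (size_t stop = 0; stop <= mels.size(); ++stop) {
            // a run ends at the vector boundary or at a value that falls below the bound
            const bool endOfRun =
                    stop == mels.size() || mels[stop] - attenuationDb < rs2LowerBoundDb;
            if (!endOfRun) continue;
            if (start < stop) {
                Run run{start, stop, {}};
                for (size_t i = start; i < stop; ++i) {
                    run.values.push_back(mels[i] - attenuationDb);
                }
                runs.push_back(std::move(run));
            }
            start = stop + 1;
        }
        return runs;
    }

For the values used in the NewMelValuesUnattenuatedAreSplit test further below
({79, 80, 79, 80, 79, 79, 80} with zero attenuation) this yields three single-element runs
of 80 dBA, which matches the three expected aggregateAndAddNewMelRecord() calls.
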
diff --git a/services/audioflinger/sounddose/SoundDoseManager.h b/services/audioflinger/sounddose/SoundDoseManager.h
index 347eabe..52a3fd6 100644
--- a/services/audioflinger/sounddose/SoundDoseManager.h
+++ b/services/audioflinger/sounddose/SoundDoseManager.h
@@ -39,6 +39,8 @@
 
     virtual void stopMelComputationForDeviceId(audio_port_handle_t deviceId) = 0;
     virtual void startMelComputationForDeviceId(audio_port_handle_t deviceId) = 0;
+
+    virtual void applyAllAudioPatches() = 0;
 };
 
 class SoundDoseManager : public audio_utils::MelProcessor::MelCallback {
@@ -53,6 +55,13 @@
           mMelAggregator(sp<audio_utils::MelAggregator>::make(kCsdWindowSeconds)),
           mRs2UpperBound(kDefaultRs2UpperBound) {};
 
+    // Used only for testing
+    SoundDoseManager(const sp<IMelReporterCallback>& melReporterCallback,
+                     const sp<audio_utils::MelAggregator>& melAggregator)
+            : mMelReporterCallback(melReporterCallback),
+              mMelAggregator(melAggregator),
+              mRs2UpperBound(kDefaultRs2UpperBound) {};
+
     /**
      * \brief Creates or gets the MelProcessor assigned to the streamHandle
      *
@@ -144,7 +153,7 @@
 
     // ------ Override audio_utils::MelProcessor::MelCallback ------
     void onNewMelValues(const std::vector<float>& mels, size_t offset, size_t length,
-                        audio_port_handle_t deviceId) const override;
+                        audio_port_handle_t deviceId, bool attenuated) const override;
 
     void onMomentaryExposure(float currentMel, audio_port_handle_t deviceId) const override;
 
@@ -205,6 +214,8 @@
 
     sp<media::ISoundDoseCallback> getSoundDoseCallback() const;
 
+    float getAttenuationForDeviceId(audio_port_handle_t id) const;
+
     void updateAttenuation(float attenuationDB, audio_devices_t deviceType);
     void setCsdEnabled(bool enabled);
     void setUseFrameworkMel(bool useFrameworkMel);
diff --git a/services/audioflinger/sounddose/tests/Android.bp b/services/audioflinger/sounddose/tests/Android.bp
index 2a2addf..60e170d 100644
--- a/services/audioflinger/sounddose/tests/Android.bp
+++ b/services/audioflinger/sounddose/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_base_license"
@@ -11,7 +12,7 @@
     name: "sounddosemanager_tests",
 
     srcs: [
-        "sounddosemanager_tests.cpp"
+        "sounddosemanager_tests.cpp",
     ],
 
     defaults: [
diff --git a/services/audioflinger/sounddose/tests/sounddosemanager_tests.cpp b/services/audioflinger/sounddose/tests/sounddosemanager_tests.cpp
index 294080b..e79b05e 100644
--- a/services/audioflinger/sounddose/tests/sounddosemanager_tests.cpp
+++ b/services/audioflinger/sounddose/tests/sounddosemanager_tests.cpp
@@ -20,6 +20,7 @@
 #include <SoundDoseManager.h>
 
 #include <aidl/android/hardware/audio/core/sounddose/BnSoundDose.h>
+#include <audio_utils/MelAggregator.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 #include <media/AidlConversionCppNdk.h>
@@ -43,6 +44,15 @@
 public:
     MOCK_METHOD(void, startMelComputationForDeviceId, (audio_port_handle_t), (override));
     MOCK_METHOD(void, stopMelComputationForDeviceId, (audio_port_handle_t), (override));
+    MOCK_METHOD(void, applyAllAudioPatches, (), (override));
+};
+
+class MelAggregatorMock : public audio_utils::MelAggregator {
+public:
+    MelAggregatorMock() : MelAggregator(100) {}
+
+    MOCK_METHOD(std::vector<audio_utils::CsdRecord>, aggregateAndAddNewMelRecord,
+                (const audio_utils::MelRecord&), (override));
 };
 
 constexpr char kPrimaryModule[] = "primary";
@@ -52,7 +62,8 @@
 protected:
     void SetUp() override {
         mMelReporterCallback = sp<MelReporterCallback>::make();
-        mSoundDoseManager = sp<SoundDoseManager>::make(mMelReporterCallback);
+        mMelAggregator = sp<MelAggregatorMock>::make();
+        mSoundDoseManager = sp<SoundDoseManager>::make(mMelReporterCallback, mMelAggregator);
         mHalSoundDose = ndk::SharedRefBase::make<HalSoundDoseMock>();
         mSecondaryHalSoundDose = ndk::SharedRefBase::make<HalSoundDoseMock>();
 
@@ -69,6 +80,7 @@
     }
 
     sp<MelReporterCallback> mMelReporterCallback;
+    sp<MelAggregatorMock> mMelAggregator;
     sp<SoundDoseManager> mSoundDoseManager;
     std::shared_ptr<HalSoundDoseMock> mHalSoundDose;
     std::shared_ptr<HalSoundDoseMock> mSecondaryHalSoundDose;
@@ -110,12 +122,33 @@
     EXPECT_NE(processor1, processor2);
 }
 
-TEST_F(SoundDoseManagerTest, NewMelValuesCacheNewRecord) {
-    std::vector<float>mels{1, 1};
+TEST_F(SoundDoseManagerTest, NewMelValuesAttenuatedAggregateMels) {
+    std::vector<float> mels{1.f, 1.f};
 
-    mSoundDoseManager->onNewMelValues(mels, 0, mels.size(), /*deviceId=*/1);
+    EXPECT_CALL(*mMelAggregator.get(), aggregateAndAddNewMelRecord)
+            .Times(1)
+            .WillOnce([&] (const audio_utils::MelRecord& record) {
+                EXPECT_THAT(record.mels, ::testing::ElementsAreArray(mels));
+                return std::vector<audio_utils::CsdRecord>();
+            });
 
-    EXPECT_EQ(mSoundDoseManager->getCachedMelRecordsSize(), size_t{1});
+    mSoundDoseManager->onNewMelValues(mels, 0, mels.size(), /*deviceId=*/1,
+                                      /*attenuated=*/true);
+}
+
+TEST_F(SoundDoseManagerTest, NewMelValuesUnattenuatedAreSplit) {
+    std::vector<float> mels{79.f, 80.f, 79.f, 80.f, 79.f, 79.f, 80.f};
+
+    EXPECT_CALL(*mMelAggregator.get(), aggregateAndAddNewMelRecord)
+            .Times(3)
+            .WillRepeatedly([&] (const audio_utils::MelRecord& record) {
+                EXPECT_EQ(record.mels.size(), size_t {1});
+                EXPECT_EQ(record.mels[0], 80.f);
+                return std::vector<audio_utils::CsdRecord>();
+            });
+
+    mSoundDoseManager->onNewMelValues(mels, 0, mels.size(), /*deviceId=*/1,
+            /*attenuated=*/false);
 }
 
 TEST_F(SoundDoseManagerTest, InvalidHalInterfaceIsNotSet) {
diff --git a/services/audioflinger/timing/tests/Android.bp b/services/audioflinger/timing/tests/Android.bp
index d1e5563..040a914 100644
--- a/services/audioflinger/timing/tests/Android.bp
+++ b/services/audioflinger/timing/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_base_license"
@@ -13,7 +14,7 @@
     host_supported: true,
 
     srcs: [
-        "mediasyncevent_tests.cpp"
+        "mediasyncevent_tests.cpp",
     ],
 
     header_libs: [
@@ -38,7 +39,7 @@
     host_supported: true,
 
     srcs: [
-        "monotonicframecounter_tests.cpp"
+        "monotonicframecounter_tests.cpp",
     ],
 
     static_libs: [
@@ -54,26 +55,26 @@
 }
 
 cc_test {
-     name: "synchronizedrecordstate_tests",
+    name: "synchronizedrecordstate_tests",
 
-     host_supported: true,
+    host_supported: true,
 
-     srcs: [
-         "synchronizedrecordstate_tests.cpp"
-     ],
+    srcs: [
+        "synchronizedrecordstate_tests.cpp",
+    ],
 
-     header_libs: [
-         "libaudioclient_headers",
-     ],
+    header_libs: [
+        "libaudioclient_headers",
+    ],
 
-     static_libs: [
-         "liblog",
-         "libutils", // RefBase
-     ],
+    static_libs: [
+        "liblog",
+        "libutils", // RefBase
+    ],
 
-     cflags: [
-         "-Wall",
-         "-Werror",
-         "-Wextra",
-     ],
- }
\ No newline at end of file
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+}
diff --git a/services/audiopolicy/Android.bp b/services/audiopolicy/Android.bp
index e018dd3..66ba7e2 100644
--- a/services/audiopolicy/Android.bp
+++ b/services/audiopolicy/Android.bp
@@ -1,10 +1,11 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
     // to get the below license kinds:
     //   SPDX-license-identifier-Apache-2.0
-    default_applicable_licenses: ["frameworks_av_license"],
+    default_applicable_licenses: ["Android-Apache-2.0"],
 }
 
 cc_library_headers {
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index b164159..bfc3132 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -269,6 +269,7 @@
 
     virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes) = 0;
     virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes) = 0;
+    virtual status_t getRegisteredPolicyMixes(std::vector<AudioMix>& mixes) = 0;
 
     virtual status_t updatePolicyMix(
         const AudioMix& mix,
@@ -285,7 +286,8 @@
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
                                       audio_port_handle_t *portId,
-                                      uid_t uid) = 0;
+                                      uid_t uid,
+                                      bool internal = false) = 0;
     virtual status_t stopAudioSource(audio_port_handle_t portId) = 0;
 
     virtual status_t setMasterMono(bool mono) = 0;
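
A minimal sketch (illustrative only, not part of the patch) of how a caller could use the
new getRegisteredPolicyMixes() query; 'policyInterface' stands for any AudioPolicyInterface
implementation and is an assumption:

    std::vector<AudioMix> mixes;
    if (policyInterface->getRegisteredPolicyMixes(mixes) == NO_ERROR) {
        for (const auto& mix : mixes) {
            // mDeviceType / mDeviceAddress are the same fields used when registering mixes
            ALOGD("registered mix: dev=0x%x addr=%s",
                  mix.mDeviceType, mix.mDeviceAddress.c_str());
        }
    }
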
diff --git a/services/audiopolicy/common/Android.bp b/services/audiopolicy/common/Android.bp
index 91701ad..a699b8b 100644
--- a/services/audiopolicy/common/Android.bp
+++ b/services/audiopolicy/common/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index 8b76842..e8b04ce 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -35,6 +36,7 @@
         "src/TypeConverter.cpp",
     ],
     shared_libs: [
+        "android.media.audiopolicy-aconfig-cc",
         "audioclient-types-aidl-cpp",
         "audiopolicy-types-aidl-cpp",
         "libaudioclient_aidl_conversion",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 13b70e5..7c70877 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -364,7 +364,7 @@
 
     void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
     virtual DeviceVector devices() const;
-    void setDevices(const DeviceVector &devices) { mDevices = devices; }
+    void setDevices(const DeviceVector &devices);
     bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
     virtual DeviceVector supportedDevices() const;
     virtual bool devicesSupportEncodedFormats(const DeviceTypeSet& deviceTypes);
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 7119b85..fe90a1e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -222,7 +222,8 @@
                            const struct audio_port_config &config,
                            const sp<DeviceDescriptor>& srcDevice,
                            audio_stream_type_t stream, product_strategy_t strategy,
-                           VolumeSource volumeSource);
+                           VolumeSource volumeSource,
+                           bool isInternal);
 
     ~SourceClientDescriptor() override = default;
 
@@ -248,6 +249,7 @@
     void setSwOutput(const sp<SwAudioOutputDescriptor>& swOutput, bool closeOutput = false);
     wp<HwAudioOutputDescriptor> hwOutput() const { return mHwOutput; }
     void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
+    bool isInternal() const override { return mIsInternal; }
 
     using ClientDescriptor::dump;
     void dump(String8 *dst, int spaces) const override;
@@ -268,34 +270,17 @@
      * behavior of AudioDeviceCallback.
      */
     bool mCloseOutput = false;
-};
-
-/**
- * @brief The InternalSourceClientDescriptor class
- * Specialized Client Descriptor for either a raw patch created from @see createAudioPatch API
- * or for internal audio patches managed by APM (e.g. phone call patches).
- * Whatever the bridge created (software or hardware), we need a client to track the activity
- * and manage volumes.
- * The Audio Patch requested sink is expressed as a preferred device which allows to route
- * the SwOutput. Then APM will performs checks on the UID (against UID of Audioserver) of the
- * requester to prevent rerouting SwOutput involved in raw patches.
- */
-class InternalSourceClientDescriptor: public SourceClientDescriptor
-{
-public:
-    InternalSourceClientDescriptor(
-            audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
-            const struct audio_port_config &config, const sp<DeviceDescriptor>& srcDevice,
-             const sp<DeviceDescriptor>& sinkDevice,
-            product_strategy_t strategy, VolumeSource volumeSource) :
-        SourceClientDescriptor(
-            portId, uid, attributes, config, srcDevice, AUDIO_STREAM_PATCH, strategy,
-            volumeSource)
-    {
-        setPreferredDeviceId(sinkDevice->getId());
-    }
-    bool isInternal() const override { return true; }
-    ~InternalSourceClientDescriptor() override = default;
+    /**
+     * True for a specialized client descriptor used either for a raw patch created from the
+     * @see createAudioPatch API or for internal audio patches managed by APM
+     * (e.g. phone call patches).
+     * Whatever bridge is created (software or hardware), we need a client to track the activity
+     * and manage volumes.
+     * The requested sink of the audio patch is expressed as a preferred device, which allows
+     * routing of the SwOutput. APM then checks the requester's UID (against the UID of
+     * audioserver) to prevent rerouting a SwOutput involved in raw patches.
+     */
+    bool mIsInternal = false;
 };
 
 class SourceClientCollection :
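
A minimal sketch (illustrative only, not part of the patch) of how an internal patch client
can now be built directly from SourceClientDescriptor, replacing the removed
InternalSourceClientDescriptor subclass; the helper name makeInternalSourceClient is
hypothetical:

    sp<SourceClientDescriptor> makeInternalSourceClient(
            audio_port_handle_t portId, uid_t uid, const audio_attributes_t& attributes,
            const struct audio_port_config& config, const sp<DeviceDescriptor>& srcDevice,
            const sp<DeviceDescriptor>& sinkDevice, product_strategy_t strategy,
            VolumeSource volumeSource) {
        auto client = sp<SourceClientDescriptor>::make(
                portId, uid, attributes, config, srcDevice, AUDIO_STREAM_PATCH, strategy,
                volumeSource, /*isInternal=*/true);
        client->setPreferredDeviceId(sinkDevice->getId());  // route the bridge to the sink
        return client;
    }
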
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 6c130fd..c502fc2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -93,6 +93,8 @@
 
     void setEncapsulationInfoFromHal(AudioPolicyClientInterface *clientInterface);
 
+    void setPreferredConfig(const audio_config_base_t * preferredConfig);
+
     void dump(String8 *dst, int spaces, bool verbose = true) const;
 
 private:
@@ -107,6 +109,7 @@
     audio_format_t      mCurrentEncodedFormat;
     bool                mIsDynamic = false;
     std::string         mDeclaredAddress; // Original device address
+    std::optional<audio_config_base_t> mPreferredConfig;
 };
 
 class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index f3a9518..688772c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -70,10 +70,17 @@
         return mMixerBehaviors;
     }
 
+    enum CompatibilityScore {
+        NO_MATCH = 0,
+        PARTIAL_MATCH = 1,
+        EXACT_MATCH = 2
+    };
+
     /**
-     * @brief isCompatibleProfile: This method is used for input and direct output,
+     * @brief getCompatibilityScore: This method is used for input and direct output,
      * and is not used for other output.
-     * Checks if the IO profile is compatible with specified parameters.
+     * Returns a compatibility score that measures how compatible the IO profile is
+     * with the specified parameters.
      * For input, flags is interpreted as audio_input_flags_t.
      * TODO: merge audio_output_flags_t and audio_input_flags_t.
      *
@@ -86,18 +93,18 @@
      * @param updatedChannelMask if non-NULL, it is assigned the actual channel mask
      * @param flags to be checked for compatibility
      * @param exactMatchRequiredForInputFlags true if exact match is required on flags
-     * @return true if the profile is compatible, false otherwise.
+     * @return how compatible the IO profile is with the given parameters.
      */
-    bool isCompatibleProfile(const DeviceVector &devices,
-                             uint32_t samplingRate,
-                             uint32_t *updatedSamplingRate,
-                             audio_format_t format,
-                             audio_format_t *updatedFormat,
-                             audio_channel_mask_t channelMask,
-                             audio_channel_mask_t *updatedChannelMask,
-                             // FIXME parameter type
-                             uint32_t flags,
-                             bool exactMatchRequiredForInputFlags = false) const;
+    CompatibilityScore getCompatibilityScore(const DeviceVector &devices,
+                                             uint32_t samplingRate,
+                                             uint32_t *updatedSamplingRate,
+                                             audio_format_t format,
+                                             audio_format_t *updatedFormat,
+                                             audio_channel_mask_t channelMask,
+                                             audio_channel_mask_t *updatedChannelMask,
+                                             // FIXME parameter type
+                                             uint32_t flags,
+                                             bool exactMatchRequiredForInputFlags = false) const;
 
     /**
      * @brief areAllDevicesSupported: Checks if the given devices are supported by the IO profile.
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index d027564..6537a00 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -778,6 +778,19 @@
     }
 }
 
+void SwAudioOutputDescriptor::setDevices(const android::DeviceVector &devices) {
+    if ((mFlags & AUDIO_OUTPUT_FLAG_BIT_PERFECT) == AUDIO_OUTPUT_FLAG_BIT_PERFECT) {
+        for (auto device : mDevices) {
+            device->setPreferredConfig(nullptr);
+        }
+        auto config = getConfig();
+        for (auto device : devices) {
+            device->setPreferredConfig(&config);
+        }
+    }
+    mDevices = devices;
+}
+
 // HwAudioOutputDescriptor implementation
 HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
                                                  AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index dc0f466..d1819fd 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -15,7 +15,7 @@
  */
 
 #define LOG_TAG "APM_AudioPolicyMix"
-// #define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
 
 #include <algorithm>
 #include <iterator>
@@ -28,6 +28,9 @@
 #include "PolicyAudioPort.h"
 #include "IOProfile.h"
 #include <AudioOutputDescriptor.h>
+#include <android_media_audiopolicy.h>
+
+namespace audio_flags = android::media::audiopolicy;
 
 namespace android {
 namespace {
@@ -190,6 +193,12 @@
                     mix.mDeviceType, mix.mDeviceAddress.c_str());
             return BAD_VALUE;
         }
+        if (audio_flags::audio_mix_ownership()) {
+            if (mix.mToken == registeredMix->mToken) {
+                ALOGE("registerMix(): same mix already registered - skipping");
+                return BAD_VALUE;
+            }
+        }
     }
     if (!areMixCriteriaConsistent(mix.mCriteria)) {
         ALOGE("registerMix(): Mix contains inconsistent criteria "
@@ -212,12 +221,21 @@
 {
     for (size_t i = 0; i < size(); i++) {
         const sp<AudioPolicyMix>& registeredMix = itemAt(i);
-        if (mix.mDeviceType == registeredMix->mDeviceType
+        if (audio_flags::audio_mix_ownership()) {
+            if (mix.mToken == registeredMix->mToken) {
+                ALOGD("unregisterMix(): removing mix for dev=0x%x addr=%s",
+                      mix.mDeviceType, mix.mDeviceAddress.c_str());
+                removeAt(i);
+                return NO_ERROR;
+            }
+        } else {
+            if (mix.mDeviceType == registeredMix->mDeviceType
                 && mix.mDeviceAddress.compare(registeredMix->mDeviceAddress) == 0) {
-            ALOGD("unregisterMix(): removing mix for dev=0x%x addr=%s",
-                    mix.mDeviceType, mix.mDeviceAddress.c_str());
-            removeAt(i);
-            return NO_ERROR;
+                ALOGD("unregisterMix(): removing mix for dev=0x%x addr=%s",
+                      mix.mDeviceType, mix.mDeviceAddress.c_str());
+                removeAt(i);
+                return NO_ERROR;
+            }
         }
     }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 8b6866e..2aee501 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -96,12 +96,12 @@
 SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
          audio_attributes_t attributes, const struct audio_port_config &config,
          const sp<DeviceDescriptor>& srcDevice, audio_stream_type_t stream,
-         product_strategy_t strategy, VolumeSource volumeSource) :
+         product_strategy_t strategy, VolumeSource volumeSource, bool isInternal) :
     TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
         {config.sample_rate, config.channel_mask, config.format}, AUDIO_PORT_HANDLE_NONE,
         stream, strategy, volumeSource, AUDIO_OUTPUT_FLAG_NONE, false,
         {} /* Sources do not support secondary outputs*/, nullptr),
-    mSrcDevice(srcDevice)
+    mSrcDevice(srcDevice), mIsInternal(isInternal)
 {
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index fe25693..9f7b8fc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -132,6 +132,20 @@
 {
     DeviceDescriptorBase::toAudioPortConfig(dstConfig, srcConfig);
     dstConfig->ext.device.hw_module = getModuleHandle();
+    if (mPreferredConfig.has_value()) {
+        if (mPreferredConfig->format != AUDIO_FORMAT_DEFAULT) {
+            dstConfig->config_mask |= AUDIO_PORT_CONFIG_FORMAT;
+            dstConfig->format = mPreferredConfig->format;
+        }
+        if (mPreferredConfig->sample_rate != 0) {
+            dstConfig->config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
+            dstConfig->sample_rate = mPreferredConfig->sample_rate;
+        }
+        if (mPreferredConfig->channel_mask != AUDIO_CHANNEL_NONE) {
+            dstConfig->config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
+            dstConfig->channel_mask = mPreferredConfig->channel_mask;
+        }
+    }
 }
 
 void DeviceDescriptor::toAudioPort(struct audio_port *port) const
@@ -183,6 +197,14 @@
     }
 }
 
+void DeviceDescriptor::setPreferredConfig(const audio_config_base_t* preferredConfig) {
+    if (preferredConfig == nullptr) {
+        mPreferredConfig.reset();
+    } else {
+        mPreferredConfig = *preferredConfig;
+    }
+}
+
 void DeviceDescriptor::dump(String8 *dst, int spaces, bool verbose) const
 {
     String8 extraInfo;
@@ -193,6 +215,13 @@
     std::string descBaseDumpStr;
     DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, extraInfo.c_str(), verbose);
     dst->append(descBaseDumpStr.c_str());
+
+    if (mPreferredConfig.has_value()) {
+        dst->append(base::StringPrintf(
+                "%*sPreferred Config: format=%#x, channelMask=%#x, sampleRate=%u\n",
+                spaces, "", mPreferredConfig.value().format, mPreferredConfig.value().channel_mask,
+                mPreferredConfig.value().sample_rate).c_str());
+    }
 }
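
A minimal sketch (illustrative only, not part of the patch) of the preferred-config flow:
a bit-perfect output pins a config on its routed device, and toAudioPortConfig() then
propagates it through config_mask. Here 'device' is an sp<DeviceDescriptor>, and the
literal values and the surrounding flow are assumptions:

    audio_config_base_t preferred = {
        .sample_rate = 44100,
        .channel_mask = AUDIO_CHANNEL_OUT_STEREO,
        .format = AUDIO_FORMAT_PCM_16_BIT,
    };
    device->setPreferredConfig(&preferred);      // remembered by the DeviceDescriptor

    struct audio_port_config portConfig = {};
    device->toAudioPortConfig(&portConfig, nullptr);
    // portConfig.config_mask now includes FORMAT | SAMPLE_RATE | CHANNEL_MASK with the
    // preferred values, so the output/patch can be opened bit-perfect.

    device->setPreferredConfig(nullptr);         // cleared when the device is re-routed
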
 
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index c7d2e6b..d9fbd89 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -33,17 +33,17 @@
     }
 }
 
-bool IOProfile::isCompatibleProfile(const DeviceVector &devices,
-                                    uint32_t samplingRate,
-                                    uint32_t *updatedSamplingRate,
-                                    audio_format_t format,
-                                    audio_format_t *updatedFormat,
-                                    audio_channel_mask_t channelMask,
-                                    audio_channel_mask_t *updatedChannelMask,
-                                    // FIXME type punning here
-                                    uint32_t flags,
-                                    bool exactMatchRequiredForInputFlags) const
-{
+IOProfile::CompatibilityScore IOProfile::getCompatibilityScore(
+        const android::DeviceVector &devices,
+        uint32_t samplingRate,
+        uint32_t *updatedSamplingRate,
+        audio_format_t format,
+        audio_format_t *updatedFormat,
+        audio_channel_mask_t channelMask,
+        audio_channel_mask_t *updatedChannelMask,
+        // FIXME type punning here
+        uint32_t flags,
+        bool exactMatchRequiredForInputFlags) const {
     const bool isPlaybackThread =
             getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SOURCE;
     const bool isRecordThread =
@@ -51,13 +51,13 @@
     ALOG_ASSERT(isPlaybackThread != isRecordThread);
     if (!areAllDevicesSupported(devices) ||
             !isCompatibleProfileForFlags(flags, exactMatchRequiredForInputFlags)) {
-        return false;
+        return NO_MATCH;
     }
 
     if (!audio_is_valid_format(format) ||
             (isPlaybackThread && (samplingRate == 0 || !audio_is_output_channel(channelMask))) ||
             (isRecordThread && (!audio_is_input_channel(channelMask)))) {
-         return false;
+         return NO_MATCH;
     }
 
     audio_format_t myUpdatedFormat = format;
@@ -69,32 +69,40 @@
         .channel_mask = channelMask,
         .format = format,
     };
+    auto result = NO_MATCH;
     if (isRecordThread)
     {
         if ((flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
             if (checkExactAudioProfile(&config) != NO_ERROR) {
-                return false;
+                return result;
             }
-        } else if (checkExactAudioProfile(&config) != NO_ERROR && checkCompatibleAudioProfile(
-                myUpdatedSamplingRate, myUpdatedChannelMask, myUpdatedFormat) != NO_ERROR) {
-            return false;
+            result = EXACT_MATCH;
+        } else if (checkExactAudioProfile(&config) == NO_ERROR) {
+            result = EXACT_MATCH;
+        } else if (checkCompatibleAudioProfile(
+                myUpdatedSamplingRate, myUpdatedChannelMask, myUpdatedFormat) == NO_ERROR) {
+            result = PARTIAL_MATCH;
+        } else {
+            return result;
         }
     } else {
-        if (checkExactAudioProfile(&config) != NO_ERROR) {
-            return false;
+        if (checkExactAudioProfile(&config) == NO_ERROR) {
+            result = EXACT_MATCH;
+        } else {
+            return result;
         }
     }
 
-    if (updatedSamplingRate != NULL) {
+    if (updatedSamplingRate != nullptr) {
         *updatedSamplingRate = myUpdatedSamplingRate;
     }
-    if (updatedFormat != NULL) {
+    if (updatedFormat != nullptr) {
         *updatedFormat = myUpdatedFormat;
     }
-    if (updatedChannelMask != NULL) {
+    if (updatedChannelMask != nullptr) {
         *updatedChannelMask = myUpdatedChannelMask;
     }
-    return true;
+    return result;
 }
 
 bool IOProfile::areAllDevicesSupported(const DeviceVector &devices) const {
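
A minimal sketch (illustrative only, not part of the patch) of how a hypothetical caller
could rank profiles with the new CompatibilityScore, preferring an exact match over a
partial one; 'profiles', 'devices' and the rate/format/mask/flags variables are assumed to
be in scope:

    sp<IOProfile> bestProfile;
    auto bestScore = IOProfile::NO_MATCH;
    for (const auto& profile : profiles) {
        uint32_t updatedRate;
        audio_format_t updatedFormat;
        audio_channel_mask_t updatedMask;
        const auto score = profile->getCompatibilityScore(devices, samplingRate, &updatedRate,
                                                          format, &updatedFormat, channelMask,
                                                          &updatedMask, flags);
        if (score > bestScore) {
            bestScore = score;
            bestProfile = profile;
            if (bestScore == IOProfile::EXACT_MATCH) break;  // cannot do better
        }
    }
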
diff --git a/services/audiopolicy/config/Android.bp b/services/audiopolicy/config/Android.bp
index 86600f4..7d529df 100644
--- a/services/audiopolicy/config/Android.bp
+++ b/services/audiopolicy/config/Android.bp
@@ -18,6 +18,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -31,41 +32,49 @@
     vendor: true,
     src: ":a2dp_in_audio_policy_configuration",
 }
+
 prebuilt_etc {
     name: "a2dp_audio_policy_configuration.xml",
     vendor: true,
     src: ":a2dp_audio_policy_configuration",
 }
+
 prebuilt_etc {
     name: "audio_policy_configuration.xml",
     vendor: true,
     src: ":audio_policy_configuration_generic",
 }
+
 prebuilt_etc {
     name: "r_submix_audio_policy_configuration.xml",
     vendor: true,
     src: ":r_submix_audio_policy_configuration",
 }
+
 prebuilt_etc {
     name: "audio_policy_volumes.xml",
     vendor: true,
     src: ":audio_policy_volumes",
 }
+
 prebuilt_etc {
     name: "default_volume_tables.xml",
     vendor: true,
     src: ":default_volume_tables",
 }
+
 prebuilt_etc {
     name: "surround_sound_configuration_5_0.xml",
     vendor: true,
     src: ":surround_sound_configuration_5_0",
 }
+
 prebuilt_etc {
     name: "usb_audio_policy_configuration.xml",
     vendor: true,
     src: ":usb_audio_policy_configuration",
 }
+
 prebuilt_etc {
     name: "primary_audio_policy_configuration.xml",
     src: ":primary_audio_policy_configuration",
@@ -76,50 +85,62 @@
     name: "a2dp_in_audio_policy_configuration",
     srcs: ["a2dp_in_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "a2dp_audio_policy_configuration",
     srcs: ["a2dp_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "primary_audio_policy_configuration",
     srcs: ["primary_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "surround_sound_configuration_5_0",
     srcs: ["surround_sound_configuration_5_0.xml"],
 }
+
 filegroup {
     name: "default_volume_tables",
     srcs: ["default_volume_tables.xml"],
 }
+
 filegroup {
     name: "audio_policy_volumes",
     srcs: ["audio_policy_volumes.xml"],
 }
+
 filegroup {
     name: "audio_policy_configuration_generic",
     srcs: ["audio_policy_configuration_generic.xml"],
 }
+
 filegroup {
     name: "audio_policy_configuration_generic_configurable",
     srcs: ["audio_policy_configuration_generic_configurable.xml"],
 }
+
 filegroup {
     name: "usb_audio_policy_configuration",
     srcs: ["usb_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "r_submix_audio_policy_configuration",
     srcs: ["r_submix_audio_policy_configuration.xml"],
 }
+
 filegroup {
     name: "bluetooth_audio_policy_configuration_7_0",
     srcs: ["bluetooth_audio_policy_configuration_7_0.xml"],
 }
+
 filegroup {
     name: "bluetooth_with_le_audio_policy_configuration_7_0",
     srcs: ["bluetooth_with_le_audio_policy_configuration_7_0.xml"],
 }
+
 filegroup {
     name: "hearing_aid_audio_policy_configuration_7_0",
     srcs: ["hearing_aid_audio_policy_configuration_7_0.xml"],
diff --git a/services/audiopolicy/engine/common/Android.bp b/services/audiopolicy/engine/common/Android.bp
index 0034a04..a93c816 100644
--- a/services/audiopolicy/engine/common/Android.bp
+++ b/services/audiopolicy/engine/common/Android.bp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engine/config/Android.bp b/services/audiopolicy/engine/config/Android.bp
index 12597de..0864e6a 100644
--- a/services/audiopolicy/engine/config/Android.bp
+++ b/services/audiopolicy/engine/config/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engine/config/tests/Android.bp b/services/audiopolicy/engine/config/tests/Android.bp
index 5d1aa16..8c7b7db 100644
--- a/services/audiopolicy/engine/config/tests/Android.bp
+++ b/services/audiopolicy/engine/config/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engine/config/tests/resources/Android.bp b/services/audiopolicy/engine/config/tests/resources/Android.bp
index 9cee978..99d62a3 100644
--- a/services/audiopolicy/engine/config/tests/resources/Android.bp
+++ b/services/audiopolicy/engine/config/tests/resources/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engine/interface/Android.bp b/services/audiopolicy/engine/interface/Android.bp
index 5dd5adb..b1f7666 100644
--- a/services/audiopolicy/engine/interface/Android.bp
+++ b/services/audiopolicy/engine/interface/Android.bp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/engineconfigurable/Android.bp b/services/audiopolicy/engineconfigurable/Android.bp
index eb2e2f4..d59ab5a 100644
--- a/services/audiopolicy/engineconfigurable/Android.bp
+++ b/services/audiopolicy/engineconfigurable/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -40,7 +41,7 @@
         "libaudiopolicyengineconfigurable_pfwwrapper",
 
     ],
-  shared_libs: [
+    shared_libs: [
         "libaudio_aidl_conversion_common_cpp",
         "libaudiofoundation",
         "libaudiopolicycomponents",
diff --git a/services/audiopolicy/engineconfigurable/config/Android.bp b/services/audiopolicy/engineconfigurable/config/Android.bp
index b3d1f97..8dd13e8 100644
--- a/services/audiopolicy/engineconfigurable/config/Android.bp
+++ b/services/audiopolicy/engineconfigurable/config/Android.bp
@@ -17,6 +17,7 @@
 // Root soong_namespace for common components
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -30,10 +31,12 @@
     vendor: true,
     src: ":audio_policy_engine_criteria",
 }
+
 filegroup {
     name: "audio_policy_engine_criterion_types_template",
     srcs: ["example/common/audio_policy_engine_criterion_types.xml.in"],
 }
+
 filegroup {
     name: "audio_policy_engine_criteria",
     srcs: ["example/common/audio_policy_engine_criteria.xml"],
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp b/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp
index e46b60f..fb1a71c 100644
--- a/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp
@@ -23,6 +23,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -42,16 +43,19 @@
         ":audio_policy_engine_volumes.xml",
     ],
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_product_strategies.xml",
     vendor: true,
     src: "audio_policy_engine_product_strategies.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_volumes.xml",
     vendor: true,
     src: ":audio_policy_engine_volumes",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_criterion_types.xml",
     vendor: true,
@@ -69,6 +73,7 @@
         ":audio_policy_configuration_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_configuration_files",
     srcs: [
@@ -79,18 +84,22 @@
         ":primary_audio_policy_configuration",
     ],
 }
+
 filegroup {
-    name : "audio_policy_configuration_top_file",
+    name: "audio_policy_configuration_top_file",
     srcs: [":audio_policy_configuration_generic"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration",
     srcs: ["audio_policy_engine_configuration.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_volumes",
     srcs: ["audio_policy_engine_volumes.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp b/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp
index ad6eeb1..b9abb54 100644
--- a/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp
+++ b/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp
@@ -24,6 +24,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -43,11 +44,13 @@
         ":audio_policy_engine_volumes.xml",
     ],
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_product_strategies.xml",
     vendor: true,
     src: "audio_policy_engine_product_strategies.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_criterion_types.xml",
     vendor: true,
@@ -65,6 +68,7 @@
         ":audio_policy_configuration_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_configuration_files",
     srcs: [
@@ -75,10 +79,12 @@
         ":primary_audio_policy_configuration",
     ],
 }
+
 filegroup {
-    name : "audio_policy_configuration_top_file",
+    name: "audio_policy_configuration_top_file",
     srcs: [":audio_policy_configuration_generic"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp b/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp
index 773a99a..67a6128 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp
@@ -23,6 +23,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -42,21 +43,25 @@
         ":audio_policy_engine_volumes.xml",
     ],
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_product_strategies.xml",
     vendor: true,
     src: "audio_policy_engine_product_strategies.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_stream_volumes.xml",
     vendor: true,
     src: ":audio_policy_engine_stream_volumes",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_default_stream_volumes.xml",
     vendor: true,
     src: ":audio_policy_engine_default_stream_volumes",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_criterion_types.xml",
     vendor: true,
@@ -74,6 +79,7 @@
         ":audio_policy_configuration_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_configuration_files",
     srcs: [
@@ -84,22 +90,27 @@
         ":primary_audio_policy_configuration",
     ],
 }
+
 filegroup {
-    name : "audio_policy_configuration_top_file",
+    name: "audio_policy_configuration_top_file",
     srcs: [":audio_policy_configuration_generic"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration",
     srcs: ["audio_policy_engine_configuration.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_stream_volumes",
     srcs: ["audio_policy_engine_stream_volumes.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_default_stream_volumes",
     srcs: ["audio_policy_engine_default_stream_volumes.xml"],
 }
+
 filegroup {
     name: "audio_policy_engine_configuration_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp
index ee62d5e..7fe111f 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp
@@ -17,6 +17,7 @@
 // Root soong_namespace for common components
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -31,18 +32,21 @@
     src: ":PolicyClass",
     sub_dir: "parameter-framework/Structure/Policy",
 }
+
 prebuilt_etc {
     name: "PolicySubsystem.xml",
     vendor: true,
     src: ":PolicySubsystem",
     sub_dir: "parameter-framework/Structure/Policy",
 }
+
 prebuilt_etc {
     name: "PolicySubsystem-CommonTypes.xml",
     vendor: true,
     src: ":buildcommontypesstructure_gen",
     sub_dir: "parameter-framework/Structure/Policy",
 }
+
 genrule {
     name: "buildcommontypesstructure_gen",
     defaults: ["buildcommontypesstructurerule"],
@@ -52,34 +56,42 @@
     name: "product_strategies_structure_template",
     srcs: ["examples/common/Structure/ProductStrategies.xml.in"],
 }
+
 filegroup {
     name: "PolicySubsystem",
     srcs: ["examples/common/Structure/PolicySubsystem.xml"],
 }
+
 filegroup {
     name: "PolicySubsystem-no-strategy",
     srcs: ["examples/common/Structure/PolicySubsystem-no-strategy.xml"],
 }
+
 filegroup {
     name: "common_types_structure_template",
     srcs: ["examples/common/Structure/PolicySubsystem-CommonTypes.xml.in"],
 }
+
 filegroup {
     name: "PolicyClass",
     srcs: ["examples/common/Structure/PolicyClass.xml"],
 }
+
 filegroup {
     name: "volumes.pfw",
     srcs: ["examples/Settings/volumes.pfw"],
 }
+
 filegroup {
     name: "device_for_input_source.pfw",
     srcs: ["examples/Settings/device_for_input_source.pfw"],
 }
+
 filegroup {
     name: "ParameterFrameworkConfigurationPolicy.userdebug.xml",
     srcs: ["examples/ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "ParameterFrameworkConfigurationPolicy.user.xml",
     srcs: ["examples/ParameterFrameworkConfigurationPolicy.user.xml"],
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp
index 7d2d293..38451f2 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp
@@ -27,6 +27,7 @@
 // Generate Audio Policy Parameter Framework Product Strategies Structure file from template
 //
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -42,6 +43,7 @@
     sub_dir: "parameter-framework/Structure/Policy",
     required: ["libpolicy-subsystem"],
 }
+
 genrule {
     name: "buildstrategiesstructure_gen",
     defaults: ["buildstrategiesstructurerule"],
@@ -67,6 +69,7 @@
         "PolicySubsystem-CommonTypes.xml",
     ],
 }
+
 genrule {
     name: "domaingeneratorpolicyrule_gen",
     enabled: false, // TODO: This module fails to build
@@ -78,6 +81,7 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -86,11 +90,13 @@
         "Settings/device_for_product_strategies.pfw",
     ],
 }
+
 // This is for Settings generation, must use socket port, so userdebug version is required
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp
index f825e5f..eae6ae2 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp
@@ -28,6 +28,7 @@
 // Generate Audio Policy Parameter Framework Product Strategies Structure file from template
 //
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -43,6 +44,7 @@
     sub_dir: "parameter-framework/Structure/Policy",
     required: ["libpolicy-subsystem"],
 }
+
 genrule {
     name: "buildstrategiesstructure_gen",
     defaults: ["buildstrategiesstructurerule"],
@@ -68,6 +70,7 @@
         "PolicySubsystem-CommonTypes.xml",
     ],
 }
+
 genrule {
     name: "domaingeneratorpolicyrule_gen",
     enabled: false, // TODO: This module fails to build
@@ -79,6 +82,7 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -87,11 +91,13 @@
         "Settings/device_for_product_strategies.pfw",
     ],
 }
+
 // This is for Settings generation, must use socket port, so userdebug version is required
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp
index 4a83cbc..4e8654b 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp
@@ -27,6 +27,7 @@
 // Generate Audio Policy Parameter Framework Product Strategies Structure file from template
 //
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -42,6 +43,7 @@
     sub_dir: "parameter-framework/Structure/Policy",
     required: ["libpolicy-subsystem"],
 }
+
 genrule {
     name: "buildstrategiesstructure_gen",
     defaults: ["buildstrategiesstructurerule"],
@@ -67,6 +69,7 @@
         "PolicySubsystem-CommonTypes.xml",
     ],
 }
+
 genrule {
     name: "domaingeneratorpolicyrule_gen",
     enabled: false, // TODO: This module fails to build
@@ -78,6 +81,7 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -95,11 +99,13 @@
         "Settings/device_for_product_strategy_patch.pfw",
     ],
 }
+
 // This is for Settings generation, must use socket port, so userdebug version is required
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp
index 89ab892..e279a8f 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp
@@ -24,6 +24,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -57,10 +58,12 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
@@ -69,6 +72,7 @@
         ":buildcommontypesstructure_gen",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -76,6 +80,7 @@
         ":volumes.pfw",
     ],
 }
+
 prebuilt_etc {
     name: "PolicySubsystem.xml",
     vendor: true,
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp
index 4880547..47b8b54 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp
@@ -24,6 +24,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -45,6 +46,7 @@
         "PolicySubsystem-CommonTypes.xml",
     ],
 }
+
 genrule {
     name: "domaingeneratorpolicyrule_gen",
     enabled: false, // TODO: This module fails to build
@@ -56,10 +58,12 @@
         ":edd_files",
     ],
 }
+
 filegroup {
     name: "audio_policy_pfw_toplevel",
     srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
 }
+
 filegroup {
     name: "audio_policy_pfw_structure_files",
     srcs: [
@@ -68,6 +72,7 @@
         ":buildcommontypesstructure_gen",
     ],
 }
+
 filegroup {
     name: "edd_files",
     srcs: [
@@ -76,6 +81,7 @@
         ":device_for_input_source.pfw",
     ],
 }
+
 prebuilt_etc {
     name: "PolicySubsystem.xml",
     vendor: true,
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.bp
index f7159c5..aa2163e 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.bp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -40,6 +41,6 @@
         "liblog",
         "libutils",
         "libmedia_helper",
-        "libparameter"
+        "libparameter",
     ],
 }
diff --git a/services/audiopolicy/engineconfigurable/tools/Android.bp b/services/audiopolicy/engineconfigurable/tools/Android.bp
index 3aec064..2f77372 100644
--- a/services/audiopolicy/engineconfigurable/tools/Android.bp
+++ b/services/audiopolicy/engineconfigurable/tools/Android.bp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -36,13 +37,13 @@
     name: "buildpolicycriteriontypesrule",
     tools: ["buildPolicyCriterionTypes"],
     cmd: "cp $(locations :audio_policy_configuration_files) $(genDir)/. && " +
-         "cp $(location :audio_policy_configuration_top_file) $(genDir)/audio_policy_configuration.xml && " +
-         "$(location buildPolicyCriterionTypes) " +
-         " --androidaudiobaseheader $(location :libaudio_system_audio_base) " +
-         " --androidaudiocommonbaseheader $(location :libaudio_system_audio_common_base) " +
-         "--audiopolicyconfigurationfile $(genDir)/audio_policy_configuration.xml " +
-         "--criteriontypes $(location :audio_policy_engine_criterion_types_template) " +
-         "--outputfile $(out)",
+        "cp $(location :audio_policy_configuration_top_file) $(genDir)/audio_policy_configuration.xml && " +
+        "$(location buildPolicyCriterionTypes) " +
+        " --androidaudiobaseheader $(location :libaudio_system_audio_base) " +
+        " --androidaudiocommonbaseheader $(location :libaudio_system_audio_common_base) " +
+        "--audiopolicyconfigurationfile $(genDir)/audio_policy_configuration.xml " +
+        "--criteriontypes $(location :audio_policy_engine_criterion_types_template) " +
+        "--outputfile $(out)",
     srcs: [
         // The commented inputs must be provided to use this genrule_defaults
         // @todo uncomment if 1428659 is merged":android_audio_base_header_file",
@@ -81,17 +82,17 @@
         "domainGeneratorConnector",
     ],
     cmd: "mkdir -p $(genDir)/Structure/Policy && " +
-         "cp $(locations :audio_policy_pfw_structure_files) $(genDir)/Structure/Policy && " +
-         "cp $(location :audio_policy_pfw_toplevel) $(genDir)/top_level && " +
-         "$(location domainGeneratorPolicy) " +
-         "--validate " +
-         "--domain-generator-tool $(location domainGeneratorConnector) " +
-         "--toplevel-config $(genDir)/top_level " +
-         "--criteria $(location :audio_policy_engine_criteria) " +
-         "--criteriontypes $(location :audio_policy_engine_criterion_types) " +
-         "--add-edds $(locations :edd_files) " +
-         "--schemas-dir external/parameter-framework/upstream/schemas " +
-         " > $(out)",
+        "cp $(locations :audio_policy_pfw_structure_files) $(genDir)/Structure/Policy && " +
+        "cp $(location :audio_policy_pfw_toplevel) $(genDir)/top_level && " +
+        "$(location domainGeneratorPolicy) " +
+        "--validate " +
+        "--domain-generator-tool $(location domainGeneratorConnector) " +
+        "--toplevel-config $(genDir)/top_level " +
+        "--criteria $(location :audio_policy_engine_criteria) " +
+        "--criteriontypes $(location :audio_policy_engine_criterion_types) " +
+        "--add-edds $(locations :edd_files) " +
+        "--schemas-dir external/parameter-framework/upstream/schemas " +
+        " > $(out)",
     srcs: [
         // The commented inputs must be provided to use this genrule_defaults
         // ":audio_policy_pfw_toplevel",
@@ -118,11 +119,11 @@
 genrule_defaults {
     name: "buildstrategiesstructurerule",
     tools: ["buildStrategiesStructureFile"],
-    cmd: "cp $(locations :audio_policy_engine_configuration_files) $(genDir) && ls -l $(genDir) &&"+
-         "$(location buildStrategiesStructureFile) " +
-         "--audiopolicyengineconfigurationfile $(genDir)/audio_policy_engine_configuration.xml "+
-         "--productstrategiesstructurefile $(location :product_strategies_structure_template) " +
-         "--outputfile $(out)",
+    cmd: "cp $(locations :audio_policy_engine_configuration_files) $(genDir) && ls -l $(genDir) &&" +
+        "$(location buildStrategiesStructureFile) " +
+        "--audiopolicyengineconfigurationfile $(genDir)/audio_policy_engine_configuration.xml " +
+        "--productstrategiesstructurefile $(location :product_strategies_structure_template) " +
+        "--outputfile $(out)",
     srcs: [
         // The commented inputs must be provided to use this genrule_defaults
         // ":audio_policy_engine_configuration_files",
@@ -146,9 +147,9 @@
     name: "buildcommontypesstructurerule",
     tools: ["buildCommonTypesStructureFile"],
     cmd: "$(location buildCommonTypesStructureFile) " +
-         "--androidaudiobaseheader $(location :libaudio_system_audio_base) " +
-         "--commontypesstructure $(location :common_types_structure_template) " +
-         "--outputfile $(out)",
+        "--androidaudiobaseheader $(location :libaudio_system_audio_base) " +
+        "--commontypesstructure $(location :common_types_structure_template) " +
+        "--outputfile $(out)",
     srcs: [
         ":common_types_structure_template",
         ":libaudio_system_audio_base",
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.bp b/services/audiopolicy/engineconfigurable/wrapper/Android.bp
index 0ef0b82..a897880 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.bp
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
index 7d4ccab..98adff0 100644
--- a/services/audiopolicy/enginedefault/Android.bp
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/enginedefault/config/example/Android.bp b/services/audiopolicy/enginedefault/config/example/Android.bp
index 59a704b..f305c39 100644
--- a/services/audiopolicy/enginedefault/config/example/Android.bp
+++ b/services/audiopolicy/enginedefault/config/example/Android.bp
@@ -20,6 +20,7 @@
 }
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -38,16 +39,19 @@
         ":audio_policy_engine_product_strategies.xml",
     ],
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_product_strategies.xml",
     vendor: true,
     src: "phone/audio_policy_engine_product_strategies.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_stream_volumes.xml",
     vendor: true,
     src: "phone/audio_policy_engine_stream_volumes.xml",
 }
+
 prebuilt_etc {
     name: "audio_policy_engine_default_stream_volumes.xml",
     vendor: true,
diff --git a/services/audiopolicy/fuzzer/Android.bp b/services/audiopolicy/fuzzer/Android.bp
index fd240e3..fca02e4 100644
--- a/services/audiopolicy/fuzzer/Android.bp
+++ b/services/audiopolicy/fuzzer/Android.bp
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/fuzzer/aidl/Android.bp b/services/audiopolicy/fuzzer/aidl/Android.bp
index 38a2cde..8b37d36 100644
--- a/services/audiopolicy/fuzzer/aidl/Android.bp
+++ b/services/audiopolicy/fuzzer/aidl/Android.bp
@@ -16,6 +16,10 @@
  *
  ******************************************************************************/
 
+package {
+    default_team: "trendy_team_android_media_audio_framework",
+}
+
 cc_defaults {
     name: "audiopolicy_aidl_fuzzer_defaults",
     shared_libs: [
diff --git a/services/audiopolicy/fuzzer/resources/Android.bp b/services/audiopolicy/fuzzer/resources/Android.bp
index 22ee256..2a2b83b 100644
--- a/services/audiopolicy/fuzzer/resources/Android.bp
+++ b/services/audiopolicy/fuzzer/resources/Android.bp
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/managerdefault/Android.bp b/services/audiopolicy/managerdefault/Android.bp
index a1785da..2f46d48 100644
--- a/services/audiopolicy/managerdefault/Android.bp
+++ b/services/audiopolicy/managerdefault/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -22,6 +23,7 @@
     export_include_dirs: ["."],
 
     shared_libs: [
+        "com.android.media.audio-aconfig-cc",
         "libaudiofoundation",
         "libaudiopolicycomponents",
         "libcutils",
@@ -42,6 +44,9 @@
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "audioclient-types-aidl-cpp",
+        // Flag support
+        "android.media.audiopolicy-aconfig-cc",
+        "com.android.media.audioserver-aconfig-cc",
     ],
 
     header_libs: [
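
The shared_libs added above pair this module with aconfig-generated C++ flag libraries. Each *-aconfig-cc library exports a generated header whose package namespace provides one boolean query per flag; the AudioPolicyManager.cpp hunks below include <android_media_audiopolicy.h> and <com_android_media_audioserver.h> and call functions such as audio_flags::audio_mix_ownership(). A minimal sketch of that consumption pattern follows; the namespace and flag name are taken from this patch, while the stub body exists only so the sketch builds without the generated library.

#include <cstdio>

// Stub standing in for the generated <android_media_audiopolicy.h>; in the real build
// the function reads the aconfig flag storage instead of returning a constant.
namespace android::media::audiopolicy {
inline bool audio_mix_ownership() { return true; }
}

namespace audio_flags = android::media::audiopolicy;

int main() {
    if (audio_flags::audio_mix_ownership()) {
        std::printf("audio_mix_ownership code paths enabled\n");
    } else {
        std::printf("legacy code paths\n");
    }
    return 0;
}
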
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 54a35b6..3bebb11 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -41,6 +41,9 @@
 
 #include <Serializer.h>
 #include <android/media/audio/common/AudioPort.h>
+#include <com_android_media_audio.h>
+#include <android_media_audiopolicy.h>
+#include <com_android_media_audioserver.h>
 #include <cutils/bitops.h>
 #include <cutils/properties.h>
 #include <media/AudioParameter.h>
@@ -56,6 +59,9 @@
 
 namespace android {
 
+
+namespace audio_flags = android::media::audiopolicy;
+
 using android::media::audio::common::AudioDevice;
 using android::media::audio::common::AudioDeviceAddress;
 using android::media::audio::common::AudioPortDeviceExt;
@@ -781,7 +787,11 @@
         .ext.device.type = AUDIO_DEVICE_IN_TELEPHONY_RX, .ext.device.address = ""
     };
     const auto aa = mEngine->getAttributesForStreamType(AUDIO_STREAM_VOICE_CALL);
-    mCallRxSourceClient = startAudioSourceInternal(&source, &aa, 0/*uid*/);
+
+    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+    status_t status = startAudioSource(&source, &aa, &portId, 0 /*uid*/, true /*internal*/);
+    ALOGE_IF(status != OK, "%s: failed to start audio source (%d)", __func__, status);
+    mCallRxSourceClient = mAudioSources.valueFor(portId);
     ALOGE_IF(mCallRxSourceClient == nullptr,
              "%s failed to start Telephony Rx AudioSource", __func__);
 }
@@ -814,9 +824,11 @@
 
     struct audio_port_config source = {};
     srcDevice->toAudioPortConfig(&source);
-    mCallTxSourceClient = new InternalSourceClientDescriptor(
-                callTxSourceClientPortId, mUidCached, aa, source, srcDevice, sinkDevice,
-                mCommunnicationStrategy, toVolumeSource(aa));
+    mCallTxSourceClient = new SourceClientDescriptor(
+                callTxSourceClientPortId, mUidCached, aa, source, srcDevice, AUDIO_STREAM_PATCH,
+                mCommunnicationStrategy, toVolumeSource(aa), true);
+    mCallTxSourceClient->setPreferredDeviceId(sinkDevice->getId());
+
     audio_patch_handle_t patchHandle = AUDIO_PATCH_HANDLE_NONE;
     status_t status = connectAudioSourceToSink(
                 mCallTxSourceClient, sinkDevice, patchBuilder.patch(), patchHandle, mUidCached,
@@ -1043,11 +1055,11 @@
     sp<IOProfile> profile;
     for (const auto& hwModule : hwModules) {
         for (const auto& curProfile : hwModule->getOutputProfiles()) {
-             if (!curProfile->isCompatibleProfile(devices,
+             if (curProfile->getCompatibilityScore(devices,
                      samplingRate, NULL /*updatedSamplingRate*/,
                      format, NULL /*updatedFormat*/,
                      channelMask, NULL /*updatedChannelMask*/,
-                     flags)) {
+                     flags) == IOProfile::NO_MATCH) {
                  continue;
              }
              // reject profiles not corresponding to a device currently available
@@ -1509,11 +1521,30 @@
     }
 
     if (!profile->canOpenNewIo()) {
+        if (!com::android::media::audioserver::direct_track_reprioritization()) {
+            return NAME_NOT_FOUND;
+        } else if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) != 0) {
+            // MMAP gracefully handles lack of an exclusive track resource by mixing
+            // above the audio framework. For AAudio to know that the limit is reached,
+            // return an error.
+            return NAME_NOT_FOUND;
+        } else {
+            // Close outputs on this profile, if available, to free resources for this request
+            for (int i = 0; i < mOutputs.size() && !profile->canOpenNewIo(); i++) {
+                const auto desc = mOutputs.valueAt(i);
+                if (desc->mProfile == profile) {
+                    closeOutput(desc->mIoHandle);
+                }
+            }
+        }
+    }
+
+    // Unable to close streams to find free resources for this request
+    if (!profile->canOpenNewIo()) {
         return NAME_NOT_FOUND;
     }
 
-    sp<SwAudioOutputDescriptor> outputDesc =
-            new SwAudioOutputDescriptor(profile, mpClientInterface);
+    auto outputDesc = sp<SwAudioOutputDescriptor>::make(profile, mpClientInterface);
 
     // An MSD patch may be using the only output stream that can service this request. Release
     // all MSD patches to prioritize this request over any active output on MSD.
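
For orientation, the flag-gated block added in the hunk above behaves as follows: if the profile has no free I/O slot, MMAP_NOIRQ outputs still fail fast (AAudio needs to see the limit), otherwise other outputs opened on the same profile are closed to free a slot, and the request only fails if that still leaves no room. A standalone sketch of that decision flow, with simplified stand-ins for the profile/output descriptors and the flag query, not the AOSP types:

#include <cstdio>
#include <vector>

struct Profile {
    int maxOpen = 1;            // outputs this profile may keep open at once
    int curOpen = 0;
    bool isMmapNoIrq = false;
    bool canOpenNewIo() const { return curOpen < maxOpen; }
};

struct OutputDesc {
    Profile* profile;
    int ioHandle;
};

// Stand-in for com::android::media::audioserver::direct_track_reprioritization().
static bool directTrackReprioritizationEnabled() { return true; }

// Returns true if an I/O slot is (or was made) available on 'profile'.
bool ensureIoSlot(Profile& profile, std::vector<OutputDesc>& outputs) {
    if (profile.canOpenNewIo()) return true;
    if (!directTrackReprioritizationEnabled()) return false;
    // MMAP copes with the missing exclusive track by mixing above the framework,
    // so the limit is reported to the caller instead of closing anything.
    if (profile.isMmapNoIrq) return false;
    for (auto it = outputs.begin(); it != outputs.end() && !profile.canOpenNewIo();) {
        if (it->profile == &profile) {
            std::printf("closing output %d to free a slot\n", it->ioHandle);
            --profile.curOpen;
            it = outputs.erase(it);
        } else {
            ++it;
        }
    }
    return profile.canOpenNewIo();  // still false if nothing could be closed
}

int main() {
    Profile p;
    p.curOpen = 1;                                  // profile already saturated
    std::vector<OutputDesc> outputs = {{&p, 13}};
    std::printf("slot available: %d\n", ensureIoSlot(p, outputs));
    return 0;
}
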
@@ -1606,9 +1637,13 @@
         *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_ULTRASOUND);
     }
 
+    // Use the spatializer output if the content can be spatialized, no preferred mixer
+    // was specified and offload or direct playback is not explicitly requested.
     *isSpatialized = false;
     if (mSpatializerOutput != nullptr
-            && canBeSpatializedInt(attr, config, devices.toTypeAddrVector())) {
+            && canBeSpatializedInt(attr, config, devices.toTypeAddrVector())
+            && prefMixerConfigInfo == nullptr
+            && ((*flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) == 0)) {
         *isSpatialized = true;
         return mSpatializerOutput->mIoHandle;
     }
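
The comment added in this hunk lists the conditions under which the spatializer output is chosen. Restated as a single predicate in a hedged, standalone form (the flag bit values and the upstream canBeSpatializedInt() check are simplified stand-ins):

#include <cstdint>
#include <cstdio>

constexpr uint32_t OUT_FLAG_COMPRESS_OFFLOAD = 1u << 0;  // stand-in bit values
constexpr uint32_t OUT_FLAG_DIRECT           = 1u << 1;

bool useSpatializerOutput(bool haveSpatializerOutput,
                          bool contentCanBeSpatialized,
                          bool hasPreferredMixerConfig,
                          uint32_t outputFlags) {
    // Spatialize only when: a spatializer output exists, the content qualifies,
    // no preferred mixer was requested, and neither offload nor direct was asked for.
    return haveSpatializerOutput
            && contentCanBeSpatialized
            && !hasPreferredMixerConfig
            && (outputFlags & (OUT_FLAG_COMPRESS_OFFLOAD | OUT_FLAG_DIRECT)) == 0;
}

int main() {
    std::printf("%d\n", useSpatializerOutput(true, true, false, 0));                // 1
    std::printf("%d\n", useSpatializerOutput(true, true, false, OUT_FLAG_DIRECT));  // 0
    return 0;
}
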
@@ -3643,6 +3678,7 @@
     status_t res = NO_ERROR;
     bool checkOutputs = false;
     sp<HwModule> rSubmixModule;
+    Vector<AudioMix> registeredMixes;
     // examine each mix's route type
     for (size_t i = 0; i < mixes.size(); i++) {
         AudioMix mix = mixes[i];
@@ -3766,11 +3802,19 @@
                 break;
             } else {
                 checkOutputs = true;
+                registeredMixes.add(mix);
             }
         }
     }
     if (res != NO_ERROR) {
-        unregisterPolicyMixes(mixes);
+        if (audio_flags::audio_mix_ownership()) {
+            // Only unregister mixes that were actually registered to not accidentally unregister
+            // mixes that already existed previously.
+            unregisterPolicyMixes(registeredMixes);
+            registeredMixes.clear();
+        } else {
+            unregisterPolicyMixes(mixes);
+        }
     } else if (checkOutputs) {
         checkForDeviceAndOutputChanges();
         updateCallAndOutputRouting();
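
The audio_mix_ownership() branch above keeps a separate list of the mixes this call actually registered so that, on failure, only those are unregistered and mixes registered by earlier callers are left alone. A small self-contained sketch of that bookkeeping pattern; the registry, names, and error handling here are hypothetical stand-ins:

#include <cstdio>
#include <set>
#include <string>
#include <vector>

// Hypothetical registry used only for this sketch.
static std::set<std::string> gRegistry;

static bool registerOne(const std::string& addr) {
    return gRegistry.insert(addr).second;          // fails if already registered
}
static void unregisterMany(const std::vector<std::string>& addrs) {
    for (const auto& a : addrs) gRegistry.erase(a);
}

// Mirrors the flag-gated branch: roll back only what this call registered.
bool registerMixes(const std::vector<std::string>& mixes) {
    std::vector<std::string> registered;
    bool ok = true;
    for (const auto& m : mixes) {
        if (!registerOne(m)) { ok = false; break; }
        registered.push_back(m);
    }
    if (!ok) unregisterMany(registered);           // pre-existing entries stay registered
    return ok;
}

int main() {
    gRegistry.insert("mix_b");                     // registered by an earlier caller
    bool ok = registerMixes({"mix_a", "mix_b"});   // second one collides
    std::printf("ok=%d, mix_b still registered=%d\n", ok, (int)gRegistry.count("mix_b"));
    return 0;
}
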
@@ -3781,6 +3825,7 @@
 status_t AudioPolicyManager::unregisterPolicyMixes(Vector<AudioMix> mixes)
 {
     ALOGV("unregisterPolicyMixes() num mixes %zu", mixes.size());
+    status_t endResult = NO_ERROR;
     status_t res = NO_ERROR;
     bool checkOutputs = false;
     sp<HwModule> rSubmixModule;
@@ -3793,6 +3838,7 @@
                         AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
                 if (rSubmixModule == 0) {
                     res = INVALID_OPERATION;
+                    endResult = INVALID_OPERATION;
                     continue;
                 }
             }
@@ -3801,6 +3847,7 @@
 
             if (mPolicyMixes.unregisterMix(mix) != NO_ERROR) {
                 res = INVALID_OPERATION;
+                endResult = INVALID_OPERATION;
                 continue;
             }
 
@@ -3813,6 +3860,7 @@
                     if (res != OK) {
                         ALOGE("Error making RemoteSubmix device unavailable for mix "
                               "with type %d, address %s", device, address.c_str());
+                        endResult = INVALID_OPERATION;
                     }
                 }
             }
@@ -3822,19 +3870,47 @@
         } else if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
             if (mPolicyMixes.unregisterMix(mix) != NO_ERROR) {
                 res = INVALID_OPERATION;
+                endResult = INVALID_OPERATION;
                 continue;
             } else {
                 checkOutputs = true;
             }
         }
     }
-    if (res == NO_ERROR && checkOutputs) {
-        checkForDeviceAndOutputChanges();
-        updateCallAndOutputRouting();
+    if (audio_flags::audio_mix_ownership()) {
+        res = endResult;
+        if (res == NO_ERROR && checkOutputs) {
+            checkForDeviceAndOutputChanges();
+            updateCallAndOutputRouting();
+        }
+    } else {
+        if (res == NO_ERROR && checkOutputs) {
+            checkForDeviceAndOutputChanges();
+            updateCallAndOutputRouting();
+        }
     }
     return res;
 }
 
+status_t AudioPolicyManager::getRegisteredPolicyMixes(std::vector<AudioMix>& _aidl_return) {
+    if (!audio_flags::audio_mix_test_api()) {
+        return INVALID_OPERATION;
+    }
+
+    _aidl_return.clear();
+    _aidl_return.reserve(mPolicyMixes.size());
+    for (const auto &policyMix: mPolicyMixes) {
+        _aidl_return.emplace_back(policyMix->mCriteria, policyMix->mMixType,
+                             policyMix->mFormat, policyMix->mRouteFlags, policyMix->mDeviceAddress,
+                             policyMix->mCbFlags);
+        _aidl_return.back().mDeviceType = policyMix->mDeviceType;
+        _aidl_return.back().mToken = policyMix->mToken;
+    }
+
+    ALOGVV("%s() returning %zu registered mixes", __func__, _aidl_return.size());
+    return OK;
+}
+
 status_t AudioPolicyManager::updatePolicyMix(
             const AudioMix& mix,
             const std::vector<AudioMixMatchCriterion>& updatedCriteria) {
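
getRegisteredPolicyMixes() added above is a read-only introspection hook: it is usable only while the audio_mix_test_api flag is on, and it copies the registered mixes out rather than exposing internal state. A sketch of that shape follows; the flag query, Mix type, and status codes are stand-ins, not the AOSP definitions:

#include <cstdio>
#include <string>
#include <vector>

enum Status { OK = 0, INVALID_OPERATION = -38 };

static bool testApiEnabled() { return true; }      // stand-in for audio_mix_test_api()

struct Mix { std::string address; };

static std::vector<Mix> gMixes = {{"addr0"}, {"addr1"}};

Status getRegisteredMixes(std::vector<Mix>& out) {
    if (!testApiEnabled()) return INVALID_OPERATION;  // API is only exposed behind the flag
    out.clear();
    out.reserve(gMixes.size());
    for (const Mix& m : gMixes) out.push_back(m);     // copy a snapshot, never expose internals
    return OK;
}

int main() {
    std::vector<Mix> mixes;
    if (getRegisteredMixes(mixes) == OK) {
        std::printf("got %zu mixes\n", mixes.size());
    }
    return 0;
}
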
@@ -4456,11 +4532,11 @@
             outputDevices = getMsdAudioOutDevices();
         }
         for (const auto& curProfile : hwModule->getOutputProfiles()) {
-            if (!curProfile->isCompatibleProfile(outputDevices,
+            if (curProfile->getCompatibilityScore(outputDevices,
                     config->sample_rate, nullptr /*updatedSamplingRate*/,
                     config->format, nullptr /*updatedFormat*/,
                     config->channel_mask, nullptr /*updatedChannelMask*/,
-                    flags)) {
+                    flags) == IOProfile::NO_MATCH) {
                 continue;
             }
             // reject profiles not corresponding to a device currently available
@@ -4566,15 +4642,17 @@
     for (const auto& hwModule : mHwModules) {
         for (const auto& curProfile : hwModule->getOutputProfiles()) {
             if (curProfile->hasDynamicAudioProfile()
-                    && curProfile->isCompatibleProfile(devices,
-                                                       mixerAttributes->config.sample_rate,
-                                                       nullptr /*updatedSamplingRate*/,
-                                                       mixerAttributes->config.format,
-                                                       nullptr /*updatedFormat*/,
-                                                       mixerAttributes->config.channel_mask,
-                                                       nullptr /*updatedChannelMask*/,
-                                                       flags,
-                                                       false /*exactMatchRequiredForInputFlags*/)) {
+                    && curProfile->getCompatibilityScore(
+                            devices,
+                            mixerAttributes->config.sample_rate,
+                            nullptr /*updatedSamplingRate*/,
+                            mixerAttributes->config.format,
+                            nullptr /*updatedFormat*/,
+                            mixerAttributes->config.channel_mask,
+                            nullptr /*updatedChannelMask*/,
+                            flags,
+                            false /*exactMatchRequiredForInputFlags*/)
+                            != IOProfile::NO_MATCH) {
                 profile = curProfile;
                 break;
             }
@@ -4855,9 +4933,11 @@
     audio_attributes_t attributes = attributes_initializer(AUDIO_USAGE_MEDIA);
     const struct audio_port_config *source = &patch->sources[0];
     sp<SourceClientDescriptor> sourceDesc =
-            new InternalSourceClientDescriptor(
-                portId, uid, attributes, *source, srcDevice, sinkDevice,
-                mEngine->getProductStrategyForAttributes(attributes), toVolumeSource(attributes));
+            new SourceClientDescriptor(
+                portId, uid, attributes, *source, srcDevice, AUDIO_STREAM_PATCH,
+                mEngine->getProductStrategyForAttributes(attributes), toVolumeSource(attributes),
+                true);
+    sourceDesc->setPreferredDeviceId(sinkDevice->getId());
 
     status_t status =
             connectAudioSourceToSink(sourceDesc, sinkDevice, patch, *handle, uid, 0 /* delayMs */);
@@ -4977,14 +5057,15 @@
                 return BAD_VALUE;
             }
 
-            if (!outputDesc->mProfile->isCompatibleProfile(DeviceVector(devDesc),
-                                                           patch->sources[0].sample_rate,
-                                                           NULL,  // updatedSamplingRate
-                                                           patch->sources[0].format,
-                                                           NULL,  // updatedFormat
-                                                           patch->sources[0].channel_mask,
-                                                           NULL,  // updatedChannelMask
-                                                           AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
+            if (outputDesc->mProfile->getCompatibilityScore(
+                    DeviceVector(devDesc),
+                    patch->sources[0].sample_rate,
+                    nullptr,  // updatedSamplingRate
+                    patch->sources[0].format,
+                    nullptr,  // updatedFormat
+                    patch->sources[0].channel_mask,
+                    nullptr,  // updatedChannelMask
+                    AUDIO_OUTPUT_FLAG_NONE /*FIXME*/) == IOProfile::NO_MATCH) {
                 ALOGV("%s profile not supported for device %08x", __func__, devDesc->type());
                 return INVALID_OPERATION;
             }
@@ -5032,17 +5113,18 @@
                 return BAD_VALUE;
             }
 
-            if (!inputDesc->mProfile->isCompatibleProfile(DeviceVector(device),
-                                                          patch->sinks[0].sample_rate,
-                                                          NULL, /*updatedSampleRate*/
-                                                          patch->sinks[0].format,
-                                                          NULL, /*updatedFormat*/
-                                                          patch->sinks[0].channel_mask,
-                                                          NULL, /*updatedChannelMask*/
-                                                          // FIXME for the parameter type,
-                                                          // and the NONE
-                                                          (audio_output_flags_t)
-                                                            AUDIO_INPUT_FLAG_NONE)) {
+            if (inputDesc->mProfile->getCompatibilityScore(
+                    DeviceVector(device),
+                    patch->sinks[0].sample_rate,
+                    nullptr, /*updatedSampleRate*/
+                    patch->sinks[0].format,
+                    nullptr, /*updatedFormat*/
+                    patch->sinks[0].channel_mask,
+                    nullptr, /*updatedChannelMask*/
+                    // FIXME for the parameter type,
+                    // and the NONE
+                    (audio_output_flags_t)
+                    AUDIO_INPUT_FLAG_NONE) == IOProfile::NO_MATCH) {
                 return INVALID_OPERATION;
             }
             // TODO: reconfigure output format and channels here
@@ -5524,7 +5606,7 @@
 status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source,
                                               const audio_attributes_t *attributes,
                                               audio_port_handle_t *portId,
-                                              uid_t uid)
+                                              uid_t uid, bool internal)
 {
     ALOGV("%s", __FUNCTION__);
     *portId = AUDIO_PORT_HANDLE_NONE;
@@ -5557,7 +5639,7 @@
         new SourceClientDescriptor(*portId, uid, *attributes, *source, srcDevice,
                                    mEngine->getStreamTypeForAttributes(*attributes),
                                    mEngine->getProductStrategyForAttributes(*attributes),
-                                   toVolumeSource(*attributes));
+                                   toVolumeSource(*attributes), internal);
 
     status_t status = connectAudioSource(sourceDesc);
     if (status == NO_ERROR) {
@@ -5566,18 +5648,6 @@
     return status;
 }
 
-sp<SourceClientDescriptor> AudioPolicyManager::startAudioSourceInternal(
-        const struct audio_port_config *source, const audio_attributes_t *attributes, uid_t uid)
-{
-    ALOGV("%s", __FUNCTION__);
-    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
-
-    status_t status = startAudioSource(source, attributes, &portId, uid);
-    ALOGE_IF(status != OK, "%s: failed to start audio source (%d)", __func__, status);
-    return mAudioSources.valueFor(portId);
-}
-
-
 status_t AudioPolicyManager::connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
 {
     ALOGV("%s handle %d", __FUNCTION__, sourceDesc->portId());
@@ -5986,15 +6056,26 @@
     // The caller can have the audio config criteria ignored by either passing a null ptr or
     // the AUDIO_CONFIG_INITIALIZER value.
     // If an audio config is specified, current policy is to only allow spatialization for
-    // some positional channel masks and PCM format
+    // some positional channel masks and PCM format and for stereo if low latency performance
+    // mode is not requested.
 
     if (config != nullptr && *config != AUDIO_CONFIG_INITIALIZER) {
-        if (!audio_is_channel_mask_spatialized(config->channel_mask)) {
+        static const bool stereo_spatialization_enabled =
+                property_get_bool("ro.audio.stereo_spatialization_enabled", false);
+        const bool channel_mask_spatialized =
+                (stereo_spatialization_enabled && com_android_media_audio_stereo_spatialization())
+                ? audio_channel_mask_contains_stereo(config->channel_mask)
+                : audio_is_channel_mask_spatialized(config->channel_mask);
+        if (!channel_mask_spatialized) {
             return false;
         }
         if (!audio_is_linear_pcm(config->format)) {
             return false;
         }
+        if (config->channel_mask == AUDIO_CHANNEL_OUT_STEREO
+                && ((attr->flags & AUDIO_FLAG_LOW_LATENCY) != 0)) {
+            return false;
+        }
     }
 
     sp<IOProfile> profile =
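
The hunk above widens spatialization to plain stereo only when both a system property and the com_android_media_audio_stereo_spatialization flag agree, and it still refuses stereo content that explicitly asks for low latency. A hedged restatement of that channel-mask gate; the property read, the flag query, and the mask helpers are simplified stand-ins:

#include <cstdio>

static bool stereoSpatializationSyspropEnabled() { return true; }  // stand-in for the ro.audio sysprop
static bool stereoSpatializationFlagEnabled()    { return true; }  // stand-in for the aconfig flag

enum class Mask { Stereo, Surround5Point1, Mono };

static bool maskContainsStereo(Mask m) { return m == Mask::Stereo || m == Mask::Surround5Point1; }
static bool maskIsMultichannel(Mask m) { return m == Mask::Surround5Point1; }

bool channelMaskSpatializable(Mask mask, bool lowLatencyRequested) {
    const bool stereoAllowed =
            stereoSpatializationSyspropEnabled() && stereoSpatializationFlagEnabled();
    const bool maskOk = stereoAllowed ? maskContainsStereo(mask) : maskIsMultichannel(mask);
    if (!maskOk) return false;
    // Stereo content that asks for low latency is left unspatialized.
    if (mask == Mask::Stereo && lowLatencyRequested) return false;
    return true;
}

int main() {
    std::printf("%d %d\n",
                channelMaskSpatializable(Mask::Stereo, /*lowLatencyRequested=*/false),   // 1
                channelMaskSpatializable(Mask::Stereo, /*lowLatencyRequested=*/true));   // 0
    return 0;
}
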
@@ -6770,6 +6851,12 @@
         closingOutput->stop();
     }
     closingOutput->close();
+    if ((closingOutput->getFlags().output & AUDIO_OUTPUT_FLAG_BIT_PERFECT)
+            == AUDIO_OUTPUT_FLAG_BIT_PERFECT) {
+        for (const auto device : closingOutput->devices()) {
+            device->setPreferredConfig(nullptr);
+        }
+    }
 
     removeOutput(output);
     mPreviousOutputs = mOutputs;
@@ -7659,9 +7746,6 @@
     // Choose an input profile based on the requested capture parameters: select the first available
     // profile supporting all requested parameters.
     // The flags can be ignored if they don't contain a must match flag.
-    //
-    // TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
-    // the best matching profile, not the first one.
 
     using underlying_input_flag_t = std::underlying_type_t<audio_input_flags_t>;
     const underlying_input_flag_t mustMatchFlag = AUDIO_INPUT_FLAG_MMAP_NOIRQ |
@@ -7678,27 +7762,35 @@
             for (const auto& profile : hwModule->getInputProfiles()) {
                 // profile->log();
                 //updatedFormat = format;
-                if (profile->isCompatibleProfile(DeviceVector(device), samplingRate,
-                                                 &samplingRate  /*updatedSamplingRate*/,
-                                                 format,
-                                                 &format,       /*updatedFormat*/
-                                                 channelMask,
-                                                 &channelMask   /*updatedChannelMask*/,
-                                                 // FIXME ugly cast
-                                                 (audio_output_flags_t) flags,
-                                                 true /*exactMatchRequiredForInputFlags*/)) {
+                if (profile->getCompatibilityScore(
+                        DeviceVector(device),
+                        samplingRate,
+                        &updatedSamplingRate,
+                        format,
+                        &updatedFormat,
+                        channelMask,
+                        &updatedChannelMask,
+                        // FIXME ugly cast
+                        (audio_output_flags_t) flags,
+                        true /*exactMatchRequiredForInputFlags*/) == IOProfile::EXACT_MATCH) {
+                    samplingRate = updatedSamplingRate;
+                    format = updatedFormat;
+                    channelMask = updatedChannelMask;
                     return profile;
                 }
-                if (firstInexact == nullptr && profile->isCompatibleProfile(DeviceVector(device),
-                                                 samplingRate,
-                                                 &updatedSamplingRate,
-                                                 format,
-                                                 &updatedFormat,
-                                                 channelMask,
-                                                 &updatedChannelMask,
-                                                 // FIXME ugly cast
-                                                 (audio_output_flags_t) flags,
-                                                 false /*exactMatchRequiredForInputFlags*/)) {
+                if (firstInexact == nullptr
+                        && profile->getCompatibilityScore(
+                                DeviceVector(device),
+                                samplingRate,
+                                &updatedSamplingRate,
+                                format,
+                                &updatedFormat,
+                                channelMask,
+                                &updatedChannelMask,
+                                // FIXME ugly cast
+                                (audio_output_flags_t) flags,
+                                false /*exactMatchRequiredForInputFlags*/)
+                                != IOProfile::NO_MATCH) {
                     firstInexact = profile;
                 }
             }
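
With isCompatibleProfile() replaced by getCompatibilityScore(), the input-profile search above can return immediately on an exact match and fall back to the first profile that scores anything better than NO_MATCH, which is what the removed TODO was asking for. A compact sketch of that selection loop; Score and Profile here are stand-ins for the IOProfile types:

#include <cstdio>
#include <vector>

enum class Score { NO_MATCH, PARTIAL_MATCH, EXACT_MATCH };

struct Profile { const char* name; Score score; };

const Profile* pickProfile(const std::vector<Profile>& profiles) {
    const Profile* firstInexact = nullptr;
    for (const Profile& p : profiles) {
        if (p.score == Score::EXACT_MATCH) return &p;   // best possible, stop here
        if (firstInexact == nullptr && p.score != Score::NO_MATCH) firstInexact = &p;
    }
    return firstInexact;                                // nullptr if nothing matched at all
}

int main() {
    std::vector<Profile> profiles = {{"a", Score::NO_MATCH},
                                     {"b", Score::PARTIAL_MATCH},
                                     {"c", Score::EXACT_MATCH}};
    const Profile* p = pickProfile(profiles);
    std::printf("picked %s\n", p ? p->name : "none");   // picked c
    return 0;
}
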
@@ -8404,6 +8496,12 @@
         ALOGE("%s failed to open output %d", __func__, status);
         return nullptr;
     }
+    if ((flags & AUDIO_OUTPUT_FLAG_BIT_PERFECT) == AUDIO_OUTPUT_FLAG_BIT_PERFECT) {
+        auto portConfig = desc->getConfig();
+        for (const auto& device : devices) {
+            device->setPreferredConfig(&portConfig);
+        }
+    }
 
     // Here is where the out_set_parameters() for card & device gets called
     sp<DeviceDescriptor> device = devices.getDeviceForOpening();
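
Taken together with the closeOutput() hunk earlier in this file, the block above forms a pair: opening a BIT_PERFECT output pins the chosen port config on each routed device via setPreferredConfig(&portConfig), and closing that output clears it again with setPreferredConfig(nullptr). A toy illustration of that set/clear lifecycle; Device and Config are stand-ins for the AOSP descriptors:

#include <cstdio>
#include <optional>

struct Config { int sampleRate; int channels; };

struct Device {
    std::optional<Config> preferred;
    void setPreferredConfig(const Config* c) {
        if (c != nullptr) preferred = *c; else preferred.reset();
    }
};

int main() {
    Device dev;
    const Config bitPerfect{192000, 2};
    dev.setPreferredConfig(&bitPerfect);     // on open of a BIT_PERFECT output
    std::printf("preferred set: %d\n", dev.preferred.has_value());
    dev.setPreferredConfig(nullptr);         // on close of that output
    std::printf("preferred set: %d\n", dev.preferred.has_value());
    return 0;
}
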
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 61be09f..a3232a2 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -292,6 +292,7 @@
 
         virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes);
         virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes);
+        virtual status_t getRegisteredPolicyMixes(std::vector<AudioMix>& mixes) override;
         virtual status_t updatePolicyMix(
                 const AudioMix& mix,
                 const std::vector<AudioMixMatchCriterion>& updatedCriteria) override;
@@ -339,7 +340,8 @@
         virtual status_t startAudioSource(const struct audio_port_config *source,
                                           const audio_attributes_t *attributes,
                                           audio_port_handle_t *portId,
-                                          uid_t uid);
+                                          uid_t uid,
+                                          bool internal = false);
         virtual status_t stopAudioSource(audio_port_handle_t portId);
 
         virtual status_t setMasterMono(bool mono);
@@ -1055,9 +1057,6 @@
         bool isMsdPatch(const audio_patch_handle_t &handle) const;
 
 private:
-        sp<SourceClientDescriptor> startAudioSourceInternal(
-                const struct audio_port_config *source, const audio_attributes_t *attributes,
-                uid_t uid);
 
         void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
 
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index fb55225..cddbf39 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -52,7 +53,7 @@
     static_libs: [
         "libeffectsconfig",
         "libaudiopolicycomponents",
-    ]
+    ],
 }
 
 cc_library {
@@ -75,10 +76,9 @@
     ],
 
     include_dirs: [
-        "frameworks/av/services/audioflinger"
+        "frameworks/av/services/audioflinger",
     ],
 
-
     static_libs: [
         "framework-permission-aidl-cpp",
     ],
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 85b7ad9..71edd57 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -42,15 +42,19 @@
 // ----------------------------------------------------------------------------
 
 AudioPolicyEffects::AudioPolicyEffects(const sp<EffectsFactoryHalInterface>& effectsFactoryHal) {
+    // Note: clang thread-safety permits the ctor to call guarded _l methods without
+    // acquiring the associated mutex capability as standard practice is to assume
+    // single threaded construction and destruction.
+
     // load xml config with effectsFactoryHal
-    status_t loadResult = loadAudioEffectConfig(effectsFactoryHal);
+    status_t loadResult = loadAudioEffectConfig_ll(effectsFactoryHal);
     if (loadResult < 0) {
         ALOGW("Failed to query effect configuration, fallback to load .conf");
         // load automatic audio effect modules
         if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
-            loadAudioEffectConfigLegacy(AUDIO_EFFECT_VENDOR_CONFIG_FILE);
+            loadAudioEffectConfigLegacy_l(AUDIO_EFFECT_VENDOR_CONFIG_FILE);
         } else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) {
-            loadAudioEffectConfigLegacy(AUDIO_EFFECT_DEFAULT_CONFIG_FILE);
+            loadAudioEffectConfigLegacy_l(AUDIO_EFFECT_DEFAULT_CONFIG_FILE);
         }
     } else if (loadResult > 0) {
         ALOGE("Effect config is partially invalid, skipped %d elements", loadResult);
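
The note added at the top of the constructor relies on documented behaviour of clang's thread-safety analysis: constructor and destructor bodies are not checked, so a ctor may call methods that normally require the mutex (the _l / _ll suffix convention) without taking it, on the assumption that construction is single threaded. A self-contained sketch of that convention, using locally defined attribute macros in the spelling the clang documentation uses; this is an illustration, not the audio_utils mutex wrappers:

#include <mutex>

#if defined(__clang__)
#define CAPABILITY(x)  __attribute__((capability(x)))
#define GUARDED_BY(x)  __attribute__((guarded_by(x)))
#define REQUIRES(x)    __attribute__((requires_capability(x)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define GUARDED_BY(x)
#define REQUIRES(x)
#define ACQUIRE(...)
#define RELEASE(...)
#endif

// Minimal annotated mutex so the attributes have a capability to refer to.
class CAPABILITY("mutex") Mutex {
public:
    void lock() ACQUIRE() { mImpl.lock(); }
    void unlock() RELEASE() { mImpl.unlock(); }
private:
    std::mutex mImpl;
};

class Effects {
public:
    // Constructor bodies are not analyzed, so calling the _l method here without
    // holding mMutex produces no warning: construction is assumed single threaded.
    Effects() { loadConfig_l(); }

    void reload() {
        mMutex.lock();
        loadConfig_l();   // outside the ctor, callers must hold mMutex
        mMutex.unlock();
    }

private:
    void loadConfig_l() REQUIRES(mMutex) { ++mLoads; }

    Mutex mMutex;
    int mLoads GUARDED_BY(mMutex) = 0;
};

int main() {
    Effects effects;
    effects.reload();
    return 0;
}
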
@@ -62,35 +66,6 @@
                 std::launch::async, &AudioPolicyEffects::initDefaultDeviceEffects, this);
 }
 
-AudioPolicyEffects::~AudioPolicyEffects()
-{
-    size_t i = 0;
-    // release audio input processing resources
-    for (i = 0; i < mInputSources.size(); i++) {
-        delete mInputSources.valueAt(i);
-    }
-    mInputSources.clear();
-
-    for (i = 0; i < mInputSessions.size(); i++) {
-        mInputSessions.valueAt(i)->mEffects.clear();
-        delete mInputSessions.valueAt(i);
-    }
-    mInputSessions.clear();
-
-    // release audio output processing resources
-    for (i = 0; i < mOutputStreams.size(); i++) {
-        delete mOutputStreams.valueAt(i);
-    }
-    mOutputStreams.clear();
-
-    for (i = 0; i < mOutputSessions.size(); i++) {
-        mOutputSessions.valueAt(i)->mEffects.clear();
-        delete mOutputSessions.valueAt(i);
-    }
-    mOutputSessions.clear();
-}
-
-
 status_t AudioPolicyEffects::addInputEffects(audio_io_handle_t input,
                              audio_source_t inputSource,
                              audio_session_t audioSession)
@@ -101,48 +76,43 @@
     audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ?
                                     AUDIO_SOURCE_VOICE_RECOGNITION : inputSource;
 
-    Mutex::Autolock _l(mLock);
-    ssize_t index = mInputSources.indexOfKey(aliasSource);
-    if (index < 0) {
+    audio_utils::lock_guard _l(mMutex);
+    auto sourceIt = mInputSources.find(aliasSource);
+    if (sourceIt == mInputSources.end()) {
         ALOGV("addInputEffects(): no processing needs to be attached to this source");
         return status;
     }
-    ssize_t idx = mInputSessions.indexOfKey(audioSession);
-    EffectVector *sessionDesc;
-    if (idx < 0) {
-        sessionDesc = new EffectVector(audioSession);
-        mInputSessions.add(audioSession, sessionDesc);
-    } else {
-        // EffectVector is existing and we just need to increase ref count
-        sessionDesc = mInputSessions.valueAt(idx);
+    std::shared_ptr<EffectVector>& sessionDesc = mInputSessions[audioSession];
+    if (sessionDesc == nullptr) {
+        sessionDesc = std::make_shared<EffectVector>(audioSession);
     }
     sessionDesc->mRefCount++;
 
     ALOGV("addInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
     if (sessionDesc->mRefCount == 1) {
         int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
-        for (size_t i = 0; i < effects.size(); i++) {
-            EffectDesc *effect = effects[i];
+        const std::shared_ptr<EffectDescVector>& effects = sourceIt->second;
+        for (const std::shared_ptr<EffectDesc>& effect : *effects) {
             AttributionSourceState attributionSource;
             attributionSource.packageName = "android";
             attributionSource.token = sp<BBinder>::make();
-            sp<AudioEffect> fx = new AudioEffect(attributionSource);
+            auto fx = sp<AudioEffect>::make(attributionSource);
             fx->set(nullptr /*type */, &effect->mUuid, -1 /* priority */, nullptr /* callback */,
                     audioSession, input);
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
                 ALOGW("addInputEffects(): failed to create Fx %s on source %d",
-                      effect->mName, (int32_t)aliasSource);
+                      effect->mName.c_str(), (int32_t)aliasSource);
                 // fx goes out of scope and strong ref on AudioEffect is released
                 continue;
             }
             for (size_t j = 0; j < effect->mParams.size(); j++) {
-                fx->setParameter(effect->mParams[j]);
+                // const_cast here due to API.
+                fx->setParameter(const_cast<effect_param_t*>(effect->mParams[j].get()));
             }
             ALOGV("addInputEffects(): added Fx %s on source: %d",
-                  effect->mName, (int32_t)aliasSource);
-            sessionDesc->mEffects.add(fx);
+                  effect->mName.c_str(), (int32_t)aliasSource);
+            sessionDesc->mEffects.push_back(std::move(fx));
         }
         sessionDesc->setProcessorEnabled(true);
         IPCThreadState::self()->restoreCallingIdentity(token);
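
After the switch from KeyedVector to std::map above, mInputSessions[audioSession] default-constructs an empty shared_ptr the first time a session is seen, and the per-session refcount decides when effects are actually created and torn down. A reduced sketch of that bookkeeping; SessionDesc stands in for EffectVector, and effect creation is replaced by a print:

#include <cstdio>
#include <map>
#include <memory>

struct SessionDesc {
    explicit SessionDesc(int id) : sessionId(id) {}
    int sessionId;
    int refCount = 0;
};

static std::map<int, std::shared_ptr<SessionDesc>> gSessions;

void addSession(int sessionId) {
    // operator[] inserts a null shared_ptr the first time this session is seen.
    std::shared_ptr<SessionDesc>& desc = gSessions[sessionId];
    if (desc == nullptr) desc = std::make_shared<SessionDesc>(sessionId);
    if (++desc->refCount == 1) {
        std::printf("session %d: first user, create effects here\n", sessionId);
    }
}

void releaseSession(int sessionId) {
    auto it = gSessions.find(sessionId);
    if (it == gSessions.end()) return;
    if (--it->second->refCount == 0) {
        std::printf("session %d: last user, effects released\n", sessionId);
        gSessions.erase(it);   // dropping the shared_ptr frees the descriptor
    }
}

int main() {
    addSession(42);
    addSession(42);
    releaseSession(42);
    releaseSession(42);
    return 0;
}
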
@@ -156,18 +126,17 @@
 {
     status_t status = NO_ERROR;
 
-    Mutex::Autolock _l(mLock);
-    ssize_t index = mInputSessions.indexOfKey(audioSession);
-    if (index < 0) {
+    audio_utils::lock_guard _l(mMutex);
+    auto it = mInputSessions.find(audioSession);
+    if (it == mInputSessions.end()) {
         return status;
     }
-    EffectVector *sessionDesc = mInputSessions.valueAt(index);
+    std::shared_ptr<EffectVector> sessionDesc = it->second;
     sessionDesc->mRefCount--;
     ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
     if (sessionDesc->mRefCount == 0) {
         sessionDesc->setProcessorEnabled(false);
-        delete sessionDesc;
-        mInputSessions.removeItemsAt(index);
+        mInputSessions.erase(it);
         ALOGV("releaseInputEffects(): all effects released");
     }
     return status;
@@ -179,24 +148,16 @@
 {
     status_t status = NO_ERROR;
 
-    Mutex::Autolock _l(mLock);
-    size_t index;
-    for (index = 0; index < mInputSessions.size(); index++) {
-        if (mInputSessions.valueAt(index)->mSessionId == audioSession) {
-            break;
-        }
-    }
-    if (index == mInputSessions.size()) {
+    audio_utils::lock_guard _l(mMutex);
+    auto it = mInputSessions.find(audioSession);
+    if (it == mInputSessions.end()) {
         *count = 0;
         return BAD_VALUE;
     }
-    Vector< sp<AudioEffect> > effects = mInputSessions.valueAt(index)->mEffects;
-
-    for (size_t i = 0; i < effects.size(); i++) {
-        effect_descriptor_t desc = effects[i]->descriptor();
-        if (i < *count) {
-            descriptors[i] = desc;
-        }
+    const std::vector<sp<AudioEffect>>& effects = it->second->mEffects;
+    const size_t copysize = std::min(effects.size(), (size_t)*count);
+    for (size_t i = 0; i < copysize; i++) {
+        descriptors[i] = effects[i]->descriptor();
     }
     if (effects.size() > *count) {
         status = NO_MEMORY;
@@ -212,24 +173,16 @@
 {
     status_t status = NO_ERROR;
 
-    Mutex::Autolock _l(mLock);
-    size_t index;
-    for (index = 0; index < mOutputSessions.size(); index++) {
-        if (mOutputSessions.valueAt(index)->mSessionId == audioSession) {
-            break;
-        }
-    }
-    if (index == mOutputSessions.size()) {
+    audio_utils::lock_guard _l(mMutex);
+    auto it = mOutputSessions.find(audioSession);
+    if (it == mOutputSessions.end()) {
         *count = 0;
         return BAD_VALUE;
     }
-    Vector< sp<AudioEffect> > effects = mOutputSessions.valueAt(index)->mEffects;
-
-    for (size_t i = 0; i < effects.size(); i++) {
-        effect_descriptor_t desc = effects[i]->descriptor();
-        if (i < *count) {
-            descriptors[i] = desc;
-        }
+    const std::vector<sp<AudioEffect>>& effects = it->second->mEffects;
+    const size_t copysize = std::min(effects.size(), (size_t)*count);
+    for (size_t i = 0; i < copysize; i++) {
+        descriptors[i] = effects[i]->descriptor();
     }
     if (effects.size() > *count) {
         status = NO_MEMORY;
@@ -245,27 +198,22 @@
 {
     status_t status = NO_ERROR;
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     // create audio processors according to stream
     // FIXME: should we have specific post processing settings for internal streams?
     // default to media for now.
     if (stream >= AUDIO_STREAM_PUBLIC_CNT) {
         stream = AUDIO_STREAM_MUSIC;
     }
-    ssize_t index = mOutputStreams.indexOfKey(stream);
-    if (index < 0) {
+    auto it = mOutputStreams.find(stream);
+    if (it == mOutputStreams.end()) {
         ALOGV("addOutputSessionEffects(): no output processing needed for this stream");
         return NO_ERROR;
     }
 
-    ssize_t idx = mOutputSessions.indexOfKey(audioSession);
-    EffectVector *procDesc;
-    if (idx < 0) {
-        procDesc = new EffectVector(audioSession);
-        mOutputSessions.add(audioSession, procDesc);
-    } else {
-        // EffectVector is existing and we just need to increase ref count
-        procDesc = mOutputSessions.valueAt(idx);
+    std::shared_ptr<EffectVector>& procDesc = mOutputSessions[audioSession];
+    if (procDesc == nullptr) {
+        procDesc = std::make_shared<EffectVector>(audioSession);
     }
     procDesc->mRefCount++;
 
@@ -274,25 +222,24 @@
     if (procDesc->mRefCount == 1) {
         // make sure effects are associated to audio server even if we are executing a binder call
         int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
-        for (size_t i = 0; i < effects.size(); i++) {
-            EffectDesc *effect = effects[i];
+        const std::shared_ptr<EffectDescVector>& effects = it->second;
+        for (const std::shared_ptr<EffectDesc>& effect : *effects) {
             AttributionSourceState attributionSource;
             attributionSource.packageName = "android";
             attributionSource.token = sp<BBinder>::make();
-            sp<AudioEffect> fx = new AudioEffect(attributionSource);
+            auto fx = sp<AudioEffect>::make(attributionSource);
             fx->set(nullptr /* type */, &effect->mUuid, 0 /* priority */, nullptr /* callback */,
                     audioSession, output);
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
                 ALOGE("addOutputSessionEffects(): failed to create Fx  %s on session %d",
-                      effect->mName, audioSession);
+                      effect->mName.c_str(), audioSession);
                 // fx goes out of scope and strong ref on AudioEffect is released
                 continue;
             }
             ALOGV("addOutputSessionEffects(): added Fx %s on session: %d for stream: %d",
-                  effect->mName, audioSession, (int32_t)stream);
-            procDesc->mEffects.add(fx);
+                  effect->mName.c_str(), audioSession, (int32_t)stream);
+            procDesc->mEffects.push_back(std::move(fx));
         }
 
         procDesc->setProcessorEnabled(true);
@@ -305,30 +252,28 @@
                          audio_stream_type_t stream,
                          audio_session_t audioSession)
 {
-    status_t status = NO_ERROR;
     (void) output; // argument not used for now
     (void) stream; // argument not used for now
 
-    Mutex::Autolock _l(mLock);
-    ssize_t index = mOutputSessions.indexOfKey(audioSession);
-    if (index < 0) {
+    audio_utils::lock_guard _l(mMutex);
+    auto it = mOutputSessions.find(audioSession);
+    if (it == mOutputSessions.end()) {
         ALOGV("releaseOutputSessionEffects: no output processing was attached to this stream");
         return NO_ERROR;
     }
 
-    EffectVector *procDesc = mOutputSessions.valueAt(index);
+    std::shared_ptr<EffectVector> procDesc = it->second;
     procDesc->mRefCount--;
     ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d",
           audioSession, procDesc->mRefCount);
     if (procDesc->mRefCount == 0) {
         procDesc->setProcessorEnabled(false);
         procDesc->mEffects.clear();
-        delete procDesc;
-        mOutputSessions.removeItemsAt(index);
+        mOutputSessions.erase(it);
         ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
               audioSession);
     }
-    return status;
+    return NO_ERROR;
 }
 
 status_t AudioPolicyEffects::addSourceDefaultEffect(const effect_uuid_t *type,
@@ -370,17 +315,12 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // Find the EffectDescVector for the given source type, or create a new one if necessary.
-    ssize_t index = mInputSources.indexOfKey(source);
-    EffectDescVector *desc = NULL;
-    if (index < 0) {
-        // No effects for this source type yet.
-        desc = new EffectDescVector();
-        mInputSources.add(source, desc);
-    } else {
-        desc = mInputSources.valueAt(index);
+    std::shared_ptr<EffectDescVector>& desc = mInputSources[source];
+    if (desc == nullptr) {
+        desc = std::make_shared<EffectDescVector>();
     }
 
     // Create a new effect and add it to the vector.
@@ -389,9 +329,9 @@
         ALOGE("addSourceDefaultEffect(): failed to get new unique id.");
         return res;
     }
-    EffectDesc *effect = new EffectDesc(
+    std::shared_ptr<EffectDesc> effect = std::make_shared<EffectDesc>(
             descriptor.name, descriptor.type, opPackageName, descriptor.uuid, priority, *id);
-    desc->mEffects.add(effect);
+    desc->push_back(std::move(effect));
     // TODO(b/71813697): Support setting params as well.
 
     // TODO(b/71814300): Retroactively attach to any existing sources of the given type.
@@ -435,17 +375,13 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // Find the EffectDescVector for the given stream type, or create a new one if necessary.
-    ssize_t index = mOutputStreams.indexOfKey(stream);
-    EffectDescVector *desc = NULL;
-    if (index < 0) {
+    std::shared_ptr<EffectDescVector>& desc = mOutputStreams[stream];
+    if (desc == nullptr) {
         // No effects for this stream type yet.
-        desc = new EffectDescVector();
-        mOutputStreams.add(stream, desc);
-    } else {
-        desc = mOutputStreams.valueAt(index);
+        desc = std::make_shared<EffectDescVector>();
     }
 
     // Create a new effect and add it to the vector.
@@ -454,9 +390,9 @@
         ALOGE("addStreamDefaultEffect(): failed to get new unique id.");
         return res;
     }
-    EffectDesc *effect = new EffectDesc(
+    std::shared_ptr<EffectDesc> effect = std::make_shared<EffectDesc>(
             descriptor.name, descriptor.type, opPackageName, descriptor.uuid, priority, *id);
-    desc->mEffects.add(effect);
+    desc->push_back(std::move(effect));
     // TODO(b/71813697): Support setting params as well.
 
     // TODO(b/71814300): Retroactively attach to any existing streams of the given type.
@@ -475,18 +411,16 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // Check each source type.
-    size_t numSources = mInputSources.size();
-    for (size_t i = 0; i < numSources; ++i) {
+    for (auto& [source, descVector] : mInputSources) {
         // Check each effect for each source.
-        EffectDescVector* descVector = mInputSources[i];
-        for (auto desc = descVector->mEffects.begin(); desc != descVector->mEffects.end(); ++desc) {
+        for (auto desc = descVector->begin(); desc != descVector->end(); ++desc) {
             if ((*desc)->mId == id) {
                 // Found it!
                 // TODO(b/71814300): Remove from any sources the effect was attached to.
-                descVector->mEffects.erase(desc);
+                descVector->erase(desc);
                 // Handles are unique; there can only be one match, so return early.
                 return NO_ERROR;
             }
@@ -506,18 +440,16 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // Check each stream type.
-    size_t numStreams = mOutputStreams.size();
-    for (size_t i = 0; i < numStreams; ++i) {
+    for (auto& [stream, descVector] : mOutputStreams) {
         // Check each effect for each stream.
-        EffectDescVector* descVector = mOutputStreams[i];
-        for (auto desc = descVector->mEffects.begin(); desc != descVector->mEffects.end(); ++desc) {
+        for (auto desc = descVector->begin(); desc != descVector->end(); ++desc) {
             if ((*desc)->mId == id) {
                 // Found it!
                 // TODO(b/71814300): Remove from any streams the effect was attached to.
-                descVector->mEffects.erase(desc);
+                descVector->erase(desc);
                 // Handles are unique; there can only be one match, so return early.
                 return NO_ERROR;
             }
@@ -530,8 +462,8 @@
 
 void AudioPolicyEffects::EffectVector::setProcessorEnabled(bool enabled)
 {
-    for (size_t i = 0; i < mEffects.size(); i++) {
-        mEffects.itemAt(i)->setEnabled(enabled);
+    for (const auto& effect : mEffects) {
+        effect->setEnabled(enabled);
     }
 }
 
@@ -540,7 +472,8 @@
 // Audio processing configuration
 // ----------------------------------------------------------------------------
 
-/*static*/ const char * const AudioPolicyEffects::kInputSourceNames[AUDIO_SOURCE_CNT -1] = {
+// We keep const char* instead of std::string_view, as the comparison is believed to be faster.
+constexpr const char* kInputSourceNames[AUDIO_SOURCE_CNT - 1] = {
     MIC_SRC_TAG,
     VOICE_UL_SRC_TAG,
     VOICE_DL_SRC_TAG,
@@ -567,7 +500,8 @@
     return (audio_source_t)i;
 }
 
-const char *AudioPolicyEffects::kStreamNames[AUDIO_STREAM_PUBLIC_CNT+1] = {
+// +1 required as the stream enum starts from -1
+constexpr const char* kStreamNames[AUDIO_STREAM_PUBLIC_CNT + 1] = {
     AUDIO_STREAM_DEFAULT_TAG,
     AUDIO_STREAM_VOICE_CALL_TAG,
     AUDIO_STREAM_SYSTEM_TAG,
@@ -584,6 +518,7 @@
 
 // returns the audio_stream_type_t enum corresponding to the output stream name, or
 // AUDIO_STREAM_PUBLIC_CNT if no match is found
+/* static */
 audio_stream_type_t AudioPolicyEffects::streamNameToEnum(const char *name)
 {
     int i;
@@ -600,6 +535,7 @@
 // Audio Effect Config parser
 // ----------------------------------------------------------------------------
 
+/* static */
 size_t AudioPolicyEffects::growParamSize(char **param,
                                          size_t size,
                                          size_t *curSize,
@@ -623,7 +559,7 @@
     return pos;
 }
 
-
+/* static */
 size_t AudioPolicyEffects::readParamValue(cnode *node,
                                           char **param,
                                           size_t *curSize,
@@ -692,7 +628,8 @@
     return len;
 }
 
-effect_param_t *AudioPolicyEffects::loadEffectParameter(cnode *root)
+/* static */
+std::shared_ptr<const effect_param_t> AudioPolicyEffects::loadEffectParameter(cnode* root)
 {
     cnode *param;
     cnode *value;
@@ -722,7 +659,7 @@
             *ptr = atoi(param->value);
             fx_param->psize = sizeof(int);
             fx_param->vsize = sizeof(int);
-            return fx_param;
+            return {fx_param, free};
         }
     }
     if (param == NULL || value == NULL) {
@@ -760,42 +697,43 @@
         value = value->next;
     }
 
-    return fx_param;
+    return {fx_param, free};
 
 error:
     free(fx_param);
     return NULL;
 }
 
-void AudioPolicyEffects::loadEffectParameters(cnode *root, Vector <effect_param_t *>& params)
+/* static */
+void AudioPolicyEffects::loadEffectParameters(
+        cnode* root, std::vector<std::shared_ptr<const effect_param_t>>& params)
 {
     cnode *node = root->first_child;
     while (node) {
         ALOGV("loadEffectParameters() loading param %s", node->name);
-        effect_param_t *param = loadEffectParameter(node);
-        if (param != NULL) {
-            params.add(param);
+        const auto param = loadEffectParameter(node);
+        if (param != nullptr) {
+            params.push_back(param);
         }
         node = node->next;
     }
 }
 
-
-AudioPolicyEffects::EffectDescVector *AudioPolicyEffects::loadEffectConfig(
-                                                            cnode *root,
-                                                            const Vector <EffectDesc *>& effects)
+/* static */
+std::shared_ptr<AudioPolicyEffects::EffectDescVector> AudioPolicyEffects::loadEffectConfig(
+        cnode* root, const EffectDescVector& effects)
 {
     cnode *node = root->first_child;
     if (node == NULL) {
         ALOGW("loadInputSource() empty element %s", root->name);
         return NULL;
     }
-    EffectDescVector *desc = new EffectDescVector();
+    auto desc = std::make_shared<EffectDescVector>();
     while (node) {
         size_t i;
 
         for (i = 0; i < effects.size(); i++) {
-            if (strncmp(effects[i]->mName, node->name, EFFECT_STRING_LEN_MAX) == 0) {
+            if (effects[i]->mName == node->name) {
                 ALOGV("loadEffectConfig() found effect %s in list", node->name);
                 break;
             }
@@ -805,23 +743,22 @@
             node = node->next;
             continue;
         }
-        EffectDesc *effect = new EffectDesc(*effects[i]);   // deep copy
+        auto effect = std::make_shared<EffectDesc>(*effects[i]);   // deep copy
         loadEffectParameters(node, effect->mParams);
         ALOGV("loadEffectConfig() adding effect %s uuid %08x",
-              effect->mName, effect->mUuid.timeLow);
-        desc->mEffects.add(effect);
+              effect->mName.c_str(), effect->mUuid.timeLow);
+        desc->push_back(std::move(effect));
         node = node->next;
     }
-    if (desc->mEffects.size() == 0) {
+    if (desc->empty()) {
         ALOGW("loadEffectConfig() no valid effects found in config %s", root->name);
-        delete desc;
-        return NULL;
+        return nullptr;
     }
     return desc;
 }
 
-status_t AudioPolicyEffects::loadInputEffectConfigurations(cnode *root,
-                                                           const Vector <EffectDesc *>& effects)
+status_t AudioPolicyEffects::loadInputEffectConfigurations_l(cnode* root,
+        const EffectDescVector& effects)
 {
     cnode *node = config_find(root, PREPROCESSING_TAG);
     if (node == NULL) {
@@ -831,24 +768,24 @@
     while (node) {
         audio_source_t source = inputSourceNameToEnum(node->name);
         if (source == AUDIO_SOURCE_CNT) {
-            ALOGW("loadInputSources() invalid input source %s", node->name);
+            ALOGW("%s() invalid input source %s", __func__, node->name);
             node = node->next;
             continue;
         }
-        ALOGV("loadInputSources() loading input source %s", node->name);
-        EffectDescVector *desc = loadEffectConfig(node, effects);
+        ALOGV("%s() loading input source %s", __func__, node->name);
+        auto desc = loadEffectConfig(node, effects);
         if (desc == NULL) {
             node = node->next;
             continue;
         }
-        mInputSources.add(source, desc);
+        mInputSources[source] = std::move(desc);
         node = node->next;
     }
     return NO_ERROR;
 }
 
-status_t AudioPolicyEffects::loadStreamEffectConfigurations(cnode *root,
-                                                            const Vector <EffectDesc *>& effects)
+status_t AudioPolicyEffects::loadStreamEffectConfigurations_l(cnode* root,
+        const EffectDescVector& effects)
 {
     cnode *node = config_find(root, OUTPUT_SESSION_PROCESSING_TAG);
     if (node == NULL) {
@@ -858,23 +795,24 @@
     while (node) {
         audio_stream_type_t stream = streamNameToEnum(node->name);
         if (stream == AUDIO_STREAM_PUBLIC_CNT) {
-            ALOGW("loadStreamEffectConfigurations() invalid output stream %s", node->name);
+            ALOGW("%s() invalid output stream %s", __func__, node->name);
             node = node->next;
             continue;
         }
-        ALOGV("loadStreamEffectConfigurations() loading output stream %s", node->name);
-        EffectDescVector *desc = loadEffectConfig(node, effects);
+        ALOGV("%s() loading output stream %s", __func__, node->name);
+        std::shared_ptr<EffectDescVector> desc = loadEffectConfig(node, effects);
         if (desc == NULL) {
             node = node->next;
             continue;
         }
-        mOutputStreams.add(stream, desc);
+        mOutputStreams[stream] = std::move(desc);
         node = node->next;
     }
     return NO_ERROR;
 }
 
-AudioPolicyEffects::EffectDesc *AudioPolicyEffects::loadEffect(cnode *root)
+/* static */
+std::shared_ptr<AudioPolicyEffects::EffectDesc> AudioPolicyEffects::loadEffect(cnode* root)
 {
     cnode *node = config_find(root, UUID_TAG);
     if (node == NULL) {
@@ -885,30 +823,33 @@
         ALOGW("loadEffect() invalid uuid %s", node->value);
         return NULL;
     }
-    return new EffectDesc(root->name, uuid);
+    return std::make_shared<EffectDesc>(root->name, uuid);
 }
 
-status_t AudioPolicyEffects::loadEffects(cnode *root, Vector <EffectDesc *>& effects)
+/* static */
+android::AudioPolicyEffects::EffectDescVector AudioPolicyEffects::loadEffects(cnode *root)
 {
+    EffectDescVector effects;
     cnode *node = config_find(root, EFFECTS_TAG);
     if (node == NULL) {
-        return -ENOENT;
+        ALOGW("%s() Cannot find %s configuration", __func__, EFFECTS_TAG);
+        return effects;
     }
     node = node->first_child;
     while (node) {
         ALOGV("loadEffects() loading effect %s", node->name);
-        EffectDesc *effect = loadEffect(node);
+        auto effect = loadEffect(node);
         if (effect == NULL) {
             node = node->next;
             continue;
         }
-        effects.add(effect);
+        effects.push_back(std::move(effect));
         node = node->next;
     }
-    return NO_ERROR;
+    return effects;
 }
 
-status_t AudioPolicyEffects::loadAudioEffectConfig(
+status_t AudioPolicyEffects::loadAudioEffectConfig_ll(
         const sp<EffectsFactoryHalInterface>& effectsFactoryHal) {
     if (!effectsFactoryHal) {
         ALOGE("%s Null EffectsFactoryHalInterface", __func__);
@@ -924,11 +865,12 @@
 
     auto loadProcessingChain = [](auto& processingChain, auto& streams) {
         for (auto& stream : processingChain) {
-            auto effectDescs = std::make_unique<EffectDescVector>();
+            auto effectDescs = std::make_shared<EffectDescVector>();
             for (auto& effect : stream.effects) {
-                effectDescs->mEffects.add(new EffectDesc{effect->name.c_str(), effect->uuid});
+                effectDescs->push_back(
+                        std::make_shared<EffectDesc>(effect->name, effect->uuid));
             }
-            streams.add(stream.type, effectDescs.release());
+            streams[stream.type] = std::move(effectDescs);
         }
     };
 
@@ -936,26 +878,26 @@
         for (auto& deviceProcess : processingChain) {
             auto effectDescs = std::make_unique<EffectDescVector>();
             for (auto& effect : deviceProcess.effects) {
-                effectDescs->mEffects.add(new EffectDesc{effect->name.c_str(), effect->uuid});
+                effectDescs->push_back(
+                        std::make_shared<EffectDesc>(effect->name, effect->uuid));
             }
-            auto deviceEffects = std::make_unique<DeviceEffects>(
+            auto devEffects = std::make_unique<DeviceEffects>(
                         std::move(effectDescs), deviceProcess.type, deviceProcess.address);
-            devicesEffects.emplace(deviceProcess.address, std::move(deviceEffects));
+            devicesEffects.emplace(deviceProcess.address, std::move(devEffects));
         }
     };
 
+    // access to mInputSources and mOutputStreams requires mMutex.
     loadProcessingChain(processings->preprocess, mInputSources);
     loadProcessingChain(processings->postprocess, mOutputStreams);
 
-    {
-        Mutex::Autolock _l(mLock);
-        loadDeviceProcessingChain(processings->deviceprocess, mDeviceEffects);
-    }
+    // access to mDeviceEffects requires mDeviceEffectsMutex
+    loadDeviceProcessingChain(processings->deviceprocess, mDeviceEffects);
 
     return skippedElements;
 }
 
-status_t AudioPolicyEffects::loadAudioEffectConfigLegacy(const char *path)
+status_t AudioPolicyEffects::loadAudioEffectConfigLegacy_l(const char *path)
 {
     cnode *root;
     char *data;
@@ -967,15 +909,11 @@
     root = config_node("", "");
     config_load(root, data);
 
-    Vector <EffectDesc *> effects;
-    loadEffects(root, effects);
-    loadInputEffectConfigurations(root, effects);
-    loadStreamEffectConfigurations(root, effects);
+    const EffectDescVector effects = loadEffects(root);
 
-    for (size_t i = 0; i < effects.size(); i++) {
-        delete effects[i];
-    }
-
+    // requires mMutex
+    loadInputEffectConfigurations_l(root, effects);
+    loadStreamEffectConfigurations_l(root, effects);
     config_free(root);
     free(root);
     free(data);
@@ -985,14 +923,14 @@
 
 void AudioPolicyEffects::initDefaultDeviceEffects()
 {
-    Mutex::Autolock _l(mLock);
+    std::lock_guard _l(mDeviceEffectsMutex);
     for (const auto& deviceEffectsIter : mDeviceEffects) {
         const auto& deviceEffects =  deviceEffectsIter.second;
-        for (const auto& effectDesc : deviceEffects->mEffectDescriptors->mEffects) {
+        for (const auto& effectDesc : *deviceEffects->mEffectDescriptors) {
             AttributionSourceState attributionSource;
             attributionSource.packageName = "android";
             attributionSource.token = sp<BBinder>::make();
-            sp<AudioEffect> fx = new AudioEffect(attributionSource);
+            sp<AudioEffect> fx = sp<AudioEffect>::make(attributionSource);
             fx->set(EFFECT_UUID_NULL, &effectDesc->mUuid, 0 /* priority */, nullptr /* callback */,
                     AUDIO_SESSION_DEVICE, AUDIO_IO_HANDLE_NONE,
                     AudioDeviceTypeAddr{deviceEffects->getDeviceType(),
@@ -1000,16 +938,16 @@
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
                 ALOGE("%s(): failed to create Fx %s on port type=%d address=%s", __func__,
-                      effectDesc->mName, deviceEffects->getDeviceType(),
+                      effectDesc->mName.c_str(), deviceEffects->getDeviceType(),
                       deviceEffects->getDeviceAddress().c_str());
                 // fx goes out of scope and strong ref on AudioEffect is released
                 continue;
             }
             fx->setEnabled(true);
             ALOGV("%s(): create Fx %s added on port type=%d address=%s", __func__,
-                  effectDesc->mName, deviceEffects->getDeviceType(),
+                  effectDesc->mName.c_str(), deviceEffects->getDeviceType(),
                   deviceEffects->getDeviceAddress().c_str());
-            deviceEffects->mEffects.push_back(fx);
+            deviceEffects->mEffects.push_back(std::move(fx));
         }
     }
 }
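
A side note on the hunks above, outside the patch proper: the KeyedVector-to-std::map migration in addInputEffects()/releaseInputEffects() relies on std::map::operator[] default-constructing a missing entry as a null shared_ptr, so "look up or create" plus ref-counted tear-down collapses into a couple of branches. Below is a minimal standalone sketch of that pattern with hypothetical names (Session, acquire, release); the real code additionally holds mMutex around every access, which is omitted here for brevity.

#include <cassert>
#include <map>
#include <memory>

namespace {

struct Session {
    explicit Session(int id) : sessionId(id) {}
    const int sessionId;
    int refCount = 0;  // guarded by the caller's mutex in the real code
};

std::map<int, std::shared_ptr<Session>> sessions;

void acquire(int id) {
    std::shared_ptr<Session>& s = sessions[id];       // inserts a null entry if absent
    if (s == nullptr) s = std::make_shared<Session>(id);
    ++s->refCount;
}

void release(int id) {
    auto it = sessions.find(id);
    if (it == sessions.end()) return;                 // nothing to release
    if (--it->second->refCount == 0) sessions.erase(it);  // shared_ptr frees the Session
}

}  // namespace

int main() {
    acquire(7);
    acquire(7);
    release(7);
    assert(sessions.count(7) == 1);  // still referenced once
    release(7);
    assert(sessions.empty());        // erased and freed
    return 0;
}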
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index e17df48..a9628c2 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -14,8 +14,7 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_AUDIOPOLICYEFFECTS_H
-#define ANDROID_AUDIOPOLICYEFFECTS_H
+#pragma once
 
 #include <stdlib.h>
 #include <stdio.h>
@@ -23,6 +22,7 @@
 #include <future>
 
 #include <android-base/thread_annotations.h>
+#include <audio_utils/mutex.h>
 #include <cutils/misc.h>
 #include <media/AudioEffect.h>
 #include <media/audiohal/EffectsFactoryHalInterface.h>
@@ -56,44 +56,43 @@
     // First it will look whether vendor specific file exists,
     // otherwise it will parse the system default file.
     explicit AudioPolicyEffects(const sp<EffectsFactoryHalInterface>& effectsFactoryHal);
-    virtual ~AudioPolicyEffects();
 
     // NOTE: methods on AudioPolicyEffects should never be called with the AudioPolicyService
-    // main mutex (mLock) held as they will indirectly call back into AudioPolicyService when
+    // main mutex (mMutex) held as they will indirectly call back into AudioPolicyService when
     // managing audio effects.
 
     // Return a list of effect descriptors for default input effects
     // associated with audioSession
     status_t queryDefaultInputEffects(audio_session_t audioSession,
                              effect_descriptor_t *descriptors,
-                             uint32_t *count);
+                             uint32_t* count) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Add all input effects associated with this input
     // Effects are attached depending on the audio_source_t
     status_t addInputEffects(audio_io_handle_t input,
                              audio_source_t inputSource,
-                             audio_session_t audioSession);
+                             audio_session_t audioSession) EXCLUDES_AudioPolicyEffects_Mutex;
 
 // Release all input effects associated with this input
     status_t releaseInputEffects(audio_io_handle_t input,
-                                 audio_session_t audioSession);
+                                 audio_session_t audioSession) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Return a list of effect descriptors for default output effects
     // associated with audioSession
     status_t queryDefaultOutputSessionEffects(audio_session_t audioSession,
                              effect_descriptor_t *descriptors,
-                             uint32_t *count);
+                             uint32_t* count) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Add all output effects associated to this output
     // Effects are attached depending on the audio_stream_type_t
     status_t addOutputSessionEffects(audio_io_handle_t output,
                              audio_stream_type_t stream,
-                             audio_session_t audioSession);
+                             audio_session_t audioSession) EXCLUDES_AudioPolicyEffects_Mutex;
 
 // Release all output effects associated with this output stream and audio session
     status_t releaseOutputSessionEffects(audio_io_handle_t output,
                              audio_stream_type_t stream,
-                             audio_session_t audioSession);
+                             audio_session_t audioSession) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Add the effect to the list of default effects for sources of type |source|.
     status_t addSourceDefaultEffect(const effect_uuid_t *type,
@@ -101,7 +100,7 @@
                                     const effect_uuid_t *uuid,
                                     int32_t priority,
                                     audio_source_t source,
-                                    audio_unique_id_t* id);
+                                    audio_unique_id_t* id) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Add the effect to the list of default effects for streams of a given usage.
     status_t addStreamDefaultEffect(const effect_uuid_t *type,
@@ -109,36 +108,39 @@
                                     const effect_uuid_t *uuid,
                                     int32_t priority,
                                     audio_usage_t usage,
-                                    audio_unique_id_t* id);
+                                    audio_unique_id_t* id) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Remove the default source effect from wherever it's attached.
-    status_t removeSourceDefaultEffect(audio_unique_id_t id);
+    status_t removeSourceDefaultEffect(audio_unique_id_t id) EXCLUDES_AudioPolicyEffects_Mutex;
 
     // Remove the default stream effect from wherever it's attached.
-    status_t removeStreamDefaultEffect(audio_unique_id_t id);
+    status_t removeStreamDefaultEffect(audio_unique_id_t id) EXCLUDES_AudioPolicyEffects_Mutex;
 
+    // Called by AudioPolicyService::onFirstRef() to load device effects
+    // on a separate worker thread.
+    // TODO(b/319515492) move this initialization after AudioPolicyService::onFirstRef().
     void setDefaultDeviceEffects();
 
 private:
-    void initDefaultDeviceEffects();
 
     // class to store the description of an effects and its parameters
     // as defined in audio_effects.conf
     class EffectDesc {
     public:
-        EffectDesc(const char *name,
+        EffectDesc(std::string_view name,
                    const effect_uuid_t& typeUuid,
                    const String16& opPackageName,
                    const effect_uuid_t& uuid,
                    uint32_t priority,
                    audio_unique_id_t id) :
-                        mName(strdup(name)),
+                        mName(name),
                         mTypeUuid(typeUuid),
                         mOpPackageName(opPackageName),
                         mUuid(uuid),
                         mPriority(priority),
                         mId(id) { }
-        EffectDesc(const char *name, const effect_uuid_t& uuid) :
+        // Modern EffectDesc usage:
+        EffectDesc(std::string_view name, const effect_uuid_t& uuid) :
                         EffectDesc(name,
                                    *EFFECT_UUID_NULL,
                                    String16(""),
@@ -146,67 +148,36 @@
                                    0,
                                    AUDIO_UNIQUE_ID_ALLOCATE) { }
         EffectDesc(const EffectDesc& orig) :
-                        mName(strdup(orig.mName)),
+                        mName(orig.mName),
                         mTypeUuid(orig.mTypeUuid),
                         mOpPackageName(orig.mOpPackageName),
                         mUuid(orig.mUuid),
                         mPriority(orig.mPriority),
-                        mId(orig.mId) {
-                            // deep copy mParams
-                            for (size_t k = 0; k < orig.mParams.size(); k++) {
-                                effect_param_t *origParam = orig.mParams[k];
-                                // psize and vsize are rounded up to an int boundary for allocation
-                                size_t origSize = sizeof(effect_param_t) +
-                                                  ((origParam->psize + 3) & ~3) +
-                                                  ((origParam->vsize + 3) & ~3);
-                                effect_param_t *dupParam = (effect_param_t *) malloc(origSize);
-                                memcpy(dupParam, origParam, origSize);
-                                // This works because the param buffer allocation is also done by
-                                // multiples of 4 bytes originally. In theory we should memcpy only
-                                // the actual param size, that is without rounding vsize.
-                                mParams.add(dupParam);
-                            }
-                        }
-        /*virtual*/ ~EffectDesc() {
-            free(mName);
-            for (size_t k = 0; k < mParams.size(); k++) {
-                free(mParams[k]);
-            }
-        }
-        char *mName;
-        effect_uuid_t mTypeUuid;
-        String16 mOpPackageName;
-        effect_uuid_t mUuid;
-        int32_t mPriority;
-        audio_unique_id_t mId;
-        Vector <effect_param_t *> mParams;
+                        mId(orig.mId),
+                        mParams(orig.mParams) { }
+
+        const std::string mName;
+        const effect_uuid_t mTypeUuid;
+        const String16 mOpPackageName;
+        const effect_uuid_t mUuid;
+        const int32_t mPriority;
+        const audio_unique_id_t mId;
+        std::vector<std::shared_ptr<const effect_param_t>> mParams;
     };
 
-    // class to store voctor of EffectDesc
-    class EffectDescVector {
-    public:
-        EffectDescVector() {}
-        /*virtual*/ ~EffectDescVector() {
-            for (size_t j = 0; j < mEffects.size(); j++) {
-                delete mEffects[j];
-            }
-        }
-        Vector <EffectDesc *> mEffects;
-    };
+    using EffectDescVector = std::vector<std::shared_ptr<EffectDesc>>;
 
-    // class to store voctor of AudioEffects
     class EffectVector {
     public:
-        explicit EffectVector(audio_session_t session) : mSessionId(session), mRefCount(0) {}
-        /*virtual*/ ~EffectVector() {}
+        explicit EffectVector(audio_session_t session) : mSessionId(session) {}
 
         // Enable or disable all effects in effect vector
         void setProcessorEnabled(bool enabled);
 
         const audio_session_t mSessionId;
-        // AudioPolicyManager keeps mLock, no need for lock on reference count here
-        int mRefCount;
-        Vector< sp<AudioEffect> >mEffects;
+        // AudioPolicyManager keeps mMutex, no need for lock on reference count here
+        int mRefCount = 0;
+        std::vector<sp<AudioEffect>> mEffects;
     };
 
     /**
@@ -215,12 +186,11 @@
     class DeviceEffects {
     public:
         DeviceEffects(std::unique_ptr<EffectDescVector> effectDescriptors,
-                               audio_devices_t device, const std::string& address) :
+                               audio_devices_t device, std::string_view address) :
             mEffectDescriptors(std::move(effectDescriptors)),
             mDeviceType(device), mDeviceAddress(address) {}
-        /*virtual*/ ~DeviceEffects() = default;
 
-        std::vector< sp<AudioEffect> > mEffects;
+        std::vector<sp<AudioEffect>> mEffects;
         audio_devices_t getDeviceType() const { return mDeviceType; }
         std::string getDeviceAddress() const { return mDeviceAddress; }
         const std::unique_ptr<EffectDescVector> mEffectDescriptors;
@@ -231,65 +201,98 @@
 
     };
 
-    static const char * const kInputSourceNames[AUDIO_SOURCE_CNT -1];
+    // Called on an async thread because it creates AudioEffects
+    // which register with AudioFlinger and AudioPolicy.
+    // We must therefore exclude the EffectHandle_Mutex.
+    void initDefaultDeviceEffects() EXCLUDES(mDeviceEffectsMutex) EXCLUDES_EffectHandle_Mutex;
+
+    status_t loadAudioEffectConfig_ll(const sp<EffectsFactoryHalInterface>& effectsFactoryHal)
+            REQUIRES(mMutex, mDeviceEffectsMutex);
+
+    // Legacy: Begin methods below.
+    // Parse audio_effects.conf - called from constructor.
+    status_t loadAudioEffectConfigLegacy_l(const char* path) REQUIRES(mMutex);
+
+    // Legacy: Load all automatic effect configurations
+    status_t loadInputEffectConfigurations_l(cnode* root,
+            const EffectDescVector& effects) REQUIRES(mMutex);
+    status_t loadStreamEffectConfigurations_l(cnode* root,
+            const EffectDescVector& effects) REQUIRES(mMutex);
+
+    // Legacy: static methods below.
+
     static audio_source_t inputSourceNameToEnum(const char *name);
 
-    static const char *kStreamNames[AUDIO_STREAM_PUBLIC_CNT+1]; //+1 required as streams start from -1
-    audio_stream_type_t streamNameToEnum(const char *name);
-
-    // Parse audio_effects.conf
-    status_t loadAudioEffectConfigLegacy(const char *path);
-    status_t loadAudioEffectConfig(const sp<EffectsFactoryHalInterface>& effectsFactoryHal);
+    static audio_stream_type_t streamNameToEnum(const char* name);
 
     // Load all effects descriptors in configuration file
-    status_t loadEffects(cnode *root, Vector <EffectDesc *>& effects);
-    EffectDesc *loadEffect(cnode *root);
-
-    // Load all automatic effect configurations
-    status_t loadInputEffectConfigurations(cnode *root, const Vector <EffectDesc *>& effects);
-    status_t loadStreamEffectConfigurations(cnode *root, const Vector <EffectDesc *>& effects);
-    EffectDescVector *loadEffectConfig(cnode *root, const Vector <EffectDesc *>& effects);
+    static EffectDescVector loadEffects(cnode* root);
+    static std::shared_ptr<AudioPolicyEffects::EffectDesc> loadEffect(cnode* root);
+    static std::shared_ptr<EffectDescVector> loadEffectConfig(cnode* root,
+            const EffectDescVector& effects);
 
     // Load all automatic effect parameters
-    void loadEffectParameters(cnode *root, Vector <effect_param_t *>& params);
-    effect_param_t *loadEffectParameter(cnode *root);
-    size_t readParamValue(cnode *node,
+    static void loadEffectParameters(
+            cnode* root, std::vector<std::shared_ptr<const effect_param_t>>& params);
+
+    // loadEffectParameter returns a shared_ptr instead of a unique_ptr as there may
+    // be multiple references to the same effect parameter.
+    static std::shared_ptr<const effect_param_t> loadEffectParameter(cnode* root);
+    static size_t readParamValue(cnode* node,
                           char **param,
                           size_t *curSize,
                           size_t *totSize);
-    size_t growParamSize(char **param,
+    static size_t growParamSize(char** param,
                          size_t size,
                          size_t *curSize,
                          size_t *totSize);
 
+    // Legacy: End methods above.
+
+    // Note: The association of Effects to audio source, session, or stream
+    // is done through std::map instead of std::unordered_map.  This gives
+    // better reproducibility of issues, since map is ordered and more predictable
+    // in enumeration.
+
     // protects access to mInputSources, mInputSessions, mOutputStreams, mOutputSessions
-    // never hold AudioPolicyService::mLock when calling AudioPolicyEffects methods as
+    // never hold AudioPolicyService::mMutex when calling AudioPolicyEffects methods as
     // those can call back into AudioPolicyService methods and try to acquire the mutex
-    Mutex mLock;
+    mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kAudioPolicyEffects_Mutex};
     // Automatic input effects are configured per audio_source_t
-    KeyedVector< audio_source_t, EffectDescVector* > mInputSources;
-    // Automatic input effects are unique for audio_io_handle_t
-    KeyedVector< audio_session_t, EffectVector* > mInputSessions;
+    std::map<audio_source_t, std::shared_ptr<EffectDescVector>> mInputSources
+            GUARDED_BY(mMutex);
+    // Automatic input effects are unique for an audio_session_t.
+    std::map<audio_session_t, std::shared_ptr<EffectVector>> mInputSessions
+            GUARDED_BY(mMutex);
 
     // Automatic output effects are organized per audio_stream_type_t
-    KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams;
-    // Automatic output effects are unique for audiosession ID
-    KeyedVector< audio_session_t, EffectVector* > mOutputSessions;
+    std::map<audio_stream_type_t, std::shared_ptr<EffectDescVector>> mOutputStreams
+            GUARDED_BY(mMutex);
+    // Automatic output effects are unique for an audio_session_t.
+    std::map<audio_session_t, std::shared_ptr<EffectVector>> mOutputSessions
+            GUARDED_BY(mMutex);
 
     /**
      * @brief mDeviceEffects map of device effects indexed by the device address
      */
-    std::map<std::string, std::unique_ptr<DeviceEffects>> mDeviceEffects GUARDED_BY(mLock);
+
+    // mDeviceEffects is never accessed through AudioPolicyEffects methods.
+    // We keep a separate mutex here to catch future methods attempting to access this variable.
+    std::mutex mDeviceEffectsMutex;
+    std::map<std::string, std::unique_ptr<DeviceEffects>> mDeviceEffects
+            GUARDED_BY(mDeviceEffectsMutex);
 
     /**
      * Device Effect initialization must be asynchronous: the audio_policy service parses and init
      * effect on first reference. AudioFlinger will handle effect creation and register these
      * effect on audio_policy service.
-     * We must store the reference of the furture garantee real asynchronous operation.
+     *
+     * The future is associated with the std::async launched thread - no need to lock as
+     * it is only set once on init.  Due to the async nature, it is conceivable that
+     * some device effects are not available immediately after AudioPolicyService::onFirstRef()
+     * while the effects are being created.
      */
     std::future<void> mDefaultDeviceEffectFuture;
 };
 
 } // namespace android
-
-#endif // ANDROID_AUDIOPOLICYEFFECTS_H
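
A second side note, outside the patch proper: the header above explains that effect parameters are now std::shared_ptr<const effect_param_t> because a deep-copied EffectDesc shares the same parameter blob, and the .cpp side pairs this with the "{fx_param, free}" idiom, i.e. a shared_ptr owning a malloc()-allocated struct with free() as its custom deleter. Below is a minimal sketch of that idiom using a hypothetical stand-in struct (not the real effect_param_t layout).

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <vector>

// Hypothetical stand-in for the malloc()-allocated, variable-size effect_param_t.
struct ParamBlob {
    uint32_t psize;
    uint32_t vsize;
};

std::shared_ptr<const ParamBlob> makeParam(uint32_t psize, uint32_t vsize) {
    auto* raw = static_cast<ParamBlob*>(malloc(sizeof(ParamBlob) + psize + vsize));
    if (raw == nullptr) return nullptr;
    memset(raw, 0, sizeof(ParamBlob) + psize + vsize);
    raw->psize = psize;
    raw->vsize = vsize;
    return {raw, free};  // same "{pointer, free}" idiom as loadEffectParameter()
}

int main() {
    std::vector<std::shared_ptr<const ParamBlob>> params;
    params.push_back(makeParam(sizeof(int), sizeof(int)));
    auto copied = params;  // copying the owning container only bumps reference counts
    return (copied[0] && copied[0]->psize == sizeof(int)) ? 0 : 1;
}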
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 509b673..2a4c069 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -114,7 +114,7 @@
 void AudioPolicyService::doOnNewAudioModulesAvailable()
 {
     if (mAudioPolicyManager == NULL) return;
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     mAudioPolicyManager->onNewAudioModulesAvailable();
 }
@@ -140,7 +140,7 @@
     }
 
     ALOGV("setDeviceConnectionState()");
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     status_t status = mAudioPolicyManager->setDeviceConnectionState(
             state, port, encodedFormat);
@@ -162,7 +162,7 @@
                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
         return Status::ok();
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
@@ -190,7 +190,7 @@
     }
 
     ALOGV("handleDeviceConfigChange()");
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     status_t status =  mAudioPolicyManager->handleDeviceConfigChange(
             device, address.c_str(), deviceNameAidl.c_str(), encodedFormat);
@@ -221,7 +221,7 @@
     // acquire lock before calling setMode() so that setMode() + setPhoneState() are an atomic
     // operation from policy manager standpoint (no other operation (e.g track start or stop)
     // can be interleaved).
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     // TODO: check if it is more appropriate to do it in platform specific policy manager
 
     // Audio HAL mode conversion for call redirect modes
@@ -242,7 +242,7 @@
 }
 
 Status AudioPolicyService::getPhoneState(AudioMode* _aidl_return) {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_mode_t_AudioMode(mPhoneState));
     return Status::ok();
 }
@@ -270,7 +270,7 @@
         return binderStatusFromStatusT(BAD_VALUE);
     }
     ALOGV("setForceUse()");
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     mAudioPolicyManager->setForceUse(usage, config);
     onCheckSpatializer_l();
@@ -312,7 +312,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
     ALOGV("getOutput()");
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_io_handle_t_int32_t(mAudioPolicyManager->getOutput(stream)));
@@ -352,7 +352,7 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr, attributionSource)));
 
     ALOGV("%s()", __func__);
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     if (!mPackageManager.allowPlaybackCapture(VALUE_OR_RETURN_BINDER_STATUS(
         aidl2legacy_int32_t_uid_t(attributionSource.uid)))) {
@@ -458,7 +458,7 @@
                                                      sp<AudioPolicyEffects>& effects,
                                                      const char *context)
 {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
     if (index < 0) {
         ALOGE("%s AudioTrack client not found for portId %d", context, portId);
@@ -489,7 +489,7 @@
             ALOGW("Failed to add effects on session %d", client->session);
         }
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     status_t status = mAudioPolicyManager->startOutput(portId);
     if (status == NO_ERROR) {
@@ -531,7 +531,7 @@
             ALOGW("Failed to release effects on session %d", client->session);
         }
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     status_t status = mAudioPolicyManager->stopOutput(portId);
     if (status == NO_ERROR) {
@@ -567,7 +567,7 @@
         audioPolicyEffects->releaseOutputSessionEffects(
             client->io, client->stream, client->session);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (client != nullptr && client->active) {
         onUpdateActiveSpatializerTracks_l();
     }
@@ -691,7 +691,7 @@
         status_t status;
         AudioPolicyInterface::input_type_t inputType;
 
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         {
             AutoCallerClear acc;
             // the audio_in_acoustics_t parameter is ignored by get_input()
@@ -794,7 +794,7 @@
     }
     sp<AudioRecordClient> client;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
 
         ssize_t index = mAudioRecordClients.indexOfKey(portId);
         if (index < 0) {
@@ -817,7 +817,7 @@
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     ALOGW_IF(client->silenced, "startInput on silenced input for port %d, uid %d. Unsilencing.",
             portIdAidl,
@@ -937,7 +937,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     ssize_t index = mAudioRecordClients.indexOfKey(portId);
     if (index < 0) {
@@ -967,7 +967,7 @@
     sp<AudioPolicyEffects>audioPolicyEffects;
     sp<AudioRecordClient> client;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         audioPolicyEffects = mAudioPolicyEffects;
         ssize_t index = mAudioRecordClients.indexOfKey(portId);
         if (index < 0) {
@@ -995,7 +995,7 @@
         }
     }
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         AutoCallerClear acc;
         mAudioPolicyManager->releaseInput(portId);
     }
@@ -1019,7 +1019,7 @@
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     mAudioPolicyManager->initStreamVolume(stream, indexMin, indexMax);
     return binderStatusFromStatusT(NO_ERROR);
@@ -1043,7 +1043,7 @@
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->setStreamVolumeIndex(stream,
                                                                              index,
@@ -1065,7 +1065,7 @@
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getStreamVolumeIndex(stream, &index, device)));
@@ -1090,7 +1090,7 @@
     if (!settingsAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(
             mAudioPolicyManager->setVolumeIndexForAttributes(attributes, index, device));
@@ -1110,7 +1110,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getVolumeIndexForAttributes(attributes, index, device)));
@@ -1129,7 +1129,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getMinVolumeIndexForAttributes(attributes, index)));
@@ -1148,7 +1148,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getMaxVolumeIndexForAttributes(attributes, index)));
@@ -1190,7 +1190,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForAttributes(aa, &devices, forVolume)));
@@ -1210,7 +1210,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
             legacy2aidl_audio_io_handle_t_int32_t(mAudioPolicyManager->getOutputForEffect(&desc)));
@@ -1235,7 +1235,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(
             mAudioPolicyManager->registerEffect(&desc, io, strategy, session, id));
@@ -1247,7 +1247,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->unregisterEffect(id));
 }
@@ -1258,7 +1258,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->setEffectEnabled(id, enabled));
 }
@@ -1277,7 +1277,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->moveEffectsToIo(ids, io));
 }
@@ -1295,7 +1295,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isStreamActive(stream, inPastMs);
     return Status::ok();
@@ -1315,7 +1315,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
     return Status::ok();
@@ -1327,7 +1327,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isSourceActive(source);
     return Status::ok();
@@ -1339,7 +1339,7 @@
         return NO_INIT;
     }
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         audioPolicyEffects = mAudioPolicyEffects;
     }
     if (audioPolicyEffects == 0) {
@@ -1463,7 +1463,7 @@
             convertRange(systemUsagesAidl.begin(), systemUsagesAidl.begin() + size,
                          std::back_inserter(systemUsages), aidl2legacy_AudioUsage_audio_usage_t)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1483,7 +1483,7 @@
     audio_flags_mask_t capturePolicy = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_flags_mask_t_mask(capturePolicyAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         ALOGV("%s() mAudioPolicyManager == NULL", __func__);
         return binderStatusFromStatusT(NO_INIT);
@@ -1500,7 +1500,7 @@
         ALOGV("mAudioPolicyManager == NULL");
         return binderStatusFromStatusT(AUDIO_OFFLOAD_NOT_SUPPORTED);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_offload_mode_t_AudioOffloadMode(
             mAudioPolicyManager->getOffloadSupport(info)));
@@ -1525,7 +1525,7 @@
 
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     *_aidl_return = mAudioPolicyManager->isDirectOutputSupported(config, attributes);
     return Status::ok();
 }
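
For orientation, the binder methods in this file converge on one skeleton after the lock migration: convert AIDL arguments to legacy types, return NO_INIT when the policy manager is absent, hold mMutex via audio_utils::lock_guard, clear the binder caller identity, and delegate to AudioPolicyManager. A minimal sketch of that shape, for illustration only (the method name and the manager call are hypothetical; the helpers are the ones used in this file):

    Status AudioPolicyService::exampleQuery(int32_t valueAidl, bool* _aidl_return) {
        // AIDL -> legacy conversion; returns a binder error on conversion failure.
        const int value = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(valueAidl));
        if (mAudioPolicyManager == nullptr) {
            return binderStatusFromStatusT(NO_INIT);
        }
        // Hold the service mutex for the duration of the AudioPolicyManager call.
        audio_utils::lock_guard _l(mMutex);
        // Drop the binder caller's identity before calling into the manager.
        AutoCallerClear acc;
        *_aidl_return = mAudioPolicyManager->isExampleSupported(value);  // hypothetical call
        return Status::ok();
    }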
@@ -1561,7 +1561,7 @@
     std::unique_ptr<audio_port_v7[]> ports(new audio_port_v7[num_ports]);
     unsigned int generation;
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1589,7 +1589,7 @@
 
 Status AudioPolicyService::listDeclaredDevicePorts(media::AudioPortRole role,
                                                     std::vector<media::AudioPortFw>* _aidl_return) {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1601,7 +1601,7 @@
 Status AudioPolicyService::getAudioPort(int portId,
                                         media::AudioPortFw* _aidl_return) {
     audio_port_v7 port{ .id = portId };
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1628,7 +1628,7 @@
             aidl2legacy_int32_t_audio_port_handle_t(handleAidl));
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(AudioValidator::validateAudioPatch(patch)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1647,7 +1647,7 @@
 {
     audio_patch_handle_t handle = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_patch_handle_t(handleAidl));
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1672,7 +1672,7 @@
     std::unique_ptr<audio_patch[]> patches(new audio_patch[num_patches]);
     unsigned int generation;
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1710,7 +1710,7 @@
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(AudioValidator::validateAudioPortConfig(config)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1728,7 +1728,7 @@
     audio_devices_t device;
 
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         if (mAudioPolicyManager == NULL) {
             return binderStatusFromStatusT(NO_INIT);
         }
@@ -1750,7 +1750,7 @@
 {
     audio_session_t session = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_session_t(sessionAidl));
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1769,7 +1769,7 @@
             convertRange(mixesAidl.begin(), mixesAidl.begin() + size, std::back_inserter(mixes),
                          aidl2legacy_AudioMix)));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
 
     // loopback|render only need a MediaProjection (checked in caller AudioService.java)
     bool needModifyAudioRouting = std::any_of(mixes.begin(), mixes.end(), [](auto& mix) {
@@ -1810,9 +1810,26 @@
     }
 }
 
+Status
+AudioPolicyService::getRegisteredPolicyMixes(std::vector<::android::media::AudioMix>* mixesAidl) {
+    if (mAudioPolicyManager == nullptr) {
+        return binderStatusFromStatusT(NO_INIT);
+    }
+
+    std::vector<AudioMix> mixes;
+    int status = mAudioPolicyManager->getRegisteredPolicyMixes(mixes);
+
+    for (const auto& mix : mixes) {
+        media::AudioMix aidlMix = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_AudioMix(mix));
+        mixesAidl->push_back(aidlMix);
+    }
+
+    return binderStatusFromStatusT(status);
+}
+
 Status AudioPolicyService::updatePolicyMixes(
         const ::std::vector<::android::media::AudioMixUpdate>& updates) {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     for (const auto& update : updates) {
         AudioMix mix = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_AudioMix(update.audioMix));
         std::vector<AudioMixMatchCriterion> newCriteria =
@@ -1834,7 +1851,7 @@
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
                                                         aidl2legacy_AudioDeviceTypeAddress));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1848,7 +1865,7 @@
 Status AudioPolicyService::removeUidDeviceAffinities(int32_t uidAidl) {
     uid_t uid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(uidAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1867,7 +1884,7 @@
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
                                                         aidl2legacy_AudioDeviceTypeAddress));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1881,7 +1898,7 @@
 Status AudioPolicyService::removeUserIdDeviceAffinities(int32_t userIdAidl) {
     int userId = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(userIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if(!modifyAudioRoutingAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
@@ -1905,7 +1922,7 @@
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             AudioValidator::validateAudioAttributes(attributes, "68953950")));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1926,7 +1943,7 @@
     audio_port_handle_t portId = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
@@ -1942,7 +1959,7 @@
     if (!settingsAllowed()) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->setMasterMono(mono));
 }
@@ -1952,7 +1969,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(mAudioPolicyManager->getMasterMono(_aidl_return));
 }
@@ -1970,7 +1987,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->getStreamVolumeDB(stream, index, device);
     return Status::ok();
@@ -1991,7 +2008,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getSurroundFormats(&numSurroundFormats, surroundFormats.get(),
@@ -2022,7 +2039,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getReportedSurroundFormats(
@@ -2044,7 +2061,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
@@ -2064,7 +2081,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     return binderStatusFromStatusT(
             mAudioPolicyManager->setSurroundFormatEnabled(audioFormat, enabled));
@@ -2087,7 +2104,7 @@
     std::vector<uid_t> uids;
     RETURN_IF_BINDER_ERROR(convertInt32VectorToUidVectorWithLimit(uidsAidl, uids));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setAssistantUids(uids);
     return Status::ok();
 }
@@ -2097,7 +2114,7 @@
     std::vector<uid_t> activeUids;
     RETURN_IF_BINDER_ERROR(convertInt32VectorToUidVectorWithLimit(activeUidsAidl, activeUids));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setActiveAssistantUids(activeUids);
     return Status::ok();
 }
@@ -2107,7 +2124,7 @@
     std::vector<uid_t> uids;
     RETURN_IF_BINDER_ERROR(convertInt32VectorToUidVectorWithLimit(uidsAidl, uids));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setA11yUids(uids);
     return Status::ok();
 }
@@ -2115,7 +2132,7 @@
 Status AudioPolicyService::setCurrentImeUid(int32_t uidAidl)
 {
     uid_t uid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(uidAidl));
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setCurrentImeUid(uid);
     return Status::ok();
 }
@@ -2125,7 +2142,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isHapticPlaybackSupported();
     return Status::ok();
@@ -2136,7 +2153,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isUltrasoundSupported();
     return Status::ok();
@@ -2147,7 +2164,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isHotwordStreamSupported(lookbackAudio);
     return Status::ok();
@@ -2160,7 +2177,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(mAudioPolicyManager->listAudioProductStrategies(strategies)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2180,7 +2197,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getProductStrategyFromAudioAttributes(
                     aa, productStrategy, fallbackOnDefault)));
@@ -2195,7 +2212,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(mAudioPolicyManager->listAudioVolumeGroups(groups)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2214,7 +2231,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(
                     mAudioPolicyManager->getVolumeGroupFromAudioAttributes(
@@ -2225,7 +2242,7 @@
 
 Status AudioPolicyService::setRttEnabled(bool enabled)
 {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mUidPolicy->setRttEnabled(enabled);
     return Status::ok();
 }
@@ -2235,7 +2252,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     AutoCallerClear acc;
     *_aidl_return = mAudioPolicyManager->isCallScreenModeSupported();
     return Status::ok();
@@ -2256,7 +2273,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     status_t status = mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices);
     if (status == NO_ERROR) {
        onCheckSpatializer_l();
@@ -2279,7 +2296,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     status_t status = mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role, devices);
     if (status == NO_ERROR) {
        onCheckSpatializer_l();
@@ -2296,7 +2313,7 @@
    if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     status_t status = mAudioPolicyManager->clearDevicesRoleForStrategy(strategy, role);
     if (status == NO_ERROR) {
        onCheckSpatializer_l();
@@ -2317,7 +2334,7 @@
     if (mAudioPolicyManager == NULL) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndStrategy(strategy, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2347,7 +2364,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->setDevicesRoleForCapturePreset(audioSource, role, devices));
 }
@@ -2367,7 +2384,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->addDevicesRoleForCapturePreset(audioSource, role, devices));
 }
@@ -2387,7 +2404,7 @@
    if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->removeDevicesRoleForCapturePreset(audioSource, role, devices));
 }
@@ -2402,7 +2419,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->clearDevicesRoleForCapturePreset(audioSource, role));
 }
@@ -2420,7 +2437,7 @@
     if (mAudioPolicyManager == nullptr) {
         return binderStatusFromStatusT(NO_INIT);
     }
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2467,7 +2484,7 @@
             convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
                                                         aidl2legacy_AudioDeviceTypeAddress));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     *_aidl_return = mAudioPolicyManager->canBeSpatialized(&attr, &config, devices);
     return Status::ok();
 }
@@ -2486,7 +2503,7 @@
             aidl2legacy_AudioAttributes_audio_attributes_t(attrAidl));
     audio_config_t config = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_AudioConfig_audio_config_t(configAidl, false /*isInput*/));
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     *_aidl_return = static_cast<media::AudioDirectMode>(
             VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_direct_mode_t_int32_t_mask(
                     mAudioPolicyManager->getDirectPlaybackSupport(&attr, &config))));
@@ -2503,7 +2520,7 @@
             aidl2legacy_AudioAttributes_audio_attributes_t(attrAidl));
     AudioProfileVector audioProfiles;
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
             mAudioPolicyManager->getDirectProfilesForAttributes(&attr, audioProfiles)));
     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
@@ -2523,7 +2540,7 @@
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
     std::vector<audio_mixer_attributes_t> mixerAttrs;
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(mAudioPolicyManager->getSupportedMixerAttributes(
                     portId, mixerAttrs)));
@@ -2551,7 +2568,7 @@
     audio_port_handle_t portId = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->setPreferredMixerAttributes(&attr, portId, uid, &mixerAttr));
 }
@@ -2569,7 +2586,7 @@
     audio_port_handle_t portId = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     audio_mixer_attributes_t mixerAttr = AUDIO_MIXER_ATTRIBUTES_INITIALIZER;
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(mAudioPolicyManager->getPreferredMixerAttributes(
@@ -2593,7 +2610,7 @@
     audio_port_handle_t portId = VALUE_OR_RETURN_BINDER_STATUS(
             aidl2legacy_int32_t_audio_port_handle_t(portIdAidl));
 
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     return binderStatusFromStatusT(
             mAudioPolicyManager->clearPreferredMixerAttributes(&attr, portId, uid));
 }
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 5d3788d..bc6498a 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -166,7 +166,8 @@
 BINDER_METHOD_ENTRY(setPreferredMixerAttributes) \
 BINDER_METHOD_ENTRY(getPreferredMixerAttributes) \
 BINDER_METHOD_ENTRY(clearPreferredMixerAttributes) \
-
+BINDER_METHOD_ENTRY(getRegisteredPolicyMixes) \
+                                                     \
 // singleton for Binder Method Statistics for IAudioPolicyService
 static auto& getIAudioPolicyServiceStatistics() {
     using Code = int;
@@ -265,7 +266,7 @@
             .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
             .record(); });
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
 
         // start audio commands thread
         mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
@@ -280,11 +281,11 @@
 
     // load audio processing modules
     const sp<EffectsFactoryHalInterface> effectsFactoryHal = EffectsFactoryHalInterface::create();
-    sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects(effectsFactoryHal);
-    sp<UidPolicy> uidPolicy = new UidPolicy(this);
-    sp<SensorPrivacyPolicy> sensorPrivacyPolicy = new SensorPrivacyPolicy(this);
+    auto audioPolicyEffects = sp<AudioPolicyEffects>::make(effectsFactoryHal);
+    auto uidPolicy = sp<UidPolicy>::make(this);
+    auto sensorPrivacyPolicy = sp<SensorPrivacyPolicy>::make(this);
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         mAudioPolicyEffects = audioPolicyEffects;
         mUidPolicy = uidPolicy;
         mSensorPrivacyPolicy = sensorPrivacyPolicy;
@@ -294,16 +295,16 @@
 
     // Create spatializer if supported
     if (mAudioPolicyManager != nullptr) {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
         AudioDeviceTypeAddrVector devices;
         bool hasSpatializer = mAudioPolicyManager->canBeSpatialized(&attr, nullptr, devices);
         if (hasSpatializer) {
             // Unlock as Spatializer::create() will use the callback and acquire the
             // AudioPolicyService_Mutex.
-            mLock.unlock();
+            mMutex.unlock();
             mSpatializer = Spatializer::create(this, effectsFactoryHal);
-            mLock.lock();
+            mMutex.lock();
         }
         if (mSpatializer == nullptr) {
             // No spatializer created, signal the reason: NO_INIT a failure, OK means intended.
@@ -356,7 +357,7 @@
         ALOGW("%s got NULL client", __FUNCTION__);
         return Status::ok();
     }
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
     pid_t pid = IPCThreadState::self()->getCallingPid();
@@ -379,7 +380,7 @@
 
 Status AudioPolicyService::setAudioPortCallbacksEnabled(bool enabled)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
     pid_t pid = IPCThreadState::self()->getCallingPid();
@@ -394,7 +395,7 @@
 
 Status AudioPolicyService::setAudioVolumeGroupCallbacksEnabled(bool enabled)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
     pid_t pid = IPCThreadState::self()->getCallingPid();
@@ -412,7 +413,7 @@
 {
     bool hasSameUid = false;
     {
-        Mutex::Autolock _l(mNotificationClientsLock);
+        audio_utils::lock_guard _l(mNotificationClientsMutex);
         int64_t token = ((int64_t)uid<<32) | pid;
         mNotificationClients.removeItem(token);
         for (size_t i = 0; i < mNotificationClients.size(); i++) {
@@ -423,7 +424,7 @@
         }
     }
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         if (mAudioPolicyManager && !hasSameUid) {
             // called from binder death notification: no need to clear caller identity
             mAudioPolicyManager->releaseResourcesForUid(uid);
@@ -438,7 +439,7 @@
 
 void AudioPolicyService::doOnAudioPortListUpdate()
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onAudioPortListUpdate();
     }
@@ -451,7 +452,7 @@
 
 void AudioPolicyService::doOnAudioPatchListUpdate()
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onAudioPatchListUpdate();
     }
@@ -464,7 +465,7 @@
 
 void AudioPolicyService::doOnAudioVolumeGroupChanged(volume_group_t group, int flags)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onAudioVolumeGroupChanged(group, flags);
     }
@@ -479,7 +480,7 @@
 
 void AudioPolicyService::doOnDynamicPolicyMixStateUpdate(const String8& regId, int32_t state)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onDynamicPolicyMixStateUpdate(regId, state);
     }
@@ -509,7 +510,7 @@
                                                   audio_patch_handle_t patchHandle,
                                                   audio_source_t source)
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onRecordingConfigurationUpdate(event, clientInfo,
                 clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
@@ -523,7 +524,7 @@
 
 void AudioPolicyService::doOnRoutingUpdated()
 {
-  Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onRoutingUpdated();
     }
@@ -536,7 +537,7 @@
 
 void AudioPolicyService::doOnVolumeRangeInitRequest()
 {
-    Mutex::Autolock _l(mNotificationClientsLock);
+    audio_utils::lock_guard _l(mNotificationClientsMutex);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onVolumeRangeInitRequest();
     }
@@ -544,7 +545,7 @@
 
 void AudioPolicyService::onCheckSpatializer()
 {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     onCheckSpatializer_l();
 }
 
@@ -568,7 +569,7 @@
             const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
             audio_config_base_t config = mSpatializer->getAudioInConfig();
 
-            Mutex::Autolock _l(mLock);
+            audio_utils::lock_guard _l(mMutex);
             status_t status =
                     mAudioPolicyManager->getSpatializerOutput(&config, &attr, &newOutput);
             ALOGV("%s currentOutput %d newOutput %d channel_mask %#x",
@@ -577,13 +578,13 @@
                 return;
             }
             size_t numActiveTracks = countActiveClientsOnOutput_l(newOutput);
-            mLock.unlock();
+            mMutex.unlock();
                 // It is OK to call detachOutput() if none is already attached.
             mSpatializer->detachOutput();
             if (status == NO_ERROR && newOutput != AUDIO_IO_HANDLE_NONE) {
                 status = mSpatializer->attachOutput(newOutput, numActiveTracks);
             }
-            mLock.lock();
+            mMutex.lock();
             if (status != NO_ERROR) {
                 mAudioPolicyManager->releaseSpatializerOutput(newOutput);
             }
@@ -592,7 +593,7 @@
             audio_io_handle_t output = mSpatializer->detachOutput();
 
             if (output != AUDIO_IO_HANDLE_NONE) {
-                Mutex::Autolock _l(mLock);
+                audio_utils::lock_guard _l(mMutex);
                 mAudioPolicyManager->releaseSpatializerOutput(output);
             }
         }
@@ -627,7 +628,7 @@
     audio_io_handle_t output = mSpatializer->getOutput();
     size_t activeClients;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         activeClients = countActiveClientsOnOutput_l(output);
     }
     mSpatializer->updateActiveTracks(activeClients);
@@ -783,12 +784,8 @@
             IPCThreadState::self()->getCallingPid());
 }
 
-static bool dumpTryLock(Mutex& mutex) ACQUIRE(mutex) NO_THREAD_SAFETY_ANALYSIS
-{
-    return mutex.timedLock(kDumpLockTimeoutNs) == NO_ERROR;
-}
-
-static void dumpReleaseLock(Mutex& mutex, bool locked) RELEASE(mutex) NO_THREAD_SAFETY_ANALYSIS
+static void dumpReleaseLock(audio_utils::mutex& mutex, bool locked)
+        RELEASE(mutex) NO_THREAD_SAFETY_ANALYSIS
 {
     if (locked) mutex.unlock();
 }
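
dump() keeps the service's try-lock convention, now expressed on audio_utils::mutex: attempt the lock with a timeout, print the deadlock warning and continue unlocked if it fails, and let dumpReleaseLock() unlock only when the lock was actually taken. A condensed illustration of that flow (dump content elided; names are the ones used in this file):

    status_t dumpWithTimedLock(int fd, audio_utils::mutex& mutex)
            NO_THREAD_SAFETY_ANALYSIS {  // conditional locking defeats the static analysis
        const bool locked = mutex.try_lock(kDumpLockTimeoutNs);  // false: possible deadlock
        if (!locked) {
            String8 result(kDeadlockedString);
            write(fd, result.c_str(), result.size());
            // keep dumping without the lock instead of hanging the dumpsys client
        }
        // ... dump state to fd here ...
        dumpReleaseLock(mutex, locked);  // unlocks only if 'locked' is true
        return NO_ERROR;
    }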
@@ -825,7 +822,7 @@
 
 void AudioPolicyService::updateUidStates()
 {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     updateUidStates_l();
 }
 
@@ -1027,7 +1024,7 @@
         bool isTopOrLatestAssistant = latestActiveAssistant == nullptr ? false :
             current->attributionSource.uid == latestActiveAssistant->attributionSource.uid;
 
-        auto canCaptureIfInCallOrCommunication = [&](const auto &recordClient) REQUIRES(mLock) {
+        auto canCaptureIfInCallOrCommunication = [&](const auto &recordClient) REQUIRES(mMutex) {
             uid_t recordUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(
                 recordClient->attributionSource.uid));
             bool canCaptureCall = recordClient->canCaptureOutput;
@@ -1205,11 +1202,12 @@
 }
 
 status_t AudioPolicyService::dump(int fd, const Vector<String16>& args __unused)
+NO_THREAD_SAFETY_ANALYSIS  // update for trylock.
 {
     if (!dumpAllowed()) {
         dumpPermissionDenial(fd);
     } else {
-        const bool locked = dumpTryLock(mLock);
+        const bool locked = mMutex.try_lock(kDumpLockTimeoutNs);
         if (!locked) {
             String8 result(kDeadlockedString);
             write(fd, result.c_str(), result.size());
@@ -1238,7 +1236,7 @@
 
         mPackageManager.dump(fd);
 
-        dumpReleaseLock(mLock, locked);
+        dumpReleaseLock(mMutex, locked);
 
         if (mSpatializer != nullptr) {
             std::string dumpString = mSpatializer->toString(1 /* level */);
@@ -1351,7 +1349,8 @@
         case TRANSACTION_getDevicesForRoleAndCapturePreset:
         case TRANSACTION_getSpatializer:
         case TRANSACTION_setPreferredMixerAttributes:
-        case TRANSACTION_clearPreferredMixerAttributes: {
+        case TRANSACTION_clearPreferredMixerAttributes:
+        case TRANSACTION_getRegisteredPolicyMixes: {
             if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1483,7 +1482,7 @@
 
     sp<UidPolicy> uidPolicy;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         uidPolicy = mUidPolicy;
     }
     if (uidPolicy) {
@@ -1512,7 +1511,7 @@
 
     sp<UidPolicy> uidPolicy;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         uidPolicy = mUidPolicy;
     }
     if (uidPolicy) {
@@ -1541,7 +1540,7 @@
 
     sp<UidPolicy> uidPolicy;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         uidPolicy = mUidPolicy;
     }
     if (uidPolicy) {
@@ -1579,7 +1578,7 @@
             ActivityManager::PROCESS_STATE_UNKNOWN,
             String16("audioserver"));
     if (!res) {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         mObserverRegistered = true;
     } else {
         ALOGE("UidPolicy::registerSelf linkToDeath failed: %d", res);
@@ -1591,12 +1590,12 @@
 void AudioPolicyService::UidPolicy::unregisterSelf() {
     mAm.unlinkToDeath(this);
     mAm.unregisterUidObserver(this);
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mObserverRegistered = false;
 }
 
 void AudioPolicyService::UidPolicy::binderDied(__unused const wp<IBinder> &who) {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     mCachedUids.clear();
     mObserverRegistered = false;
 }
@@ -1604,7 +1603,7 @@
 void AudioPolicyService::UidPolicy::checkRegistered() {
     bool needToReregister = false;
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         needToReregister = !mObserverRegistered;
     }
     if (needToReregister) {
@@ -1617,7 +1616,7 @@
     if (isServiceUid(uid)) return true;
     checkRegistered();
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         auto overrideIter = mOverrideUids.find(uid);
         if (overrideIter != mOverrideUids.end()) {
             return overrideIter->second.first;
@@ -1632,7 +1631,7 @@
     ActivityManager am;
     bool active = am.isUidActive(uid, String16("audioserver"));
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         mCachedUids.insert(std::pair<uid_t,
                            std::pair<bool, int>>(uid, std::pair<bool, int>(active,
                                                       ActivityManager::PROCESS_STATE_UNKNOWN)));
@@ -1646,7 +1645,7 @@
     }
     checkRegistered();
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         auto overrideIter = mOverrideUids.find(uid);
         if (overrideIter != mOverrideUids.end()) {
             if (overrideIter->second.first) {
@@ -1681,7 +1680,7 @@
         state = am.getUidProcessState(uid, String16("audioserver"));
     }
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         mCachedUids.insert(std::pair<uid_t,
                            std::pair<bool, int>>(uid, std::pair<bool, int>(active, state)));
     }
@@ -1736,7 +1735,7 @@
     bool wasActive = isUidActive(uid);
     int previousState = getUidState(uid);
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         updateUidLocked(uids, uid, active, state, insert);
     }
     if (wasActive != isUidActive(uid) || state != previousState) {
@@ -1771,7 +1770,7 @@
 }
 
 bool AudioPolicyService::UidPolicy::isA11yOnTop() {
-    Mutex::Autolock _l(mLock);
+    audio_utils::lock_guard _l(mMutex);
     for (const auto &uid : mCachedUids) {
         if (!isA11yUid(uid.first)) {
             continue;
@@ -1902,7 +1901,7 @@
 {
     nsecs_t waitTime = -1;
 
-    mLock.lock();
+    audio_utils::unique_lock ul(mMutex);
     while (!exitPending())
     {
         sp<AudioPolicyService> svc;
@@ -1923,27 +1922,27 @@
                     VolumeData *data = (VolumeData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set volume stream %d, \
                             volume %f, output %d", data->mStream, data->mVolume, data->mIO);
-                    mLock.unlock();
+                    ul.unlock();
                     command->mStatus = AudioSystem::setStreamVolume(data->mStream,
                                                                     data->mVolume,
                                                                     data->mIO);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case SET_PARAMETERS: {
                     ParametersData *data = (ParametersData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set parameters string %s, io %d",
                             data->mKeyValuePairs.c_str(), data->mIO);
-                    mLock.unlock();
+                    ul.unlock();
                     command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case SET_VOICE_VOLUME: {
                     VoiceVolumeData *data = (VoiceVolumeData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set voice volume volume %f",
                             data->mVolume);
-                    mLock.unlock();
+                    ul.unlock();
                     command->mStatus = AudioSystem::setVoiceVolume(data->mVolume);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case STOP_OUTPUT: {
                     StopOutputData *data = (StopOutputData *)command->mParam.get();
@@ -1953,9 +1952,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doStopOutput(data->mPortId);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case RELEASE_OUTPUT: {
                     ReleaseOutputData *data = (ReleaseOutputData *)command->mParam.get();
@@ -1965,9 +1964,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doReleaseOutput(data->mPortId);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case CREATE_AUDIO_PATCH: {
                     CreateAudioPatchData *data = (CreateAudioPatchData *)command->mParam.get();
@@ -1976,9 +1975,9 @@
                     if (af == 0) {
                         command->mStatus = PERMISSION_DENIED;
                     } else {
-                        mLock.unlock();
+                        ul.unlock();
                         command->mStatus = af->createAudioPatch(&data->mPatch, &data->mHandle);
-                        mLock.lock();
+                        ul.lock();
                     }
                     } break;
                 case RELEASE_AUDIO_PATCH: {
@@ -1988,9 +1987,9 @@
                     if (af == 0) {
                         command->mStatus = PERMISSION_DENIED;
                     } else {
-                        mLock.unlock();
+                        ul.unlock();
                         command->mStatus = af->releaseAudioPatch(data->mHandle);
-                        mLock.lock();
+                        ul.lock();
                     }
                     } break;
                 case UPDATE_AUDIOPORT_LIST: {
@@ -1999,9 +1998,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnAudioPortListUpdate();
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case UPDATE_AUDIOPATCH_LIST: {
                     ALOGV("AudioCommandThread() processing update audio patch list");
@@ -2009,9 +2008,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnAudioPatchListUpdate();
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case CHANGED_AUDIOVOLUMEGROUP: {
                     AudioVolumeGroupData *data =
@@ -2021,9 +2020,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnAudioVolumeGroupChanged(data->mGroup, data->mFlags);
-                    mLock.lock();
+                    ul.lock();
                     }break;
                 case SET_AUDIOPORT_CONFIG: {
                     SetAudioPortConfigData *data = (SetAudioPortConfigData *)command->mParam.get();
@@ -2032,9 +2031,9 @@
                     if (af == 0) {
                         command->mStatus = PERMISSION_DENIED;
                     } else {
-                        mLock.unlock();
+                        ul.unlock();
                         command->mStatus = af->setAudioPortConfig(&data->mConfig);
-                        mLock.lock();
+                        ul.lock();
                     }
                     } break;
                 case DYN_POLICY_MIX_STATE_UPDATE: {
@@ -2046,9 +2045,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnDynamicPolicyMixStateUpdate(data->mRegId, data->mState);
-                    mLock.lock();
+                    ul.lock();
                     } break;
                 case RECORDING_CONFIGURATION_UPDATE: {
                     RecordingConfigurationUpdateData *data =
@@ -2058,21 +2057,21 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnRecordingConfigurationUpdate(data->mEvent, &data->mClientInfo,
                             &data->mClientConfig, data->mClientEffects,
                             &data->mDeviceConfig, data->mEffects,
                             data->mPatchHandle, data->mSource);
-                    mLock.lock();
+                    ul.lock();
                     } break;
                 case SET_EFFECT_SUSPENDED: {
                     SetEffectSuspendedData *data = (SetEffectSuspendedData *)command->mParam.get();
                     ALOGV("AudioCommandThread() processing set effect suspended");
                     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
                     if (af != 0) {
-                        mLock.unlock();
+                        ul.unlock();
                         af->setEffectSuspended(data->mEffectId, data->mSessionId, data->mSuspended);
-                        mLock.lock();
+                        ul.lock();
                     }
                     } break;
                 case AUDIO_MODULES_UPDATE: {
@@ -2081,9 +2080,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnNewAudioModulesAvailable();
-                    mLock.lock();
+                    ul.lock();
                     } break;
                 case ROUTING_UPDATED: {
                     ALOGV("AudioCommandThread() processing routing update");
@@ -2091,9 +2090,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnRoutingUpdated();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 case UPDATE_UID_STATES: {
@@ -2102,9 +2101,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->updateUidStates();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 case CHECK_SPATIALIZER_OUTPUT: {
@@ -2113,9 +2112,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnCheckSpatializer();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 case UPDATE_ACTIVE_SPATIALIZER_TRACKS: {
@@ -2124,9 +2123,9 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnUpdateActiveSpatializerTracks();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 case VOL_RANGE_INIT_REQUEST: {
@@ -2135,28 +2134,28 @@
                     if (svc == 0) {
                         break;
                     }
-                    mLock.unlock();
+                    ul.unlock();
                     svc->doOnVolumeRangeInitRequest();
-                    mLock.lock();
+                    ul.lock();
                     } break;
 
                 default:
                     ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
                 }
                 {
-                    Mutex::Autolock _l(command->mLock);
+                    audio_utils::lock_guard _l(command->mMutex);
                     if (command->mWaitStatus) {
                         command->mWaitStatus = false;
-                        command->mCond.signal();
+                        command->mCond.notify_one();
                     }
                 }
                 waitTime = -1;
-                // release mLock before releasing strong reference on the service as
+                // release ul before releasing strong reference on the service as
                 // AudioPolicyService destructor calls AudioCommandThread::exit() which
-                // acquires mLock.
-                mLock.unlock();
+                // acquires mMutex.
+                ul.unlock();
                 svc.clear();
-                mLock.lock();
+                ul.lock();
             } else {
                 waitTime = mAudioCommands[0]->mTime - curTime;
                 break;
@@ -2174,9 +2173,10 @@
         if (!exitPending()) {
             ALOGV("AudioCommandThread() going to sleep");
             if (waitTime == -1) {
-                mWaitWorkCV.wait(mLock);
+                mWaitWorkCV.wait(ul);
             } else {
-                mWaitWorkCV.waitRelative(mLock, waitTime);
+                // discard return value.
+                mWaitWorkCV.wait_for(ul, std::chrono::nanoseconds(waitTime));
             }
         }
     }
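
The command thread now drives its loop with an audio_utils::unique_lock, which can be dropped and re-taken around calls that must run unlocked and is handed directly to the condition variable; wait_for() with a std::chrono duration replaces Condition::waitRelative(). A reduced sketch of that structure (command dispatch elided):

    bool threadLoopSketch(audio_utils::mutex& mutex,
                          audio_utils::condition_variable& cv,
                          nsecs_t waitTime) {
        audio_utils::unique_lock ul(mutex);  // held across the loop body
        // ... dequeue a command while holding 'ul' ...
        ul.unlock();                         // release around external calls
        // ... call into AudioSystem / AudioFlinger / the service here ...
        ul.lock();                           // reacquire before touching shared state
        if (waitTime == -1) {
            cv.wait(ul);                                          // untimed wait
        } else {
            cv.wait_for(ul, std::chrono::nanoseconds(waitTime));  // timed wait, result ignored
        }
        return false;                        // 'ul' unlocks automatically on return
    }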
@@ -2184,17 +2184,17 @@
     if (!mAudioCommands.isEmpty()) {
         release_wake_lock(mName.c_str());
     }
-    mLock.unlock();
     return false;
 }
 
 status_t AudioPolicyService::AudioCommandThread::dump(int fd)
+NO_THREAD_SAFETY_ANALYSIS  // trylock
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
     String8 result;
 
-    const bool locked = dumpTryLock(mLock);
+    const bool locked = mMutex.try_lock(kDumpLockTimeoutNs);
     if (!locked) {
         String8 result2(kCmdDeadlockedString);
         write(fd, result2.c_str(), result2.size());
@@ -2217,7 +2217,7 @@
 
     write(fd, result.c_str(), result.size());
 
-    dumpReleaseLock(mLock, locked);
+    dumpReleaseLock(mMutex, locked);
 
     return NO_ERROR;
 }
@@ -2475,14 +2475,15 @@
 status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
 {
     {
-        Mutex::Autolock _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         insertCommand_l(command, delayMs);
-        mWaitWorkCV.signal();
+        mWaitWorkCV.notify_one();
     }
-    Mutex::Autolock _l(command->mLock);
+    audio_utils::unique_lock ul(command->mMutex);
     while (command->mWaitStatus) {
         nsecs_t timeOutNs = kAudioCommandTimeoutNs + milliseconds(delayMs);
-        if (command->mCond.waitRelative(command->mLock, timeOutNs) != NO_ERROR) {
+        if (command->mCond.wait_for(
+                ul, std::chrono::nanoseconds(timeOutNs), getTid()) == std::cv_status::timeout) {
             command->mStatus = TIMED_OUT;
             command->mWaitStatus = false;
         }
@@ -2490,7 +2491,7 @@
     return command->mStatus;
 }
 
-// insertCommand_l() must be called with mLock held
+// insertCommand_l() must be called with mMutex held
 void AudioPolicyService::AudioCommandThread::insertCommand_l(sp<AudioCommand>& command, int delayMs)
 {
     ssize_t i;  // not size_t because i will count down to -1
@@ -2678,9 +2679,9 @@
 {
     ALOGV("AudioCommandThread::exit");
     {
-        AutoMutex _l(mLock);
+        audio_utils::lock_guard _l(mMutex);
         requestExit();
-        mWaitWorkCV.signal();
+        mWaitWorkCV.notify_one();
     }
     // Note that we can call it from the thread loop if all other references have been released
     // but it will safely return WOULD_BLOCK in this case
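
sendCommand() and threadLoop() synchronize per command: the sender enqueues under the thread's mMutex and notifies mWaitWorkCV, then blocks on the command's own mutex and condition until the worker clears mWaitStatus, or until wait_for() times out and the status is forced to TIMED_OUT. A stripped-down sketch of that handshake, assuming audio_utils::condition_variable::wait_for mirrors std::condition_variable and returns std::cv_status (fields mirror AudioCommand):

    struct Request {
        audio_utils::mutex mutex{audio_utils::MutexOrder::kAudioCommand_Mutex};
        audio_utils::condition_variable cond;
        status_t status = NO_ERROR;
        bool waitStatus = true;            // the sender waits while this is true
    };

    status_t waitForCompletion(Request& req, nsecs_t timeoutNs) {
        audio_utils::unique_lock ul(req.mutex);
        while (req.waitStatus) {
            if (req.cond.wait_for(ul, std::chrono::nanoseconds(timeoutNs)) ==
                    std::cv_status::timeout) {
                req.status = TIMED_OUT;    // give up waiting; the worker may still finish later
                req.waitStatus = false;
            }
        }
        return req.status;
    }

    void completeOnWorker(Request& req, status_t result) {
        audio_utils::lock_guard _l(req.mutex);
        req.status = result;
        if (req.waitStatus) {
            req.waitStatus = false;
            req.cond.notify_one();         // wake the waiting sender
        }
    }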
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index aaf0b1b..bd56366 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -20,6 +20,7 @@
 #include <android/media/BnAudioPolicyService.h>
 #include <android/media/GetSpatializerResponse.h>
 #include <android-base/thread_annotations.h>
+#include <audio_utils/mutex.h>
 #include <cutils/misc.h>
 #include <cutils/config_utils.h>
 #include <cutils/compiler.h>
@@ -310,6 +311,8 @@
     binder::Status clearPreferredMixerAttributes(const media::audio::common::AudioAttributes& attr,
                                                  int32_t portId,
                                                  int32_t uid) override;
+    binder::Status getRegisteredPolicyMixes(
+            std::vector <::android::media::AudioMix>* mixes) override;
 
     status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
 
@@ -387,10 +390,10 @@
      * by audio policy manager and attach/detach the spatializer effect accordingly.
      */
     void onCheckSpatializer() override;
-    void onCheckSpatializer_l() REQUIRES(mLock);
+    void onCheckSpatializer_l() REQUIRES(mMutex);
     void doOnCheckSpatializer();
 
-    void onUpdateActiveSpatializerTracks_l() REQUIRES(mLock);
+    void onUpdateActiveSpatializerTracks_l() REQUIRES(mMutex);
     void doOnUpdateActiveSpatializerTracks();
 
 
@@ -402,14 +405,14 @@
                         AudioPolicyService() ANDROID_API;
     virtual             ~AudioPolicyService();
 
-            status_t dumpInternals(int fd) REQUIRES(mLock);
+    status_t dumpInternals(int fd) REQUIRES(mMutex);
 
     // Handles binder shell commands
     virtual status_t shellCommand(int in, int out, int err, Vector<String16>& args);
 
 
     // Sets whether the given UID records only silence
-    virtual void setAppState_l(sp<AudioRecordClient> client, app_state_t state) REQUIRES(mLock);
+    virtual void setAppState_l(sp<AudioRecordClient> client, app_state_t state) REQUIRES(mMutex);
 
     // Overrides the UID state as if it is idle
     status_t handleSetUidState(Vector<String16>& args, int err);
@@ -435,9 +438,9 @@
                            const AttributionSourceState& attributionSource);
 
     void updateUidStates();
-    void updateUidStates_l() REQUIRES(mLock);
+    void updateUidStates_l() REQUIRES(mMutex);
 
-    void silenceAllRecordings_l() REQUIRES(mLock);
+    void silenceAllRecordings_l() REQUIRES(mMutex);
 
     static bool isVirtualSource(audio_source_t source);
 
@@ -510,11 +513,11 @@
         void checkRegistered();
 
         wp<AudioPolicyService> mService;
-        Mutex mLock;
+        audio_utils::mutex mMutex{audio_utils::MutexOrder::kUidPolicy_Mutex};
         ActivityManager mAm;
         bool mObserverRegistered = false;
-        std::unordered_map<uid_t, std::pair<bool, int>> mOverrideUids GUARDED_BY(mLock);
-        std::unordered_map<uid_t, std::pair<bool, int>> mCachedUids GUARDED_BY(mLock);
+        std::unordered_map<uid_t, std::pair<bool, int>> mOverrideUids GUARDED_BY(mMutex);
+        std::unordered_map<uid_t, std::pair<bool, int>> mCachedUids GUARDED_BY(mMutex);
         std::vector<uid_t> mAssistantUids;
         std::vector<uid_t> mActiveAssistantUids;
         std::vector<uid_t> mA11yUids;
@@ -539,6 +542,10 @@
             binder::Status onSensorPrivacyChanged(int toggleType, int sensor,
                                                   bool enabled);
 
+            binder::Status onSensorPrivacyStateChanged(int, int, int) {
+                return binder::Status::ok();
+            }
+
         private:
             wp<AudioPolicyService> mService;
             std::atomic_bool mSensorPrivacyEnabled = false;
@@ -641,8 +648,8 @@
 
             int mCommand;   // SET_VOLUME, SET_PARAMETERS...
             nsecs_t mTime;  // time stamp
-            Mutex mLock;    // mutex associated to mCond
-            Condition mCond; // condition for status return
+            audio_utils::mutex mMutex{audio_utils::MutexOrder::kAudioCommand_Mutex};
+            audio_utils::condition_variable mCond; // condition for status return
             status_t mStatus; // command status
             bool mWaitStatus; // true if caller is waiting for status
             sp<AudioCommandData> mParam;     // command specific parameter data
@@ -730,8 +737,8 @@
             bool mSuspended;
         };
 
-        Mutex   mLock;
-        Condition mWaitWorkCV;
+        mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kCommandThread_Mutex};
+        audio_utils::condition_variable mWaitWorkCV;
         Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
         sp<AudioCommand> mLastCommand;      // last processed command (used by dump)
         String8 mName;                      // string used by wake lock for delayed commands
@@ -996,12 +1003,12 @@
      * @return the number of active tracks.
      */
     size_t countActiveClientsOnOutput_l(
-        audio_io_handle_t output, bool spatializedOnly = true) REQUIRES(mLock);
+            audio_io_handle_t output, bool spatializedOnly = true) REQUIRES(mMutex);
 
-    mutable Mutex mLock;    // prevents concurrent access to AudioPolicy manager functions changing
-                            // device connection state  or routing
-    // Note: lock acquisition order is always mLock > mEffectsLock:
-    // mLock protects AudioPolicyManager methods that can call into audio flinger
+    mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kAudioPolicyService_Mutex};
+    // prevents concurrent access to AudioPolicy manager functions changing
+    // device connection state or routing.
+    // mMutex protects AudioPolicyManager methods that can call into audio flinger
     // and possibly back into audio policy service and acquire mEffectsLock.
     sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
     sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
@@ -1009,29 +1016,30 @@
     AudioPolicyClient *mAudioPolicyClient;
     std::vector<audio_usage_t> mSupportedSystemUsages;
 
-    Mutex mNotificationClientsLock;
+    mutable audio_utils::mutex mNotificationClientsMutex{
+            audio_utils::MutexOrder::kAudioPolicyService_NotificationClientsMutex};
     DefaultKeyedVector<int64_t, sp<NotificationClient>> mNotificationClients
-        GUARDED_BY(mNotificationClientsLock);
+            GUARDED_BY(mNotificationClientsMutex);
     // Manage all effects configured in audio_effects.conf
-    // never hold AudioPolicyService::mLock when calling AudioPolicyEffects methods as
+    // never hold AudioPolicyService::mMutex when calling AudioPolicyEffects methods as
     // those can call back into AudioPolicyService methods and try to acquire the mutex
-    sp<AudioPolicyEffects> mAudioPolicyEffects GUARDED_BY(mLock);
-    audio_mode_t mPhoneState GUARDED_BY(mLock);
-    uid_t mPhoneStateOwnerUid GUARDED_BY(mLock);
+    sp<AudioPolicyEffects> mAudioPolicyEffects GUARDED_BY(mMutex);
+    audio_mode_t mPhoneState GUARDED_BY(mMutex);
+    uid_t mPhoneStateOwnerUid GUARDED_BY(mMutex);
 
-    sp<UidPolicy> mUidPolicy GUARDED_BY(mLock);
-    sp<SensorPrivacyPolicy> mSensorPrivacyPolicy GUARDED_BY(mLock);
+    sp<UidPolicy> mUidPolicy GUARDED_BY(mMutex);
+    sp<SensorPrivacyPolicy> mSensorPrivacyPolicy GUARDED_BY(mMutex);
 
     DefaultKeyedVector<audio_port_handle_t, sp<AudioRecordClient>> mAudioRecordClients
-        GUARDED_BY(mLock);
+            GUARDED_BY(mMutex);
     DefaultKeyedVector<audio_port_handle_t, sp<AudioPlaybackClient>> mAudioPlaybackClients
-        GUARDED_BY(mLock);
+            GUARDED_BY(mMutex);
 
     MediaPackageManager mPackageManager; // To check allowPlaybackCapture
 
     CaptureStateNotifier mCaptureStateNotifier;
 
-    // created in onFirstRef() and never cleared: does not need to be guarded by mLock
+    // created in onFirstRef() and never cleared: does not need to be guarded by mMutex
     sp<Spatializer> mSpatializer;
 
     void *mLibraryHandle = nullptr;
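
In the header, every GUARDED_BY/REQUIRES annotation is retargeted from mLock to the new order-tagged mutexes, so clang's thread-safety analysis keeps verifying that _l-suffixed helpers run with the right mutex held and that guarded members are only touched under it. A tiny self-contained illustration of how these annotations compose (class and member names are hypothetical):

    #include <android-base/thread_annotations.h>
    #include <audio_utils/mutex.h>

    class Annotated {
    public:
        void update() {
            audio_utils::lock_guard _l(mMutex);
            update_l();                     // OK: mMutex is held at this point
        }
    private:
        void update_l() REQUIRES(mMutex) {  // callers must hold mMutex
            ++mCounter;                     // OK: guarded member accessed under its mutex
        }
        mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kAudioPolicyService_Mutex};
        int mCounter GUARDED_BY(mMutex) = 0;
    };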
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index 83030a3..16f3a9a 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -31,6 +31,7 @@
 #include <audio_utils/fixedfft.h>
 #include <com_android_media_audio.h>
 #include <cutils/bitops.h>
+#include <cutils/properties.h>
 #include <hardware/sensors.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -49,12 +50,12 @@
 using aidl_utils::statusTFromBinderStatus;
 using android::content::AttributionSourceState;
 using binder::Status;
+using internal::ToString;
 using media::HeadTrackingMode;
 using media::Pose3f;
 using media::SensorPoseProvider;
 using media::audio::common::HeadTracking;
 using media::audio::common::Spatialization;
-using ::android::internal::ToString;
 
 using namespace std::chrono_literals;
 
@@ -229,7 +230,7 @@
         return;
     }
     auto latencyModesStrs = android::sysprop::BluetoothProperties::dsa_transport_preference();
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     // First load preferred low latency modes ordered from the property
     for (auto str : latencyModesStrs) {
         if (!str.has_value()) continue;
@@ -348,7 +349,8 @@
     bool activeLevelFound = false;
     for (const auto spatializationLevel : spatializationLevels) {
         if (!aidl_utils::isValidEnum(spatializationLevel)) {
-            ALOGW("%s: ignoring spatializationLevel:%d", __func__, (int)spatializationLevel);
+            ALOGW("%s: ignoring spatializationLevel:%s", __func__,
+                  ToString(spatializationLevel).c_str());
             continue;
         }
         if (spatializationLevel == Spatialization::Level::NONE) {
@@ -375,7 +377,8 @@
 
     for (const auto spatializationMode : spatializationModes) {
         if (!aidl_utils::isValidEnum(spatializationMode)) {
-            ALOGW("%s: ignoring spatializationMode:%d", __func__, (int)spatializationMode);
+            ALOGW("%s: ignoring spatializationMode:%s", __func__,
+                  ToString(spatializationMode).c_str());
             continue;
         }
         // we don't detect duplicates.
@@ -394,7 +397,13 @@
         return status;
     }
     for (const auto channelMask : channelMasks) {
-        if (!audio_is_channel_mask_spatialized(channelMask)) {
+        static const bool stereo_spatialization_enabled =
+                property_get_bool("ro.audio.stereo_spatialization_enabled", false);
+        const bool channel_mask_spatialized =
+                (stereo_spatialization_enabled && com_android_media_audio_stereo_spatialization())
+                ? audio_channel_mask_contains_stereo(channelMask)
+                : audio_is_channel_mask_spatialized(channelMask);
+        if (!channel_mask_spatialized) {
             ALOGW("%s: ignoring channelMask:%#x", __func__, channelMask);
             continue;
         }
@@ -406,27 +415,26 @@
         return BAD_VALUE;
     }
 
-    //TODO b/273373363: use AIDL enum when available
     if (com::android::media::audio::dsa_over_bt_le_audio()
             && mSupportsHeadTracking) {
-        mHeadtrackingConnectionMode = HEADTRACKING_CONNECTION_FRAMEWORK_PROCESSED;
-        std::vector<uint8_t> headtrackingConnectionModes;
+        mHeadtrackingConnectionMode = HeadTracking::ConnectionMode::FRAMEWORK_PROCESSED;
+        std::vector<HeadTracking::ConnectionMode> headtrackingConnectionModes;
         status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_HEADTRACKING_CONNECTION,
                 &headtrackingConnectionModes);
         if (status == NO_ERROR) {
             for (const auto htConnectionMode : headtrackingConnectionModes) {
-                if (htConnectionMode < HEADTRACKING_CONNECTION_FRAMEWORK_PROCESSED ||
-                        htConnectionMode > HEADTRACKING_CONNECTION_DIRECT_TO_SENSOR_TUNNEL) {
-                    ALOGW("%s: ignoring HT connection mode:%d", __func__, (int)htConnectionMode);
+                if (htConnectionMode < HeadTracking::ConnectionMode::FRAMEWORK_PROCESSED ||
+                    htConnectionMode > HeadTracking::ConnectionMode::DIRECT_TO_SENSOR_TUNNEL) {
+                    ALOGW("%s: ignoring HT connection mode:%s", __func__,
+                          ToString(htConnectionMode).c_str());
                     continue;
                 }
-                mSupportedHeadtrackingConnectionModes.insert(
-                        static_cast<headtracking_connection_t> (htConnectionMode));
+                mSupportedHeadtrackingConnectionModes.insert(htConnectionMode);
             }
             ALOGW_IF(mSupportedHeadtrackingConnectionModes.find(
-                    HEADTRACKING_CONNECTION_FRAMEWORK_PROCESSED)
-                        == mSupportedHeadtrackingConnectionModes.end(),
-                    "%s: HEADTRACKING_CONNECTION_FRAMEWORK_PROCESSED not reported", __func__);
+                    HeadTracking::ConnectionMode::FRAMEWORK_PROCESSED) ==
+                        mSupportedHeadtrackingConnectionModes.end(),
+                    "%s: Headtracking FRAMEWORK_PROCESSED not reported", __func__);
         }
     }
 
@@ -461,7 +469,7 @@
 
 /** Gets the channel mask, sampling rate and format set for the spatializer input. */
 audio_config_base_t Spatializer::getAudioInConfig() const {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
     // For now use highest supported channel count
     config.channel_mask = getMaxChannelMask(mChannelMasks, FCC_LIMIT);
@@ -470,7 +478,7 @@
 
 status_t Spatializer::registerCallback(
         const sp<media::INativeSpatializerCallback>& callback) {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (callback == nullptr) {
         return BAD_VALUE;
     }
@@ -498,7 +506,7 @@
 // IBinder::DeathRecipient
 void Spatializer::binderDied(__unused const wp<IBinder> &who) {
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         mLevel = Spatialization::Level::NONE;
         mSpatializerCallback.clear();
     }
@@ -527,7 +535,7 @@
     sp<media::INativeSpatializerCallback> callback;
     bool levelChanged = false;
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         levelChanged = mLevel != level;
         mLevel = level;
         callback = mSpatializerCallback;
@@ -551,25 +559,25 @@
     if (level == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     *level = mLevel;
-    ALOGV("%s level %d", __func__, (int)*level);
+    ALOGV("%s level %s", __func__, ToString(*level).c_str());
     return Status::ok();
 }
 
 Status Spatializer::isHeadTrackingSupported(bool *supports) {
-    ALOGV("%s mSupportsHeadTracking %d", __func__, mSupportsHeadTracking);
+    ALOGV("%s mSupportsHeadTracking %s", __func__, ToString(mSupportsHeadTracking).c_str());
     if (supports == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     *supports = mSupportsHeadTracking;
     return Status::ok();
 }
 
 Status Spatializer::getSupportedHeadTrackingModes(
         std::vector<HeadTracking::Mode>* modes) {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     ALOGV("%s", __func__);
     if (modes == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
@@ -585,7 +593,7 @@
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
     mLocalLog.log("%s with %s", __func__, ToString(mode).c_str());
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     switch (mode) {
         case HeadTracking::Mode::OTHER:
             return binderStatusFromStatusT(BAD_VALUE);
@@ -610,7 +618,7 @@
     if (mode == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     *mode = mActualHeadTrackingMode;
     ALOGV("%s mode %d", __func__, (int)*mode);
     return Status::ok();
@@ -620,7 +628,7 @@
     if (!mSupportsHeadTracking) {
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mPoseController != nullptr) {
         mPoseController->recenter();
     }
@@ -637,7 +645,7 @@
         ALOGW("Invalid screenToStage vector.");
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mPoseController != nullptr) {
         mLocalLog.log("%s with screenToStage %s", __func__,
                 media::VectorRecorder::toString<float>(screenToStage).c_str());
@@ -650,7 +658,7 @@
     ALOGV("%s", __func__);
     bool levelChanged = false;
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         if (mSpatializerCallback == nullptr) {
             return binderStatusFromStatusT(INVALID_OPERATION);
         }
@@ -674,7 +682,7 @@
     if (!mSupportsHeadTracking) {
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mHeadSensor != sensorHandle) {
         mLocalLog.log("%s with 0x%08x", __func__, sensorHandle);
         mHeadSensor = sensorHandle;
@@ -689,7 +697,7 @@
     if (!mSupportsHeadTracking) {
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mScreenSensor != sensorHandle) {
         mLocalLog.log("%s with 0x%08x", __func__, sensorHandle);
         mScreenSensor = sensorHandle;
@@ -708,7 +716,7 @@
     // It is possible due to numerical inaccuracies to exceed the boundaries of 0 to 2 * M_PI.
     ALOGI_IF(angle != physicalToLogicalAngle,
             "%s: clamping %f to %f", __func__, physicalToLogicalAngle, angle);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     mDisplayOrientation = angle;
     if (mPoseController != nullptr) {
         // This turns on the rate-limiter.
@@ -728,7 +736,7 @@
     // It is possible due to numerical inaccuracies to exceed the boundaries of 0 to 2 * M_PI.
     ALOGI_IF(angle != hingeAngle,
             "%s: clamping %f to %f", __func__, hingeAngle, angle);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     mHingeAngle = angle;
     if (mEngine != nullptr) {
         setEffectParameter_l(SPATIALIZER_PARAM_HINGE_ANGLE, std::vector<float>{angle});
@@ -739,7 +747,7 @@
 Status Spatializer::setFoldState(bool folded) {
     ALOGV("%s foldState %d", __func__, (int)folded);
     mLocalLog.log("%s with %d", __func__, (int)folded);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     mFoldedState = folded;
     if (mEngine != nullptr) {
         // we don't suppress multiple calls with the same folded state - that's
@@ -761,7 +769,7 @@
 Status Spatializer::registerHeadTrackingCallback(
         const sp<media::ISpatializerHeadTrackingCallback>& callback) {
     ALOGV("%s callback %p", __func__, callback.get());
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (!mSupportsHeadTracking) {
         return binderStatusFromStatusT(INVALID_OPERATION);
     }
@@ -771,7 +779,7 @@
 
 Status Spatializer::setParameter(int key, const std::vector<unsigned char>& value) {
     ALOGV("%s key %d", __func__, key);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     status_t status = INVALID_OPERATION;
     if (mEngine != nullptr) {
         status = setEffectParameter_l(key, value);
@@ -785,7 +793,7 @@
     if (value == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     status_t status = INVALID_OPERATION;
     if (mEngine != nullptr) {
         ALOGV("%s key %d mEngine %p", __func__, key, mEngine.get());
@@ -799,7 +807,7 @@
     if (output == nullptr) {
         return binderStatusFromStatusT(BAD_VALUE);
     }
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     *output = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_io_handle_t_int32_t(mOutput));
     ALOGV("%s got output %d", __func__, *output);
     return Status::ok();
@@ -837,7 +845,7 @@
     ALOGV("%s", __func__);
     sp<media::ISpatializerHeadTrackingCallback> callback;
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         callback = mHeadTrackingCallback;
         if (mEngine != nullptr) {
             setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
@@ -853,7 +861,7 @@
 }
 
 void Spatializer::onActualModeChange(HeadTrackingMode mode) {
-    std::string modeStr = media::toString(mode);
+    std::string modeStr = ToString(mode);
     ALOGV("%s(%s)", __func__, modeStr.c_str());
     sp<AMessage> msg = new AMessage(EngineCallbackHandler::kWhatOnActualModeChange, mHandler);
     msg->setInt32(EngineCallbackHandler::kModeKey, static_cast<int>(mode));
@@ -861,11 +869,11 @@
 }
 
 void Spatializer::onActualModeChangeMsg(HeadTrackingMode mode) {
-    ALOGV("%s(%d)", __func__, (int) mode);
+    ALOGV("%s(%s)", __func__, ToString(mode).c_str());
     sp<media::ISpatializerHeadTrackingCallback> callback;
     HeadTracking::Mode spatializerMode;
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         if (!mSupportsHeadTracking) {
             spatializerMode = HeadTracking::Mode::DISABLED;
         } else {
@@ -880,7 +888,7 @@
                     spatializerMode = HeadTracking::Mode::RELATIVE_SCREEN;
                     break;
                 default:
-                    LOG_ALWAYS_FATAL("Unknown mode: %d", static_cast<int>(mode));
+                    LOG_ALWAYS_FATAL("Unknown mode: %s", ToString(mode).c_str());
             }
         }
         mActualHeadTrackingMode = spatializerMode;
@@ -894,7 +902,7 @@
             }
         }
         callback = mHeadTrackingCallback;
-        mLocalLog.log("%s: updating mode to %s", __func__, media::toString(mode).c_str());
+        mLocalLog.log("%s: updating mode to %s", __func__, ToString(mode).c_str());
     }
     if (callback != nullptr) {
         callback->onHeadTrackingModeChanged(spatializerMode);
@@ -932,7 +940,7 @@
     sp<media::INativeSpatializerCallback> callback;
 
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         ALOGV("%s output %d mOutput %d", __func__, (int)output, (int)mOutput);
         mLocalLog.log("%s with output %d tracks %zu (mOutput %d)", __func__, (int)output,
                       numActiveTracks, (int)mOutput);
@@ -998,7 +1006,7 @@
     sp<media::INativeSpatializerCallback> callback;
 
     {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         mLocalLog.log("%s with output %d tracks %zu", __func__, (int)mOutput, mNumActiveTracks);
         ALOGV("%s mOutput %d", __func__, (int)mOutput);
         if (mOutput == AUDIO_IO_HANDLE_NONE) {
@@ -1032,7 +1040,7 @@
 
 void Spatializer::onSupportedLatencyModesChangedMsg(
         audio_io_handle_t output, std::vector<audio_latency_mode_t>&& modes) {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     ALOGV("%s output %d mOutput %d num modes %zu",
             __func__, (int)output, (int)mOutput, modes.size());
     if (output == mOutput) {
@@ -1043,7 +1051,7 @@
 }
 
 void Spatializer::updateActiveTracks(size_t numActiveTracks) {
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mNumActiveTracks != numActiveTracks) {
         mLocalLog.log("%s from %zu to %zu", __func__, mNumActiveTracks, numActiveTracks);
         mNumActiveTracks = numActiveTracks;
@@ -1052,24 +1060,23 @@
     }
 }
 
-//TODO b/273373363: use AIDL enum when available
 audio_latency_mode_t Spatializer::selectHeadtrackingConnectionMode_l() {
     if (!com::android::media::audio::dsa_over_bt_le_audio()) {
         return AUDIO_LATENCY_MODE_LOW;
     }
     // mSupportedLatencyModes is ordered according to system preferences loaded in
     // mOrderedLowLatencyModes
-    mHeadtrackingConnectionMode = HEADTRACKING_CONNECTION_FRAMEWORK_PROCESSED;
+    mHeadtrackingConnectionMode = HeadTracking::ConnectionMode::FRAMEWORK_PROCESSED;
     audio_latency_mode_t requestedLatencyMode = mSupportedLatencyModes[0];
     if (requestedLatencyMode == AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_HARDWARE) {
         if (mSupportedHeadtrackingConnectionModes.find(
-                HEADTRACKING_CONNECTION_DIRECT_TO_SENSOR_TUNNEL)
+                HeadTracking::ConnectionMode::DIRECT_TO_SENSOR_TUNNEL)
                     != mSupportedHeadtrackingConnectionModes.end()) {
-            mHeadtrackingConnectionMode = HEADTRACKING_CONNECTION_DIRECT_TO_SENSOR_TUNNEL;
+            mHeadtrackingConnectionMode = HeadTracking::ConnectionMode::DIRECT_TO_SENSOR_TUNNEL;
         } else if (mSupportedHeadtrackingConnectionModes.find(
-                HEADTRACKING_CONNECTION_DIRECT_TO_SENSOR_SW)
+                HeadTracking::ConnectionMode::DIRECT_TO_SENSOR_SW)
                     != mSupportedHeadtrackingConnectionModes.end()) {
-            mHeadtrackingConnectionMode = HEADTRACKING_CONNECTION_DIRECT_TO_SENSOR_SW;
+            mHeadtrackingConnectionMode = HeadTracking::ConnectionMode::DIRECT_TO_SENSOR_SW;
         } else {
             // if the engine does not support direct reading of IMU data, do not allow
             // DYNAMIC_SPATIAL_AUDIO_HARDWARE mode and fallback to next mode
@@ -1174,7 +1181,7 @@
 
 void Spatializer::calculateHeadPose() {
     ALOGV("%s", __func__);
-    std::lock_guard lock(mLock);
+    audio_utils::lock_guard lock(mMutex);
     if (mPoseController != nullptr) {
         mPoseController->calculateAsync();
     }
@@ -1193,7 +1200,7 @@
     bool needUnlock = false;
 
     prefixSpace += ' ';
-    if (!mLock.try_lock()) {
+    if (!mMutex.try_lock()) {
         // Even though try_lock failed, the information dump can still be useful,
         // although it may not be accurate.
         ss.append(prefixSpace).append("try_lock failed, dumpsys below may be INACCURATE!\n");
     } else {
@@ -1213,7 +1220,7 @@
         base::StringAppendF(&ss, " %s", ToString(mode).c_str());
     }
     base::StringAppendF(&ss, "], Desired: %s, Actual %s\n",
-                        media::toString(mDesiredHeadTrackingMode).c_str(),
+                        ToString(mDesiredHeadTrackingMode).c_str(),
                         ToString(mActualHeadTrackingMode).c_str());
 
     base::StringAppendF(&ss, "%smSpatializationModes: [", prefixSpace.c_str());
@@ -1239,6 +1246,10 @@
     base::StringAppendF(&ss, "%sDisplayOrientation: %f\n", prefixSpace.c_str(),
                         mDisplayOrientation);
 
+    // 4. Show flag or property state.
+    base::StringAppendF(&ss, "%sStereo Spatialization: %s\n", prefixSpace.c_str(),
+            com_android_media_audio_stereo_spatialization() ? "true" : "false");
+
     ss.append(prefixSpace + "CommandLog:\n");
     ss += mLocalLog.dumpToString((prefixSpace + " ").c_str(), mMaxLocalLogLine);
 
@@ -1258,7 +1269,7 @@
     }
 
     if (needUnlock) {
-        mLock.unlock();
+        mMutex.unlock();
     }
     return ss;
 }
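
One behavioral change in Spatializer.cpp above is the channel-mask filter: plain stereo masks are accepted only when both the ro.audio.stereo_spatialization_enabled property and the stereo_spatialization aconfig flag are set; otherwise the existing audio_is_channel_mask_spatialized() check still applies. A minimal standalone sketch of that dual gate, assuming the channel-mask helpers used in the hunk are declared in <system/audio.h>:

#include <com_android_media_audio.h>
#include <cutils/properties.h>
#include <system/audio.h>

static bool isMaskSpatialized(audio_channel_mask_t channelMask) {
    // Device opt-in, read once; defaults to disabled.
    static const bool stereoEnabled =
            property_get_bool("ro.audio.stereo_spatialization_enabled", false);
    if (stereoEnabled && com_android_media_audio_stereo_spatialization()) {
        // With both gates open, any mask that contains stereo qualifies.
        return audio_channel_mask_contains_stereo(channelMask);
    }
    // Otherwise keep the pre-existing multichannel-only check.
    return audio_is_channel_mask_spatialized(channelMask);
}
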
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index 123517e..355df18 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -23,6 +23,7 @@
 #include <android/media/audio/common/AudioLatencyMode.h>
 #include <android/media/audio/common/HeadTracking.h>
 #include <android/media/audio/common/Spatialization.h>
+#include <audio_utils/mutex.h>
 #include <audio_utils/SimpleLog.h>
 #include <math.h>
 #include <media/AudioEffect.h>
@@ -148,7 +149,7 @@
 
     /** Level getter for use by local classes. */
     media::audio::common::Spatialization::Level getLevel() const {
-        std::lock_guard lock(mLock);
+        audio_utils::lock_guard lock(mMutex);
         return mLevel;
     }
 
@@ -161,7 +162,7 @@
      */
     audio_io_handle_t detachOutput();
     /** Returns the output stream the spatializer is attached to. */
-    audio_io_handle_t getOutput() const { std::lock_guard lock(mLock); return mOutput; }
+    audio_io_handle_t getOutput() const { audio_utils::lock_guard lock(mMutex); return mOutput; }
 
     void updateActiveTracks(size_t numActiveTracks);
 
@@ -261,7 +262,7 @@
      *  according to values vector size.
      */
     template<typename T>
-    status_t setEffectParameter_l(uint32_t type, const std::vector<T>& values) REQUIRES(mLock) {
+    status_t setEffectParameter_l(uint32_t type, const std::vector<T>& values) REQUIRES(mMutex) {
         static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
 
         uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values.size()];
@@ -286,7 +287,7 @@
      * The variant is for compound parameters with two values of different base types
      */
     template<typename P1, typename P2>
-    status_t setEffectParameter_l(uint32_t type, const P1 val1, const P2 val2) REQUIRES(mLock) {
+    status_t setEffectParameter_l(uint32_t type, const P1 val1, const P2 val2) REQUIRES(mMutex) {
         static_assert(sizeof(P1) <= sizeof(uint32_t), "The size of P1 must not exceed 32 bits");
         static_assert(sizeof(P2) <= sizeof(uint32_t), "The size of P2 must not exceed 32 bits");
 
@@ -314,7 +315,7 @@
      * by specifying values vector size.
      */
     template<typename T>
-    status_t getEffectParameter_l(uint32_t type, std::vector<T> *values) REQUIRES(mLock) {
+    status_t getEffectParameter_l(uint32_t type, std::vector<T> *values) REQUIRES(mMutex) {
         static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
 
         uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values->size()];
@@ -345,7 +346,7 @@
      * The variant is for compound parameters with two values of different base types
      */
     template<typename P1, typename P2>
-    status_t getEffectParameter_l(uint32_t type, P1 *val1, P2 *val2) REQUIRES(mLock) {
+    status_t getEffectParameter_l(uint32_t type, P1 *val1, P2 *val2) REQUIRES(mMutex) {
         static_assert(sizeof(P1) <= sizeof(uint32_t), "The size of P1 must not exceed 32 bits");
         static_assert(sizeof(P2) <= sizeof(uint32_t), "The size of P2 must not exceed 32 bits");
 
@@ -375,25 +376,25 @@
      * spatializer state and playback activity and configures the pose controller
      * accordingly.
      */
-    void checkSensorsState_l() REQUIRES(mLock);
+    void checkSensorsState_l() REQUIRES(mMutex);
 
     /**
      * Checks if the head pose controller should be created or destroyed according
      * to desired head tracking mode.
      */
-    void checkPoseController_l() REQUIRES(mLock);
+    void checkPoseController_l() REQUIRES(mMutex);
 
     /**
      * Checks if the spatializer effect should be enabled based on
      * playback activity and requested level.
      */
-    void checkEngineState_l() REQUIRES(mLock);
+    void checkEngineState_l() REQUIRES(mMutex);
 
     /**
      * Reset head tracking mode and recenter pose in engine: Called when the head tracking
      * is disabled.
      */
-    void resetEngineHeadPose_l() REQUIRES(mLock);
+    void resetEngineHeadPose_l() REQUIRES(mMutex);
 
     /** Read bluetooth.core.le.dsa_transport_preference property and populate the ordered list of
      * preferred low latency modes in mOrderedLowLatencyModes.
@@ -406,7 +407,7 @@
      * Note: Because MODE_FREE is not in mOrderedLowLatencyModes, it will always be at
      * the end of the list.
      */
-    void sortSupportedLatencyModes_l() REQUIRES(mLock);
+    void sortSupportedLatencyModes_l() REQUIRES(mMutex);
 
     /**
      * Called after enabling head tracking in the spatializer engine to indicate which
@@ -415,14 +416,14 @@
      * When the connection mode is direct to the sensor, the sensor ID is also communicated
      * to the spatializer engine.
      */
-    void setEngineHeadtrackingConnectionMode_l() REQUIRES(mLock);
+    void setEngineHeadtrackingConnectionMode_l() REQUIRES(mMutex);
 
     /**
      * Select the desired head tracking connection mode for the spatializer engine among the list
      * stored in mSupportedHeadtrackingConnectionModes at init time.
      * Also returns the desired low latency mode according to selected connection mode.
      */
-    audio_latency_mode_t selectHeadtrackingConnectionMode_l() REQUIRES(mLock);
+    audio_latency_mode_t selectHeadtrackingConnectionMode_l() REQUIRES(mMutex);
 
     /** Effect engine descriptor */
     const effect_descriptor_t mEngineDescriptor;
@@ -435,48 +436,48 @@
     const std::string mMetricsId = kDefaultMetricsId;
 
     /** Mutex protecting internal state */
-    mutable std::mutex mLock;
+    mutable audio_utils::mutex mMutex{audio_utils::MutexOrder::kSpatializer_Mutex};
 
     /** Client AudioEffect for the engine */
-    sp<AudioEffect> mEngine GUARDED_BY(mLock);
+    sp<AudioEffect> mEngine GUARDED_BY(mMutex);
     /** Output stream the spatializer mixer thread is attached to */
-    audio_io_handle_t mOutput GUARDED_BY(mLock) = AUDIO_IO_HANDLE_NONE;
+    audio_io_handle_t mOutput GUARDED_BY(mMutex) = AUDIO_IO_HANDLE_NONE;
 
     /** Callback interface to the client (AudioService) controlling this Spatializer */
-    sp<media::INativeSpatializerCallback> mSpatializerCallback GUARDED_BY(mLock);
+    sp<media::INativeSpatializerCallback> mSpatializerCallback GUARDED_BY(mMutex);
 
     /** Callback interface for head tracking */
-    sp<media::ISpatializerHeadTrackingCallback> mHeadTrackingCallback GUARDED_BY(mLock);
+    sp<media::ISpatializerHeadTrackingCallback> mHeadTrackingCallback GUARDED_BY(mMutex);
 
     /** Requested spatialization level */
-    media::audio::common::Spatialization::Level mLevel GUARDED_BY(mLock) =
+    media::audio::common::Spatialization::Level mLevel GUARDED_BY(mMutex) =
             media::audio::common::Spatialization::Level::NONE;
 
     /** Control logic for head-tracking, etc. */
-    std::shared_ptr<SpatializerPoseController> mPoseController GUARDED_BY(mLock);
+    std::shared_ptr<SpatializerPoseController> mPoseController GUARDED_BY(mMutex);
 
     /** Last requested head tracking mode */
-    media::HeadTrackingMode mDesiredHeadTrackingMode GUARDED_BY(mLock)
+    media::HeadTrackingMode mDesiredHeadTrackingMode GUARDED_BY(mMutex)
             = media::HeadTrackingMode::STATIC;
 
     /** Last-reported actual head-tracking mode. */
-    media::audio::common::HeadTracking::Mode mActualHeadTrackingMode GUARDED_BY(mLock)
+    media::audio::common::HeadTracking::Mode mActualHeadTrackingMode GUARDED_BY(mMutex)
             = media::audio::common::HeadTracking::Mode::DISABLED;
 
     /** Selected Head pose sensor */
-    int32_t mHeadSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+    int32_t mHeadSensor GUARDED_BY(mMutex) = SpatializerPoseController::INVALID_SENSOR;
 
     /** Selected Screen pose sensor */
-    int32_t mScreenSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+    int32_t mScreenSensor GUARDED_BY(mMutex) = SpatializerPoseController::INVALID_SENSOR;
 
     /** Last display orientation received */
-    float mDisplayOrientation GUARDED_BY(mLock) = 0.f;  // aligned to natural up orientation.
+    float mDisplayOrientation GUARDED_BY(mMutex) = 0.f;  // aligned to natural up orientation.
 
     /** Last folded state */
-    bool mFoldedState GUARDED_BY(mLock) = false;  // foldable: true means folded.
+    bool mFoldedState GUARDED_BY(mMutex) = false;  // foldable: true means folded.
 
     /** Last hinge angle */
-    float mHingeAngle GUARDED_BY(mLock) = 0.f;  // foldable: 0.f is closed, M_PI flat open.
+    float mHingeAngle GUARDED_BY(mMutex) = 0.f;  // foldable: 0.f is closed, M_PI flat open.
 
     std::vector<media::audio::common::Spatialization::Level> mLevels;
     std::vector<media::audio::common::HeadTracking::Mode> mHeadTrackingModes;
@@ -485,11 +486,13 @@
     bool mSupportsHeadTracking;
     /** List of supported headtracking connection modes reported by the spatializer.
      * If the list is empty, the spatializer does not support any optional connection
-     * mode and mode HEADTRACKING_CONNECTION_FRAMEWORK_PROCESSED is assumed.
+     * mode and mode HeadTracking::ConnectionMode::FRAMEWORK_PROCESSED is assumed.
      */
-    std::unordered_set<headtracking_connection_t> mSupportedHeadtrackingConnectionModes;
+    std::unordered_set<media::audio::common::HeadTracking::ConnectionMode>
+            mSupportedHeadtrackingConnectionModes;
     /** Selected HT connection mode when several modes are supported by the spatializer */
-    headtracking_connection_t mHeadtrackingConnectionMode;
+    media::audio::common::HeadTracking::ConnectionMode mHeadtrackingConnectionMode =
+            media::audio::common::HeadTracking::ConnectionMode::FRAMEWORK_PROCESSED;
 
     // Looper thread for mEngine callbacks
     class EngineCallbackHandler;
@@ -497,8 +500,8 @@
     sp<ALooper> mLooper;
     sp<EngineCallbackHandler> mHandler;
 
-    size_t mNumActiveTracks GUARDED_BY(mLock) = 0;
-    std::vector<audio_latency_mode_t> mSupportedLatencyModes GUARDED_BY(mLock);
+    size_t mNumActiveTracks GUARDED_BY(mMutex) = 0;
+    std::vector<audio_latency_mode_t> mSupportedLatencyModes GUARDED_BY(mMutex);
     /** preference order for low latency modes according to
      *  bluetooth.core.le.dsa_transport_preference */
     std::vector<audio_latency_mode_t> mOrderedLowLatencyModes;
     /** string to latency mode map used to parse bluetooth.core.le.dsa_transport_preference */
@@ -514,10 +517,10 @@
      * Dump to local log with max/average pose angle every mPoseRecordThreshold.
      */
     // Record one log line per second (up to mMaxLocalLogLine) to capture most recent sensor data.
-    media::VectorRecorder mPoseRecorder GUARDED_BY(mLock) {
+    media::VectorRecorder mPoseRecorder GUARDED_BY(mMutex) {
         6 /* vectorSize */, std::chrono::seconds(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */};
     // Record one log line per minute (up to mMaxLocalLogLine) to capture durable sensor data.
-    media::VectorRecorder mPoseDurableRecorder  GUARDED_BY(mLock) {
+    media::VectorRecorder mPoseDurableRecorder GUARDED_BY(mMutex) {
         6 /* vectorSize */, std::chrono::minutes(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */};
 };  // Spatializer
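
The templated setEffectParameter_l/getEffectParameter_l helpers above build an effect_param_t command buffer: a 32-bit parameter type code in the parameter area, followed by the raw values. The following sketches the kind of layout those templates build; packEffectParam is a hypothetical illustrative helper, not part of this change, and assumes only the effect_param_t definition from <hardware/audio_effect.h>:

#include <cstdint>
#include <cstring>
#include <vector>
#include <hardware/audio_effect.h>

// Hypothetical helper, for illustration only.
template <typename T>
static std::vector<uint8_t> packEffectParam(uint32_t type, const std::vector<T>& values) {
    static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
    std::vector<uint8_t> buf(sizeof(effect_param_t) + sizeof(uint32_t)
            + values.size() * sizeof(T));
    auto* p = reinterpret_cast<effect_param_t*>(buf.data());
    p->psize = sizeof(uint32_t);             // parameter area: the 32-bit type code
    p->vsize = values.size() * sizeof(T);    // value area: the raw values
    memcpy(p->data, &type, sizeof(uint32_t));
    if (p->vsize != 0) {
        memcpy(p->data + sizeof(uint32_t), values.data(), p->vsize);
    }
    return buf;
}
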
 
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index a4a0cd4..34bd3b4 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -33,11 +34,14 @@
         "libutils",
         "libcutils",
         "libxml2",
+        "server_configurable_flags",
     ],
 
     static_libs: [
+        "android.media.audiopolicy-aconfig-cc",
         "audioclient-types-aidl-cpp",
         "libaudiopolicycomponents",
+        "libflagtest",
         "libgmock",
     ],
 
@@ -49,7 +53,7 @@
 
     srcs: ["audiopolicymanager_tests.cpp"],
 
-    data: [":audiopolicytest_configuration_files",],
+    data: [":audiopolicytest_configuration_files"],
 
     cflags: [
         "-Werror",
@@ -63,7 +67,6 @@
 
 }
 
-
 cc_test {
     name: "audio_health_tests",
 
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index 31ee252..aa7c9cd 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -31,6 +31,7 @@
     using AudioPolicyManager::getConfig;
     using AudioPolicyManager::initialize;
     using AudioPolicyManager::getOutputs;
+    using AudioPolicyManager::getInputs;
     using AudioPolicyManager::getAvailableOutputDevices;
     using AudioPolicyManager::getAvailableInputDevices;
     using AudioPolicyManager::setSurroundFormatEnabled;
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 74d3474..e02c93a 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -28,6 +28,8 @@
 #include <android-base/file.h>
 #include <android-base/properties.h>
 #include <android/content/AttributionSourceState.h>
+#include <android_media_audiopolicy.h>
+#include <flag_macros.h>
 #include <hardware/audio_effect.h>
 #include <media/AudioPolicy.h>
 #include <media/PatchBuilder.h>
@@ -43,6 +45,7 @@
 
 using namespace android;
 using testing::UnorderedElementsAre;
+using testing::IsEmpty;
 using android::content::AttributionSourceState;
 
 namespace {
@@ -92,6 +95,12 @@
     return attributionSourceState;
 }
 
+bool equals(const audio_config_base_t& config1, const audio_config_base_t& config2) {
+    return config1.format == config2.format
+            && config1.sample_rate == config2.sample_rate
+            && config1.channel_mask == config2.channel_mask;
+}
+
 } // namespace
 
 TEST(AudioPolicyConfigTest, DefaultConfigForTestsIsEmpty) {
@@ -1266,6 +1275,53 @@
                                                            "", "", AUDIO_FORMAT_LDAC));
 }
 
+TEST_F(AudioPolicyManagerTestWithConfigurationFile, PreferExactConfigForInput) {
+    const audio_channel_mask_t deviceChannelMask = AUDIO_CHANNEL_IN_3POINT1;
+    mClient->addSupportedFormat(AUDIO_FORMAT_PCM_16_BIT);
+    mClient->addSupportedChannelMask(deviceChannelMask);
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(AUDIO_DEVICE_IN_USB_DEVICE,
+                                                           AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                                                           "", "", AUDIO_FORMAT_DEFAULT));
+
+    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+    audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                               AUDIO_SOURCE_VOICE_COMMUNICATION, AUDIO_FLAG_NONE, ""};
+    AudioPolicyInterface::input_type_t inputType;
+    audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+    AttributionSourceState attributionSource = createAttributionSourceState(/*uid=*/ 0);
+    audio_config_base_t requestedConfig = {
+            .channel_mask = AUDIO_CHANNEL_IN_STEREO,
+            .format = AUDIO_FORMAT_PCM_16_BIT,
+            .sample_rate = 48000
+    };
+    audio_config_base_t config = requestedConfig;
+    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+    ASSERT_EQ(OK, mManager->getInputForAttr(
+            &attr, &input, 1 /*riid*/, AUDIO_SESSION_NONE, attributionSource, &config,
+            AUDIO_INPUT_FLAG_NONE,
+            &selectedDeviceId, &inputType, &portId));
+    ASSERT_NE(AUDIO_PORT_HANDLE_NONE, portId);
+    ASSERT_TRUE(equals(requestedConfig, config));
+
+    attr = {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+            AUDIO_SOURCE_VOICE_COMMUNICATION, AUDIO_FLAG_NONE, ""};
+    requestedConfig.channel_mask = deviceChannelMask;
+    config = requestedConfig;
+    selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+    input = AUDIO_IO_HANDLE_NONE;
+    portId = AUDIO_PORT_HANDLE_NONE;
+    ASSERT_EQ(OK, mManager->getInputForAttr(
+            &attr, &input, 1 /*riid*/, AUDIO_SESSION_NONE, attributionSource, &config,
+            AUDIO_INPUT_FLAG_NONE,
+            &selectedDeviceId, &inputType, &portId));
+    ASSERT_NE(AUDIO_PORT_HANDLE_NONE, portId);
+    ASSERT_TRUE(equals(requestedConfig, config));
+
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(AUDIO_DEVICE_IN_USB_DEVICE,
+                                                           AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                                           "", "", AUDIO_FORMAT_DEFAULT));
+}
+
 class AudioPolicyManagerTestDynamicPolicy : public AudioPolicyManagerTestWithConfigurationFile {
 protected:
     void TearDown() override;
@@ -1273,6 +1329,12 @@
     status_t addPolicyMix(int mixType, int mixFlag, audio_devices_t deviceType,
             std::string mixAddress, const audio_config_t& audioConfig,
             const std::vector<AudioMixMatchCriterion>& matchCriteria);
+
+    status_t addPolicyMix(const AudioMix& mix);
+
+    status_t removePolicyMixes(const Vector<AudioMix>& mixes);
+
+    std::vector<AudioMix> getRegisteredPolicyMixes();
     void clearPolicyMix();
     void addPolicyMixAndStartInputForLoopback(
             int mixType, int mixFlag, audio_devices_t deviceType, std::string mixAddress,
@@ -1309,7 +1371,11 @@
     myAudioMix.mDeviceType = deviceType;
     // Clear mAudioMix before add new one to make sure we don't add already exist mixes.
     mAudioMixes.clear();
-    mAudioMixes.add(myAudioMix);
+    return addPolicyMix(myAudioMix);
+}
+
+status_t AudioPolicyManagerTestDynamicPolicy::addPolicyMix(const AudioMix& mix) {
+    mAudioMixes.add(mix);
 
     // As the policy mixes registration may fail at some case,
     // caller need to check the returned status.
@@ -1317,6 +1383,20 @@
     return ret;
 }
 
+status_t AudioPolicyManagerTestDynamicPolicy::removePolicyMixes(const Vector<AudioMix>& mixes) {
+    status_t ret = mManager->unregisterPolicyMixes(mixes);
+    return ret;
+}
+
+std::vector<AudioMix> AudioPolicyManagerTestDynamicPolicy::getRegisteredPolicyMixes() {
+    std::vector<AudioMix> audioMixes;
+    if (mManager != nullptr) {
+        status_t ret = mManager->getRegisteredPolicyMixes(audioMixes);
+        EXPECT_EQ(NO_ERROR, ret);
+    }
+    return audioMixes;
+}
+
 void AudioPolicyManagerTestDynamicPolicy::clearPolicyMix() {
     if (mManager != nullptr) {
         mManager->stopInput(mLoopbackInputPortId);
@@ -1470,6 +1550,142 @@
     ASSERT_EQ(INVALID_OPERATION, ret);
 }
 
+TEST_F_WITH_FLAGS(
+        AudioPolicyManagerTestDynamicPolicy,
+        RegisterInvalidMixesDoesNotImpactPriorMixes,
+        REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(android::media::audiopolicy, audio_mix_test_api),
+                               ACONFIG_FLAG(android::media::audiopolicy, audio_mix_ownership))
+) {
+    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+    audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    audioConfig.sample_rate = k48000SamplingRate;
+
+    std::vector<AudioMixMatchCriterion> validMixMatchCriteria = {
+            createUidCriterion(/*uid=*/42),
+            createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/true)};
+    AudioMix validAudioMix(validMixMatchCriteria, MIX_TYPE_PLAYERS, audioConfig,
+                           MIX_ROUTE_FLAG_LOOP_BACK, String8(mMixAddress.c_str()), 0);
+    validAudioMix.mDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+
+    mAudioMixes.clear();
+    mAudioMixes.add(validAudioMix);
+    status_t ret = mManager->registerPolicyMixes(mAudioMixes);
+
+    ASSERT_EQ(NO_ERROR, ret);
+
+    std::vector<AudioMix> registeredMixes = getRegisteredPolicyMixes();
+    ASSERT_EQ(1, registeredMixes.size());
+
+    std::vector<AudioMixMatchCriterion> invalidMixMatchCriteria = {
+            createUidCriterion(/*uid=*/42),
+            createUidCriterion(/*uid=*/1235, /*exclude=*/true),
+            createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/true)};
+
+    AudioMix invalidAudioMix(invalidMixMatchCriteria, MIX_TYPE_PLAYERS, audioConfig,
+                             MIX_ROUTE_FLAG_LOOP_BACK, String8(mMixAddress.c_str()), 0);
+    invalidAudioMix.mDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+
+    mAudioMixes.add(invalidAudioMix);
+    ret = mManager->registerPolicyMixes(mAudioMixes);
+
+    ASSERT_EQ(INVALID_OPERATION, ret);
+
+    std::vector<AudioMix> remainingMixes = getRegisteredPolicyMixes();
+    ASSERT_EQ(registeredMixes.size(), remainingMixes.size());
+}
+
+TEST_F_WITH_FLAGS(
+        AudioPolicyManagerTestDynamicPolicy,
+        UnregisterInvalidMixesReturnsError,
+        REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(android::media::audiopolicy, audio_mix_test_api),
+                               ACONFIG_FLAG(android::media::audiopolicy, audio_mix_ownership))
+) {
+    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+    audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    audioConfig.sample_rate = k48000SamplingRate;
+
+    std::vector<AudioMixMatchCriterion> validMixMatchCriteria = {
+            createUidCriterion(/*uid=*/42),
+            createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/true)};
+    AudioMix validAudioMix(validMixMatchCriteria, MIX_TYPE_PLAYERS, audioConfig,
+                           MIX_ROUTE_FLAG_LOOP_BACK, String8(mMixAddress.c_str()), 0);
+    validAudioMix.mDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+
+    mAudioMixes.clear();
+    mAudioMixes.add(validAudioMix);
+    status_t ret = mManager->registerPolicyMixes(mAudioMixes);
+
+    ASSERT_EQ(NO_ERROR, ret);
+
+    std::vector<AudioMix> registeredMixes = getRegisteredPolicyMixes();
+    ASSERT_EQ(1, registeredMixes.size());
+
+    std::vector<AudioMixMatchCriterion> invalidMixMatchCriteria = {
+            createUidCriterion(/*uid=*/42),
+            createUidCriterion(/*uid=*/1235, /*exclude=*/true),
+            createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/true)};
+
+    AudioMix invalidAudioMix(invalidMixMatchCriteria, MIX_TYPE_PLAYERS, audioConfig,
+                             MIX_ROUTE_FLAG_LOOP_BACK, String8(mMixAddress.c_str()), 0);
+    invalidAudioMix.mDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+
+    Vector<AudioMix> mixes;
+    mixes.add(invalidAudioMix);
+    mixes.add(validAudioMix);
+    ret = removePolicyMixes(mixes);
+
+    ASSERT_EQ(INVALID_OPERATION, ret);
+
+    std::vector<AudioMix> remainingMixes = getRegisteredPolicyMixes();
+    EXPECT_THAT(remainingMixes, IsEmpty());
+}
+
+TEST_F_WITH_FLAGS(
+        AudioPolicyManagerTestDynamicPolicy,
+        GetRegisteredPolicyMixes,
+        REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(android::media::audiopolicy, audio_mix_test_api))
+) {
+    std::vector<AudioMix> mixes = getRegisteredPolicyMixes();
+    EXPECT_THAT(mixes, IsEmpty());
+}
+
+TEST_F_WITH_FLAGS(AudioPolicyManagerTestDynamicPolicy,
+        AddPolicyMixAndVerifyGetRegisteredPolicyMixes,
+        REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(android::media::audiopolicy, audio_mix_test_api))
+) {
+    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+    audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    audioConfig.sample_rate = k48000SamplingRate;
+
+    std::vector<AudioMixMatchCriterion> mixMatchCriteria = {
+            createUidCriterion(/*uid=*/42),
+            createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/true)};
+    status_t ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+                                AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig,
+                                mixMatchCriteria);
+    ASSERT_EQ(NO_ERROR, ret);
+
+    std::vector<AudioMix> mixes = getRegisteredPolicyMixes();
+    ASSERT_EQ(mixes.size(), 1);
+
+    const AudioMix& mix = mixes[0];
+    ASSERT_EQ(mix.mCriteria.size(), mixMatchCriteria.size());
+    for (uint32_t i = 0; i < mixMatchCriteria.size(); i++) {
+        EXPECT_EQ(mix.mCriteria[i].mRule, mixMatchCriteria[i].mRule);
+        EXPECT_EQ(mix.mCriteria[i].mValue.mUsage, mixMatchCriteria[i].mValue.mUsage);
+    }
+    EXPECT_EQ(mix.mDeviceType, AUDIO_DEVICE_OUT_REMOTE_SUBMIX);
+    EXPECT_EQ(mix.mRouteFlags, MIX_ROUTE_FLAG_LOOP_BACK);
+    EXPECT_EQ(mix.mMixType, MIX_TYPE_PLAYERS);
+    EXPECT_EQ(mix.mFormat.channel_mask, audioConfig.channel_mask);
+    EXPECT_EQ(mix.mFormat.format, audioConfig.format);
+    EXPECT_EQ(mix.mFormat.sample_rate, audioConfig.sample_rate);
+    EXPECT_EQ(mix.mFormat.frame_count, audioConfig.frame_count);
+}
+
 class AudioPolicyManagerTestForHdmi
         : public AudioPolicyManagerTestWithConfigurationFile,
           public testing::WithParamInterface<audio_format_t> {
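
The new test cases above rely on libflagtest's TEST_F_WITH_FLAGS / REQUIRES_FLAGS_ENABLED / ACONFIG_FLAG macros (pulled in through <flag_macros.h>) so that they only execute when the corresponding aconfig flags are enabled. A hedged, minimal example of that gating pattern follows; ExampleFixture and the test name are placeholders, and the skip behavior is the expected default of the macro rather than something introduced by this change:

#include <android_media_audiopolicy.h>
#include <flag_macros.h>
#include <gtest/gtest.h>

// Placeholder fixture, for illustration only.
class ExampleFixture : public ::testing::Test {};

TEST_F_WITH_FLAGS(ExampleFixture, RunsOnlyWithMixTestApiFlag,
        REQUIRES_FLAGS_ENABLED(
                ACONFIG_FLAG(android::media::audiopolicy, audio_mix_test_api))) {
    // Body runs only when the audio_mix_test_api aconfig flag is enabled for the
    // build/device under test; otherwise the harness is expected to skip it.
    SUCCEED();
}
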
diff --git a/services/audiopolicy/tests/resources/Android.bp b/services/audiopolicy/tests/resources/Android.bp
index 5e71210..43e2e39 100644
--- a/services/audiopolicy/tests/resources/Android.bp
+++ b/services/audiopolicy/tests/resources/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_android_media_audio_framework",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
index 4efdf8a..1a299c6 100644
--- a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
+++ b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
@@ -65,6 +65,7 @@
                         samplingRates="48000"
                         channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
                 </mixPort>
+                <mixPort name="hifi_input" role="sink" />
             </mixPorts>
             <devicePorts>
                 <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
@@ -111,6 +112,8 @@
                        sources="primary output,hifi_output,mmap_no_irq_out"/>
                 <route type="mix" sink="mixport_bus_input"
                     sources="BUS Device In"/>
+                <route type="mix" sink="hifi_input"
+                        sources="USB Device In" />
             </routes>
         </module>
 
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 4883a09..b748888 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -101,7 +101,7 @@
         "android.frameworks.cameraservice.device-V2-ndk",
         "android.hardware.camera.common-V1-ndk",
         "android.hardware.camera.device-V3-ndk",
-        "android.hardware.camera.metadata-V2-ndk",
+        "android.hardware.camera.metadata-V3-ndk",
         "android.hardware.camera.provider@2.4",
         "android.hardware.camera.provider@2.5",
         "android.hardware.camera.provider@2.6",
@@ -112,6 +112,7 @@
         "libcameraservice_device_independent",
         "libdynamic_depth",
         "libprocessinfoservice_aidl",
+        "libvirtualdevicebuildflags",
         "media_permission-aidl-cpp",
     ],
 }
@@ -185,6 +186,7 @@
         "aidl/AidlCameraServiceListener.cpp",
         "aidl/AidlUtils.cpp",
         "aidl/DeathPipe.cpp",
+        "utils/AttributionAndPermissionUtils.cpp",
         "utils/CameraServiceProxyWrapper.cpp",
         "utils/CameraThreadState.cpp",
         "utils/CameraTraces.cpp",
@@ -195,6 +197,7 @@
         "utils/SessionStatsBuilder.cpp",
         "utils/TagMonitor.cpp",
         "utils/LatencyHistogram.cpp",
+        "utils/Utils.cpp",
     ],
 
     header_libs: [
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 725f1eb..ebe771e 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -38,7 +38,6 @@
 #include <aidl/AidlCameraService.h>
 #include <android-base/macros.h>
 #include <android-base/parseint.h>
-#include <android/permission/PermissionChecker.h>
 #include <binder/ActivityManager.h>
 #include <binder/AppOpsManager.h>
 #include <binder/IPCThreadState.h>
@@ -129,18 +128,16 @@
 
 // ----------------------------------------------------------------------------
 
-static const std::string sDumpPermission("android.permission.DUMP");
-static const std::string sManageCameraPermission("android.permission.MANAGE_CAMERA");
-static const std::string sCameraPermission("android.permission.CAMERA");
-static const std::string sSystemCameraPermission("android.permission.SYSTEM_CAMERA");
-static const std::string sCameraHeadlessSystemUserPermission(
-        "android.permission.CAMERA_HEADLESS_SYSTEM_USER");
-static const std::string
-        sCameraSendSystemEventsPermission("android.permission.CAMERA_SEND_SYSTEM_EVENTS");
-static const std::string sCameraOpenCloseListenerPermission(
-        "android.permission.CAMERA_OPEN_CLOSE_LISTENER");
-static const std::string
-        sCameraInjectExternalCameraPermission("android.permission.CAMERA_INJECT_EXTERNAL_CAMERA");
+// Permission strings, aliased from AttributionAndPermissionUtils so existing call sites stay short
+static const std::string &sDumpPermission =
+        AttributionAndPermissionUtils::sDumpPermission;
+static const std::string &sManageCameraPermission =
+        AttributionAndPermissionUtils::sManageCameraPermission;
+static const std::string &sCameraSendSystemEventsPermission =
+        AttributionAndPermissionUtils::sCameraSendSystemEventsPermission;
+static const std::string &sCameraInjectExternalCameraPermission =
+        AttributionAndPermissionUtils::sCameraInjectExternalCameraPermission;
+
 // Constant integer for FGS Logging, used to denote the API type for logger
 static const int LOG_FGS_CAMERA_API = 1;
 const char *sFileName = "lastOpenSessionDumpFile";
@@ -156,9 +153,13 @@
 static std::set<std::string> sServiceErrorEventSet;
 
 CameraService::CameraService(
-        std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper) :
+        std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper,
+        std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils) :
         mCameraServiceProxyWrapper(cameraServiceProxyWrapper == nullptr ?
                 std::make_shared<CameraServiceProxyWrapper>() : cameraServiceProxyWrapper),
+        mAttributionAndPermissionUtils(attributionAndPermissionUtils == nullptr ?
+                std::make_shared<AttributionAndPermissionUtils>(this)
+                : attributionAndPermissionUtils),
         mEventLog(DEFAULT_EVENT_LOG_LENGTH),
         mNumberOfCameras(0),
         mNumberOfCamerasWithoutSystemCamera(0),
@@ -213,7 +214,7 @@
 
     mUidPolicy = new UidPolicy(this);
     mUidPolicy->registerSelf();
-    mSensorPrivacyPolicy = new SensorPrivacyPolicy(this);
+    mSensorPrivacyPolicy = new SensorPrivacyPolicy(this, mAttributionAndPermissionUtils);
     mSensorPrivacyPolicy->registerSelf();
     mInjectionStatusListener = new InjectionStatusListener(this);
 
@@ -706,34 +707,15 @@
     broadcastTorchModeStatus(cameraId, newStatus, systemCameraKind);
 }
 
-static bool isAutomotiveDevice() {
-    // Checks the property ro.hardware.type and returns true if it is
-    // automotive.
-    char value[PROPERTY_VALUE_MAX] = {0};
-    property_get("ro.hardware.type", value, "");
-    return strncmp(value, "automotive", PROPERTY_VALUE_MAX) == 0;
+bool CameraService::isAutomotiveDevice() const {
+    return mAttributionAndPermissionUtils->isAutomotiveDevice();
 }
 
-static bool isHeadlessSystemUserMode() {
-    // Checks if the device is running in headless system user mode
-    // by checking the property ro.fw.mu.headless_system_user.
-    char value[PROPERTY_VALUE_MAX] = {0};
-    property_get("ro.fw.mu.headless_system_user", value, "");
-    return strncmp(value, "true", PROPERTY_VALUE_MAX) == 0;
+bool CameraService::isAutomotivePrivilegedClient(int32_t uid) const {
+    return mAttributionAndPermissionUtils->isAutomotivePrivilegedClient(uid);
 }
 
-static bool isAutomotivePrivilegedClient(int32_t uid) {
-    // Returns false if this is not an automotive device type.
-    if (!isAutomotiveDevice())
-        return false;
-
-    // Returns true if the uid is AID_AUTOMOTIVE_EVS which is a
-    // privileged client uid used for safety critical use cases such as
-    // rear view and surround view.
-    return uid == AID_AUTOMOTIVE_EVS;
-}
-
-bool CameraService::isAutomotiveExteriorSystemCamera(const std::string& cam_id) const{
+bool CameraService::isAutomotiveExteriorSystemCamera(const std::string& cam_id) const {
     // Returns false if this is not an automotive device type.
     if (!isAutomotiveDevice())
         return false;
@@ -778,46 +760,47 @@
     return true;
 }
 
-bool CameraService::checkPermission(const std::string& cameraId, const std::string& permission,
-        const AttributionSourceState& attributionSource, const std::string& message,
-        int32_t attributedOpCode) const{
-    if (isAutomotivePrivilegedClient(attributionSource.uid)) {
-        // If cameraId is empty, then it means that this check is not used for the
-        // purpose of accessing a specific camera, hence grant permission just
-        // based on uid to the automotive privileged client.
-        if (cameraId.empty())
-            return true;
-        // If this call is used for accessing a specific camera then cam_id must be provided.
-        // In that case, only pre-grants the permission for accessing the exterior system only
-        // camera.
-        return isAutomotiveExteriorSystemCamera(cameraId);
-    }
+static AttributionSourceState attributionSourceFromPidAndUid(int callingPid, int callingUid) {
+    AttributionSourceState attributionSource{};
+    attributionSource.pid = callingPid;
+    attributionSource.uid = callingUid;
+    return attributionSource;
+}
 
-    permission::PermissionChecker permissionChecker;
-    return permissionChecker.checkPermissionForPreflight(toString16(permission), attributionSource,
-            toString16(message), attributedOpCode)
-            != permission::PermissionChecker::PERMISSION_HARD_DENIED;
+bool CameraService::hasPermissionsForCamera(int callingPid, int callingUid) const {
+    return hasPermissionsForCamera(std::string(), callingPid, callingUid);
+}
+
+bool CameraService::hasPermissionsForCamera(const std::string& cameraId, int callingPid,
+        int callingUid) const {
+    auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
+    return mAttributionAndPermissionUtils->hasPermissionsForCamera(cameraId, attributionSource);
 }
 
 bool CameraService::hasPermissionsForSystemCamera(const std::string& cameraId, int callingPid,
-        int callingUid) const{
-    AttributionSourceState attributionSource{};
-    attributionSource.pid = callingPid;
-    attributionSource.uid = callingUid;
-    bool checkPermissionForSystemCamera = checkPermission(cameraId,
-            sSystemCameraPermission, attributionSource, std::string(), AppOpsManager::OP_NONE);
-    bool checkPermissionForCamera = checkPermission(cameraId,
-            sCameraPermission, attributionSource, std::string(), AppOpsManager::OP_NONE);
-    return checkPermissionForSystemCamera && checkPermissionForCamera;
+        int callingUid, bool checkCameraPermissions) const {
+    auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
+    return mAttributionAndPermissionUtils->hasPermissionsForSystemCamera(
+                cameraId, attributionSource, checkCameraPermissions);
 }
 
 bool CameraService::hasPermissionsForCameraHeadlessSystemUser(const std::string& cameraId,
-        int callingPid, int callingUid) const{
-    AttributionSourceState attributionSource{};
-    attributionSource.pid = callingPid;
-    attributionSource.uid = callingUid;
-    return checkPermission(cameraId, sCameraHeadlessSystemUserPermission, attributionSource,
-            std::string(), AppOpsManager::OP_NONE);
+        int callingPid, int callingUid) const {
+    auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
+    return mAttributionAndPermissionUtils->hasPermissionsForCameraHeadlessSystemUser(
+                cameraId, attributionSource);
+}
+
+bool CameraService::hasPermissionsForCameraPrivacyAllowlist(int callingPid, int callingUid) const {
+    auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
+    return mAttributionAndPermissionUtils->hasPermissionsForCameraPrivacyAllowlist(
+            attributionSource);
+}
+
+bool CameraService::hasPermissionsForOpenCloseListener(int callingPid, int callingUid) const {
+    auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
+    return mAttributionAndPermissionUtils->hasPermissionsForOpenCloseListener(
+            attributionSource);
 }
 
 Status CameraService::getNumberOfCameras(int32_t type, int32_t* numCameras) {
@@ -906,13 +889,6 @@
                 "request for system only device %s: ", cameraId.c_str());
     }
 
-    // Check for camera permissions
-    if (!hasCameraPermissions()) {
-        return STATUS_ERROR(ERROR_PERMISSION_DENIED,
-                "android.permission.CAMERA needed to call"
-                "createDefaultRequest");
-    }
-
     CameraMetadata metadata;
     status_t err = mCameraProviderManager->createDefaultRequest(cameraId, tempId, &metadata);
     if (err == OK) {
@@ -961,13 +937,6 @@
                 cameraId.c_str());
     }
 
-    // Check for camera permissions
-    if (!hasCameraPermissions()) {
-        return STATUS_ERROR(ERROR_PERMISSION_DENIED,
-                "android.permission.CAMERA needed to call"
-                "isSessionConfigurationWithParametersSupported");
-    }
-
     *supported = false;
     status_t ret = mCameraProviderManager->isSessionConfigurationSupported(cameraId.c_str(),
             sessionConfiguration, /*mOverrideForPerfClass*/false, /*checkSessionParams*/true,
@@ -998,6 +967,61 @@
     return res;
 }
 
+Status CameraService::getSessionCharacteristics(const std::string& unresolvedCameraId,
+                                                int targetSdkVersion, bool overrideToPortrait,
+                                                const SessionConfiguration& sessionConfiguration,
+                                                /*out*/ CameraMetadata* outMetadata) {
+    ATRACE_CALL();
+
+    if (!mInitialized) {
+        ALOGE("%s: Camera HAL couldn't be initialized", __FUNCTION__);
+        logServiceError("Camera subsystem is not available", ERROR_DISCONNECTED);
+        return STATUS_ERROR(ERROR_DISCONNECTED, "Camera subsystem is not available");
+    }
+
+    const std::string cameraId =
+            resolveCameraId(unresolvedCameraId, CameraThreadState::getCallingUid());
+
+    if (outMetadata == nullptr) {
+        std::string msg =
+                fmt::sprintf("Camera %s: Invalid 'outMetadata' input!", unresolvedCameraId.c_str());
+        ALOGE("%s: %s", __FUNCTION__, msg.c_str());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.c_str());
+    }
+
+    bool overrideForPerfClass = SessionConfigurationUtils::targetPerfClassPrimaryCamera(
+            mPerfClassPrimaryCameraIds, cameraId, targetSdkVersion);
+
+    status_t ret = mCameraProviderManager->getSessionCharacteristics(
+            cameraId, sessionConfiguration, overrideForPerfClass, overrideToPortrait, outMetadata);
+
+    // TODO(b/303645857): Remove fingerprintable metadata if the caller process does not have
+    //                    camera access permission.
+
+    Status res = Status::ok();
+    switch (ret) {
+        case OK:
+            // Expected, no handling needed.
+            break;
+        case INVALID_OPERATION: {
+                std::string msg = fmt::sprintf(
+                        "Camera %s: Session characteristics query not supported!",
+                        cameraId.c_str());
+                ALOGD("%s: %s", __FUNCTION__, msg.c_str());
+                res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.c_str());
+            }
+            break;
+        default: {
+                std::string msg = fmt::sprintf("Camera %s: Error: %s (%d)", cameraId.c_str(),
+                                               strerror(-ret), ret);
+                ALOGE("%s: %s", __FUNCTION__, msg.c_str());
+                res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.c_str());
+            }
+    }
+
+    return res;
+}
+
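A hedged sketch of how a caller inside the system process might invoke the new entry point; the cameraService pointer, cameraId, targetSdkVersion, and sessionConfig values are assumed for illustration and are not part of this patch:

    CameraMetadata sessionCharacteristics;
    binder::Status status = cameraService->getSessionCharacteristics(
            cameraId, targetSdkVersion, /*overrideToPortrait*/ false,
            sessionConfig, &sessionCharacteristics);
    if (!status.isOk()) {
        // INVALID_OPERATION from the provider maps to ERROR_INVALID_OPERATION above;
        // any other failure maps to ERROR_ILLEGAL_ARGUMENT.
        ALOGE("getSessionCharacteristics failed: %s", status.toString8().c_str());
    }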
 Status CameraService::parseCameraIdRemapping(
         const hardware::CameraIdRemapping& cameraIdRemapping,
         /* out */ TCameraIdRemapping* cameraIdRemappingMap) {
@@ -1221,13 +1245,9 @@
     const std::vector<std::string> *deviceIds = &mNormalDeviceIdsWithoutSystemCamera;
     auto callingPid = CameraThreadState::getCallingPid();
     auto callingUid = CameraThreadState::getCallingUid();
-    AttributionSourceState attributionSource{};
-    attributionSource.pid = callingPid;
-    attributionSource.uid = callingUid;
-    bool checkPermissionForSystemCamera = checkPermission(std::to_string(cameraIdInt),
-                sSystemCameraPermission, attributionSource, std::string(),
-                AppOpsManager::OP_NONE);
-    if (checkPermissionForSystemCamera || getpid() == callingPid) {
+    bool systemCameraPermissions = hasPermissionsForSystemCamera(std::to_string(cameraIdInt),
+            callingPid, callingUid, /* checkCameraPermissions= */ false);
+    if (systemCameraPermissions || getpid() == callingPid) {
         deviceIds = &mNormalDeviceIds;
     }
     if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(deviceIds->size())) {
@@ -1300,11 +1320,7 @@
     // If it's not calling from cameraserver, check the permission only if
     // android.permission.CAMERA is required. If android.permission.SYSTEM_CAMERA was needed,
     // it would've already been checked in shouldRejectSystemCameraConnection.
-    AttributionSourceState attributionSource{};
-    attributionSource.pid = callingPid;
-    attributionSource.uid = callingUid;
-    bool checkPermissionForCamera = checkPermission(cameraId, sCameraPermission,
-            attributionSource, std::string(), AppOpsManager::OP_NONE);
+    bool checkPermissionForCamera = hasPermissionsForCamera(cameraId, callingPid, callingUid);
     if ((callingPid != getpid()) &&
             (deviceKind != SystemCameraKind::SYSTEM_ONLY_CAMERA) &&
             !checkPermissionForCamera) {
@@ -1490,7 +1506,7 @@
     if (effectiveApiLevel == API_1) { // Camera1 API route
         sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
         *client = new Camera2Client(cameraService, tmp, cameraService->mCameraServiceProxyWrapper,
-                packageName, featureId, cameraId,
+                cameraService->mAttributionAndPermissionUtils, packageName, featureId, cameraId,
                 api1CameraId, facing, sensorOrientation,
                 clientPid, clientUid, servicePid, overrideForPerfClass, overrideToPortrait,
                 forceSlowJpegMode);
@@ -1500,7 +1516,8 @@
         sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
                 static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
         *client = new CameraDeviceClient(cameraService, tmp,
-                cameraService->mCameraServiceProxyWrapper, packageName, systemNativeClient,
+                cameraService->mCameraServiceProxyWrapper,
+                cameraService->mAttributionAndPermissionUtils, packageName, systemNativeClient,
                 featureId, cameraId, facing, sensorOrientation, clientPid, clientUid, servicePid,
                 overrideForPerfClass, overrideToPortrait, originalCameraId);
         ALOGI("%s: Camera2 API, override to portrait %d", __FUNCTION__, overrideToPortrait);
@@ -1665,35 +1682,13 @@
 }
 
 // Can camera service trust the caller based on the calling UID?
-static bool isTrustedCallingUid(uid_t uid) {
-    switch (uid) {
-        case AID_MEDIA:        // mediaserver
-        case AID_CAMERASERVER: // cameraserver
-        case AID_RADIO:        // telephony
-            return true;
-        default:
-            return false;
-    }
+bool CameraService::isTrustedCallingUid(uid_t uid) const {
+    return mAttributionAndPermissionUtils->isTrustedCallingUid(uid);
 }
 
-static status_t getUidForPackage(const std::string &packageName, int userId, /*inout*/uid_t& uid,
-        int err) {
-    PermissionController pc;
-    uid = pc.getPackageUid(toString16(packageName), 0);
-    if (uid <= 0) {
-        ALOGE("Unknown package: '%s'", packageName.c_str());
-        dprintf(err, "Unknown package: '%s'\n", packageName.c_str());
-        return BAD_VALUE;
-    }
-
-    if (userId < 0) {
-        ALOGE("Invalid user: %d", userId);
-        dprintf(err, "Invalid user: %d\n", userId);
-        return BAD_VALUE;
-    }
-
-    uid = multiuser_get_uid(userId, uid);
-    return NO_ERROR;
+status_t CameraService::getUidForPackage(const std::string &packageName, int userId,
+        /*inout*/uid_t& uid, int err) const {
+    return mAttributionAndPermissionUtils->getUidForPackage(packageName, userId, uid, err);
 }
 
 Status CameraService::validateConnectLocked(const std::string& cameraId,
@@ -1747,8 +1742,6 @@
 Status CameraService::validateClientPermissionsLocked(const std::string& cameraId,
         const std::string& clientName, int& clientUid, int& clientPid,
         /*out*/int& originalClientPid) const {
-    AttributionSourceState attributionSource{};
-
     int callingPid = CameraThreadState::getCallingPid();
     int callingUid = CameraThreadState::getCallingUid();
 
@@ -1795,11 +1788,7 @@
     // If it's not calling from cameraserver, check the permission if the
     // device isn't a system only camera (shouldRejectSystemCameraConnection already checks for
     // android.permission.SYSTEM_CAMERA for system only camera devices).
-    attributionSource.pid = clientPid;
-    attributionSource.uid = clientUid;
-    attributionSource.packageName = clientName;
-    bool checkPermissionForCamera = checkPermission(cameraId, sCameraPermission, attributionSource,
-            std::string(), AppOpsManager::OP_NONE);
+    bool checkPermissionForCamera = hasPermissionsForCamera(cameraId, clientPid, clientUid);
     if (callingPid != getpid() &&
                 (deviceKind != SystemCameraKind::SYSTEM_ONLY_CAMERA) && !checkPermissionForCamera) {
         ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
@@ -1855,8 +1844,9 @@
         // If the System User tries to access the camera when the device is running in
         // headless system user mode, ensure that client has the required permission
         // CAMERA_HEADLESS_SYSTEM_USER.
-        if (isHeadlessSystemUserMode() && (clientUserId == USER_SYSTEM) &&
-                !hasPermissionsForCameraHeadlessSystemUser(cameraId, callingPid, callingUid)) {
+        if (mAttributionAndPermissionUtils->isHeadlessSystemUserMode()
+                && (clientUserId == USER_SYSTEM)
+                && !hasPermissionsForCameraHeadlessSystemUser(cameraId, callingPid, callingUid)) {
             ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
             return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
                     "Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" as Headless System \
@@ -2222,7 +2212,7 @@
     }
 
     // (1) Cameraserver trying to connect, accept.
-    if (CameraThreadState::getCallingPid() == getpid()) {
+    if (mAttributionAndPermissionUtils->isCallerCameraServerNotDelegating()) {
         return false;
     }
     // (2)
@@ -2345,6 +2335,39 @@
     return ret;
 }
 
+bool CameraService::isCameraPrivacyEnabled(const String16& packageName, const std::string& cam_id,
+        int callingPid, int callingUid) {
+    if (!isAutomotiveDevice()) {
+        return mSensorPrivacyPolicy->isCameraPrivacyEnabled();
+    }
+
+    // Automotive privileged clients (AID_AUTOMOTIVE_EVS) using an exterior system camera for
+    // safety-critical use cases cannot be disabled and are exempt from the camera privacy policy.
+    if (isAutomotivePrivilegedClient(callingUid) && isAutomotiveExteriorSystemCamera(cam_id)) {
+        ALOGI("Camera privacy cannot be enabled for automotive privileged client %d "
+                "using camera %s", callingUid, cam_id.c_str());
+        return false;
+    }
+
+    if (mSensorPrivacyPolicy->isCameraPrivacyEnabled(packageName)) {
+        return true;
+    } else if (mSensorPrivacyPolicy->getCameraPrivacyState() == SensorPrivacyManager::DISABLED) {
+        return false;
+    } else if ((mSensorPrivacyPolicy->getCameraPrivacyState()
+            == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_HELPFUL_APPS) ||
+            (mSensorPrivacyPolicy->getCameraPrivacyState()
+            == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_REQUIRED_APPS) ||
+            (mSensorPrivacyPolicy->getCameraPrivacyState() ==
+            SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_APPS)) {
+        return !hasPermissionsForCameraPrivacyAllowlist(callingPid, callingUid);
+    }
+    return false;
+}
+
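The decision order implemented above, summarized for reference (comment form only, not added to the source):

    // Non-automotive device                 -> global camera privacy toggle.
    // Privileged EVS client, exterior cam   -> privacy never applies; returns false.
    // Per-package privacy enabled           -> true.
    // Privacy state DISABLED                -> false.
    // Driver-assistance states              -> false only if the caller holds the camera
    //                                          privacy allowlist permission, otherwise true.
    // Any other state                       -> false.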
 std::string CameraService::getPackageNameFromUid(int clientUid) {
     std::string packageName("");
 
@@ -2617,38 +2640,39 @@
             }
         }
 
-        // Automotive privileged client AID_AUTOMOTIVE_EVS using exterior system camera for use
-        // cases such as rear view and surround view cannot be disabled and are exempt from camera
-        // privacy policy.
-        if ((!isAutomotivePrivilegedClient(packageUid) ||
-                !isAutomotiveExteriorSystemCamera(cameraId))) {
+        bool isCameraPrivacyEnabled;
+        if (flags::camera_privacy_allowlist()) {
             // Set camera muting behavior.
-            bool isCameraPrivacyEnabled =
+            isCameraPrivacyEnabled = this->isCameraPrivacyEnabled(
+                    toString16(client->getPackageName()), cameraId, packagePid, packageUid);
+        } else {
+            isCameraPrivacyEnabled =
                     mSensorPrivacyPolicy->isCameraPrivacyEnabled();
-            if (client->supportsCameraMute()) {
-                client->setCameraMute(
-                        mOverrideCameraMuteMode || isCameraPrivacyEnabled);
-            } else if (isCameraPrivacyEnabled) {
-                // no camera mute supported, but privacy is on! => disconnect
-                ALOGI("Camera mute not supported for package: %s, camera id: %s",
-                        client->getPackageName().c_str(), cameraId.c_str());
-                // Do not hold mServiceLock while disconnecting clients, but
-                // retain the condition blocking other clients from connecting
-                // in mServiceLockWrapper if held.
-                mServiceLock.unlock();
-                // Clear caller identity temporarily so client disconnect PID
-                // checks work correctly
-                int64_t token = CameraThreadState::clearCallingIdentity();
-                // Note AppOp to trigger the "Unblock" dialog
-                client->noteAppOp();
-                client->disconnect();
-                CameraThreadState::restoreCallingIdentity(token);
-                // Reacquire mServiceLock
-                mServiceLock.lock();
+        }
 
-                return STATUS_ERROR_FMT(ERROR_DISABLED,
-                        "Camera \"%s\" disabled due to camera mute", cameraId.c_str());
-            }
+        if (client->supportsCameraMute()) {
+            client->setCameraMute(
+                    mOverrideCameraMuteMode || isCameraPrivacyEnabled);
+        } else if (isCameraPrivacyEnabled) {
+            // no camera mute supported, but privacy is on! => disconnect
+            ALOGI("Camera mute not supported for package: %s, camera id: %s",
+                    client->getPackageName().c_str(), cameraId.c_str());
+            // Do not hold mServiceLock while disconnecting clients, but
+            // retain the condition blocking other clients from connecting
+            // in mServiceLockWrapper if held.
+            mServiceLock.unlock();
+            // Clear caller identity temporarily so client disconnect PID
+            // checks work correctly
+            int64_t token = CameraThreadState::clearCallingIdentity();
+            // Note AppOp to trigger the "Unblock" dialog
+            client->noteAppOp();
+            client->disconnect();
+            CameraThreadState::restoreCallingIdentity(token);
+            // Reacquire mServiceLock
+            mServiceLock.lock();
+
+            return STATUS_ERROR_FMT(ERROR_DISABLED,
+                    "Camera \"%s\" disabled due to camera mute", cameraId.c_str());
         }
 
         if (shimUpdateOnly) {
@@ -3238,22 +3262,6 @@
     return Status::ok();
 }
 
-bool CameraService::hasCameraPermissions() const {
-    int callingPid = CameraThreadState::getCallingPid();
-    int callingUid = CameraThreadState::getCallingUid();
-    AttributionSourceState attributionSource{};
-    attributionSource.pid = callingPid;
-    attributionSource.uid = callingUid;
-    bool res = checkPermission(std::string(), sCameraPermission,
-            attributionSource, std::string(), AppOpsManager::OP_NONE);
-
-    bool hasPermission = ((callingPid == getpid()) || res);
-    if (!hasPermission) {
-        ALOGE("%s: pid %d doesn't have camera permissions", __FUNCTION__, callingPid);
-    }
-    return hasPermission;
-}
-
 Status CameraService::isConcurrentSessionConfigurationSupported(
         const std::vector<CameraIdAndSessionConfiguration>& cameraIdsAndSessionConfigurations,
         int targetSdkVersion, /*out*/bool* isSupported) {
@@ -3269,7 +3277,11 @@
     }
 
     // Check for camera permissions
-    if (!hasCameraPermissions()) {
+    int callingPid = CameraThreadState::getCallingPid();
+    int callingUid = CameraThreadState::getCallingUid();
+    bool hasCameraPermission = ((callingPid == getpid()) ||
+            hasPermissionsForCamera(callingPid, callingUid));
+    if (!hasCameraPermission) {
         return STATUS_ERROR(ERROR_PERMISSION_DENIED,
                 "android.permission.CAMERA needed to call"
                 "isConcurrentSessionConfigurationSupported");
@@ -3313,15 +3325,9 @@
         return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Null listener given to addListener");
     }
 
-    auto clientUid = CameraThreadState::getCallingUid();
     auto clientPid = CameraThreadState::getCallingPid();
-    AttributionSourceState attributionSource{};
-    attributionSource.uid = clientUid;
-    attributionSource.pid = clientPid;
-
-   bool openCloseCallbackAllowed = checkPermission(std::string(),
-            sCameraOpenCloseListenerPermission, attributionSource, std::string(),
-            AppOpsManager::OP_NONE);
+    auto clientUid = CameraThreadState::getCallingUid();
+    bool openCloseCallbackAllowed = hasPermissionsForOpenCloseListener(clientPid, clientUid);
 
     Mutex::Autolock lock(mServiceLock);
 
@@ -3964,6 +3970,7 @@
 
 CameraService::Client::Client(const sp<CameraService>& cameraService,
         const sp<ICameraClient>& cameraClient,
+        std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
         const std::string& clientPackageName, bool systemNativeClient,
         const std::optional<std::string>& clientFeatureId,
         const std::string& cameraIdStr,
@@ -3972,6 +3979,7 @@
         int servicePid, bool overrideToPortrait) :
         CameraService::BasicClient(cameraService,
                 IInterface::asBinder(cameraClient),
+                attributionAndPermissionUtils,
                 clientPackageName, systemNativeClient, clientFeatureId,
                 cameraIdStr, cameraFacing, sensorOrientation,
                 clientPid, clientUid,
@@ -4002,10 +4010,12 @@
 
 CameraService::BasicClient::BasicClient(const sp<CameraService>& cameraService,
         const sp<IBinder>& remoteCallback,
+        std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
         const std::string& clientPackageName, bool nativeClient,
         const std::optional<std::string>& clientFeatureId, const std::string& cameraIdStr,
         int cameraFacing, int sensorOrientation, int clientPid, uid_t clientUid,
         int servicePid, bool overrideToPortrait):
+        mAttributionAndPermissionUtils(attributionAndPermissionUtils),
         mDestructionStarted(false),
         mCameraIdStr(cameraIdStr), mCameraFacing(cameraFacing), mOrientation(sensorOrientation),
         mClientPackageName(clientPackageName), mSystemNativeClient(nativeClient),
@@ -4038,7 +4048,7 @@
         mAppOpsManager = std::make_unique<AppOpsManager>();
     }
 
-    mUidIsTrusted = isTrustedCallingUid(mClientUid);
+    mUidIsTrusted = mAttributionAndPermissionUtils->isTrustedCallingUid(mClientUid);
 }
 
 CameraService::BasicClient::~BasicClient() {
@@ -4169,8 +4179,15 @@
         // return MODE_IGNORED. Do not treat such case as error.
         bool isUidActive = sCameraService->mUidPolicy->isUidActive(mClientUid,
                 mClientPackageName);
-        bool isCameraPrivacyEnabled =
+
+        bool isCameraPrivacyEnabled;
+        if (flags::camera_privacy_allowlist()) {
+            isCameraPrivacyEnabled = sCameraService->isCameraPrivacyEnabled(
+                    toString16(mClientPackageName), std::string(), mClientPid, mClientUid);
+        } else {
+            isCameraPrivacyEnabled =
                 sCameraService->mSensorPrivacyPolicy->isCameraPrivacyEnabled();
+        }
         // We don't want to return EACCESS if the CameraPrivacy is enabled.
         // We prefer to successfully open the camera and perform camera muting
         // or blocking in connectHelper as handleAppOpMode can be called before the
@@ -4196,8 +4213,15 @@
     if (mAppOpsManager != nullptr) {
         // Notify app ops that the camera is not available
         mOpsCallback = new OpsCallback(this);
-        mAppOpsManager->startWatchingMode(AppOpsManager::OP_CAMERA,
+
+        if (flags::watch_foreground_changes()) {
+            mAppOpsManager->startWatchingMode(AppOpsManager::OP_CAMERA,
+                toString16(mClientPackageName),
+                AppOpsManager::WATCH_FOREGROUND_CHANGES, mOpsCallback);
+        } else {
+            mAppOpsManager->startWatchingMode(AppOpsManager::OP_CAMERA,
                 toString16(mClientPackageName), mOpsCallback);
+        }
 
         // Just check for camera access here on open - delay startOp until
         // camera frames start streaming in startCameraStreamingOps
@@ -4357,20 +4381,42 @@
         block();
     } else if (res == AppOpsManager::MODE_IGNORED) {
         bool isUidActive = sCameraService->mUidPolicy->isUidActive(mClientUid, mClientPackageName);
-        bool isCameraPrivacyEnabled =
+
+        // Uid may be active, but not visible to the user (e.g. PROCESS_STATE_FOREGROUND_SERVICE).
+        // If not visible, but still active, then we want to block instead of muting the camera.
+        int32_t procState = sCameraService->mUidPolicy->getProcState(mClientUid);
+        bool isUidVisible = (procState <= ActivityManager::PROCESS_STATE_BOUND_TOP);
+
+        bool isCameraPrivacyEnabled;
+        if (flags::camera_privacy_allowlist()) {
+            isCameraPrivacyEnabled = sCameraService->isCameraPrivacyEnabled(
+                    toString16(mClientPackageName), std::string(), mClientPid, mClientUid);
+        } else {
+            isCameraPrivacyEnabled =
                 sCameraService->mSensorPrivacyPolicy->isCameraPrivacyEnabled();
-        ALOGI("Camera %s: Access for \"%s\" has been restricted, isUidTrusted %d, isUidActive %d",
-                mCameraIdStr.c_str(), mClientPackageName.c_str(),
-                mUidIsTrusted, isUidActive);
-        // If the calling Uid is trusted (a native service), or the client Uid is active (WAR for
-        // b/175320666), the AppOpsManager could return MODE_IGNORED. Do not treat such cases as
-        // error.
+        }
+
+        ALOGI("Camera %s: Access for \"%s\" has been restricted, isUidTrusted %d, isUidActive %d"
+                " isUidVisible %d, isCameraPrivacyEnabled %d", mCameraIdStr.c_str(),
+                mClientPackageName.c_str(), mUidIsTrusted, isUidActive, isUidVisible,
+                isCameraPrivacyEnabled);
+        // If the calling Uid is trusted (a native service), or the client Uid is active / visible
+        // (WAR for b/175320666)the AppOpsManager could return MODE_IGNORED. Do not treat such
+        // cases as error.
         if (!mUidIsTrusted) {
-            if (isUidActive && isCameraPrivacyEnabled && supportsCameraMute()) {
-                setCameraMute(true);
-            } else if (!isUidActive
-                || (isCameraPrivacyEnabled && !supportsCameraMute())) {
-                block();
+            if (flags::watch_foreground_changes()) {
+                if (isUidVisible && isCameraPrivacyEnabled && supportsCameraMute()) {
+                    setCameraMute(true);
+                } else {
+                    block();
+                }
+            } else {
+                if (isUidActive && isCameraPrivacyEnabled && supportsCameraMute()) {
+                    setCameraMute(true);
+                } else if (!isUidActive
+                    || (isCameraPrivacyEnabled && !supportsCameraMute())) {
+                    block();
+                }
             }
         }
     } else if (res == AppOpsManager::MODE_ALLOWED) {
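For untrusted UIDs, the MODE_IGNORED handling above reduces to the following (summary only):

    // With flags::watch_foreground_changes():
    //   uid visible + privacy enabled + mute supported -> setCameraMute(true)
    //   anything else                                  -> block()
    // Without the flag (legacy behavior):
    //   uid active + privacy enabled + mute supported  -> setCameraMute(true)
    //   uid inactive, or privacy on without mute       -> block()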
@@ -4741,7 +4787,15 @@
     }
     hasCameraPrivacyFeature(); // Called so the result is cached
     mSpm.addSensorPrivacyListener(this);
+    if (mAttributionAndPermissionUtils->isAutomotiveDevice()) {
+        mSpm.addToggleSensorPrivacyListener(this);
+    }
     mSensorPrivacyEnabled = mSpm.isSensorPrivacyEnabled();
+    if (flags::camera_privacy_allowlist()) {
+        mCameraPrivacyState = mSpm.getToggleSensorPrivacyState(
+                SensorPrivacyManager::TOGGLE_TYPE_SOFTWARE,
+                SensorPrivacyManager::TOGGLE_SENSOR_CAMERA);
+    }
     status_t res = mSpm.linkToDeath(this);
     if (res == OK) {
         mRegistered = true;
@@ -4773,6 +4827,9 @@
 void CameraService::SensorPrivacyPolicy::unregisterSelf() {
     Mutex::Autolock _l(mSensorPrivacyLock);
     mSpm.removeSensorPrivacyListener(this);
+    if (mAttributionAndPermissionUtils->isAutomotiveDevice()) {
+        mSpm.removeToggleSensorPrivacyListener(this);
+    }
     mSpm.unlinkToDeath(this);
     mRegistered = false;
     ALOGV("SensorPrivacyPolicy: Unregistered with SensorPrivacyManager");
@@ -4787,6 +4844,15 @@
     return mSensorPrivacyEnabled;
 }
 
+int CameraService::SensorPrivacyPolicy::getCameraPrivacyState() {
+    if (!mRegistered) {
+        registerWithSensorPrivacyManager();
+    }
+
+    Mutex::Autolock _l(mSensorPrivacyLock);
+    return mCameraPrivacyState;
+}
+
 bool CameraService::SensorPrivacyPolicy::isCameraPrivacyEnabled() {
     if (!hasCameraPrivacyFeature()) {
         return false;
@@ -4794,18 +4860,53 @@
     return mSpm.isToggleSensorPrivacyEnabled(SensorPrivacyManager::TOGGLE_SENSOR_CAMERA);
 }
 
+bool CameraService::SensorPrivacyPolicy::isCameraPrivacyEnabled(const String16& packageName) {
+    if (!hasCameraPrivacyFeature()) {
+        return false;
+    }
+    return mSpm.isCameraPrivacyEnabled(packageName);
+}
+
 binder::Status CameraService::SensorPrivacyPolicy::onSensorPrivacyChanged(
-    int toggleType __unused, int sensor __unused, bool enabled) {
+    int toggleType, int sensor, bool enabled) {
+    if ((toggleType == SensorPrivacyManager::TOGGLE_TYPE_UNKNOWN)
+            && (sensor == SensorPrivacyManager::TOGGLE_SENSOR_UNKNOWN)) {
+        {
+            Mutex::Autolock _l(mSensorPrivacyLock);
+            mSensorPrivacyEnabled = enabled;
+        }
+        // if sensor privacy is enabled then block all clients from accessing the camera
+        if (enabled) {
+            sp<CameraService> service = mService.promote();
+            if (service != nullptr) {
+                service->blockAllClients();
+            }
+        }
+    }
+    return binder::Status::ok();
+}
+
+binder::Status CameraService::SensorPrivacyPolicy::onSensorPrivacyStateChanged(
+    int, int sensor, int state) {
+    if (!flags::camera_privacy_allowlist()
+            || (sensor != SensorPrivacyManager::TOGGLE_SENSOR_CAMERA)) {
+        return binder::Status::ok();
+    }
     {
         Mutex::Autolock _l(mSensorPrivacyLock);
-        mSensorPrivacyEnabled = enabled;
+        mCameraPrivacyState = state;
+    }
+    sp<CameraService> service = mService.promote();
+    if (!service) {
+        return binder::Status::ok();
     }
     // if sensor privacy is enabled then block all clients from accessing the camera
-    if (enabled) {
-        sp<CameraService> service = mService.promote();
-        if (service != nullptr) {
-            service->blockAllClients();
-        }
+    if (state == SensorPrivacyManager::ENABLED) {
+        service->blockAllClients();
+    } else if ((state == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_APPS)
+            || (state == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_HELPFUL_APPS)
+            || (state == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_REQUIRED_APPS)) {
+        service->blockPrivacyEnabledClients();
     }
     return binder::Status::ok();
 }
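The new state callback therefore dispatches on the software-toggle state roughly as follows (summary only):

    // ENABLED                               -> blockAllClients()
    // AUTOMOTIVE_DRIVER_ASSISTANCE_* states -> blockPrivacyEnabledClients(), i.e. only clients
    //                                          whose package-level camera privacy is enabled
    // DISABLED or any other state           -> update the cached state, evict nothing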
@@ -5676,6 +5777,23 @@
     }
 }
 
+void CameraService::blockPrivacyEnabledClients() {
+    const auto clients = mActiveClientManager.getAll();
+    for (auto& current : clients) {
+        if (current != nullptr) {
+            const auto basicClient = current->getValue();
+            if (basicClient.get() != nullptr) {
+                std::string pkgName = basicClient->getPackageName();
+                bool cameraPrivacyEnabled =
+                        mSensorPrivacyPolicy->isCameraPrivacyEnabled(toString16(pkgName));
+                if (cameraPrivacyEnabled) {
+                    basicClient->block();
+                }
+            }
+        }
+    }
+}
+
 // NOTE: This is a remote API - make sure all args are validated
 status_t CameraService::shellCommand(int in, int out, int err, const Vector<String16>& args) {
     if (!checkCallingPermission(toString16(sManageCameraPermission), nullptr, nullptr)) {
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 1487013..11cf1a1 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -53,6 +53,7 @@
 #include "utils/ClientManager.h"
 #include "utils/IPCTransport.h"
 #include "utils/CameraServiceProxyWrapper.h"
+#include "utils/AttributionAndPermissionUtils.h"
 
 #include <set>
 #include <string>
@@ -119,7 +120,9 @@
                         // Non-null arguments for cameraServiceProxyWrapper should be provided for
                         // testing purposes only.
                         CameraService(std::shared_ptr<CameraServiceProxyWrapper>
-                                cameraServiceProxyWrapper = nullptr);
+                                cameraServiceProxyWrapper = nullptr,
+                                std::shared_ptr<AttributionAndPermissionUtils>
+                                attributionAndPermissionUtils = nullptr);
     virtual             ~CameraService();
 
     /////////////////////////////////////////////////////////////////////
@@ -248,6 +251,11 @@
             /*out*/
             bool* supported);
 
+    virtual binder::Status getSessionCharacteristics(
+            const std::string& cameraId, int targetSdkVersion, bool overrideToPortrait,
+            const SessionConfiguration& sessionConfiguration,
+            /*out*/ CameraMetadata* outMetadata);
+
     // Extra permissions checks
     virtual status_t    onTransact(uint32_t code, const Parcel& data,
                                    Parcel* reply, uint32_t flags);
@@ -309,6 +317,21 @@
     // Shared utilities
     static binder::Status filterGetInfoErrorCode(status_t err);
 
+    bool isAutomotiveDevice() const;
+
+    /**
+     * Returns true if the client has uid AID_AUTOMOTIVE_EVS and the device is an automotive device.
+     */
+    bool isAutomotivePrivilegedClient(int32_t uid) const;
+
+    /**
+     * Returns true if the device is an automotive device and cameraId is a system-only
+     * camera whose AUTOMOTIVE_LOCATION characteristic is one of
+     * AUTOMOTIVE_LOCATION_EXTERIOR_LEFT, AUTOMOTIVE_LOCATION_EXTERIOR_RIGHT,
+     * AUTOMOTIVE_LOCATION_EXTERIOR_FRONT or AUTOMOTIVE_LOCATION_EXTERIOR_REAR.
+     */
+    bool isAutomotiveExteriorSystemCamera(const std::string& cameraId) const;
+
     /////////////////////////////////////////////////////////////////////
     // CameraClient functionality
 
@@ -423,6 +446,7 @@
     protected:
         BasicClient(const sp<CameraService>& cameraService,
                 const sp<IBinder>& remoteCallback,
+                std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
                 const std::string& clientPackageName,
                 bool nativeClient,
                 const std::optional<std::string>& clientFeatureId,
@@ -436,6 +460,8 @@
 
         virtual ~BasicClient();
 
+        std::shared_ptr<AttributionAndPermissionUtils> mAttributionAndPermissionUtils;
+
         // the instance is in the middle of destruction. When this is set,
         // the instance should not be accessed from callback.
         // CameraService's mClientLock should be acquired to access this.
@@ -536,6 +562,7 @@
         // Interface used by CameraService
         Client(const sp<CameraService>& cameraService,
                 const sp<hardware::ICameraClient>& cameraClient,
+                std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
                 const std::string& clientPackageName,
                 bool systemNativeClient,
                 const std::optional<std::string>& clientFeatureId,
@@ -639,13 +666,6 @@
     int32_t updateAudioRestrictionLocked();
 
 private:
-    /**
-     * Returns true if the device is an automotive device and cameraId is system
-     * only camera which has characteristic AUTOMOTIVE_LOCATION value as either
-     * AUTOMOTIVE_LOCATION_EXTERIOR_LEFT,AUTOMOTIVE_LOCATION_EXTERIOR_RIGHT,
-     * AUTOMOTIVE_LOCATION_EXTERIOR_FRONT or AUTOMOTIVE_LOCATION_EXTERIOR_REAR.
-     */
-    bool isAutomotiveExteriorSystemCamera(const std::string& cameraId) const;
 
     // TODO: b/263304156 update this to make use of a death callback for more
     // robust/fault tolerant logging
@@ -661,26 +681,20 @@
         return activityManager;
     }
 
-    /**
-     * Pre-grants the permission if the attribution source uid is for an automotive
-     * privileged client. Otherwise uses system service permission checker to check
-     * for the appropriate permission. If this function is called for accessing a specific
-     * camera,then the cameraID must not be empty. CameraId is used only in case of automotive
-     * privileged client so that permission is pre-granted only to access system camera device
-     * which is located outside of the vehicle body frame because camera located inside the vehicle
-     * cabin would need user permission.
-     */
-    bool checkPermission(const std::string& cameraId, const std::string& permission,
-            const content::AttributionSourceState& attributionSource, const std::string& message,
-            int32_t attributedOpCode) const;
+    bool hasPermissionsForCamera(int callingPid, int callingUid) const;
 
-    bool hasPermissionsForSystemCamera(const std::string& cameraId, int callingPid, int callingUid)
-            const;
+    bool hasPermissionsForCamera(const std::string& cameraId, int callingPid, int callingUid) const;
+
+    bool hasPermissionsForSystemCamera(const std::string& cameraId, int callingPid, int callingUid,
+            bool checkCameraPermissions = true) const;
 
     bool hasPermissionsForCameraHeadlessSystemUser(const std::string& cameraId, int callingPid,
             int callingUid) const;
 
-    bool hasCameraPermissions() const;
+    bool hasPermissionsForCameraPrivacyAllowlist(int callingPid, int callingUid) const;
+
+    bool hasPermissionsForOpenCloseListener(int callingPid, int callingUid) const;
+
    /**
      * Typesafe version of device status, containing both the HAL-layer and the service interface-
      * layer values.
@@ -867,17 +881,24 @@
             public virtual IBinder::DeathRecipient,
             public virtual IServiceManager::LocalRegistrationCallback {
         public:
-            explicit SensorPrivacyPolicy(wp<CameraService> service)
-                    : mService(service), mSensorPrivacyEnabled(false), mRegistered(false) {}
+            explicit SensorPrivacyPolicy(wp<CameraService> service,
+                    std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils)
+                    : mService(service),
+                      mAttributionAndPermissionUtils(attributionAndPermissionUtils),
+                      mSensorPrivacyEnabled(false),
+                      mCameraPrivacyState(SensorPrivacyManager::DISABLED),
+                      mRegistered(false) {}
 
             void registerSelf();
             void unregisterSelf();
 
             bool isSensorPrivacyEnabled();
             bool isCameraPrivacyEnabled();
+            int getCameraPrivacyState();
+            bool isCameraPrivacyEnabled(const String16& packageName);
 
             binder::Status onSensorPrivacyChanged(int toggleType, int sensor,
                                                   bool enabled);
+            binder::Status onSensorPrivacyStateChanged(int toggleType, int sensor, int state);
 
             // Implementation of IServiceManager::LocalRegistrationCallback
             virtual void onServiceRegistration(const String16& name,
@@ -888,8 +909,10 @@
         private:
             SensorPrivacyManager mSpm;
             wp<CameraService> mService;
+            std::shared_ptr<AttributionAndPermissionUtils> mAttributionAndPermissionUtils;
             Mutex mSensorPrivacyLock;
             bool mSensorPrivacyEnabled;
+            int mCameraPrivacyState;
             bool mRegistered;
 
             bool hasCameraPrivacyFeature();
@@ -901,6 +924,7 @@
     sp<SensorPrivacyPolicy> mSensorPrivacyPolicy;
 
     std::shared_ptr<CameraServiceProxyWrapper> mCameraServiceProxyWrapper;
+    std::shared_ptr<AttributionAndPermissionUtils> mAttributionAndPermissionUtils;
 
     // Delay-load the Camera HAL module
     virtual void onFirstRef();
@@ -913,6 +937,11 @@
     void addStates(const std::string& id);
     void removeStates(const std::string& id);
 
+    bool isTrustedCallingUid(uid_t uid) const;
+
+    status_t getUidForPackage(const std::string &packageName, int userId,
+            /*inout*/uid_t& uid, int err) const;
+
     // Check if we can connect, before we acquire the service lock.
     // The returned originalClientPid is the PID of the original process that wants to connect to
     // camera.
@@ -926,6 +955,9 @@
             const std::string& clientName, /*inout*/int& clientUid, /*inout*/int& clientPid,
             /*out*/int& originalClientPid) const;
 
+    bool isCameraPrivacyEnabled(const String16& packageName,const std::string& cameraId,
+           int clientPid, int ClientUid);
+
     // Handle active client evictions, and update service state.
     // Only call with with mServiceLock held.
     status_t handleEvictionsLocked(const std::string& cameraId, int clientPid,
@@ -1385,6 +1417,9 @@
     // Blocks all active clients.
     void blockAllClients();
 
+    // Blocks clients whose privacy is enabled.
+    void blockPrivacyEnabledClients();
+
     // Overrides the UID state as if it is idle
     status_t handleSetUidState(const Vector<String16>& args, int err);
 
diff --git a/services/camera/libcameraservice/aidl/AidlCameraDeviceUser.cpp b/services/camera/libcameraservice/aidl/AidlCameraDeviceUser.cpp
index 954cb8b..9e6a925 100644
--- a/services/camera/libcameraservice/aidl/AidlCameraDeviceUser.cpp
+++ b/services/camera/libcameraservice/aidl/AidlCameraDeviceUser.cpp
@@ -20,6 +20,7 @@
 #include <aidl/AidlUtils.h>
 #include <aidl/android/frameworks/cameraservice/device/CaptureMetadataInfo.h>
 #include <android-base/properties.h>
+#include <utils/Utils.h>
 
 namespace android::frameworks::cameraservice::device::implementation {
 
@@ -56,7 +57,7 @@
 AidlCameraDeviceUser::AidlCameraDeviceUser(const sp<UICameraDeviceUser>& deviceRemote):
       mDeviceRemote(deviceRemote) {
     mInitSuccess = initDevice();
-    mVndkVersion = base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
 }
 
 bool AidlCameraDeviceUser::initDevice() {
diff --git a/services/camera/libcameraservice/aidl/AidlCameraService.cpp b/services/camera/libcameraservice/aidl/AidlCameraService.cpp
index 8cd7d1f..79dbfed 100644
--- a/services/camera/libcameraservice/aidl/AidlCameraService.cpp
+++ b/services/camera/libcameraservice/aidl/AidlCameraService.cpp
@@ -27,6 +27,7 @@
 #include <android/binder_manager.h>
 #include <binder/Status.h>
 #include <hidl/HidlTransportSupport.h>
+#include <utils/Utils.h>
 
 namespace android::frameworks::cameraservice::service::implementation {
 
@@ -79,7 +80,7 @@
 
 AidlCameraService::AidlCameraService(::android::CameraService* cameraService):
       mCameraService(cameraService) {
-    mVndkVersion = base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
 }
 ScopedAStatus AidlCameraService::getCameraCharacteristics(const std::string& in_cameraId,
                                                           SCameraMetadata* _aidl_return) {
diff --git a/services/camera/libcameraservice/aidl/AidlUtils.cpp b/services/camera/libcameraservice/aidl/AidlUtils.cpp
index f5d68eb..14e5fad 100644
--- a/services/camera/libcameraservice/aidl/AidlUtils.cpp
+++ b/services/camera/libcameraservice/aidl/AidlUtils.cpp
@@ -17,12 +17,13 @@
 #define LOG_TAG "AidlUtils"
 
 #include <aidl/AidlUtils.h>
+#include <aidl/ExtensionMetadataTags.h>
 #include <aidl/VndkVersionMetadataTags.h>
 #include <aidlcommonsupport/NativeHandle.h>
+#include <camera/StringUtils.h>
 #include <device3/Camera3StreamInterface.h>
 #include <gui/bufferqueue/1.0/H2BGraphicBufferProducer.h>
 #include <mediautils/AImageReaderUtils.h>
-#include <camera/StringUtils.h>
 
 namespace android::hardware::cameraservice::utils::conversion::aidl {
 
@@ -310,8 +311,8 @@
 
 status_t filterVndkKeys(int vndkVersion, CameraMetadata &metadata, bool isStatic) {
     if (vndkVersion == __ANDROID_API_FUTURE__) {
-        // VNDK version in ro.vndk.version is a version code-name that
-        // corresponds to the current version.
+        // VNDK version derived from ro.board.api_level is a version code-name that
+        // corresponds to the current SDK version.
         return OK;
     }
     const auto &apiLevelToKeys =
@@ -333,4 +334,47 @@
     return OK;
 }
 
+bool areExtensionKeysSupported(const CameraMetadata& metadata) {
+    auto requestKeys = metadata.find(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS);
+    if (requestKeys.count == 0) {
+        ALOGE("%s: No ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS entries!", __FUNCTION__);
+        return false;
+    }
+
+    auto resultKeys = metadata.find(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS);
+    if (resultKeys.count == 0) {
+        ALOGE("%s: No ANDROID_REQUEST_AVAILABLE_RESULT_KEYS entries!", __FUNCTION__);
+        return false;
+    }
+
+    for (const auto& extensionKey : extension_metadata_keys) {
+        if (std::find(requestKeys.data.i32, requestKeys.data.i32 + requestKeys.count, extensionKey)
+                != requestKeys.data.i32 + requestKeys.count) {
+            return true;
+        }
+
+        if (std::find(resultKeys.data.i32, resultKeys.data.i32 + resultKeys.count, extensionKey)
+                != resultKeys.data.i32 + resultKeys.count) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+status_t filterExtensionKeys(CameraMetadata* metadata /*out*/) {
+    if (metadata == nullptr) {
+        return BAD_VALUE;
+    }
+
+    for (const auto& key : extension_metadata_keys) {
+        status_t res = metadata->erase(key);
+        if (res != OK) {
+            ALOGE("%s metadata key %d could not be erased", __FUNCTION__, key);
+            return res;
+        }
+    }
+    return OK;
+}
+
 } // namespace android::hardware::cameraservice::utils::conversion::aidl
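A hedged sketch of how the two helpers above could be combined at a (v)ndk call site; the staticCharacteristics and resultMetadata variables are assumed, since the call sites are not part of this hunk:

    if (!areExtensionKeysSupported(staticCharacteristics)) {
        // Device does not advertise the extension tags; strip them before returning
        // metadata to the remote client.
        status_t res = filterExtensionKeys(&resultMetadata);
        if (res != OK) {
            ALOGE("Failed to filter extension metadata keys: %d", res);
        }
    }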
diff --git a/services/camera/libcameraservice/aidl/AidlUtils.h b/services/camera/libcameraservice/aidl/AidlUtils.h
index c89d7ff..562aa70 100644
--- a/services/camera/libcameraservice/aidl/AidlUtils.h
+++ b/services/camera/libcameraservice/aidl/AidlUtils.h
@@ -122,6 +122,9 @@
 
 status_t filterVndkKeys(int vndkVersion, CameraMetadata &metadata, bool isStatic = true);
 
+bool areExtensionKeysSupported(const CameraMetadata& metadata);
+
+status_t filterExtensionKeys(CameraMetadata* metadata /*out*/);
 } // namespace android::hardware::cameraservice::utils::conversion::aidl
 
 #endif  // FRAMEWORKS_AV_SERVICES_CAMERA_LIBCAMERASERVICE_AIDL_AIDLUTILS_H_
diff --git a/services/camera/libcameraservice/aidl/ExtensionMetadataTags.h b/services/camera/libcameraservice/aidl/ExtensionMetadataTags.h
new file mode 100644
index 0000000..86af36c
--- /dev/null
+++ b/services/camera/libcameraservice/aidl/ExtensionMetadataTags.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include <vector>
+/**
+ * ! Do not edit this file directly !
+ *
+ * Generated automatically from extensions_camera_metadata_tags.mako. To be included in libcameraservice
+ * only by aidl/AidlUtils.cpp.
+ */
+
+/**
+ * Camera extension metadata keys. Used by aidl/AidlUtils.cpp to detect whether a device
+ * advertises extension support and to filter these tags out of metadata returned to
+ * clients that do not support them.
+ */
+std::vector<camera_metadata_tag> extension_metadata_keys{
+            ANDROID_EXTENSION_STRENGTH,
+            ANDROID_EXTENSION_CURRENT_TYPE,
+            ANDROID_EFV_PADDING_ZOOM_FACTOR,
+            ANDROID_EFV_AUTO_ZOOM,
+            ANDROID_EFV_MAX_PADDING_ZOOM_FACTOR,
+            ANDROID_EFV_STABILIZATION_MODE,
+            ANDROID_EFV_TRANSLATE_VIEWPORT,
+            ANDROID_EFV_ROTATE_VIEWPORT,
+            ANDROID_EFV_PADDING_REGION,
+            ANDROID_EFV_AUTO_ZOOM_PADDING_REGION,
+            ANDROID_EFV_TARGET_COORDINATES,
+};
diff --git a/services/camera/libcameraservice/aidl/VndkVersionMetadataTags.h b/services/camera/libcameraservice/aidl/VndkVersionMetadataTags.h
index e403b97..7965474 100644
--- a/services/camera/libcameraservice/aidl/VndkVersionMetadataTags.h
+++ b/services/camera/libcameraservice/aidl/VndkVersionMetadataTags.h
@@ -78,6 +78,7 @@
           ANDROID_CONTROL_AUTOFRAMING_AVAILABLE,
           ANDROID_CONTROL_AVAILABLE_SETTINGS_OVERRIDES,
           ANDROID_CONTROL_LOW_LIGHT_BOOST_INFO_LUMINANCE_RANGE,
+          ANDROID_EFV_PADDING_ZOOM_FACTOR_RANGE,
           ANDROID_FLASH_SINGLE_STRENGTH_DEFAULT_LEVEL,
           ANDROID_FLASH_SINGLE_STRENGTH_MAX_LEVEL,
           ANDROID_FLASH_TORCH_STRENGTH_DEFAULT_LEVEL,
@@ -112,6 +113,15 @@
           ANDROID_CONTROL_LOW_LIGHT_BOOST_STATE,
           ANDROID_CONTROL_SETTINGS_OVERRIDE,
           ANDROID_CONTROL_SETTINGS_OVERRIDING_FRAME_NUMBER,
+          ANDROID_EFV_AUTO_ZOOM,
+          ANDROID_EFV_AUTO_ZOOM_PADDING_REGION,
+          ANDROID_EFV_MAX_PADDING_ZOOM_FACTOR,
+          ANDROID_EFV_PADDING_REGION,
+          ANDROID_EFV_PADDING_ZOOM_FACTOR,
+          ANDROID_EFV_ROTATE_VIEWPORT,
+          ANDROID_EFV_STABILIZATION_MODE,
+          ANDROID_EFV_TARGET_COORDINATES,
+          ANDROID_EFV_TRANSLATE_VIEWPORT,
           ANDROID_EXTENSION_CURRENT_TYPE,
           ANDROID_EXTENSION_STRENGTH,
           ANDROID_FLASH_STRENGTH_LEVEL,
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index caa6424..19e2999 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -56,6 +56,7 @@
 Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
         const sp<hardware::ICameraClient>& cameraClient,
         std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper,
+        std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
         const std::string& clientPackageName,
         const std::optional<std::string>& clientFeatureId,
         const std::string& cameraDeviceId,
@@ -68,7 +69,8 @@
         bool overrideForPerfClass,
         bool overrideToPortrait,
         bool forceSlowJpegMode):
-        Camera2ClientBase(cameraService, cameraClient, cameraServiceProxyWrapper, clientPackageName,
+        Camera2ClientBase(cameraService, cameraClient, cameraServiceProxyWrapper,
+                attributionAndPermissionUtils, clientPackageName,
                 false/*systemNativeClient - since no ndk for api1*/, clientFeatureId,
                 cameraDeviceId, api1CameraId, cameraFacing, sensorOrientation, clientPid,
                 clientUid, servicePid, overrideForPerfClass, overrideToPortrait,
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 2cb7af0..2654a25 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -103,6 +103,7 @@
     Camera2Client(const sp<CameraService>& cameraService,
             const sp<hardware::ICameraClient>& cameraClient,
             std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper,
+            std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
             const std::string& clientPackageName,
             const std::optional<std::string>& clientFeatureId,
             const std::string& cameraDeviceId,
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 3488629..7c2f71c 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -61,6 +61,7 @@
 CameraDeviceClientBase::CameraDeviceClientBase(
         const sp<CameraService>& cameraService,
         const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
+        std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
         const std::string& clientPackageName,
         bool systemNativeClient,
         const std::optional<std::string>& clientFeatureId,
@@ -74,6 +75,7 @@
         bool overrideToPortrait) :
     BasicClient(cameraService,
             IInterface::asBinder(remoteCallback),
+            attributionAndPermissionUtils,
             clientPackageName,
             systemNativeClient,
             clientFeatureId,
@@ -92,6 +94,7 @@
 CameraDeviceClient::CameraDeviceClient(const sp<CameraService>& cameraService,
         const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
         std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper,
+        std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
         const std::string& clientPackageName,
         bool systemNativeClient,
         const std::optional<std::string>& clientFeatureId,
@@ -104,7 +107,8 @@
         bool overrideForPerfClass,
         bool overrideToPortrait,
         const std::string& originalCameraId) :
-    Camera2ClientBase(cameraService, remoteCallback, cameraServiceProxyWrapper, clientPackageName,
+    Camera2ClientBase(cameraService, remoteCallback, cameraServiceProxyWrapper,
+            attributionAndPermissionUtils, clientPackageName,
             systemNativeClient, clientFeatureId, cameraId, /*API1 camera ID*/ -1, cameraFacing,
             sensorOrientation, clientPid, clientUid, servicePid, overrideForPerfClass,
             overrideToPortrait),
@@ -777,60 +781,6 @@
     return res;
 }
 
-binder::Status CameraDeviceClient::getSessionCharacteristics(
-        const SessionConfiguration& sessionConfiguration,
-        /*out*/
-        hardware::camera2::impl::CameraMetadataNative* sessionCharacteristics) {
-    ATRACE_CALL();
-    binder::Status res;
-    status_t ret = OK;
-    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) {
-        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
-    }
-
-    auto operatingMode = sessionConfiguration.getOperatingMode();
-    res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
-            mCameraIdStr);
-    if (!res.isOk()) {
-        return res;
-    }
-
-    camera3::metadataGetter getMetadata = [this](const std::string &id,
-            bool /*overrideForPerfClass*/) {
-          return mDevice->infoPhysical(id);};
-    ret = mProviderManager->getSessionCharacteristics(mCameraIdStr.c_str(),
-            sessionConfiguration, mOverrideForPerfClass, getMetadata,
-            sessionCharacteristics);
-
-    switch (ret) {
-        case OK:
-            // Expected, do nothing.
-            break;
-        case INVALID_OPERATION: {
-                std::string msg = fmt::sprintf(
-                        "Camera %s: Session characteristics query not supported!",
-                        mCameraIdStr.c_str());
-                ALOGD("%s: %s", __FUNCTION__, msg.c_str());
-                res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.c_str());
-            }
-
-            break;
-        default: {
-                std::string msg = fmt::sprintf( "Camera %s: Error: %s (%d)", mCameraIdStr.c_str(),
-                        strerror(-ret), ret);
-                ALOGE("%s: %s", __FUNCTION__, msg.c_str());
-                res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
-                        msg.c_str());
-            }
-    }
-
-    return res;
-}
-
 binder::Status CameraDeviceClient::deleteStream(int streamId) {
     ATRACE_CALL();
     ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId);
@@ -1954,9 +1904,9 @@
     sp<CameraOfflineSessionClient> offlineClient;
     if (offlineSession.get() != nullptr) {
         offlineClient = new CameraOfflineSessionClient(sCameraService,
-                offlineSession, offlineCompositeStreamMap, cameraCb, mClientPackageName,
-                mClientFeatureId, mCameraIdStr, mCameraFacing, mOrientation, mClientPid, mClientUid,
-                mServicePid);
+                offlineSession, offlineCompositeStreamMap, cameraCb, mAttributionAndPermissionUtils,
+                mClientPackageName, mClientFeatureId, mCameraIdStr, mCameraFacing, mOrientation,
+                mClientPid, mClientUid, mServicePid);
         ret = sCameraService->addOfflineClient(mCameraIdStr, offlineClient);
     }
 
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index c2f7f56..d93eaff 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -50,6 +50,7 @@
 protected:
     CameraDeviceClientBase(const sp<CameraService>& cameraService,
             const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
+            std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
             const std::string& clientPackageName,
             bool systemNativeClient,
             const std::optional<std::string>& clientFeatureId,
@@ -109,11 +110,6 @@
             /*out*/
             bool* streamStatus) override;
 
-    virtual binder::Status getSessionCharacteristics(
-            const SessionConfiguration& sessionConfiguration,
-            /*out*/
-            hardware::camera2::impl::CameraMetadataNative* sessionCharacteristics) override;
-
     // Returns -EBUSY if device is not idle or in error state
     virtual binder::Status deleteStream(int streamId) override;
 
@@ -186,6 +182,7 @@
     CameraDeviceClient(const sp<CameraService>& cameraService,
             const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
             std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper,
+            std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
             const std::string& clientPackageName,
             bool clientPackageOverride,
             const std::optional<std::string>& clientFeatureId,
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index 804498f..c6f3e06 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -47,6 +47,7 @@
             sp<CameraOfflineSessionBase> session,
             const KeyedVector<sp<IBinder>, sp<CompositeStream>>& offlineCompositeStreamMap,
             const sp<ICameraDeviceCallbacks>& remoteCallback,
+            std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
             const std::string& clientPackageName,
             const std::optional<std::string>& clientFeatureId,
             const std::string& cameraIdStr, int cameraFacing, int sensorOrientation,
@@ -54,6 +55,7 @@
             CameraService::BasicClient(
                     cameraService,
                     IInterface::asBinder(remoteCallback),
+                    attributionAndPermissionUtils,
                     // (v)ndk doesn't have offline session support
                     clientPackageName, /*overridePackageName*/false, clientFeatureId,
                     cameraIdStr, cameraFacing, sensorOrientation, clientPid, clientUid, servicePid,
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index a126f61..3a78937 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -50,6 +50,7 @@
         const sp<CameraService>& cameraService,
         const sp<TCamCallbacks>& remoteCallback,
         std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper,
+        std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
         const std::string& clientPackageName,
         bool systemNativeClient,
         const std::optional<std::string>& clientFeatureId,
@@ -63,9 +64,9 @@
         bool overrideForPerfClass,
         bool overrideToPortrait,
         bool legacyClient):
-        TClientBase(cameraService, remoteCallback, clientPackageName, systemNativeClient,
-                clientFeatureId, cameraId, api1CameraId, cameraFacing, sensorOrientation, clientPid,
-                clientUid, servicePid, overrideToPortrait),
+        TClientBase(cameraService, remoteCallback, attributionAndPermissionUtils, clientPackageName,
+                systemNativeClient, clientFeatureId, cameraId, api1CameraId, cameraFacing,
+                sensorOrientation, clientPid, clientUid, servicePid, overrideToPortrait),
         mSharedCameraCallbacks(remoteCallback),
         mCameraServiceProxyWrapper(cameraServiceProxyWrapper),
         mDeviceActive(false), mApi1CameraId(api1CameraId)
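
Several of the hunks above make the same mechanical change: a shared AttributionAndPermissionUtils instance is injected once at the service level and then forwarded through each camera client constructor down to the common base class. A minimal sketch of that constructor-injection pattern, using hypothetical names (PermissionHelper, BaseClient, DerivedClient) rather than the real camera classes:

// Sketch only: the shared helper is created by the owning service and handed to
// every client; derived constructors merely forward it to their base class.
#include <memory>
#include <utility>

struct PermissionHelper {
    bool callerHasCameraPermission() const { return true; }  // placeholder check
};

class BaseClient {
  public:
    explicit BaseClient(std::shared_ptr<PermissionHelper> permissionHelper)
        : mPermissionHelper(std::move(permissionHelper)) {}
  protected:
    std::shared_ptr<PermissionHelper> mPermissionHelper;  // shared with the service
};

class DerivedClient : public BaseClient {
  public:
    // Forwarding the helper is the only change the hunks above add to each
    // constructor in the client hierarchy.
    explicit DerivedClient(std::shared_ptr<PermissionHelper> permissionHelper)
        : BaseClient(std::move(permissionHelper)) {}
};
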
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 2bb90d9..b8a6d8b 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -21,6 +21,7 @@
 #include "camera/CameraMetadata.h"
 #include "camera/CaptureResult.h"
 #include "utils/CameraServiceProxyWrapper.h"
+#include "utils/AttributionAndPermissionUtils.h"
 #include "CameraServiceWatchdog.h"
 
 namespace android {
@@ -51,6 +52,7 @@
     Camera2ClientBase(const sp<CameraService>& cameraService,
                       const sp<TCamCallbacks>& remoteCallback,
                       std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper,
+                      std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils,
                       const std::string& clientPackageName,
                       bool systemNativeClient,
                       const std::optional<std::string>& clientFeatureId,
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 1ba3de4..15e2755 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -33,6 +33,7 @@
 #include <future>
 #include <inttypes.h>
 #include <android_companion_virtualdevice_flags.h>
+#include <android_companion_virtualdevice_build_flags.h>
 #include <android/binder_manager.h>
 #include <android/hidl/manager/1.2/IServiceManager.h>
 #include <hidl/ServiceManagement.h>
@@ -139,7 +140,7 @@
 }
 
 std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
-CameraProviderManager::AidlServiceInteractionProxyImpl::getAidlService(
+CameraProviderManager::AidlServiceInteractionProxyImpl::getService(
         const std::string& serviceName) {
     using aidl::android::hardware::camera::provider::ICameraProvider;
 
@@ -147,19 +148,35 @@
     if (flags::lazy_aidl_wait_for_service()) {
         binder = AServiceManager_waitForService(serviceName.c_str());
     } else {
-        binder = AServiceManager_getService(serviceName.c_str());
+        binder = AServiceManager_checkService(serviceName.c_str());
     }
 
     if (binder == nullptr) {
-        ALOGD("%s: AIDL Camera provider HAL '%s' is not actually available", __FUNCTION__,
-              serviceName.c_str());
+        ALOGE("%s: AIDL Camera provider HAL '%s' is not actually available, despite waiting "
+              "indefinitely?", __FUNCTION__, serviceName.c_str());
         return nullptr;
     }
     std::shared_ptr<ICameraProvider> interface =
             ICameraProvider::fromBinder(ndk::SpAIBinder(binder));
 
     return interface;
-};
+}
+
+std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
+CameraProviderManager::AidlServiceInteractionProxyImpl::tryGetService(
+        const std::string& serviceName) {
+    using aidl::android::hardware::camera::provider::ICameraProvider;
+
+    std::shared_ptr<ICameraProvider> interface = ICameraProvider::fromBinder(
+                    ndk::SpAIBinder(AServiceManager_checkService(serviceName.c_str())));
+    if (interface == nullptr) {
+        ALOGD("%s: AIDL Camera provider HAL '%s' is not actually available", __FUNCTION__,
+              serviceName.c_str());
+        return nullptr;
+    }
+
+    return interface;
+}
 
 static std::string getFullAidlProviderName(const std::string instance) {
     std::string aidlHalServiceDescriptor =
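
The getService()/tryGetService() split above separates two lookup strategies: block until a (possibly lazy) provider HAL comes up, or only query for an instance that is already running. A hedged sketch of the distinction using the NDK service-manager API; the template parameter stands in for a generated AIDL interface such as ICameraProvider:

#include <memory>
#include <string>
#include <android/binder_auto_utils.h>
#include <android/binder_manager.h>

template <typename Interface>
std::shared_ptr<Interface> waitForAidlService(const std::string& name) {
    // Blocks until the service is registered, starting a lazy service if needed.
    ndk::SpAIBinder binder(AServiceManager_waitForService(name.c_str()));
    return Interface::fromBinder(binder);  // nullptr if the binder is null
}

template <typename Interface>
std::shared_ptr<Interface> checkForAidlService(const std::string& name) {
    // Returns immediately; nullptr when the service is not currently running.
    ndk::SpAIBinder binder(AServiceManager_checkService(name.c_str()));
    return Interface::fromBinder(binder);
}

The delay_lazy_hal_instantiation hunk further down picks the non-blocking variant during provider enumeration so that lazy HALs are not started prematurely.
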
@@ -442,19 +459,31 @@
     return OK;
 }
 
-status_t CameraProviderManager::getSessionCharacteristics(const std::string& id,
-        const SessionConfiguration &configuration, bool overrideForPerfClass,
-        metadataGetter getMetadata,
-        CameraMetadata* sessionCharacteristics /*out*/) const {
+status_t CameraProviderManager::getSessionCharacteristics(
+        const std::string& id, const SessionConfiguration& configuration, bool overrideForPerfClass,
+        bool overrideToPortrait, CameraMetadata* sessionCharacteristics /*out*/) const {
     if (!flags::feature_combination_query()) {
         return INVALID_OPERATION;
     }
+
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
     auto deviceInfo = findDeviceInfoLocked(id);
     if (deviceInfo == nullptr) {
         return NAME_NOT_FOUND;
     }
 
+    metadataGetter getMetadata = [this, overrideToPortrait](const std::string& id,
+                                                            bool overrideForPerfClass) {
+        CameraMetadata metadata;
+        status_t ret = this->getCameraCharacteristicsLocked(id, overrideForPerfClass, &metadata,
+                                                            overrideToPortrait);
+        if (ret != OK) {
+            ALOGE("%s: Could not get CameraCharacteristics for device %s", __FUNCTION__,
+                  id.c_str());
+        }
+        return metadata;
+    };
+
     return deviceInfo->getSessionCharacteristics(configuration,
             overrideForPerfClass, getMetadata, sessionCharacteristics);
 }
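
In the getSessionCharacteristics() hunk above, the caller-supplied metadataGetter parameter is replaced by a lambda built inside the provider manager, capturing overrideToPortrait and deferring the characteristics lookup until a device-info object asks for it. The callback evidently has the following shape (the alias name here is illustrative, not the real typedef):

#include <functional>
#include <string>
#include <camera/CameraMetadata.h>

using MetadataGetter = std::function<android::CameraMetadata(
        const std::string& cameraId, bool overrideForPerfClass)>;

// A DeviceInfo implementation can then resolve characteristics on demand for the
// logical camera or any physical camera id, e.g.:
//   android::CameraMetadata info = getMetadata(physicalId, overrideForPerfClass);
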
@@ -1807,18 +1836,13 @@
     auto& c = mCameraCharacteristics;
 
     auto entry = c.find(ANDROID_SENSOR_READOUT_TIMESTAMP);
-    if (entry.count != 0) {
-        ALOGE("%s: CameraCharacteristics must not contain ANDROID_SENSOR_READOUT_TIMESTAMP!",
-                __FUNCTION__);
+    if (entry.count == 0) {
+        uint8_t defaultReadoutTimestamp = readoutTimestampSupported ?
+                                          ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE :
+                                          ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED;
+        res = c.update(ANDROID_SENSOR_READOUT_TIMESTAMP, &defaultReadoutTimestamp, 1);
     }
 
-    uint8_t readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED;
-    if (readoutTimestampSupported) {
-        readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE;
-    }
-
-    res = c.update(ANDROID_SENSOR_READOUT_TIMESTAMP, &readoutTimestamp, 1);
-
     return res;
 }
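
The rewritten hunk above treats a HAL-reported ANDROID_SENSOR_READOUT_TIMESTAMP as authoritative and only fills in a default when the tag is absent, instead of rejecting characteristics that already contain it. A sketch of that "default only if absent" pattern as a standalone helper (the function name is illustrative):

#include <camera/CameraMetadata.h>
#include <utils/Errors.h>

android::status_t setReadoutTimestampDefault(android::CameraMetadata& chars,
                                             bool readoutTimestampSupported) {
    camera_metadata_entry entry = chars.find(ANDROID_SENSOR_READOUT_TIMESTAMP);
    if (entry.count != 0) {
        return android::OK;  // Keep whatever the HAL reported.
    }
    uint8_t value = readoutTimestampSupported
            ? ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE
            : ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED;
    return chars.update(ANDROID_SENSOR_READOUT_TIMESTAMP, &value, 1);
}
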
 
@@ -2101,8 +2125,14 @@
         const std::string& providerName, const sp<ProviderInfo>& providerInfo) {
     using aidl::android::hardware::camera::provider::ICameraProvider;
 
-    std::shared_ptr<ICameraProvider> interface =
-            mAidlServiceProxy->getAidlService(providerName.c_str());
+    std::shared_ptr<ICameraProvider> interface;
+    if (flags::delay_lazy_hal_instantiation()) {
+        // Only get remote instance if already running. Lazy Providers will be
+        // woken up later.
+        interface = mAidlServiceProxy->tryGetService(providerName);
+    } else {
+        interface = mAidlServiceProxy->getService(providerName);
+    }
 
     if (interface == nullptr) {
         ALOGW("%s: AIDL Camera provider HAL '%s' is not actually available", __FUNCTION__,
@@ -3249,7 +3279,8 @@
 }
 
 bool CameraProviderManager::isVirtualCameraHalEnabled() {
-    return vd_flags::virtual_camera_service_discovery();
+    return vd_flags::virtual_camera_service_discovery() &&
+           vd_flags::virtual_camera_service_build_flag();
 }
 
 } // namespace android
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 53a2102..5ff3fcd 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -178,9 +178,15 @@
     // Proxy to inject fake services in test.
     class AidlServiceInteractionProxy {
       public:
-        // Returns the Aidl service with the given serviceName
+        // Returns the AIDL service with the given serviceName. Will wait indefinitely
+        // for the service to come up if not running.
         virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
-        getAidlService(const std::string& serviceName) = 0;
+        getService(const std::string& serviceName) = 0;
+
+        // Attempts to get an already running AIDL service of the given serviceName.
+        // Returns nullptr immediately if service is not running.
+        virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
+        tryGetService(const std::string& serviceName) = 0;
 
         virtual ~AidlServiceInteractionProxy() = default;
     };
@@ -190,7 +196,10 @@
     class AidlServiceInteractionProxyImpl : public AidlServiceInteractionProxy {
       public:
         virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
-        getAidlService(const std::string& serviceName) override;
+        getService(const std::string& serviceName) override;
+
+        virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
+        tryGetService(const std::string& serviceName) override;
     };
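
The AidlServiceInteractionProxy interface exists so tests can substitute a fake provider lookup (per the "Proxy to inject fake services in test" comment above). A hypothetical fake might look like the following; the registration helper and storage are assumptions, not the actual test utilities:

#include <map>
#include <memory>
#include <string>
#include <utility>

using ::aidl::android::hardware::camera::provider::ICameraProvider;

class FakeAidlServiceInteractionProxy :
        public android::CameraProviderManager::AidlServiceInteractionProxy {
  public:
    void addService(const std::string& name, std::shared_ptr<ICameraProvider> provider) {
        mServices[name] = std::move(provider);
    }

    // The "waiting" variant: in a fake there is nothing to wait for, so simply
    // return whatever was registered (or nullptr).
    std::shared_ptr<ICameraProvider> getService(const std::string& serviceName) override {
        auto it = mServices.find(serviceName);
        return it == mServices.end() ? nullptr : it->second;
    }

    // The non-blocking variant behaves identically in a fake.
    std::shared_ptr<ICameraProvider> tryGetService(const std::string& serviceName) override {
        return getService(serviceName);
    }

  private:
    std::map<std::string, std::shared_ptr<ICameraProvider>> mServices;
};
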
 
     /**
@@ -321,7 +330,8 @@
      */
      status_t getSessionCharacteristics(const std::string& id,
             const SessionConfiguration &configuration,
-            bool overrideForPerfClass, camera3::metadataGetter getMetadata,
+            bool overrideForPerfClass,
+            bool overrideToPortrait,
             CameraMetadata* sessionCharacteristics /*out*/) const;
 
     /**
diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
index d773af3..a721d28 100644
--- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
@@ -275,54 +275,58 @@
     if (mSavedInterface != nullptr) {
         return mSavedInterface;
     }
+
     if (!kEnableLazyHal) {
         ALOGE("Bad provider state! Should not be here on a non-lazy HAL!");
         return nullptr;
     }
 
     auto interface = mActiveInterface.lock();
-    if (interface == nullptr) {
-        // Try to get service without starting
-        interface =
-                    ICameraProvider::fromBinder(
-                            ndk::SpAIBinder(AServiceManager_checkService(mProviderName.c_str())));
-        if (interface == nullptr) {
-            ALOGV("Camera provider actually needs restart, calling getService(%s)",
-                  mProviderName.c_str());
-            interface = mManager->mAidlServiceProxy->getAidlService(mProviderName.c_str());
-
-            if (interface == nullptr) {
-                ALOGD("%s: %s service not started", __FUNCTION__, mProviderName.c_str());
-                return nullptr;
-            }
-
-            // Set all devices as ENUMERATING, provider should update status
-            // to PRESENT after initializing.
-            // This avoids failing getCameraDeviceInterface_V3_x before devices
-            // are ready.
-            for (auto& device : mDevices) {
-              device->mIsDeviceAvailable = false;
-            }
-
-            interface->setCallback(mCallbacks);
-            auto link = AIBinder_linkToDeath(interface->asBinder().get(), mDeathRecipient.get(),
-                    this);
-            if (link != STATUS_OK) {
-                ALOGW("%s: Unable to link to provider '%s' death notifications",
-                        __FUNCTION__, mProviderName.c_str());
-                mManager->removeProvider(mProviderInstance);
-                return nullptr;
-            }
-
-            // Send current device state
-            interface->notifyDeviceStateChange(mDeviceState);
-        }
-        mActiveInterface = interface;
-    } else {
-        ALOGV("Camera provider (%s) already in use. Re-using instance.",
-              mProviderName.c_str());
+    if (interface != nullptr) {
+        ALOGV("Camera provider (%s) already in use. Re-using instance.", mProviderName.c_str());
+        return interface;
     }
 
+    // Try to get service without starting
+    interface = ICameraProvider::fromBinder(
+            ndk::SpAIBinder(AServiceManager_checkService(mProviderName.c_str())));
+    if (interface != nullptr) {
+        // Service is already running. Cache and return.
+        mActiveInterface = interface;
+        return interface;
+    }
+
+    ALOGV("Camera provider actually needs restart, calling getService(%s)", mProviderName.c_str());
+    interface = mManager->mAidlServiceProxy->getService(mProviderName);
+
+    if (interface == nullptr) {
+        ALOGE("%s: %s service not started", __FUNCTION__, mProviderName.c_str());
+        return nullptr;
+    }
+
+    // Set all devices as ENUMERATING, provider should update status
+    // to PRESENT after initializing.
+    // This avoids failing getCameraDeviceInterface_V3_x before devices
+    // are ready.
+    for (auto& device : mDevices) {
+        device->mIsDeviceAvailable = false;
+    }
+
+    interface->setCallback(mCallbacks);
+    auto link = AIBinder_linkToDeath(interface->asBinder().get(), mDeathRecipient.get(),
+            this);
+    if (link != STATUS_OK) {
+        ALOGW("%s: Unable to link to provider '%s' death notifications",
+                __FUNCTION__, mProviderName.c_str());
+        mManager->removeProvider(mProviderInstance);
+        return nullptr;
+    }
+
+    // Send current device state
+    interface->notifyDeviceStateChange(mDeviceState);
+    // Cache interface to return early for future calls.
+    mActiveInterface = interface;
+
     return interface;
 }
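
The AidlProviderInfo rewrite above flattens the nested lookup into guard clauses: return the cached interface, else an already-running instance, else start the (lazy) service, wire up callbacks and death notification, and cache the result. A control-flow sketch with placeholder types (Provider, startService(), and registerCallbacks() are not the real camera provider APIs):

#include <memory>

struct Provider { /* opaque HAL interface stand-in */ };

class LazyProviderHandle {
  public:
    std::shared_ptr<Provider> get() {
        if (auto active = mActive.lock()) {
            return active;                    // 1. already in use, re-use it
        }
        if (auto running = findRunning()) {
            mActive = running;                // 2. running but not yet cached
            return running;
        }
        auto started = startService();        // 3. start the lazy service
        if (started == nullptr) {
            return nullptr;
        }
        if (!registerCallbacks(started)) {    // 4. callbacks + death notification
            return nullptr;
        }
        mActive = started;                    // 5. cache for future calls
        return started;
    }

  private:
    std::shared_ptr<Provider> findRunning();
    std::shared_ptr<Provider> startService();
    bool registerCallbacks(const std::shared_ptr<Provider>& provider);

    std::weak_ptr<Provider> mActive;
};
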
 
diff --git a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
index d2643c1..065f0c5 100644
--- a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
@@ -692,6 +692,14 @@
         mHasFlashUnit = false;
     }
 
+    if (flags::feature_combination_query()) {
+        res = addSessionConfigQueryVersionTag();
+        if (OK != res) {
+            ALOGE("%s: Unable to add sessionConfigurationQueryVersion tag: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+        }
+    }
+
     camera_metadata_entry entry =
             mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_DEFAULT_LEVEL);
     if (entry.count == 1) {
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 28b2d78..9792089 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -69,6 +69,7 @@
 #include "utils/SchedulingPolicyUtils.h"
 #include "utils/SessionConfigurationUtils.h"
 #include "utils/TraceHFR.h"
+#include "utils/Utils.h"
 
 #include <algorithm>
 #include <optional>
@@ -77,10 +78,11 @@
 using namespace android::camera3;
 using namespace android::camera3::SessionConfigurationUtils;
 using namespace android::hardware::camera;
+using namespace android::hardware::cameraservice::utils::conversion::aidl;
 
 namespace flags = com::android::internal::camera::flags;
 namespace android {
-namespace flags = com::android::internal::camera::flags;
+
 Camera3Device::Camera3Device(std::shared_ptr<CameraServiceProxyWrapper>& cameraServiceProxyWrapper,
         const std::string &id, bool overrideForPerfClass, bool overrideToPortrait,
         bool legacyClient):
@@ -145,17 +147,6 @@
     /** Register in-flight map to the status tracker */
     mInFlightStatusId = mStatusTracker->addComponent("InflightRequests");
 
-    if (mUseHalBufManager) {
-        res = mRequestBufferSM.initialize(mStatusTracker);
-        if (res != OK) {
-            SET_ERR_L("Unable to start request buffer state machine: %s (%d)",
-                    strerror(-res), res);
-            mInterface->close();
-            mStatusTracker.clear();
-            return res;
-        }
-    }
-
     /** Create buffer manager */
     mBufferManager = new Camera3BufferManager();
 
@@ -264,6 +255,8 @@
         return res;
     }
 
+    mSupportsExtensionKeys = areExtensionKeysSupported(mDeviceInfo);
+
     return OK;
 }
 
@@ -1622,7 +1615,9 @@
     mStatusWaiters++;
 
     bool signalPipelineDrain = false;
-    if (!active && mUseHalBufManager) {
+    if (!active &&
+            (mUseHalBufManager ||
+                    (flags::session_hal_buf_manager() && mHalBufManagedStreamIds.size() != 0))) {
         auto streamIds = mOutputStreams.getStreamIds();
         if (mStatus == STATUS_ACTIVE) {
             mRequestThread->signalPipelineDrain(streamIds);
@@ -2538,11 +2533,14 @@
     }
 
     config.streams = streams.editArray();
+    config.hal_buffer_managed_streams = mHalBufManagedStreamIds;
     config.use_hal_buf_manager = mUseHalBufManager;
 
     // Do the HAL configuration; will potentially touch stream
-    // max_buffers, usage, and priv fields, as well as data_space and format
-    // fields for IMPLEMENTATION_DEFINED formats.
+    // max_buffers, usage, and priv fields, as well as data_space and format
+    // fields for IMPLEMENTATION_DEFINED formats, the set of HAL-buffer-managed
+    // streams, and use_hal_buf_manager (in case the aconfig flag
+    // session_hal_buf_manager is not enabled but the HAL supports a
+    // session-specific HAL buffer manager).
 
     int64_t logId = mCameraServiceProxyWrapper->getCurrentLogIdForCamera(mId);
     const camera_metadata_t *sessionBuffer = sessionParams.getAndLock();
@@ -2562,13 +2560,19 @@
                 strerror(-res), res);
         return res;
     }
+    // It is possible that the HAL buffer manager behavior was changed by the
+    // configureStreams() call.
+    mUseHalBufManager = config.use_hal_buf_manager;
     if (flags::session_hal_buf_manager()) {
-        bool prevSessionHalBufManager = mUseHalBufManager;
-        // It is possible that configureStreams() changed config.use_hal_buf_manager
-        mUseHalBufManager = config.use_hal_buf_manager;
-        if (prevSessionHalBufManager && !mUseHalBufManager) {
+        bool prevSessionHalBufManager = (mHalBufManagedStreamIds.size() != 0);
+        // It is possible that configureStreams() changed config.hal_buffer_managed_streams
+        mHalBufManagedStreamIds = config.hal_buffer_managed_streams;
+
+        bool thisSessionHalBufManager = mHalBufManagedStreamIds.size() != 0;
+
+        if (prevSessionHalBufManager && !thisSessionHalBufManager) {
             mRequestBufferSM.deInit();
-        } else if (!prevSessionHalBufManager && mUseHalBufManager) {
+        } else if (!prevSessionHalBufManager && thisSessionHalBufManager) {
             res = mRequestBufferSM.initialize(mStatusTracker);
             if (res != OK) {
                 SET_ERR_L("%s: Camera %s: RequestBuffer State machine couldn't be initialized!",
@@ -2576,7 +2580,7 @@
                 return res;
             }
         }
-        mRequestThread->setHalBufferManager(mUseHalBufManager);
+        mRequestThread->setHalBufferManagedStreams(mHalBufManagedStreamIds);
     }
     // Finish all stream configuration immediately.
     // TODO: Try to relax this later back to lazy completion, which should be
@@ -2904,7 +2908,8 @@
 
     FlushInflightReqStates states {
         mId, mInFlightLock, mInFlightMap, mUseHalBufManager,
-        listener, *this, *mInterface, *this, mSessionStatsBuilder};
+        mHalBufManagedStreamIds, listener, *this, *mInterface, *this,
+        mSessionStatsBuilder};
 
     camera3::flushInflightRequests(states);
 }
@@ -2969,6 +2974,11 @@
     return mBufferRecords.verifyBufferIds(streamId, bufIds);
 }
 
+bool Camera3Device::HalInterface::isHalBufferManagedStream(int32_t streamId) const {
+    return (mUseHalBufManager || (flags::session_hal_buf_manager() &&
+                                  contains(mHalBufManagedStreamIds, streamId)));
+}
+
 status_t Camera3Device::HalInterface::popInflightBuffer(
         int32_t frameNumber, int32_t streamId,
         /*out*/ buffer_handle_t **buffer) {
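
The new HalInterface::isHalBufferManagedStream() check above relies on a contains() helper pulled in via utils/Utils.h; presumably something along these lines (this sketch is an assumption, not the actual utility):

#include <set>

template <typename Container, typename Value>
bool contains(const Container& container, const Value& value) {
    return container.find(value) != container.end();
}

// Usage mirroring the check above:
//   bool managed = mUseHalBufManager ||
//           (flags::session_hal_buf_manager() && contains(mHalBufManagedStreamIds, streamId));
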
@@ -3061,7 +3071,7 @@
         mOverrideToPortrait(overrideToPortrait),
         mSupportSettingsOverride(supportSettingsOverride) {
     mStatusId = statusTracker->addComponent("RequestThread");
-    mVndkVersion = property_get_int32("ro.vndk.version", __ANDROID_API_FUTURE__);
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
 }
 
 Camera3Device::RequestThread::~RequestThread() {}
@@ -3287,8 +3297,9 @@
     mDoPauseSignal.signal();
 }
 
-void Camera3Device::RequestThread::setHalBufferManager(bool enabled) {
-    mUseHalBufManager = enabled;
+void Camera3Device::RequestThread::setHalBufferManagedStreams(
+            const std::set<int32_t> &halBufferManagedStreams) {
+    mHalBufManagedStreamIds = halBufferManagedStreams;
 }
 
 status_t Camera3Device::RequestThread::waitUntilRequestProcessed(
@@ -3884,13 +3895,22 @@
 
                     for (it = captureRequest->mSettingsList.begin();
                             it != captureRequest->mSettingsList.end(); it++) {
-                        res = hardware::cameraservice::utils::conversion::aidl::filterVndkKeys(
-                                mVndkVersion, it->metadata, false /*isStatic*/);
+                        res = filterVndkKeys(mVndkVersion, it->metadata, false /*isStatic*/);
                         if (res != OK) {
                             SET_ERR("RequestThread: Failed during VNDK filter of capture requests "
                                     "%d: %s (%d)", halRequest->frame_number, strerror(-res), res);
                             return INVALID_OPERATION;
                         }
+
+                        if (!parent->mSupportsExtensionKeys) {
+                            res = filterExtensionKeys(&it->metadata);
+                            if (res != OK) {
+                                SET_ERR("RequestThread: Failed during extension filter of capture "
+                                        "requests %d: %s (%d)", halRequest->frame_number,
+                                        strerror(-res), res);
+                                return INVALID_OPERATION;
+                            }
+                        }
                     }
                 }
             }
@@ -3972,11 +3992,15 @@
         nsecs_t waitDuration = kBaseGetBufferWait + parent->getExpectedInFlightDuration();
 
         SurfaceMap uniqueSurfaceIdMap;
+        bool containsHalBufferManagedStream = false;
         for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
             sp<Camera3OutputStreamInterface> outputStream =
                     captureRequest->mOutputStreams.editItemAt(j);
             int streamId = outputStream->getId();
-
+            if (!containsHalBufferManagedStream) {
+                containsHalBufferManagedStream =
+                        contains(mHalBufManagedStreamIds, streamId);
+            }
             // Prepare video buffers for high speed recording on the first video request.
             if (mPrepareVideoStream && outputStream->isVideoStream()) {
                 // Only try to prepare video stream on the first video request.
@@ -4008,7 +4032,7 @@
                 uniqueSurfaceIdMap.insert({streamId, std::move(uniqueSurfaceIds)});
             }
 
-            if (mUseHalBufManager) {
+            if (parent->isHalBufferManagedStream(streamId)) {
                 if (outputStream->isAbandoned()) {
                     ALOGV("%s: stream %d is abandoned, skipping request", __FUNCTION__, streamId);
                     return TIMED_OUT;
@@ -4099,6 +4123,9 @@
                 isZslCapture = true;
             }
         }
+        bool passSurfaceMap =
+                mUseHalBufManager ||
+                        (flags::session_hal_buf_manager() && containsHalBufferManagedStream);
         auto expectedDurationInfo = calculateExpectedDurationRange(settings);
         res = parent->registerInFlight(halRequest->frame_number,
                 totalNumBuffers, captureRequest->mResultExtras,
@@ -4110,7 +4137,7 @@
                 requestedPhysicalCameras, isStillCapture, isZslCapture,
                 captureRequest->mRotateAndCropAuto, captureRequest->mAutoframingAuto,
                 mPrevCameraIdsWithZoom,
-                (mUseHalBufManager) ? uniqueSurfaceIdMap :
+                passSurfaceMap ? uniqueSurfaceIdMap :
                                       SurfaceMap{}, captureRequest->mRequestTimeNs);
         ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
                ", burstId = %" PRId32 ".",
@@ -4213,7 +4240,8 @@
 }
 
 void Camera3Device::RequestThread::signalPipelineDrain(const std::vector<int>& streamIds) {
-    if (!mUseHalBufManager) {
+    if (!mUseHalBufManager &&
+            (flags::session_hal_buf_manager() && mHalBufManagedStreamIds.size() == 0)) {
         ALOGE("%s called for camera device not supporting HAL buffer management", __FUNCTION__);
         return;
     }
@@ -4365,22 +4393,28 @@
             captureRequest->mInputStream->returnInputBuffer(captureRequest->mInputBuffer);
         }
 
-        // No output buffer can be returned when using HAL buffer manager
-        if (!mUseHalBufManager) {
-            for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
-                //Buffers that failed processing could still have
-                //valid acquire fence.
-                int acquireFence = (*outputBuffers)[i].acquire_fence;
-                if (0 <= acquireFence) {
-                    close(acquireFence);
-                    outputBuffers->editItemAt(i).acquire_fence = -1;
-                }
-                outputBuffers->editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
-                captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i],
-                        /*timestamp*/0, /*readoutTimestamp*/0,
-                        /*timestampIncreasing*/true, std::vector<size_t> (),
-                        captureRequest->mResultExtras.frameNumber);
+        for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
+            // Buffers that failed processing could still have
+            // a valid acquire fence.
+            Camera3Stream *stream = Camera3Stream::cast((*outputBuffers)[i].stream);
+            int32_t streamId = stream->getId();
+            bool skipBufferForStream =
+                    mUseHalBufManager || (flags::session_hal_buf_manager() &&
+                            contains(mHalBufManagedStreamIds, streamId));
+            if (skipBufferForStream) {
+                // No output buffer can be returned for a HAL-buffer-managed stream.
+                continue;
             }
+            int acquireFence = (*outputBuffers)[i].acquire_fence;
+            if (0 <= acquireFence) {
+                close(acquireFence);
+                outputBuffers->editItemAt(i).acquire_fence = -1;
+            }
+            outputBuffers->editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
+            captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i],
+                    /*timestamp*/0, /*readoutTimestamp*/0,
+                    /*timestampIncreasing*/true, std::vector<size_t> (),
+                    captureRequest->mResultExtras.frameNumber);
         }
 
         if (sendRequestError) {
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e32a36f..ac4b039 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -97,6 +97,10 @@
         return mInterface->getTransportType();
     }
 
+    bool isHalBufferManagedStream(int32_t streamId) const {
+        return mInterface->isHalBufferManagedStream(streamId);
+    }
+
     /**
      * CameraDeviceBase interface
      */
@@ -476,6 +480,9 @@
 
         /////////////////////////////////////////////////////////////////////
 
+        // Check whether a stream is HAL buffer managed.
+        bool isHalBufferManagedStream(int32_t streamId) const;
+
         // Get a vector of (frameNumber, streamId) pair of currently inflight
         // buffers
         void getInflightBufferKeys(std::vector<std::pair<int32_t, int32_t>>* out);
@@ -547,7 +554,9 @@
 
         uint32_t mNextStreamConfigCounter = 1;
 
+        // TODO: This can be removed after flags::session_hal_buf_manager is removed
         bool mUseHalBufManager = false;
+        std::set<int32_t> mHalBufManagedStreamIds;
         bool mIsReconfigurationQuerySupported;
 
         const bool mSupportOfflineProcessing;
@@ -948,11 +957,11 @@
         void     setPaused(bool paused);
 
         /**
-         * Set Hal buffer manager behavior
-         * @param enabled Whether HAL buffer manager is enabled for the current session.
+         * Set the HAL-buffer-managed streams
+         * @param halBufferManagedStreams The streams for which the HAL buffer manager is enabled
          *
          */
-        void setHalBufferManager(bool enabled);
+        void setHalBufferManagedStreams(const std::set<int32_t> &halBufferManagedStreams);
 
         /**
          * Wait until thread processes the capture request with settings'
@@ -1203,6 +1212,7 @@
         std::map<int32_t, std::set<std::string>> mGroupIdPhysicalCameraMap;
 
         bool               mUseHalBufManager = false;
+        std::set<int32_t> mHalBufManagedStreamIds;
         const bool         mSupportCameraMute;
         const bool         mOverrideToPortrait;
         const bool         mSupportSettingsOverride;
@@ -1393,6 +1403,7 @@
 
     // Whether HAL request buffers through requestStreamBuffers API
     bool mUseHalBufManager = false;
+    std::set<int32_t> mHalBufManagedStreamIds;
     bool mSessionHalBufManager = false;
     // Lock to ensure requestStreamBuffers() callbacks are serialized
     std::mutex mRequestBufferInterfaceLock;
@@ -1512,6 +1523,9 @@
     // AE_TARGET_FPS_RANGE
     bool mIsFixedFps = false;
 
+    // Flag to indicate that we shouldn't forward extension related metadata
+    bool mSupportsExtensionKeys = false;
+
     // Injection camera related methods.
     class Camera3DeviceInjectionMethods : public virtual RefBase {
       public:
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
index 172b62a..1025061 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
@@ -58,6 +58,7 @@
         mTagMonitor(offlineStates.mTagMonitor),
         mVendorTagId(offlineStates.mVendorTagId),
         mUseHalBufManager(offlineStates.mUseHalBufManager),
+        mHalBufManagedStreamIds(offlineStates.mHalBufManagedStreamIds),
         mNeedFixupMonochromeTags(offlineStates.mNeedFixupMonochromeTags),
         mUsePartialResult(offlineStates.mUsePartialResult),
         mNumPartialResults(offlineStates.mNumPartialResults),
@@ -136,7 +137,7 @@
 
     FlushInflightReqStates states {
         mId, mOfflineReqsLock, mOfflineReqs, mUseHalBufManager,
-        listener, *this, mBufferRecords, *this, mSessionStatsBuilder};
+        mHalBufManagedStreamIds, listener, *this, mBufferRecords, *this, mSessionStatsBuilder};
 
     camera3::flushInflightRequests(states);
 
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.h b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
index b5fd486..1ef3921 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.h
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
@@ -51,7 +51,8 @@
 struct Camera3OfflineStates {
     Camera3OfflineStates(
             const TagMonitor& tagMonitor, const metadata_vendor_id_t vendorTagId,
-            const bool useHalBufManager, const bool needFixupMonochromeTags,
+            const bool useHalBufManager, const std::set<int32_t> &halBufferManagedStreamIds,
+            const bool needFixupMonochromeTags,
             const bool usePartialResult, const uint32_t numPartialResults,
             const int64_t lastCompletedRegularFN, const int64_t lastCompletedReprocessFN,
             const int64_t lastCompletedZslFN, const uint32_t nextResultFN,
@@ -64,7 +65,8 @@
             const std::unordered_map<std::string, camera3::RotateAndCropMapper>&
                 rotateAndCropMappers) :
             mTagMonitor(tagMonitor), mVendorTagId(vendorTagId),
-            mUseHalBufManager(useHalBufManager), mNeedFixupMonochromeTags(needFixupMonochromeTags),
+            mUseHalBufManager(useHalBufManager), mHalBufManagedStreamIds(halBufferManagedStreamIds),
+            mNeedFixupMonochromeTags(needFixupMonochromeTags),
             mUsePartialResult(usePartialResult), mNumPartialResults(numPartialResults),
             mLastCompletedRegularFrameNumber(lastCompletedRegularFN),
             mLastCompletedReprocessFrameNumber(lastCompletedReprocessFN),
@@ -85,6 +87,7 @@
     const metadata_vendor_id_t mVendorTagId;
 
     const bool mUseHalBufManager;
+    const std::set<int32_t> &mHalBufManagedStreamIds;
     const bool mNeedFixupMonochromeTags;
 
     const bool mUsePartialResult;
@@ -181,6 +184,7 @@
     const metadata_vendor_id_t mVendorTagId;
 
     const bool mUseHalBufManager;
+    const std::set<int32_t> &mHalBufManagedStreamIds;
     const bool mNeedFixupMonochromeTags;
 
     const bool mUsePartialResult;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index f98636b..2ce04e8 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -235,65 +235,6 @@
     return OK;
 }
 
-status_t Camera3OutputStream::getBuffersLocked(std::vector<OutstandingBuffer>* outBuffers) {
-    status_t res;
-
-    if ((res = getBufferPreconditionCheckLocked()) != OK) {
-        return res;
-    }
-
-    if (mUseBufferManager) {
-        ALOGE("%s: stream %d is managed by buffer manager and does not support batch operation",
-                __FUNCTION__, mId);
-        return INVALID_OPERATION;
-    }
-
-    sp<Surface> consumer = mConsumer;
-    /**
-     * Release the lock briefly to avoid deadlock for below scenario:
-     * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
-     * This thread acquired StreamingProcessor lock and try to lock Camera3Stream lock.
-     * Thread 2: Camera3Stream::returnBuffer->StreamingProcessor::onFrameAvailable().
-     * This thread acquired Camera3Stream lock and bufferQueue lock, and try to lock
-     * StreamingProcessor lock.
-     * Thread 3: Camera3Stream::getBuffer(). This thread acquired Camera3Stream lock
-     * and try to lock bufferQueue lock.
-     * Then there is circular locking dependency.
-     */
-    mLock.unlock();
-
-    size_t numBuffersRequested = outBuffers->size();
-    std::vector<Surface::BatchBuffer> buffers(numBuffersRequested);
-
-    nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
-    res = consumer->dequeueBuffers(&buffers);
-    nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
-    mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
-
-    mLock.lock();
-
-    if (res != OK) {
-        if (shouldLogError(res, mState)) {
-            ALOGE("%s: Stream %d: Can't dequeue %zu output buffers: %s (%d)",
-                    __FUNCTION__, mId, numBuffersRequested, strerror(-res), res);
-        }
-        checkRetAndSetAbandonedLocked(res);
-        return res;
-    }
-    checkRemovedBuffersLocked();
-
-    /**
-     * FenceFD now owned by HAL except in case of error,
-     * in which case we reassign it to acquire_fence
-     */
-    for (size_t i = 0; i < numBuffersRequested; i++) {
-        handoutBufferLocked(*(outBuffers->at(i).outBuffer),
-                &(buffers[i].buffer->handle), /*acquireFence*/buffers[i].fenceFd,
-                /*releaseFence*/-1, CAMERA_BUFFER_STATUS_OK, /*output*/true);
-    }
-    return OK;
-}
-
 status_t Camera3OutputStream::queueBufferToConsumer(sp<ANativeWindow>& consumer,
             ANativeWindowBuffer* buffer, int anwReleaseFence,
             const std::vector<size_t>&) {
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 65791a9..da0ed87 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -388,8 +388,6 @@
     virtual status_t getBufferLocked(camera_stream_buffer *buffer,
             const std::vector<size_t>& surface_ids);
 
-    virtual status_t getBuffersLocked(/*out*/std::vector<OutstandingBuffer>* buffers) override;
-
     virtual status_t returnBufferLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp, nsecs_t readoutTimestamp,
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 450f3dd..89e08a1 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -45,13 +45,17 @@
 #include <camera/CameraUtils.h>
 #include <camera/StringUtils.h>
 #include <camera_metadata_hidden.h>
+#include <com_android_internal_camera_flags.h>
 
 #include "device3/Camera3OutputUtils.h"
+#include "utils/SessionConfigurationUtils.h"
 
 #include "system/camera_metadata.h"
 
 using namespace android::camera3;
+using namespace android::camera3::SessionConfigurationUtils;
 using namespace android::hardware::camera;
+namespace flags = com::android::internal::camera::flags;
 
 namespace android {
 namespace camera3 {
@@ -495,7 +499,8 @@
     states.inflightIntf.onInflightEntryRemovedLocked(duration);
 }
 
-void removeInFlightRequestIfReadyLocked(CaptureOutputStates& states, int idx) {
+void removeInFlightRequestIfReadyLocked(CaptureOutputStates& states, int idx,
+        std::vector<BufferToReturn> *returnableBuffers) {
     InFlightRequestMap& inflightMap = states.inflightMap;
     const InFlightRequest &request = inflightMap.valueAt(idx);
     const uint32_t frameNumber = inflightMap.keyAt(idx);
@@ -533,11 +538,13 @@
         assert(request.requestStatus != OK ||
                request.pendingOutputBuffers.size() == 0);
 
-        returnOutputBuffers(
-            states.useHalBufManager, states.listener,
+        collectReturnableOutputBuffers(
+            states.useHalBufManager, states.halBufManagedStreamIds,
+            states.listener,
             request.pendingOutputBuffers.array(),
             request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
             /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
+            /*out*/ returnableBuffers,
             /*timestampIncreasing*/true,
             request.outputSurfaces, request.resultExtras,
             request.errorBufStrategy, request.transform);
@@ -632,6 +639,7 @@
     // in-flight request and they will be returned when the shutter timestamp
     // arrives. Update the in-flight status and remove the in-flight entry if
     // all result data and shutter timestamp have been received.
+    std::vector<BufferToReturn> returnableBuffers{};
     nsecs_t shutterTimestamp = 0;
     {
         std::lock_guard<std::mutex> l(states.inflightLock);
@@ -793,9 +801,11 @@
         request.pendingOutputBuffers.appendArray(result->output_buffers,
                 result->num_output_buffers);
         if (shutterTimestamp != 0) {
-            returnAndRemovePendingOutputBuffers(
-                states.useHalBufManager, states.listener,
-                request, states.sessionStatsBuilder);
+            collectAndRemovePendingOutputBuffers(
+                states.useHalBufManager, states.halBufManagedStreamIds,
+                states.listener,
+                request, states.sessionStatsBuilder,
+                /*out*/ &returnableBuffers);
         }
 
         if (result->result != NULL && !isPartialResult) {
@@ -820,9 +830,18 @@
                     request.physicalMetadatas);
             }
         }
-        removeInFlightRequestIfReadyLocked(states, idx);
+        removeInFlightRequestIfReadyLocked(states, idx, &returnableBuffers);
+        if (!flags::return_buffers_outside_locks()) {
+            finishReturningOutputBuffers(returnableBuffers,
+                states.listener, states.sessionStatsBuilder);
+        }
     } // scope for states.inFlightLock
 
+    if (flags::return_buffers_outside_locks()) {
+        finishReturningOutputBuffers(returnableBuffers,
+                states.listener, states.sessionStatsBuilder);
+    }
+
     if (result->input_buffer != NULL) {
         if (hasInputBufferInRequest) {
             Camera3Stream *stream =
@@ -843,16 +862,17 @@
     }
 }
 
-void returnOutputBuffers(
+void collectReturnableOutputBuffers(
         bool useHalBufManager,
+        const std::set<int32_t> &halBufferManagedStreams,
         sp<NotificationListener> listener,
         const camera_stream_buffer_t *outputBuffers, size_t numBuffers,
         nsecs_t timestamp, nsecs_t readoutTimestamp, bool requested,
         nsecs_t requestTimeNs, SessionStatsBuilder& sessionStatsBuilder,
+        /*out*/ std::vector<BufferToReturn> *returnableBuffers,
         bool timestampIncreasing, const SurfaceMap& outputSurfaces,
-        const CaptureResultExtras &inResultExtras,
+        const CaptureResultExtras &resultExtras,
         ERROR_BUF_STRATEGY errorBufStrategy, int32_t transform) {
-
     for (size_t i = 0; i < numBuffers; i++)
     {
         Camera3StreamInterface *stream = Camera3Stream::cast(outputBuffers[i].stream);
@@ -862,7 +882,7 @@
         if (outputBuffers[i].status == CAMERA_BUFFER_STATUS_ERROR &&
                 errorBufStrategy == ERROR_BUF_RETURN_NOTIFY) {
             if (listener != nullptr) {
-                CaptureResultExtras extras = inResultExtras;
+                CaptureResultExtras extras = resultExtras;
                 extras.errorStreamId = streamId;
                 listener->notifyError(
                         hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER,
@@ -871,7 +891,9 @@
         }
 
         if (outputBuffers[i].buffer == nullptr) {
-            if (!useHalBufManager) {
+            if (!useHalBufManager &&
+                    !(flags::session_hal_buf_manager() &&
+                            contains(halBufferManagedStreams, streamId))) {
                 // With the HAL buffer management API, the HAL sometimes has to return buffers
                 // that do not have an output buffer handle filled in yet. This is, however,
                 // illegal if the HAL buffer management API is not being used.
@@ -885,22 +907,35 @@
         }
 
         const auto& it = outputSurfaces.find(streamId);
-        status_t res = OK;
 
         // Do not return the buffer if the buffer status is error, and the error
         // buffer strategy is CACHE.
         if (outputBuffers[i].status != CAMERA_BUFFER_STATUS_ERROR ||
                 errorBufStrategy != ERROR_BUF_CACHE) {
             if (it != outputSurfaces.end()) {
-                res = stream->returnBuffer(
+                returnableBuffers->emplace_back(stream,
                         outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
-                        it->second, inResultExtras.frameNumber, transform);
+                        it->second, resultExtras,
+                        transform, requested ? requestTimeNs : 0);
             } else {
-                res = stream->returnBuffer(
+                returnableBuffers->emplace_back(stream,
                         outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
-                        std::vector<size_t> (), inResultExtras.frameNumber, transform);
+                        std::vector<size_t> (), resultExtras,
+                        transform, requested ? requestTimeNs : 0);
             }
         }
+    }
+}
+
+void finishReturningOutputBuffers(const std::vector<BufferToReturn> &returnableBuffers,
+        sp<NotificationListener> listener, SessionStatsBuilder& sessionStatsBuilder) {
+    for (auto& b : returnableBuffers) {
+        const int streamId = b.stream->getId();
+
+        status_t res = b.stream->returnBuffer(b.buffer, b.timestamp,
+                b.readoutTimestamp, b.timestampIncreasing,
+                b.surfaceIds, b.resultExtras.frameNumber, b.transform);
+
         // Note: stream may be deallocated at this point, if this buffer was
         // the last reference to it.
         bool dropped = false;
@@ -911,50 +946,55 @@
             ALOGE("Can't return buffer to its stream: %s (%d)", strerror(-res), res);
             dropped = true;
         } else {
-            if (outputBuffers[i].status == CAMERA_BUFFER_STATUS_ERROR || timestamp == 0) {
+            if (b.buffer.status == CAMERA_BUFFER_STATUS_ERROR || b.timestamp == 0) {
                 dropped = true;
             }
         }
-        if (requested) {
+        if (b.requestTimeNs > 0) {
             nsecs_t bufferTimeNs = systemTime();
-            int32_t captureLatencyMs = ns2ms(bufferTimeNs - requestTimeNs);
+            int32_t captureLatencyMs = ns2ms(bufferTimeNs - b.requestTimeNs);
             sessionStatsBuilder.incCounter(streamId, dropped, captureLatencyMs);
         }
 
         // Long processing consumers can cause returnBuffer timeout for shared stream
         // If that happens, cancel the buffer and send a buffer error to client
-        if (it != outputSurfaces.end() && res == TIMED_OUT &&
-                outputBuffers[i].status == CAMERA_BUFFER_STATUS_OK) {
+        if (b.surfaceIds.size() > 0 && res == TIMED_OUT &&
+                b.buffer.status == CAMERA_BUFFER_STATUS_OK) {
             // cancel the buffer
-            camera_stream_buffer_t sb = outputBuffers[i];
+            camera_stream_buffer_t sb = b.buffer;
             sb.status = CAMERA_BUFFER_STATUS_ERROR;
-            stream->returnBuffer(sb, /*timestamp*/0, /*readoutTimestamp*/0,
-                    timestampIncreasing, std::vector<size_t> (),
-                    inResultExtras.frameNumber, transform);
+            b.stream->returnBuffer(sb, /*timestamp*/0, /*readoutTimestamp*/0,
+                    b.timestampIncreasing, std::vector<size_t> (),
+                    b.resultExtras.frameNumber, b.transform);
 
             if (listener != nullptr) {
-                CaptureResultExtras extras = inResultExtras;
+                CaptureResultExtras extras = b.resultExtras;
                 extras.errorStreamId = streamId;
                 listener->notifyError(
                         hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER,
                         extras);
             }
         }
+
     }
 }
 
-void returnAndRemovePendingOutputBuffers(bool useHalBufManager,
+void collectAndRemovePendingOutputBuffers(bool useHalBufManager,
+        const std::set<int32_t> &halBufferManagedStreams,
         sp<NotificationListener> listener, InFlightRequest& request,
-        SessionStatsBuilder& sessionStatsBuilder) {
+        SessionStatsBuilder& sessionStatsBuilder,
+        std::vector<BufferToReturn> *returnableBuffers) {
     bool timestampIncreasing =
             !((request.zslCapture && request.stillCapture) || request.hasInputBuffer);
     nsecs_t readoutTimestamp = request.resultExtras.hasReadoutTimestamp ?
             request.resultExtras.readoutTimestamp : 0;
-    returnOutputBuffers(useHalBufManager, listener,
+    collectReturnableOutputBuffers(useHalBufManager, halBufferManagedStreams, listener,
             request.pendingOutputBuffers.array(),
             request.pendingOutputBuffers.size(),
             request.shutterTimestamp, readoutTimestamp,
-            /*requested*/true, request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
+            /*requested*/true, request.requestTimeNs, sessionStatsBuilder,
+            /*out*/ returnableBuffers,
+            timestampIncreasing,
             request.outputSurfaces, request.resultExtras,
             request.errorBufStrategy, request.transform);
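
The returnOutputBuffers() -> collectReturnableOutputBuffers() + finishReturningOutputBuffers() split above lets callers gather BufferToReturn entries while the in-flight lock is held and perform the slow, binder-bound stream returns only after dropping it (gated by flags::return_buffers_outside_locks()). A condensed sketch of that two-phase pattern with simplified types; the same deferral is applied to notifyShutter() in the shutter hunk below:

#include <mutex>
#include <vector>

struct PendingReturn { int streamId; /* buffer, timestamps, surfaces, ... */ };

class Session {
  public:
    void onCaptureResult() {
        std::vector<PendingReturn> returnable;
        {
            std::lock_guard<std::mutex> lock(mInFlightLock);
            // Phase 1: only collect; no two-way binder calls under the lock.
            collectReturnable(&returnable);
        }
        // Phase 2: the potentially slow binder calls into buffer consumers run
        // after the critical lock has been released.
        for (const auto& pending : returnable) {
            returnToStream(pending);
        }
    }

  private:
    void collectReturnable(std::vector<PendingReturn>* out);
    void returnToStream(const PendingReturn& pending);

    std::mutex mInFlightLock;
};
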
 
@@ -974,6 +1014,9 @@
     ATRACE_CALL();
     ssize_t idx;
 
+    std::vector<BufferToReturn> returnableBuffers{};
+    CaptureResultExtras pendingNotificationResultExtras{};
+
     // Set timestamp for the request in the in-flight tracking
     // and get the request ID to send upstream
     {
@@ -1040,9 +1083,13 @@
                             states.lastCompletedReprocessFrameNumber;
                     r.resultExtras.lastCompletedZslFrameNumber =
                             states.lastCompletedZslFrameNumber;
-                    states.listener->notifyShutter(r.resultExtras, msg.timestamp);
+                    if (flags::return_buffers_outside_locks()) {
+                        pendingNotificationResultExtras = r.resultExtras;
+                    } else {
+                        states.listener->notifyShutter(r.resultExtras, msg.timestamp);
+                    }
                 }
-                // send pending result and buffers
+                // send pending result and buffers; this queues them up for delivery later
                 const auto& cameraIdsWithZoom = getCameraIdsWithZoomLocked(
                         inflightMap, r.pendingMetadata, r.cameraIdsWithZoom);
                 sendCaptureResult(states,
@@ -1051,16 +1098,35 @@
                     r.hasInputBuffer, r.zslCapture && r.stillCapture,
                     r.rotateAndCropAuto, cameraIdsWithZoom, r.physicalMetadatas);
             }
-            returnAndRemovePendingOutputBuffers(
-                    states.useHalBufManager, states.listener, r, states.sessionStatsBuilder);
+            collectAndRemovePendingOutputBuffers(
+                    states.useHalBufManager, states.halBufManagedStreamIds,
+                    states.listener, r, states.sessionStatsBuilder, &returnableBuffers);
 
-            removeInFlightRequestIfReadyLocked(states, idx);
+            if (!flags::return_buffers_outside_locks()) {
+                finishReturningOutputBuffers(returnableBuffers,
+                        states.listener, states.sessionStatsBuilder);
+            }
+
+            removeInFlightRequestIfReadyLocked(states, idx, &returnableBuffers);
+
         }
     }
     if (idx < 0) {
         SET_ERR("Shutter notification for non-existent frame number %d",
                 msg.frame_number);
     }
+    // Call notifyShutter outside of in-flight mutex
+    if (flags::return_buffers_outside_locks() && pendingNotificationResultExtras.isValid()) {
+        states.listener->notifyShutter(pendingNotificationResultExtras, msg.timestamp);
+    }
+
+    // With no locks held, finish returning buffers to streams, which may take a while since
+    // binder calls are involved
+    if (flags::return_buffers_outside_locks()) {
+        finishReturningOutputBuffers(returnableBuffers,
+                states.listener, states.sessionStatsBuilder);
+    }
+
 }
 
 void notifyError(CaptureOutputStates& states, const camera_error_msg_t &msg) {
@@ -1106,6 +1172,8 @@
             break;
         case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
         case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+        {
+            std::vector<BufferToReturn> returnableBuffers{};
             {
                 std::lock_guard<std::mutex> l(states.inflightLock);
                 ssize_t idx = states.inflightMap.indexOfKey(msg.frame_number);
@@ -1142,7 +1210,12 @@
 
                         // Check whether the buffers returned. If they returned,
                         // remove inflight request.
-                        removeInFlightRequestIfReadyLocked(states, idx);
+                        removeInFlightRequestIfReadyLocked(states, idx, &returnableBuffers);
+                        if (!flags::return_buffers_outside_locks()) {
+                            finishReturningOutputBuffers(returnableBuffers,
+                                    states.listener, states.sessionStatsBuilder);
+                        }
+
                     }
                 } else {
                     resultExtras.frameNumber = msg.frame_number;
@@ -1151,6 +1224,12 @@
                             resultExtras.frameNumber);
                 }
             }
+
+            if (flags::return_buffers_outside_locks()) {
+                finishReturningOutputBuffers(returnableBuffers,
+                        states.listener, states.sessionStatsBuilder);
+            }
+
             resultExtras.errorStreamId = streamId;
             if (states.listener != nullptr) {
                 states.listener->notifyError(errorCode, resultExtras);
@@ -1159,6 +1238,7 @@
                         states.cameraId.c_str(), __FUNCTION__);
             }
             break;
+        }
         case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
             // Do not depend on HAL ERROR_CAMERA_BUFFER to send buffer error
             // callback to the app. Rather, use STATUS_ERROR of image buffers.
@@ -1188,17 +1268,24 @@
 
 void flushInflightRequests(FlushInflightReqStates& states) {
     ATRACE_CALL();
+    std::vector<BufferToReturn> returnableBuffers{};
     { // First return buffers cached in inFlightMap
         std::lock_guard<std::mutex> l(states.inflightLock);
         for (size_t idx = 0; idx < states.inflightMap.size(); idx++) {
             const InFlightRequest &request = states.inflightMap.valueAt(idx);
-            returnOutputBuffers(
-                states.useHalBufManager, states.listener,
+            collectReturnableOutputBuffers(
+                states.useHalBufManager, states.halBufManagedStreamIds,
+                states.listener,
                 request.pendingOutputBuffers.array(),
                 request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
                 /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
+                /*out*/ &returnableBuffers,
                 /*timestampIncreasing*/true, request.outputSurfaces, request.resultExtras,
                 request.errorBufStrategy);
+            if (!flags::return_buffers_outside_locks()) {
+                finishReturningOutputBuffers(returnableBuffers,
+                        states.listener, states.sessionStatsBuilder);
+            }
             ALOGW("%s: Frame %d |  Timestamp: %" PRId64 ", metadata"
                     " arrived: %s, buffers left: %d.\n", __FUNCTION__,
                     states.inflightMap.keyAt(idx), request.shutterTimestamp,
@@ -1209,6 +1296,10 @@
         states.inflightMap.clear();
         states.inflightIntf.onInflightMapFlushedLocked();
     }
+    if (flags::return_buffers_outside_locks()) {
+        finishReturningOutputBuffers(returnableBuffers,
+                states.listener, states.sessionStatsBuilder);
+    }
 
     // Then return all inflight buffers not returned by HAL
     std::vector<std::pair<int32_t, int32_t>> inflightKeys;
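The hunks above all follow one pattern: buffers are collected into a local vector while the inflight lock is held, and finishReturningOutputBuffers() runs either still under the lock (when the return_buffers_outside_locks flag is off, preserving the old behavior) or only after the lock is dropped (when the flag is on). A minimal self-contained sketch of that pattern, with collectBuffers()/returnBuffers() as placeholder stand-ins rather than the patch's real collect*/finish* signatures:

    #include <mutex>
    #include <vector>

    namespace sketch {

    struct BufferToReturn { int streamId = 0; };   // stand-in for the real struct

    std::mutex inflightLock;

    // Gathers buffers while the lock is held; must not make two-way binder calls.
    void collectBuffers(std::vector<BufferToReturn>* out) { out->push_back({1}); }

    // Hands buffers back to their consumers; may make two-way binder calls.
    void returnBuffers(const std::vector<BufferToReturn>&) {}

    // returnOutsideLocks mirrors flags::return_buffers_outside_locks().
    void flushSketch(bool returnOutsideLocks) {
        std::vector<BufferToReturn> returnable;
        {
            std::lock_guard<std::mutex> l(inflightLock);
            collectBuffers(&returnable);
            if (!returnOutsideLocks) {
                returnBuffers(returnable);   // legacy path: return under the lock
            }
        }
        if (returnOutsideLocks) {
            returnBuffers(returnable);       // new path: return after dropping the lock
        }
    }

    }  // namespace sketch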
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 134c037..75864d7 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -44,15 +44,50 @@
      * Helper methods shared between Camera3Device/Camera3OfflineSession for HAL callbacks
      */
 
-    // helper function to return the output buffers to output streams. The
-    // function also optionally calls notify(ERROR_BUFFER).
-    void returnOutputBuffers(
+    struct BufferToReturn {
+        Camera3StreamInterface *stream;
+        camera_stream_buffer_t buffer;
+        nsecs_t timestamp;
+        nsecs_t readoutTimestamp;
+        bool timestampIncreasing;
+        std::vector<size_t> surfaceIds;
+        const CaptureResultExtras resultExtras;
+        int32_t transform;
+        nsecs_t requestTimeNs;
+
+        BufferToReturn(Camera3StreamInterface *stream,
+                camera_stream_buffer_t buffer,
+                nsecs_t timestamp, nsecs_t readoutTimestamp,
+                bool timestampIncreasing, std::vector<size_t> surfaceIds,
+                const CaptureResultExtras &resultExtras,
+                int32_t transform, nsecs_t requestTimeNs):
+            stream(stream),
+            buffer(buffer),
+            timestamp(timestamp),
+            readoutTimestamp(readoutTimestamp),
+            timestampIncreasing(timestampIncreasing),
+            surfaceIds(surfaceIds),
+            resultExtras(resultExtras),
+            transform(transform),
+            requestTimeNs(requestTimeNs) {}
+    };
+
+    // Helper function to collect the output buffers that are ready to
+    // be returned to their output streams. The function also optionally
+    // calls notify(ERROR_BUFFER). The buffers to hand back to streams
+    // are returned in returnableBuffers. Does not make any two-way
+    // binder calls, so it is safe to call while critical locks are
+    // held.
+    void collectReturnableOutputBuffers(
             bool useHalBufManager,
+            const std::set<int32_t> &halBufferManagedStreams,
             sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
             const camera_stream_buffer_t *outputBuffers,
             size_t numBuffers, nsecs_t timestamp,
             nsecs_t readoutTimestamp, bool requested, nsecs_t requestTimeNs,
-            SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing = true,
+            SessionStatsBuilder& sessionStatsBuilder,
+            /*out*/ std::vector<BufferToReturn> *returnableBuffers,
+            bool timestampIncreasing = true,
             // The following arguments are only meant for surface sharing use case
             const SurfaceMap& outputSurfaces = SurfaceMap{},
             // Used to send buffer error callback when failing to return buffer
@@ -60,13 +95,24 @@
             ERROR_BUF_STRATEGY errorBufStrategy = ERROR_BUF_RETURN,
             int32_t transform = -1);
 
-    // helper function to return the output buffers to output streams, and
-    // remove the returned buffers from the inflight request's pending buffers
-    // vector.
-    void returnAndRemovePendingOutputBuffers(
+    // Helper function to collect the output buffers that are ready to
+    // be returned to output streams, and to remove those buffers from
+    // the inflight request's pending buffers vector. Does not make any
+    // two-way binder calls, so it is safe to call while critical locks
+    // are held.
+    void collectAndRemovePendingOutputBuffers(
             bool useHalBufManager,
+            const std::set<int32_t> &halBufferManagedStreams,
             sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
-            InFlightRequest& request, SessionStatsBuilder& sessionStatsBuilder);
+            InFlightRequest& request, SessionStatsBuilder& sessionStatsBuilder,
+            /*out*/ std::vector<BufferToReturn> *returnableBuffers);
+
+    // Actually return filled output buffers to the consumer, using the list
+    // provided by collectReturnableOutputBuffers / collectAndRemovePendingOutputBuffers.
+    // Makes two-way binder calls to applications, so do not hold any critical locks
+    // when calling.
+    void finishReturningOutputBuffers(const std::vector<BufferToReturn> &returnableBuffers,
+            sp<NotificationListener> listener, SessionStatsBuilder& sessionStatsBuilder);
 
     // Camera3Device/Camera3OfflineSession internal states used in notify/processCaptureResult
     // callbacks
@@ -87,6 +133,7 @@
         uint32_t& nextReprocResultFrameNum;
         uint32_t& nextZslResultFrameNum; // end of outputLock scope
         const bool useHalBufManager;
+        const std::set<int32_t>& halBufManagedStreamIds;
         const bool usePartialResult;
         const bool needFixupMonoChrome;
         const uint32_t numPartialResults;
@@ -118,6 +165,7 @@
         const std::string& cameraId;
         std::mutex& reqBufferLock; // lock to serialize request buffer calls
         const bool useHalBufManager;
+        const std::set<int32_t>& halBufManagedStreamIds;
         StreamSet& outputStreams;
         SessionStatsBuilder& sessionStatsBuilder;
         SetErrorInterface& setErrIntf;
@@ -128,6 +176,7 @@
     struct ReturnBufferStates {
         const std::string& cameraId;
         const bool useHalBufManager;
+        const std::set<int32_t>& halBufManagedStreamIds;
         StreamSet& outputStreams;
         SessionStatsBuilder& sessionStatsBuilder;
         BufferRecordsInterface& bufferRecordsIntf;
@@ -138,6 +187,7 @@
         std::mutex& inflightLock;
         InFlightRequestMap& inflightMap; // end of inflightLock scope
         const bool useHalBufManager;
+        const std::set<int32_t>& halBufManagedStreamIds;
         sp<NotificationListener> listener;
         InflightRequestUpdateInterface& inflightIntf;
         BufferRecordsInterface& bufferRecordsIntf;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h b/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
index 3ac666b..aca7a67 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtilsTemplated.h
@@ -32,13 +32,17 @@
 
 #include <camera/CameraUtils.h>
 #include <camera_metadata_hidden.h>
+#include <com_android_internal_camera_flags.h>
 
 #include "device3/Camera3OutputUtils.h"
+#include "utils/SessionConfigurationUtils.h"
 
 #include "system/camera_metadata.h"
 
 using namespace android::camera3;
+using namespace android::camera3::SessionConfigurationUtils;
 using namespace android::hardware::camera;
+namespace flags = com::android::internal::camera::flags;
 
 namespace android {
 namespace camera3 {
@@ -207,7 +211,9 @@
 
         bool noBufferReturned = false;
         buffer_handle_t *buffer = nullptr;
-        if (states.useHalBufManager) {
+        if (states.useHalBufManager ||
+                (flags::session_hal_buf_manager() &&
+                        contains(states.halBufManagedStreamIds, bSrc.streamId))) {
             // This is suspicious most of the time but can be correct during flush where HAL
             // has to return capture result before a buffer is requested
             if (bSrc.bufferId == BUFFER_ID_NO_BUFFER) {
@@ -294,13 +300,15 @@
 template <class VecStreamBufferType>
 void returnStreamBuffersT(ReturnBufferStates& states,
         const VecStreamBufferType& buffers) {
-    if (!states.useHalBufManager) {
-        ALOGE("%s: Camera %s does not support HAL buffer managerment",
-                __FUNCTION__, states.cameraId.c_str());
-        return;
-    }
 
     for (const auto& buf : buffers) {
+        if (!states.useHalBufManager &&
+                !(flags::session_hal_buf_manager() &&
+                        contains(states.halBufManagedStreamIds, buf.streamId))) {
+            ALOGE("%s: Camera %s does not support HAL buffer management for stream id %d",
+                  __FUNCTION__, states.cameraId.c_str(), buf.streamId);
+            return;
+        }
         if (buf.bufferId == BUFFER_ID_NO_BUFFER) {
             ALOGE("%s: cannot return a buffer without bufferId", __FUNCTION__);
             continue;
@@ -337,9 +345,15 @@
             continue;
         }
         streamBuffer.stream = stream->asHalStream();
-        returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
-                &streamBuffer, /*size*/1, /*timestamp*/ 0, /*readoutTimestamp*/0,
-                /*requested*/false, /*requestTimeNs*/0, states.sessionStatsBuilder);
+        std::vector<BufferToReturn> returnableBuffers{};
+        collectReturnableOutputBuffers(states.useHalBufManager, states.halBufManagedStreamIds,
+                /*listener*/nullptr, &streamBuffer, /*size*/1, /*timestamp*/ 0,
+                /*readoutTimestamp*/0, /*requested*/false, /*requestTimeNs*/0,
+                states.sessionStatsBuilder,
+                /*out*/&returnableBuffers);
+        finishReturningOutputBuffers(returnableBuffers, /*listener*/ nullptr,
+                states.sessionStatsBuilder);
+
     }
 }
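The repeated condition above — states.useHalBufManager, or the session_hal_buf_manager flag plus membership in halBufManagedStreamIds — amounts to a per-stream "is this stream HAL-buffer-managed?" check. A small illustrative sketch of that predicate (the function name and the bool standing in for the aconfig flag are assumptions, not the patch's exact helpers):

    #include <cstdint>
    #include <set>

    namespace sketch {

    // A stream is HAL-buffer-managed either because the whole session runs with
    // the HAL buffer manager, or because the per-stream flag path opted it in.
    bool isHalManagedStream(bool useHalBufManager,
                            bool sessionHalBufManagerFlag,  // flags::session_hal_buf_manager()
                            const std::set<int32_t>& halBufManagedStreamIds,
                            int32_t streamId) {
        return useHalBufManager ||
               (sessionHalBufManagerFlag && halBufManagedStreamIds.count(streamId) > 0);
    }

    }  // namespace sketch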
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 701c472..79a767a 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -969,11 +969,6 @@
     return INVALID_OPERATION;
 }
 
-status_t Camera3Stream::getBuffersLocked(std::vector<OutstandingBuffer>*) {
-    ALOGE("%s: This type of stream does not support output", __FUNCTION__);
-    return INVALID_OPERATION;
-}
-
 status_t Camera3Stream::returnBufferLocked(const camera_stream_buffer &,
                                            nsecs_t, nsecs_t, int32_t, const std::vector<size_t>&) {
     ALOGE("%s: This type of stream does not support output", __FUNCTION__);
@@ -1047,92 +1042,6 @@
     mBufferFreedListener = listener;
 }
 
-status_t Camera3Stream::getBuffers(std::vector<OutstandingBuffer>* buffers,
-        nsecs_t waitBufferTimeout) {
-    ATRACE_CALL();
-    Mutex::Autolock l(mLock);
-    status_t res = OK;
-
-    if (buffers == nullptr) {
-        ALOGI("%s: buffers must not be null!", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    size_t numBuffersRequested = buffers->size();
-    if (numBuffersRequested == 0) {
-        ALOGE("%s: 0 buffers are requested!", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    // This function should be only called when the stream is configured already.
-    if (mState != STATE_CONFIGURED) {
-        ALOGE("%s: Stream %d: Can't get buffers if stream is not in CONFIGURED state %d",
-                __FUNCTION__, mId, mState);
-        if (mState == STATE_ABANDONED) {
-            return DEAD_OBJECT;
-        } else {
-            return INVALID_OPERATION;
-        }
-    }
-
-    size_t numOutstandingBuffers = getHandoutOutputBufferCountLocked();
-    size_t numCachedBuffers = getCachedOutputBufferCountLocked();
-    size_t maxNumCachedBuffers = getMaxCachedOutputBuffersLocked();
-    // Wait for new buffer returned back if we are running into the limit. There
-    // are 2 limits:
-    // 1. The number of HAL buffers is greater than max_buffers
-    // 2. The number of HAL buffers + cached buffers is greater than max_buffers
-    //    + maxCachedBuffers
-    while (numOutstandingBuffers + numBuffersRequested > camera_stream::max_buffers ||
-            numOutstandingBuffers + numCachedBuffers + numBuffersRequested >
-            camera_stream::max_buffers + maxNumCachedBuffers) {
-        ALOGV("%s: Already dequeued %zu(+%zu) output buffers and requesting %zu "
-                "(max is %d(+%zu)), waiting.", __FUNCTION__, numOutstandingBuffers,
-                numCachedBuffers, numBuffersRequested, camera_stream::max_buffers,
-                maxNumCachedBuffers);
-        nsecs_t waitStart = systemTime(SYSTEM_TIME_MONOTONIC);
-        if (waitBufferTimeout < kWaitForBufferDuration) {
-            waitBufferTimeout = kWaitForBufferDuration;
-        }
-        res = mOutputBufferReturnedSignal.waitRelative(mLock, waitBufferTimeout);
-        nsecs_t waitEnd = systemTime(SYSTEM_TIME_MONOTONIC);
-        mBufferLimitLatency.add(waitStart, waitEnd);
-        if (res != OK) {
-            if (res == TIMED_OUT) {
-                ALOGE("%s: wait for output buffer return timed out after %lldms (max_buffers %d)",
-                        __FUNCTION__, waitBufferTimeout / 1000000LL,
-                        camera_stream::max_buffers);
-            }
-            return res;
-        }
-        size_t updatedNumOutstandingBuffers = getHandoutOutputBufferCountLocked();
-        size_t updatedNumCachedBuffers = getCachedOutputBufferCountLocked();
-        if (updatedNumOutstandingBuffers >= numOutstandingBuffers &&
-                updatedNumCachedBuffers == numCachedBuffers) {
-            ALOGE("%s: outstanding buffer count goes from %zu to %zu, "
-                    "getBuffer(s) call must not run in parallel!", __FUNCTION__,
-                    numOutstandingBuffers, updatedNumOutstandingBuffers);
-            return INVALID_OPERATION;
-        }
-        numOutstandingBuffers = updatedNumOutstandingBuffers;
-        numCachedBuffers = updatedNumCachedBuffers;
-    }
-
-    res = getBuffersLocked(buffers);
-    if (res == OK) {
-        for (auto& outstandingBuffer : *buffers) {
-            camera_stream_buffer* buffer = outstandingBuffer.outBuffer;
-            fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
-            if (buffer->buffer) {
-                Mutex::Autolock l(mOutstandingBuffersLock);
-                mOutstandingBuffers.push_back(*buffer->buffer);
-            }
-        }
-    }
-
-    return res;
-}
-
 void Camera3Stream::queueHDRMetadata(buffer_handle_t buffer, sp<ANativeWindow>& anw,
         int64_t dynamicRangeProfile) {
     auto& mapper = GraphicBufferMapper::get();
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index f06ccf3..0df09cd 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -343,12 +343,6 @@
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
 
     /**
-     * Similar to getBuffer() except this method fills multiple buffers.
-     */
-    status_t         getBuffers(std::vector<OutstandingBuffer>* buffers,
-            nsecs_t waitBufferTimeout);
-
-    /**
      * Return a buffer to the stream after use by the HAL.
      *
      * Multiple surfaces could share the same HAL stream, but a request may
@@ -535,8 +529,6 @@
             nsecs_t timestamp, nsecs_t readoutTimestamp, int32_t transform,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
 
-    virtual status_t getBuffersLocked(std::vector<OutstandingBuffer>*);
-
     virtual status_t getInputBufferLocked(camera_stream_buffer *buffer, Size* size);
 
     virtual status_t returnInputBufferLocked(
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 7fa6273..26fa04f 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -395,11 +395,6 @@
          */
         std::vector<size_t> surface_ids;
     };
-    /**
-     * Similar to getBuffer() except this method fills multiple buffers.
-     */
-    virtual status_t getBuffers(std::vector<OutstandingBuffer>* buffers,
-            nsecs_t waitBufferTimeout) = 0;
 
     /**
      * Return a buffer to the stream after use by the HAL.
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index a7bd312..3626f20 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -35,6 +35,7 @@
     uint32_t operation_mode;
     bool input_is_multi_resolution;
     bool use_hal_buf_manager = false;
+    std::set<int32_t> hal_buffer_managed_streams;
 } camera_stream_configuration_t;
 
 typedef struct camera_capture_request {
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
index 97475f0..e8ef692 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
@@ -59,6 +59,7 @@
 #include <com_android_internal_camera_flags.h>
 
 #include "utils/CameraTraces.h"
+#include "utils/SessionConfigurationUtils.h"
 #include "mediautils/SchedulingPolicyService.h"
 #include "device3/Camera3OutputStream.h"
 #include "device3/Camera3InputStream.h"
@@ -79,6 +80,7 @@
 #include "AidlCamera3Device.h"
 
 using namespace android::camera3;
+using namespace android::camera3::SessionConfigurationUtils;
 using namespace aidl::android::hardware;
 using aidl::android::hardware::camera::metadata::SensorPixelMode;
 using aidl::android::hardware::camera::metadata::RequestAvailableDynamicRangeProfilesMap;
@@ -337,6 +339,16 @@
 
     mBatchSizeLimitEnabled = (deviceVersion >= CAMERA_DEVICE_API_VERSION_1_2);
 
+    camera_metadata_entry readoutSupported = mDeviceInfo.find(ANDROID_SENSOR_READOUT_TIMESTAMP);
+    if (readoutSupported.count == 0) {
+        ALOGW("%s: Could not find value corresponding to ANDROID_SENSOR_READOUT_TIMESTAMP. "
+              "Assuming true.", __FUNCTION__);
+        mSensorReadoutTimestampSupported = true;
+    } else {
+        mSensorReadoutTimestampSupported =
+                readoutSupported.data.u8[0] == ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE;
+    }
+
     return initializeCommonLocked();
 }
 
@@ -400,7 +412,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
@@ -442,7 +454,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
@@ -450,7 +462,7 @@
         mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue
     };
     for (const auto& msg : msgs) {
-        camera3::notify(states, msg);
+        camera3::notify(states, msg, mSensorReadoutTimestampSupported);
     }
     return ::ndk::ScopedAStatus::ok();
 
@@ -531,7 +543,7 @@
         }
 
         // When not using HAL buf manager, only allow streams requested by app to be preserved
-        if (!mUseHalBufManager) {
+        if (!isHalBufferManagedStream(id)) {
             if (std::find(streamsToKeep.begin(), streamsToKeep.end(), id) == streamsToKeep.end()) {
                 SET_ERR("stream ID %d must not be switched to offline!", id);
                 return UNKNOWN_ERROR;
@@ -611,17 +623,18 @@
     // TODO: check if we need to lock before copying states
     //       though technically no other thread should be talking to Camera3Device at this point
     Camera3OfflineStates offlineStates(
-            mTagMonitor, mVendorTagId, mUseHalBufManager, mNeedFixupMonochromeTags,
-            mUsePartialResult, mNumPartialResults, mLastCompletedRegularFrameNumber,
-            mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-            mNextResultFrameNumber, mNextReprocessResultFrameNumber,
-            mNextZslStillResultFrameNumber, mNextShutterFrameNumber,
-            mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-            mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers,
-            mZoomRatioMappers, mRotateAndCropMappers);
+            mTagMonitor, mVendorTagId, mUseHalBufManager, mHalBufManagedStreamIds,
+            mNeedFixupMonochromeTags, mUsePartialResult, mNumPartialResults,
+            mLastCompletedRegularFrameNumber, mLastCompletedReprocessFrameNumber,
+            mLastCompletedZslFrameNumber, mNextResultFrameNumber,
+            mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+            mNextShutterFrameNumber, mNextReprocessShutterFrameNumber,
+            mNextZslStillShutterFrameNumber, mDeviceInfo, mPhysicalDeviceInfoMap,
+            mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers);
 
     *session = new AidlCamera3OfflineSession(mId, inputStream, offlineStreamSet,
-            std::move(bufferRecords), offlineReqs, offlineStates, offlineSession);
+                                             std::move(bufferRecords), offlineReqs, offlineStates,
+                                             offlineSession, mSensorReadoutTimestampSupported);
 
     // Delete all streams that has been transferred to offline session
     Mutex::Autolock l(mLock);
@@ -688,8 +701,8 @@
         aidl::android::hardware::camera::device::BufferRequestStatus* status) {
 
     RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams,
-        mSessionStatsBuilder, *this, *(mInterface), *this};
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mHalBufManagedStreamIds,
+        mOutputStreams, mSessionStatsBuilder, *this, *(mInterface), *this};
     camera3::requestStreamBuffers(states, bufReqs, outBuffers, status);
     return ::ndk::ScopedAStatus::ok();
 }
@@ -713,7 +726,7 @@
 ::ndk::ScopedAStatus AidlCamera3Device::returnStreamBuffers(
         const std::vector<camera::device::StreamBuffer>& buffers) {
     ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams,  mSessionStatsBuilder,
+        mId, mUseHalBufManager, mHalBufManagedStreamIds, mOutputStreams,  mSessionStatsBuilder,
         *(mInterface)};
     camera3::returnStreamBuffers(states, buffers);
     return ::ndk::ScopedAStatus::ok();
@@ -905,6 +918,12 @@
         camera3::camera_stream_t *src = config->streams[i];
 
         Camera3Stream* cam3stream = Camera3Stream::cast(src);
+        // Stream configurations with multi-resolution streams must use the HAL buffer manager.
+        if (!flags::session_hal_buf_manager() && cam3stream->getHalStreamGroupId() != -1 &&
+                src->stream_type != CAMERA_STREAM_INPUT) {
+            mUseHalBufManager = true;
+            config->use_hal_buf_manager = true;
+        }
         cam3stream->setBufferFreedListener(this);
         int streamId = cam3stream->getId();
         StreamType streamType;
@@ -975,31 +994,38 @@
     requestedConfiguration.multiResolutionInputImage = config->input_is_multi_resolution;
     requestedConfiguration.logId = logId;
     ndk::ScopedAStatus err = ndk::ScopedAStatus::ok();
+    int32_t interfaceVersion = 0;
     camera::device::ConfigureStreamsRet configureStreamsRet;
-    if (flags::session_hal_buf_manager()) {
-        int32_t interfaceVersion = 0;
-        err = mAidlSession->getInterfaceVersion(&interfaceVersion);
-        if (!err.isOk()) {
-            ALOGE("%s: Transaction error getting interface version: %s", __FUNCTION__,
-                    err.getMessage());
-            return AidlProviderInfo::mapToStatusT(err);
-        }
-        if (interfaceVersion >= AIDL_DEVICE_SESSION_V3 && mSupportSessionHalBufManager) {
-            err = mAidlSession->configureStreamsV2(requestedConfiguration, &configureStreamsRet);
-            finalConfiguration = std::move(configureStreamsRet.halStreams);
-        } else {
-            err = mAidlSession->configureStreams(requestedConfiguration, &finalConfiguration);
-        }
+    err = mAidlSession->getInterfaceVersion(&interfaceVersion);
+    if (!err.isOk()) {
+        ALOGE("%s: Transaction error getting interface version: %s", __FUNCTION__,
+              err.getMessage());
+        return AidlProviderInfo::mapToStatusT(err);
+    }
+    if (flags::session_hal_buf_manager() && interfaceVersion >= AIDL_DEVICE_SESSION_V3
+            && mSupportSessionHalBufManager) {
+        err = mAidlSession->configureStreamsV2(requestedConfiguration, &configureStreamsRet);
+        finalConfiguration = std::move(configureStreamsRet.halStreams);
     } else {
         err = mAidlSession->configureStreams(requestedConfiguration, &finalConfiguration);
     }
+
     if (!err.isOk()) {
         ALOGE("%s: Transaction error: %s", __FUNCTION__, err.getMessage());
         return AidlProviderInfo::mapToStatusT(err);
     }
-    if (flags::session_hal_buf_manager() && mSupportSessionHalBufManager) {
-        mUseHalBufManager = configureStreamsRet.enableHalBufferManager;
-        config->use_hal_buf_manager = configureStreamsRet.enableHalBufferManager;
+
+    if (flags::session_hal_buf_manager()) {
+        std::set<int32_t> halBufferManagedStreamIds;
+        for (const auto &halStream: finalConfiguration) {
+            if ((interfaceVersion >= AIDL_DEVICE_SESSION_V3 &&
+                    mSupportSessionHalBufManager && halStream.enableHalBufferManager)
+                    || mUseHalBufManager) {
+                halBufferManagedStreamIds.insert(halStream.id);
+            }
+        }
+        mHalBufManagedStreamIds = std::move(halBufferManagedStreamIds);
+        config->hal_buffer_managed_streams = mHalBufManagedStreamIds;
     }
     // And convert output stream configuration from AIDL
     for (size_t i = 0; i < config->num_streams; i++) {
@@ -1070,9 +1096,9 @@
             }
             dstStream->setUsage(
                     mapProducerToFrameworkUsage(src.producerUsage));
-
             if (flags::session_hal_buf_manager()) {
-                dstStream->setHalBufferManager(mUseHalBufManager);
+                dstStream->setHalBufferManager(
+                        contains(config->hal_buffer_managed_streams, streamId));
             }
         }
         dst->max_buffers = src.maxBuffers;
@@ -1396,7 +1422,7 @@
                     handlesCreated->push_back(acquireFence);
                 }
                 dst.acquireFence = camera3::dupToAidlIfNotNull(acquireFence);
-            } else if (mUseHalBufManager) {
+            } else if (isHalBufferManagedStream(streamId)) {
                 // HAL buffer management path
                 dst.bufferId = BUFFER_ID_NO_BUFFER;
                 dst.buffer = aidl::android::hardware::common::NativeHandle();
@@ -1410,7 +1436,7 @@
             dst.releaseFence = aidl::android::hardware::common::NativeHandle();
 
             // Output buffers are empty when using HAL buffer manager
-            if (!mUseHalBufManager) {
+            if (!isHalBufferManagedStream(streamId)) {
                 mBufferRecords.pushInflightBuffer(
                         captureRequest->frameNumber, streamId, src->buffer);
                 inflightBuffers->push_back(std::make_pair(captureRequest->frameNumber, streamId));
@@ -1456,8 +1482,9 @@
                 bool supportCameraMute,
                 bool overrideToPortrait,
                 bool supportSettingsOverride) :
-          RequestThread(parent, statusTracker, interface, sessionParamKeys, useHalBufManager,
-                  supportCameraMute, overrideToPortrait, supportSettingsOverride) {}
+          RequestThread(parent, statusTracker, interface, sessionParamKeys,
+                  useHalBufManager, supportCameraMute, overrideToPortrait,
+                  supportSettingsOverride) {}
 
 status_t AidlCamera3Device::AidlRequestThread::switchToOffline(
         const std::vector<int32_t>& streamsToKeep,
@@ -1690,7 +1717,8 @@
                 bool overrideToPortrait,
                 bool supportSettingsOverride) {
     return new AidlRequestThread(parent, statusTracker, interface, sessionParamKeys,
-            useHalBufManager, supportCameraMute, overrideToPortrait, supportSettingsOverride);
+            useHalBufManager, supportCameraMute, overrideToPortrait,
+            supportSettingsOverride);
 };
 
 sp<Camera3Device::Camera3DeviceInjectionMethods>
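On the AIDL path above, the set of HAL-buffer-managed stream ids is rebuilt after stream configuration: per-stream opt-in comes from the configureStreamsV2 response when the session supports it, while the legacy device-wide mUseHalBufManager still marks every returned stream. A compact sketch of that derivation, with HalStreamInfo as an illustrative stand-in for the AIDL HalStream entries:

    #include <cstdint>
    #include <set>
    #include <vector>

    namespace sketch {

    struct HalStreamInfo {              // stand-in for an AIDL HalStream entry
        int32_t id;
        bool enableHalBufferManager;    // only meaningful for configureStreamsV2
    };

    std::set<int32_t> deriveHalManagedStreams(const std::vector<HalStreamInfo>& halStreams,
                                              bool perStreamOptInSupported,
                                              bool deviceWideHalBufManager) {
        std::set<int32_t> ids;
        for (const auto& hs : halStreams) {
            // Either the HAL opted this stream in individually, or the whole
            // device runs in HAL-buffer-manager mode.
            if ((perStreamOptInSupported && hs.enableHalBufferManager) ||
                    deviceWideHalBufManager) {
                ids.insert(hs.id);
            }
        }
        return ids;
    }

    }  // namespace sketch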
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h
index 90e2f97..f0a5f7e 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h
@@ -289,6 +289,9 @@
     // capture requests.
     bool mBatchSizeLimitEnabled = false;
 
+    // Whether the HAL supports reporting sensor readout timestamp
+    bool mSensorReadoutTimestampSupported = true;
+
 }; // class AidlCamera3Device
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
index 01c4e88..f8308df 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
@@ -122,7 +122,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
@@ -169,7 +169,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
@@ -177,7 +177,7 @@
         /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
     };
     for (const auto& msg : msgs) {
-        camera3::notify(states, msg);
+        camera3::notify(states, msg, mSensorReadoutTimestampSupported);
     }
     return ::ndk::ScopedAStatus::ok();
 }
@@ -208,7 +208,8 @@
     }
 
     RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager,
+        mHalBufManagedStreamIds, mOutputStreams, mSessionStatsBuilder,
         *this, mBufferRecords, *this};
     camera3::requestStreamBuffers(states, bufReqs, buffers, status);
     return ::ndk::ScopedAStatus::ok();
@@ -241,7 +242,7 @@
     }
 
     ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        mId, mUseHalBufManager, mHalBufManagedStreamIds, mOutputStreams, mSessionStatsBuilder,
         mBufferRecords};
 
     camera3::returnStreamBuffers(states, buffers);
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h
index 33b638c..f8fdeb9 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h
@@ -105,19 +105,20 @@
     };
 
     // initialize by Camera3Device.
-    explicit AidlCamera3OfflineSession(const std::string& id,
-            const sp<camera3::Camera3Stream>& inputStream,
-            const camera3::StreamSet& offlineStreamSet,
-            camera3::BufferRecords&& bufferRecords,
+    explicit AidlCamera3OfflineSession(
+            const std::string& id, const sp<camera3::Camera3Stream>& inputStream,
+            const camera3::StreamSet& offlineStreamSet, camera3::BufferRecords&& bufferRecords,
             const camera3::InFlightRequestMap& offlineReqs,
             const Camera3OfflineStates& offlineStates,
             std::shared_ptr<aidl::android::hardware::camera::device::ICameraOfflineSession>
-                    offlineSession) :
-      Camera3OfflineSession(id, inputStream, offlineStreamSet, std::move(bufferRecords),
-              offlineReqs, offlineStates),
-      mSession(offlineSession) {
-        mCallbacks = ndk::SharedRefBase::make<AidlCameraDeviceCallbacks>(this);
-      };
+                    offlineSession,
+            bool sensorReadoutTimestampSupported)
+        : Camera3OfflineSession(id, inputStream, offlineStreamSet, std::move(bufferRecords),
+                                offlineReqs, offlineStates),
+          mSession(offlineSession),
+          mSensorReadoutTimestampSupported(sensorReadoutTimestampSupported) {
+            mCallbacks = ndk::SharedRefBase::make<AidlCameraDeviceCallbacks>(this);
+    };
 
     /**
      * End of CameraOfflineSessionBase interface
@@ -130,6 +131,8 @@
 
     std::shared_ptr<AidlCameraDeviceCallbacks> mCallbacks;
 
+    bool mSensorReadoutTimestampSupported;
+
     virtual void closeSessionLocked() override;
 
     virtual void releaseSessionLocked() override;
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
index 74d4230..d9c8e57 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
@@ -67,7 +67,8 @@
 }
 
 void notify(CaptureOutputStates& states,
-        const aidl::android::hardware::camera::device::NotifyMsg& msg) {
+            const aidl::android::hardware::camera::device::NotifyMsg& msg,
+            bool hasReadoutTimestamp) {
 
     using ErrorCode = aidl::android::hardware::camera::device::ErrorCode;
     using Tag = aidl::android::hardware::camera::device::NotifyMsg::Tag;
@@ -110,8 +111,9 @@
             m.type = CAMERA_MSG_SHUTTER;
             m.message.shutter.frame_number = msg.get<Tag::shutter>().frameNumber;
             m.message.shutter.timestamp = msg.get<Tag::shutter>().timestamp;
-            m.message.shutter.readout_timestamp_valid = true;
-            m.message.shutter.readout_timestamp = msg.get<Tag::shutter>().readoutTimestamp;
+            m.message.shutter.readout_timestamp_valid = hasReadoutTimestamp;
+            m.message.shutter.readout_timestamp =
+                    hasReadoutTimestamp ? msg.get<Tag::shutter>().readoutTimestamp : 0LL;
             break;
     }
     notify(states, &m);
@@ -143,12 +145,6 @@
     std::lock_guard<std::mutex> lock(states.reqBufferLock);
     std::vector<StreamBufferRet> bufRets;
     outBuffers->clear();
-    if (!states.useHalBufManager) {
-        ALOGE("%s: Camera %s does not support HAL buffer management",
-                __FUNCTION__, states.cameraId.c_str());
-        *status = BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS;
-        return;
-    }
 
     SortedVector<int32_t> streamIds;
     ssize_t sz = streamIds.setCapacity(bufReqs.size());
@@ -174,6 +170,13 @@
             *status = BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS;
             return;
         }
+        if (!states.useHalBufManager &&
+                !contains(states.halBufManagedStreamIds, bufReq.streamId)) {
+            ALOGE("%s: Camera %s does not support HAL buffer management for stream id %d",
+                  __FUNCTION__, states.cameraId.c_str(), bufReq.streamId);
+            *status = BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS;
+            return;
+        }
         streamIds.add(bufReq.streamId);
     }
 
@@ -316,10 +319,15 @@
                 sb.acquire_fence = -1;
                 sb.status = CAMERA_BUFFER_STATUS_ERROR;
             }
-            returnOutputBuffers(states.useHalBufManager, nullptr,
-                    streamBuffers.data(), numAllocatedBuffers, 0,
-                    0, false,
-                    0, states.sessionStatsBuilder);
+            std::vector<BufferToReturn> returnableBuffers{};
+            collectReturnableOutputBuffers(states.useHalBufManager, states.halBufManagedStreamIds,
+                    /*listener*/ nullptr,
+                    streamBuffers.data(), numAllocatedBuffers, /*timestamp*/ 0,
+                    /*readoutTimestamp*/ 0, /*requested*/ false,
+                    /*requestTimeNs*/ 0, states.sessionStatsBuilder,
+                    /*out*/ &returnableBuffers);
+            finishReturningOutputBuffers(returnableBuffers, /*listener*/ nullptr,
+                    states.sessionStatsBuilder);
             for (auto buf : newBuffers) {
                 states.bufferRecordsIntf.removeOneBufferCache(streamId, buf);
             }
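The notify() change above gates the readout timestamp on HAL support: AidlCamera3Device reads ANDROID_SENSOR_READOUT_TIMESTAMP once (defaulting to supported when the tag is absent) and passes the result down, so the shutter message only claims a valid readout timestamp when the sensor actually reports one. A hedged sketch of that translation step, with ShutterMsg/FrameworkShutter as simplified stand-ins for the AIDL and framework message types:

    #include <cstdint>

    namespace sketch {

    struct ShutterMsg {                  // stand-in for the AIDL shutter payload
        uint32_t frameNumber;
        int64_t timestamp;
        int64_t readoutTimestamp;
    };

    struct FrameworkShutter {            // stand-in for the camera_msg shutter fields
        uint32_t frame_number;
        int64_t timestamp;
        bool readout_timestamp_valid;
        int64_t readout_timestamp;
    };

    FrameworkShutter translateShutter(const ShutterMsg& msg, bool hasReadoutTimestamp) {
        FrameworkShutter out{};
        out.frame_number = msg.frameNumber;
        out.timestamp = msg.timestamp;
        // Only forward the readout timestamp when the sensor actually reports one.
        out.readout_timestamp_valid = hasReadoutTimestamp;
        out.readout_timestamp = hasReadoutTimestamp ? msg.readoutTimestamp : 0LL;
        return out;
    }

    }  // namespace sketch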
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.h b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.h
index e795624..d3a8ede 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.h
@@ -79,11 +79,8 @@
                     &physicalCameraMetadata);
 
     void notify(CaptureOutputStates& states,
-        const aidl::android::hardware::camera::device::NotifyMsg& msg,
-        bool hasReadoutTime, uint64_t readoutTime);
-
-    void notify(CaptureOutputStates& states,
-        const aidl::android::hardware::camera::device::NotifyMsg& msg);
+            const aidl::android::hardware::camera::device::NotifyMsg& msg,
+            bool hasReadoutTimestamp);
 
     void requestStreamBuffers(RequestBufferStates& states,
         const std::vector<aidl::android::hardware::camera::device::BufferRequest>& bufReqs,
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index 4488067..f2e618f 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -47,6 +47,7 @@
 #include <utils/Timers.h>
 #include <cutils/properties.h>
 #include <camera/StringUtils.h>
+#include <com_android_internal_camera_flags.h>
 
 #include <android/hardware/camera/device/3.7/ICameraInjectionSession.h>
 #include <android/hardware/camera2/ICameraDeviceUser.h>
@@ -66,6 +67,7 @@
 using namespace android::hardware::camera;
 using namespace android::hardware::camera::device::V3_2;
 using android::hardware::camera::metadata::V3_6::CameraMetadataEnumAndroidSensorPixelMode;
+namespace flags = com::android::internal::camera::flags;
 
 namespace android {
 
@@ -307,7 +309,8 @@
         const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
         requestStreamBuffers_cb _hidl_cb) {
     RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mHalBufManagedStreamIds,
+        mOutputStreams, mSessionStatsBuilder,
         *this, *mInterface, *this};
     camera3::requestStreamBuffers(states, bufReqs, _hidl_cb);
     return hardware::Void();
@@ -316,7 +319,8 @@
 hardware::Return<void> HidlCamera3Device::returnStreamBuffers(
         const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers) {
     ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder, *mInterface};
+        mId, mUseHalBufManager, mHalBufManagedStreamIds, mOutputStreams,
+        mSessionStatsBuilder, *mInterface};
     camera3::returnStreamBuffers(states, buffers);
     return hardware::Void();
 }
@@ -362,7 +366,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -425,7 +429,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -473,7 +477,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -641,14 +645,14 @@
     // TODO: check if we need to lock before copying states
     //       though technically no other thread should be talking to Camera3Device at this point
     Camera3OfflineStates offlineStates(
-            mTagMonitor, mVendorTagId, mUseHalBufManager, mNeedFixupMonochromeTags,
-            mUsePartialResult, mNumPartialResults, mLastCompletedRegularFrameNumber,
-            mLastCompletedReprocessFrameNumber, mLastCompletedZslFrameNumber,
-            mNextResultFrameNumber, mNextReprocessResultFrameNumber,
-            mNextZslStillResultFrameNumber, mNextShutterFrameNumber,
-            mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
-            mDeviceInfo, mPhysicalDeviceInfoMap, mDistortionMappers,
-            mZoomRatioMappers, mRotateAndCropMappers);
+            mTagMonitor, mVendorTagId, mUseHalBufManager, mHalBufManagedStreamIds,
+            mNeedFixupMonochromeTags, mUsePartialResult, mNumPartialResults,
+            mLastCompletedRegularFrameNumber, mLastCompletedReprocessFrameNumber,
+            mLastCompletedZslFrameNumber, mNextResultFrameNumber,
+            mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
+            mNextShutterFrameNumber, mNextReprocessShutterFrameNumber,
+            mNextZslStillShutterFrameNumber, mDeviceInfo, mPhysicalDeviceInfoMap,
+            mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers);
 
     *session = new HidlCamera3OfflineSession(mId, inputStream, offlineStreamSet,
             std::move(bufferRecords), offlineReqs, offlineStates, offlineSession);
@@ -716,7 +720,8 @@
                 bool overrideToPortrait,
                 bool supportSettingsOverride) {
         return new HidlRequestThread(parent, statusTracker, interface, sessionParamKeys,
-                useHalBufManager, supportCameraMute, overrideToPortrait, supportSettingsOverride);
+                useHalBufManager, supportCameraMute, overrideToPortrait,
+                supportSettingsOverride);
 };
 
 sp<Camera3Device::Camera3DeviceInjectionMethods>
@@ -909,6 +914,7 @@
     requestedConfiguration3_2.streams.resize(config->num_streams);
     requestedConfiguration3_4.streams.resize(config->num_streams);
     requestedConfiguration3_7.streams.resize(config->num_streams);
+    mHalBufManagedStreamIds.clear();
     for (size_t i = 0; i < config->num_streams; i++) {
         device::V3_2::Stream &dst3_2 = requestedConfiguration3_2.streams[i];
         device::V3_4::Stream &dst3_4 = requestedConfiguration3_4.streams[i];
@@ -922,6 +928,9 @@
         switch (src->stream_type) {
             case CAMERA_STREAM_OUTPUT:
                 streamType = StreamType::OUTPUT;
+                if (flags::session_hal_buf_manager() && mUseHalBufManager) {
+                    mHalBufManagedStreamIds.insert(streamId);
+                }
                 break;
             case CAMERA_STREAM_INPUT:
                 streamType = StreamType::INPUT;
@@ -931,6 +940,7 @@
                         __FUNCTION__, streamId, config->streams[i]->stream_type);
                 return BAD_VALUE;
         }
+
         dst3_2.id = streamId;
         dst3_2.streamType = streamType;
         dst3_2.width = src->width;
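On the HIDL path there is no per-stream negotiation, so when the session_hal_buf_manager flag is on, every OUTPUT stream is simply inserted into mHalBufManagedStreamIds whenever the device-wide HAL buffer manager is enabled. A minimal sketch of that rule (StreamDesc and the function name are illustrative, not the service's real types):

    #include <cstdint>
    #include <set>
    #include <vector>

    namespace sketch {

    enum class StreamKind { Output, Input };

    struct StreamDesc {                 // stand-in for camera_stream_t
        int32_t id;
        StreamKind kind;
    };

    // Under HIDL the HAL buffer manager is all-or-nothing, so every output
    // stream is treated as HAL managed when the device-wide flag is set.
    std::set<int32_t> halManagedStreamsForHidl(const std::vector<StreamDesc>& streams,
                                               bool useHalBufManager) {
        std::set<int32_t> ids;
        if (!useHalBufManager) return ids;
        for (const auto& s : streams) {
            if (s.kind == StreamKind::Output) ids.insert(s.id);
        }
        return ids;
    }

    }  // namespace sketch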
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
index e328ef6..aa4b762 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
@@ -103,7 +103,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -145,7 +145,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -182,7 +182,7 @@
         mNextReprocessShutterFrameNumber, mNextZslStillShutterFrameNumber,
         mNextResultFrameNumber,
         mNextReprocessResultFrameNumber, mNextZslStillResultFrameNumber,
-        mUseHalBufManager, mUsePartialResult, mNeedFixupMonochromeTags,
+        mUseHalBufManager, mHalBufManagedStreamIds, mUsePartialResult, mNeedFixupMonochromeTags,
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
@@ -207,7 +207,8 @@
     }
 
     RequestBufferStates states {
-        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder,
+        mId, mRequestBufferInterfaceLock, mUseHalBufManager, mHalBufManagedStreamIds,
+        mOutputStreams, mSessionStatsBuilder,
         *this, mBufferRecords, *this};
     camera3::requestStreamBuffers(states, bufReqs, _hidl_cb);
     return hardware::Void();
@@ -224,7 +225,8 @@
     }
 
     ReturnBufferStates states {
-        mId, mUseHalBufManager, mOutputStreams, mSessionStatsBuilder, mBufferRecords};
+        mId, mUseHalBufManager, mHalBufManagedStreamIds, mOutputStreams, mSessionStatsBuilder,
+        mBufferRecords};
 
     camera3::returnStreamBuffers(states, buffers);
     return hardware::Void();
diff --git a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
index 59fc1cd..d607d10 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
@@ -25,6 +25,7 @@
 #include <hidl/Utils.h>
 #include <android/hardware/camera/device/3.2/types.h>
 #include <android-base/properties.h>
+#include <utils/Utils.h>
 
 namespace android {
 namespace frameworks {
@@ -58,7 +59,7 @@
     const sp<hardware::camera2::ICameraDeviceUser> &deviceRemote)
   : mDeviceRemote(deviceRemote) {
     mInitSuccess = initDevice();
-    mVndkVersion = base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
 }
 
 bool HidlCameraDeviceUser::initDevice() {
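Both HIDL shim files now obtain the VNDK version through getVNDKVersionFromProp() from utils/Utils.h instead of reading ro.vndk.version directly. The snippet below is only a plausible sketch of such a helper, not the actual utils/Utils.h code, which may handle additional cases (for example, codename or missing values) differently:

    #include <android-base/properties.h>

    // Hypothetical sketch: wrap the ro.vndk.version read behind one helper so the
    // fallback policy lives in a single place.
    static int getVndkVersionFromPropSketch(int defaultVersion) {
        return android::base::GetIntProperty("ro.vndk.version", defaultVersion);
    }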
diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
index 94bf653..1a5a6b9 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
@@ -25,6 +25,8 @@
 
 #include <hidl/HidlTransportSupport.h>
 
+#include <utils/Utils.h>
+
 namespace android {
 namespace frameworks {
 namespace cameraservice {
@@ -56,8 +58,8 @@
 }
 
 HidlCameraService::HidlCameraService(android::CameraService *cs) : mAidlICameraService(cs) {
-    mVndkVersion = base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
-};
+    mVndkVersion = getVNDKVersionFromProp(__ANDROID_API_FUTURE__);
+}
 
 Return<void>
 HidlCameraService::getCameraCharacteristics(const hidl_string& cameraId,
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index a53d26d..939126c 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -372,28 +372,22 @@
 };
 
 /**
- * Simple test version of the interaction proxy, to use to inject onRegistered calls to the
+ * Simple test version of HidlServiceInteractionProxy, used to inject onRegistered calls to the
  * CameraProviderManager
  */
-struct TestInteractionProxy : public CameraProviderManager::HidlServiceInteractionProxy,
-                              public CameraProviderManager::AidlServiceInteractionProxy {
+struct TestHidlInteractionProxy : public CameraProviderManager::HidlServiceInteractionProxy {
     sp<hidl::manager::V1_0::IServiceNotification> mManagerNotificationInterface;
     sp<TestICameraProvider> mTestCameraProvider;
-    std::shared_ptr<TestAidlICameraProvider> mTestAidlCameraProvider;
 
-    TestInteractionProxy() {}
+    TestHidlInteractionProxy() {}
 
     void setProvider(sp<TestICameraProvider> provider) {
         mTestCameraProvider = provider;
     }
 
-    void setAidlProvider(std::shared_ptr<TestAidlICameraProvider> provider) {
-        mTestAidlCameraProvider = provider;
-    }
-
     std::vector<std::string> mLastRequestedServiceNames;
 
-    virtual ~TestInteractionProxy() {}
+    virtual ~TestHidlInteractionProxy() {}
 
     virtual bool registerForNotifications(
             [[maybe_unused]] const std::string &serviceName,
@@ -430,9 +424,47 @@
         hardware::hidl_vec<hardware::hidl_string> ret = {"test/0"};
         return ret;
     }
+};
+
+/**
+ * Simple test version of AidlServiceInteractionProxy, used to inject onRegistered calls to the
+ * CameraProviderManager
+ */
+struct TestAidlInteractionProxy : public CameraProviderManager::AidlServiceInteractionProxy {
+    std::shared_ptr<TestAidlICameraProvider> mTestAidlCameraProvider;
+
+    TestAidlInteractionProxy() {}
+
+    void setProvider(std::shared_ptr<TestAidlICameraProvider> provider) {
+        mTestAidlCameraProvider = provider;
+    }
+
+    std::vector<std::string> mLastRequestedServiceNames;
+
+    virtual ~TestAidlInteractionProxy() {}
 
     virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
-    getAidlService(const std::string&) {
+            getService(const std::string& serviceName) override {
+        if (!flags::delay_lazy_hal_instantiation()) {
+            return mTestAidlCameraProvider;
+        }
+
+        // If no provider has been given, fail; in reality, getService would
+        // block for HALs that don't start correctly, so we should never use
+        // getService when we don't have a valid HAL running
+        if (mTestAidlCameraProvider == nullptr) {
+            ADD_FAILURE() << __FUNCTION__ << " called with no valid provider;"
+                          << " would block indefinitely";
+            // Real getService would block, but that's bad in unit tests, so
+            // just record an error and return nullptr.
+            return nullptr;
+        }
+        mLastRequestedServiceNames.push_back(serviceName);
+        return mTestAidlCameraProvider;
+    }
+
+    virtual std::shared_ptr<aidl::android::hardware::camera::provider::ICameraProvider>
+    tryGetService(const std::string&) override {
         return mTestAidlCameraProvider;
     }
 };
@@ -462,7 +494,7 @@
     status_t res;
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
 
     android::hardware::hidl_vec<uint8_t> chars;
     CameraMetadata meta;
@@ -510,7 +542,7 @@
     status_t res;
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
     serviceProxy.setProvider(provider);
@@ -560,7 +592,7 @@
 
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
 
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
@@ -696,7 +728,7 @@
     status_t res;
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
     serviceProxy.setProvider(provider);
@@ -730,7 +762,7 @@
 
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
 
@@ -779,7 +811,7 @@
 
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
     sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
             vendorSection);
 
@@ -821,7 +853,7 @@
 
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestHidlInteractionProxy serviceProxy;
 
     android::hardware::hidl_vec<uint8_t> chars;
     CameraMetadata meta;
@@ -857,9 +889,11 @@
                 REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(vd_flags, virtual_camera_service_discovery))) {
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestAidlInteractionProxy aidlServiceProxy;
+    TestHidlInteractionProxy hidlServiceProxy;
 
-    status_t res = providerManager->initialize(statusListener, &serviceProxy, &serviceProxy);
+    status_t res = providerManager->initialize(statusListener,
+                                               &hidlServiceProxy, &aidlServiceProxy);
     ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
 
     std::vector<std::string> cameraList = {"device@1.1/virtual/123"};
@@ -868,7 +902,7 @@
             ndk::SharedRefBase::make<TestAidlICameraProvider>(cameraList);
     ndk::SpAIBinder spBinder = aidlProvider->asBinder();
     AIBinder* aiBinder = spBinder.get();
-    serviceProxy.setAidlProvider(aidlProvider);
+    aidlServiceProxy.setProvider(aidlProvider);
     providerManager->onServiceRegistration(
             String16("android.hardware.camera.provider.ICameraProvider/virtual/0"),
             AIBinder_toPlatformBinder(aiBinder));
@@ -883,15 +917,17 @@
                 REQUIRES_FLAGS_ENABLED(ACONFIG_FLAG(vd_flags, virtual_camera_service_discovery))) {
     sp<CameraProviderManager> providerManager = new CameraProviderManager();
     sp<TestStatusListener> statusListener = new TestStatusListener();
-    TestInteractionProxy serviceProxy;
+    TestAidlInteractionProxy aidlServiceProxy;
+    TestHidlInteractionProxy hidlServiceProxy;
 
     std::vector<std::string> cameraList = {"device@1.1/virtual/123"};
 
     std::shared_ptr<TestAidlICameraProvider> aidlProvider =
             ndk::SharedRefBase::make<TestAidlICameraProvider>(cameraList);
-    serviceProxy.setAidlProvider(aidlProvider);
+    aidlServiceProxy.setProvider(aidlProvider);
 
-    status_t res = providerManager->initialize(statusListener, &serviceProxy, &serviceProxy);
+    status_t res = providerManager->initialize(statusListener,
+                                               &hidlServiceProxy, &aidlServiceProxy);
     ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
 
     std::unordered_map<std::string, std::set<std::string>> unavailableDeviceIds;
diff --git a/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.cpp b/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.cpp
new file mode 100644
index 0000000..e63b30b
--- /dev/null
+++ b/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AttributionAndPermissionUtils.h"
+
+#include <binder/AppOpsManager.h>
+#include <binder/PermissionController.h>
+#include <cutils/properties.h>
+#include <private/android_filesystem_config.h>
+
+#include "CameraService.h"
+#include "CameraThreadState.h"
+
+namespace android {
+
+const std::string AttributionAndPermissionUtils::sDumpPermission("android.permission.DUMP");
+const std::string AttributionAndPermissionUtils::sManageCameraPermission(
+        "android.permission.MANAGE_CAMERA");
+const std::string AttributionAndPermissionUtils::sCameraPermission(
+        "android.permission.CAMERA");
+const std::string AttributionAndPermissionUtils::sSystemCameraPermission(
+        "android.permission.SYSTEM_CAMERA");
+const std::string AttributionAndPermissionUtils::sCameraHeadlessSystemUserPermission(
+        "android.permission.CAMERA_HEADLESS_SYSTEM_USER");
+const std::string AttributionAndPermissionUtils::sCameraPrivacyAllowlistPermission(
+        "android.permission.CAMERA_PRIVACY_ALLOWLIST");
+const std::string AttributionAndPermissionUtils::sCameraSendSystemEventsPermission(
+        "android.permission.CAMERA_SEND_SYSTEM_EVENTS");
+const std::string AttributionAndPermissionUtils::sCameraOpenCloseListenerPermission(
+        "android.permission.CAMERA_OPEN_CLOSE_LISTENER");
+const std::string AttributionAndPermissionUtils::sCameraInjectExternalCameraPermission(
+        "android.permission.CAMERA_INJECT_EXTERNAL_CAMERA");
+
+bool AttributionAndPermissionUtils::checkAutomotivePrivilegedClient(const std::string &cameraId,
+        const AttributionSourceState &attributionSource) {
+    if (isAutomotivePrivilegedClient(attributionSource.uid)) {
+        // If cameraId is empty, this check is not being used to access a specific
+        // camera, so grant the permission to the automotive privileged client based
+        // on its uid alone.
+        if (cameraId.empty())
+            return true;
+
+        auto cameraService = mCameraService.promote();
+        if (cameraService == nullptr) {
+            ALOGE("%s: CameraService unavailable.", __FUNCTION__);
+            return false;
+        }
+
+        // If this call is used for accessing a specific camera, then cameraId must be
+        // provided. In that case, the permission is pre-granted only for accessing the
+        // exterior system camera.
+        return cameraService->isAutomotiveExteriorSystemCamera(cameraId);
+    }
+
+    return false;
+}
+
+bool AttributionAndPermissionUtils::checkPermissionForPreflight(const std::string &cameraId,
+        const std::string &permission, const AttributionSourceState &attributionSource,
+        const std::string& message, int32_t attributedOpCode) {
+    if (checkAutomotivePrivilegedClient(cameraId, attributionSource)) {
+        return true;
+    }
+
+    PermissionChecker permissionChecker;
+    return permissionChecker.checkPermissionForPreflight(toString16(permission), attributionSource,
+            toString16(message), attributedOpCode) != PermissionChecker::PERMISSION_HARD_DENIED;
+}
+
+// Can camera service trust the caller based on the calling UID?
+bool AttributionAndPermissionUtils::isTrustedCallingUid(uid_t uid) {
+    switch (uid) {
+        case AID_MEDIA:        // mediaserver
+        case AID_CAMERASERVER: // cameraserver
+        case AID_RADIO:        // telephony
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool AttributionAndPermissionUtils::isAutomotiveDevice() {
+    // Checks the property ro.hardware.type and returns true if it is
+    // automotive.
+    char value[PROPERTY_VALUE_MAX] = {0};
+    property_get("ro.hardware.type", value, "");
+    return strncmp(value, "automotive", PROPERTY_VALUE_MAX) == 0;
+}
+
+bool AttributionAndPermissionUtils::isHeadlessSystemUserMode() {
+    // Checks if the device is running in headless system user mode
+    // by checking the property ro.fw.mu.headless_system_user.
+    char value[PROPERTY_VALUE_MAX] = {0};
+    property_get("ro.fw.mu.headless_system_user", value, "");
+    return strncmp(value, "true", PROPERTY_VALUE_MAX) == 0;
+}
+
+bool AttributionAndPermissionUtils::isAutomotivePrivilegedClient(int32_t uid) {
+    // Returns false if this is not an automotive device type.
+    if (!isAutomotiveDevice())
+        return false;
+
+    // Returns true if the uid is AID_AUTOMOTIVE_EVS, which is a
+    // privileged client uid used for safety-critical use cases such as
+    // rear view and surround view.
+    return uid == AID_AUTOMOTIVE_EVS;
+}
+
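+// Resolves the uid of packageName for the given userId. On failure, logs the
+// error, writes a message to the err file descriptor, and returns BAD_VALUE.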
+status_t AttributionAndPermissionUtils::getUidForPackage(const std::string &packageName,
+        int userId, /*inout*/uid_t& uid, int err) {
+    PermissionController pc;
+    uid = pc.getPackageUid(toString16(packageName), 0);
+    if (uid <= 0) {
+        ALOGE("Unknown package: '%s'", packageName.c_str());
+        dprintf(err, "Unknown package: '%s'\n", packageName.c_str());
+        return BAD_VALUE;
+    }
+
+    if (userId < 0) {
+        ALOGE("Invalid user: %d", userId);
+        dprintf(err, "Invalid user: %d\n", userId);
+        return BAD_VALUE;
+    }
+
+    uid = multiuser_get_uid(userId, uid);
+    return NO_ERROR;
+}
+
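+// Returns true when the current binder call originates from the camera server
+// process itself, i.e. the calling pid equals this process's pid.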
+bool AttributionAndPermissionUtils::isCallerCameraServerNotDelegating() {
+    return CameraThreadState::getCallingPid() == getpid();
+}
+
+bool AttributionAndPermissionUtils::hasPermissionsForCamera(const std::string& cameraId,
+        const AttributionSourceState& attributionSource) {
+    return checkPermissionForPreflight(cameraId, sCameraPermission,
+            attributionSource, std::string(), AppOpsManager::OP_NONE);
+}
+
+bool AttributionAndPermissionUtils::hasPermissionsForSystemCamera(const std::string& cameraId,
+        const AttributionSourceState& attributionSource, bool checkCameraPermissions) {
+    bool systemCameraPermission = checkPermissionForPreflight(cameraId,
+            sSystemCameraPermission, attributionSource, std::string(), AppOpsManager::OP_NONE);
+    return systemCameraPermission && (!checkCameraPermissions
+            || hasPermissionsForCamera(cameraId, attributionSource));
+}
+
+bool AttributionAndPermissionUtils::hasPermissionsForCameraHeadlessSystemUser(
+        const std::string& cameraId, const AttributionSourceState& attributionSource) {
+    return checkPermissionForPreflight(cameraId, sCameraHeadlessSystemUserPermission,
+            attributionSource, std::string(), AppOpsManager::OP_NONE);
+}
+
+bool AttributionAndPermissionUtils::hasPermissionsForCameraPrivacyAllowlist(
+        const AttributionSourceState& attributionSource) {
+    return checkPermissionForPreflight(std::string(), sCameraPrivacyAllowlistPermission,
+            attributionSource, std::string(), AppOpsManager::OP_NONE);
+}
+
+bool AttributionAndPermissionUtils::hasPermissionsForOpenCloseListener(
+        const AttributionSourceState& attributionSource) {
+    return checkPermissionForPreflight(std::string(), sCameraOpenCloseListenerPermission,
+            attributionSource, std::string(), AppOpsManager::OP_NONE);
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.h b/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.h
new file mode 100644
index 0000000..dc4cfb1
--- /dev/null
+++ b/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_SERVERS_CAMERA_ATTRIBUTION_AND_PERMISSION_UTILS_H
+#define ANDROID_SERVERS_CAMERA_ATTRIBUTION_AND_PERMISSION_UTILS_H
+
+#include <android/content/AttributionSourceState.h>
+#include <android/permission/PermissionChecker.h>
+#include <binder/BinderService.h>
+
+namespace android {
+
+class CameraService;
+
+using content::AttributionSourceState;
+using permission::PermissionChecker;
+
+/**
+ * Utility class consolidating methods/data for verifying permissions and the identity of the
+ * caller.
+ */
+class AttributionAndPermissionUtils {
+public:
+    AttributionAndPermissionUtils(wp<CameraService> cameraService) : mCameraService(cameraService)
+            {}
+    virtual ~AttributionAndPermissionUtils() {}
+
+    /**
+     * Pre-grants the permission if the attribution source uid belongs to an automotive
+     * privileged client. Otherwise uses the system service permission checker to check
+     * for the appropriate permission. If this function is called for accessing a specific
+     * camera, then cameraId must not be empty. The cameraId is used only for automotive
+     * privileged clients, so that the permission is pre-granted only for system cameras
+     * located outside of the vehicle body frame; cameras located inside the vehicle
+     * cabin would need user permission.
+     */
+    virtual bool checkPermissionForPreflight(const std::string &cameraId,
+            const std::string &permission, const AttributionSourceState& attributionSource,
+            const std::string& message, int32_t attributedOpCode);
+    virtual bool isTrustedCallingUid(uid_t uid);
+    virtual bool isAutomotiveDevice();
+    virtual bool isHeadlessSystemUserMode();
+    virtual bool isAutomotivePrivilegedClient(int32_t uid);
+    virtual status_t getUidForPackage(const std::string &packageName, int userId,
+            /*inout*/uid_t& uid, int err);
+    virtual bool isCallerCameraServerNotDelegating();
+
+    // Utils for checking specific permissions
+    virtual bool hasPermissionsForCamera(const std::string& cameraId,
+            const AttributionSourceState& attributionSource);
+    virtual bool hasPermissionsForSystemCamera(const std::string& cameraId,
+            const AttributionSourceState& attributionSource, bool checkCameraPermissions = true);
+    virtual bool hasPermissionsForCameraHeadlessSystemUser(const std::string& cameraId,
+            const AttributionSourceState& attributionSource);
+    virtual bool hasPermissionsForCameraPrivacyAllowlist(
+            const AttributionSourceState& attributionSource);
+    virtual bool hasPermissionsForOpenCloseListener(
+            const AttributionSourceState& attributionSource);
+
+    static const std::string sDumpPermission;
+    static const std::string sManageCameraPermission;
+    static const std::string sCameraPermission;
+    static const std::string sSystemCameraPermission;
+    static const std::string sCameraHeadlessSystemUserPermission;
+    static const std::string sCameraPrivacyAllowlistPermission;
+    static const std::string sCameraSendSystemEventsPermission;
+    static const std::string sCameraOpenCloseListenerPermission;
+    static const std::string sCameraInjectExternalCameraPermission;
+
+protected:
+    wp<CameraService> mCameraService;
+
+    bool checkAutomotivePrivilegedClient(const std::string &cameraId,
+            const AttributionSourceState &attributionSource);
+};
+
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_ATTRIBUTION_AND_PERMISSION_UTILS_H
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index aef6531..11ef9b7 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -1133,7 +1133,7 @@
 }
 
 void filterParameters(const CameraMetadata& src, const CameraMetadata& deviceInfo,
-        int vendorTagId, CameraMetadata& dst) {
+        metadata_vendor_id_t vendorTagId, CameraMetadata& dst) {
     const CameraMetadata params(src);
     camera_metadata_ro_entry_t availableSessionKeys = deviceInfo.find(
             ANDROID_REQUEST_AVAILABLE_SESSION_KEYS);
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 29e3eca..0545cea 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -177,7 +177,11 @@
         aidl::android::hardware::camera::device::RequestTemplate* tempId /*out*/);
 
 void filterParameters(const CameraMetadata& src, const CameraMetadata& deviceInfo,
-        int vendorTagId, CameraMetadata& dst);
+        metadata_vendor_id_t vendorTagId, CameraMetadata& dst);
+
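+// Returns true if the container holds the given value,
+// e.g. contains(std::set<int>{1, 2}, 2) returns true.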
+template <typename T> bool contains(std::set<T> container, T value) {
+    return container.find(value) != container.end();
+}
 
 constexpr int32_t MAX_SURFACES_PER_STREAM = 4;
 
diff --git a/services/camera/libcameraservice/utils/Utils.cpp b/services/camera/libcameraservice/utils/Utils.cpp
new file mode 100644
index 0000000..c8f5e86
--- /dev/null
+++ b/services/camera/libcameraservice/utils/Utils.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Utils.h"
+#include <android-base/properties.h>
+#include <com_android_internal_camera_flags.h>
+
+
+namespace android {
+
+using namespace com::android::internal::camera::flags;
+
+constexpr const char *LEGACY_VNDK_VERSION_PROP = "ro.vndk.version";
+constexpr const char *BOARD_API_LEVEL_PROP = "ro.board.api_level";
+constexpr int MAX_VENDOR_API_LEVEL = 1000000;
+constexpr int FIRST_VNDK_VERSION = 202404;
+
+int getVNDKVersionFromProp(int defaultVersion) {
+    if (!com_android_internal_camera_flags_use_ro_board_api_level_for_vndk_version()) {
+        return base::GetIntProperty(LEGACY_VNDK_VERSION_PROP, defaultVersion);
+    }
+
+    int vndkVersion = base::GetIntProperty(BOARD_API_LEVEL_PROP, MAX_VENDOR_API_LEVEL);
+
+    if (vndkVersion == MAX_VENDOR_API_LEVEL) {
+        // Couldn't find property
+        return defaultVersion;
+    }
+
+    if (vndkVersion < __ANDROID_API_V__) {
+        // VNDK versions below V return the corresponding SDK version.
+        return vndkVersion;
+    }
+
+    // VNDK versions for Android V and above use the YYYYMM format, starting with 202404,
+    // and are bumped once a year. So V is 202404 and the next release would be 202504.
+    // This is the same assumption as the one made in system/core/init/property_service.cpp.
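+    // For example, 202404 maps to __ANDROID_API_V__ and 202504 maps to
+    // __ANDROID_API_V__ + 1.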
+    vndkVersion = (vndkVersion - FIRST_VNDK_VERSION) / 100;
+    return __ANDROID_API_V__ + vndkVersion;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/utils/Utils.h b/services/camera/libcameraservice/utils/Utils.h
new file mode 100644
index 0000000..f8a107d
--- /dev/null
+++ b/services/camera/libcameraservice/utils/Utils.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_UTILS_H
+#define ANDROID_SERVERS_CAMERA_UTILS_H
+
+namespace android {
+
+/**
+ * As of Android V, ro.board.api_level returns the year and month of release (e.g. 202404)
+ * instead of the release SDK version. This function maps the year/month format back to the
+ * release SDK version.
+ *
+ * Returns defaultVersion if the property is not found.
+ */
+int getVNDKVersionFromProp(int defaultVersion);
+
+} // namespace android
+
+#endif //ANDROID_SERVERS_CAMERA_UTILS_H
diff --git a/services/camera/virtualcamera/Android.bp b/services/camera/virtualcamera/Android.bp
index cb4e10f..90530f6 100644
--- a/services/camera/virtualcamera/Android.bp
+++ b/services/camera/virtualcamera/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_xr_framework",
     // See: http://go/android-license-faq
     default_applicable_licenses: ["Android-Apache-2.0"],
 }
@@ -11,6 +12,7 @@
         "libbinder",
         "libbinder_ndk",
         "libcamera_metadata",
+        "libexif",
         "liblog",
         "libfmq",
         "libgui",
@@ -46,7 +48,7 @@
     name: "libvirtualcamera_utils",
     srcs: [
         "util/JpegUtil.cc",
-        "util/MetadataBuilder.cc",
+        "util/MetadataUtil.cc",
         "util/Util.cc",
         "util/TestPatternHelper.cc",
         "util/EglDisplayContext.cc",
@@ -54,7 +56,7 @@
         "util/EglProgram.cc",
         "util/EglSurfaceTexture.cc",
         "util/EglUtil.cc",
-        "util/Permissions.cc"
+        "util/Permissions.cc",
     ],
     defaults: [
         "libvirtualcamera_defaults",
diff --git a/services/camera/virtualcamera/TEST_MAPPING b/services/camera/virtualcamera/TEST_MAPPING
index 66c5e52..25fca73 100644
--- a/services/camera/virtualcamera/TEST_MAPPING
+++ b/services/camera/virtualcamera/TEST_MAPPING
@@ -2,9 +2,7 @@
   "presubmit" : [
     {
       "name": "virtual_camera_tests"
-    }
-  ],
-  "postsubmit": [
+    },
     {
       "name": "CtsVirtualDevicesCameraTestCases",
       "options": [
diff --git a/services/camera/virtualcamera/VirtualCameraDevice.cc b/services/camera/virtualcamera/VirtualCameraDevice.cc
index 84f721b..7636cbd 100644
--- a/services/camera/virtualcamera/VirtualCameraDevice.cc
+++ b/services/camera/virtualcamera/VirtualCameraDevice.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -23,11 +23,14 @@
 #include <chrono>
 #include <cstdint>
 #include <iterator>
+#include <numeric>
 #include <optional>
 #include <string>
+#include <vector>
 
 #include "VirtualCameraSession.h"
 #include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
+#include "aidl/android/companion/virtualcamera/VirtualCameraConfiguration.h"
 #include "aidl/android/hardware/camera/common/Status.h"
 #include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "aidl/android/hardware/camera/device/StreamConfiguration.h"
@@ -35,7 +38,7 @@
 #include "android/binder_status.h"
 #include "log/log.h"
 #include "system/camera_metadata.h"
-#include "util/MetadataBuilder.h"
+#include "util/MetadataUtil.h"
 #include "util/Util.h"
 
 namespace android {
@@ -44,7 +47,10 @@
 
 using ::aidl::android::companion::virtualcamera::Format;
 using ::aidl::android::companion::virtualcamera::IVirtualCameraCallback;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::CameraResourceCost;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::device::CameraMetadata;
@@ -64,27 +70,81 @@
 // Prefix of camera name - "device@1.1/virtual/{numerical_id}"
 const char* kDevicePathPrefix = "device@1.1/virtual/";
 
-constexpr std::chrono::nanoseconds kMinFrameDuration30Fps = 1s / 30;
 constexpr int32_t kMaxJpegSize = 3 * 1024 * 1024 /*3MiB*/;
 
+constexpr int32_t kMinFps = 15;
+
+constexpr std::chrono::nanoseconds kMaxFrameDuration =
+    std::chrono::duration_cast<std::chrono::nanoseconds>(1e9ns / kMinFps);
+
+constexpr uint8_t kPipelineMaxDepth = 2;
+
 constexpr MetadataBuilder::ControlRegion kDefaultEmptyControlRegion{};
 
-struct Resolution {
-  Resolution(const int w, const int h) : width(w), height(h) {
-  }
+const std::array<Resolution, 5> kStandardJpegThumbnailSizes{
+    Resolution(176, 144), Resolution(240, 144), Resolution(256, 144),
+    Resolution(240, 160), Resolution(240, 180)};
 
-  bool operator<(const Resolution& other) const {
-    return width * height < other.width * other.height;
-  }
+const std::array<PixelFormat, 3> kOutputFormats{
+    PixelFormat::IMPLEMENTATION_DEFINED, PixelFormat::YCBCR_420_888,
+    PixelFormat::BLOB};
 
-  bool operator==(const Resolution& other) const {
-    return width == other.width && height == other.height;
-  }
-
-  const int width;
-  const int height;
+// The resolutions below will be used to extend the set of supported output resolutions.
+// All resolutions with a lower pixel count and the same aspect ratio as some supported
+// input resolution will be added to the set of supported output resolutions.
+const std::array<Resolution, 10> kOutputResolutions{
+    Resolution(320, 240),   Resolution(640, 360),  Resolution(640, 480),
+    Resolution(720, 480),   Resolution(720, 576),  Resolution(800, 600),
+    Resolution(1024, 576),  Resolution(1280, 720), Resolution(1280, 960),
+    Resolution(1280, 1080),
 };
 
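+// Returns the thumbnail sizes advertised for JPEG capture: (0, 0) plus every
+// standard thumbnail size whose aspect ratio approximately matches at least
+// one supported input configuration.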
+std::vector<Resolution> getSupportedJpegThumbnailSizes(
+    const std::vector<SupportedStreamConfiguration>& configs) {
+  auto isSupportedByAnyInputConfig =
+      [&configs](const Resolution thumbnailResolution) {
+        return std::any_of(
+            configs.begin(), configs.end(),
+            [thumbnailResolution](const SupportedStreamConfiguration& config) {
+              return isApproximatellySameAspectRatio(
+                  thumbnailResolution, Resolution(config.width, config.height));
+            });
+      };
+
+  std::vector<Resolution> supportedThumbnailSizes({Resolution(0, 0)});
+  std::copy_if(kStandardJpegThumbnailSizes.begin(),
+               kStandardJpegThumbnailSizes.end(),
+               std::back_insert_iterator(supportedThumbnailSizes),
+               isSupportedByAnyInputConfig);
+  return supportedThumbnailSizes;
+}
+
+bool isSupportedOutputFormat(const PixelFormat pixelFormat) {
+  return std::find(kOutputFormats.begin(), kOutputFormats.end(), pixelFormat) !=
+         kOutputFormats.end();
+}
+
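+// Builds the list of available AE target FPS ranges from the input configs;
+// e.g. a single config with maxFps 30 yields the ranges [15, 30] and [30, 30].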
+std::vector<MetadataBuilder::FpsRange> fpsRangesForInputConfig(
+    const std::vector<SupportedStreamConfiguration>& configs) {
+  std::set<MetadataBuilder::FpsRange> availableRanges;
+
+  for (const SupportedStreamConfiguration& config : configs) {
+    availableRanges.insert({.minFps = kMinFps, .maxFps = config.maxFps});
+    availableRanges.insert({.minFps = config.maxFps, .maxFps = config.maxFps});
+  }
+
+  if (std::any_of(configs.begin(), configs.end(),
+                  [](const SupportedStreamConfiguration& config) {
+                    return config.maxFps >= 30;
+                  })) {
+    availableRanges.insert({.minFps = kMinFps, .maxFps = 30});
+    availableRanges.insert({.minFps = 30, .maxFps = 30});
+  }
+
+  return std::vector<MetadataBuilder::FpsRange>(availableRanges.begin(),
+                                                availableRanges.end());
+}
+
 std::optional<Resolution> getMaxResolution(
     const std::vector<SupportedStreamConfiguration>& configs) {
   auto itMax = std::max_element(configs.begin(), configs.end(),
@@ -103,24 +163,64 @@
   return Resolution(itMax->width, itMax->height);
 }
 
-std::set<Resolution> getUniqueResolutions(
+// Returns a map of unique resolution to maximum maxFps for all streams with
+// that resolution.
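+// For example, input configs 640x480@30 and 640x480@60 collapse into a single
+// 640x480 entry with maxFps 60.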
+std::map<Resolution, int> getResolutionToMaxFpsMap(
     const std::vector<SupportedStreamConfiguration>& configs) {
-  std::set<Resolution> uniqueResolutions;
-  std::transform(configs.begin(), configs.end(),
-                 std::inserter(uniqueResolutions, uniqueResolutions.begin()),
-                 [](const SupportedStreamConfiguration& config) {
-                   return Resolution(config.width, config.height);
-                 });
-  return uniqueResolutions;
+  std::map<Resolution, int> resolutionToMaxFpsMap;
+
+  for (const SupportedStreamConfiguration& config : configs) {
+    Resolution resolution(config.width, config.height);
+    if (resolutionToMaxFpsMap.find(resolution) == resolutionToMaxFpsMap.end()) {
+      resolutionToMaxFpsMap[resolution] = config.maxFps;
+    } else {
+      int currentMaxFps = resolutionToMaxFpsMap[resolution];
+      resolutionToMaxFpsMap[resolution] = std::max(currentMaxFps, config.maxFps);
+    }
+  }
+
+  std::map<Resolution, int> additionalResolutionToMaxFpsMap;
+  // Add additional resolutions we can support by downscaling input streams with
+  // the same aspect ratio.
+  for (const Resolution& outputResolution : kOutputResolutions) {
+    for (const auto& [resolution, maxFps] : resolutionToMaxFpsMap) {
+      if (resolutionToMaxFpsMap.find(outputResolution) !=
+          resolutionToMaxFpsMap.end()) {
+        // Resolution is already in the map, skip it.
+        continue;
+      }
+
+      if (outputResolution < resolution &&
+          isApproximatellySameAspectRatio(outputResolution, resolution)) {
+        // Lower resolution with the same aspect ratio; we can achieve this by
+        // downscaling, so add it to the map.
+        ALOGD(
+            "Extending set of output resolutions with %dx%d, which has the same "
+            "aspect ratio as supported input %dx%d.",
+            outputResolution.width, outputResolution.height, resolution.width,
+            resolution.height);
+        additionalResolutionToMaxFpsMap[outputResolution] = maxFps;
+        break;
+      }
+    }
+  }
+
+  // Add all resolutions we can achieve by downscaling to the map.
+  resolutionToMaxFpsMap.insert(additionalResolutionToMaxFpsMap.begin(),
+                               additionalResolutionToMaxFpsMap.end());
+
+  return resolutionToMaxFpsMap;
 }
 
 // TODO(b/301023410) - Populate camera characteristics according to camera configuration.
 std::optional<CameraMetadata> initCameraCharacteristics(
-    const std::vector<SupportedStreamConfiguration>& supportedInputConfig) {
+    const std::vector<SupportedStreamConfiguration>& supportedInputConfig,
+    const SensorOrientation sensorOrientation, const LensFacing lensFacing) {
   if (!std::all_of(supportedInputConfig.begin(), supportedInputConfig.end(),
                    [](const SupportedStreamConfiguration& config) {
                      return isFormatSupportedForInput(
-                         config.width, config.height, config.pixelFormat);
+                         config.width, config.height, config.pixelFormat,
+                         config.maxFps);
                    })) {
     ALOGE("%s: input configuration contains unsupported format", __func__);
     return std::nullopt;
@@ -131,26 +231,86 @@
           .setSupportedHardwareLevel(
               ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL)
           .setFlashAvailable(false)
-          .setLensFacing(ANDROID_LENS_FACING_EXTERNAL)
-          .setSensorOrientation(0)
+          .setLensFacing(
+              static_cast<camera_metadata_enum_android_lens_facing>(lensFacing))
+          .setAvailableFocalLengths({VirtualCameraDevice::kFocalLength})
+          .setSensorOrientation(static_cast<int32_t>(sensorOrientation))
+          .setSensorReadoutTimestamp(
+              ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED)
+          .setSensorTimestampSource(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN)
+          .setSensorPhysicalSize(36.0, 24.0)
+          .setAvailableAberrationCorrectionModes(
+              {ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF})
+          .setAvailableNoiseReductionModes({ANDROID_NOISE_REDUCTION_MODE_OFF})
           .setAvailableFaceDetectModes({ANDROID_STATISTICS_FACE_DETECT_MODE_OFF})
+          .setAvailableTestPatternModes({ANDROID_SENSOR_TEST_PATTERN_MODE_OFF})
           .setAvailableMaxDigitalZoom(1.0)
           .setControlAvailableModes({ANDROID_CONTROL_MODE_AUTO})
           .setControlAfAvailableModes({ANDROID_CONTROL_AF_MODE_OFF})
-          .setControlAeAvailableFpsRange(10, 30)
+          .setControlAvailableSceneModes({ANDROID_CONTROL_SCENE_MODE_DISABLED})
+          .setControlAvailableEffects({ANDROID_CONTROL_EFFECT_MODE_OFF})
+          .setControlAvailableVideoStabilizationModes(
+              {ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF})
+          .setControlAeAvailableModes({ANDROID_CONTROL_AE_MODE_ON})
+          .setControlAeAvailableAntibandingModes(
+              {ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO})
+          .setControlAeAvailableFpsRanges(
+              fpsRangesForInputConfig(supportedInputConfig))
           .setControlMaxRegions(0, 0, 0)
           .setControlAfRegions({kDefaultEmptyControlRegion})
           .setControlAeRegions({kDefaultEmptyControlRegion})
           .setControlAwbRegions({kDefaultEmptyControlRegion})
-          .setControlAeCompensationRange(0, 1)
+          .setControlAeCompensationRange(0, 0)
           .setControlAeCompensationStep(camera_metadata_rational_t{0, 1})
+          .setControlAwbLockAvailable(false)
+          .setControlAeLockAvailable(false)
+          .setControlAvailableAwbModes({ANDROID_CONTROL_AWB_MODE_AUTO})
           .setControlZoomRatioRange(/*min=*/1.0, /*max=*/1.0)
+          .setCroppingType(ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY)
+          .setJpegAvailableThumbnailSizes(
+              getSupportedJpegThumbnailSizes(supportedInputConfig))
           .setMaxJpegSize(kMaxJpegSize)
-          .setAvailableRequestKeys({ANDROID_CONTROL_AF_MODE})
-          .setAvailableResultKeys({ANDROID_CONTROL_AF_MODE})
+          .setMaxFaceCount(0)
+          .setMaxFrameDuration(kMaxFrameDuration)
+          .setMaxNumberOutputStreams(
+              VirtualCameraDevice::kMaxNumberOfRawStreams,
+              VirtualCameraDevice::kMaxNumberOfProcessedStreams,
+              VirtualCameraDevice::kMaxNumberOfStallStreams)
+          .setRequestPartialResultCount(1)
+          .setPipelineMaxDepth(kPipelineMaxDepth)
+          .setSyncMaxLatency(ANDROID_SYNC_MAX_LATENCY_UNKNOWN)
+          .setAvailableRequestKeys({ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+                                    ANDROID_CONTROL_CAPTURE_INTENT,
+                                    ANDROID_CONTROL_AE_MODE,
+                                    ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+                                    ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+                                    ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+                                    ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+                                    ANDROID_CONTROL_AF_TRIGGER,
+                                    ANDROID_CONTROL_AF_MODE,
+                                    ANDROID_CONTROL_AWB_MODE,
+                                    ANDROID_SCALER_CROP_REGION,
+                                    ANDROID_CONTROL_EFFECT_MODE,
+                                    ANDROID_CONTROL_MODE,
+                                    ANDROID_CONTROL_SCENE_MODE,
+                                    ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+                                    ANDROID_CONTROL_ZOOM_RATIO,
+                                    ANDROID_FLASH_MODE,
+                                    ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+                                    ANDROID_JPEG_QUALITY,
+                                    ANDROID_JPEG_THUMBNAIL_QUALITY,
+                                    ANDROID_NOISE_REDUCTION_MODE,
+                                    ANDROID_STATISTICS_FACE_DETECT_MODE})
+          .setAvailableResultKeys(
+              {ANDROID_COLOR_CORRECTION_ABERRATION_MODE, ANDROID_CONTROL_AE_MODE,
+               ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, ANDROID_CONTROL_AF_MODE,
+               ANDROID_CONTROL_AWB_MODE, ANDROID_CONTROL_EFFECT_MODE,
+               ANDROID_CONTROL_MODE, ANDROID_FLASH_MODE, ANDROID_FLASH_STATE,
+               ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, ANDROID_JPEG_QUALITY,
+               ANDROID_JPEG_THUMBNAIL_QUALITY, ANDROID_LENS_FOCAL_LENGTH,
+               ANDROID_SENSOR_TIMESTAMP, ANDROID_NOISE_REDUCTION_MODE})
           .setAvailableCapabilities(
-              {ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE})
-          .setAvailableCharacteristicKeys();
+              {ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE});
 
   // Active array size must correspond to largest supported input resolution.
   std::optional<Resolution> maxResolution =
@@ -160,56 +320,36 @@
   }
   builder.setSensorActiveArraySize(0, 0, maxResolution->width,
                                    maxResolution->height);
+  builder.setSensorPixelArraySize(maxResolution->width, maxResolution->height);
 
   std::vector<MetadataBuilder::StreamConfiguration> outputConfigurations;
 
   // TODO(b/301023410) Add also all "standard" resolutions we can rescale the
   // streams to (all standard resolutions with same aspect ratio).
 
-  // Add IMPLEMENTATION_DEFINED format for all supported input resolutions.
-  std::set<Resolution> uniqueResolutions =
-      getUniqueResolutions(supportedInputConfig);
-  std::transform(
-      uniqueResolutions.begin(), uniqueResolutions.end(),
-      std::back_inserter(outputConfigurations),
-      [](const Resolution& resolution) {
-        return MetadataBuilder::StreamConfiguration{
-            .width = resolution.width,
-            .height = resolution.height,
-            .format = ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
-            .minFrameDuration = kMinFrameDuration30Fps,
-            .minStallDuration = 0s};
-      });
+  std::map<Resolution, int> resolutionToMaxFpsMap =
+      getResolutionToMaxFpsMap(supportedInputConfig);
 
-  // Add all supported configuration with explicit pixel format.
-  std::transform(supportedInputConfig.begin(), supportedInputConfig.end(),
-                 std::back_inserter(outputConfigurations),
-                 [](const SupportedStreamConfiguration& config) {
-                   return MetadataBuilder::StreamConfiguration{
-                       .width = config.width,
-                       .height = config.height,
-                       .format = static_cast<int>(config.pixelFormat),
-                       .minFrameDuration = kMinFrameDuration30Fps,
-                       .minStallDuration = 0s};
-                 });
-
-  // TODO(b/301023410) We currently don't support rescaling for still capture,
-  // so only announce BLOB support for formats exactly matching the input.
-  std::transform(uniqueResolutions.begin(), uniqueResolutions.end(),
-                 std::back_inserter(outputConfigurations),
-                 [](const Resolution& resolution) {
-                   return MetadataBuilder::StreamConfiguration{
-                       .width = resolution.width,
-                       .height = resolution.height,
-                       .format = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
-                       .minFrameDuration = kMinFrameDuration30Fps,
-                       .minStallDuration = 0s};
-                 });
+  // Add configurations for all unique input resolutions and output formats.
+  for (const PixelFormat format : kOutputFormats) {
+    std::transform(
+        resolutionToMaxFpsMap.begin(), resolutionToMaxFpsMap.end(),
+        std::back_inserter(outputConfigurations), [format](const auto& entry) {
+          Resolution resolution = entry.first;
+          int maxFps = entry.second;
+          return MetadataBuilder::StreamConfiguration{
+              .width = resolution.width,
+              .height = resolution.height,
+              .format = static_cast<int32_t>(format),
+              .minFrameDuration = std::chrono::nanoseconds(1s) / maxFps,
+              .minStallDuration = 0s};
+        });
+  }
 
   ALOGV("Adding %zu output configurations", outputConfigurations.size());
   builder.setAvailableOutputStreamConfigurations(outputConfigurations);
 
-  auto metadata = builder.build();
+  auto metadata = builder.setAvailableCharacteristicKeys().build();
   if (metadata == nullptr) {
     ALOGE("Failed to build metadata!");
     return CameraMetadata();
@@ -221,14 +361,13 @@
 }  // namespace
 
 VirtualCameraDevice::VirtualCameraDevice(
-    const uint32_t cameraId,
-    const std::vector<SupportedStreamConfiguration>& supportedInputConfig,
-    std::shared_ptr<IVirtualCameraCallback> virtualCameraClientCallback)
+    const uint32_t cameraId, const VirtualCameraConfiguration& configuration)
     : mCameraId(cameraId),
-      mVirtualCameraClientCallback(virtualCameraClientCallback),
-      mSupportedInputConfigurations(supportedInputConfig) {
-  std::optional<CameraMetadata> metadata =
-      initCameraCharacteristics(mSupportedInputConfigurations);
+      mVirtualCameraClientCallback(configuration.virtualCameraCallback),
+      mSupportedInputConfigurations(configuration.supportedStreamConfigs) {
+  std::optional<CameraMetadata> metadata = initCameraCharacteristics(
+      mSupportedInputConfigurations, configuration.sensorOrientation,
+      configuration.lensFacing);
   if (metadata.has_value()) {
     mCameraCharacteristics = *metadata;
   } else {
@@ -286,6 +425,29 @@
 
 bool VirtualCameraDevice::isStreamCombinationSupported(
     const StreamConfiguration& streamConfiguration) const {
+  if (streamConfiguration.streams.empty()) {
+    ALOGE("%s: Querying empty configuration", __func__);
+    return false;
+  }
+
+  const std::vector<Stream>& streams = streamConfiguration.streams;
+
+  Resolution firstStreamResolution(streams[0].width, streams[0].height);
+  auto isSameAspectRatioAsFirst = [firstStreamResolution](const Stream& stream) {
+    return isApproximatellySameAspectRatio(
+        firstStreamResolution, Resolution(stream.width, stream.height));
+  };
+  if (!std::all_of(streams.begin(), streams.end(), isSameAspectRatioAsFirst)) {
+    ALOGW(
+        "%s: Requested streams do not have the same aspect ratio. Different "
+        "aspect ratios are currently not supported by the virtual camera. "
+        "Stream configuration: %s",
+        __func__, streamConfiguration.toString().c_str());
+    return false;
+  }
+
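+  // Count processed (non-BLOB) and stalling (BLOB) streams so the totals can
+  // be checked against the per-session maximums below.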
+  int numberOfProcessedStreams = 0;
+  int numberOfStallStreams = 0;
   for (const Stream& stream : streamConfiguration.streams) {
     ALOGV("%s: Configuration queried: %s", __func__, stream.toString().c_str());
 
@@ -294,18 +456,25 @@
       return false;
     }
 
-    // TODO(b/301023410) remove hardcoded format checks, verify against configuration.
     if (stream.rotation != StreamRotation::ROTATION_0 ||
-        (stream.format != PixelFormat::IMPLEMENTATION_DEFINED &&
-         stream.format != PixelFormat::YCBCR_420_888 &&
-         stream.format != PixelFormat::BLOB)) {
+        !isSupportedOutputFormat(stream.format)) {
       ALOGV("Unsupported output stream type");
       return false;
     }
 
+    if (stream.format == PixelFormat::BLOB) {
+      numberOfStallStreams++;
+    } else {
+      numberOfProcessedStreams++;
+    }
+
+    Resolution requestedResolution(stream.width, stream.height);
     auto matchesSupportedInputConfig =
-        [&stream](const SupportedStreamConfiguration& config) {
-          return stream.width == config.width && stream.height == config.height;
+        [requestedResolution](const SupportedStreamConfiguration& config) {
+          Resolution supportedInputResolution(config.width, config.height);
+          return requestedResolution <= supportedInputResolution &&
+                 isApproximatellySameAspectRatio(requestedResolution,
+                                                 supportedInputResolution);
         };
     if (std::none_of(mSupportedInputConfigurations.begin(),
                      mSupportedInputConfigurations.end(),
@@ -314,6 +483,19 @@
       return false;
     }
   }
+
+  if (numberOfProcessedStreams > kMaxNumberOfProcessedStreams) {
+    ALOGE("%s: %d processed streams exceeds the supported maximum of %d",
+          __func__, numberOfProcessedStreams, kMaxNumberOfProcessedStreams);
+    return false;
+  }
+
+  if (numberOfStallStreams > kMaxNumberOfStallStreams) {
+    ALOGE("%s: %d stall streams exceeds the supported maximum of %d", __func__,
+          numberOfStallStreams, kMaxNumberOfStallStreams);
+    return false;
+  }
+
   return true;
 }
 
@@ -368,6 +550,24 @@
   return std::string(kDevicePathPrefix) + std::to_string(mCameraId);
 }
 
+const std::vector<SupportedStreamConfiguration>&
+VirtualCameraDevice::getInputConfigs() const {
+  return mSupportedInputConfigurations;
+}
+
+Resolution VirtualCameraDevice::getMaxInputResolution() const {
+  std::optional<Resolution> maxResolution =
+      getMaxResolution(mSupportedInputConfigurations);
+  if (!maxResolution.has_value()) {
+    ALOGE(
+        "%s: Cannot determine sensor size for virtual camera - input "
+        "configurations empty?",
+        __func__);
+    return Resolution(0, 0);
+  }
+  return maxResolution.value();
+}
+
 std::shared_ptr<VirtualCameraDevice> VirtualCameraDevice::sharedFromThis() {
   // SharedRefBase which BnCameraDevice inherits from breaks
   // std::enable_shared_from_this. This is recommended replacement for
diff --git a/services/camera/virtualcamera/VirtualCameraDevice.h b/services/camera/virtualcamera/VirtualCameraDevice.h
index 402de6c..c274dc9 100644
--- a/services/camera/virtualcamera/VirtualCameraDevice.h
+++ b/services/camera/virtualcamera/VirtualCameraDevice.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,7 +22,9 @@
 
 #include "aidl/android/companion/virtualcamera/IVirtualCameraCallback.h"
 #include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
+#include "aidl/android/companion/virtualcamera/VirtualCameraConfiguration.h"
 #include "aidl/android/hardware/camera/device/BnCameraDevice.h"
+#include "util/Util.h"
 
 namespace android {
 namespace companion {
@@ -35,12 +37,8 @@
  public:
   explicit VirtualCameraDevice(
       uint32_t cameraId,
-      const std::vector<
-          aidl::android::companion::virtualcamera::SupportedStreamConfiguration>&
-          supportedInputConfig,
-      std::shared_ptr<
-          ::aidl::android::companion::virtualcamera::IVirtualCameraCallback>
-          virtualCameraClientCallback = nullptr);
+      const aidl::android::companion::virtualcamera::VirtualCameraConfiguration&
+          configuration);
 
   virtual ~VirtualCameraDevice() override = default;
 
@@ -97,6 +95,32 @@
 
   uint32_t getCameraId() const { return mCameraId; }
 
+  const std::vector<
+      aidl::android::companion::virtualcamera::SupportedStreamConfiguration>&
+  getInputConfigs() const;
+
+  // Returns largest supported input resolution.
+  Resolution getMaxInputResolution() const;
+
+  // Maximal number of RAW streams - virtual camera doesn't support RAW streams.
+  static constexpr int32_t kMaxNumberOfRawStreams = 0;
+
+  // Maximal number of non-JPEG streams configured concurrently in a single
+  // session. This should be at least 3 and can be increased at the potential
+  // cost of more CPU/GPU load if there are many concurrent streams.
+  static constexpr int32_t kMaxNumberOfProcessedStreams = 3;
+
+  // Maximal number of stalling streams (for the virtual camera, only JPEG for
+  // now). Can be increased at the potential cost of more GPU/CPU
+  // load.
+  static constexpr int32_t kMaxNumberOfStallStreams = 1;
+
+  // Focal length for full frame sensor.
+  static constexpr float kFocalLength = 43.0;
+
+  // Default JPEG compression quality.
+  static constexpr uint8_t kDefaultJpegQuality = 80;
+
  private:
   std::shared_ptr<VirtualCameraDevice> sharedFromThis();
 
diff --git a/services/camera/virtualcamera/VirtualCameraProvider.cc b/services/camera/virtualcamera/VirtualCameraProvider.cc
index 25a43d6..e4a68f5 100644
--- a/services/camera/virtualcamera/VirtualCameraProvider.cc
+++ b/services/camera/virtualcamera/VirtualCameraProvider.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -33,8 +33,7 @@
 namespace companion {
 namespace virtualcamera {
 
-using ::aidl::android::companion::virtualcamera::IVirtualCameraCallback;
-using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::CameraDeviceStatus;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::common::VendorTagSection;
@@ -155,10 +154,9 @@
 }
 
 std::shared_ptr<VirtualCameraDevice> VirtualCameraProvider::createCamera(
-    const std::vector<SupportedStreamConfiguration>& supportedInputConfig,
-    std::shared_ptr<IVirtualCameraCallback> virtualCameraClientCallback) {
-  auto camera = ndk::SharedRefBase::make<VirtualCameraDevice>(
-      sNextId++, supportedInputConfig, virtualCameraClientCallback);
+    const VirtualCameraConfiguration& configuration) {
+  auto camera =
+      ndk::SharedRefBase::make<VirtualCameraDevice>(sNextId++, configuration);
   std::shared_ptr<ICameraProviderCallback> callback;
   {
     const std::lock_guard<std::mutex> lock(mLock);
diff --git a/services/camera/virtualcamera/VirtualCameraProvider.h b/services/camera/virtualcamera/VirtualCameraProvider.h
index d41a005..11d3123 100644
--- a/services/camera/virtualcamera/VirtualCameraProvider.h
+++ b/services/camera/virtualcamera/VirtualCameraProvider.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -74,14 +74,9 @@
 
   // Create new virtual camera devices
   // Returns nullptr if creation was not successful.
-  //
-  // TODO(b/301023410) - Add camera configuration.
   std::shared_ptr<VirtualCameraDevice> createCamera(
-      const std::vector<
-          aidl::android::companion::virtualcamera::SupportedStreamConfiguration>&
-          supportedInputConfig,
-      std::shared_ptr<aidl::android::companion::virtualcamera::IVirtualCameraCallback>
-          virtualCameraClientCallback = nullptr);
+      const aidl::android::companion::virtualcamera::VirtualCameraConfiguration&
+          configuration);
 
   std::shared_ptr<VirtualCameraDevice> getCamera(const std::string& name);
 
@@ -105,4 +100,4 @@
 }  // namespace companion
 }  // namespace android
 
-#endif  // ANDROID_SERVICES_VIRTUAL_CAMERA_VIRTUALCAMERAPROVIDER_H
+#endif  // ANDROID_COMPANION_VIRTUALCAMERA_VIRTUALCAMERAPROVIDER_H
diff --git a/services/camera/virtualcamera/VirtualCameraRenderThread.cc b/services/camera/virtualcamera/VirtualCameraRenderThread.cc
index 79c91ef..9b0fc07 100644
--- a/services/camera/virtualcamera/VirtualCameraRenderThread.cc
+++ b/services/camera/virtualcamera/VirtualCameraRenderThread.cc
@@ -14,21 +14,27 @@
  * limitations under the License.
  */
 
+#include "system/camera_metadata.h"
 #define LOG_TAG "VirtualCameraRenderThread"
 #include "VirtualCameraRenderThread.h"
 
 #include <chrono>
-#include <cstddef>
 #include <cstdint>
+#include <cstring>
 #include <future>
 #include <memory>
 #include <mutex>
 #include <thread>
+#include <vector>
 
+#include "Exif.h"
 #include "GLES/gl.h"
+#include "VirtualCameraDevice.h"
 #include "VirtualCameraSessionContext.h"
 #include "aidl/android/hardware/camera/common/Status.h"
 #include "aidl/android/hardware/camera/device/BufferStatus.h"
+#include "aidl/android/hardware/camera/device/CameraBlob.h"
+#include "aidl/android/hardware/camera/device/CameraBlobId.h"
 #include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "aidl/android/hardware/camera/device/CaptureResult.h"
 #include "aidl/android/hardware/camera/device/ErrorCode.h"
@@ -42,7 +48,7 @@
 #include "ui/GraphicBuffer.h"
 #include "util/EglFramebuffer.h"
 #include "util/JpegUtil.h"
-#include "util/MetadataBuilder.h"
+#include "util/MetadataUtil.h"
 #include "util/TestPatternHelper.h"
 #include "util/Util.h"
 #include "utils/Errors.h"
@@ -53,6 +59,8 @@
 
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::device::BufferStatus;
+using ::aidl::android::hardware::camera::device::CameraBlob;
+using ::aidl::android::hardware::camera::device::CameraBlobId;
 using ::aidl::android::hardware::camera::device::CameraMetadata;
 using ::aidl::android::hardware::camera::device::CaptureResult;
 using ::aidl::android::hardware::camera::device::ErrorCode;
@@ -65,16 +73,49 @@
 using ::aidl::android::hardware::graphics::common::PixelFormat;
 using ::android::base::ScopedLockAssertion;
 
+using ::android::hardware::camera::common::helper::ExifUtils;
+
 namespace {
 
 using namespace std::chrono_literals;
 
 static constexpr std::chrono::milliseconds kAcquireFenceTimeout = 500ms;
 
+// See REQUEST_PIPELINE_DEPTH in CaptureResult.java.
+// This roughly corresponds to frame latency; we set it to
+// the documented minimum of 2.
+static constexpr uint8_t kPipelineDepth = 2;
+
+static constexpr size_t kJpegThumbnailBufferSize = 32 * 1024;  // 32 KiB
+
 CameraMetadata createCaptureResultMetadata(
-    const std::chrono::nanoseconds timestamp) {
+    const std::chrono::nanoseconds timestamp,
+    const RequestSettings& requestSettings,
+    const Resolution reportedSensorSize) {
   std::unique_ptr<CameraMetadata> metadata =
-      MetadataBuilder().setSensorTimestamp(timestamp).build();
+      MetadataBuilder()
+          .setAberrationCorrectionMode(
+              ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF)
+          .setControlAeMode(ANDROID_CONTROL_AE_MODE_ON)
+          .setControlAePrecaptureTrigger(
+              ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE)
+          .setControlAfMode(ANDROID_CONTROL_AF_MODE_OFF)
+          .setControlAwbMode(ANDROID_CONTROL_AWB_MODE_AUTO)
+          .setControlEffectMode(ANDROID_CONTROL_EFFECT_MODE_OFF)
+          .setControlMode(ANDROID_CONTROL_MODE_AUTO)
+          .setCropRegion(0, 0, reportedSensorSize.width,
+                         reportedSensorSize.height)
+          .setFaceDetectMode(ANDROID_STATISTICS_FACE_DETECT_MODE_OFF)
+          .setFlashState(ANDROID_FLASH_STATE_UNAVAILABLE)
+          .setFocalLength(VirtualCameraDevice::kFocalLength)
+          .setJpegQuality(requestSettings.jpegQuality)
+          .setJpegThumbnailSize(requestSettings.thumbnailResolution.width,
+                                requestSettings.thumbnailResolution.height)
+          .setJpegThumbnailQuality(requestSettings.thumbnailJpegQuality)
+          .setNoiseReductionMode(ANDROID_NOISE_REDUCTION_MODE_OFF)
+          .setPipelineDepth(kPipelineDepth)
+          .setSensorTimestamp(timestamp)
+          .build();
   if (metadata == nullptr) {
     ALOGE("%s: Failed to build capture result metadata", __func__);
     return CameraMetadata();
@@ -150,6 +191,34 @@
   }
 }
 
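+// Builds an EXIF APP1 segment for an image of the given size, optionally
+// embedding an already-compressed JPEG thumbnail.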
+std::vector<uint8_t> createExif(
+    Resolution imageSize, const std::vector<uint8_t>& compressedThumbnail = {}) {
+  std::unique_ptr<ExifUtils> exifUtils(ExifUtils::create());
+  exifUtils->initialize();
+  exifUtils->setImageWidth(imageSize.width);
+  exifUtils->setImageHeight(imageSize.height);
+  // TODO(b/324383963) Set Make/Model and orientation.
+
+  std::vector<uint8_t> app1Data;
+
+  size_t thumbnailDataSize = compressedThumbnail.size();
+  const void* thumbnailData =
+      thumbnailDataSize > 0
+          ? reinterpret_cast<const void*>(compressedThumbnail.data())
+          : nullptr;
+
+  if (!exifUtils->generateApp1(thumbnailData, thumbnailDataSize)) {
+    ALOGE("%s: Failed to generate APP1 segment for EXIF metadata", __func__);
+    return app1Data;
+  }
+
+  const uint8_t* data = exifUtils->getApp1Buffer();
+  const size_t size = exifUtils->getApp1Length();
+
+  app1Data.insert(app1Data.end(), data, data + size);
+  return app1Data;
+}
+
 }  // namespace
 
 CaptureRequestBuffer::CaptureRequestBuffer(int streamId, int bufferId,
@@ -170,12 +239,12 @@
 }
 
 VirtualCameraRenderThread::VirtualCameraRenderThread(
-    VirtualCameraSessionContext& sessionContext, const int mWidth,
-    const int mHeight,
+    VirtualCameraSessionContext& sessionContext,
+    const Resolution inputSurfaceSize, const Resolution reportedSensorSize,
     std::shared_ptr<ICameraDeviceCallback> cameraDeviceCallback, bool testMode)
     : mCameraDeviceCallback(cameraDeviceCallback),
-      mInputSurfaceWidth(mWidth),
-      mInputSurfaceHeight(mHeight),
+      mInputSurfaceSize(inputSurfaceSize),
+      mReportedSensorSize(reportedSensorSize),
       mTestMode(testMode),
       mSessionContext(sessionContext) {
 }
@@ -188,8 +257,11 @@
 }
 
 ProcessCaptureRequestTask::ProcessCaptureRequestTask(
-    int frameNumber, const std::vector<CaptureRequestBuffer>& requestBuffers)
-    : mFrameNumber(frameNumber), mBuffers(requestBuffers) {
+    int frameNumber, const std::vector<CaptureRequestBuffer>& requestBuffers,
+    const RequestSettings& requestSettings)
+    : mFrameNumber(frameNumber),
+      mBuffers(requestBuffers),
+      mRequestSettings(requestSettings) {
 }
 
 int ProcessCaptureRequestTask::getFrameNumber() const {
@@ -201,6 +273,10 @@
   return mBuffers;
 }
 
+const RequestSettings& ProcessCaptureRequestTask::getRequestSettings() const {
+  return mRequestSettings;
+}
+
 void VirtualCameraRenderThread::enqueueTask(
     std::unique_ptr<ProcessCaptureRequestTask> task) {
   std::lock_guard<std::mutex> lock(mLock);
@@ -263,8 +339,8 @@
       std::make_unique<EglTextureProgram>(EglTextureProgram::TextureFormat::YUV);
   mEglTextureRgbProgram = std::make_unique<EglTextureProgram>(
       EglTextureProgram::TextureFormat::RGBA);
-  mEglSurfaceTexture = std::make_unique<EglSurfaceTexture>(mInputSurfaceWidth,
-                                                           mInputSurfaceHeight);
+  mEglSurfaceTexture = std::make_unique<EglSurfaceTexture>(
+      mInputSurfaceSize.width, mInputSurfaceSize.height);
   mInputSurfacePromise.set_value(mEglSurfaceTexture->getSurface());
 
   while (std::unique_ptr<ProcessCaptureRequestTask> task = dequeueTask()) {
@@ -287,7 +363,8 @@
   captureResult.partialResult = 1;
   captureResult.inputBuffer.streamId = -1;
   captureResult.physicalCameraMetadata.resize(0);
-  captureResult.result = createCaptureResultMetadata(timestamp);
+  captureResult.result = createCaptureResultMetadata(
+      timestamp, request.getRequestSettings(), mReportedSensorSize);
 
   const std::vector<CaptureRequestBuffer>& buffers = request.getBuffers();
   captureResult.outputBuffers.resize(buffers.size());
@@ -316,9 +393,9 @@
     }
 
     auto status = streamConfig->format == PixelFormat::BLOB
-                      ? renderIntoBlobStreamBuffer(reqBuffer.getStreamId(),
-                                                   reqBuffer.getBufferId(),
-                                                   reqBuffer.getFence())
+                      ? renderIntoBlobStreamBuffer(
+                            reqBuffer.getStreamId(), reqBuffer.getBufferId(),
+                            request.getRequestSettings(), reqBuffer.getFence())
                       : renderIntoImageStreamBuffer(reqBuffer.getStreamId(),
                                                     reqBuffer.getBufferId(),
                                                     reqBuffer.getFence());
@@ -398,9 +475,70 @@
   }
 }
 
+std::vector<uint8_t> VirtualCameraRenderThread::createThumbnail(
+    const Resolution resolution, const int quality) {
+  if (resolution.width == 0 || resolution.height == 0) {
+    ALOGV("%s: Skipping thumbnail creation, zero size requested", __func__);
+    return {};
+  }
+
+  ALOGV("%s: Creating thumbnail with size %d x %d, quality %d", __func__,
+        resolution.width, resolution.height, quality);
+  std::shared_ptr<EglFrameBuffer> framebuffer = allocateTemporaryFramebuffer(
+      mEglDisplayContext->getEglDisplay(), resolution.width, resolution.height);
+  if (framebuffer == nullptr) {
+    ALOGE(
+        "Failed to allocate temporary framebuffer for JPEG thumbnail "
+        "compression");
+    return {};
+  }
+
+  // TODO(b/324383963) Add support for letterboxing if the thumbnail size
+  // doesn't correspond to the input texture aspect ratio.
+  if (!renderIntoEglFramebuffer(*framebuffer).isOk()) {
+    ALOGE(
+        "Failed to render input texture into temporary framebuffer for JPEG "
+        "thumbnail");
+    return {};
+  }
+
+  std::shared_ptr<AHardwareBuffer> inHwBuffer = framebuffer->getHardwareBuffer();
+  GraphicBuffer* gBuffer = GraphicBuffer::fromAHardwareBuffer(inHwBuffer.get());
+
+  if (gBuffer->getPixelFormat() != HAL_PIXEL_FORMAT_YCbCr_420_888) {
+    // This should never happen since we're allocating the temporary buffer
+    // with YUV420 layout above.
+    ALOGE("%s: Cannot compress non-YUV buffer (pixelFormat %d)", __func__,
+          gBuffer->getPixelFormat());
+    return {};
+  }
+
+  YCbCrLockGuard yCbCrLock(inHwBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN);
+  if (yCbCrLock.getStatus() != NO_ERROR) {
+    ALOGE("%s: Failed to lock graphic buffer while generating thumbnail: %d",
+          __func__, yCbCrLock.getStatus());
+    return {};
+  }
+
+  std::vector<uint8_t> compressedThumbnail;
+  compressedThumbnail.resize(kJpegThumbnailBufferSize);
+  ALOGE("%s: Compressing thumbnail %d x %d", __func__, gBuffer->getWidth(),
+        gBuffer->getHeight());
+  std::optional<size_t> compressedSize = compressJpeg(
+      gBuffer->getWidth(), gBuffer->getHeight(), quality, *yCbCrLock, {},
+      compressedThumbnail.size(), compressedThumbnail.data());
+  if (!compressedSize.has_value()) {
+    ALOGE("%s: Failed to compress jpeg thumbnail", __func__);
+    return {};
+  }
+  compressedThumbnail.resize(compressedSize.value());
+  return compressedThumbnail;
+}
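For context, YCbCrLockGuard (and PlanesLockGuard used further below) are RAII helpers from util/Util.h that are not shown in this diff. The following is only a minimal sketch of the lock-in-constructor, unlock-in-destructor pattern they are assumed to implement, built on the same GraphicBuffer::lockYCbCr call the removed code used; the name YCbCrLockGuardSketch and all details are illustrative, not the real implementation.

#include <memory>

#include <android/hardware_buffer.h>
#include <system/graphics.h>
#include <ui/GraphicBuffer.h>
#include <utils/Errors.h>

// Sketch only: locks the YCbCr planes of a hardware buffer for CPU access in
// the constructor and unlocks them automatically when the guard goes out of
// scope.
class YCbCrLockGuardSketch {
 public:
  YCbCrLockGuardSketch(std::shared_ptr<AHardwareBuffer> hwBuffer, uint64_t usage)
      : mHwBuffer(std::move(hwBuffer)) {
    android::GraphicBuffer* gBuffer =
        android::GraphicBuffer::fromAHardwareBuffer(mHwBuffer.get());
    if (gBuffer == nullptr) {
      mStatus = android::BAD_VALUE;
      return;
    }
    mStatus = gBuffer->lockYCbCr(usage, &mYCbCr);
  }

  ~YCbCrLockGuardSketch() {
    if (mStatus == android::NO_ERROR) {
      android::GraphicBuffer::fromAHardwareBuffer(mHwBuffer.get())->unlock();
    }
  }

  // NO_ERROR when the lock succeeded and operator*() may be used.
  android::status_t getStatus() const { return mStatus; }
  const android_ycbcr& operator*() const { return mYCbCr; }

 private:
  std::shared_ptr<AHardwareBuffer> mHwBuffer;
  android_ycbcr mYCbCr{};
  android::status_t mStatus = android::UNKNOWN_ERROR;
};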
+
 ndk::ScopedAStatus VirtualCameraRenderThread::renderIntoBlobStreamBuffer(
-    const int streamId, const int bufferId, sp<Fence> fence) {
-  ALOGV("%s", __func__);
+    const int streamId, const int bufferId,
+    const RequestSettings& requestSettings, sp<Fence> fence) {
   std::shared_ptr<AHardwareBuffer> hwBuffer =
       mSessionContext.fetchHardwareBuffer(streamId, bufferId);
   if (hwBuffer == nullptr) {
@@ -415,6 +553,9 @@
     return cameraStatus(Status::INTERNAL_ERROR);
   }
 
+  ALOGV("%s: Rendering JPEG with size %d x %d, quality %d", __func__,
+        stream->width, stream->height, requestSettings.jpegQuality);
+
   // Let's create a YUV framebuffer and render the surface into it.
   // This will take care of rescaling as well as potential format conversion.
   std::shared_ptr<EglFrameBuffer> framebuffer = allocateTemporaryFramebuffer(
@@ -431,58 +572,62 @@
     return status;
   }
 
-  AHardwareBuffer_Planes planes_info;
-
-  int32_t rawFence = fence != nullptr ? fence->get() : -1;
-  int result = AHardwareBuffer_lockPlanes(hwBuffer.get(),
-                                          AHARDWAREBUFFER_USAGE_CPU_READ_RARELY,
-                                          rawFence, nullptr, &planes_info);
-  if (result != OK) {
-    ALOGE("%s: Failed to lock planes for BLOB buffer: %d", __func__, result);
+  PlanesLockGuard planesLock(hwBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_RARELY,
+                             fence);
+  if (planesLock.getStatus() != OK) {
     return cameraStatus(Status::INTERNAL_ERROR);
   }
 
   std::shared_ptr<AHardwareBuffer> inHwBuffer = framebuffer->getHardwareBuffer();
   GraphicBuffer* gBuffer = GraphicBuffer::fromAHardwareBuffer(inHwBuffer.get());
 
-  bool compressionSuccess = true;
-  if (gBuffer != nullptr) {
-    android_ycbcr ycbcr;
-    if (gBuffer->getPixelFormat() != HAL_PIXEL_FORMAT_YCbCr_420_888) {
-      // This should never happen since we're allocating the temporary buffer
-      // with YUV420 layout above.
-      ALOGE("%s: Cannot compress non-YUV buffer (pixelFormat %d)", __func__,
-            gBuffer->getPixelFormat());
-      AHardwareBuffer_unlock(hwBuffer.get(), nullptr);
-      return cameraStatus(Status::INTERNAL_ERROR);
-    }
-
-    status_t status =
-        gBuffer->lockYCbCr(AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN, &ycbcr);
-    ALOGV("Locked buffers");
-    if (status != NO_ERROR) {
-      AHardwareBuffer_unlock(hwBuffer.get(), nullptr);
-      ALOGE("%s: Failed to lock graphic buffer: %d", __func__, status);
-      return cameraStatus(Status::INTERNAL_ERROR);
-    }
-
-    compressionSuccess =
-        compressJpeg(gBuffer->getWidth(), gBuffer->getHeight(), ycbcr,
-                     stream->bufferSize, planes_info.planes[0].data);
-
-    status_t res = gBuffer->unlock();
-    if (res != NO_ERROR) {
-      ALOGE("Failed to unlock graphic buffer: %d", res);
-    }
-  } else {
-    compressionSuccess =
-        compressBlackJpeg(stream->width, stream->height, stream->bufferSize,
-                          planes_info.planes[0].data);
+  if (gBuffer == nullptr) {
+    ALOGE(
+        "%s: Encountered invalid temporary buffer while rendering JPEG "
+        "into BLOB stream",
+        __func__);
+    return cameraStatus(Status::INTERNAL_ERROR);
   }
-  AHardwareBuffer_unlock(hwBuffer.get(), nullptr);
-  ALOGV("Unlocked buffers");
-  return compressionSuccess ? ndk::ScopedAStatus::ok()
-                            : cameraStatus(Status::INTERNAL_ERROR);
+
+  if (gBuffer->getPixelFormat() != HAL_PIXEL_FORMAT_YCbCr_420_888) {
+    // This should never happen since we're allocating the temporary buffer
+    // with YUV420 layout above.
+    ALOGE("%s: Cannot compress non-YUV buffer (pixelFormat %d)", __func__,
+          gBuffer->getPixelFormat());
+    return cameraStatus(Status::INTERNAL_ERROR);
+  }
+
+  YCbCrLockGuard yCbCrLock(inHwBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN);
+  if (yCbCrLock.getStatus() != OK) {
+    return cameraStatus(Status::INTERNAL_ERROR);
+  }
+
+  std::vector<uint8_t> app1ExifData =
+      createExif(Resolution(stream->width, stream->height),
+                 createThumbnail(requestSettings.thumbnailResolution,
+                                 requestSettings.thumbnailJpegQuality));
+  std::optional<size_t> compressedSize = compressJpeg(
+      gBuffer->getWidth(), gBuffer->getHeight(), requestSettings.jpegQuality,
+      *yCbCrLock, app1ExifData, stream->bufferSize - sizeof(CameraBlob),
+      (*planesLock).planes[0].data);
+
+  if (!compressedSize.has_value()) {
+    ALOGE("%s: Failed to compress JPEG image", __func__);
+    return cameraStatus(Status::INTERNAL_ERROR);
+  }
+
+  CameraBlob cameraBlob{
+      .blobId = CameraBlobId::JPEG,
+      .blobSizeBytes = static_cast<int32_t>(compressedSize.value())};
+
+  memcpy(reinterpret_cast<uint8_t*>((*planesLock).planes[0].data) +
+             (stream->bufferSize - sizeof(cameraBlob)),
+         &cameraBlob, sizeof(cameraBlob));
+
+  ALOGV("%s: Successfully compressed JPEG image, resulting size %zu B",
+        __func__, compressedSize.value());
+
+  return ndk::ScopedAStatus::ok();
 }
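The buffer written above follows the camera HAL convention for BLOB streams: the compressed JPEG occupies the start of the buffer and a CameraBlob transport header sits at offset bufferSize - sizeof(CameraBlob). As an illustration only, a consumer could recover the payload size from that trailer as sketched below; readJpegSizeFromBlobBuffer is a hypothetical helper, and the include path and trivially copyable layout are assumptions mirroring the memcpy above.

#include <cstdint>
#include <cstring>
#include <optional>

#include <aidl/android/hardware/camera/device/CameraBlob.h>

// Hypothetical helper: reads the CameraBlob trailer from the end of a BLOB
// stream buffer and returns the JPEG payload size if the trailer is valid.
std::optional<size_t> readJpegSizeFromBlobBuffer(const uint8_t* buffer,
                                                 size_t bufferSize) {
  using ::aidl::android::hardware::camera::device::CameraBlob;
  using ::aidl::android::hardware::camera::device::CameraBlobId;

  if (buffer == nullptr || bufferSize < sizeof(CameraBlob)) {
    return std::nullopt;
  }
  CameraBlob blob;
  std::memcpy(&blob, buffer + bufferSize - sizeof(CameraBlob),
              sizeof(CameraBlob));
  if (blob.blobId != CameraBlobId::JPEG || blob.blobSizeBytes < 0) {
    return std::nullopt;
  }
  return static_cast<size_t>(blob.blobSizeBytes);
}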
 
 ndk::ScopedAStatus VirtualCameraRenderThread::renderIntoImageStreamBuffer(
@@ -542,8 +687,12 @@
   } else {
     const bool renderSuccess =
         isYuvFormat(static_cast<PixelFormat>(textureBuffer->getPixelFormat()))
-            ? mEglTextureYuvProgram->draw(mEglSurfaceTexture->updateTexture())
-            : mEglTextureRgbProgram->draw(mEglSurfaceTexture->updateTexture());
+            ? mEglTextureYuvProgram->draw(
+                  mEglSurfaceTexture->getTextureId(),
+                  mEglSurfaceTexture->getTransformMatrix())
+            : mEglTextureRgbProgram->draw(
+                  mEglSurfaceTexture->getTextureId(),
+                  mEglSurfaceTexture->getTransformMatrix());
     if (!renderSuccess) {
       ALOGE("%s: Failed to render texture", __func__);
       return cameraStatus(Status::INTERNAL_ERROR);
diff --git a/services/camera/virtualcamera/VirtualCameraRenderThread.h b/services/camera/virtualcamera/VirtualCameraRenderThread.h
index b3aaed8..86dad0b 100644
--- a/services/camera/virtualcamera/VirtualCameraRenderThread.h
+++ b/services/camera/virtualcamera/VirtualCameraRenderThread.h
@@ -17,18 +17,23 @@
 #ifndef ANDROID_COMPANION_VIRTUALCAMERA_VIRTUALCAMERARENDERTHREAD_H
 #define ANDROID_COMPANION_VIRTUALCAMERA_VIRTUALCAMERARENDERTHREAD_H
 
+#include <cstdint>
 #include <deque>
 #include <future>
 #include <memory>
 #include <thread>
+#include <vector>
 
+#include "VirtualCameraDevice.h"
 #include "VirtualCameraSessionContext.h"
+#include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "aidl/android/hardware/camera/device/ICameraDeviceCallback.h"
 #include "android/binder_auto_utils.h"
 #include "util/EglDisplayContext.h"
 #include "util/EglFramebuffer.h"
 #include "util/EglProgram.h"
 #include "util/EglSurfaceTexture.h"
+#include "util/Util.h"
 
 namespace android {
 namespace companion {
@@ -49,11 +54,18 @@
   const sp<Fence> mFence;
 };
 
+struct RequestSettings {
+  int jpegQuality = VirtualCameraDevice::kDefaultJpegQuality;
+  Resolution thumbnailResolution = Resolution(0, 0);
+  int thumbnailJpegQuality = VirtualCameraDevice::kDefaultJpegQuality;
+};
+
 // Represents single capture request to fill set of buffers.
 class ProcessCaptureRequestTask {
  public:
   ProcessCaptureRequestTask(
-      int frameNumber, const std::vector<CaptureRequestBuffer>& requestBuffers);
+      int frameNumber, const std::vector<CaptureRequestBuffer>& requestBuffers,
+      const RequestSettings& RequestSettings = {});
 
   // Returns frame number corresponding to the request.
   int getFrameNumber() const;
@@ -65,9 +77,12 @@
   // so it cannot be accessed outside of its lifetime.
   const std::vector<CaptureRequestBuffer>& getBuffers() const;
 
+  const RequestSettings& getRequestSettings() const;
+
  private:
   const int mFrameNumber;
   const std::vector<CaptureRequestBuffer> mBuffers;
+  const RequestSettings mRequestSettings;
 };
 
 // Wraps dedicated rendering thread and rendering business with corresponding
@@ -77,14 +92,14 @@
   // Create VirtualCameraRenderThread instance:
   // * sessionContext - VirtualCameraSessionContext reference for shared access
   // to mapped buffers.
-  // * inputWidth - requested width of input surface ("virtual camera sensor")
-  // * inputHeight - requested height of input surface ("virtual camera sensor")
+  // * inputSurfaceSize - requested size of input surface.
+  // * reportedSensorSize - reported static sensor size of virtual camera.
   // * cameraDeviceCallback - callback for corresponding camera instance
   // * testMode - when set to true, test pattern is rendered to input surface
   // before each capture request is processed to simulate client input.
   VirtualCameraRenderThread(
-      VirtualCameraSessionContext& sessionContext, int inputWidth,
-      int inputHeight,
+      VirtualCameraSessionContext& sessionContext, Resolution inputSurfaceSize,
+      Resolution reportedSensorSize,
       std::shared_ptr<
           ::aidl::android::hardware::camera::device::ICameraDeviceCallback>
           cameraDeviceCallback,
@@ -122,13 +137,19 @@
   // TODO(b/301023410) - Refactor the actual rendering logic off this class for
   // easier testability.
 
+  // Creates a thumbnail of the specified size for the current image.
+  // The compressed thumbnail size is limited to 32 KiB.
+  // Returns a vector with the compressed thumbnail on success,
+  // or an empty vector otherwise.
+  std::vector<uint8_t> createThumbnail(Resolution resolution, int quality);
+
   // Render current image to the BLOB buffer.
   // If fence is specified, this function will block until the fence is cleared
   // before writing to the buffer.
   // Always called on render thread.
-  ndk::ScopedAStatus renderIntoBlobStreamBuffer(const int streamId,
-                                                const int bufferId,
-                                                sp<Fence> fence = nullptr);
+  ndk::ScopedAStatus renderIntoBlobStreamBuffer(
+      const int streamId, const int bufferId,
+      const RequestSettings& requestSettings, sp<Fence> fence = nullptr);
 
   // Render current image to the YCbCr buffer.
   // If fence is specified, this function will block until the fence is cleared
@@ -149,8 +170,8 @@
       ::aidl::android::hardware::camera::device::ICameraDeviceCallback>
       mCameraDeviceCallback;
 
-  const int mInputSurfaceWidth;
-  const int mInputSurfaceHeight;
+  const Resolution mInputSurfaceSize;
+  const Resolution mReportedSensorSize;
   const int mTestMode;
 
   VirtualCameraSessionContext& mSessionContext;
diff --git a/services/camera/virtualcamera/VirtualCameraService.cc b/services/camera/virtualcamera/VirtualCameraService.cc
index 370a5a8..1144997 100644
--- a/services/camera/virtualcamera/VirtualCameraService.cc
+++ b/services/camera/virtualcamera/VirtualCameraService.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -41,6 +41,8 @@
 namespace virtualcamera {
 
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
 using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 
@@ -48,6 +50,7 @@
 
 constexpr int kVgaWidth = 640;
 constexpr int kVgaHeight = 480;
+constexpr int kMaxFps = 60;
 constexpr char kEnableTestCameraCmd[] = "enable_test_camera";
 constexpr char kDisableTestCameraCmd[] = "disable_test_camera";
 constexpr char kShellCmdHelp[] = R"(
@@ -69,13 +72,29 @@
   for (const SupportedStreamConfiguration& config :
        configuration.supportedStreamConfigs) {
     if (!isFormatSupportedForInput(config.width, config.height,
-                                   config.pixelFormat)) {
+                                   config.pixelFormat, config.maxFps)) {
       ALOGE("%s: Requested unsupported input format: %d x %d (%d)", __func__,
             config.width, config.height, static_cast<int>(config.pixelFormat));
       return ndk::ScopedAStatus::fromServiceSpecificError(
           Status::EX_ILLEGAL_ARGUMENT);
     }
   }
+
+  if (configuration.sensorOrientation != SensorOrientation::ORIENTATION_0 &&
+      configuration.sensorOrientation != SensorOrientation::ORIENTATION_90 &&
+      configuration.sensorOrientation != SensorOrientation::ORIENTATION_180 &&
+      configuration.sensorOrientation != SensorOrientation::ORIENTATION_270) {
+    return ndk::ScopedAStatus::fromServiceSpecificError(
+        Status::EX_ILLEGAL_ARGUMENT);
+  }
+
+  if (configuration.lensFacing != LensFacing::FRONT &&
+      configuration.lensFacing != LensFacing::BACK &&
+      configuration.lensFacing != LensFacing::EXTERNAL) {
+    return ndk::ScopedAStatus::fromServiceSpecificError(
+        Status::EX_ILLEGAL_ARGUMENT);
+  }
+
   return ndk::ScopedAStatus::ok();
 }
 
@@ -121,10 +140,8 @@
     return ndk::ScopedAStatus::ok();
   }
 
-  // TODO(b/301023410) Validate configuration and pass it to the camera.
   std::shared_ptr<VirtualCameraDevice> camera =
-      mVirtualCameraProvider->createCamera(configuration.supportedStreamConfigs,
-                                           configuration.virtualCameraCallback);
+      mVirtualCameraProvider->createCamera(configuration);
   if (camera == nullptr) {
     ALOGE("Failed to create camera for binder token 0x%" PRIxPTR,
           reinterpret_cast<uintptr_t>(token.get()));
@@ -241,8 +258,11 @@
 
   bool ret;
   VirtualCameraConfiguration configuration;
-  configuration.supportedStreamConfigs.push_back(
-      {.width = kVgaWidth, .height = kVgaHeight, Format::YUV_420_888});
+  configuration.supportedStreamConfigs.push_back({.width = kVgaWidth,
+                                                  .height = kVgaHeight,
+                                                  .pixelFormat = Format::YUV_420_888,
+                                                  .maxFps = kMaxFps});
+  configuration.lensFacing = LensFacing::EXTERNAL;
   registerCamera(mTestCameraToken, configuration, &ret);
   if (ret) {
     dprintf(out, "Successfully registered test camera %s",
diff --git a/services/camera/virtualcamera/VirtualCameraSession.cc b/services/camera/virtualcamera/VirtualCameraSession.cc
index 47780d8..2a691c1 100644
--- a/services/camera/virtualcamera/VirtualCameraSession.cc
+++ b/services/camera/virtualcamera/VirtualCameraSession.cc
@@ -18,14 +18,17 @@
 #define LOG_TAG "VirtualCameraSession"
 #include "VirtualCameraSession.h"
 
+#include <algorithm>
 #include <atomic>
 #include <chrono>
+#include <cmath>
 #include <cstddef>
 #include <cstdint>
 #include <cstring>
 #include <map>
 #include <memory>
 #include <mutex>
+#include <numeric>
 #include <optional>
 #include <tuple>
 #include <unordered_set>
@@ -37,13 +40,17 @@
 #include "VirtualCameraDevice.h"
 #include "VirtualCameraRenderThread.h"
 #include "VirtualCameraStream.h"
+#include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
 #include "aidl/android/hardware/camera/common/Status.h"
 #include "aidl/android/hardware/camera/device/BufferCache.h"
 #include "aidl/android/hardware/camera/device/BufferStatus.h"
+#include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "aidl/android/hardware/camera/device/CaptureRequest.h"
 #include "aidl/android/hardware/camera/device/HalStream.h"
 #include "aidl/android/hardware/camera/device/NotifyMsg.h"
+#include "aidl/android/hardware/camera/device/RequestTemplate.h"
 #include "aidl/android/hardware/camera/device/ShutterMsg.h"
+#include "aidl/android/hardware/camera/device/Stream.h"
 #include "aidl/android/hardware/camera/device/StreamBuffer.h"
 #include "aidl/android/hardware/camera/device/StreamConfiguration.h"
 #include "aidl/android/hardware/camera/device/StreamRotation.h"
@@ -58,7 +65,7 @@
 #include "util/EglFramebuffer.h"
 #include "util/EglProgram.h"
 #include "util/JpegUtil.h"
-#include "util/MetadataBuilder.h"
+#include "util/MetadataUtil.h"
 #include "util/TestPatternHelper.h"
 #include "util/Util.h"
 
@@ -68,6 +75,7 @@
 
 using ::aidl::android::companion::virtualcamera::Format;
 using ::aidl::android::companion::virtualcamera::IVirtualCameraCallback;
+using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::device::BufferCache;
 using ::aidl::android::hardware::camera::device::CameraMetadata;
@@ -96,36 +104,75 @@
 
 // Size of request/result metadata fast message queue.
 // Setting this to 0 always disables FMQ.
-static constexpr size_t kMetadataMsgQueueSize = 0;
+constexpr size_t kMetadataMsgQueueSize = 0;
 
 // Maximum number of buffers to use per single stream.
-static constexpr size_t kMaxStreamBuffers = 2;
+constexpr size_t kMaxStreamBuffers = 2;
 
-CameraMetadata createDefaultRequestSettings(RequestTemplate type) {
-  hardware::camera::common::V1_0::helper::CameraMetadata metadataHelper;
+// Thumbnail size (0,0) corresponds to disabling the thumbnail.
+const Resolution kDefaultJpegThumbnailSize(0, 0);
 
-  camera_metadata_enum_android_control_capture_intent_t intent =
-      ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+camera_metadata_enum_android_control_capture_intent_t requestTemplateToIntent(
+    const RequestTemplate type) {
   switch (type) {
     case RequestTemplate::PREVIEW:
-      intent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
-      break;
+      return ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
     case RequestTemplate::STILL_CAPTURE:
-      intent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
-      break;
+      return ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
     case RequestTemplate::VIDEO_RECORD:
-      intent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
-      break;
+      return ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
     case RequestTemplate::VIDEO_SNAPSHOT:
-      intent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
-      break;
+      return ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
     default:
-      // Leave default.
-      break;
+      // Return PREVIEW by default
+      return ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
   }
+}
 
-  auto metadata = MetadataBuilder().setControlCaptureIntent(intent).build();
-  return (metadata != nullptr) ? std::move(*metadata) : CameraMetadata();
+int getMaxFps(const std::vector<SupportedStreamConfiguration>& configs) {
+  return std::transform_reduce(
+      configs.begin(), configs.end(), 0,
+      [](const int a, const int b) { return std::max(a, b); },
+      [](const SupportedStreamConfiguration& config) { return config.maxFps; });
+}
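The std::transform_reduce call above simply takes the maximum maxFps over the supported input configurations. For readers less familiar with the algorithm, a plain-loop equivalent is sketched below; getMaxFpsLoop is illustrative only and assumes the same SupportedStreamConfiguration using-declaration as this file.

#include <algorithm>
#include <vector>

int getMaxFpsLoop(const std::vector<SupportedStreamConfiguration>& configs) {
  int maxFps = 0;
  for (const SupportedStreamConfiguration& config : configs) {
    maxFps = std::max(maxFps, config.maxFps);
  }
  return maxFps;
}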
+
+CameraMetadata createDefaultRequestSettings(
+    const RequestTemplate type,
+    const std::vector<SupportedStreamConfiguration>& inputConfigs) {
+  int maxFps = getMaxFps(inputConfigs);
+  auto metadata =
+      MetadataBuilder()
+          .setAberrationCorrectionMode(
+              ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF)
+          .setControlCaptureIntent(requestTemplateToIntent(type))
+          .setControlMode(ANDROID_CONTROL_MODE_AUTO)
+          .setControlAeMode(ANDROID_CONTROL_AE_MODE_ON)
+          .setControlAeExposureCompensation(0)
+          .setControlAeTargetFpsRange(maxFps, maxFps)
+          .setControlAeAntibandingMode(ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO)
+          .setControlAePrecaptureTrigger(
+              ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE)
+          .setControlAfTrigger(ANDROID_CONTROL_AF_TRIGGER_IDLE)
+          .setControlAfMode(ANDROID_CONTROL_AF_MODE_OFF)
+          .setControlAwbMode(ANDROID_CONTROL_AWB_MODE_AUTO)
+          .setControlEffectMode(ANDROID_CONTROL_EFFECT_MODE_OFF)
+          .setFaceDetectMode(ANDROID_STATISTICS_FACE_DETECT_MODE_OFF)
+          .setFlashMode(ANDROID_FLASH_MODE_OFF)
+          .setFlashState(ANDROID_FLASH_STATE_UNAVAILABLE)
+          .setJpegQuality(VirtualCameraDevice::kDefaultJpegQuality)
+          .setJpegThumbnailQuality(VirtualCameraDevice::kDefaultJpegQuality)
+          .setJpegThumbnailSize(0, 0)
+          .setNoiseReductionMode(ANDROID_NOISE_REDUCTION_MODE_OFF)
+          .build();
+  if (metadata == nullptr) {
+    ALOGE("%s: Failed to construct metadata for default request type %s",
+          __func__, toString(type).c_str());
+    return CameraMetadata();
+  } else {
+    ALOGV("%s: Successfully created metadata for request type %s", __func__,
+          toString(type).c_str());
+  }
+  return *metadata;
 }
 
 HalStream getHalStream(const Stream& stream) {
@@ -150,6 +197,72 @@
   return halStream;
 }
 
+Stream getHighestResolutionStream(const std::vector<Stream>& streams) {
+  return *(std::max_element(streams.begin(), streams.end(),
+                            [](const Stream& a, const Stream& b) {
+                              return a.width * a.height < b.width * b.height;
+                            }));
+}
+
+Resolution resolutionFromStream(const Stream& stream) {
+  return Resolution(stream.width, stream.height);
+}
+
+Resolution resolutionFromInputConfig(
+    const SupportedStreamConfiguration& inputConfig) {
+  return Resolution(inputConfig.width, inputConfig.height);
+}
+
+std::optional<SupportedStreamConfiguration> pickInputConfigurationForStreams(
+    const std::vector<Stream>& requestedStreams,
+    const std::vector<SupportedStreamConfiguration>& supportedInputConfigs) {
+  Stream maxResolutionStream = getHighestResolutionStream(requestedStreams);
+  Resolution maxResolution = resolutionFromStream(maxResolutionStream);
+
+  // Find the best fitting input configuration to satisfy all requested
+  // streams: best fitting => same or higher resolution than the largest
+  // requested stream, with the same aspect ratio and the lowest pixel count
+  // difference.
+  auto isBetterInputConfig = [maxResolution](
+                                 const SupportedStreamConfiguration& configA,
+                                 const SupportedStreamConfiguration& configB) {
+    int maxResPixelCount = maxResolution.width * maxResolution.height;
+    int pixelCountDiffA =
+        std::abs((configA.width * configA.height) - maxResPixelCount);
+    int pixelCountDiffB =
+        std::abs((configB.width * configB.height) - maxResPixelCount);
+
+    return pixelCountDiffA < pixelCountDiffB;
+  };
+
+  std::optional<SupportedStreamConfiguration> bestConfig;
+  for (const SupportedStreamConfiguration& inputConfig : supportedInputConfigs) {
+    Resolution inputConfigResolution = resolutionFromInputConfig(inputConfig);
+    if (inputConfigResolution < maxResolution ||
+        !isApproximatellySameAspectRatio(inputConfigResolution, maxResolution)) {
+      // We don't want to upscale from a lower resolution or use a different
+      // aspect ratio, so skip this configuration.
+      continue;
+    }
+
+    if (!bestConfig.has_value() ||
+        isBetterInputConfig(inputConfig, bestConfig.value())) {
+      bestConfig = inputConfig;
+    }
+  }
+
+  return bestConfig;
+}
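isApproximatellySameAspectRatio above comes from util/Util and is not shown in this diff. A rough sketch of the kind of check it is assumed to perform follows; the name hasRoughlySameAspectRatio and the 5% tolerance are illustrative assumptions, not the real implementation.

#include <algorithm>
#include <cstdint>

// Sketch: compare aspect ratios via cross-multiplication to avoid integer
// division, allowing roughly 5% relative difference.
bool hasRoughlySameAspectRatio(const Resolution a, const Resolution b) {
  const int64_t lhs = static_cast<int64_t>(a.width) * b.height;
  const int64_t rhs = static_cast<int64_t>(b.width) * a.height;
  const int64_t diff = lhs > rhs ? lhs - rhs : rhs - lhs;
  return diff * 20 <= std::max(lhs, rhs);
}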
+
+RequestSettings createSettingsFromMetadata(const CameraMetadata& metadata) {
+  return RequestSettings{
+      .jpegQuality = getJpegQuality(metadata).value_or(
+          VirtualCameraDevice::kDefaultJpegQuality),
+      .thumbnailResolution =
+          getJpegThumbnailSize(metadata).value_or(Resolution(0, 0)),
+      .thumbnailJpegQuality = getJpegThumbnailQuality(metadata).value_or(
+          VirtualCameraDevice::kDefaultJpegQuality)};
+}
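getJpegQuality, getJpegThumbnailSize, and getJpegThumbnailQuality come from util/MetadataUtil and are not shown here. The sketch below illustrates the kind of lookup such a helper performs, assuming the AIDL CameraMetadata wraps a serialized camera_metadata_t buffer; getJpegQualitySketch and its details are illustrative, not the real helper.

#include <optional>

#include <system/camera_metadata.h>

std::optional<int> getJpegQualitySketch(const CameraMetadata& metadata) {
  if (metadata.metadata.empty()) {
    return std::nullopt;
  }
  const camera_metadata_t* raw =
      reinterpret_cast<const camera_metadata_t*>(metadata.metadata.data());
  camera_metadata_ro_entry_t entry;
  if (find_camera_metadata_ro_entry(raw, ANDROID_JPEG_QUALITY, &entry) != 0 ||
      entry.count == 0) {
    return std::nullopt;
  }
  // ANDROID_JPEG_QUALITY is stored as a single byte in the range [1, 100].
  return static_cast<int>(entry.data.u8[0]);
}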
+
 }  // namespace
 
 VirtualCameraSession::VirtualCameraSession(
@@ -215,15 +328,13 @@
   halStreams.clear();
   halStreams.resize(in_requestedConfiguration.streams.size());
 
-  sp<Surface> inputSurface = nullptr;
-  int inputWidth;
-  int inputHeight;
-
   if (!virtualCamera->isStreamCombinationSupported(in_requestedConfiguration)) {
     ALOGE("%s: Requested stream configuration is not supported", __func__);
     return cameraStatus(Status::ILLEGAL_ARGUMENT);
   }
 
+  sp<Surface> inputSurface = nullptr;
+  std::optional<SupportedStreamConfiguration> inputConfig;
   {
     std::lock_guard<std::mutex> lock(mLock);
     for (int i = 0; i < in_requestedConfiguration.streams.size(); ++i) {
@@ -233,13 +344,21 @@
       }
     }
 
-    inputWidth = streams[0].width;
-    inputHeight = streams[0].height;
+    inputConfig = pickInputConfigurationForStreams(
+        streams, virtualCamera->getInputConfigs());
+    if (!inputConfig.has_value()) {
+      ALOGE(
+          "%s: Failed to pick any input configuration for stream configuration "
+          "request: %s",
+          __func__, in_requestedConfiguration.toString().c_str());
+      return cameraStatus(Status::ILLEGAL_ARGUMENT);
+    }
     if (mRenderThread == nullptr) {
       // If there's no client callback, start camera in test mode.
       const bool testMode = mVirtualCameraClientCallback == nullptr;
       mRenderThread = std::make_unique<VirtualCameraRenderThread>(
-          mSessionContext, inputWidth, inputHeight, mCameraDeviceCallback,
+          mSessionContext, resolutionFromInputConfig(*inputConfig),
+          virtualCamera->getMaxInputResolution(), mCameraDeviceCallback,
           testMode);
       mRenderThread->start();
       inputSurface = mRenderThread->getInputSurface();
@@ -252,10 +371,9 @@
     // create single texture.
     mVirtualCameraClientCallback->onStreamConfigured(
         /*streamId=*/0, aidl::android::view::Surface(inputSurface.get()),
-        inputWidth, inputHeight, Format::YUV_420_888);
+        inputConfig->width, inputConfig->height, inputConfig->pixelFormat);
   }
 
-  mFirstRequest.store(true);
   return ndk::ScopedAStatus::ok();
 }
 
@@ -263,12 +381,22 @@
     RequestTemplate in_type, CameraMetadata* _aidl_return) {
   ALOGV("%s: type %d", __func__, static_cast<int32_t>(in_type));
 
+  std::shared_ptr<VirtualCameraDevice> camera = mCameraDevice.lock();
+  if (camera == nullptr) {
+    ALOGW(
+        "%s: constructDefaultRequestSettings called on already unregistered "
+        "camera",
+        __func__);
+    return cameraStatus(Status::CAMERA_DISCONNECTED);
+  }
+
   switch (in_type) {
     case RequestTemplate::PREVIEW:
     case RequestTemplate::STILL_CAPTURE:
     case RequestTemplate::VIDEO_RECORD:
     case RequestTemplate::VIDEO_SNAPSHOT: {
-      *_aidl_return = createDefaultRequestSettings(in_type);
+      *_aidl_return =
+          createDefaultRequestSettings(in_type, camera->getInputConfigs());
       return ndk::ScopedAStatus::ok();
     }
     case RequestTemplate::MANUAL:
@@ -387,13 +515,25 @@
     const CaptureRequest& request) {
   ALOGD("%s: request: %s", __func__, request.toString().c_str());
 
-  if (mFirstRequest.exchange(false) && request.settings.metadata.empty()) {
-    return cameraStatus(Status::ILLEGAL_ARGUMENT);
-  }
-
   std::shared_ptr<ICameraDeviceCallback> cameraCallback = nullptr;
+  RequestSettings requestSettings;
   {
     std::lock_guard<std::mutex> lock(mLock);
+
+    // If the metadata is empty, the last received metadata applies;
+    // if it's non-empty, update it.
+    if (!request.settings.metadata.empty()) {
+      mCurrentRequestMetadata = request.settings;
+    }
+
+    // We don't have any metadata for this request - this means none was
+    // received in the first request, which is an error state.
+    if (mCurrentRequestMetadata.metadata.empty()) {
+      return cameraStatus(Status::ILLEGAL_ARGUMENT);
+    }
+
+    requestSettings = createSettingsFromMetadata(mCurrentRequestMetadata);
+
     cameraCallback = mCameraDeviceCallback;
   }
 
@@ -427,7 +567,7 @@
       return cameraStatus(Status::INTERNAL_ERROR);
     }
     mRenderThread->enqueueTask(std::make_unique<ProcessCaptureRequestTask>(
-        request.frameNumber, taskBuffers));
+        request.frameNumber, taskBuffers, requestSettings));
   }
 
   if (mVirtualCameraClientCallback != nullptr) {
diff --git a/services/camera/virtualcamera/VirtualCameraSession.h b/services/camera/virtualcamera/VirtualCameraSession.h
index 82a7a34..556314f 100644
--- a/services/camera/virtualcamera/VirtualCameraSession.h
+++ b/services/camera/virtualcamera/VirtualCameraSession.h
@@ -25,6 +25,7 @@
 #include "VirtualCameraSessionContext.h"
 #include "aidl/android/companion/virtualcamera/IVirtualCameraCallback.h"
 #include "aidl/android/hardware/camera/device/BnCameraDeviceSession.h"
+#include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "aidl/android/hardware/camera/device/ICameraDeviceCallback.h"
 #include "utils/Mutex.h"
 
@@ -138,7 +139,8 @@
       int8_t, ::aidl::android::hardware::common::fmq::SynchronizedReadWrite>;
   std::shared_ptr<ResultMetadataQueue> mResultMetadataQueue;
 
-  std::atomic_bool mFirstRequest{true};
+  aidl::android::hardware::camera::device::CameraMetadata mCurrentRequestMetadata
+      GUARDED_BY(mLock);
 
   std::unique_ptr<VirtualCameraRenderThread> mRenderThread GUARDED_BY(mLock);
 };
diff --git a/services/camera/virtualcamera/aidl/Android.bp b/services/camera/virtualcamera/aidl/Android.bp
index fdeb7f2..b3fe3ad 100644
--- a/services/camera/virtualcamera/aidl/Android.bp
+++ b/services/camera/virtualcamera/aidl/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_xr_framework",
     // See: http://go/android-license-faq
     default_applicable_licenses: ["Android-Apache-2.0"],
 }
@@ -8,9 +9,11 @@
     unstable: true,
     srcs: [
         "android/companion/virtualcamera/Format.aidl",
+        "android/companion/virtualcamera/LensFacing.aidl",
         "android/companion/virtualcamera/IVirtualCameraCallback.aidl",
         "android/companion/virtualcamera/IVirtualCameraService.aidl",
         "android/companion/virtualcamera/VirtualCameraConfiguration.aidl",
+        "android/companion/virtualcamera/SensorOrientation.aidl",
         "android/companion/virtualcamera/SupportedStreamConfiguration.aidl",
     ],
     local_include_dir: ".",
@@ -34,6 +37,6 @@
         java: {
             enabled: true,
             platform_apis: true,
-        }
+        },
     },
 }
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/IVirtualCameraCallback.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/IVirtualCameraCallback.aidl
index cbe03e9..f5a84f7 100644
--- a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/IVirtualCameraCallback.aidl
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/IVirtualCameraCallback.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -36,7 +36,8 @@
      * @param height - height of the surface.
      * @param pixelFormat - pixel format of the surface.
      */
-    void onStreamConfigured(int streamId, in Surface surface, int width, int height, in Format pixelFormat);
+    void onStreamConfigured(int streamId, in Surface surface, int width, int height,
+            in Format pixelFormat);
 
     /**
      * Called when framework requests capture. This can be used by the client as a hint
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/LensFacing.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/LensFacing.aidl
new file mode 100644
index 0000000..8568c91
--- /dev/null
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/LensFacing.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.companion.virtualcamera;
+
+/**
+ * Direction that the virtual camera faces relative to the device's screen.
+ *
+ * @hide
+ */
+@Backing(type="int")
+enum LensFacing {
+    FRONT = 0,
+    BACK = 1,
+    EXTERNAL = 2,
+}
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SensorOrientation.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SensorOrientation.aidl
new file mode 100644
index 0000000..ef91f00
--- /dev/null
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SensorOrientation.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.companion.virtualcamera;
+
+/**
+ * Sensor orientation of a virtual camera.
+ *
+ * @hide
+ */
+@Backing(type="int")
+enum SensorOrientation {
+    ORIENTATION_0 = 0,
+    ORIENTATION_90 = 90,
+    ORIENTATION_180 = 180,
+    ORIENTATION_270 = 270,
+}
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SupportedStreamConfiguration.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SupportedStreamConfiguration.aidl
index 7070cbd..6f86cbe 100644
--- a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SupportedStreamConfiguration.aidl
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/SupportedStreamConfiguration.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.companion.virtualcamera;
 
 import android.companion.virtualcamera.Format;
@@ -26,4 +27,5 @@
     int width;
     int height;
     Format pixelFormat = Format.UNKNOWN;
+    int maxFps;
 }
diff --git a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/VirtualCameraConfiguration.aidl b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/VirtualCameraConfiguration.aidl
index c1a2f22..887ad26 100644
--- a/services/camera/virtualcamera/aidl/android/companion/virtualcamera/VirtualCameraConfiguration.aidl
+++ b/services/camera/virtualcamera/aidl/android/companion/virtualcamera/VirtualCameraConfiguration.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,9 +13,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package android.companion.virtualcamera;
 
 import android.companion.virtualcamera.IVirtualCameraCallback;
+import android.companion.virtualcamera.LensFacing;
+import android.companion.virtualcamera.SensorOrientation;
 import android.companion.virtualcamera.SupportedStreamConfiguration;
 
 /**
@@ -26,4 +29,6 @@
 parcelable VirtualCameraConfiguration {
     SupportedStreamConfiguration[] supportedStreamConfigs;
     IVirtualCameraCallback virtualCameraCallback;
+    SensorOrientation sensorOrientation = SensorOrientation.ORIENTATION_0;
+    LensFacing lensFacing;
 }
diff --git a/services/camera/virtualcamera/flags/Android.bp b/services/camera/virtualcamera/flags/Android.bp
new file mode 100644
index 0000000..5fa8852
--- /dev/null
+++ b/services/camera/virtualcamera/flags/Android.bp
@@ -0,0 +1,37 @@
+package {
+    default_team: "trendy_team_xr_framework",
+}
+
+soong_config_module_type {
+    name: "virtual_device_build_flags_cc_defaults",
+    module_type: "cc_defaults",
+    config_namespace: "vdm",
+    bool_variables: [
+        "virtual_camera_service_enabled",
+    ],
+    properties: [
+        "cflags",
+    ],
+}
+
+soong_config_bool_variable {
+    name: "virtual_camera_service_enabled",
+}
+
+virtual_device_build_flags_cc_defaults {
+    name: "virtual_device_build_flags_defaults",
+    soong_config_variables: {
+        virtual_camera_service_enabled: {
+            cflags: ["-DVIRTUAL_CAMERA_SERVICE_ENABLED=1"],
+        },
+    },
+}
+
+cc_library_static {
+    name: "libvirtualdevicebuildflags",
+    srcs: [
+        "android_companion_virtualdevice_build_flags.cc",
+    ],
+    export_include_dirs: ["."],
+    defaults: ["virtual_device_build_flags_defaults"],
+}
diff --git a/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.cc b/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.cc
new file mode 100644
index 0000000..5525bc9
--- /dev/null
+++ b/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace android {
+namespace companion {
+namespace virtualdevice {
+namespace flags {
+
+bool virtual_camera_service_build_flag() {
+#if VIRTUAL_CAMERA_SERVICE_ENABLED
+  return true;
+#else
+  return false;
+#endif
+}
+
+}  // namespace flags
+}  // namespace virtualdevice
+}  // namespace companion
+}  // namespace android
diff --git a/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.h b/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.h
new file mode 100644
index 0000000..718ce9b
--- /dev/null
+++ b/services/camera/virtualcamera/flags/android_companion_virtualdevice_build_flags.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace android {
+namespace companion {
+namespace virtualdevice {
+namespace flags {
+
+// Returns true if the virtual camera service is enabled
+// in the build.
+//
+// TODO(b/309090563) - Deprecate in favor of autogened library to query build
+// flags once available.
+bool virtual_camera_service_build_flag();
+
+}  // namespace flags
+}  // namespace virtualdevice
+}  // namespace companion
+}  // namespace android
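A hypothetical usage sketch follows, showing how a caller might gate virtual camera setup on this flag; maybeStartVirtualCameraService and the registration step are illustrative assumptions only.

#include "android_companion_virtualdevice_build_flags.h"

namespace {

void maybeStartVirtualCameraService() {
  if (!android::companion::virtualdevice::flags::
          virtual_camera_service_build_flag()) {
    // Virtual camera support is compiled out of this build; nothing to do.
    return;
  }
  // ... register the virtual camera provider / service here ...
}

}  // namespace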
diff --git a/services/camera/virtualcamera/fuzzer/Android.bp b/services/camera/virtualcamera/fuzzer/Android.bp
index 71e8f50..6a72167 100644
--- a/services/camera/virtualcamera/fuzzer/Android.bp
+++ b/services/camera/virtualcamera/fuzzer/Android.bp
@@ -15,7 +15,8 @@
  * limitations under the License.
  *
  *****************************************************************************/
- package {
+package {
+    default_team: "trendy_team_xr_framework",
     // See: http://go/android-license-faq
     default_applicable_licenses: ["Android-Apache-2.0"],
 }
diff --git a/services/camera/virtualcamera/tests/Android.bp b/services/camera/virtualcamera/tests/Android.bp
index bc46ba0..c51b4a3 100644
--- a/services/camera/virtualcamera/tests/Android.bp
+++ b/services/camera/virtualcamera/tests/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_xr_framework",
     // See: http://go/android-license-faq
     default_applicable_licenses: ["Android-Apache-2.0"],
 }
@@ -14,11 +15,13 @@
         "libgtest",
         "libgmock",
     ],
-    srcs: ["EglUtilTest.cc",
-           "VirtualCameraDeviceTest.cc",
-           "VirtualCameraProviderTest.cc",
-           "VirtualCameraRenderThreadTest.cc",
-           "VirtualCameraServiceTest.cc",
-           "VirtualCameraSessionTest.cc"],
+    srcs: [
+        "EglUtilTest.cc",
+        "VirtualCameraDeviceTest.cc",
+        "VirtualCameraProviderTest.cc",
+        "VirtualCameraRenderThreadTest.cc",
+        "VirtualCameraServiceTest.cc",
+        "VirtualCameraSessionTest.cc",
+    ],
     test_suites: ["device-tests"],
 }
diff --git a/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc b/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc
index 140ae65..ad9d83b 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,18 +14,24 @@
  * limitations under the License.
  */
 
+#include <algorithm>
+#include <iterator>
 #include <memory>
 
 #include "VirtualCameraDevice.h"
 #include "aidl/android/companion/virtualcamera/Format.h"
 #include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
+#include "aidl/android/companion/virtualcamera/VirtualCameraConfiguration.h"
 #include "aidl/android/hardware/camera/device/CameraMetadata.h"
 #include "aidl/android/hardware/camera/device/StreamConfiguration.h"
+#include "aidl/android/hardware/graphics/common/PixelFormat.h"
 #include "android/binder_interface_utils.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "log/log_main.h"
 #include "system/camera_metadata.h"
+#include "util/MetadataUtil.h"
+#include "util/Util.h"
 #include "utils/Errors.h"
 
 namespace android {
@@ -34,27 +40,51 @@
 namespace {
 
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::device::CameraMetadata;
 using ::aidl::android::hardware::camera::device::Stream;
 using ::aidl::android::hardware::camera::device::StreamConfiguration;
 using ::aidl::android::hardware::camera::device::StreamType;
 using ::aidl::android::hardware::graphics::common::PixelFormat;
+using ::testing::ElementsAre;
 using ::testing::UnorderedElementsAreArray;
 using metadata_stream_t =
     camera_metadata_enum_android_scaler_available_stream_configurations_t;
 
 constexpr int kCameraId = 42;
+constexpr int kQvgaWidth = 320;
+constexpr int kQvgaHeight = 240;
+constexpr int k360pWidth = 640;
+constexpr int k360pHeight = 360;
 constexpr int kVgaWidth = 640;
 constexpr int kVgaHeight = 480;
 constexpr int kHdWidth = 1280;
 constexpr int kHdHeight = 720;
+constexpr int kMaxFps = 30;
+
+const Stream kVgaYUV420Stream = Stream{
+    .streamType = StreamType::OUTPUT,
+    .width = kVgaWidth,
+    .height = kVgaHeight,
+    .format = PixelFormat::YCBCR_420_888,
+};
+
+const Stream kVgaJpegStream = Stream{
+    .streamType = StreamType::OUTPUT,
+    .width = kVgaWidth,
+    .height = kVgaHeight,
+    .format = PixelFormat::BLOB,
+};
 
 struct AvailableStreamConfiguration {
   const int width;
   const int height;
   const int pixelFormat;
-  const metadata_stream_t streamConfiguration;
+  const metadata_stream_t streamConfiguration =
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT;
 };
 
 bool operator==(const AvailableStreamConfiguration& a,
@@ -96,18 +126,19 @@
 }
 
 struct VirtualCameraConfigTestParam {
-  std::vector<SupportedStreamConfiguration> inputConfig;
+  VirtualCameraConfiguration inputConfig;
   std::vector<AvailableStreamConfiguration> expectedAvailableStreamConfigs;
 };
 
-class VirtualCameraDeviceTest
+class VirtualCameraDeviceCharacterisicsTest
     : public testing::TestWithParam<VirtualCameraConfigTestParam> {};
 
-TEST_P(VirtualCameraDeviceTest, cameraCharacteristicsForInputFormat) {
+TEST_P(VirtualCameraDeviceCharacterisicsTest,
+       cameraCharacteristicsForInputFormat) {
   const VirtualCameraConfigTestParam& param = GetParam();
   std::shared_ptr<VirtualCameraDevice> camera =
-      ndk::SharedRefBase::make<VirtualCameraDevice>(
-          kCameraId, param.inputConfig, /*virtualCameraClientCallback=*/nullptr);
+      ndk::SharedRefBase::make<VirtualCameraDevice>(kCameraId,
+                                                    param.inputConfig);
 
   CameraMetadata metadata;
   ASSERT_TRUE(camera->getCameraCharacteristics(&metadata).isOk());
@@ -132,81 +163,202 @@
 }
 
 INSTANTIATE_TEST_SUITE_P(
-    cameraCharacteristicsForInputFormat, VirtualCameraDeviceTest,
+    cameraCharacteristicsForInputFormat, VirtualCameraDeviceCharacterisicsTest,
     testing::Values(
         VirtualCameraConfigTestParam{
-            .inputConfig = {SupportedStreamConfiguration{
-                .width = kVgaWidth,
-                .height = kVgaHeight,
-                .pixelFormat = Format::YUV_420_888}},
+            .inputConfig =
+                VirtualCameraConfiguration{
+                    .supportedStreamConfigs = {SupportedStreamConfiguration{
+                        .width = kVgaWidth,
+                        .height = kVgaHeight,
+                        .pixelFormat = Format::YUV_420_888,
+                        .maxFps = kMaxFps}},
+                    .virtualCameraCallback = nullptr,
+                    .sensorOrientation = SensorOrientation::ORIENTATION_0,
+                    .lensFacing = LensFacing::FRONT},
             .expectedAvailableStreamConfigs =
                 {AvailableStreamConfiguration{
-                     .width = kVgaWidth,
-                     .height = kVgaHeight,
-                     .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
-                     .streamConfiguration =
-                         ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+                     .width = kQvgaWidth,
+                     .height = kQvgaHeight,
+                     .pixelFormat =
+                         ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
+                 AvailableStreamConfiguration{
+                     .width = kQvgaWidth,
+                     .height = kQvgaHeight,
+                     .pixelFormat =
+                         ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+                 AvailableStreamConfiguration{
+                     .width = kQvgaWidth,
+                     .height = kQvgaHeight,
+                     .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
                  AvailableStreamConfiguration{
                      .width = kVgaWidth,
                      .height = kVgaHeight,
                      .pixelFormat =
-                         ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
-                     .streamConfiguration =
-                         ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+                         ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
                  AvailableStreamConfiguration{
                      .width = kVgaWidth,
                      .height = kVgaHeight,
-                     .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
-                     .streamConfiguration =
-                         ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT}}},
+                     .pixelFormat =
+                         ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+                 AvailableStreamConfiguration{
+                     .width = kVgaWidth,
+                     .height = kVgaHeight,
+                     .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB}}},
         VirtualCameraConfigTestParam{
-            .inputConfig = {SupportedStreamConfiguration{
-                                .width = kVgaWidth,
-                                .height = kVgaHeight,
-                                .pixelFormat = Format::YUV_420_888},
-                            SupportedStreamConfiguration{
-                                .width = kHdWidth,
-                                .height = kHdHeight,
-                                .pixelFormat = Format::YUV_420_888}},
+            .inputConfig =
+                VirtualCameraConfiguration{
+                    .supportedStreamConfigs =
+                        {SupportedStreamConfiguration{
+                             .width = kVgaWidth,
+                             .height = kVgaHeight,
+                             .pixelFormat = Format::YUV_420_888,
+                             .maxFps = kMaxFps},
+                         SupportedStreamConfiguration{
+                             .width = kHdWidth,
+                             .height = kHdHeight,
+                             .pixelFormat = Format::YUV_420_888,
+                             .maxFps = kMaxFps}},
+                    .virtualCameraCallback = nullptr,
+                    .sensorOrientation = SensorOrientation::ORIENTATION_0,
+                    .lensFacing = LensFacing::BACK},
             .expectedAvailableStreamConfigs = {
                 AvailableStreamConfiguration{
+                    .width = kQvgaWidth,
+                    .height = kQvgaHeight,
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
+                AvailableStreamConfiguration{
+                    .width = kQvgaWidth,
+                    .height = kQvgaHeight,
+                    .pixelFormat =
+                        ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+                AvailableStreamConfiguration{
+                    .width = kQvgaWidth,
+                    .height = kQvgaHeight,
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
+                AvailableStreamConfiguration{
+                    .width = 640,
+                    .height = 360,
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
+                AvailableStreamConfiguration{
+                    .width = 640,
+                    .height = 360,
+                    .pixelFormat =
+                        ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+                AvailableStreamConfiguration{
+                    .width = 640,
+                    .height = 360,
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
+                AvailableStreamConfiguration{
                     .width = kVgaWidth,
                     .height = kVgaHeight,
-                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
-                    .streamConfiguration =
-                        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
                 AvailableStreamConfiguration{
                     .width = kVgaWidth,
                     .height = kVgaHeight,
                     .pixelFormat =
-                        ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
-                    .streamConfiguration =
-                        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+                        ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
                 AvailableStreamConfiguration{
                     .width = kVgaWidth,
                     .height = kVgaHeight,
-                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
-                    .streamConfiguration =
-                        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
+                AvailableStreamConfiguration{
+                    .width = 1024,
+                    .height = 576,
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
+                AvailableStreamConfiguration{
+                    .width = 1024,
+                    .height = 576,
+                    .pixelFormat =
+                        ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+                AvailableStreamConfiguration{
+                    .width = 1024,
+                    .height = 576,
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
                 AvailableStreamConfiguration{
                     .width = kHdWidth,
                     .height = kHdHeight,
-                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
-                    .streamConfiguration =
-                        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
                 AvailableStreamConfiguration{
                     .width = kHdWidth,
                     .height = kHdHeight,
                     .pixelFormat =
-                        ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
-                    .streamConfiguration =
-                        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+                        ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
                 AvailableStreamConfiguration{
                     .width = kHdWidth,
                     .height = kHdHeight,
-                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
-                    .streamConfiguration =
-                        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT}}}));
+                    .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB}}}));
+
+class VirtualCameraDeviceTest : public ::testing::Test {
+ public:
+  void SetUp() override {
+    mCamera = ndk::SharedRefBase::make<VirtualCameraDevice>(
+        kCameraId, VirtualCameraConfiguration{
+                       .supportedStreamConfigs = {SupportedStreamConfiguration{
+                           .width = kVgaWidth,
+                           .height = kVgaHeight,
+                           .pixelFormat = Format::YUV_420_888,
+                           .maxFps = kMaxFps}},
+                       .virtualCameraCallback = nullptr,
+                       .sensorOrientation = SensorOrientation::ORIENTATION_0,
+                       .lensFacing = LensFacing::FRONT});
+  }
+
+ protected:
+  std::shared_ptr<VirtualCameraDevice> mCamera;
+};
+
+TEST_F(VirtualCameraDeviceTest, configureMaximalNumberOfNonStallStreamsSucceeds) {
+  StreamConfiguration config;
+  std::fill_n(std::back_insert_iterator(config.streams),
+              VirtualCameraDevice::kMaxNumberOfProcessedStreams,
+              kVgaYUV420Stream);
+
+  bool aidl_ret;
+  ASSERT_TRUE(mCamera->isStreamCombinationSupported(config, &aidl_ret).isOk());
+  EXPECT_TRUE(aidl_ret);
+}
+
+TEST_F(VirtualCameraDeviceTest, configureTooManyNonStallStreamsFails) {
+  StreamConfiguration config;
+  std::fill_n(std::back_insert_iterator(config.streams),
+              VirtualCameraDevice::kMaxNumberOfProcessedStreams + 1,
+              kVgaYUV420Stream);
+
+  bool aidl_ret;
+  ASSERT_TRUE(mCamera->isStreamCombinationSupported(config, &aidl_ret).isOk());
+  EXPECT_FALSE(aidl_ret);
+}
+
+TEST_F(VirtualCameraDeviceTest, configureMaximalNumberOfStallStreamsSucceeds) {
+  StreamConfiguration config;
+  std::fill_n(std::back_insert_iterator(config.streams),
+              VirtualCameraDevice::kMaxNumberOfStallStreams, kVgaJpegStream);
+
+  bool aidl_ret;
+  ASSERT_TRUE(mCamera->isStreamCombinationSupported(config, &aidl_ret).isOk());
+  EXPECT_TRUE(aidl_ret);
+}
+
+TEST_F(VirtualCameraDeviceTest, configureTooManyStallStreamsFails) {
+  StreamConfiguration config;
+  std::fill_n(std::back_insert_iterator(config.streams),
+              VirtualCameraDevice::kMaxNumberOfStallStreams + 1, kVgaJpegStream);
+
+  bool aidl_ret;
+  ASSERT_TRUE(mCamera->isStreamCombinationSupported(config, &aidl_ret).isOk());
+  EXPECT_FALSE(aidl_ret);
+}
+
+TEST_F(VirtualCameraDeviceTest, thumbnailSizeWithCompatibleAspectRatio) {
+  CameraMetadata metadata;
+  ASSERT_TRUE(mCamera->getCameraCharacteristics(&metadata).isOk());
+
+  // The camera is configured with VGA input, so we expect a 240 x 180 thumbnail
+  // size in the characteristics, since it has the same 4:3 aspect ratio.
+  EXPECT_THAT(getJpegAvailableThumbnailSizes(metadata),
+              ElementsAre(Resolution(0, 0), Resolution(240, 180)));
+}
 
 }  // namespace
 }  // namespace virtualcamera
diff --git a/services/camera/virtualcamera/tests/VirtualCameraProviderTest.cc b/services/camera/virtualcamera/tests/VirtualCameraProviderTest.cc
index 615a77c..ab647a4 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraProviderTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraProviderTest.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -33,7 +33,10 @@
 namespace {
 
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::CameraDeviceStatus;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::common::TorchModeStatus;
@@ -49,6 +52,7 @@
 
 constexpr int kVgaWidth = 640;
 constexpr int kVgaHeight = 480;
+constexpr int kMaxFps = 30;
 constexpr char kVirtualCameraNameRegex[] =
     "device@[0-9]+\\.[0-9]+/virtual/[0-9]+";
 
@@ -79,10 +83,15 @@
   std::shared_ptr<VirtualCameraProvider> mCameraProvider;
   std::shared_ptr<MockCameraProviderCallback> mMockCameraProviderCallback =
       ndk::SharedRefBase::make<MockCameraProviderCallback>();
-  std::vector<SupportedStreamConfiguration> mInputConfigs = {
-      SupportedStreamConfiguration{.width = kVgaWidth,
-                                   .height = kVgaHeight,
-                                   .pixelFormat = Format::YUV_420_888}};
+  VirtualCameraConfiguration mInputConfig = VirtualCameraConfiguration{
+      .supportedStreamConfigs = {SupportedStreamConfiguration{
+          .width = kVgaWidth,
+          .height = kVgaHeight,
+          .pixelFormat = Format::YUV_420_888,
+          .maxFps = kMaxFps}},
+      .virtualCameraCallback = nullptr,
+      .sensorOrientation = SensorOrientation::ORIENTATION_0,
+      .lensFacing = LensFacing::FRONT};
 };
 
 TEST_F(VirtualCameraProviderTest, SetNullCameraCallbackFails) {
@@ -109,7 +118,7 @@
 
   ASSERT_TRUE(mCameraProvider->setCallback(mMockCameraProviderCallback).isOk());
   std::shared_ptr<VirtualCameraDevice> camera =
-      mCameraProvider->createCamera(mInputConfigs);
+      mCameraProvider->createCamera(mInputConfig);
   EXPECT_THAT(camera, Not(IsNull()));
   EXPECT_THAT(camera->getCameraName(), MatchesRegex(kVirtualCameraNameRegex));
 
@@ -127,7 +136,7 @@
       .WillOnce(Return(ndk::ScopedAStatus::ok()));
 
   std::shared_ptr<VirtualCameraDevice> camera =
-      mCameraProvider->createCamera(mInputConfigs);
+      mCameraProvider->createCamera(mInputConfig);
   ASSERT_TRUE(mCameraProvider->setCallback(mMockCameraProviderCallback).isOk());
 
   // Created camera should be in the list of cameras.
@@ -139,7 +148,7 @@
 TEST_F(VirtualCameraProviderTest, RemoveCamera) {
   ASSERT_TRUE(mCameraProvider->setCallback(mMockCameraProviderCallback).isOk());
   std::shared_ptr<VirtualCameraDevice> camera =
-      mCameraProvider->createCamera(mInputConfigs);
+      mCameraProvider->createCamera(mInputConfig);
 
   EXPECT_CALL(*mMockCameraProviderCallback,
               cameraDeviceStatusChange(Eq(camera->getCameraName()),
@@ -156,7 +165,7 @@
 TEST_F(VirtualCameraProviderTest, RemoveNonExistingCamera) {
   ASSERT_TRUE(mCameraProvider->setCallback(mMockCameraProviderCallback).isOk());
   std::shared_ptr<VirtualCameraDevice> camera =
-      mCameraProvider->createCamera(mInputConfigs);
+      mCameraProvider->createCamera(mInputConfig);
 
   // Removing non-existing camera should fail.
   const std::string cameraName = "DefinitelyNoTCamera";
diff --git a/services/camera/virtualcamera/tests/VirtualCameraRenderThreadTest.cc b/services/camera/virtualcamera/tests/VirtualCameraRenderThreadTest.cc
index 5f899b8..ddcb789 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraRenderThreadTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraRenderThreadTest.cc
@@ -33,6 +33,7 @@
 #include "android/binder_auto_utils.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "util/Util.h"
 
 namespace android {
 namespace companion {
@@ -62,6 +63,7 @@
 
 constexpr int kInputWidth = 640;
 constexpr int kInputHeight = 480;
+const Resolution kInputResolution(kInputWidth, kInputHeight);
 
 Matcher<StreamBuffer> IsStreamBufferWithStatus(const int streamId,
                                                const int bufferId,
@@ -102,7 +104,8 @@
     mMockCameraDeviceCallback =
         ndk::SharedRefBase::make<MockCameraDeviceCallback>();
     mRenderThread = std::make_unique<VirtualCameraRenderThread>(
-        *mSessionContext, kInputWidth, kInputHeight, mMockCameraDeviceCallback);
+        *mSessionContext, kInputResolution,
+        /*reportedSensorSize*/ kInputResolution, mMockCameraDeviceCallback);
   }
 
  protected:
diff --git a/services/camera/virtualcamera/tests/VirtualCameraServiceTest.cc b/services/camera/virtualcamera/tests/VirtualCameraServiceTest.cc
index 38261fb..d4d00a2 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraServiceTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraServiceTest.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -39,6 +39,8 @@
 
 using ::aidl::android::companion::virtualcamera::BnVirtualCameraCallback;
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::CameraDeviceStatus;
 using ::aidl::android::hardware::camera::common::TorchModeStatus;
@@ -56,16 +58,25 @@
 
 constexpr int kVgaWidth = 640;
 constexpr int kVgaHeight = 480;
+constexpr int kMaxFps = 30;
+constexpr SensorOrientation kSensorOrientation =
+    SensorOrientation::ORIENTATION_0;
+constexpr LensFacing kLensFacing = LensFacing::FRONT;
 constexpr char kCreateVirtualDevicePermissions[] =
     "android.permission.CREATE_VIRTUAL_DEVICE";
 
 const VirtualCameraConfiguration kEmptyVirtualCameraConfiguration;
 
 VirtualCameraConfiguration createConfiguration(const int width, const int height,
-                                               const Format format) {
+                                               const Format format,
+                                               const int maxFps) {
   VirtualCameraConfiguration configuration;
-  configuration.supportedStreamConfigs.push_back(
-      {.width = width, .height = height, .pixelFormat = format});
+  configuration.supportedStreamConfigs.push_back({.width = width,
+                                                  .height = height,
+                                                  .pixelFormat = format,
+                                                  .maxFps = maxFps});
+  configuration.sensorOrientation = kSensorOrientation;
+  configuration.lensFacing = kLensFacing;
   return configuration;
 }
 
@@ -150,7 +161,7 @@
   int mDevNullFd;
 
   VirtualCameraConfiguration mVgaYUV420OnlyConfiguration =
-      createConfiguration(kVgaWidth, kVgaHeight, Format::YUV_420_888);
+      createConfiguration(kVgaWidth, kVgaHeight, Format::YUV_420_888, kMaxFps);
 };
 
 TEST_F(VirtualCameraServiceTest, RegisterCameraWithYuvInputSucceeds) {
@@ -173,7 +184,7 @@
   bool aidlRet;
 
   VirtualCameraConfiguration config =
-      createConfiguration(kVgaWidth, kVgaHeight, Format::RGBA_8888);
+      createConfiguration(kVgaWidth, kVgaHeight, Format::RGBA_8888, kMaxFps);
 
   ASSERT_TRUE(mCameraService->registerCamera(ndkToken, config, &aidlRet).isOk());
 
@@ -208,7 +219,7 @@
   bool aidlRet;
 
   VirtualCameraConfiguration config =
-      createConfiguration(kVgaWidth, kVgaHeight, Format::UNKNOWN);
+      createConfiguration(kVgaWidth, kVgaHeight, Format::UNKNOWN, kMaxFps);
 
   ASSERT_FALSE(
       mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
@@ -219,7 +230,7 @@
 TEST_F(VirtualCameraServiceTest, ConfigurationWithTooHighResFails) {
   bool aidlRet;
   VirtualCameraConfiguration config =
-      createConfiguration(1000000, 1000000, Format::YUV_420_888);
+      createConfiguration(1000000, 1000000, Format::YUV_420_888, kMaxFps);
 
   ASSERT_FALSE(
       mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
@@ -230,7 +241,7 @@
 TEST_F(VirtualCameraServiceTest, ConfigurationWithUnalignedResolutionFails) {
   bool aidlRet;
   VirtualCameraConfiguration config =
-      createConfiguration(641, 481, Format::YUV_420_888);
+      createConfiguration(641, 481, Format::YUV_420_888, kMaxFps);
 
   ASSERT_FALSE(
       mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
@@ -241,7 +252,29 @@
 TEST_F(VirtualCameraServiceTest, ConfigurationWithNegativeResolutionFails) {
   bool aidlRet;
   VirtualCameraConfiguration config =
-      createConfiguration(-1, kVgaHeight, Format::YUV_420_888);
+      createConfiguration(-1, kVgaHeight, Format::YUV_420_888, kMaxFps);
+
+  ASSERT_FALSE(
+      mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
+  EXPECT_FALSE(aidlRet);
+  EXPECT_THAT(getCameraIds(), IsEmpty());
+}
+
+TEST_F(VirtualCameraServiceTest, ConfigurationWithTooLowMaxFpsFails) {
+  bool aidlRet;
+  VirtualCameraConfiguration config =
+      createConfiguration(kVgaWidth, kVgaHeight, Format::YUV_420_888, 0);
+
+  ASSERT_FALSE(
+      mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
+  EXPECT_FALSE(aidlRet);
+  EXPECT_THAT(getCameraIds(), IsEmpty());
+}
+
+TEST_F(VirtualCameraServiceTest, ConfigurationWithTooHighMaxFpsFails) {
+  bool aidlRet;
+  VirtualCameraConfiguration config =
+      createConfiguration(kVgaWidth, kVgaHeight, Format::YUV_420_888, 90);
 
   ASSERT_FALSE(
       mCameraService->registerCamera(mNdkOwnerToken, config, &aidlRet).isOk());
diff --git a/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc b/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
index 30bd2b6..5f313a0 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023 The Android Open Source Project
+ * Copyright 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
 #include "VirtualCameraSession.h"
 #include "aidl/android/companion/virtualcamera/BnVirtualCameraCallback.h"
 #include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
+#include "aidl/android/companion/virtualcamera/VirtualCameraConfiguration.h"
 #include "aidl/android/hardware/camera/common/Status.h"
 #include "aidl/android/hardware/camera/device/BnCameraDeviceCallback.h"
 #include "aidl/android/hardware/camera/device/StreamConfiguration.h"
@@ -29,21 +30,30 @@
 #include "android/binder_interface_utils.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "util/MetadataBuilder.h"
+#include "util/MetadataUtil.h"
 
 namespace android {
 namespace companion {
 namespace virtualcamera {
 namespace {
 
-constexpr int kWidth = 640;
-constexpr int kHeight = 480;
+constexpr int kQvgaWidth = 320;
+constexpr int kQvgaHeight = 240;
+constexpr int kVgaWidth = 640;
+constexpr int kVgaHeight = 480;
+constexpr int kSvgaWidth = 800;
+constexpr int kSvgaHeight = 600;
+constexpr int kMaxFps = 30;
 constexpr int kStreamId = 0;
+constexpr int kSecondStreamId = 1;
 constexpr int kCameraId = 42;
 
 using ::aidl::android::companion::virtualcamera::BnVirtualCameraCallback;
 using ::aidl::android::companion::virtualcamera::Format;
+using ::aidl::android::companion::virtualcamera::LensFacing;
+using ::aidl::android::companion::virtualcamera::SensorOrientation;
 using ::aidl::android::companion::virtualcamera::SupportedStreamConfiguration;
+using ::aidl::android::companion::virtualcamera::VirtualCameraConfiguration;
 using ::aidl::android::hardware::camera::common::Status;
 using ::aidl::android::hardware::camera::device::BnCameraDeviceCallback;
 using ::aidl::android::hardware::camera::device::BufferRequest;
@@ -96,23 +106,13 @@
   MOCK_METHOD(ndk::ScopedAStatus, onStreamClosed, (int), (override));
 };
 
-class VirtualCameraSessionTest : public ::testing::Test {
+class VirtualCameraSessionTestBase : public ::testing::Test {
  public:
-  void SetUp() override {
+  virtual void SetUp() override {
     mMockCameraDeviceCallback =
         ndk::SharedRefBase::make<MockCameraDeviceCallback>();
     mMockVirtualCameraClientCallback =
         ndk::SharedRefBase::make<MockVirtualCameraCallback>();
-    mVirtualCameraDevice = ndk::SharedRefBase::make<VirtualCameraDevice>(
-        kCameraId,
-        std::vector<SupportedStreamConfiguration>{
-            SupportedStreamConfiguration{.width = kWidth,
-                                         .height = kHeight,
-                                         .pixelFormat = Format::YUV_420_888}},
-        mMockVirtualCameraClientCallback);
-    mVirtualCameraSession = ndk::SharedRefBase::make<VirtualCameraSession>(
-        mVirtualCameraDevice, mMockCameraDeviceCallback,
-        mMockVirtualCameraClientCallback);
 
     // Explicitly defining default actions below to prevent gmock from
     // default-constructing ndk::ScopedAStatus, because default-constructed
@@ -138,6 +138,35 @@
  protected:
   std::shared_ptr<MockCameraDeviceCallback> mMockCameraDeviceCallback;
   std::shared_ptr<MockVirtualCameraCallback> mMockVirtualCameraClientCallback;
+};
+
+class VirtualCameraSessionTest : public VirtualCameraSessionTestBase {
+ public:
+  void SetUp() override {
+    VirtualCameraSessionTestBase::SetUp();
+
+    mVirtualCameraDevice = ndk::SharedRefBase::make<VirtualCameraDevice>(
+        kCameraId,
+        VirtualCameraConfiguration{
+            .supportedStreamConfigs = {SupportedStreamConfiguration{
+                                           .width = kVgaWidth,
+                                           .height = kVgaHeight,
+                                           .pixelFormat = Format::YUV_420_888,
+                                           .maxFps = kMaxFps},
+                                       SupportedStreamConfiguration{
+                                           .width = kSvgaWidth,
+                                           .height = kSvgaHeight,
+                                           .pixelFormat = Format::YUV_420_888,
+                                           .maxFps = kMaxFps}},
+            .virtualCameraCallback = mMockVirtualCameraClientCallback,
+            .sensorOrientation = SensorOrientation::ORIENTATION_0,
+            .lensFacing = LensFacing::FRONT});
+    mVirtualCameraSession = ndk::SharedRefBase::make<VirtualCameraSession>(
+        mVirtualCameraDevice, mMockCameraDeviceCallback,
+        mMockVirtualCameraClientCallback);
+  }
+
+ protected:
   std::shared_ptr<VirtualCameraDevice> mVirtualCameraDevice;
   std::shared_ptr<VirtualCameraSession> mVirtualCameraSession;
 };
@@ -146,18 +175,22 @@
   PixelFormat format = PixelFormat::YCBCR_420_888;
   StreamConfiguration streamConfiguration;
   streamConfiguration.streams = {
-      createStream(kStreamId, kWidth, kHeight, format)};
+      createStream(kStreamId, kVgaWidth, kVgaHeight, format),
+      createStream(kSecondStreamId, kSvgaWidth, kSvgaHeight, format)};
   std::vector<HalStream> halStreams;
-  EXPECT_CALL(
-      *mMockVirtualCameraClientCallback,
-      onStreamConfigured(kStreamId, _, kWidth, kHeight, Format::YUV_420_888));
+
+  // Expect highest resolution to be picked for the client input.
+  EXPECT_CALL(*mMockVirtualCameraClientCallback,
+              onStreamConfigured(kStreamId, _, kSvgaWidth, kSvgaHeight,
+                                 Format::YUV_420_888));
 
   ASSERT_TRUE(
       mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
           .isOk());
 
   EXPECT_THAT(halStreams, SizeIs(streamConfiguration.streams.size()));
-  EXPECT_THAT(mVirtualCameraSession->getStreamIds(), ElementsAre(0));
+  EXPECT_THAT(mVirtualCameraSession->getStreamIds(),
+              ElementsAre(kStreamId, kSecondStreamId));
 }
 
 TEST_F(VirtualCameraSessionTest, SecondConfigureDropsUnreferencedStreams) {
@@ -165,18 +198,18 @@
   StreamConfiguration streamConfiguration;
   std::vector<HalStream> halStreams;
 
-  streamConfiguration.streams = {createStream(0, kWidth, kHeight, format),
-                                 createStream(1, kWidth, kHeight, format),
-                                 createStream(2, kWidth, kHeight, format)};
+  streamConfiguration.streams = {createStream(0, kVgaWidth, kVgaHeight, format),
+                                 createStream(1, kVgaWidth, kVgaHeight, format),
+                                 createStream(2, kVgaWidth, kVgaHeight, format)};
   ASSERT_TRUE(
       mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
           .isOk());
 
   EXPECT_THAT(mVirtualCameraSession->getStreamIds(), ElementsAre(0, 1, 2));
 
-  streamConfiguration.streams = {createStream(0, kWidth, kHeight, format),
-                                 createStream(2, kWidth, kHeight, format),
-                                 createStream(3, kWidth, kHeight, format)};
+  streamConfiguration.streams = {createStream(0, kVgaWidth, kVgaHeight, format),
+                                 createStream(2, kVgaWidth, kVgaHeight, format),
+                                 createStream(3, kVgaWidth, kVgaHeight, format)};
   ASSERT_TRUE(
       mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
           .isOk());
@@ -201,8 +234,8 @@
 
 TEST_F(VirtualCameraSessionTest, onProcessCaptureRequestTriggersClientCallback) {
   StreamConfiguration streamConfiguration;
-  streamConfiguration.streams = {
-      createStream(kStreamId, kWidth, kHeight, PixelFormat::YCBCR_420_888)};
+  streamConfiguration.streams = {createStream(kStreamId, kVgaWidth, kVgaHeight,
+                                              PixelFormat::YCBCR_420_888)};
   std::vector<CaptureRequest> requests(1);
   requests[0].frameNumber = 42;
   requests[0].settings = *(
@@ -226,8 +259,8 @@
 
 TEST_F(VirtualCameraSessionTest, configureAfterCameraRelease) {
   StreamConfiguration streamConfiguration;
-  streamConfiguration.streams = {
-      createStream(kStreamId, kWidth, kHeight, PixelFormat::YCBCR_420_888)};
+  streamConfiguration.streams = {createStream(kStreamId, kVgaWidth, kVgaHeight,
+                                              PixelFormat::YCBCR_420_888)};
   std::vector<HalStream> halStreams;
 
   // Release virtual camera.
@@ -240,6 +273,108 @@
       Eq(static_cast<int32_t>(Status::CAMERA_DISCONNECTED)));
 }
 
+TEST_F(VirtualCameraSessionTest, ConfigureWithEmptyStreams) {
+  StreamConfiguration streamConfiguration;
+  std::vector<HalStream> halStreams;
+
+  // Expect the configuration attempt to return an ILLEGAL_ARGUMENT service-specific code.
+  EXPECT_THAT(
+      mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+          .getServiceSpecificError(),
+      Eq(static_cast<int32_t>(Status::ILLEGAL_ARGUMENT)));
+}
+
+TEST_F(VirtualCameraSessionTest, ConfigureWithDifferentAspectRatioFails) {
+  StreamConfiguration streamConfiguration;
+  streamConfiguration.streams = {
+      createStream(kStreamId, kVgaWidth, kVgaHeight, PixelFormat::YCBCR_420_888),
+      createStream(kSecondStreamId, kVgaHeight, kVgaWidth,
+                   PixelFormat::YCBCR_420_888)};
+
+  std::vector<HalStream> halStreams;
+
+  // Expect the configuration attempt to return an ILLEGAL_ARGUMENT service-specific code.
+  EXPECT_THAT(
+      mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+          .getServiceSpecificError(),
+      Eq(static_cast<int32_t>(Status::ILLEGAL_ARGUMENT)));
+}
+
+class VirtualCameraSessionInputChoiceTest : public VirtualCameraSessionTestBase {
+ public:
+  std::shared_ptr<VirtualCameraSession> createSession(
+      const std::vector<SupportedStreamConfiguration>& supportedInputConfigs) {
+    mVirtualCameraDevice = ndk::SharedRefBase::make<VirtualCameraDevice>(
+        kCameraId, VirtualCameraConfiguration{
+                       .supportedStreamConfigs = supportedInputConfigs,
+                       .virtualCameraCallback = mMockVirtualCameraClientCallback,
+                       .sensorOrientation = SensorOrientation::ORIENTATION_0,
+                       .lensFacing = LensFacing::FRONT});
+    return ndk::SharedRefBase::make<VirtualCameraSession>(
+        mVirtualCameraDevice, mMockCameraDeviceCallback,
+        mMockVirtualCameraClientCallback);
+  }
+
+ protected:
+  std::shared_ptr<VirtualCameraDevice> mVirtualCameraDevice;
+};
+
+TEST_F(VirtualCameraSessionInputChoiceTest,
+       configureChoosesCorrectInputStreamForDownsampledOutput) {
+  // Create camera configured to support SVGA YUV input and QVGA RGBA input.
+  auto virtualCameraSession = createSession(
+      {SupportedStreamConfiguration{.width = kSvgaWidth,
+                                    .height = kSvgaHeight,
+                                    .pixelFormat = Format::YUV_420_888,
+                                    .maxFps = kMaxFps},
+       SupportedStreamConfiguration{.width = kQvgaWidth,
+                                    .height = kQvgaHeight,
+                                    .pixelFormat = Format::RGBA_8888,
+                                    .maxFps = kMaxFps}});
+
+  // Configure VGA stream. Expect SVGA input to be chosen to downscale from.
+  StreamConfiguration streamConfiguration;
+  streamConfiguration.streams = {createStream(
+      kStreamId, kVgaWidth, kVgaHeight, PixelFormat::IMPLEMENTATION_DEFINED)};
+  std::vector<HalStream> halStreams;
+
+  // Expect the SVGA YUV input to be reported to the client callback.
+  EXPECT_CALL(*mMockVirtualCameraClientCallback,
+              onStreamConfigured(kStreamId, _, kSvgaWidth, kSvgaHeight,
+                                 Format::YUV_420_888));
+  EXPECT_TRUE(
+      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+          .isOk());
+}
+
+TEST_F(VirtualCameraSessionInputChoiceTest,
+       configureChoosesCorrectInputStreamForMatchingResolution) {
+  // Create camera configured to support SVGA YUV input and QVGA RGBA input.
+  auto virtualCameraSession = createSession(
+      {SupportedStreamConfiguration{.width = kSvgaWidth,
+                                    .height = kSvgaHeight,
+                                    .pixelFormat = Format::YUV_420_888,
+                                    .maxFps = kMaxFps},
+       SupportedStreamConfiguration{.width = kQvgaWidth,
+                                    .height = kQvgaHeight,
+                                    .pixelFormat = Format::RGBA_8888,
+                                    .maxFps = kMaxFps}});
+
+  // Configure QVGA stream. Expect the QVGA input with matching resolution to be chosen.
+  StreamConfiguration streamConfiguration;
+  streamConfiguration.streams = {createStream(
+      kStreamId, kQvgaWidth, kQvgaHeight, PixelFormat::IMPLEMENTATION_DEFINED)};
+  std::vector<HalStream> halStreams;
+
+  // Expect the QVGA RGBA input to be reported to the client callback.
+  EXPECT_CALL(*mMockVirtualCameraClientCallback,
+              onStreamConfigured(kStreamId, _, kQvgaWidth, kQvgaHeight,
+                                 Format::RGBA_8888));
+  EXPECT_TRUE(
+      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+          .isOk());
+}
+
 }  // namespace
 }  // namespace virtualcamera
 }  // namespace companion
diff --git a/services/camera/virtualcamera/util/EglProgram.cc b/services/camera/virtualcamera/util/EglProgram.cc
index 510fd33..7554a67 100644
--- a/services/camera/virtualcamera/util/EglProgram.cc
+++ b/services/camera/virtualcamera/util/EglProgram.cc
@@ -68,12 +68,13 @@
     })";
 
 constexpr char kExternalTextureVertexShader[] = R"(#version 300 es
+  uniform mat4 aTextureTransformMatrix; // Transform matrix given by surface texture.
   in vec4 aPosition;
   in vec2 aTextureCoord;
   out vec2 vTextureCoord;
   void main() {
     gl_Position = aPosition;
-    vTextureCoord = aTextureCoord;
+    vTextureCoord = (aTextureTransformMatrix * vec4(aTextureCoord, 0.0, 1.0)).xy;
   })";
 
 constexpr char kExternalYuvTextureFragmentShader[] = R"(#version 300 es
@@ -100,10 +101,12 @@
     })";
 
 constexpr int kCoordsPerVertex = 3;
-constexpr std::array<float, 12> kSquareCoords{-1.f, 1.0f, 0.0f,  // top left
-                                              -1.f, -1.f, 0.0f,  // bottom left
-                                              1.0f, -1.f, 0.0f,  // bottom right
-                                              1.0f, 1.0f, 0.0f};  // top right
+
+constexpr std::array<float, 12> kSquareCoords{
+    -1.f, -1.0f, 0.0f,   // top left
+    -1.f, 1.f,   0.0f,   // bottom left
+    1.0f, 1.f,   0.0f,   // bottom right
+    1.0f, -1.0f, 0.0f};  // top right
 
 constexpr std::array<float, 8> kTextureCoords{0.0f, 1.0f,   // top left
                                               0.0f, 0.0f,   // bottom left
@@ -265,32 +268,50 @@
   } else {
     ALOGE("External texture EGL shader program initialization failed.");
   }
+
+  // Lookup and cache handles to uniforms & attributes.
+  mPositionHandle = glGetAttribLocation(mProgram, "aPosition");
+  mTextureCoordHandle = glGetAttribLocation(mProgram, "aTextureCoord");
+  mTransformMatrixHandle =
+      glGetUniformLocation(mProgram, "aTextureTransformMatrix");
+  mTextureHandle = glGetUniformLocation(mProgram, "uTexture");
+
+  // Pass vertex array to the shader.
+  glEnableVertexAttribArray(mPositionHandle);
+  glVertexAttribPointer(mPositionHandle, kCoordsPerVertex, GL_FLOAT, false,
+                        kSquareCoords.size(), kSquareCoords.data());
+
+  // Pass texture coordinates corresponding to vertex array to the shader.
+  glEnableVertexAttribArray(mTextureCoordHandle);
+  glVertexAttribPointer(mTextureCoordHandle, 2, GL_FLOAT, false,
+                        kTextureCoords.size(), kTextureCoords.data());
 }
 
-bool EglTextureProgram::draw(GLuint textureId) {
+EglTextureProgram::~EglTextureProgram() {
+  if (mPositionHandle != -1) {
+    glDisableVertexAttribArray(mPositionHandle);
+  }
+  if (mTextureCoordHandle != -1) {
+    glDisableVertexAttribArray(mTextureCoordHandle);
+  }
+}
+
+bool EglTextureProgram::draw(GLuint textureId,
+                             const std::array<float, 16>& transformMatrix) {
   // Load compiled shader.
   glUseProgram(mProgram);
   if (checkEglError("glUseProgram")) {
     return false;
   }
 
-  // Pass vertex array to the shader.
-  int positionHandle = glGetAttribLocation(mProgram, "aPosition");
-  glEnableVertexAttribArray(positionHandle);
-  glVertexAttribPointer(positionHandle, kCoordsPerVertex, GL_FLOAT, false,
-                        kSquareCoords.size(), kSquareCoords.data());
-
-  // Pass texture coordinates corresponding to vertex array to the shader.
-  int textureCoordHandle = glGetAttribLocation(mProgram, "aTextureCoord");
-  glEnableVertexAttribArray(textureCoordHandle);
-  glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, false,
-                        kTextureCoords.size(), kTextureCoords.data());
+  // Pass transformation matrix for the texture coordinates.
+  glUniformMatrix4fv(mTransformMatrixHandle, 1, /*transpose=*/GL_FALSE,
+                     transformMatrix.data());
 
   // Configure texture for the shader.
-  int textureHandle = glGetUniformLocation(mProgram, "uTexture");
   glActiveTexture(GL_TEXTURE0);
   glBindTexture(GL_TEXTURE_EXTERNAL_OES, textureId);
-  glUniform1i(textureHandle, 0);
+  glUniform1i(mTextureHandle, 0);
 
   // Draw triangle strip forming a square filling the viewport.
   glDrawElements(GL_TRIANGLES, kDrawOrder.size(), GL_UNSIGNED_BYTE,
diff --git a/services/camera/virtualcamera/util/EglProgram.h b/services/camera/virtualcamera/util/EglProgram.h
index 1b5f2cd..c695cbb 100644
--- a/services/camera/virtualcamera/util/EglProgram.h
+++ b/services/camera/virtualcamera/util/EglProgram.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_COMPANION_VIRTUALCAMERA_EGLPROGRAM_H
 #define ANDROID_COMPANION_VIRTUALCAMERA_EGLPROGRAM_H
 
+#include <array>
+
 #include "GLES/gl.h"
 
 namespace android {
@@ -58,8 +60,23 @@
   enum class TextureFormat { RGBA, YUV };
 
   EglTextureProgram(TextureFormat textureFormat = TextureFormat::YUV);
+  virtual ~EglTextureProgram();
 
-  bool draw(GLuint textureId);
+  // Draw texture over whole viewport, applying transformMatrix to texture
+  // coordinates.
+  //
+  // The transform matrix is a 4x4 matrix in column-major order. It is applied
+  // to the texture coordinates (s, t, 0, 1), with s and t in the <0, 1> range,
+  // prior to sampling:
+  //
+  // textureCoord = (transformMatrix * vec4(s, t, 0, 1)).xy
+  bool draw(GLuint textureId, const std::array<float, 16>& transformMatrix);
+
+ private:
+  int mPositionHandle = -1;
+  int mTextureCoordHandle = -1;
+  int mTransformMatrixHandle = -1;
+  int mTextureHandle = -1;
 };
 
 }  // namespace virtualcamera
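
For readers new to the column-major convention documented above, here is a small illustrative sketch (not part of this change; the helper name transformTexCoord is made up for the example) of how the transform expands for a single texture coordinate, matching the shader's aTextureTransformMatrix multiplication:

#include <array>

// Applies a column-major 4x4 matrix to the texture coordinate (s, t, 0, 1)
// and returns the transformed (s', t') pair.
std::array<float, 2> transformTexCoord(const std::array<float, 16>& m, float s,
                                       float t) {
  // Column-major layout: element (row, col) is stored at m[col * 4 + row].
  const float sOut = m[0] * s + m[4] * t + m[12];  // row 0 dot (s, t, 0, 1)
  const float tOut = m[1] * s + m[5] * t + m[13];  // row 1 dot (s, t, 0, 1)
  return {sOut, tOut};
}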
diff --git a/services/camera/virtualcamera/util/EglSurfaceTexture.cc b/services/camera/virtualcamera/util/EglSurfaceTexture.cc
index 5b479c0..9f26e19 100644
--- a/services/camera/virtualcamera/util/EglSurfaceTexture.cc
+++ b/services/camera/virtualcamera/util/EglSurfaceTexture.cc
@@ -68,6 +68,16 @@
   return mTextureId;
 }
 
+GLuint EglSurfaceTexture::getTextureId() const {
+  return mTextureId;
+}
+
+std::array<float, 16> EglSurfaceTexture::getTransformMatrix() {
+  std::array<float, 16> matrix;
+  mGlConsumer->getTransformMatrix(matrix.data());
+  return matrix;
+}
+
 uint32_t EglSurfaceTexture::getWidth() const {
   return mWidth;
 }
diff --git a/services/camera/virtualcamera/util/EglSurfaceTexture.h b/services/camera/virtualcamera/util/EglSurfaceTexture.h
index 14dc7d6..faad7c4 100644
--- a/services/camera/virtualcamera/util/EglSurfaceTexture.h
+++ b/services/camera/virtualcamera/util/EglSurfaceTexture.h
@@ -57,6 +57,17 @@
   // Returns EGL texture id of the texture.
   GLuint updateTexture();
 
+  // Returns EGL texture id of the underlying texture.
+  GLuint getTextureId() const;
+
+  // Returns a 4x4 transformation matrix in column-major order
+  // that should be applied to EGL texture coordinates
+  // before sampling from the texture backed by an Android native buffer,
+  // so that the corresponding region of the underlying buffer is sampled.
+  //
+  // See SurfaceTexture.getTransformMatrix for more details.
+  std::array<float, 16> getTransformMatrix();
+
  private:
   sp<IGraphicBufferProducer> mBufferProducer;
   sp<IGraphicBufferConsumer> mBufferConsumer;
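
Taken together, the new EglSurfaceTexture accessors and the EglTextureProgram::draw overload above suggest a render pass along these lines. This is a minimal sketch assuming an EGL context is already current and both objects are constructed elsewhere; renderLatestFrame is a hypothetical helper name and the include paths mirror how other files in this change include "util/…" headers:

#include <array>

#include "util/EglProgram.h"
#include "util/EglSurfaceTexture.h"

using android::companion::virtualcamera::EglSurfaceTexture;
using android::companion::virtualcamera::EglTextureProgram;

// Renders the most recent frame of the surface texture with the texture program.
void renderLatestFrame(EglSurfaceTexture& surfaceTexture,
                       EglTextureProgram& textureProgram) {
  // Latch the newest queued buffer into the GL texture.
  const GLuint textureId = surfaceTexture.updateTexture();

  // Fetch the per-buffer transform and pass it to the shader so the correct
  // region of the underlying buffer is sampled.
  const std::array<float, 16> transform = surfaceTexture.getTransformMatrix();
  textureProgram.draw(textureId, transform);
}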
diff --git a/services/camera/virtualcamera/util/JpegUtil.cc b/services/camera/virtualcamera/util/JpegUtil.cc
index 2b19c13..8569eff 100644
--- a/services/camera/virtualcamera/util/JpegUtil.cc
+++ b/services/camera/virtualcamera/util/JpegUtil.cc
@@ -19,7 +19,7 @@
 
 #include <cstddef>
 #include <cstdint>
-#include <memory>
+#include <optional>
 #include <vector>
 
 #include "android/hardware_buffer.h"
@@ -34,11 +34,9 @@
 namespace virtualcamera {
 namespace {
 
-constexpr int kJpegQuality = 80;
-
 class LibJpegContext {
  public:
-  LibJpegContext(int width, int height, const size_t outBufferSize,
+  LibJpegContext(int width, int height, int quality, const size_t outBufferSize,
                  void* outBuffer)
       : mWidth(width),
         mHeight(height),
@@ -76,7 +74,7 @@
     jpeg_set_defaults(&mCompressStruct);
 
     // Set quality and colorspace.
-    jpeg_set_quality(&mCompressStruct, kJpegQuality, 1);
+    jpeg_set_quality(&mCompressStruct, quality, 1);
     jpeg_set_colorspace(&mCompressStruct, JCS_YCbCr);
 
     // Configure RAW input mode - this let's libjpeg know we're providing raw,
@@ -94,11 +92,31 @@
     mCompressStruct.comp_info[2].v_samp_factor = 1;
   }
 
-  bool compress(const android_ycbcr& ycbr) {
+  LibJpegContext& setApp1Data(const uint8_t* app1Data, const size_t size) {
+    mApp1Data = app1Data;
+    mApp1DataSize = size;
+    return *this;
+  }
+
+  std::optional<size_t> compress(const android_ycbcr& ycbr) {
+    // TODO(b/301023410) - Add support for compressing image sizes not aligned
+    // with DCT size.
+    if (mWidth % (2 * DCTSIZE) || (mHeight % (2 * DCTSIZE))) {
+      ALOGE(
+          "%s: Compressing YUV420 image with size %dx%d not aligned with 2 * "
+          "DCTSIZE (%d) is not currently supported.",
+          __func__, mWidth, mHeight, 2 * DCTSIZE);
+      return std::nullopt;
+    }
+
+    // Chroma planes have 1/2 resolution of the original image.
+    const int cHeight = mHeight / 2;
+    const int cWidth = mWidth / 2;
+
     // Prepare arrays of pointers to scanlines of each plane.
     std::vector<JSAMPROW> yLines(mHeight);
-    std::vector<JSAMPROW> cbLines(mHeight / 2);
-    std::vector<JSAMPROW> crLines(mHeight / 2);
+    std::vector<JSAMPROW> cbLines(cHeight);
+    std::vector<JSAMPROW> crLines(cHeight);
 
     uint8_t* y = static_cast<uint8_t*>(ycbr.y);
     uint8_t* cb = static_cast<uint8_t*>(ycbr.cb);
@@ -107,23 +125,27 @@
     // Since UV samples might be interleaved (semiplanar) we need to copy
     // them to separate planes, since libjpeg doesn't directly
     // support processing semiplanar YUV.
-    const int c_samples = (mWidth / 2) * (mHeight / 2);
-    std::vector<uint8_t> cb_plane(c_samples);
-    std::vector<uint8_t> cr_plane(c_samples);
+    const int cSamples = cWidth * cHeight;
+    std::vector<uint8_t> cb_plane(cSamples);
+    std::vector<uint8_t> cr_plane(cSamples);
 
     // TODO(b/301023410) - Use libyuv or ARM SIMD for "unzipping" the data.
-    for (int i = 0; i < c_samples; ++i) {
-      cb_plane[i] = *cb;
-      cr_plane[i] = *cr;
-      cb += ycbr.chroma_step;
-      cr += ycbr.chroma_step;
+    int out_idx = 0;
+    for (int i = 0; i < cHeight; ++i) {
+      for (int j = 0; j < cWidth; ++j) {
+        cb_plane[out_idx] = cb[j * ycbr.chroma_step];
+        cr_plane[out_idx] = cr[j * ycbr.chroma_step];
+        out_idx++;
+      }
+      cb += ycbr.cstride;
+      cr += ycbr.cstride;
     }
 
     // Collect pointers to individual scanline of each plane.
     for (int i = 0; i < mHeight; ++i) {
       yLines[i] = y + i * ycbr.ystride;
     }
-    for (int i = 0; i < (mHeight / 2); ++i) {
+    for (int i = 0; i < cHeight; ++i) {
       cbLines[i] = cb_plane.data() + i * (mWidth / 2);
       crLines[i] = cr_plane.data() + i * (mWidth / 2);
     }
@@ -131,18 +153,6 @@
     return compress(yLines, cbLines, crLines);
   }
 
-  bool compressBlackImage() {
-    // We only really need to prepare one scanline for Y and one shared scanline
-    // for Cb & Cr.
-    std::vector<uint8_t> yLine(mWidth, 0);
-    std::vector<uint8_t> chromaLine(mWidth / 2, 0xff / 2);
-
-    std::vector<JSAMPROW> yLines(mHeight, yLine.data());
-    std::vector<JSAMPROW> cLines(mHeight / 2, chromaLine.data());
-
-    return compress(yLines, cLines, cLines);
-  }
-
  private:
   void setSuccess(const boolean success) {
     mSuccess = success;
@@ -165,11 +175,18 @@
   // Takes vector of pointers to Y / Cb / Cr scanlines as an input. Length of
   // each vector needs to correspond to height of corresponding plane.
   //
-  // Returns true if compression is successful, false otherwise.
-  bool compress(std::vector<JSAMPROW>& yLines, std::vector<JSAMPROW>& cbLines,
-                std::vector<JSAMPROW>& crLines) {
+  // Returns the compressed image size in bytes on success, an empty optional otherwise.
+  std::optional<size_t> compress(std::vector<JSAMPROW>& yLines,
+                                 std::vector<JSAMPROW>& cbLines,
+                                 std::vector<JSAMPROW>& crLines) {
     jpeg_start_compress(&mCompressStruct, TRUE);
 
+    if (mApp1Data != nullptr && mApp1DataSize > 0) {
+      ALOGV("%s: Writing exif, size %zu B", __func__, mApp1DataSize);
+      jpeg_write_marker(&mCompressStruct, JPEG_APP0 + 1,
+                        static_cast<const JOCTET*>(mApp1Data), mApp1DataSize);
+    }
+
     while (mCompressStruct.next_scanline < mCompressStruct.image_height) {
       const uint32_t batchSize = DCTSIZE * 2;
       const uint32_t nl = mCompressStruct.next_scanline;
@@ -181,11 +198,11 @@
         ALOGE("%s: compressed %u lines, expected %u (total %u/%u)",
               __FUNCTION__, done, batchSize, mCompressStruct.next_scanline,
               mCompressStruct.image_height);
-        return false;
+        return std::nullopt;
       }
     }
     jpeg_finish_compress(&mCompressStruct);
-    return mSuccess;
+    return mEncodedSize;
   }
 
   // === libjpeg callbacks below ===
@@ -217,6 +234,10 @@
   jpeg_error_mgr mErrorMgr;
   jpeg_destination_mgr mDestinationMgr;
 
+  // APP1 data.
+  const uint8_t* mApp1Data = nullptr;
+  size_t mApp1DataSize = 0;
+
   // Dimensions of the input image.
   int mWidth;
   int mHeight;
@@ -235,15 +256,15 @@
 
 }  // namespace
 
-bool compressJpeg(int width, int height, const android_ycbcr& ycbcr,
-                  size_t outBufferSize, void* outBuffer) {
-  return LibJpegContext(width, height, outBufferSize, outBuffer).compress(ycbcr);
-}
-
-bool compressBlackJpeg(int width, int height, size_t outBufferSize,
-                       void* outBuffer) {
-  return LibJpegContext(width, height, outBufferSize, outBuffer)
-      .compressBlackImage();
+std::optional<size_t> compressJpeg(const int width, const int height,
+                                   const int quality, const android_ycbcr& ycbcr,
+                                   const std::vector<uint8_t>& app1ExifData,
+                                   size_t outBufferSize, void* outBuffer) {
+  LibJpegContext context(width, height, quality, outBufferSize, outBuffer);
+  if (!app1ExifData.empty()) {
+    context.setApp1Data(app1ExifData.data(), app1ExifData.size());
+  }
+  return context.compress(ycbcr);
 }
 
 }  // namespace virtualcamera
diff --git a/services/camera/virtualcamera/util/JpegUtil.h b/services/camera/virtualcamera/util/JpegUtil.h
index c44d0a8..83ed74b 100644
--- a/services/camera/virtualcamera/util/JpegUtil.h
+++ b/services/camera/virtualcamera/util/JpegUtil.h
@@ -17,9 +17,8 @@
 #ifndef ANDROID_COMPANION_VIRTUALCAMERA_JPEGUTIL_H
 #define ANDROID_COMPANION_VIRTUALCAMERA_JPEGUTIL_H
 
-#include <memory>
+#include <optional>
 
-#include "android/hardware_buffer.h"
 #include "system/graphics.h"
 
 namespace android {
@@ -27,14 +26,20 @@
 namespace virtualcamera {
 
 // Jpeg-compress image into the output buffer.
-// Returns true if the compression was successful, false otherwise.
-bool compressJpeg(int width, int height, const android_ycbcr& ycbcr,
-                  size_t outBufferSize, void* outBuffer);
-
-// Jpeg-compress all-black image into the output buffer.
-// Returns true if the compression was successful, false otherwise.
-bool compressBlackJpeg(int width, int height, size_t outBufferSize,
-                       void* outBuffer);
+// * width - width of the image.
+// * height - height of the image.
+// * quality - 0-100, a higher number corresponds to higher quality.
+// * ycbcr - android_ycbcr structure describing the input YUV420 image layout.
+// * app1ExifData - vector containing data to be included in the APP1
+//   segment. Can be empty.
+// * outBufferSize - capacity of the output buffer.
+// * outBuffer - output buffer to write compressed data into.
+// Returns the size of the compressed data if the compression was successful,
+// an empty optional otherwise.
+std::optional<size_t> compressJpeg(int width, int height, int quality,
+                                   const android_ycbcr& ycbcr,
+                                   const std::vector<uint8_t>& app1ExifData,
+                                   size_t outBufferSize, void* outBuffer);
 
 }  // namespace virtualcamera
 }  // namespace companion
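
A caller-side sketch of the new compressJpeg API might look like the following; the helper name encodeFrame, the output-buffer sizing, and the quality value of 80 are assumptions for illustration, not part of this change. It also assumes the input dimensions are multiples of 2 * DCTSIZE (16), as required by the current implementation:

#include <cstdint>
#include <optional>
#include <vector>

#include "system/graphics.h"  // android_ycbcr
#include "util/JpegUtil.h"    // include path assumed, mirroring other "util/…" includes

using android::companion::virtualcamera::compressJpeg;

// Compresses an already-locked YUV420 frame and returns the JPEG bytes,
// or an empty vector on failure.
std::vector<uint8_t> encodeFrame(const android_ycbcr& ycbcr, int width,
                                 int height,
                                 const std::vector<uint8_t>& exifApp1) {
  // Capacity is a caller-chosen upper bound for the compressed output.
  std::vector<uint8_t> jpegBuffer(width * height);
  std::optional<size_t> encodedSize =
      compressJpeg(width, height, /*quality=*/80, ycbcr, exifApp1,
                   jpegBuffer.size(), jpegBuffer.data());
  if (!encodedSize.has_value()) {
    return {};  // Compression failed; the caller decides how to report it.
  }
  jpegBuffer.resize(*encodedSize);
  return jpegBuffer;
}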
diff --git a/services/camera/virtualcamera/util/MetadataBuilder.cc b/services/camera/virtualcamera/util/MetadataBuilder.cc
deleted file mode 100644
index 92a48b9..0000000
--- a/services/camera/virtualcamera/util/MetadataBuilder.cc
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * Copyright (C) 2023 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "MetadataBuilder"
-
-#include "MetadataBuilder.h"
-
-#include <algorithm>
-#include <cstdint>
-#include <iterator>
-#include <memory>
-#include <utility>
-#include <variant>
-#include <vector>
-
-#include "CameraMetadata.h"
-#include "aidl/android/hardware/camera/device/CameraMetadata.h"
-#include "log/log.h"
-#include "system/camera_metadata.h"
-#include "utils/Errors.h"
-
-namespace android {
-namespace companion {
-namespace virtualcamera {
-
-namespace {
-
-using ::android::hardware::camera::common::helper::CameraMetadata;
-
-template <typename To, typename From>
-std::vector<To> convertTo(const std::vector<From>& from) {
-  std::vector<To> to;
-  to.reserve(from.size());
-  std::transform(from.begin(), from.end(), std::back_inserter(to),
-                 [](const From& x) { return static_cast<To>(x); });
-  return to;
-}
-
-}  // namespace
-
-MetadataBuilder& MetadataBuilder::setSupportedHardwareLevel(
-    camera_metadata_enum_android_info_supported_hardware_level_t hwLevel) {
-  mEntryMap[ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL] =
-      std::vector<uint8_t>({static_cast<uint8_t>(hwLevel)});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setFlashAvailable(bool flashAvailable) {
-  const uint8_t metadataVal = flashAvailable
-                                  ? ANDROID_FLASH_INFO_AVAILABLE_TRUE
-                                  : ANDROID_FLASH_INFO_AVAILABLE_FALSE;
-  mEntryMap[ANDROID_FLASH_INFO_AVAILABLE] = std::vector<uint8_t>({metadataVal});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setLensFacing(
-    camera_metadata_enum_android_lens_facing lensFacing) {
-  mEntryMap[ANDROID_LENS_FACING] =
-      std::vector<uint8_t>({static_cast<uint8_t>(lensFacing)});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setSensorOrientation(int32_t sensorOrientation) {
-  mEntryMap[ANDROID_SENSOR_ORIENTATION] =
-      std::vector<int32_t>({sensorOrientation});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setSensorTimestamp(
-    std::chrono::nanoseconds timestamp) {
-  mEntryMap[ANDROID_SENSOR_TIMESTAMP] =
-      std::vector<int64_t>({timestamp.count()});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setAvailableFaceDetectModes(
-    const std::vector<camera_metadata_enum_android_statistics_face_detect_mode_t>&
-        faceDetectModes) {
-  mEntryMap[ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES] =
-      convertTo<uint8_t>(faceDetectModes);
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlAvailableModes(
-    const std::vector<camera_metadata_enum_android_control_mode_t>&
-        availableModes) {
-  mEntryMap[ANDROID_CONTROL_AVAILABLE_MODES] =
-      convertTo<uint8_t>(availableModes);
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlAfAvailableModes(
-    const std::vector<camera_metadata_enum_android_control_af_mode_t>&
-        availableModes) {
-  mEntryMap[ANDROID_CONTROL_AF_AVAILABLE_MODES] =
-      convertTo<uint8_t>(availableModes);
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlAfMode(
-    const camera_metadata_enum_android_control_af_mode_t mode) {
-  mEntryMap[ANDROID_CONTROL_AF_MODE] =
-      std::vector<uint8_t>({static_cast<uint8_t>(mode)});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlAeAvailableFpsRange(
-    const int32_t minFps, const int32_t maxFps) {
-  mEntryMap[ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES] =
-      std::vector<int32_t>({minFps, maxFps});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlMaxRegions(int32_t maxAeRegions,
-                                                       int32_t maxAwbRegions,
-                                                       int32_t maxAfRegions) {
-  mEntryMap[ANDROID_CONTROL_MAX_REGIONS] =
-      std::vector<int32_t>({maxAeRegions, maxAwbRegions, maxAfRegions});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlAeRegions(
-    const std::vector<ControlRegion>& aeRegions) {
-  std::vector<int32_t> regions;
-  regions.reserve(5 * aeRegions.size());
-  for (const ControlRegion& region : aeRegions) {
-    regions.push_back(region.x0);
-    regions.push_back(region.y0);
-    regions.push_back(region.x1);
-    regions.push_back(region.y1);
-    regions.push_back(region.weight);
-  }
-  mEntryMap[ANDROID_CONTROL_AE_REGIONS] = std::move(regions);
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlAfRegions(
-    const std::vector<ControlRegion>& afRegions) {
-  std::vector<int32_t> regions;
-  regions.reserve(5 * afRegions.size());
-  for (const ControlRegion& region : afRegions) {
-    regions.push_back(region.x0);
-    regions.push_back(region.y0);
-    regions.push_back(region.x1);
-    regions.push_back(region.y1);
-    regions.push_back(region.weight);
-  }
-  mEntryMap[ANDROID_CONTROL_AF_REGIONS] = std::move(regions);
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlAwbRegions(
-    const std::vector<ControlRegion>& awbRegions) {
-  std::vector<int32_t> regions;
-  regions.reserve(5 * awbRegions.size());
-  for (const ControlRegion& region : awbRegions) {
-    regions.push_back(region.x0);
-    regions.push_back(region.y0);
-    regions.push_back(region.x1);
-    regions.push_back(region.y1);
-    regions.push_back(region.weight);
-  }
-  mEntryMap[ANDROID_CONTROL_AWB_REGIONS] = std::move(regions);
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlCaptureIntent(
-    const camera_metadata_enum_android_control_capture_intent_t intent) {
-  mEntryMap[ANDROID_CONTROL_CAPTURE_INTENT] =
-      std::vector<uint8_t>({static_cast<uint8_t>(intent)});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setMaxJpegSize(const int32_t size) {
-  mEntryMap[ANDROID_JPEG_MAX_SIZE] = std::vector<int32_t>({size});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setAvailableOutputStreamConfigurations(
-    const std::vector<StreamConfiguration>& streamConfigurations) {
-  std::vector<int32_t> metadataStreamConfigs;
-  std::vector<int64_t> metadataMinFrameDurations;
-  std::vector<int64_t> metadataStallDurations;
-  metadataStreamConfigs.reserve(streamConfigurations.size());
-  metadataMinFrameDurations.reserve(streamConfigurations.size());
-  metadataStallDurations.reserve(streamConfigurations.size());
-
-  for (const auto& config : streamConfigurations) {
-    metadataStreamConfigs.push_back(config.format);
-    metadataStreamConfigs.push_back(config.width);
-    metadataStreamConfigs.push_back(config.height);
-    metadataStreamConfigs.push_back(
-        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
-
-    metadataMinFrameDurations.push_back(config.format);
-    metadataMinFrameDurations.push_back(config.width);
-    metadataMinFrameDurations.push_back(config.height);
-    metadataMinFrameDurations.push_back(config.minFrameDuration.count());
-
-    metadataStallDurations.push_back(config.format);
-    metadataStallDurations.push_back(config.width);
-    metadataStallDurations.push_back(config.height);
-    metadataStallDurations.push_back(config.minStallDuration.count());
-  }
-
-  mEntryMap[ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS] =
-      metadataStreamConfigs;
-  mEntryMap[ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS] =
-      metadataMinFrameDurations;
-  mEntryMap[ANDROID_SCALER_AVAILABLE_STALL_DURATIONS] =
-      metadataMinFrameDurations;
-
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setAvailableMaxDigitalZoom(const float maxZoom) {
-  mEntryMap[ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM] =
-      std::vector<float>(maxZoom);
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlZoomRatioRange(const float min,
-                                                           const float max) {
-  mEntryMap[ANDROID_CONTROL_ZOOM_RATIO_RANGE] = std::vector<float>({min, max});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setSensorActiveArraySize(int x0, int y0,
-                                                           int x1, int y1) {
-  mEntryMap[ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE] =
-      std::vector<int32_t>({x0, y0, x1, y1});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlAeCompensationRange(int32_t min,
-                                                                int32_t max) {
-  mEntryMap[ANDROID_CONTROL_AE_COMPENSATION_RANGE] =
-      std::vector<int32_t>({min, max});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setControlAeCompensationStep(
-    const camera_metadata_rational step) {
-  mEntryMap[ANDROID_CONTROL_AE_COMPENSATION_STEP] =
-      std::vector<camera_metadata_rational>({step});
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setAvailableRequestKeys(
-    const std::vector<int32_t>& keys) {
-  mEntryMap[ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS] = keys;
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setAvailableResultKeys(
-    const std::vector<int32_t>& keys) {
-  mEntryMap[ANDROID_REQUEST_AVAILABLE_RESULT_KEYS] = keys;
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setAvailableCapabilities(
-    const std::vector<camera_metadata_enum_android_request_available_capabilities_t>&
-        capabilities) {
-  mEntryMap[ANDROID_REQUEST_AVAILABLE_CAPABILITIES] =
-      convertTo<uint8_t>(capabilities);
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setAvailableCharacteristicKeys(
-    const std::vector<camera_metadata_tag_t>& keys) {
-  mEntryMap[ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS] =
-      convertTo<int32_t>(keys);
-  return *this;
-}
-
-MetadataBuilder& MetadataBuilder::setAvailableCharacteristicKeys() {
-  std::vector<camera_metadata_tag_t> availableKeys;
-  availableKeys.reserve(mEntryMap.size());
-  for (const auto& [key, _] : mEntryMap) {
-    if (key != ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS) {
-      availableKeys.push_back(key);
-    }
-  }
-  setAvailableCharacteristicKeys(availableKeys);
-  return *this;
-}
-
-std::unique_ptr<aidl::android::hardware::camera::device::CameraMetadata>
-MetadataBuilder::build() const {
-  CameraMetadata metadataHelper;
-  for (const auto& entry : mEntryMap) {
-    status_t ret = std::visit(
-        [&](auto&& arg) {
-          return metadataHelper.update(entry.first, arg.data(), arg.size());
-        },
-        entry.second);
-    if (ret != NO_ERROR) {
-      ALOGE("Failed to update metadata with key %d - %s: %s", entry.first,
-            get_camera_metadata_tag_name(entry.first),
-            ::android::statusToString(ret).c_str());
-      return nullptr;
-    }
-  }
-
-  const camera_metadata_t* metadata = metadataHelper.getAndLock();
-  if (metadata == nullptr) {
-    ALOGE(
-        "Failure when constructing metadata -> CameraMetadata helper returned "
-        "nullptr");
-    return nullptr;
-  }
-
-  auto aidlMetadata =
-      std::make_unique<aidl::android::hardware::camera::device::CameraMetadata>();
-  const uint8_t* data_ptr = reinterpret_cast<const uint8_t*>(metadata);
-  aidlMetadata->metadata.assign(data_ptr,
-                                data_ptr + get_camera_metadata_size(metadata));
-  metadataHelper.unlock(metadata);
-
-  return aidlMetadata;
-}
-
-}  // namespace virtualcamera
-}  // namespace companion
-}  // namespace android
diff --git a/services/camera/virtualcamera/util/MetadataBuilder.h b/services/camera/virtualcamera/util/MetadataBuilder.h
deleted file mode 100644
index d992d31..0000000
--- a/services/camera/virtualcamera/util/MetadataBuilder.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (C) 2023 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_COMPANION_VIRTUALCAMERA_METADATABUILDER_H
-#define ANDROID_COMPANION_VIRTUALCAMERA_METADATABUILDER_H
-
-#include <chrono>
-#include <cstdint>
-#include <map>
-#include <memory>
-#include <variant>
-#include <vector>
-
-#include "aidl/android/hardware/camera/device/CameraMetadata.h"
-#include "system/camera_metadata.h"
-
-namespace android {
-namespace companion {
-namespace virtualcamera {
-
-// Convenience builder for the
-// aidl::android::hardware::camera::device::CameraMetadata.
-//
-// Calling the same builder setter multiple will overwrite the value.
-// This class is not thread-safe.
-class MetadataBuilder {
- public:
-  struct StreamConfiguration {
-    int32_t width = 0;
-    int32_t height = 0;
-    int32_t format = 0;
-    // Minimal frame duration - corresponds to maximal FPS for given format.
-    // See ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS in CameraMetadataTag.aidl.
-    std::chrono::nanoseconds minFrameDuration{std::chrono::seconds(1) / 30};
-    // Minimal stall duration.
-    // See ANDROID_SCALER_AVAILABLE_STALL_DURATIONS in CameraMetadataTag.aidl.
-    std::chrono::nanoseconds minStallDuration{0};
-  };
-
-  struct ControlRegion {
-    int32_t x0 = 0;
-    int32_t y0 = 0;
-    int32_t x1 = 0;
-    int32_t y1 = 0;
-    int32_t weight = 0;
-  };
-
-  MetadataBuilder() = default;
-  ~MetadataBuilder() = default;
-
-  // See ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL in CameraMetadataTag.aidl.
-  MetadataBuilder& setSupportedHardwareLevel(
-      camera_metadata_enum_android_info_supported_hardware_level_t hwLevel);
-
-  // Whether this camera device has a flash unit
-  // See ANDROID_FLASH_INFO_AVAILABLE in CameraMetadataTag.aidl.
-  MetadataBuilder& setFlashAvailable(bool flashAvailable);
-
-  // See ANDROID_LENS_FACING in CameraMetadataTag.aidl.
-  MetadataBuilder& setLensFacing(
-      camera_metadata_enum_android_lens_facing lensFacing);
-
-  // See ANDROID_SENSOR_ORIENTATION in CameraMetadataTag.aidl.
-  MetadataBuilder& setSensorOrientation(int32_t sensorOrientation);
-
-  // Time at start of exposure of first row of the image
-  // sensor active array, in nanoseconds.
-  //
-  // See ANDROID_SENSOR_TIMESTAMP in CameraMetadataTag.aidl.
-  MetadataBuilder& setSensorTimestamp(std::chrono::nanoseconds timestamp);
-
-  // See ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE in CameraMetadataTag.aidl.
-  MetadataBuilder& setSensorActiveArraySize(int x0, int y0, int x1, int y1);
-
-  // See ANDROID_STATISTICS_FACE_DETECT_MODE in CameraMetadataTag.aidl.
-  MetadataBuilder& setAvailableFaceDetectModes(
-      const std::vector<camera_metadata_enum_android_statistics_face_detect_mode_t>&
-          faceDetectMode);
-
-  // Sets available stream configurations along with corresponding minimal frame
-  // durations (corresponding to max fps) and stall durations.
-  //
-  // See ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
-  // ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS and
-  // ANDROID_SCALER_AVAILABLE_STALL_DURATIONS in CameraMetadataTag.aidl.
-  MetadataBuilder& setAvailableOutputStreamConfigurations(
-      const std::vector<StreamConfiguration>& streamConfigurations);
-
-  // See ANDROID_CONTROL_AVAILABLE_MODES in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAvailableModes(
-      const std::vector<camera_metadata_enum_android_control_mode_t>&
-          availableModes);
-
-  // See ANDROID_CONTROL_AE_COMPENSATION_RANGE in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAeCompensationRange(int32_t min, int32_t max);
-
-  // See ANDROID_CONTROL_AE_COMPENSATION_STEP in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAeCompensationStep(camera_metadata_rational step);
-
-  // See ANDROID_CONTROL_AF_AVAILABLE_MODES in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAfAvailableModes(
-      const std::vector<camera_metadata_enum_android_control_af_mode_t>&
-          availableModes);
-
-  // See ANDROID_CONTROL_AF_MODE in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAfMode(
-      const camera_metadata_enum_android_control_af_mode_t mode);
-
-  // See ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAeAvailableFpsRange(int32_t min, int32_t max);
-
-  // See ANDROID_CONTROL_CAPTURE_INTENT in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlCaptureIntent(
-      camera_metadata_enum_android_control_capture_intent_t intent);
-
-  // See ANDROID_CONTROL_MAX_REGIONS in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlMaxRegions(int32_t maxAeRegions,
-                                        int32_t maxAwbRegions,
-                                        int32_t maxAfRegions);
-
-  // See ANDROID_CONTROL_AE_REGIONS in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAeRegions(
-      const std::vector<ControlRegion>& aeRegions);
-
-  // See ANDROID_CONTROL_AWB_REGIONS in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAwbRegions(
-      const std::vector<ControlRegion>& awbRegions);
-
-  // See ANDROID_CONTROL_AF_REGIONS in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlAfRegions(
-      const std::vector<ControlRegion>& afRegions);
-
-  // The size of the compressed JPEG image, in bytes.
-  //
-  // See ANDROID_JPEG_SIZE in CameraMetadataTag.aidl.
-  MetadataBuilder& setMaxJpegSize(int32_t size);
-
-  // See ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM in CameraMetadataTag.aidl.
-  MetadataBuilder& setAvailableMaxDigitalZoom(const float maxZoom);
-
-  // See ANDROID_CONTROL_ZOOM_RATIO_RANGE in CameraMetadataTag.aidl.
-  MetadataBuilder& setControlZoomRatioRange(float min, float max);
-
-  // A list of all keys that the camera device has available to use with
-  // CaptureRequest.
-  //
-  // See ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS in CameraMetadataTag.aidl.
-  MetadataBuilder& setAvailableRequestKeys(const std::vector<int32_t>& keys);
-
-  // A list of all keys that the camera device has available to use with
-  // CaptureResult.
-  //
-  // See ANDROID_RESULT_AVAILABLE_REQUEST_KEYS in CameraMetadataTag.aidl.
-  MetadataBuilder& setAvailableResultKeys(const std::vector<int32_t>& keys);
-
-  // See ANDROID_REQUEST_AVAILABLE_CAPABILITIES in CameraMetadataTag.aidl.
-  MetadataBuilder& setAvailableCapabilities(
-      const std::vector<
-          camera_metadata_enum_android_request_available_capabilities_t>&
-          capabilities);
-
-  // A list of all keys that the camera device has available to use.
-  //
-  // See ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS in CameraMetadataTag.aidl.
-  MetadataBuilder& setAvailableCharacteristicKeys(
-      const std::vector<camera_metadata_tag_t>& keys);
-
-  // Extends metadata with ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS
-  // containing all previously set tags.
-  MetadataBuilder& setAvailableCharacteristicKeys();
-
-  // Build CameraMetadata instance.
-  //
-  // Returns nullptr in case something went wrong.
-  std::unique_ptr<::aidl::android::hardware::camera::device::CameraMetadata>
-  build() const;
-
- private:
-  // Maps metadata tags to vectors of values for the given tag.
-  std::map<camera_metadata_tag_t,
-           std::variant<std::vector<int64_t>, std::vector<int32_t>,
-                        std::vector<uint8_t>, std::vector<float>,
-                        std::vector<camera_metadata_rational_t>>>
-      mEntryMap;
-};
-
-}  // namespace virtualcamera
-}  // namespace companion
-}  // namespace android
-
-#endif  // ANDROID_COMPANION_VIRTUALCAMERA_METADATABUILDER_H
diff --git a/services/camera/virtualcamera/util/MetadataUtil.cc b/services/camera/virtualcamera/util/MetadataUtil.cc
new file mode 100644
index 0000000..e3d9e28
--- /dev/null
+++ b/services/camera/virtualcamera/util/MetadataUtil.cc
@@ -0,0 +1,729 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MetadataUtil"
+
+#include "MetadataUtil.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <utility>
+#include <variant>
+#include <vector>
+
+#include "CameraMetadata.h"
+#include "aidl/android/hardware/camera/device/CameraMetadata.h"
+#include "log/log.h"
+#include "system/camera_metadata.h"
+#include "util/Util.h"
+#include "utils/Errors.h"
+
+namespace android {
+namespace companion {
+namespace virtualcamera {
+
+namespace {
+
+using ::android::hardware::camera::common::helper::CameraMetadata;
+
+template <typename To, typename From>
+std::vector<To> convertTo(const std::vector<From>& from) {
+  std::vector<To> to;
+  to.reserve(from.size());
+  std::transform(from.begin(), from.end(), std::back_inserter(to),
+                 [](const From& x) { return static_cast<To>(x); });
+  return to;
+}
+
+template <typename To, typename From>
+std::vector<To> asVectorOf(const From from) {
+  return std::vector<To>({static_cast<To>(from)});
+}
+
+}  // namespace
+
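+// Usage sketch for the helpers above (illustrative only):
+// asVectorOf<uint8_t>(ANDROID_CONTROL_MODE_AUTO) wraps the enum value in a
+// one-element std::vector<uint8_t>, while convertTo<uint8_t>(modes) applies
+// the same static_cast to every element of a vector of enums.
+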
+MetadataBuilder& MetadataBuilder::setSupportedHardwareLevel(
+    camera_metadata_enum_android_info_supported_hardware_level_t hwLevel) {
+  mEntryMap[ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL] =
+      asVectorOf<uint8_t>(hwLevel);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setFlashAvailable(bool flashAvailable) {
+  const uint8_t metadataVal = flashAvailable
+                                  ? ANDROID_FLASH_INFO_AVAILABLE_TRUE
+                                  : ANDROID_FLASH_INFO_AVAILABLE_FALSE;
+  mEntryMap[ANDROID_FLASH_INFO_AVAILABLE] = asVectorOf<uint8_t>(metadataVal);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setFlashState(
+    const camera_metadata_enum_android_flash_state_t flashState) {
+  mEntryMap[ANDROID_FLASH_STATE] = asVectorOf<uint8_t>(flashState);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setFlashMode(
+    const camera_metadata_enum_android_flash_mode_t flashMode) {
+  mEntryMap[ANDROID_FLASH_MODE] = asVectorOf<uint8_t>(flashMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setLensFacing(
+    camera_metadata_enum_android_lens_facing lensFacing) {
+  mEntryMap[ANDROID_LENS_FACING] = asVectorOf<uint8_t>(lensFacing);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorReadoutTimestamp(
+    const camera_metadata_enum_android_sensor_readout_timestamp_t
+        sensorReadoutTimestamp) {
+  mEntryMap[ANDROID_SENSOR_READOUT_TIMESTAMP] =
+      asVectorOf<uint8_t>(sensorReadoutTimestamp);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableFocalLengths(
+    const std::vector<float>& focalLengths) {
+  mEntryMap[ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS] = focalLengths;
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setFocalLength(float focalLength) {
+  mEntryMap[ANDROID_LENS_FOCAL_LENGTH] = asVectorOf<float>(focalLength);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorOrientation(int32_t sensorOrientation) {
+  mEntryMap[ANDROID_SENSOR_ORIENTATION] = asVectorOf<int32_t>(sensorOrientation);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorTimestampSource(
+    const camera_metadata_enum_android_sensor_info_timestamp_source_t
+        timestampSource) {
+  mEntryMap[ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE] =
+      asVectorOf<uint8_t>(timestampSource);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorTimestamp(
+    std::chrono::nanoseconds timestamp) {
+  mEntryMap[ANDROID_SENSOR_TIMESTAMP] = asVectorOf<int64_t>(timestamp.count());
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableFaceDetectModes(
+    const std::vector<camera_metadata_enum_android_statistics_face_detect_mode_t>&
+        faceDetectModes) {
+  mEntryMap[ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES] =
+      convertTo<uint8_t>(faceDetectModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableTestPatternModes(
+    const std::vector<camera_metadata_enum_android_sensor_test_pattern_mode>&
+        testPatternModes) {
+  mEntryMap[ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES] =
+      convertTo<int32_t>(testPatternModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setFaceDetectMode(
+    const camera_metadata_enum_android_statistics_face_detect_mode_t
+        faceDetectMode) {
+  mEntryMap[ANDROID_STATISTICS_FACE_DETECT_MODE] =
+      asVectorOf<uint8_t>(faceDetectMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAvailableModes(
+    const std::vector<camera_metadata_enum_android_control_mode_t>&
+        availableModes) {
+  mEntryMap[ANDROID_CONTROL_AVAILABLE_MODES] =
+      convertTo<uint8_t>(availableModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlMode(
+    const camera_metadata_enum_android_control_mode_t mode) {
+  mEntryMap[ANDROID_CONTROL_MODE] = asVectorOf<uint8_t>(mode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAvailableSceneModes(
+    const std::vector<camera_metadata_enum_android_control_scene_mode>&
+        availableSceneModes) {
+  mEntryMap[ANDROID_CONTROL_AVAILABLE_SCENE_MODES] =
+      convertTo<uint8_t>(availableSceneModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAvailableEffects(
+    const std::vector<camera_metadata_enum_android_control_effect_mode>&
+        availableEffects) {
+  mEntryMap[ANDROID_CONTROL_AVAILABLE_EFFECTS] =
+      convertTo<uint8_t>(availableEffects);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlEffectMode(
+    const camera_metadata_enum_android_control_effect_mode_t effectMode) {
+  mEntryMap[ANDROID_CONTROL_EFFECT_MODE] = asVectorOf<uint8_t>(effectMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAvailableVideoStabilizationModes(
+    const std::vector<
+        camera_metadata_enum_android_control_video_stabilization_mode_t>&
+        videoStabilizationModes) {
+  mEntryMap[ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES] =
+      convertTo<uint8_t>(videoStabilizationModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAfAvailableModes(
+    const std::vector<camera_metadata_enum_android_control_af_mode_t>&
+        availableModes) {
+  mEntryMap[ANDROID_CONTROL_AF_AVAILABLE_MODES] =
+      convertTo<uint8_t>(availableModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAfMode(
+    const camera_metadata_enum_android_control_af_mode_t mode) {
+  mEntryMap[ANDROID_CONTROL_AF_MODE] = asVectorOf<uint8_t>(mode);
+  return *this;
+}
+
+// See ANDROID_CONTROL_AF_TRIGGER in CameraMetadataTag.aidl.
+MetadataBuilder& MetadataBuilder::setControlAfTrigger(
+    const camera_metadata_enum_android_control_af_trigger_t trigger) {
+  mEntryMap[ANDROID_CONTROL_AF_TRIGGER] = asVectorOf<uint8_t>(trigger);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeAvailableFpsRanges(
+    const std::vector<FpsRange>& fpsRanges) {
+  std::vector<int32_t> ranges;
+  ranges.reserve(2 * fpsRanges.size());
+  for (const FpsRange fpsRange : fpsRanges) {
+    ranges.push_back(fpsRange.minFps);
+    ranges.push_back(fpsRange.maxFps);
+  }
+  mEntryMap[ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES] = std::move(ranges);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeTargetFpsRange(
+    const int32_t minFps, const int32_t maxFps) {
+  mEntryMap[ANDROID_CONTROL_AE_TARGET_FPS_RANGE] =
+      std::vector<int32_t>({minFps, maxFps});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeMode(
+    camera_metadata_enum_android_control_ae_mode_t mode) {
+  mEntryMap[ANDROID_CONTROL_AE_MODE] = asVectorOf<uint8_t>(mode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeAvailableModes(
+    const std::vector<camera_metadata_enum_android_control_ae_mode_t>& modes) {
+  mEntryMap[ANDROID_CONTROL_AE_AVAILABLE_MODES] = convertTo<uint8_t>(modes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAePrecaptureTrigger(
+    const camera_metadata_enum_android_control_ae_precapture_trigger_t trigger) {
+  mEntryMap[ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER] =
+      asVectorOf<uint8_t>(trigger);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlMaxRegions(int32_t maxAeRegions,
+                                                       int32_t maxAwbRegions,
+                                                       int32_t maxAfRegions) {
+  mEntryMap[ANDROID_CONTROL_MAX_REGIONS] =
+      std::vector<int32_t>({maxAeRegions, maxAwbRegions, maxAfRegions});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAvailableAwbModes(
+    const std::vector<camera_metadata_enum_android_control_awb_mode>& awbModes) {
+  mEntryMap[ANDROID_CONTROL_AWB_AVAILABLE_MODES] = convertTo<uint8_t>(awbModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAwbMode(
+    const camera_metadata_enum_android_control_awb_mode awbMode) {
+  mEntryMap[ANDROID_CONTROL_AWB_MODE] = asVectorOf<uint8_t>(awbMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAwbLockAvailable(
+    const bool awbLockAvailable) {
+  const uint8_t lockAvailable = awbLockAvailable
+                                    ? ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE
+                                    : ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
+  mEntryMap[ANDROID_CONTROL_AWB_LOCK_AVAILABLE] =
+      std::vector<uint8_t>({lockAvailable});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeAvailableAntibandingModes(
+    const std::vector<camera_metadata_enum_android_control_ae_antibanding_mode_t>&
+        antibandingModes) {
+  mEntryMap[ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES] =
+      convertTo<uint8_t>(antibandingModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeAntibandingMode(
+    const camera_metadata_enum_android_control_ae_antibanding_mode_t
+        antibandingMode) {
+  mEntryMap[ANDROID_CONTROL_AE_ANTIBANDING_MODE] =
+      asVectorOf<uint8_t>(antibandingMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeLockAvailable(
+    const bool aeLockAvailable) {
+  const uint8_t lockAvailable = aeLockAvailable
+                                    ? ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE
+                                    : ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
+  mEntryMap[ANDROID_CONTROL_AE_LOCK_AVAILABLE] =
+      asVectorOf<uint8_t>(lockAvailable);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeRegions(
+    const std::vector<ControlRegion>& aeRegions) {
+  std::vector<int32_t> regions;
+  regions.reserve(5 * aeRegions.size());
+  for (const ControlRegion& region : aeRegions) {
+    regions.push_back(region.x0);
+    regions.push_back(region.y0);
+    regions.push_back(region.x1);
+    regions.push_back(region.y1);
+    regions.push_back(region.weight);
+  }
+  mEntryMap[ANDROID_CONTROL_AE_REGIONS] = std::move(regions);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAfRegions(
+    const std::vector<ControlRegion>& afRegions) {
+  std::vector<int32_t> regions;
+  regions.reserve(5 * afRegions.size());
+  for (const ControlRegion& region : afRegions) {
+    regions.push_back(region.x0);
+    regions.push_back(region.y0);
+    regions.push_back(region.x1);
+    regions.push_back(region.y1);
+    regions.push_back(region.weight);
+  }
+  mEntryMap[ANDROID_CONTROL_AF_REGIONS] = std::move(regions);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAwbRegions(
+    const std::vector<ControlRegion>& awbRegions) {
+  std::vector<int32_t> regions;
+  regions.reserve(5 * awbRegions.size());
+  for (const ControlRegion& region : awbRegions) {
+    regions.push_back(region.x0);
+    regions.push_back(region.y0);
+    regions.push_back(region.x1);
+    regions.push_back(region.y1);
+    regions.push_back(region.weight);
+  }
+  mEntryMap[ANDROID_CONTROL_AWB_REGIONS] = std::move(regions);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlCaptureIntent(
+    const camera_metadata_enum_android_control_capture_intent_t intent) {
+  mEntryMap[ANDROID_CONTROL_CAPTURE_INTENT] = asVectorOf<uint8_t>(intent);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setCropRegion(const int32_t x, const int32_t y,
+                                                const int32_t width,
+                                                const int32_t height) {
+  mEntryMap[ANDROID_SCALER_CROP_REGION] =
+      std::vector<int32_t>({x, y, width, height});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setMaxJpegSize(const int32_t size) {
+  mEntryMap[ANDROID_JPEG_MAX_SIZE] = asVectorOf<int32_t>(size);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setMaxFrameDuration(
+    const std::chrono::nanoseconds duration) {
+  mEntryMap[ANDROID_SENSOR_INFO_MAX_FRAME_DURATION] =
+      asVectorOf<int64_t>(duration.count());
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setJpegAvailableThumbnailSizes(
+    const std::vector<Resolution>& thumbnailSizes) {
+  std::vector<int32_t> sizes;
+  sizes.reserve(thumbnailSizes.size() * 2);
+  for (const Resolution& resolution : thumbnailSizes) {
+    sizes.push_back(resolution.width);
+    sizes.push_back(resolution.height);
+  }
+  mEntryMap[ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES] = std::move(sizes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setJpegQuality(const uint8_t quality) {
+  mEntryMap[ANDROID_JPEG_QUALITY] = asVectorOf<uint8_t>(quality);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setJpegThumbnailSize(const int width,
+                                                       const int height) {
+  mEntryMap[ANDROID_JPEG_THUMBNAIL_SIZE] = std::vector<int32_t>({width, height});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setJpegThumbnailQuality(const uint8_t quality) {
+  mEntryMap[ANDROID_JPEG_THUMBNAIL_QUALITY] = asVectorOf<uint8_t>(quality);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setMaxNumberOutputStreams(
+    const int32_t maxRawStreams, const int32_t maxProcessedStreams,
+    const int32_t maxStallStreams) {
+  mEntryMap[ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS] = std::vector<int32_t>(
+      {maxRawStreams, maxProcessedStreams, maxStallStreams});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSyncMaxLatency(
+    camera_metadata_enum_android_sync_max_latency latency) {
+  mEntryMap[ANDROID_SYNC_MAX_LATENCY] = asVectorOf<int32_t>(latency);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setPipelineMaxDepth(const uint8_t maxDepth) {
+  mEntryMap[ANDROID_REQUEST_PIPELINE_MAX_DEPTH] = asVectorOf<uint8_t>(maxDepth);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setPipelineDepth(const uint8_t depth) {
+  mEntryMap[ANDROID_REQUEST_PIPELINE_DEPTH] = asVectorOf<uint8_t>(depth);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableRequestCapabilities(
+    const std::vector<camera_metadata_enum_android_request_available_capabilities_t>&
+        requestCapabilities) {
+  mEntryMap[ANDROID_REQUEST_AVAILABLE_CAPABILITIES] =
+      convertTo<uint8_t>(requestCapabilities);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableOutputStreamConfigurations(
+    const std::vector<StreamConfiguration>& streamConfigurations) {
+  std::vector<int32_t> metadataStreamConfigs;
+  std::vector<int64_t> metadataMinFrameDurations;
+  std::vector<int64_t> metadataStallDurations;
+  metadataStreamConfigs.reserve(streamConfigurations.size());
+  metadataMinFrameDurations.reserve(streamConfigurations.size());
+  metadataStallDurations.reserve(streamConfigurations.size());
+
+  for (const auto& config : streamConfigurations) {
+    metadataStreamConfigs.push_back(config.format);
+    metadataStreamConfigs.push_back(config.width);
+    metadataStreamConfigs.push_back(config.height);
+    metadataStreamConfigs.push_back(
+        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
+
+    metadataMinFrameDurations.push_back(config.format);
+    metadataMinFrameDurations.push_back(config.width);
+    metadataMinFrameDurations.push_back(config.height);
+    metadataMinFrameDurations.push_back(config.minFrameDuration.count());
+
+    metadataStallDurations.push_back(config.format);
+    metadataStallDurations.push_back(config.width);
+    metadataStallDurations.push_back(config.height);
+    metadataStallDurations.push_back(config.minStallDuration.count());
+  }
+
+  mEntryMap[ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS] =
+      std::move(metadataStreamConfigs);
+  mEntryMap[ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS] =
+      std::move(metadataMinFrameDurations);
+  mEntryMap[ANDROID_SCALER_AVAILABLE_STALL_DURATIONS] =
+      std::move(metadataStallDurations);
+
+  return *this;
+}
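+
+// Worked example (hypothetical values): one StreamConfiguration with
+// format = HAL_PIXEL_FORMAT_YCBCR_420_888, width = 640, height = 480,
+// minFrameDuration of ~33.3ms (30 fps) and minStallDuration of 0 is flattened
+// by the setter above into:
+//   ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS:
+//     {format, 640, 480, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT}
+//   ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS: {format, 640, 480, 33333333}
+//   ANDROID_SCALER_AVAILABLE_STALL_DURATIONS:     {format, 640, 480, 0}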
+
+MetadataBuilder& MetadataBuilder::setAvailableAberrationCorrectionModes(
+    const std::vector<camera_metadata_enum_android_color_correction_aberration_mode>&
+        aberrationCorrectionModes) {
+  mEntryMap[ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES] =
+      convertTo<uint8_t>(aberrationCorrectionModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAberrationCorrectionMode(
+    const camera_metadata_enum_android_color_correction_aberration_mode
+        aberrationCorrectionMode) {
+  mEntryMap[ANDROID_COLOR_CORRECTION_ABERRATION_MODE] =
+      asVectorOf<uint8_t>(aberrationCorrectionMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableNoiseReductionModes(
+    const std::vector<camera_metadata_enum_android_noise_reduction_mode>&
+        noiseReductionModes) {
+  mEntryMap[ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES] =
+      convertTo<uint8_t>(noiseReductionModes);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setNoiseReductionMode(
+    camera_metadata_enum_android_noise_reduction_mode noiseReductionMode) {
+  mEntryMap[ANDROID_NOISE_REDUCTION_MODE] =
+      asVectorOf<uint8_t>(noiseReductionMode);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setRequestPartialResultCount(
+    const int partialResultCount) {
+  mEntryMap[ANDROID_REQUEST_PARTIAL_RESULT_COUNT] =
+      asVectorOf<int32_t>(partialResultCount);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setCroppingType(
+    const camera_metadata_enum_android_scaler_cropping_type croppingType) {
+  mEntryMap[ANDROID_SCALER_CROPPING_TYPE] = asVectorOf<uint8_t>(croppingType);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setMaxFaceCount(const int maxFaceCount) {
+  mEntryMap[ANDROID_STATISTICS_INFO_MAX_FACE_COUNT] =
+      asVectorOf<int32_t>(maxFaceCount);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableMaxDigitalZoom(const float maxZoom) {
+  mEntryMap[ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM] =
+      asVectorOf<float>(maxZoom);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlZoomRatioRange(const float min,
+                                                           const float max) {
+  mEntryMap[ANDROID_CONTROL_ZOOM_RATIO_RANGE] = std::vector<float>({min, max});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorActiveArraySize(int x0, int y0,
+                                                           int x1, int y1) {
+  mEntryMap[ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE] =
+      std::vector<int32_t>({x0, y0, x1, y1});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorPixelArraySize(int width,
+                                                          int height) {
+  mEntryMap[ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE] =
+      std::vector<int32_t>({width, height});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setSensorPhysicalSize(float width,
+                                                        float height) {
+  mEntryMap[ANDROID_SENSOR_INFO_PHYSICAL_SIZE] =
+      std::vector<float>({width, height});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeCompensationRange(int32_t min,
+                                                                int32_t max) {
+  mEntryMap[ANDROID_CONTROL_AE_COMPENSATION_RANGE] =
+      std::vector<int32_t>({min, max});
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeCompensationStep(
+    const camera_metadata_rational step) {
+  mEntryMap[ANDROID_CONTROL_AE_COMPENSATION_STEP] =
+      asVectorOf<camera_metadata_rational>(step);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setControlAeExposureCompensation(
+    const int32_t exposureCompensation) {
+  mEntryMap[ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION] =
+      asVectorOf<int32_t>(exposureCompensation);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableRequestKeys(
+    const std::vector<int32_t>& keys) {
+  mEntryMap[ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS] = keys;
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableResultKeys(
+    const std::vector<int32_t>& keys) {
+  mEntryMap[ANDROID_REQUEST_AVAILABLE_RESULT_KEYS] = keys;
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableCapabilities(
+    const std::vector<camera_metadata_enum_android_request_available_capabilities_t>&
+        capabilities) {
+  mEntryMap[ANDROID_REQUEST_AVAILABLE_CAPABILITIES] =
+      convertTo<uint8_t>(capabilities);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableCharacteristicKeys(
+    const std::vector<camera_metadata_tag_t>& keys) {
+  mEntryMap[ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS] =
+      convertTo<int32_t>(keys);
+  return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setAvailableCharacteristicKeys() {
+  mExtendWithAvailableCharacteristicsKeys = true;
+  return *this;
+}
+
+std::unique_ptr<aidl::android::hardware::camera::device::CameraMetadata>
+MetadataBuilder::build() {
+  if (mExtendWithAvailableCharacteristicsKeys) {
+    std::vector<camera_metadata_tag_t> availableKeys;
+    availableKeys.reserve(mEntryMap.size());
+    for (const auto& [key, _] : mEntryMap) {
+      if (key != ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS) {
+        availableKeys.push_back(key);
+      }
+    }
+    setAvailableCharacteristicKeys(availableKeys);
+  }
+
+  CameraMetadata metadataHelper;
+  for (const auto& entry : mEntryMap) {
+    status_t ret = std::visit(
+        [&](auto&& arg) {
+          return metadataHelper.update(entry.first, arg.data(), arg.size());
+        },
+        entry.second);
+    if (ret != NO_ERROR) {
+      ALOGE("Failed to update metadata with key %d - %s: %s", entry.first,
+            get_camera_metadata_tag_name(entry.first),
+            ::android::statusToString(ret).c_str());
+      return nullptr;
+    }
+  }
+
+  const camera_metadata_t* metadata = metadataHelper.getAndLock();
+  if (metadata == nullptr) {
+    ALOGE(
+        "Failure when constructing metadata -> CameraMetadata helper returned "
+        "nullptr");
+    return nullptr;
+  }
+
+  auto aidlMetadata =
+      std::make_unique<aidl::android::hardware::camera::device::CameraMetadata>();
+  const uint8_t* data_ptr = reinterpret_cast<const uint8_t*>(metadata);
+  aidlMetadata->metadata.assign(data_ptr,
+                                data_ptr + get_camera_metadata_size(metadata));
+  metadataHelper.unlock(metadata);
+
+  return aidlMetadata;
+}
+
+std::optional<int32_t> getJpegQuality(
+    const aidl::android::hardware::camera::device::CameraMetadata& cameraMetadata) {
+  auto metadata =
+      reinterpret_cast<const camera_metadata_t*>(cameraMetadata.metadata.data());
+
+  camera_metadata_ro_entry_t entry;
+  if (find_camera_metadata_ro_entry(metadata, ANDROID_JPEG_QUALITY, &entry) !=
+      OK) {
+    return std::nullopt;
+  }
+
+  return *entry.data.i32;
+}
+
+std::optional<Resolution> getJpegThumbnailSize(
+    const aidl::android::hardware::camera::device::CameraMetadata& cameraMetadata) {
+  auto metadata =
+      reinterpret_cast<const camera_metadata_t*>(cameraMetadata.metadata.data());
+
+  camera_metadata_ro_entry_t entry;
+  if (find_camera_metadata_ro_entry(metadata, ANDROID_JPEG_THUMBNAIL_SIZE,
+                                    &entry) != OK) {
+    return std::nullopt;
+  }
+
+  return Resolution(entry.data.i32[0], entry.data.i32[1]);
+}
+
+std::optional<int32_t> getJpegThumbnailQuality(
+    const aidl::android::hardware::camera::device::CameraMetadata& cameraMetadata) {
+  auto metadata =
+      reinterpret_cast<const camera_metadata_t*>(cameraMetadata.metadata.data());
+
+  camera_metadata_ro_entry_t entry;
+  if (find_camera_metadata_ro_entry(metadata, ANDROID_JPEG_THUMBNAIL_QUALITY,
+                                    &entry) != OK) {
+    return std::nullopt;
+  }
+
+  return *entry.data.i32;
+}
+
+std::vector<Resolution> getJpegAvailableThumbnailSizes(
+    const aidl::android::hardware::camera::device::CameraMetadata& cameraMetadata) {
+  auto metadata =
+      reinterpret_cast<const camera_metadata_t*>(cameraMetadata.metadata.data());
+
+  camera_metadata_ro_entry_t entry;
+  if (find_camera_metadata_ro_entry(
+          metadata, ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, &entry) != OK) {
+    return {};
+  }
+
+  std::vector<Resolution> thumbnailSizes;
+  thumbnailSizes.reserve(entry.count / 2);
+  for (int i = 0; i < entry.count; i += 2) {
+    thumbnailSizes.emplace_back(entry.data.i32[i], entry.data.i32[i + 1]);
+  }
+  return thumbnailSizes;
+}
+
+}  // namespace virtualcamera
+}  // namespace companion
+}  // namespace android
diff --git a/services/camera/virtualcamera/util/MetadataUtil.h b/services/camera/virtualcamera/util/MetadataUtil.h
new file mode 100644
index 0000000..b4d60cb
--- /dev/null
+++ b/services/camera/virtualcamera/util/MetadataUtil.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_COMPANION_VIRTUALCAMERA_METADATAUTIL_H
+#define ANDROID_COMPANION_VIRTUALCAMERA_METADATAUTIL_H
+
+#include <chrono>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <variant>
+#include <vector>
+
+#include "aidl/android/hardware/camera/device/CameraMetadata.h"
+#include "system/camera_metadata.h"
+#include "util/Util.h"
+
+namespace android {
+namespace companion {
+namespace virtualcamera {
+
+// Convenience builder for the
+// aidl::android::hardware::camera::device::CameraMetadata.
+//
+// Calling the same builder setter multiple times will overwrite the value.
+// This class is not thread-safe.
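+//
+// Usage sketch (illustrative only; the chosen tag values are arbitrary):
+//
+//   std::unique_ptr<aidl::android::hardware::camera::device::CameraMetadata>
+//       metadata = MetadataBuilder()
+//                      .setSupportedHardwareLevel(
+//                          ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL)
+//                      .setFlashAvailable(false)
+//                      .setSensorOrientation(0)
+//                      .setAvailableCharacteristicKeys()
+//                      .build();  // Returns nullptr on failure.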
+class MetadataBuilder {
+ public:
+  struct StreamConfiguration {
+    int32_t width = 0;
+    int32_t height = 0;
+    int32_t format = 0;
+    // Minimal frame duration - corresponds to maximal FPS for given format.
+    // See ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS in CameraMetadataTag.aidl.
+    std::chrono::nanoseconds minFrameDuration{0};
+    // Minimal stall duration.
+    // See ANDROID_SCALER_AVAILABLE_STALL_DURATIONS in CameraMetadataTag.aidl.
+    std::chrono::nanoseconds minStallDuration{0};
+  };
+
+  struct ControlRegion {
+    int32_t x0 = 0;
+    int32_t y0 = 0;
+    int32_t x1 = 0;
+    int32_t y1 = 0;
+    int32_t weight = 0;
+  };
+
+  struct FpsRange {
+    int32_t minFps;
+    int32_t maxFps;
+
+    bool operator<(const FpsRange& other) const {
+      return maxFps == other.maxFps ? minFps < other.minFps
+                                    : maxFps < other.maxFps;
+    }
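+
+    // Ordering example (illustrative): {15, 30} < {30, 30} < {7, 60}; ranges
+    // compare by maxFps first and by minFps when maxFps is equal.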
+  };
+
+  MetadataBuilder() = default;
+  ~MetadataBuilder() = default;
+
+  // See ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL in CameraMetadataTag.aidl.
+  MetadataBuilder& setSupportedHardwareLevel(
+      camera_metadata_enum_android_info_supported_hardware_level_t hwLevel);
+
+  // Whether this camera device has a flash unit
+  // See ANDROID_FLASH_INFO_AVAILABLE in CameraMetadataTag.aidl.
+  MetadataBuilder& setFlashAvailable(bool flashAvailable);
+
+  // See FLASH_STATE in CaptureResult.java.
+  MetadataBuilder& setFlashState(
+      camera_metadata_enum_android_flash_state_t flashState);
+
+  // See FLASH_MODE in CaptureRequest.java.
+  MetadataBuilder& setFlashMode(
+      camera_metadata_enum_android_flash_mode_t flashMode);
+
+  // See ANDROID_LENS_FACING in CameraMetadataTag.aidl.
+  MetadataBuilder& setLensFacing(
+      camera_metadata_enum_android_lens_facing lensFacing);
+
+  // See ANDROID_SENSOR_READOUT_TIMESTAMP in CameraMetadataTag.aidl.
+  MetadataBuilder& setSensorReadoutTimestamp(
+      camera_metadata_enum_android_sensor_readout_timestamp_t
+          sensorReadoutTimestamp);
+
+  // See ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableFocalLengths(
+      const std::vector<float>& focalLengths);
+
+  // See ANDROID_LENS_FOCAL_LENGTH in CameraMetadataTag.aidl.
+  MetadataBuilder& setFocalLength(float focalLength);
+
+  // See ANDROID_SENSOR_ORIENTATION in CameraMetadataTag.aidl.
+  MetadataBuilder& setSensorOrientation(int32_t sensorOrientation);
+
+  // Time at start of exposure of first row of the image
+  // sensor active array, in nanoseconds.
+  //
+  // See ANDROID_SENSOR_TIMESTAMP in CameraMetadataTag.aidl.
+  MetadataBuilder& setSensorTimestamp(std::chrono::nanoseconds timestamp);
+
+  // See SENSOR_INFO_TIMESTAMP_SOURCE in CameraCharacteristics.java.
+  MetadataBuilder& setSensorTimestampSource(
+      camera_metadata_enum_android_sensor_info_timestamp_source_t timestampSource);
+
+  // See ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE in CameraMetadataTag.aidl.
+  MetadataBuilder& setSensorActiveArraySize(int x0, int y0, int x1, int y1);
+
+  // See ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE in CameraMetadataTag.aidl.
+  MetadataBuilder& setSensorPixelArraySize(int width, int height);
+
+  // See ANDROID_SENSOR_INFO_PHYSICAL_SIZE in CameraMetadataTag.aidl.
+  MetadataBuilder& setSensorPhysicalSize(float width, float height);
+
+  // See ANDROID_STATISTICS_FACE_DETECT_MODE in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableFaceDetectModes(
+      const std::vector<camera_metadata_enum_android_statistics_face_detect_mode_t>&
+          faceDetectMode);
+
+  // See SENSOR_AVAILABLE_TEST_PATTERN_MODES in CameraCharacteristics.java.
+  MetadataBuilder& setAvailableTestPatternModes(
+      const std::vector<camera_metadata_enum_android_sensor_test_pattern_mode>&
+          testPatternModes);
+
+  // See ANDROID_STATISTICS_FACE_DETECT_MODE in CaptureRequest.java.
+  MetadataBuilder& setFaceDetectMode(
+      camera_metadata_enum_android_statistics_face_detect_mode_t faceDetectMode);
+
+  // Sets available stream configurations along with corresponding minimal frame
+  // durations (corresponding to max fps) and stall durations.
+  //
+  // See ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+  // ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS and
+  // ANDROID_SCALER_AVAILABLE_STALL_DURATIONS in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableOutputStreamConfigurations(
+      const std::vector<StreamConfiguration>& streamConfigurations);
+
+  // See COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES in CameraCharacteristics.java.
+  MetadataBuilder& setAvailableAberrationCorrectionModes(
+      const std::vector<
+          camera_metadata_enum_android_color_correction_aberration_mode>&
+          aberrationCorrectionModes);
+
+  // See COLOR_CORRECTION_ABERRATION_MODE in CaptureRequest.java.
+  MetadataBuilder& setAberrationCorrectionMode(
+      camera_metadata_enum_android_color_correction_aberration_mode
+          aberrationCorrectionMode);
+
+  // See NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES in CameraCharacteristics.java.
+  MetadataBuilder& setAvailableNoiseReductionModes(
+      const std::vector<camera_metadata_enum_android_noise_reduction_mode>&
+          noiseReductionModes);
+
+  // See NOISE_REDUCTION_MODE in CaptureRequest.java.
+  MetadataBuilder& setNoiseReductionMode(
+      camera_metadata_enum_android_noise_reduction_mode noiseReductionMode);
+
+  // See REQUEST_PARTIAL_RESULT_COUNT in CameraCharacteristics.java.
+  MetadataBuilder& setRequestPartialResultCount(int partialResultCount);
+
+  // See SCALER_CROPPING_TYPE in CameraCharacteristics.java.
+  MetadataBuilder& setCroppingType(
+      camera_metadata_enum_android_scaler_cropping_type croppingType);
+
+  // See STATISTICS_INFO_MAX_FACE_COUNT in CameraCharacteristics.java.
+  MetadataBuilder& setMaxFaceCount(int maxFaceCount);
+
+  // See ANDROID_CONTROL_AVAILABLE_MODES in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAvailableModes(
+      const std::vector<camera_metadata_enum_android_control_mode_t>&
+          availableModes);
+
+  // See ANDROID_CONTROL_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlMode(
+      camera_metadata_enum_android_control_mode_t mode);
+
+  // See ANDROID_CONTROL_AVAILABLE_SCENE_MODES in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAvailableSceneModes(
+      const std::vector<camera_metadata_enum_android_control_scene_mode>&
+          availableSceneModes);
+
+  // See ANDROID_CONTROL_AVAILABLE_EFFECTS in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAvailableEffects(
+      const std::vector<camera_metadata_enum_android_control_effect_mode>&
+          availableEffects);
+
+  // See CONTROL_EFFECT_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlEffectMode(
+      camera_metadata_enum_android_control_effect_mode_t effectMode);
+
+  // See ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES
+  MetadataBuilder& setControlAvailableVideoStabilizationModes(
+      const std::vector<
+          camera_metadata_enum_android_control_video_stabilization_mode_t>&
+          videoStabilizationModes);
+
+  // See CONTROL_AE_AVAILABLE_ANTIBANDING_MODES in CameraCharacteristics.java.
+  MetadataBuilder& setControlAeAvailableAntibandingModes(
+      const std::vector<camera_metadata_enum_android_control_ae_antibanding_mode_t>&
+          antibandingModes);
+
+  // See CONTROL_AE_ANTIBANDING_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlAeAntibandingMode(
+      camera_metadata_enum_android_control_ae_antibanding_mode_t antibandingMode);
+
+  // See ANDROID_CONTROL_AE_COMPENSATION_RANGE in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAeCompensationRange(int32_t min, int32_t max);
+
+  // See ANDROID_CONTROL_AE_COMPENSATION_STEP in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAeCompensationStep(camera_metadata_rational step);
+
+  // See ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAeExposureCompensation(int32_t exposureCompensation);
+
+  // See ANDROID_CONTROL_AE_AVAILABLE_MODES in CameraCharacteristics.java.
+  MetadataBuilder& setControlAeAvailableModes(
+      const std::vector<camera_metadata_enum_android_control_ae_mode_t>& modes);
+
+  // See ANDROID_CONTROL_AE_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlAeMode(
+      camera_metadata_enum_android_control_ae_mode_t mode);
+
+  // See ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER in CaptureRequest.java.
+  MetadataBuilder& setControlAePrecaptureTrigger(
+      camera_metadata_enum_android_control_ae_precapture_trigger_t trigger);
+
+  // See ANDROID_CONTROL_AF_AVAILABLE_MODES in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAfAvailableModes(
+      const std::vector<camera_metadata_enum_android_control_af_mode_t>&
+          availableModes);
+
+  // See ANDROID_CONTROL_AF_MODE in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAfMode(
+      const camera_metadata_enum_android_control_af_mode_t mode);
+
+  // See ANDROID_CONTROL_AF_TRIGGER in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAfTrigger(
+      const camera_metadata_enum_android_control_af_trigger_t trigger);
+
+  // See ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAeAvailableFpsRanges(
+      const std::vector<FpsRange>& fpsRanges);
+
+  // See ANDROID_CONTROL_AE_TARGET_FPS_RANGE in CaptureRequest.java.
+  MetadataBuilder& setControlAeTargetFpsRange(int32_t min, int32_t max);
+
+  // See ANDROID_CONTROL_CAPTURE_INTENT in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlCaptureIntent(
+      camera_metadata_enum_android_control_capture_intent_t intent);
+
+  // See ANDROID_CONTROL_MAX_REGIONS in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlMaxRegions(int32_t maxAeRegions,
+                                        int32_t maxAwbRegions,
+                                        int32_t maxAfRegions);
+
+  // See ANDROID_CONTROL_AWB_AVAILABLE_MODES in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAvailableAwbModes(
+      const std::vector<camera_metadata_enum_android_control_awb_mode>& awbModes);
+
+  // See ANDROID_CONTROL_AWB_MODE in CaptureRequest.java.
+  MetadataBuilder& setControlAwbMode(
+      camera_metadata_enum_android_control_awb_mode awb);
+
+  // See CONTROL_AWB_LOCK_AVAILABLE in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAwbLockAvailable(bool awbLockAvailable);
+
+  // See CONTROL_AE_LOCK_AVAILABLE in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAeLockAvailable(bool aeLockAvailable);
+
+  // See ANDROID_CONTROL_AE_REGIONS in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAeRegions(
+      const std::vector<ControlRegion>& aeRegions);
+
+  // See ANDROID_CONTROL_AWB_REGIONS in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAwbRegions(
+      const std::vector<ControlRegion>& awbRegions);
+
+  // See ANDROID_SCALER_CROP_REGION in CaptureRequest.java.
+  MetadataBuilder& setCropRegion(int32_t x, int32_t y, int32_t width,
+                                 int32_t height);
+
+  // See ANDROID_CONTROL_AF_REGIONS in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlAfRegions(
+      const std::vector<ControlRegion>& afRegions);
+
+  // The size of the compressed JPEG image, in bytes.
+  //
+  // See ANDROID_JPEG_SIZE in CameraMetadataTag.aidl.
+  MetadataBuilder& setMaxJpegSize(int32_t size);
+
+  // See SENSOR_INFO_MAX_FRAME_DURATION in CameraCharacteristics.java.
+  MetadataBuilder& setMaxFrameDuration(std::chrono::nanoseconds duration);
+
+  // See JPEG_AVAILABLE_THUMBNAIL_SIZES in CameraCharacteristics.java.
+  MetadataBuilder& setJpegAvailableThumbnailSizes(
+      const std::vector<Resolution>& thumbnailSizes);
+
+  // See JPEG_QUALITY in CaptureRequest.java.
+  MetadataBuilder& setJpegQuality(uint8_t quality);
+
+  // See JPEG_THUMBNAIL_SIZE in CaptureRequest.java.
+  MetadataBuilder& setJpegThumbnailSize(int width, int height);
+
+  // See JPEG_THUMBNAIL_QUALITY in CaptureRequest.java.
+  MetadataBuilder& setJpegThumbnailQuality(uint8_t quality);
+
+  // The maximum numbers of different types of output streams
+  // that can be configured and used simultaneously by a camera device.
+  //
+  // See ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS in CameraMetadataTag.aidl.
+  MetadataBuilder& setMaxNumberOutputStreams(int32_t maxRawStreams,
+                                             int32_t maxProcessedStreams,
+                                             int32_t maxStallStreams);
+
+  // See ANDROID_SYNC_MAX_LATENCY in CameraMetadataTag.aidl.
+  MetadataBuilder& setSyncMaxLatency(
+      camera_metadata_enum_android_sync_max_latency latency);
+
+  // See REQUEST_PIPELINE_MAX_DEPTH in CameraCharacteristics.java.
+  MetadataBuilder& setPipelineMaxDepth(uint8_t maxDepth);
+
+  // See REQUEST_PIPELINE_DEPTH in CaptureResult.java.
+  MetadataBuilder& setPipelineDepth(uint8_t depth);
+
+  // See ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableMaxDigitalZoom(const float maxZoom);
+
+  // See ANDROID_CONTROL_ZOOM_RATIO_RANGE in CameraMetadataTag.aidl.
+  MetadataBuilder& setControlZoomRatioRange(float min, float max);
+
+  // See ANDROID_REQUEST_AVAILABLE_CAPABILITIES in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableRequestCapabilities(
+      const std::vector<
+          camera_metadata_enum_android_request_available_capabilities_t>&
+          requestCapabilities);
+
+  // A list of all keys that the camera device has available to use with
+  // CaptureRequest.
+  //
+  // See ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableRequestKeys(const std::vector<int32_t>& keys);
+
+  // A list of all keys that the camera device has available to use with
+  // CaptureResult.
+  //
+  // See ANDROID_REQUEST_AVAILABLE_RESULT_KEYS in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableResultKeys(const std::vector<int32_t>& keys);
+
+  // See ANDROID_REQUEST_AVAILABLE_CAPABILITIES in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableCapabilities(
+      const std::vector<
+          camera_metadata_enum_android_request_available_capabilities_t>&
+          capabilities);
+
+  // A list of all keys that the camera device has available to use.
+  //
+  // See ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS in CameraMetadataTag.aidl.
+  MetadataBuilder& setAvailableCharacteristicKeys(
+      const std::vector<camera_metadata_tag_t>& keys);
+
+  // Extends metadata with ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS
+  // containing all set tags.
+  MetadataBuilder& setAvailableCharacteristicKeys();
+
+  // Build CameraMetadata instance.
+  //
+  // Returns nullptr in case something went wrong.
+  std::unique_ptr<::aidl::android::hardware::camera::device::CameraMetadata>
+  build();
+
+ private:
+  // Maps metadata tags to vectors of values for the given tag.
+  std::map<camera_metadata_tag_t,
+           std::variant<std::vector<int64_t>, std::vector<int32_t>,
+                        std::vector<uint8_t>, std::vector<float>,
+                        std::vector<camera_metadata_rational_t>>>
+      mEntryMap;
+  // Extend metadata with ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS.
+  bool mExtendWithAvailableCharacteristicsKeys = false;
+};
+
+// Returns JPEG_QUALITY from metadata, or nullopt if the key is not present.
+std::optional<int32_t> getJpegQuality(
+    const aidl::android::hardware::camera::device::CameraMetadata& metadata);
+
+// Returns JPEG_THUMBNAIL_SIZE from metadata, or nullopt if the key is not present.
+std::optional<Resolution> getJpegThumbnailSize(
+    const aidl::android::hardware::camera::device::CameraMetadata& metadata);
+
+// Returns JPEG_THUMBNAIL_QUALITY from metadata, or nullopt if the key is not present.
+std::optional<int32_t> getJpegThumbnailQuality(
+    const aidl::android::hardware::camera::device::CameraMetadata& metadata);
+
+// Returns JPEG_AVAILABLE_THUMBNAIL_SIZES from metadata, or an empty vector if
+// the key is not present.
+std::vector<Resolution> getJpegAvailableThumbnailSizes(
+    const aidl::android::hardware::camera::device::CameraMetadata& metadata);
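+
+// Usage sketch for the accessors above (illustrative; `requestSettings` is a
+// hypothetical CameraMetadata instance supplied by the caller):
+//
+//   std::optional<int32_t> quality = getJpegQuality(requestSettings);
+//   std::optional<Resolution> thumbnailSize =
+//       getJpegThumbnailSize(requestSettings);
+//   if (quality.has_value() && thumbnailSize.has_value()) {
+//     // Configure the JPEG compressor accordingly.
+//   }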
+
+}  // namespace virtualcamera
+}  // namespace companion
+}  // namespace android
+
+#endif  // ANDROID_COMPANION_VIRTUALCAMERA_METADATAUTIL_H
diff --git a/services/camera/virtualcamera/util/Util.cc b/services/camera/virtualcamera/util/Util.cc
index df771b1..b2048bc 100644
--- a/services/camera/virtualcamera/util/Util.cc
+++ b/services/camera/virtualcamera/util/Util.cc
@@ -20,8 +20,13 @@
 
 #include <algorithm>
 #include <array>
+#include <cstdint>
+#include <memory>
 
+#include "android/hardware_buffer.h"
 #include "jpeglib.h"
+#include "ui/GraphicBuffer.h"
+#include "utils/Errors.h"
 
 namespace android {
 namespace companion {
@@ -35,10 +40,87 @@
 // TODO(b/301023410) - Query actual max texture size.
 constexpr int kMaxTextureSize = 2048;
 constexpr int kLibJpegDctSize = DCTSIZE;
+constexpr int kMaxFpsUpperLimit = 60;
 
 constexpr std::array<Format, 2> kSupportedFormats{Format::YUV_420_888,
                                                   Format::RGBA_8888};
 
+YCbCrLockGuard::YCbCrLockGuard(std::shared_ptr<AHardwareBuffer> hwBuffer,
+                               const uint32_t usageFlags)
+    : mHwBuffer(hwBuffer) {
+  GraphicBuffer* gBuffer = GraphicBuffer::fromAHardwareBuffer(mHwBuffer.get());
+  if (gBuffer == nullptr) {
+    ALOGE("%s: Attempting to lock nullptr buffer.", __func__);
+    return;
+  }
+  mLockStatus = gBuffer->lockYCbCr(usageFlags, &mYCbCr);
+  if (mLockStatus != OK) {
+    ALOGE("%s: Failed to lock graphic buffer: %s", __func__,
+          statusToString(mLockStatus).c_str());
+  }
+}
+
+YCbCrLockGuard::~YCbCrLockGuard() {
+  if (getStatus() != OK) {
+    return;
+  }
+
+  GraphicBuffer* gBuffer = GraphicBuffer::fromAHardwareBuffer(mHwBuffer.get());
+  if (gBuffer == nullptr) {
+    return;
+  }
+  status_t status = gBuffer->unlock();
+  if (status != NO_ERROR) {
+    ALOGE("Failed to unlock graphic buffer: %s", statusToString(status).c_str());
+  }
+}
+
+status_t YCbCrLockGuard::getStatus() const {
+  return mLockStatus;
+}
+
+const android_ycbcr& YCbCrLockGuard::operator*() const {
+  LOG_ALWAYS_FATAL_IF(getStatus() != OK,
+                      "Dereferencing unlocked YCbCrLockGuard, status is %s",
+                      statusToString(mLockStatus).c_str());
+  return mYCbCr;
+}
+
+PlanesLockGuard::PlanesLockGuard(std::shared_ptr<AHardwareBuffer> hwBuffer,
+                                 const uint64_t usageFlags, sp<Fence> fence)
+    : mHwBuffer(hwBuffer) {
+  if (hwBuffer == nullptr) {
+    ALOGE("%s: Attempting to lock nullptr buffer.", __func__);
+    return;
+  }
+
+  const int32_t rawFence = fence != nullptr ? fence->get() : -1;
+  mLockStatus = static_cast<status_t>(AHardwareBuffer_lockPlanes(
+      hwBuffer.get(), usageFlags, rawFence, nullptr, &mPlanes));
+  if (mLockStatus != OK) {
+    ALOGE("%s: Failed to lock graphic buffer: %s", __func__,
+          statusToString(mLockStatus).c_str());
+  }
+}
+
+PlanesLockGuard::~PlanesLockGuard() {
+  if (getStatus() != OK || mHwBuffer == nullptr) {
+    return;
+  }
+  AHardwareBuffer_unlock(mHwBuffer.get(), /*fence=*/nullptr);
+}
+
+status_t PlanesLockGuard::getStatus() const {
+  return mLockStatus;
+}
+
+const AHardwareBuffer_Planes& PlanesLockGuard::operator*() const {
+  LOG_ALWAYS_FATAL_IF(getStatus() != OK,
+                      "Dereferencing unlocked PlanesLockGuard, status is %s",
+                      statusToString(mLockStatus).c_str());
+  return mPlanes;
+}
+
 sp<Fence> importFence(const NativeHandle& aidlHandle) {
   if (aidlHandle.fds.size() != 1) {
     return sp<Fence>::make();
@@ -54,7 +136,7 @@
 
 // Returns true if specified format is supported for virtual camera input.
 bool isFormatSupportedForInput(const int width, const int height,
-                               const Format format) {
+                               const Format format, const int maxFps) {
   if (!isPixelFormatSupportedForInput(format)) {
     return false;
   }
@@ -71,9 +153,17 @@
     return false;
   }
 
+  if (maxFps <= 0 || maxFps > kMaxFpsUpperLimit) {
+    return false;
+  }
+
   return true;
 }
 
+std::ostream& operator<<(std::ostream& os, const Resolution& resolution) {
+  return os << resolution.width << "x" << resolution.height;
+}
+
 }  // namespace virtualcamera
 }  // namespace companion
 }  // namespace android
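
A minimal usage sketch for the new RAII guard (the caller, buffer and usage flag below are illustrative; the guard API is exactly the one declared in Util.h):

    // Hypothetical caller; assumes the android::companion::virtualcamera
    // namespace and the Util.h include. hwBuffer is assumed to be a valid,
    // already allocated YUV AHardwareBuffer obtained elsewhere.
    void readLumaPlane(std::shared_ptr<AHardwareBuffer> hwBuffer) {
      YCbCrLockGuard lock(hwBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN);
      if (lock.getStatus() != OK) {
        return;  // Lock failed; the destructor will not attempt an unlock.
      }
      const android_ycbcr& ycbcr = *lock;  // Plane pointers and strides.
      const uint8_t* y = static_cast<const uint8_t*>(ycbcr.y);
      (void)y;  // ... read the Y plane here ...
    }  // ~YCbCrLockGuard unlocks the buffer exactly once.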
diff --git a/services/camera/virtualcamera/util/Util.h b/services/camera/virtualcamera/util/Util.h
index a73c99b..faae010 100644
--- a/services/camera/virtualcamera/util/Util.h
+++ b/services/camera/virtualcamera/util/Util.h
@@ -17,18 +17,82 @@
 #ifndef ANDROID_COMPANION_VIRTUALCAMERA_UTIL_H
 #define ANDROID_COMPANION_VIRTUALCAMERA_UTIL_H
 
+#include <cmath>
 #include <cstdint>
+#include <memory>
 
 #include "aidl/android/companion/virtualcamera/Format.h"
 #include "aidl/android/hardware/camera/common/Status.h"
 #include "aidl/android/hardware/camera/device/StreamBuffer.h"
 #include "android/binder_auto_utils.h"
+#include "android/hardware_buffer.h"
+#include "system/graphics.h"
 #include "ui/Fence.h"
 
 namespace android {
 namespace companion {
 namespace virtualcamera {
 
+// RAII utility class to safely lock AHardwareBuffer and obtain android_ycbcr
+// structure describing YUV plane layout.
+//
+// Access to the buffer is locked immediately after construction.
+class YCbCrLockGuard {
+ public:
+  YCbCrLockGuard(std::shared_ptr<AHardwareBuffer> hwBuffer, uint32_t usageFlags);
+  YCbCrLockGuard(YCbCrLockGuard&& other) = default;
+  ~YCbCrLockGuard();
+
+  // Returns OK if the buffer is successfully locked.
+  status_t getStatus() const;
+
+  // Dereferencing an instance of this guard returns the android_ycbcr structure
+  // describing the plane layout.
+  // The caller must check that the buffer was successfully locked
+  // before dereferencing.
+  const android_ycbcr& operator*() const;
+
+  // Disable copy.
+  YCbCrLockGuard(const YCbCrLockGuard&) = delete;
+  YCbCrLockGuard& operator=(const YCbCrLockGuard&) = delete;
+
+ private:
+  std::shared_ptr<AHardwareBuffer> mHwBuffer;
+  android_ycbcr mYCbCr = {};
+  status_t mLockStatus = DEAD_OBJECT;
+};
+
+// RAII utility class to safely lock AHardwareBuffer and obtain
+// AHardwareBuffer_Planes (suitable for interacting with RGBA / BLOB buffers).
+//
+// Access to the buffer is locked immediately after construction.
+class PlanesLockGuard {
+ public:
+  PlanesLockGuard(std::shared_ptr<AHardwareBuffer> hwBuffer,
+                  uint64_t usageFlags, sp<Fence> fence = nullptr);
+  PlanesLockGuard(PlanesLockGuard&& other) = default;
+  ~PlanesLockGuard();
+
+  // Returns OK if the buffer is successfully locked.
+  status_t getStatus() const;
+
+  // Dereferencing an instance of this guard returns the AHardwareBuffer_Planes
+  // structure describing the plane layout.
+  //
+  // The caller must check that the buffer was successfully locked
+  // before dereferencing.
+  const AHardwareBuffer_Planes& operator*() const;
+
+  // Disable copy.
+  PlanesLockGuard(const PlanesLockGuard&) = delete;
+  PlanesLockGuard& operator=(const PlanesLockGuard&) = delete;
+
+ private:
+  std::shared_ptr<AHardwareBuffer> mHwBuffer;
+  AHardwareBuffer_Planes mPlanes;
+  status_t mLockStatus = DEAD_OBJECT;
+};
+
 // Converts camera AIDL status to ndk::ScopedAStatus
 inline ndk::ScopedAStatus cameraStatus(
     const ::aidl::android::hardware::camera::common::Status status) {
@@ -50,7 +114,46 @@
 // Returns true if specified format is supported for virtual camera input.
 bool isFormatSupportedForInput(
     int width, int height,
-    ::aidl::android::companion::virtualcamera::Format format);
+    ::aidl::android::companion::virtualcamera::Format format, int maxFps);
+
+// Representation of resolution / size.
+struct Resolution {
+  Resolution() = default;
+  Resolution(const int w, const int h) : width(w), height(h) {
+  }
+
+  // Order by increasing pixel count, and by width for same pixel count.
+  bool operator<(const Resolution& other) const {
+    const int pixCount = width * height;
+    const int otherPixCount = other.width * other.height;
+    return pixCount == otherPixCount ? width < other.width
+                                     : pixCount < otherPixCount;
+  }
+
+  bool operator<=(const Resolution& other) const {
+    return *this == other || *this < other;
+  }
+
+  bool operator==(const Resolution& other) const {
+    return width == other.width && height == other.height;
+  }
+
+  int width = 0;
+  int height = 0;
+};
+
+inline bool isApproximatellySameAspectRatio(const Resolution r1,
+                                            const Resolution r2) {
+  static constexpr float kAspectRatioEpsilon = 0.05;
+  float aspectRatio1 =
+      static_cast<float>(r1.width) / static_cast<float>(r1.height);
+  float aspectRatio2 =
+      static_cast<float>(r2.width) / static_cast<float>(r2.height);
+
+  return std::abs(aspectRatio1 - aspectRatio2) < kAspectRatioEpsilon;
+}
+
+std::ostream& operator<<(std::ostream& os, const Resolution& resolution);
 
 }  // namespace virtualcamera
 }  // namespace companion
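
For illustration only, the ordering defined by Resolution::operator< and the aspect-ratio helper compose as in the sketch below; the helper name sortedSameAspectRatio and the candidate list are made up.

    // Hypothetical helper built only on the Resolution declarations above;
    // assumes the android::companion::virtualcamera namespace.
    #include <algorithm>
    #include <vector>

    std::vector<Resolution> sortedSameAspectRatio(std::vector<Resolution> candidates,
                                                  const Resolution& reference) {
      // Drop resolutions whose aspect ratio differs from the reference by more
      // than the 0.05 epsilon, then order the rest by increasing pixel count.
      candidates.erase(
          std::remove_if(candidates.begin(), candidates.end(),
                         [&](const Resolution& r) {
                           return !isApproximatellySameAspectRatio(r, reference);
                         }),
          candidates.end());
      std::sort(candidates.begin(), candidates.end());  // Uses operator< above.
      return candidates;
    }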
diff --git a/services/medialog/fuzzer/Android.bp b/services/medialog/fuzzer/Android.bp
index c96c37b..bf90f43 100644
--- a/services/medialog/fuzzer/Android.bp
+++ b/services/medialog/fuzzer/Android.bp
@@ -1,4 +1,5 @@
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
diff --git a/services/mediametrics/AudioPowerUsage.cpp b/services/mediametrics/AudioPowerUsage.cpp
index 630a436..7dc445b 100644
--- a/services/mediametrics/AudioPowerUsage.cpp
+++ b/services/mediametrics/AudioPowerUsage.cpp
@@ -549,7 +549,7 @@
 
     int slot = 1;
     std::stringstream ss;
-    ss << "AudioPowerUsage:\n";
+    ss << "AudioPowerUsage interval " << mIntervalHours << " hours:\n";
     for (const auto &item : mItems) {
         if (slot >= limit - 1) {
             ss << "-- AudioPowerUsage may be truncated!\n";
diff --git a/services/mediametrics/include/mediametricsservice/TimedAction.h b/services/mediametrics/include/mediametricsservice/TimedAction.h
index c7ef585..8b53ded 100644
--- a/services/mediametrics/include/mediametricsservice/TimedAction.h
+++ b/services/mediametrics/include/mediametricsservice/TimedAction.h
@@ -25,6 +25,12 @@
 namespace android::mediametrics {
 
 class TimedAction {
+    // Use system_clock instead of steady_clock to include suspend time.
+    using TimerClock = std::chrono::system_clock;
+
+    // Define the wakeup granularity to prevent delayed events when the
+    // device is suspended.
+    static constexpr auto kWakeupInterval = std::chrono::minutes(3);
 public:
     TimedAction() : mThread{[this](){threadLoop();}} {}
 
@@ -35,7 +41,7 @@
     // TODO: return a handle for cancelling the action?
     template <typename T> // T is in units of std::chrono::duration.
     void postIn(const T& time, std::function<void()> f) {
-        postAt(std::chrono::steady_clock::now() + time, f);
+        postAt(TimerClock::now() + time, f);
     }
 
     template <typename T> // T is in units of std::chrono::time_point
@@ -75,16 +81,21 @@
     void threadLoop() NO_THREAD_SAFETY_ANALYSIS { // thread safety doesn't cover unique_lock
         std::unique_lock l(mLock);
         while (!mQuit) {
-            auto sleepUntilTime = std::chrono::time_point<std::chrono::steady_clock>::max();
+            auto sleepUntilTime = std::chrono::time_point<TimerClock>::max();
             if (!mMap.empty()) {
                 sleepUntilTime = mMap.begin()->first;
-                if (sleepUntilTime <= std::chrono::steady_clock::now()) {
+                const auto now = TimerClock::now();
+                if (sleepUntilTime <= now) {
                     auto node = mMap.extract(mMap.begin()); // removes from mMap.
                     l.unlock();
                     node.mapped()();
                     l.lock();
                     continue;
                 }
+                // Bionic uses CLOCK_MONOTONIC for its pthread_mutex regardless
+                // of the REALTIME specification, so use kWakeupInterval to ensure
+                // a minimum wakeup granularity while suspended.
+                sleepUntilTime = std::min(sleepUntilTime, now + kWakeupInterval);
             }
             mCondition.wait_until(l, sleepUntilTime);
         }
@@ -93,7 +104,7 @@
     mutable std::mutex mLock;
     std::condition_variable mCondition GUARDED_BY(mLock);
     bool mQuit GUARDED_BY(mLock) = false;
-    std::multimap<std::chrono::time_point<std::chrono::steady_clock>, std::function<void()>>
+    std::multimap<std::chrono::time_point<TimerClock>, std::function<void()>>
             mMap GUARDED_BY(mLock); // multiple functions could execute at the same time.
 
     // needs to be initialized after the variables above, done in constructor initializer list.
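
As a usage sketch (the caller and the payload below are made up; postIn is the template shown above):

    // Hypothetical caller; assumes the TimedAction.h include above.
    #include <chrono>

    void scheduleHourlyRollup(android::mediametrics::TimedAction& timedAction) {
      // Runs roughly an hour from now. Because TimerClock is system_clock, time
      // spent suspended still counts toward the delay, and the worker wakes at
      // least every kWakeupInterval (3 minutes), so a late resume delays the
      // callback by at most that interval.
      timedAction.postIn(std::chrono::hours(1), [] { /* rollup work */ });
    }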
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 6b48075..5b4fca9 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -37,6 +37,8 @@
 #include "AAudioServiceEndpointPlay.h"
 #include "AAudioServiceEndpointMMAP.h"
 
+#include <com_android_media_aaudio.h>
+
 #define AAUDIO_BUFFER_CAPACITY_MIN    (4 * 512)
 #define AAUDIO_SAMPLE_RATE_DEFAULT    48000
 
@@ -148,9 +150,15 @@
 
         // Try other formats if the config from APM is the same as our current config.
         // Some HALs may report its format support incorrectly.
-        if ((previousConfig.format == config.format) &&
-                (previousConfig.sample_rate == config.sample_rate)) {
-            config.format = getNextFormatToTry(config.format);
+        if (previousConfig.format == config.format) {
+            if (previousConfig.sample_rate == config.sample_rate) {
+                config.format = getNextFormatToTry(config.format);
+            } else if (!com::android::media::aaudio::sample_rate_conversion()) {
+                ALOGI("%s() - AAudio SRC feature not enabled, different rates! %d != %d",
+                      __func__, previousConfig.sample_rate, config.sample_rate);
+                result = AAUDIO_ERROR_INVALID_RATE;
+                break;
+            }
         }
 
         ALOGD("%s() %#x %d failed, perhaps due to format or sample rate. Try again with %#x %d",
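
For reference, the flag accessor used above comes from the aconfig-cc library added to the oboeservice build files later in this change. A minimal sketch of the gating pattern follows; the helper name is illustrative.

    // Hypothetical helper; the header and accessor are the ones used above.
    #include <com_android_media_aaudio.h>

    bool canRetryWithDifferentSampleRate() {
      // aconfig-cc generates one boolean accessor per flag. When the
      // sample_rate_conversion flag is off, the MMAP open path above fails
      // with AAUDIO_ERROR_INVALID_RATE instead of retrying with a new rate.
      return com::android::media::aaudio::sample_rate_conversion();
    }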
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index d9e7e2b..dc70c79 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -75,11 +75,7 @@
                         this, getState());
 
     // Stop the command thread before destroying.
-    if (mThreadEnabled) {
-        mThreadEnabled = false;
-        mCommandQueue.stopWaiting();
-        mCommandThread.stop();
-    }
+    stopCommandThread();
 }
 
 std::string AAudioServiceStreamBase::dumpHeader() {
@@ -194,26 +190,27 @@
 
 error:
     closeAndClear();
-    mThreadEnabled = false;
-    mCommandQueue.stopWaiting();
-    mCommandThread.stop();
+    stopCommandThread();
     return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close() {
     aaudio_result_t result = sendCommand(CLOSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
+    if (result == AAUDIO_ERROR_ALREADY_CLOSED) {
+        // AAUDIO_ERROR_ALREADY_CLOSED is not really an error; it just indicates that the
+        // stream has already been closed. In that case, there is no need to close it again.
+        ALOGD("The stream(%d) is already closed", mHandle);
+        return AAUDIO_OK;
+    }
 
-    // Stop the command thread as the stream is closed.
-    mThreadEnabled = false;
-    mCommandQueue.stopWaiting();
-    mCommandThread.stop();
+    stopCommandThread();
 
     return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close_l() {
     if (getState() == AAUDIO_STREAM_STATE_CLOSED) {
-        return AAUDIO_OK;
+        return AAUDIO_ERROR_ALREADY_CLOSED;
     }
 
     // This will stop the stream, just in case it was not already stopped.
@@ -766,3 +763,11 @@
         .record();
     return result;
 }
+
+void AAudioServiceStreamBase::stopCommandThread() {
+    bool threadEnabled = true;
+    if (mThreadEnabled.compare_exchange_strong(threadEnabled, false)) {
+        mCommandQueue.stopWaiting();
+        mCommandThread.stop();
+    }
+}
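
The refactor above makes command-thread shutdown idempotent. A standalone sketch of the same compare-and-exchange pattern, independent of the AAudio classes (class and method names are made up):

    #include <atomic>

    class Worker {
     public:
      void stop() {
        bool expected = true;
        // Only the caller that flips mEnabled from true to false performs the
        // teardown; later or concurrent callers observe false and return.
        if (mEnabled.compare_exchange_strong(expected, false)) {
          // stopWaiting(); joinCommandThread(); ...
        }
      }

     private:
      std::atomic_bool mEnabled{true};
    };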
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index d5061b3..96a6d44 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -360,7 +360,7 @@
         EXIT_STANDBY,
     };
     AAudioThread            mCommandThread;
-    std::atomic<bool>       mThreadEnabled{false};
+    std::atomic_bool        mThreadEnabled{false};
     AAudioCommandQueue      mCommandQueue;
 
     int32_t                 mFramesPerBurst = 0;
@@ -400,6 +400,8 @@
                                 bool waitForReply = false,
                                 int64_t timeoutNanos = 0);
 
+    void stopCommandThread();
+
     aaudio_result_t closeAndClear();
 
     /**
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 9fe06b7..12ce17f 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -97,6 +97,7 @@
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "packagemanager_aidl-cpp",
+        "com.android.media.aaudio-aconfig-cc",
     ],
 
     static_libs: [
diff --git a/services/oboeservice/fuzzer/Android.bp b/services/oboeservice/fuzzer/Android.bp
index 0230935..31ed8ac 100644
--- a/services/oboeservice/fuzzer/Android.bp
+++ b/services/oboeservice/fuzzer/Android.bp
@@ -19,6 +19,7 @@
  */
 
 package {
+    default_team: "trendy_team_media_framework_audio",
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
     // all of the 'license_kinds' from "frameworks_av_license"
@@ -51,6 +52,7 @@
         "aaudio-aidl-cpp",
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
+        "com.android.media.aaudio-aconfig-cc",
     ],
     static_libs: [
         "libaaudioservice",