[automerger skipped] Merge "DO NOT MERGE - Merge pie-platform-release (PPRL.181205.001) into master" am: bc44bd9a13  -s ours am: b28f9e06fd  -s ours
am: 4d002081f2  -s ours

Change-Id: I42f5ef674c7e56e53c23ffa9ee46912da693363b
diff --git a/hal/audio_extn/a2dp.c b/hal/audio_extn/a2dp.c
index 671fe59..7c2e4db 100644
--- a/hal/audio_extn/a2dp.c
+++ b/hal/audio_extn/a2dp.c
@@ -141,6 +141,7 @@
                                enc_codec_t *codec_type);
 typedef int (*audio_check_a2dp_ready_t)(void);
 typedef int (*audio_is_scrambling_enabled_t)(void);
+typedef uint16_t (*audio_get_a2dp_sink_latency_t)(void);
 
 enum A2DP_STATE {
     A2DP_STATE_CONNECTED,
@@ -221,6 +222,8 @@
     audio_check_a2dp_ready_t audio_check_a2dp_ready;
     /* Check if scrambling is enabled on BTSoC */
     audio_is_scrambling_enabled_t audio_is_scrambling_enabled;
+    /* Get sink latency from Bluetooth stack */
+    audio_get_a2dp_sink_latency_t audio_get_a2dp_sink_latency;
     /* Internal A2DP state identifier */
     enum A2DP_STATE bt_state;
     /* A2DP codec type configured */
@@ -713,6 +716,8 @@
                         dlsym(a2dp.bt_lib_handle,"audio_check_a2dp_ready");
             a2dp.audio_is_scrambling_enabled = (audio_is_scrambling_enabled_t)
                         dlsym(a2dp.bt_lib_handle,"audio_is_scrambling_enabled");
+            a2dp.audio_get_a2dp_sink_latency = (audio_get_a2dp_sink_latency_t)
+                        dlsym(a2dp.bt_lib_handle,"audio_get_a2dp_sink_latency");
         }
     }
 
@@ -1714,7 +1719,7 @@
 
 uint32_t audio_extn_a2dp_get_encoder_latency()
 {
-    uint32_t latency = 0;
+    uint32_t latency_ms = 0;
     int avsync_runtime_prop = 0;
     int sbc_offset = 0, aptx_offset = 0, aptxhd_offset = 0,
         aac_offset = 0, ldac_offset = 0;
@@ -1731,36 +1736,41 @@
         }
     }
 
+    uint32_t slatency_ms = 0;
+    if (a2dp.audio_get_a2dp_sink_latency && a2dp.bt_state != A2DP_STATE_DISCONNECTED) {
+        slatency_ms = a2dp.audio_get_a2dp_sink_latency();
+    }
+
     switch (a2dp.bt_encoder_format) {
         case ENC_CODEC_TYPE_SBC:
-            latency = (avsync_runtime_prop > 0) ? sbc_offset : ENCODER_LATENCY_SBC;
-            latency += DEFAULT_SINK_LATENCY_SBC;
+            latency_ms = (avsync_runtime_prop > 0) ? sbc_offset : ENCODER_LATENCY_SBC;
+            latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_SBC : slatency_ms;
             break;
         case ENC_CODEC_TYPE_APTX:
-            latency = (avsync_runtime_prop > 0) ? aptx_offset : ENCODER_LATENCY_APTX;
-            latency += DEFAULT_SINK_LATENCY_APTX;
+            latency_ms = (avsync_runtime_prop > 0) ? aptx_offset : ENCODER_LATENCY_APTX;
+            latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_APTX : slatency_ms;
             break;
         case ENC_CODEC_TYPE_APTX_HD:
-            latency = (avsync_runtime_prop > 0) ? aptxhd_offset : ENCODER_LATENCY_APTX_HD;
-            latency += DEFAULT_SINK_LATENCY_APTX_HD;
+            latency_ms = (avsync_runtime_prop > 0) ? aptxhd_offset : ENCODER_LATENCY_APTX_HD;
+            latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_APTX_HD : slatency_ms;
             break;
         case ENC_CODEC_TYPE_AAC:
-            latency = (avsync_runtime_prop > 0) ? aac_offset : ENCODER_LATENCY_AAC;
-            latency += DEFAULT_SINK_LATENCY_AAC;
+            latency_ms = (avsync_runtime_prop > 0) ? aac_offset : ENCODER_LATENCY_AAC;
+            latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_AAC : slatency_ms;
             break;
         case ENC_CODEC_TYPE_LDAC:
-            latency = (avsync_runtime_prop > 0) ? ldac_offset : ENCODER_LATENCY_LDAC;
-            latency += DEFAULT_SINK_LATENCY_LDAC;
+            latency_ms = (avsync_runtime_prop > 0) ? ldac_offset : ENCODER_LATENCY_LDAC;
+            latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_LDAC : slatency_ms;
             break;
         case ENC_CODEC_TYPE_PCM:
-            latency = ENCODER_LATENCY_PCM;
-            latency += DEFAULT_SINK_LATENCY_PCM;
+            latency_ms = ENCODER_LATENCY_PCM;
+            latency_ms += DEFAULT_SINK_LATENCY_PCM;
             break;
         default:
-            latency = DEFAULT_ENCODER_LATENCY;
+            latency_ms = DEFAULT_ENCODER_LATENCY;
             break;
     }
-    return latency;
+    return latency_ms;
 }
 
 int audio_extn_a2dp_get_parameters(struct str_parms *query,
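
Note on the a2dp.c hunks above: the sink latency is resolved at runtime via
dlsym() from the Bluetooth audio library, and a zero or missing value falls
back to the per-codec DEFAULT_SINK_LATENCY_* constant. A minimal standalone
sketch of that lookup-and-fallback pattern follows; only the symbol name and
the fallback rule come from the patch, the helper and its arguments are
illustrative assumptions.

    #include <dlfcn.h>
    #include <stdint.h>

    typedef uint16_t (*audio_get_a2dp_sink_latency_t)(void);

    /* Return the sink-reported latency in ms, or dflt_ms when the symbol is
     * unavailable or reports 0 (same fallback rule as the patched
     * audio_extn_a2dp_get_encoder_latency()). */
    static uint32_t query_sink_latency_ms(void *bt_lib_handle, uint32_t dflt_ms)
    {
        audio_get_a2dp_sink_latency_t fn = NULL;
        if (bt_lib_handle != NULL)
            fn = (audio_get_a2dp_sink_latency_t)
                        dlsym(bt_lib_handle, "audio_get_a2dp_sink_latency");
        uint32_t latency_ms = (fn != NULL) ? fn() : 0;
        return (latency_ms == 0) ? dflt_ms : latency_ms;
    }
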
diff --git a/hal/audio_extn/utils.c b/hal/audio_extn/utils.c
index 73de0ab..3a1877b 100644
--- a/hal/audio_extn/utils.c
+++ b/hal/audio_extn/utils.c
@@ -281,6 +281,10 @@
                                                    usecase->out_snd_device,
                                                    out->sample_rate,
                                                    sample_rate);
+    } else if (out->devices & AUDIO_DEVICE_OUT_ALL_A2DP) {
+        audio_extn_a2dp_get_sample_rate(sample_rate);
+        ALOGI("%s: Using sample rate %d for A2DP CoPP", __func__,
+               *sample_rate);
     }
 
     app_type_cfg->mode = flags_to_mode(0 /*playback*/, out->flags);
@@ -577,7 +581,7 @@
         }
 
         /* Initialize snd card name specific ids and/or backends*/
-        if (snd_card_info_init(platform_info_file, my_data,
+        if (platform_info_init(platform_info_file, my_data, false,
                                &acdb_set_parameters) < 0) {
             ALOGE("Failed to find platform_info_file");
             goto cleanup;
diff --git a/hal/audio_hw.c b/hal/audio_hw.c
index f315b5e..7661e58 100644
--- a/hal/audio_hw.c
+++ b/hal/audio_hw.c
@@ -225,7 +225,7 @@
 #define AFE_PROXY_CHANNEL_COUNT 2
 #define AFE_PROXY_SAMPLING_RATE 48000
 
-#define AFE_PROXY_PLAYBACK_PERIOD_SIZE  768
+#define AFE_PROXY_PLAYBACK_PERIOD_SIZE  256
 #define AFE_PROXY_PLAYBACK_PERIOD_COUNT 4
 
 struct pcm_config pcm_config_afe_proxy_playback = {
@@ -239,7 +239,7 @@
     .avail_min = AFE_PROXY_PLAYBACK_PERIOD_SIZE,
 };
 
-#define AFE_PROXY_RECORD_PERIOD_SIZE  768
+#define AFE_PROXY_RECORD_PERIOD_SIZE  256
 #define AFE_PROXY_RECORD_PERIOD_COUNT 4
 
 struct pcm_config pcm_config_afe_proxy_record = {
@@ -615,7 +615,7 @@
                        struct audio_usecase *usecase)
 {
     snd_device_t snd_device;
-    char mixer_path[50];
+    char mixer_path[MIXER_PATH_MAX_LENGTH];
 
     if (usecase == NULL)
         return -EINVAL;
@@ -628,8 +628,13 @@
         snd_device = usecase->out_snd_device;
     audio_extn_utils_send_app_type_cfg(adev, usecase);
     audio_extn_utils_send_audio_calibration(adev, usecase);
-    strcpy(mixer_path, use_case_table[usecase->id]);
+
+    // we shouldn't truncate mixer_path
+    ALOGW_IF(strlcpy(mixer_path, use_case_table[usecase->id], sizeof(mixer_path))
+            >= sizeof(mixer_path), "%s: truncation on mixer path", __func__);
+    // platform_add_backend_name() below appends the backend name to mixer_path
     platform_add_backend_name(adev->platform, mixer_path, snd_device);
+
     audio_extn_sound_trigger_update_stream_status(usecase, ST_EVENT_STREAM_BUSY);
     ALOGD("%s: usecase(%d) apply and update mixer path: %s", __func__,  usecase->id, mixer_path);
     audio_route_apply_and_update_path(adev->audio_route, mixer_path);
@@ -642,7 +647,7 @@
                         struct audio_usecase *usecase)
 {
     snd_device_t snd_device;
-    char mixer_path[50];
+    char mixer_path[MIXER_PATH_MAX_LENGTH];
 
     if (usecase == NULL)
         return -EINVAL;
@@ -652,9 +657,14 @@
         snd_device = usecase->in_snd_device;
     else
         snd_device = usecase->out_snd_device;
-    strcpy(mixer_path, use_case_table[usecase->id]);
+
+    // we shouldn't truncate mixer_path
+    ALOGW_IF(strlcpy(mixer_path, use_case_table[usecase->id], sizeof(mixer_path))
+            >= sizeof(mixer_path), "%s: truncation on mixer path", __func__);
+    // platform_add_backend_name() below appends the backend name to mixer_path
     platform_add_backend_name(adev->platform, mixer_path, snd_device);
     ALOGD("%s: usecase(%d) reset and update mixer path: %s", __func__, usecase->id, mixer_path);
+
     audio_route_reset_and_update_path(adev->audio_route, mixer_path);
     audio_extn_sound_trigger_update_stream_status(usecase, ST_EVENT_STREAM_FREE);
 
@@ -1134,8 +1144,14 @@
         // audio_channel_in_mask_from_count() does the right conversion to either positional or
         // indexed mask
         for ( ; channel_count <= channels && num_masks < max_masks; channel_count++) {
-            supported_channel_masks[num_masks++] =
+            const audio_channel_mask_t mask =
                     audio_channel_in_mask_from_count(channel_count);
+            supported_channel_masks[num_masks++] = mask;
+            const audio_channel_mask_t index_mask =
+                    audio_channel_mask_for_index_assignment_from_count(channel_count);
+            if (mask != index_mask && num_masks < max_masks) { // ensure index mask added.
+                supported_channel_masks[num_masks++] = index_mask;
+            }
         }
     }
 #ifdef NDEBUG
@@ -1727,15 +1743,15 @@
     struct stream_out *out = (struct stream_out *) context;
     struct listnode *item;
 
-    out->offload_state = OFFLOAD_STATE_IDLE;
-    out->playback_started = 0;
-
     setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_AUDIO);
     set_sched_policy(0, SP_FOREGROUND);
     prctl(PR_SET_NAME, (unsigned long)"Offload Callback", 0, 0, 0);
 
     ALOGV("%s", __func__);
+
     lock_output_stream(out);
+    out->offload_state = OFFLOAD_STATE_IDLE;
+    out->playback_started = 0;
     for (;;) {
         struct offload_cmd *cmd = NULL;
         stream_callback_event_t event;
@@ -1994,6 +2010,9 @@
         audio_low_latency_hint_end();
     }
 
+    if (out->usecase == USECASE_INCALL_MUSIC_UPLINK)
+        voice_set_device_mute_flag(adev, false);
+
     /* 1. Get and set stream specific mixer controls */
     disable_audio_route(adev, uc_info);
 
@@ -2117,6 +2136,9 @@
 
     audio_extn_extspk_update(adev->extspk);
 
+    if (out->usecase == USECASE_INCALL_MUSIC_UPLINK)
+        voice_set_device_mute_flag(adev, true);
+
     ALOGV("%s: Opening PCM device card_id(%d) device_id(%d) format(%#x)",
           __func__, adev->snd_card, out->pcm_device_id, out->config.format);
     if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
@@ -2546,10 +2568,13 @@
         // otherwise audio is no longer played on the new usb devices.
         // By forcing the stream in standby, the usb stack refcount drops to 0
         // and the driver is closed.
-        if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD && val == AUDIO_DEVICE_NONE &&
+        if (val == AUDIO_DEVICE_NONE &&
                 audio_is_usb_out_device(out->devices)) {
-            ALOGD("%s() putting the usb device in standby after disconnection", __func__);
-            out_standby_l(&out->stream.common);
+            if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
+                ALOGD("%s() putting the usb device in standby after disconnection", __func__);
+                out_standby_l(&out->stream.common);
+            }
+            val = AUDIO_DEVICE_OUT_SPEAKER;
         }
 
         pthread_mutex_lock(&adev->lock);
@@ -2712,7 +2737,7 @@
                                           struct str_parms *reply,
                                           audio_channel_mask_t *supported_channel_masks) {
     int ret = -1;
-    char value[256];
+    char value[ARRAY_SIZE(channels_name_to_enum_table) * 32 /* max channel name size */];
     bool first = true;
     size_t i, j;
 
@@ -5030,7 +5055,10 @@
                                                             devices,
                                                             flags,
                                                             source);
-    ALOGV("%s: enter", __func__);
+    ALOGV("%s: enter: flags %#x, is_usb_dev %d, may_use_hifi_record %d,"
+            " sample_rate %u, channel_mask %#x, format %#x",
+            __func__, flags, is_usb_dev, may_use_hifi_record,
+            config->sample_rate, config->channel_mask, config->format);
     *stream_in = NULL;
 
     if (is_usb_dev && !is_usb_ready(adev, false /* is_playback */)) {
@@ -5085,7 +5113,7 @@
     in->capture_handle = handle;
     in->flags = flags;
 
-    ALOGV("%s: source = %d, config->channel_mask = %d", __func__, source, config->channel_mask);
+    ALOGV("%s: source %d, config->channel_mask %#x", __func__, source, config->channel_mask);
     if (source == AUDIO_SOURCE_VOICE_UPLINK ||
          source == AUDIO_SOURCE_VOICE_DOWNLINK) {
         /* Force channel config requested to mono if incall
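
Note on the supported-channel-mask hunk in audio_hw.c above: an index-assignment
mask encodes only a channel count plus a representation tag in the top bits, so
for some counts it differs from the positional mask returned by
audio_channel_in_mask_from_count(), and the added guard appends it only when it
is distinct and there is room left. Below is a standalone sketch of how such an
index mask is formed; the bit layout is assumed from the stock system/audio.h
definitions, not taken from this patch.

    #include <stdint.h>

    /* Assumed layout: bits 0..29 carry channel bits (a count-sized bitfield for
     * index masks), bits 30..31 carry the representation (2 == index). */
    #define CHANNEL_REPRESENTATION_INDEX 2u
    #define CHANNEL_COUNT_SHIFT          30

    static uint32_t index_mask_from_count(uint32_t channel_count)
    {
        /* e.g. channel_count = 2 -> 0x80000003, channel_count = 8 -> 0x800000FF */
        return (CHANNEL_REPRESENTATION_INDEX << CHANNEL_COUNT_SHIFT) |
               ((1u << channel_count) - 1u);
    }
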
diff --git a/hal/audio_hw.h b/hal/audio_hw.h
index 0b3b028..447f8ca 100644
--- a/hal/audio_hw.h
+++ b/hal/audio_hw.h
@@ -357,6 +357,7 @@
     bool enable_voicerx;
     bool enable_hfp;
     bool mic_break_enabled;
+    bool use_voice_device_mute;
 
     int snd_card;
     void *platform;
diff --git a/hal/msm8916/platform.c b/hal/msm8916/platform.c
index 750d4e5..2707c89 100644
--- a/hal/msm8916/platform.c
+++ b/hal/msm8916/platform.c
@@ -1132,7 +1132,8 @@
     /* Initialize ACDB and PCM ID's */
     strlcpy(platform_info_path, PLATFORM_INFO_XML_PATH, MAX_MIXER_XML_PATH);
     resolve_config_file(platform_info_path);
-    platform_info_init(platform_info_path, my_data);
+    platform_info_init(platform_info_path, my_data,
+                       true, &platform_set_parameters);
 
     my_data->acdb_handle = dlopen(LIB_ACDB_LOADER, RTLD_NOW);
     if (my_data->acdb_handle == NULL) {
@@ -1651,6 +1652,7 @@
     struct audio_device *adev = my_data->adev;
     struct mixer_ctl *ctl;
     const char *mixer_ctl_name = "Voice Rx Gain";
+    const char *mute_mixer_ctl_name = "Voice Rx Device Mute";
     int vol_index = 0, ret = 0;
     uint32_t set_values[ ] = {0,
                               ALL_SESSION_VSID,
@@ -1661,7 +1663,6 @@
     // But this values don't changed in kernel. So, below change is need.
     vol_index = (int)percent_to_index(volume, MIN_VOL_INDEX, my_data->max_vol_index);
     set_values[0] = vol_index;
-
     ctl = mixer_get_ctl_by_name(adev->mixer, mixer_ctl_name);
     if (!ctl) {
         ALOGE("%s: Could not get ctl for mixer cmd - %s",
@@ -1671,6 +1672,23 @@
     ALOGV("Setting voice volume index: %d", set_values[0]);
     ret = mixer_ctl_set_array(ctl, set_values, ARRAY_SIZE(set_values));
 
+    // Send the mute command when the volume index is at max, since indexes are
+    // inverted for mixer controls.
+    if (vol_index == my_data->max_vol_index) {
+        set_values[0] = 1;
+    }
+    else {
+        set_values[0] = 0;
+    }
+
+    ctl = mixer_get_ctl_by_name(adev->mixer, mute_mixer_ctl_name);
+    if (!ctl) {
+        ALOGE("%s: Could not get ctl for mixer cmd - %s",
+              __func__, mute_mixer_ctl_name);
+        return -EINVAL;
+    }
+    ALOGV("%s: Setting RX Device Mute to: %d", __func__, set_values[0]);
+    mixer_ctl_set_array(ctl, set_values, ARRAY_SIZE(set_values));
     return ret;
 }
 
diff --git a/hal/msm8974/platform.c b/hal/msm8974/platform.c
index 5a36f0c..6991a12 100644
--- a/hal/msm8974/platform.c
+++ b/hal/msm8974/platform.c
@@ -125,6 +125,10 @@
     uint32_t             sampling_rate;
     uint32_t             cal_type;
     uint32_t             module_id;
+#ifdef PLATFORM_SM8150
+    uint16_t             instance_id;
+    uint16_t             reserved;
+#endif
     uint32_t             param_id;
 } acdb_audio_cal_cfg_t;
 
@@ -1649,7 +1653,8 @@
 
     my_data->declared_mic_count = 0;
     /* Initialize platform specific ids and/or backends*/
-    platform_info_init(platform_info_file, my_data);
+    platform_info_init(platform_info_file, my_data,
+                       true, &platform_set_parameters);
 
     ALOGD("%s: Loading mixer file: %s", __func__, mixer_xml_file);
     adev->audio_route = audio_route_init(snd_card_num, mixer_xml_file);
@@ -2512,7 +2517,7 @@
               __func__, mixer_ctl_name);
         return -EINVAL;
     }
-    ALOGV("Setting voice mute state: %d", state);
+    ALOGV("%s: Setting voice mute state: %d", __func__, state);
     mixer_ctl_set_array(ctl, set_values, ARRAY_SIZE(set_values));
 
     if (my_data->csd != NULL) {
diff --git a/hal/platform_api.h b/hal/platform_api.h
index 1a7d2c3..8e249da 100644
--- a/hal/platform_api.h
+++ b/hal/platform_api.h
@@ -117,11 +117,11 @@
 
 bool platform_sound_trigger_usecase_needs_event(audio_usecase_t uc_id);
 
-/* From platform_info.c */
-int platform_info_init(const char *filename, void *);
-
 typedef int (*set_parameters_fn)(void *platform, struct str_parms *parms);
-int snd_card_info_init(const char *filename, void *, set_parameters_fn);
+
+/* From platform_info.c */
+int platform_info_init(const char *filename, void *,
+                       bool do_full_parse, set_parameters_fn);
 
 int platform_get_usecase_index(const char * usecase);
 int platform_set_usecase_pcm_id(audio_usecase_t usecase, int32_t type, int32_t pcm_id);
diff --git a/hal/platform_info.c b/hal/platform_info.c
index f5fbe3f..0695592 100644
--- a/hal/platform_info.c
+++ b/hal/platform_info.c
@@ -25,6 +25,15 @@
 #include "platform_api.h"
 #include <platform.h>
 #include <math.h>
+#include <pthread.h>
+
+/*
+ * Mandatory microphone characteristics include: device_id, type, address, location, group,
+ * index_in_the_group, directionality, num_frequency_responses, frequencies and responses.
+ * MANDATORY_MICROPHONE_CHARACTERISTICS should be updated when mandatory microphone
+ * characteristics are changed.
+ */
+#define MANDATORY_MICROPHONE_CHARACTERISTICS ((1 << 10) - 1)
 
 typedef enum {
     ROOT,
@@ -74,17 +83,19 @@
     [ACDB_METAINFO_KEY] = process_acdb_metainfo_key,
 };
 
-static set_parameters_fn set_parameters = &platform_set_parameters;
-
 static section_t section;
 
 struct platform_info {
+    pthread_mutex_t   lock;
     bool              do_full_parse;
     void             *platform;
     struct str_parms *kvpairs;
+    set_parameters_fn set_parameters;
 };
 
-static struct platform_info my_data = {true, NULL, NULL};
+static struct platform_info my_data = {PTHREAD_MUTEX_INITIALIZER,
+                                       true, NULL, NULL,
+                                       &platform_set_parameters};
 
 struct audio_string_to_enum {
     const char* name;
@@ -145,17 +156,6 @@
     AUDIO_MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DEFAULT),
 };
 
-enum {
-    AUDIO_MICROPHONE_CHARACTERISTIC_NONE = 0u, // 0x0
-    AUDIO_MICROPHONE_CHARACTERISTIC_SENSITIVITY = 1u, // 0x1
-    AUDIO_MICROPHONE_CHARACTERISTIC_MAX_SPL = 2u, // 0x2
-    AUDIO_MICROPHONE_CHARACTERISTIC_MIN_SPL = 4u, // 0x4
-    AUDIO_MICROPHONE_CHARACTERISTIC_ORIENTATION = 8u, // 0x8
-    AUDIO_MICROPHONE_CHARACTERISTIC_GEOMETRIC_LOCATION = 16u, // 0x10
-    AUDIO_MICROPHONE_CHARACTERISTIC_ALL = 31u, /* ((((SENSITIVITY | MAX_SPL) | MIN_SPL)
-                                                  | ORIENTATION) | GEOMETRIC_LOCATION) */
-};
-
 static bool find_enum_by_string(const struct audio_string_to_enum * table, const char * name,
                                 int32_t len, unsigned int *value)
 {
@@ -418,7 +418,7 @@
     }
 
     str_parms_add_str(my_data.kvpairs, (char*)attr[1], (char*)attr[3]);
-    set_parameters(my_data.platform, my_data.kvpairs);
+    my_data.set_parameters(my_data.platform, my_data.kvpairs);
 done:
     return;
 }
@@ -458,221 +458,165 @@
 
 static void process_microphone_characteristic(const XML_Char **attr) {
     struct audio_microphone_characteristic_t microphone;
-    uint32_t curIdx = 0;
+    uint32_t index = 0;
+    uint32_t found_mandatory_characteristics = 0;
+    uint32_t num_frequencies = 0;
+    uint32_t num_responses = 0;
+    microphone.sensitivity = AUDIO_MICROPHONE_SENSITIVITY_UNKNOWN;
+    microphone.max_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+    microphone.min_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+    microphone.orientation.x = 0.0f;
+    microphone.orientation.y = 0.0f;
+    microphone.orientation.z = 0.0f;
+    microphone.geometric_location.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+    microphone.geometric_location.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+    microphone.geometric_location.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
 
-    if (strcmp(attr[curIdx++], "valid_mask")) {
-        ALOGE("%s: valid_mask not found", __func__);
-        goto done;
-    }
-    uint32_t valid_mask = atoi(attr[curIdx++]);
-
-    if (strcmp(attr[curIdx++], "device_id")) {
-        ALOGE("%s: device_id not found", __func__);
-        goto done;
-    }
-    if (strlen(attr[curIdx]) > AUDIO_MICROPHONE_ID_MAX_LEN) {
-        ALOGE("%s: device_id %s is too long", __func__, attr[curIdx]);
-        goto done;
-    }
-    strcpy(microphone.device_id, attr[curIdx++]);
-
-    if (strcmp(attr[curIdx++], "type")) {
-        ALOGE("%s: device not found", __func__);
-        goto done;
-    }
-    if (!find_enum_by_string(device_in_types, (char*)attr[curIdx++],
-            ARRAY_SIZE(device_in_types), &microphone.device)) {
-        ALOGE("%s: type %s in %s not found!",
-              __func__, attr[--curIdx], PLATFORM_INFO_XML_PATH);
-        goto done;
-    }
-
-    if (strcmp(attr[curIdx++], "address")) {
-        ALOGE("%s: address not found", __func__);
-        goto done;
-    }
-    if (strlen(attr[curIdx]) > AUDIO_DEVICE_MAX_ADDRESS_LEN) {
-        ALOGE("%s, address %s is too long", __func__, attr[curIdx]);
-        goto done;
-    }
-    strcpy(microphone.address, attr[curIdx++]);
-    if (strlen(microphone.address) == 0) {
-        // If the address is empty, populate the address according to device type.
-        if (microphone.device == AUDIO_DEVICE_IN_BUILTIN_MIC) {
-            strcpy(microphone.address, AUDIO_BOTTOM_MICROPHONE_ADDRESS);
-        } else if (microphone.device == AUDIO_DEVICE_IN_BACK_MIC) {
-            strcpy(microphone.address, AUDIO_BACK_MICROPHONE_ADDRESS);
-        }
-    }
-
-    if (strcmp(attr[curIdx++], "location")) {
-        ALOGE("%s: location not found", __func__);
-        goto done;
-    }
-    if (!find_enum_by_string(mic_locations, (char*)attr[curIdx++],
-            AUDIO_MICROPHONE_LOCATION_CNT, &microphone.location)) {
-        ALOGE("%s: location %s in %s not found!",
-              __func__, attr[--curIdx], PLATFORM_INFO_XML_PATH);
-        goto done;
-    }
-
-    if (strcmp(attr[curIdx++], "group")) {
-        ALOGE("%s: group not found", __func__);
-        goto done;
-    }
-    microphone.group = atoi(attr[curIdx++]);
-
-    if (strcmp(attr[curIdx++], "index_in_the_group")) {
-        ALOGE("%s: index_in_the_group not found", __func__);
-        goto done;
-    }
-    microphone.index_in_the_group = atoi(attr[curIdx++]);
-
-    if (strcmp(attr[curIdx++], "directionality")) {
-        ALOGE("%s: directionality not found", __func__);
-        goto done;
-    }
-    if (!find_enum_by_string(mic_directionalities, (char*)attr[curIdx++],
-                AUDIO_MICROPHONE_DIRECTIONALITY_CNT, &microphone.directionality)) {
-        ALOGE("%s: directionality %s in %s not found!",
-              __func__, attr[--curIdx], PLATFORM_INFO_XML_PATH);
-        goto done;
-    }
-
-    if (strcmp(attr[curIdx++], "num_frequency_responses")) {
-        ALOGE("%s: num_frequency_responses not found", __func__);
-        goto done;
-    }
-    microphone.num_frequency_responses = atoi(attr[curIdx++]);
-    if (microphone.num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
-        ALOGE("%s: num_frequency_responses is too large", __func__);
-        goto done;
-    }
-    if (microphone.num_frequency_responses > 0) {
-        if (strcmp(attr[curIdx++], "frequencies")) {
-            ALOGE("%s: frequencies not found", __func__);
-            goto done;
-        }
-        char *token = strtok((char *)attr[curIdx++], " ");
-        uint32_t num_frequencies = 0;
-        while (token) {
-            microphone.frequency_responses[0][num_frequencies++] = atof(token);
-            if (num_frequencies > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
-                ALOGE("%s: num %u of frequency is too large", __func__, num_frequencies);
+    while (attr[index] != NULL) {
+        const char *attribute = attr[index++];
+        char value[strlen(attr[index]) + 1];
+        strcpy(value, attr[index++]);
+        if (strcmp(attribute, "device_id") == 0) {
+            if (strlen(value) > AUDIO_MICROPHONE_ID_MAX_LEN) {
+                ALOGE("%s: device_id %s is too long", __func__, value);
                 goto done;
             }
-            token = strtok(NULL, " ");
-        }
-
-        if (strcmp(attr[curIdx++], "responses")) {
-            ALOGE("%s: responses not found", __func__);
-            goto done;
-        }
-        token = strtok((char *)attr[curIdx++], " ");
-        uint32_t num_responses = 0;
-        while (token) {
-            microphone.frequency_responses[1][num_responses++] = atof(token);
-            if (num_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
-                ALOGE("%s: num %u of response is too large", __func__, num_responses);
+            strcpy(microphone.device_id, value);
+            found_mandatory_characteristics |= 1;
+        } else if (strcmp(attribute, "type") == 0) {
+            if (!find_enum_by_string(device_in_types, value,
+                    ARRAY_SIZE(device_in_types), &microphone.device)) {
+                ALOGE("%s: type %s in %s not found!",
+                        __func__, value, PLATFORM_INFO_XML_PATH);
                 goto done;
             }
-            token = strtok(NULL, " ");
-        }
-
-        if (num_frequencies != num_responses
-                || num_frequencies != microphone.num_frequency_responses) {
-            ALOGE("%s: num of frequency and response not match: %u, %u, %u",
-                  __func__, num_frequencies, num_responses, microphone.num_frequency_responses);
-            goto done;
-        }
-    }
-
-    if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_SENSITIVITY) {
-        if (strcmp(attr[curIdx++], "sensitivity")) {
-            ALOGE("%s: sensitivity not found", __func__);
-            goto done;
-        }
-        microphone.sensitivity = atof(attr[curIdx++]);
-    } else {
-        microphone.sensitivity = AUDIO_MICROPHONE_SENSITIVITY_UNKNOWN;
-    }
-
-    if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_MAX_SPL) {
-        if (strcmp(attr[curIdx++], "max_spl")) {
-            ALOGE("%s: max_spl not found", __func__);
-            goto done;
-        }
-        microphone.max_spl = atof(attr[curIdx++]);
-    } else {
-        microphone.max_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
-    }
-
-    if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_MIN_SPL) {
-        if (strcmp(attr[curIdx++], "min_spl")) {
-            ALOGE("%s: min_spl not found", __func__);
-            goto done;
-        }
-        microphone.min_spl = atof(attr[curIdx++]);
-    } else {
-        microphone.min_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
-    }
-
-    if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_ORIENTATION) {
-        if (strcmp(attr[curIdx++], "orientation")) {
-            ALOGE("%s: orientation not found", __func__);
-            goto done;
-        }
-        char *token = strtok((char *)attr[curIdx++], " ");
-        float orientation[3];
-        uint32_t idx = 0;
-        while (token) {
-            orientation[idx++] = atof(token);
-            if (idx > 3) {
+            found_mandatory_characteristics |= (1 << 1);
+        } else if (strcmp(attribute, "address") == 0) {
+            if (strlen(value) > AUDIO_DEVICE_MAX_ADDRESS_LEN) {
+                ALOGE("%s, address %s is too long", __func__, value);
+                goto done;
+            }
+            strcpy(microphone.address, value);
+            if (strlen(microphone.address) == 0) {
+                // If the address is empty, populate the address according to device type.
+                if (microphone.device == AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                    strcpy(microphone.address, AUDIO_BOTTOM_MICROPHONE_ADDRESS);
+                } else if (microphone.device == AUDIO_DEVICE_IN_BACK_MIC) {
+                    strcpy(microphone.address, AUDIO_BACK_MICROPHONE_ADDRESS);
+                }
+            }
+            found_mandatory_characteristics |= (1 << 2);
+        } else if (strcmp(attribute, "location") == 0) {
+            if (!find_enum_by_string(mic_locations, value,
+                    AUDIO_MICROPHONE_LOCATION_CNT, &microphone.location)) {
+                ALOGE("%s: location %s in %s not found!",
+                        __func__, value, PLATFORM_INFO_XML_PATH);
+                goto done;
+            }
+            found_mandatory_characteristics |= (1 << 3);
+        } else if (strcmp(attribute, "group") == 0) {
+            microphone.group = atoi(value);
+            found_mandatory_characteristics |= (1 << 4);
+        } else if (strcmp(attribute, "index_in_the_group") == 0) {
+            microphone.index_in_the_group = atoi(value);
+            found_mandatory_characteristics |= (1 << 5);
+        } else if (strcmp(attribute, "directionality") == 0) {
+            if (!find_enum_by_string(mic_directionalities, value,
+                    AUDIO_MICROPHONE_DIRECTIONALITY_CNT, &microphone.directionality)) {
+                ALOGE("%s: directionality %s in %s not found!",
+                      __func__, value, PLATFORM_INFO_XML_PATH);
+                goto done;
+            }
+            found_mandatory_characteristics |= (1 << 6);
+        } else if (strcmp(attribute, "num_frequency_responses") == 0) {
+            microphone.num_frequency_responses = atoi(value);
+            if (microphone.num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+                ALOGE("%s: num_frequency_responses is too large", __func__);
+                goto done;
+            }
+            found_mandatory_characteristics |= (1 << 7);
+        } else if (strcmp(attribute, "frequencies") == 0) {
+            char *token = strtok(value, " ");
+            while (token) {
+                microphone.frequency_responses[0][num_frequencies++] = atof(token);
+                if (num_frequencies > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+                    ALOGE("%s: num %u of frequency is too large", __func__, num_frequencies);
+                    goto done;
+                }
+                token = strtok(NULL, " ");
+            }
+            found_mandatory_characteristics |= (1 << 8);
+        } else if (strcmp(attribute, "responses") == 0) {
+            char *token = strtok(value, " ");
+            while (token) {
+                microphone.frequency_responses[1][num_responses++] = atof(token);
+                if (num_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+                    ALOGE("%s: num %u of response is too large", __func__, num_responses);
+                    goto done;
+                }
+                token = strtok(NULL, " ");
+            }
+            found_mandatory_characteristics |= (1 << 9);
+        } else if (strcmp(attribute, "sensitivity") == 0) {
+            microphone.sensitivity = atof(value);
+        } else if (strcmp(attribute, "max_spl") == 0) {
+            microphone.max_spl = atof(value);
+        } else if (strcmp(attribute, "min_spl") == 0) {
+            microphone.min_spl = atof(value);
+        } else if (strcmp(attribute, "orientation") == 0) {
+            char *token = strtok(value, " ");
+            float orientation[3];
+            uint32_t idx = 0;
+            while (token) {
+                orientation[idx++] = atof(token);
+                if (idx > 3) {
+                    ALOGE("%s: orientation invalid", __func__);
+                    goto done;
+                }
+                token = strtok(NULL, " ");
+            }
+            if (idx != 3) {
                 ALOGE("%s: orientation invalid", __func__);
                 goto done;
             }
-            token = strtok(NULL, " ");
-        }
-        if (idx != 3) {
-            ALOGE("%s: orientation invalid", __func__);
-            goto done;
-        }
-        microphone.orientation.x = orientation[0];
-        microphone.orientation.y = orientation[1];
-        microphone.orientation.z = orientation[2];
-    } else {
-        microphone.orientation.x = 0.0f;
-        microphone.orientation.y = 0.0f;
-        microphone.orientation.z = 0.0f;
-    }
-
-    if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_GEOMETRIC_LOCATION) {
-        if (strcmp(attr[curIdx++], "geometric_location")) {
-            ALOGE("%s: geometric_location not found", __func__);
-            goto done;
-        }
-        char *token = strtok((char *)attr[curIdx++], " ");
-        float geometric_location[3];
-        uint32_t idx = 0;
-        while (token) {
-            geometric_location[idx++] = atof(token);
-            if (idx > 3) {
+            microphone.orientation.x = orientation[0];
+            microphone.orientation.y = orientation[1];
+            microphone.orientation.z = orientation[2];
+        } else if (strcmp(attribute, "geometric_location") == 0) {
+            char *token = strtok(value, " ");
+            float geometric_location[3];
+            uint32_t idx = 0;
+            while (token) {
+                geometric_location[idx++] = atof(token);
+                if (idx > 3) {
+                    ALOGE("%s: geometric_location invalid", __func__);
+                    goto done;
+                }
+                token = strtok(NULL, " ");
+            }
+            if (idx != 3) {
                 ALOGE("%s: geometric_location invalid", __func__);
                 goto done;
             }
-            token = strtok(NULL, " ");
+            microphone.geometric_location.x = geometric_location[0];
+            microphone.geometric_location.y = geometric_location[1];
+            microphone.geometric_location.z = geometric_location[2];
+        } else {
+            ALOGW("%s: unknown attribute of microphone characteristics: %s",
+                    __func__, attribute);
         }
-        if (idx != 3) {
-            ALOGE("%s: geometric_location invalid", __func__);
-            goto done;
-        }
-        microphone.geometric_location.x = geometric_location[0];
-        microphone.geometric_location.y = geometric_location[1];
-        microphone.geometric_location.z = geometric_location[2];
-    } else {
-        microphone.geometric_location.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
-        microphone.geometric_location.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
-        microphone.geometric_location.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+    }
+
+    if (num_frequencies != num_responses
+            || num_frequencies != microphone.num_frequency_responses) {
+        ALOGE("%s: num of frequency and response not match: %u, %u, %u",
+              __func__, num_frequencies, num_responses, microphone.num_frequency_responses);
+        goto done;
+    }
+
+    if (found_mandatory_characteristics != MANDATORY_MICROPHONE_CHARACTERISTICS) {
+        ALOGE("%s: some of mandatory microphone characteriscts are missed: %u",
+                __func__, found_mandatory_characteristics);
     }
 
     platform_set_microphone_characteristic(my_data.platform, microphone);
@@ -916,14 +860,8 @@
     }
 }
 
-int snd_card_info_init(const char *filename, void *platform, set_parameters_fn fn)
-{
-    set_parameters = fn;
-    my_data.do_full_parse = false;
-    return platform_info_init(filename, platform);
-}
-
-int platform_info_init(const char *filename, void *platform)
+int platform_info_init(const char *filename, void *platform,
+                       bool do_full_parse, set_parameters_fn fn)
 {
     XML_Parser      parser;
     FILE            *file;
@@ -932,7 +870,6 @@
     void            *buf;
     static const uint32_t kBufSize = 1024;
     char   platform_info_file_name[MIXER_PATH_MAX_LENGTH]= {0};
-    section = ROOT;
 
     if (filename == NULL) {
         strlcpy(platform_info_file_name, PLATFORM_INFO_XML_PATH, MIXER_PATH_MAX_LENGTH);
@@ -958,8 +895,12 @@
         goto err_close_file;
     }
 
+    pthread_mutex_lock(&my_data.lock);
+    section = ROOT;
+    my_data.do_full_parse = do_full_parse;
     my_data.platform = platform;
     my_data.kvpairs = str_parms_create();
+    my_data.set_parameters = fn;
 
     XML_SetElementHandler(parser, start_tag, end_tag);
 
@@ -990,10 +931,12 @@
             break;
     }
 
-    set_parameters = &platform_set_parameters;
-    my_data.do_full_parse = true;
-
 err_free_parser:
+    if (my_data.kvpairs != NULL) {
+        str_parms_destroy(my_data.kvpairs);
+        my_data.kvpairs = NULL;
+    }
+    pthread_mutex_unlock(&my_data.lock);
     XML_ParserFree(parser);
 err_close_file:
     fclose(file);
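
Note on the platform_info.c rewrite above: process_microphone_characteristic()
now iterates the XML attribute list in any order, sets one bit in
found_mandatory_characteristics per mandatory attribute, and compares the
result against MANDATORY_MICROPHONE_CHARACTERISTICS. The bit assignments below
are read directly from the hunks; the enum itself is only an illustrative
summary, not part of the patch.

    /* Bit positions used by process_microphone_characteristic() above. */
    enum mandatory_mic_bits {
        MIC_BIT_DEVICE_ID               = 1 << 0,
        MIC_BIT_TYPE                    = 1 << 1,
        MIC_BIT_ADDRESS                 = 1 << 2,
        MIC_BIT_LOCATION                = 1 << 3,
        MIC_BIT_GROUP                   = 1 << 4,
        MIC_BIT_INDEX_IN_THE_GROUP      = 1 << 5,
        MIC_BIT_DIRECTIONALITY          = 1 << 6,
        MIC_BIT_NUM_FREQUENCY_RESPONSES = 1 << 7,
        MIC_BIT_FREQUENCIES             = 1 << 8,
        MIC_BIT_RESPONSES               = 1 << 9,
        /* All ten bits set, i.e. (1 << 10) - 1 == 0x3FF, which is what
         * MANDATORY_MICROPHONE_CHARACTERISTICS expands to. */
        MIC_BITS_ALL_MANDATORY          = (1 << 10) - 1,
    };
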
diff --git a/hal/voice.c b/hal/voice.c
index 09cb926..708ce6c 100644
--- a/hal/voice.c
+++ b/hal/voice.c
@@ -171,6 +171,7 @@
     uc_info->devices = adev->current_call_output ->devices;
     uc_info->in_snd_device = SND_DEVICE_NONE;
     uc_info->out_snd_device = SND_DEVICE_NONE;
+    adev->use_voice_device_mute = false;
 
     list_add_tail(&adev->usecase_list, &uc_info->list);
 
@@ -357,11 +358,19 @@
 int voice_set_mic_mute(struct audio_device *adev, bool state)
 {
     int err = 0;
+    struct audio_usecase *usecase = NULL;
 
     adev->voice.mic_mute = state;
     if (adev->mode == AUDIO_MODE_IN_CALL ||
-        adev->mode == AUDIO_MODE_IN_COMMUNICATION)
-        err = platform_set_mic_mute(adev->platform, state);
+        adev->mode == AUDIO_MODE_IN_COMMUNICATION) {
+        /* Use device mute if incall music delivery usecase is in progress */
+        if (adev->use_voice_device_mute)
+            err = platform_set_device_mute(adev->platform, state, "tx");
+        else
+            err = platform_set_mic_mute(adev->platform, state);
+        ALOGV("%s: voice mute status=%d, use_voice_device_mute_flag=%d",
+            __func__, state, adev->use_voice_device_mute);
+    }
 
     return err;
 }
@@ -371,6 +380,25 @@
     return adev->voice.mic_mute;
 }
 
+// The following function is called when the incall music uplink usecase is
+// created or destroyed while the mic is muted. If the incall music uplink
+// usecase is active, apply voice device mute to mute only the voice Tx
+// path and not the mixed voice Tx + incall-music path. Revert to
+// voice stream mute once the incall music uplink usecase is inactive.
+void voice_set_device_mute_flag (struct audio_device *adev, bool state)
+{
+    if (adev->voice.mic_mute) {
+        if (state) {
+            platform_set_device_mute(adev->platform, true, "tx");
+            platform_set_mic_mute(adev->platform, false);
+        } else {
+            platform_set_mic_mute(adev->platform, true);
+            platform_set_device_mute(adev->platform, false, "tx");
+        }
+    }
+    adev->use_voice_device_mute = state;
+}
+
 int voice_set_volume(struct audio_device *adev, float volume)
 {
     int vol, err = 0;
@@ -517,6 +545,8 @@
     adev->voice.volume = 1.0f;
     adev->voice.mic_mute = false;
     adev->voice.in_call = false;
+    adev->use_voice_device_mute = false;
+
     for (i = 0; i < MAX_VOICE_SESSIONS; i++) {
         adev->voice.session[i].pcm_rx = NULL;
         adev->voice.session[i].pcm_tx = NULL;
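
Note on voice_set_device_mute_flag() above: when the mic is already muted, the
hand-over between stream mute and Tx device mute applies the new mute before
releasing the old one, so the uplink never passes unmuted audio during the
switch. Below is a condensed, illustrative sketch of how the audio_hw.c hunks
drive it around the incall music uplink usecase; function names other than
voice_set_device_mute_flag() are assumptions for the example.

    #include <stdbool.h>

    struct audio_device;                    /* defined in hal/audio_hw.h */
    void voice_set_device_mute_flag(struct audio_device *adev, bool state);

    static void incall_music_uplink_started(struct audio_device *adev)
    {
        /* start_output_stream() path: switch to Tx device mute so the injected
         * music reaches the uplink while the talker's mic stays muted. */
        voice_set_device_mute_flag(adev, true);
    }

    static void incall_music_uplink_stopped(struct audio_device *adev)
    {
        /* stop_output_stream() path: revert to voice stream mute. */
        voice_set_device_mute_flag(adev, false);
    }
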
diff --git a/hal/voice.h b/hal/voice.h
index 469a3b5..71e096b 100644
--- a/hal/voice.h
+++ b/hal/voice.h
@@ -95,4 +95,6 @@
                        snd_device_t out_snd_device,
                        bool enable);
 bool voice_is_call_state_active(struct audio_device *adev);
+void voice_set_device_mute_flag (struct audio_device *adev, bool state);
+
 #endif //VOICE_H