Merge "hal: Reset slimbus backend config on A2DP disconnect" into pi-dev am: 75da01e5f2
am: 12906812ae
Change-Id: I7cf4ee472b8347e6a4fe4faf9379dfecb88b37c2
diff --git a/hal/audio_extn/a2dp.c b/hal/audio_extn/a2dp.c
index 671fe59..7c2e4db 100644
--- a/hal/audio_extn/a2dp.c
+++ b/hal/audio_extn/a2dp.c
@@ -141,6 +141,7 @@
enc_codec_t *codec_type);
typedef int (*audio_check_a2dp_ready_t)(void);
typedef int (*audio_is_scrambling_enabled_t)(void);
+typedef uint16_t (*audio_get_a2dp_sink_latency_t)(void);
enum A2DP_STATE {
A2DP_STATE_CONNECTED,
@@ -221,6 +222,8 @@
audio_check_a2dp_ready_t audio_check_a2dp_ready;
/* Check if scrambling is enabled on BTSoC */
audio_is_scrambling_enabled_t audio_is_scrambling_enabled;
+ /* Get sink latency from Bluetooth stack */
+ audio_get_a2dp_sink_latency_t audio_get_a2dp_sink_latency;
/* Internal A2DP state identifier */
enum A2DP_STATE bt_state;
/* A2DP codec type configured */
@@ -713,6 +716,8 @@
dlsym(a2dp.bt_lib_handle,"audio_check_a2dp_ready");
a2dp.audio_is_scrambling_enabled = (audio_is_scrambling_enabled_t)
dlsym(a2dp.bt_lib_handle,"audio_is_scrambling_enabled");
+ a2dp.audio_get_a2dp_sink_latency = (audio_get_a2dp_sink_latency_t)
+ dlsym(a2dp.bt_lib_handle,"audio_get_a2dp_sink_latency");
}
}
@@ -1714,7 +1719,7 @@
uint32_t audio_extn_a2dp_get_encoder_latency()
{
- uint32_t latency = 0;
+ uint32_t latency_ms = 0;
int avsync_runtime_prop = 0;
int sbc_offset = 0, aptx_offset = 0, aptxhd_offset = 0,
aac_offset = 0, ldac_offset = 0;
@@ -1731,36 +1736,41 @@
}
}
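+ /* Query the sink latency (in ms) from the BT stack while connected; a value of 0 falls back to the per-codec default sink latency below */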
+ uint32_t slatency_ms = 0;
+ if (a2dp.audio_get_a2dp_sink_latency && a2dp.bt_state != A2DP_STATE_DISCONNECTED) {
+ slatency_ms = a2dp.audio_get_a2dp_sink_latency();
+ }
+
switch (a2dp.bt_encoder_format) {
case ENC_CODEC_TYPE_SBC:
- latency = (avsync_runtime_prop > 0) ? sbc_offset : ENCODER_LATENCY_SBC;
- latency += DEFAULT_SINK_LATENCY_SBC;
+ latency_ms = (avsync_runtime_prop > 0) ? sbc_offset : ENCODER_LATENCY_SBC;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_SBC : slatency_ms;
break;
case ENC_CODEC_TYPE_APTX:
- latency = (avsync_runtime_prop > 0) ? aptx_offset : ENCODER_LATENCY_APTX;
- latency += DEFAULT_SINK_LATENCY_APTX;
+ latency_ms = (avsync_runtime_prop > 0) ? aptx_offset : ENCODER_LATENCY_APTX;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_APTX : slatency_ms;
break;
case ENC_CODEC_TYPE_APTX_HD:
- latency = (avsync_runtime_prop > 0) ? aptxhd_offset : ENCODER_LATENCY_APTX_HD;
- latency += DEFAULT_SINK_LATENCY_APTX_HD;
+ latency_ms = (avsync_runtime_prop > 0) ? aptxhd_offset : ENCODER_LATENCY_APTX_HD;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_APTX_HD : slatency_ms;
break;
case ENC_CODEC_TYPE_AAC:
- latency = (avsync_runtime_prop > 0) ? aac_offset : ENCODER_LATENCY_AAC;
- latency += DEFAULT_SINK_LATENCY_AAC;
+ latency_ms = (avsync_runtime_prop > 0) ? aac_offset : ENCODER_LATENCY_AAC;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_AAC : slatency_ms;
break;
case ENC_CODEC_TYPE_LDAC:
- latency = (avsync_runtime_prop > 0) ? ldac_offset : ENCODER_LATENCY_LDAC;
- latency += DEFAULT_SINK_LATENCY_LDAC;
+ latency_ms = (avsync_runtime_prop > 0) ? ldac_offset : ENCODER_LATENCY_LDAC;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_LDAC : slatency_ms;
break;
case ENC_CODEC_TYPE_PCM:
- latency = ENCODER_LATENCY_PCM;
- latency += DEFAULT_SINK_LATENCY_PCM;
+ latency_ms = ENCODER_LATENCY_PCM;
+ latency_ms += DEFAULT_SINK_LATENCY_PCM;
break;
default:
- latency = DEFAULT_ENCODER_LATENCY;
+ latency_ms = DEFAULT_ENCODER_LATENCY;
break;
}
- return latency;
+ return latency_ms;
}
int audio_extn_a2dp_get_parameters(struct str_parms *query,
diff --git a/hal/audio_extn/maxxaudio.c b/hal/audio_extn/maxxaudio.c
index fc65332..7caa55e 100644
--- a/hal/audio_extn/maxxaudio.c
+++ b/hal/audio_extn/maxxaudio.c
@@ -62,8 +62,8 @@
};
typedef enum MA_STREAM_TYPE {
- STREAM_MIN_STREAM_TYPES,
- STREAM_VOICE = STREAM_MIN_STREAM_TYPES,
+ STREAM_MIN_TYPES = 0,
+ STREAM_VOICE = STREAM_MIN_TYPES,
STREAM_SYSTEM,
STREAM_RING,
STREAM_MUSIC,
@@ -509,21 +509,28 @@
float vol, bool active)
{
bool ret = false;
- ma_stream_type_t stype = (ma_stream_type_t)stream_type;
+ ma_stream_type_t stype;
- ALOGV("%s: stream[%d] vol[%f] active[%s]",
- __func__, stream_type, vol, active ? "true" : "false");
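+ /* Validate stream_type before it is cast and used to index ma_cur_state_table */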
+ if (stream_type >= STREAM_MAX_TYPES ||
+ stream_type < STREAM_MIN_TYPES) {
+ ALOGE("%s: stream_type %d out of range.", __func__, stream_type);
+ return ret;
+ }
if (!my_data) {
ALOGV("%s: maxxaudio isn't initialized.", __func__);
return ret;
}
+ ALOGV("%s: stream[%d] vol[%f] active[%s]",
+ __func__, stream_type, vol, active ? "true" : "false");
+
// update condition
// 1. start track: active and volume isn't zero
// 2. stop track: no tracks are active
if ((active && vol != 0) ||
(!active)) {
+ stype = (ma_stream_type_t)stream_type;
pthread_mutex_lock(&my_data->lock);
ma_cur_state_table[stype].vol = vol;
diff --git a/hal/audio_extn/utils.c b/hal/audio_extn/utils.c
index 73de0ab..79dd9e5 100644
--- a/hal/audio_extn/utils.c
+++ b/hal/audio_extn/utils.c
@@ -281,6 +281,10 @@
usecase->out_snd_device,
out->sample_rate,
sample_rate);
+ } else if (out->devices & AUDIO_DEVICE_OUT_ALL_A2DP) {
+ audio_extn_a2dp_get_sample_rate(sample_rate);
+ ALOGI("%s: Using sample rate %d for A2DP CoPP", __func__,
+ *sample_rate);
}
app_type_cfg->mode = flags_to_mode(0 /*playback*/, out->flags);
diff --git a/hal/audio_hw.c b/hal/audio_hw.c
index 9559573..261531d 100644
--- a/hal/audio_hw.c
+++ b/hal/audio_hw.c
@@ -225,7 +225,7 @@
#define AFE_PROXY_CHANNEL_COUNT 2
#define AFE_PROXY_SAMPLING_RATE 48000
-#define AFE_PROXY_PLAYBACK_PERIOD_SIZE 768
+#define AFE_PROXY_PLAYBACK_PERIOD_SIZE 256
#define AFE_PROXY_PLAYBACK_PERIOD_COUNT 4
struct pcm_config pcm_config_afe_proxy_playback = {
@@ -239,7 +239,7 @@
.avail_min = AFE_PROXY_PLAYBACK_PERIOD_SIZE,
};
-#define AFE_PROXY_RECORD_PERIOD_SIZE 768
+#define AFE_PROXY_RECORD_PERIOD_SIZE 256
#define AFE_PROXY_RECORD_PERIOD_COUNT 4
struct pcm_config pcm_config_afe_proxy_record = {
@@ -1486,7 +1486,7 @@
voice_set_sidetone(adev, out_snd_device, true);
}
- if (usecase == voip_usecase) {
+ if (usecase->type != PCM_CAPTURE && voip_usecase) {
struct stream_out *voip_out = voip_usecase->stream.out;
audio_extn_utils_send_app_type_gain(adev,
voip_out->app_type_cfg.app_type,
@@ -1980,6 +1980,9 @@
audio_low_latency_hint_end();
}
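+ /* Incall music uplink stream is stopping: clear the voice device mute flag (see voice_set_device_mute_flag) */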
+ if (out->usecase == USECASE_INCALL_MUSIC_UPLINK)
+ voice_set_device_mute_flag(adev, false);
+
/* 1. Get and set stream specific mixer controls */
disable_audio_route(adev, uc_info);
@@ -2103,6 +2106,9 @@
audio_extn_extspk_update(adev->extspk);
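+ /* Incall music uplink stream is starting: use voice device mute so a muted mic does not mute the uplink music (see voice_set_device_mute_flag) */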
+ if (out->usecase == USECASE_INCALL_MUSIC_UPLINK)
+ voice_set_device_mute_flag(adev, true);
+
ALOGV("%s: Opening PCM device card_id(%d) device_id(%d) format(%#x)",
__func__, adev->snd_card, out->pcm_device_id, out->config.format);
if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
@@ -2532,10 +2538,13 @@
// otherwise audio is no longer played on the new usb devices.
// By forcing the stream in standby, the usb stack refcount drops to 0
// and the driver is closed.
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD && val == AUDIO_DEVICE_NONE &&
+ if (val == AUDIO_DEVICE_NONE &&
audio_is_usb_out_device(out->devices)) {
- ALOGD("%s() putting the usb device in standby after disconnection", __func__);
- out_standby_l(&out->stream.common);
+ if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
+ ALOGD("%s() putting the usb device in standby after disconnection", __func__);
+ out_standby_l(&out->stream.common);
+ }
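+ /* Route the stream to speaker instead of leaving it with no device after the USB disconnect */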
+ val = AUDIO_DEVICE_OUT_SPEAKER;
}
pthread_mutex_lock(&adev->lock);
@@ -2698,7 +2707,7 @@
struct str_parms *reply,
audio_channel_mask_t *supported_channel_masks) {
int ret = -1;
- char value[256];
+ char value[ARRAY_SIZE(channels_name_to_enum_table) * 32 /* max channel name size */];
bool first = true;
size_t i, j;
diff --git a/hal/audio_hw.h b/hal/audio_hw.h
index 6379844..36e421c 100644
--- a/hal/audio_hw.h
+++ b/hal/audio_hw.h
@@ -329,6 +329,7 @@
bool enable_voicerx;
bool enable_hfp;
bool mic_break_enabled;
+ bool use_voice_device_mute;
int snd_card;
void *platform;
diff --git a/hal/msm8974/platform.c b/hal/msm8974/platform.c
index 7563689..221942a 100644
--- a/hal/msm8974/platform.c
+++ b/hal/msm8974/platform.c
@@ -2490,7 +2490,7 @@
__func__, mixer_ctl_name);
return -EINVAL;
}
- ALOGV("Setting voice mute state: %d", state);
+ ALOGV("%s: Setting voice mute state: %d", __func__, state);
mixer_ctl_set_array(ctl, set_values, ARRAY_SIZE(set_values));
if (my_data->csd != NULL) {
diff --git a/hal/platform_info.c b/hal/platform_info.c
index f5fbe3f..c89020d 100644
--- a/hal/platform_info.c
+++ b/hal/platform_info.c
@@ -26,6 +26,14 @@
#include <platform.h>
#include <math.h>
+/*
+ * Mandatory microphone characteristics include: device_id, type, address, location, group,
+ * index_in_the_group, directionality, num_frequency_responses, frequencies and responses.
+ * MANDATORY_MICROPHONE_CHARACTERISTICS should be updated when mandatory microphone
+ * characteristics are changed.
+ */
+#define MANDATORY_MICROPHONE_CHARACTERISTICS ((1 << 10) - 1)
+
typedef enum {
ROOT,
ACDB,
@@ -145,17 +153,6 @@
AUDIO_MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DEFAULT),
};
-enum {
- AUDIO_MICROPHONE_CHARACTERISTIC_NONE = 0u, // 0x0
- AUDIO_MICROPHONE_CHARACTERISTIC_SENSITIVITY = 1u, // 0x1
- AUDIO_MICROPHONE_CHARACTERISTIC_MAX_SPL = 2u, // 0x2
- AUDIO_MICROPHONE_CHARACTERISTIC_MIN_SPL = 4u, // 0x4
- AUDIO_MICROPHONE_CHARACTERISTIC_ORIENTATION = 8u, // 0x8
- AUDIO_MICROPHONE_CHARACTERISTIC_GEOMETRIC_LOCATION = 16u, // 0x10
- AUDIO_MICROPHONE_CHARACTERISTIC_ALL = 31u, /* ((((SENSITIVITY | MAX_SPL) | MIN_SPL)
- | ORIENTATION) | GEOMETRIC_LOCATION) */
-};
-
static bool find_enum_by_string(const struct audio_string_to_enum * table, const char * name,
int32_t len, unsigned int *value)
{
@@ -458,221 +455,165 @@
static void process_microphone_characteristic(const XML_Char **attr) {
struct audio_microphone_characteristic_t microphone;
- uint32_t curIdx = 0;
+ uint32_t index = 0;
+ uint32_t found_mandatory_characteristics = 0;
+ uint32_t num_frequencies = 0;
+ uint32_t num_responses = 0;
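+ /* Optional characteristics default to unknown/zero and are overwritten if present in the XML */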
+ microphone.sensitivity = AUDIO_MICROPHONE_SENSITIVITY_UNKNOWN;
+ microphone.max_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+ microphone.min_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+ microphone.orientation.x = 0.0f;
+ microphone.orientation.y = 0.0f;
+ microphone.orientation.z = 0.0f;
+ microphone.geometric_location.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ microphone.geometric_location.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ microphone.geometric_location.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- if (strcmp(attr[curIdx++], "valid_mask")) {
- ALOGE("%s: valid_mask not found", __func__);
- goto done;
- }
- uint32_t valid_mask = atoi(attr[curIdx++]);
-
- if (strcmp(attr[curIdx++], "device_id")) {
- ALOGE("%s: device_id not found", __func__);
- goto done;
- }
- if (strlen(attr[curIdx]) > AUDIO_MICROPHONE_ID_MAX_LEN) {
- ALOGE("%s: device_id %s is too long", __func__, attr[curIdx]);
- goto done;
- }
- strcpy(microphone.device_id, attr[curIdx++]);
-
- if (strcmp(attr[curIdx++], "type")) {
- ALOGE("%s: device not found", __func__);
- goto done;
- }
- if (!find_enum_by_string(device_in_types, (char*)attr[curIdx++],
- ARRAY_SIZE(device_in_types), µphone.device)) {
- ALOGE("%s: type %s in %s not found!",
- __func__, attr[--curIdx], PLATFORM_INFO_XML_PATH);
- goto done;
- }
-
- if (strcmp(attr[curIdx++], "address")) {
- ALOGE("%s: address not found", __func__);
- goto done;
- }
- if (strlen(attr[curIdx]) > AUDIO_DEVICE_MAX_ADDRESS_LEN) {
- ALOGE("%s, address %s is too long", __func__, attr[curIdx]);
- goto done;
- }
- strcpy(microphone.address, attr[curIdx++]);
- if (strlen(microphone.address) == 0) {
- // If the address is empty, populate the address according to device type.
- if (microphone.device == AUDIO_DEVICE_IN_BUILTIN_MIC) {
- strcpy(microphone.address, AUDIO_BOTTOM_MICROPHONE_ADDRESS);
- } else if (microphone.device == AUDIO_DEVICE_IN_BACK_MIC) {
- strcpy(microphone.address, AUDIO_BACK_MICROPHONE_ADDRESS);
- }
- }
-
- if (strcmp(attr[curIdx++], "location")) {
- ALOGE("%s: location not found", __func__);
- goto done;
- }
- if (!find_enum_by_string(mic_locations, (char*)attr[curIdx++],
- AUDIO_MICROPHONE_LOCATION_CNT, µphone.location)) {
- ALOGE("%s: location %s in %s not found!",
- __func__, attr[--curIdx], PLATFORM_INFO_XML_PATH);
- goto done;
- }
-
- if (strcmp(attr[curIdx++], "group")) {
- ALOGE("%s: group not found", __func__);
- goto done;
- }
- microphone.group = atoi(attr[curIdx++]);
-
- if (strcmp(attr[curIdx++], "index_in_the_group")) {
- ALOGE("%s: index_in_the_group not found", __func__);
- goto done;
- }
- microphone.index_in_the_group = atoi(attr[curIdx++]);
-
- if (strcmp(attr[curIdx++], "directionality")) {
- ALOGE("%s: directionality not found", __func__);
- goto done;
- }
- if (!find_enum_by_string(mic_directionalities, (char*)attr[curIdx++],
- AUDIO_MICROPHONE_DIRECTIONALITY_CNT, µphone.directionality)) {
- ALOGE("%s: directionality %s in %s not found!",
- __func__, attr[--curIdx], PLATFORM_INFO_XML_PATH);
- goto done;
- }
-
- if (strcmp(attr[curIdx++], "num_frequency_responses")) {
- ALOGE("%s: num_frequency_responses not found", __func__);
- goto done;
- }
- microphone.num_frequency_responses = atoi(attr[curIdx++]);
- if (microphone.num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
- ALOGE("%s: num_frequency_responses is too large", __func__);
- goto done;
- }
- if (microphone.num_frequency_responses > 0) {
- if (strcmp(attr[curIdx++], "frequencies")) {
- ALOGE("%s: frequencies not found", __func__);
- goto done;
- }
- char *token = strtok((char *)attr[curIdx++], " ");
- uint32_t num_frequencies = 0;
- while (token) {
- microphone.frequency_responses[0][num_frequencies++] = atof(token);
- if (num_frequencies > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
- ALOGE("%s: num %u of frequency is too large", __func__, num_frequencies);
+ while (attr[index] != NULL) {
+ const char *attribute = attr[index++];
+ char value[strlen(attr[index]) + 1];
+ strcpy(value, attr[index++]);
+ if (strcmp(attribute, "device_id") == 0) {
+ if (strlen(value) > AUDIO_MICROPHONE_ID_MAX_LEN) {
+ ALOGE("%s: device_id %s is too long", __func__, value);
goto done;
}
- token = strtok(NULL, " ");
- }
-
- if (strcmp(attr[curIdx++], "responses")) {
- ALOGE("%s: responses not found", __func__);
- goto done;
- }
- token = strtok((char *)attr[curIdx++], " ");
- uint32_t num_responses = 0;
- while (token) {
- microphone.frequency_responses[1][num_responses++] = atof(token);
- if (num_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
- ALOGE("%s: num %u of response is too large", __func__, num_responses);
+ strcpy(microphone.device_id, value);
+ found_mandatory_characteristics |= 1;
+ } else if (strcmp(attribute, "type") == 0) {
+ if (!find_enum_by_string(device_in_types, value,
+ ARRAY_SIZE(device_in_types), µphone.device)) {
+ ALOGE("%s: type %s in %s not found!",
+ __func__, value, PLATFORM_INFO_XML_PATH);
goto done;
}
- token = strtok(NULL, " ");
- }
-
- if (num_frequencies != num_responses
- || num_frequencies != microphone.num_frequency_responses) {
- ALOGE("%s: num of frequency and response not match: %u, %u, %u",
- __func__, num_frequencies, num_responses, microphone.num_frequency_responses);
- goto done;
- }
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_SENSITIVITY) {
- if (strcmp(attr[curIdx++], "sensitivity")) {
- ALOGE("%s: sensitivity not found", __func__);
- goto done;
- }
- microphone.sensitivity = atof(attr[curIdx++]);
- } else {
- microphone.sensitivity = AUDIO_MICROPHONE_SENSITIVITY_UNKNOWN;
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_MAX_SPL) {
- if (strcmp(attr[curIdx++], "max_spl")) {
- ALOGE("%s: max_spl not found", __func__);
- goto done;
- }
- microphone.max_spl = atof(attr[curIdx++]);
- } else {
- microphone.max_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_MIN_SPL) {
- if (strcmp(attr[curIdx++], "min_spl")) {
- ALOGE("%s: min_spl not found", __func__);
- goto done;
- }
- microphone.min_spl = atof(attr[curIdx++]);
- } else {
- microphone.min_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_ORIENTATION) {
- if (strcmp(attr[curIdx++], "orientation")) {
- ALOGE("%s: orientation not found", __func__);
- goto done;
- }
- char *token = strtok((char *)attr[curIdx++], " ");
- float orientation[3];
- uint32_t idx = 0;
- while (token) {
- orientation[idx++] = atof(token);
- if (idx > 3) {
+ found_mandatory_characteristics |= (1 << 1);
+ } else if (strcmp(attribute, "address") == 0) {
+ if (strlen(value) > AUDIO_DEVICE_MAX_ADDRESS_LEN) {
+ ALOGE("%s, address %s is too long", __func__, value);
+ goto done;
+ }
+ strcpy(microphone.address, value);
+ if (strlen(microphone.address) == 0) {
+ // If the address is empty, populate the address according to device type.
+ if (microphone.device == AUDIO_DEVICE_IN_BUILTIN_MIC) {
+ strcpy(microphone.address, AUDIO_BOTTOM_MICROPHONE_ADDRESS);
+ } else if (microphone.device == AUDIO_DEVICE_IN_BACK_MIC) {
+ strcpy(microphone.address, AUDIO_BACK_MICROPHONE_ADDRESS);
+ }
+ }
+ found_mandatory_characteristics |= (1 << 2);
+ } else if (strcmp(attribute, "location") == 0) {
+ if (!find_enum_by_string(mic_locations, value,
+ AUDIO_MICROPHONE_LOCATION_CNT, µphone.location)) {
+ ALOGE("%s: location %s in %s not found!",
+ __func__, value, PLATFORM_INFO_XML_PATH);
+ goto done;
+ }
+ found_mandatory_characteristics |= (1 << 3);
+ } else if (strcmp(attribute, "group") == 0) {
+ microphone.group = atoi(value);
+ found_mandatory_characteristics |= (1 << 4);
+ } else if (strcmp(attribute, "index_in_the_group") == 0) {
+ microphone.index_in_the_group = atoi(value);
+ found_mandatory_characteristics |= (1 << 5);
+ } else if (strcmp(attribute, "directionality") == 0) {
+ if (!find_enum_by_string(mic_directionalities, value,
+ AUDIO_MICROPHONE_DIRECTIONALITY_CNT, µphone.directionality)) {
+ ALOGE("%s: directionality %s in %s not found!",
+ __func__, value, PLATFORM_INFO_XML_PATH);
+ goto done;
+ }
+ found_mandatory_characteristics |= (1 << 6);
+ } else if (strcmp(attribute, "num_frequency_responses") == 0) {
+ microphone.num_frequency_responses = atoi(value);
+ if (microphone.num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+ ALOGE("%s: num_frequency_responses is too large", __func__);
+ goto done;
+ }
+ found_mandatory_characteristics |= (1 << 7);
+ } else if (strcmp(attribute, "frequencies") == 0) {
+ char *token = strtok(value, " ");
+ while (token) {
+ microphone.frequency_responses[0][num_frequencies++] = atof(token);
+ if (num_frequencies > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+ ALOGE("%s: num %u of frequency is too large", __func__, num_frequencies);
+ goto done;
+ }
+ token = strtok(NULL, " ");
+ }
+ found_mandatory_characteristics |= (1 << 8);
+ } else if (strcmp(attribute, "responses") == 0) {
+ char *token = strtok(value, " ");
+ while (token) {
+ microphone.frequency_responses[1][num_responses++] = atof(token);
+ if (num_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+ ALOGE("%s: num %u of response is too large", __func__, num_responses);
+ goto done;
+ }
+ token = strtok(NULL, " ");
+ }
+ found_mandatory_characteristics |= (1 << 9);
+ } else if (strcmp(attribute, "sensitivity") == 0) {
+ microphone.sensitivity = atof(value);
+ } else if (strcmp(attribute, "max_spl") == 0) {
+ microphone.max_spl = atof(value);
+ } else if (strcmp(attribute, "min_spl") == 0) {
+ microphone.min_spl = atof(value);
+ } else if (strcmp(attribute, "orientation") == 0) {
+ char *token = strtok(value, " ");
+ float orientation[3];
+ uint32_t idx = 0;
+ while (token) {
+ orientation[idx++] = atof(token);
+ if (idx > 3) {
+ ALOGE("%s: orientation invalid", __func__);
+ goto done;
+ }
+ token = strtok(NULL, " ");
+ }
+ if (idx != 3) {
ALOGE("%s: orientation invalid", __func__);
goto done;
}
- token = strtok(NULL, " ");
- }
- if (idx != 3) {
- ALOGE("%s: orientation invalid", __func__);
- goto done;
- }
- microphone.orientation.x = orientation[0];
- microphone.orientation.y = orientation[1];
- microphone.orientation.z = orientation[2];
- } else {
- microphone.orientation.x = 0.0f;
- microphone.orientation.y = 0.0f;
- microphone.orientation.z = 0.0f;
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_GEOMETRIC_LOCATION) {
- if (strcmp(attr[curIdx++], "geometric_location")) {
- ALOGE("%s: geometric_location not found", __func__);
- goto done;
- }
- char *token = strtok((char *)attr[curIdx++], " ");
- float geometric_location[3];
- uint32_t idx = 0;
- while (token) {
- geometric_location[idx++] = atof(token);
- if (idx > 3) {
+ microphone.orientation.x = orientation[0];
+ microphone.orientation.y = orientation[1];
+ microphone.orientation.z = orientation[2];
+ } else if (strcmp(attribute, "geometric_location") == 0) {
+ char *token = strtok(value, " ");
+ float geometric_location[3];
+ uint32_t idx = 0;
+ while (token) {
+ geometric_location[idx++] = atof(token);
+ if (idx > 3) {
+ ALOGE("%s: geometric_location invalid", __func__);
+ goto done;
+ }
+ token = strtok(NULL, " ");
+ }
+ if (idx != 3) {
ALOGE("%s: geometric_location invalid", __func__);
goto done;
}
- token = strtok(NULL, " ");
+ microphone.geometric_location.x = geometric_location[0];
+ microphone.geometric_location.y = geometric_location[1];
+ microphone.geometric_location.z = geometric_location[2];
+ } else {
+ ALOGW("%s: unknown attribute of microphone characteristics: %s",
+ __func__, attribute);
}
- if (idx != 3) {
- ALOGE("%s: geometric_location invalid", __func__);
- goto done;
- }
- microphone.geometric_location.x = geometric_location[0];
- microphone.geometric_location.y = geometric_location[1];
- microphone.geometric_location.z = geometric_location[2];
- } else {
- microphone.geometric_location.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- microphone.geometric_location.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- microphone.geometric_location.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ }
+
+ if (num_frequencies != num_responses
+ || num_frequencies != microphone.num_frequency_responses) {
+ ALOGE("%s: num of frequency and response not match: %u, %u, %u",
+ __func__, num_frequencies, num_responses, microphone.num_frequency_responses);
+ goto done;
+ }
+
+ if (found_mandatory_characteristics != MANDATORY_MICROPHONE_CHARACTERISTICS) {
+ ALOGE("%s: some of mandatory microphone characteriscts are missed: %u",
+ __func__, found_mandatory_characteristics);
}
platform_set_microphone_characteristic(my_data.platform, microphone);
diff --git a/hal/voice.c b/hal/voice.c
index 09cb926..708ce6c 100644
--- a/hal/voice.c
+++ b/hal/voice.c
@@ -171,6 +171,7 @@
uc_info->devices = adev->current_call_output ->devices;
uc_info->in_snd_device = SND_DEVICE_NONE;
uc_info->out_snd_device = SND_DEVICE_NONE;
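+ /* Default to stream-based mic mute at call start; device mute is used only while incall music uplink is active */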
+ adev->use_voice_device_mute = false;
list_add_tail(&adev->usecase_list, &uc_info->list);
@@ -357,11 +358,19 @@
int voice_set_mic_mute(struct audio_device *adev, bool state)
{
int err = 0;
+ struct audio_usecase *usecase = NULL;
adev->voice.mic_mute = state;
if (adev->mode == AUDIO_MODE_IN_CALL ||
- adev->mode == AUDIO_MODE_IN_COMMUNICATION)
- err = platform_set_mic_mute(adev->platform, state);
+ adev->mode == AUDIO_MODE_IN_COMMUNICATION) {
+ /* Use device mute if incall music delivery usecase is in progress */
+ if (adev->use_voice_device_mute)
+ err = platform_set_device_mute(adev->platform, state, "tx");
+ else
+ err = platform_set_mic_mute(adev->platform, state);
+ ALOGV("%s: voice mute status=%d, use_voice_device_mute_flag=%d",
+ __func__, state, adev->use_voice_device_mute);
+ }
return err;
}
@@ -371,6 +380,25 @@
return adev->voice.mic_mute;
}
+// The following function is called when the incall music uplink usecase is
+// created or destroyed while the mic is muted. If the incall music uplink
+// usecase is active, apply voice device mute to mute only the voice Tx
+// path and not the mixed voice Tx + incall-music path. Revert to voice
+// stream mute once the incall music uplink usecase is inactive.
+void voice_set_device_mute_flag (struct audio_device *adev, bool state)
+{
+ if (adev->voice.mic_mute) {
+ if (state) {
+ platform_set_device_mute(adev->platform, true, "tx");
+ platform_set_mic_mute(adev->platform, false);
+ } else {
+ platform_set_mic_mute(adev->platform, true);
+ platform_set_device_mute(adev->platform, false, "tx");
+ }
+ }
+ adev->use_voice_device_mute = state;
+}
+
int voice_set_volume(struct audio_device *adev, float volume)
{
int vol, err = 0;
@@ -517,6 +545,8 @@
adev->voice.volume = 1.0f;
adev->voice.mic_mute = false;
adev->voice.in_call = false;
+ adev->use_voice_device_mute = false;
+
for (i = 0; i < MAX_VOICE_SESSIONS; i++) {
adev->voice.session[i].pcm_rx = NULL;
adev->voice.session[i].pcm_tx = NULL;
diff --git a/hal/voice.h b/hal/voice.h
index 469a3b5..71e096b 100644
--- a/hal/voice.h
+++ b/hal/voice.h
@@ -95,4 +95,6 @@
snd_device_t out_snd_device,
bool enable);
bool voice_is_call_state_active(struct audio_device *adev);
+void voice_set_device_mute_flag (struct audio_device *adev, bool state);
+
#endif //VOICE_H