audio: select camcorder snd device according to camera orientation am: 5f4ca9505d
am: 8cf22e49ee
Change-Id: I5823f2cd393c34cc35faefb1aedf67813dcd833d
diff --git a/hal/audio_extn/a2dp.c b/hal/audio_extn/a2dp.c
index 671fe59..7c2e4db 100644
--- a/hal/audio_extn/a2dp.c
+++ b/hal/audio_extn/a2dp.c
@@ -141,6 +141,7 @@
enc_codec_t *codec_type);
typedef int (*audio_check_a2dp_ready_t)(void);
typedef int (*audio_is_scrambling_enabled_t)(void);
+typedef uint16_t (*audio_get_a2dp_sink_latency_t)(void);
enum A2DP_STATE {
A2DP_STATE_CONNECTED,
@@ -221,6 +222,8 @@
audio_check_a2dp_ready_t audio_check_a2dp_ready;
/* Check if scrambling is enabled on BTSoC */
audio_is_scrambling_enabled_t audio_is_scrambling_enabled;
+ /* Get sink latency from Bluetooth stack */
+ audio_get_a2dp_sink_latency_t audio_get_a2dp_sink_latency;
/* Internal A2DP state identifier */
enum A2DP_STATE bt_state;
/* A2DP codec type configured */
@@ -713,6 +716,8 @@
dlsym(a2dp.bt_lib_handle,"audio_check_a2dp_ready");
a2dp.audio_is_scrambling_enabled = (audio_is_scrambling_enabled_t)
dlsym(a2dp.bt_lib_handle,"audio_is_scrambling_enabled");
+ a2dp.audio_get_a2dp_sink_latency = (audio_get_a2dp_sink_latency_t)
+ dlsym(a2dp.bt_lib_handle,"audio_get_a2dp_sink_latency");
}
}
@@ -1714,7 +1719,7 @@
uint32_t audio_extn_a2dp_get_encoder_latency()
{
- uint32_t latency = 0;
+ uint32_t latency_ms = 0;
int avsync_runtime_prop = 0;
int sbc_offset = 0, aptx_offset = 0, aptxhd_offset = 0,
aac_offset = 0, ldac_offset = 0;
@@ -1731,36 +1736,41 @@
}
}
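+    /* Query the sink-reported latency from the BT stack when available; 0
+     * means unknown, in which case the per-codec defaults below are used. */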
+ uint32_t slatency_ms = 0;
+ if (a2dp.audio_get_a2dp_sink_latency && a2dp.bt_state != A2DP_STATE_DISCONNECTED) {
+ slatency_ms = a2dp.audio_get_a2dp_sink_latency();
+ }
+
switch (a2dp.bt_encoder_format) {
case ENC_CODEC_TYPE_SBC:
- latency = (avsync_runtime_prop > 0) ? sbc_offset : ENCODER_LATENCY_SBC;
- latency += DEFAULT_SINK_LATENCY_SBC;
+ latency_ms = (avsync_runtime_prop > 0) ? sbc_offset : ENCODER_LATENCY_SBC;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_SBC : slatency_ms;
break;
case ENC_CODEC_TYPE_APTX:
- latency = (avsync_runtime_prop > 0) ? aptx_offset : ENCODER_LATENCY_APTX;
- latency += DEFAULT_SINK_LATENCY_APTX;
+ latency_ms = (avsync_runtime_prop > 0) ? aptx_offset : ENCODER_LATENCY_APTX;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_APTX : slatency_ms;
break;
case ENC_CODEC_TYPE_APTX_HD:
- latency = (avsync_runtime_prop > 0) ? aptxhd_offset : ENCODER_LATENCY_APTX_HD;
- latency += DEFAULT_SINK_LATENCY_APTX_HD;
+ latency_ms = (avsync_runtime_prop > 0) ? aptxhd_offset : ENCODER_LATENCY_APTX_HD;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_APTX_HD : slatency_ms;
break;
case ENC_CODEC_TYPE_AAC:
- latency = (avsync_runtime_prop > 0) ? aac_offset : ENCODER_LATENCY_AAC;
- latency += DEFAULT_SINK_LATENCY_AAC;
+ latency_ms = (avsync_runtime_prop > 0) ? aac_offset : ENCODER_LATENCY_AAC;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_AAC : slatency_ms;
break;
case ENC_CODEC_TYPE_LDAC:
- latency = (avsync_runtime_prop > 0) ? ldac_offset : ENCODER_LATENCY_LDAC;
- latency += DEFAULT_SINK_LATENCY_LDAC;
+ latency_ms = (avsync_runtime_prop > 0) ? ldac_offset : ENCODER_LATENCY_LDAC;
+ latency_ms += (slatency_ms == 0) ? DEFAULT_SINK_LATENCY_LDAC : slatency_ms;
break;
case ENC_CODEC_TYPE_PCM:
- latency = ENCODER_LATENCY_PCM;
- latency += DEFAULT_SINK_LATENCY_PCM;
+ latency_ms = ENCODER_LATENCY_PCM;
+ latency_ms += DEFAULT_SINK_LATENCY_PCM;
break;
default:
- latency = DEFAULT_ENCODER_LATENCY;
+ latency_ms = DEFAULT_ENCODER_LATENCY;
break;
}
- return latency;
+ return latency_ms;
}
int audio_extn_a2dp_get_parameters(struct str_parms *query,
diff --git a/hal/audio_extn/maxxaudio.c b/hal/audio_extn/maxxaudio.c
index fc65332..2fd188d 100644
--- a/hal/audio_extn/maxxaudio.c
+++ b/hal/audio_extn/maxxaudio.c
@@ -41,29 +41,40 @@
#define CAL_PRESIST_STR "cal_persist"
#define CAL_SAMPLERATE_STR "cal_samplerate"
-#define MA_QDSP_PARAM_INIT "maxxaudio_qdsp_initialize"
-#define MA_QDSP_PARAM_DEINIT "maxxaudio_qdsp_uninitialize"
-#define MA_QDSP_SET_LR_SWAP "maxxaudio_qdsp_set_lr_swap"
-#define MA_QDSP_SET_MODE "maxxaudio_qdsp_set_sound_mode"
-#define MA_QDSP_SET_VOL "maxxaudio_qdsp_set_volume"
-#define MA_QDSP_SET_VOLT "maxxaudio_qdsp_set_volume_table"
+#define MA_QDSP_PARAM_INIT "maxxaudio_qdsp_initialize"
+#define MA_QDSP_PARAM_DEINIT "maxxaudio_qdsp_uninitialize"
+#define MA_QDSP_SET_LR_SWAP "maxxaudio_qdsp_set_lr_swap"
+#define MA_QDSP_SET_MODE "maxxaudio_qdsp_set_sound_mode"
+#define MA_QDSP_SET_VOL "maxxaudio_qdsp_set_volume"
+#define MA_QDSP_SET_VOLT "maxxaudio_qdsp_set_volume_table"
+#define MA_QDSP_SET_PARAM "maxxaudio_qdsp_set_parameter"
#define SUPPORT_DEV "Blackbird"
#define SUPPORTED_USB 0x01
-struct ma_audio_cal_settings {
- int app_type;
- audio_devices_t device;
-};
+typedef unsigned int effective_scope_flag_t;
+const effective_scope_flag_t EFFECTIVE_SCOPE_RTC = 1 << 0; /* RTC */
+const effective_scope_flag_t EFFECTIVE_SCOPE_ACDB = 1 << 1; /* ACDB */
+const effective_scope_flag_t EFFECTIVE_SCOPE_ALL = EFFECTIVE_SCOPE_RTC | EFFECTIVE_SCOPE_ACDB;
+const effective_scope_flag_t EFFECTIVE_SCOPE_NONE = 0;
+const effective_scope_flag_t EFFECTIVE_SCOPE_DEFAULT = EFFECTIVE_SCOPE_NONE;
-struct ma_state {
- float vol;
- bool active;
-};
+const unsigned int AUDIO_CAL_SETTINGS_VERSION_MAJOR = 2;
+const unsigned int AUDIO_CAL_SETTINGS_VERSION_MINOR = 0;
+const unsigned int AUDIO_CAL_SETTINGS_VERSION_MAJOR_DEFAULT = AUDIO_CAL_SETTINGS_VERSION_MAJOR;
+const unsigned int AUDIO_CAL_SETTINGS_VERSION_MINOR_DEFAULT = AUDIO_CAL_SETTINGS_VERSION_MINOR;
+
+const unsigned int VALUE_AUTO = 0xFFFFFFFF;
+const unsigned int APP_TYPE_AUTO = VALUE_AUTO;
+const unsigned int APP_TYPE_DEFAULT = APP_TYPE_AUTO;
+const unsigned int DEVICE_AUTO = VALUE_AUTO;
+const unsigned int DEVICE_DEFAULT = DEVICE_AUTO;
+
+const unsigned int MAAP_OUTPUT_GAIN = 27;
typedef enum MA_STREAM_TYPE {
- STREAM_MIN_STREAM_TYPES,
- STREAM_VOICE = STREAM_MIN_STREAM_TYPES,
+ STREAM_MIN_TYPES = 0,
+ STREAM_VOICE = STREAM_MIN_TYPES,
STREAM_SYSTEM,
STREAM_RING,
STREAM_MUSIC,
@@ -76,8 +87,31 @@
MA_CMD_VOL,
MA_CMD_SWAP_ENABLE,
MA_CMD_SWAP_DISABLE,
+ MA_CMD_SOFT_MUTE_ENABLE,
+ MA_CMD_SOFT_MUTE_DISABLE,
} ma_cmd_t;
+typedef struct ma_audio_cal_version {
+ unsigned int major;
+ unsigned int minor;
+} ma_audio_cal_version_t;
+
+typedef struct ma_audio_cal_common_settings {
+ unsigned int app_type;
+ unsigned int device;
+} ma_audio_cal_common_settings_t;
+
+struct ma_audio_cal_settings {
+ ma_audio_cal_version_t version;
+ ma_audio_cal_common_settings_t common;
+ effective_scope_flag_t effect_scope_flag;
+};
+
+struct ma_state {
+ float vol;
+ bool active;
+};
+
typedef void *ma_audio_cal_handle_t;
typedef int (*set_audio_cal_t)(const char *);
@@ -100,6 +134,10 @@
const struct ma_audio_cal_settings *,
size_t, struct ma_state *);
+typedef bool (*ma_set_param_t)(ma_audio_cal_handle_t,
+ const struct ma_audio_cal_settings *,
+ unsigned int, double);
+
struct ma_platform_data {
void *waves_handle;
void *platform;
@@ -110,6 +148,7 @@
ma_set_sound_mode_t ma_set_sound_mode;
ma_set_volume_t ma_set_volume;
ma_set_volume_table_t ma_set_volume_table;
+ ma_set_param_t ma_set_param;
};
ma_audio_cal_handle_t g_ma_audio_cal_handle = NULL;
@@ -155,6 +194,14 @@
volume_table);
}
+static bool ma_set_param_l(
+ const struct ma_audio_cal_settings *audio_cal_settings,
+ unsigned int index, double value)
+{
+ return my_data->ma_set_param(g_ma_audio_cal_handle,
+ audio_cal_settings, index, value);
+}
+
static inline bool valid_usecase(struct audio_usecase *usecase)
{
if ((usecase->type == PCM_PLAYBACK) &&
@@ -164,9 +211,8 @@
(usecase->id == USECASE_AUDIO_PLAYBACK_OFFLOAD)) &&
/* support devices */
((usecase->devices & AUDIO_DEVICE_OUT_SPEAKER) ||
- (usecase->devices & AUDIO_DEVICE_OUT_SPEAKER_SAFE) ||
- /* TODO: enable A2DP when it is ready */
- (usecase->devices & AUDIO_DEVICE_OUT_ALL_USB)))
+ (usecase->devices & AUDIO_DEVICE_OUT_SPEAKER_SAFE)))
+ /* TODO: enable A2DP/USB when it is ready */
return true;
@@ -182,13 +228,21 @@
ma_stream_type_t i = 0;
for (i = 0; i < STREAM_MAX_TYPES; i++)
- if (ma_cur_state_table[i].active &&
- (ma_cur_state_table[i].vol != 0))
+ if (ma_cur_state_table[i].active)
return true;
return false;
}
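+/* Fill ma_cal with the version 2.0 defaults: auto app type and device, and
+ * both RTC and ACDB effect scopes enabled. */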
+static void ma_cal_init(struct ma_audio_cal_settings *ma_cal)
+{
+ ma_cal->version.major = AUDIO_CAL_SETTINGS_VERSION_MAJOR_DEFAULT;
+ ma_cal->version.minor = AUDIO_CAL_SETTINGS_VERSION_MINOR_DEFAULT;
+ ma_cal->common.app_type = APP_TYPE_DEFAULT;
+ ma_cal->common.device = DEVICE_DEFAULT;
+ ma_cal->effect_scope_flag = EFFECTIVE_SCOPE_ALL;
+}
+
static bool check_and_send_all_audio_cal(struct audio_device *adev, ma_cmd_t cmd)
{
int i = 0;
@@ -196,32 +250,27 @@
float vol = 0;
struct listnode *node;
struct audio_usecase *usecase;
- struct ma_audio_cal_settings *ma_cal = NULL;
+ struct ma_audio_cal_settings ma_cal;
- // alloct
- ma_cal = (struct ma_audio_cal_settings *)malloc(sizeof(struct ma_audio_cal_settings));
-
- if (ma_cal == NULL) {
- ALOGE("%s: ma_cal alloct fail", __func__);
- return ret;
- }
+ ma_cal_init(&ma_cal);
list_for_each(node, &adev->usecase_list) {
usecase = node_to_item(node, struct audio_usecase, list);
if (valid_usecase(usecase)) {
- ma_cal->app_type = usecase->stream.out->app_type_cfg.app_type;
- ma_cal->device = usecase->stream.out->devices;
+ ma_cal.common.app_type = usecase->stream.out->app_type_cfg.app_type;
+ ma_cal.common.device = usecase->stream.out->devices;
ALOGV("%s: send usecase(%d) app_type(%d) device(%d)",
- __func__, usecase->id, ma_cal->app_type, ma_cal->device);
+ __func__, usecase->id, ma_cal.common.app_type,
+ ma_cal.common.device);
switch (cmd) {
case MA_CMD_VOL:
- ret = ma_set_volume_table_l(ma_cal, STREAM_MAX_TYPES,
+ ret = ma_set_volume_table_l(&ma_cal, STREAM_MAX_TYPES,
ma_cur_state_table);
if (ret)
- ALOGV("Waves: ma_set_volume_table_l success");
+ ALOGV("ma_set_volume_table_l success");
else
- ALOGE("Waves: ma_set_volume_table_l %f returned with error.", vol);
+ ALOGE("ma_set_volume_table_l returned with error.");
ALOGV("%s: send volume table === Start", __func__);
for (i = 0; i < STREAM_MAX_TYPES; i++)
@@ -230,26 +279,46 @@
ma_cur_state_table[i].active ? "T" : "F");
ALOGV("%s: send volume table === End", __func__);
break;
+
case MA_CMD_SWAP_ENABLE:
- ret = ma_set_lr_swap_l(ma_cal, true);
+ ret = ma_set_lr_swap_l(&ma_cal, true);
if (ret)
- ALOGV("Waves: ma_set_lr_swap_l enable returned with success.");
+ ALOGV("ma_set_lr_swap_l enable returned with success.");
else
- ALOGE("Waves: ma_set_lr_swap_l enable returned with error.");
+ ALOGE("ma_set_lr_swap_l enable returned with error.");
break;
+
case MA_CMD_SWAP_DISABLE:
- ret = ma_set_lr_swap_l(ma_cal, false);
+ ret = ma_set_lr_swap_l(&ma_cal, false);
if (ret)
- ALOGV("Waves: ma_set_lr_swap_l disable returned with success.");
+ ALOGV("ma_set_lr_swap_l disable returned with success.");
else
- ALOGE("Waves: ma_set_lr_swap_l disable returned with error.");
+ ALOGE("ma_set_lr_swap_l disable returned with error.");
break;
+
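+                /* Soft mute drives MAAP output gain to -96 through the RTC
+                 * scope only; low-latency usecases are skipped. */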
+ case MA_CMD_SOFT_MUTE_ENABLE:
+ if (usecase->id == USECASE_AUDIO_PLAYBACK_LOW_LATENCY) break;
+
+ ma_cal.effect_scope_flag = EFFECTIVE_SCOPE_RTC;
+ ret = ma_set_param_l(&ma_cal, MAAP_OUTPUT_GAIN, -96);
+ if (!ret)
+ ALOGE("soft mute enable returned with error.");
+ break;
+
+ case MA_CMD_SOFT_MUTE_DISABLE:
+ if (usecase->id == USECASE_AUDIO_PLAYBACK_LOW_LATENCY) break;
+
+ ma_cal.effect_scope_flag = EFFECTIVE_SCOPE_RTC;
+ ret = ma_set_param_l(&ma_cal, MAAP_OUTPUT_GAIN, 0);
+ if (!ret)
+ ALOGE("soft mute disable returned with error.");
+ break;
+
default:
ALOGE("%s: unsupported cmd %d", __func__, cmd);
}
}
}
- free(ma_cal);
return ret;
}
@@ -417,6 +486,13 @@
ALOGE("%s: dlsym error %s for ma_set_volume_table", __func__, dlerror());
goto error;
}
+
+ my_data->ma_set_param = (ma_set_param_t)dlsym(
+ my_data->waves_handle, MA_QDSP_SET_PARAM);
+ if (!my_data->ma_set_param) {
+ ALOGE("%s: dlsym error %s for ma_set_param", __func__, dlerror());
+ goto error;
+ }
}
/* get preset table */
@@ -509,27 +585,42 @@
float vol, bool active)
{
bool ret = false;
- ma_stream_type_t stype = (ma_stream_type_t)stream_type;
+ bool first_enable = false;
+ struct ma_state pr_mstate;
- ALOGV("%s: stream[%d] vol[%f] active[%s]",
- __func__, stream_type, vol, active ? "true" : "false");
+ if (stream_type >= STREAM_MAX_TYPES ||
+ stream_type < STREAM_MIN_TYPES) {
+ ALOGE("%s: stream_type %d out of range.", __func__, stream_type);
+ return ret;
+ }
if (!my_data) {
ALOGV("%s: maxxaudio isn't initialized.", __func__);
return ret;
}
- // update condition
- // 1. start track: active and volume isn't zero
- // 2. stop track: no tracks are active
- if ((active && vol != 0) ||
- (!active)) {
- pthread_mutex_lock(&my_data->lock);
+ ALOGV("%s: stream[%d] vol[%f] active[%s]",
+ __func__, stream_type, vol, active ? "true" : "false");
- ma_cur_state_table[stype].vol = vol;
- ma_cur_state_table[stype].active = active;
- if (is_active())
- ret = check_and_send_all_audio_cal(adev, MA_CMD_VOL);
+ pr_mstate.vol = ma_cur_state_table[(ma_stream_type_t)stream_type].vol;
+ pr_mstate.active = ma_cur_state_table[(ma_stream_type_t)stream_type].active;
+
+ // update condition: vol or active state changes
+ if (pr_mstate.vol != vol || pr_mstate.active != active) {
+
+ pthread_mutex_lock(&my_data->lock);
+ // get active state before updating
+ first_enable = (!is_active()) && active;
+
+ ma_cur_state_table[(ma_stream_type_t)stream_type].vol = vol;
+ ma_cur_state_table[(ma_stream_type_t)stream_type].active = active;
+
+            if (first_enable) // transition: all streams inactive -> first one active
+                ret = check_and_send_all_audio_cal(adev, MA_CMD_SOFT_MUTE_DISABLE);
+            else if (!is_active()) // all streams now inactive
+                ret = check_and_send_all_audio_cal(adev, MA_CMD_SOFT_MUTE_ENABLE);
+
+ ret = check_and_send_all_audio_cal(adev, MA_CMD_VOL);
pthread_mutex_unlock(&my_data->lock);
}
@@ -542,7 +633,7 @@
int i = 0;
int u_index = -1;
float vol = 0;
- struct ma_audio_cal_settings *ma_cal = NULL;
+ struct ma_audio_cal_settings ma_cal;
if (!my_data) {
ALOGV("%s: maxxaudio isn't initialized.", __func__);
@@ -554,38 +645,34 @@
return;
}
- ma_cal = (struct ma_audio_cal_settings *)malloc(sizeof(struct ma_audio_cal_settings));
+ ma_cal_init(&ma_cal);
/* update audio_cal and send it */
- if (ma_cal != NULL){
- ma_cal->app_type = usecase->stream.out->app_type_cfg.app_type;
- ma_cal->device = usecase->stream.out->devices;
- ALOGV("%s: send usecase(%d) app_type(%d) device(%d)",
- __func__, usecase->id, ma_cal->app_type, ma_cal->device);
+ ma_cal.common.app_type = usecase->stream.out->app_type_cfg.app_type;
+ ma_cal.common.device = usecase->stream.out->devices;
+ ALOGV("%s: send usecase(%d) app_type(%d) device(%d)",
+ __func__, usecase->id, ma_cal.common.app_type,
+ ma_cal.common.device);
- pthread_mutex_lock(&my_data->lock);
+ pthread_mutex_lock(&my_data->lock);
- if (is_active()) {
- ALOGV("%s: send volume table === Start", __func__);
- for (i = 0; i < STREAM_MAX_TYPES; i++)
- ALOGV("%s: stream(%d) volume(%f) active(%s)", __func__, i,
- ma_cur_state_table[i].vol,
- ma_cur_state_table[i].active ? "T" : "F");
- ALOGV("%s: send volume table === End", __func__);
+ if (is_active()) {
+ ALOGV("%s: send volume table === Start", __func__);
+ for (i = 0; i < STREAM_MAX_TYPES; i++)
+ ALOGV("%s: stream(%d) volume(%f) active(%s)", __func__, i,
+ ma_cur_state_table[i].vol,
+ ma_cur_state_table[i].active ? "T" : "F");
+ ALOGV("%s: send volume table === End", __func__);
- if (!ma_set_volume_table_l(ma_cal,
- STREAM_MAX_TYPES,
- ma_cur_state_table))
- ALOGE("Waves: ma_set_volume_table_l %f returned with error.", vol);
- else
- ALOGV("Waves: ma_set_volume_table_l success");
+ if (!ma_set_volume_table_l(&ma_cal,
+ STREAM_MAX_TYPES,
+ ma_cur_state_table))
+ ALOGE("ma_set_volume_table_l returned with error.");
+ else
+ ALOGV("ma_set_volume_table_l success");
- }
- pthread_mutex_unlock(&my_data->lock);
- free(ma_cal);
- } else {
- ALOGE("%s: ma_cal alloct fail", __func__);
}
+ pthread_mutex_unlock(&my_data->lock);
}
void audio_extn_ma_set_parameters(struct audio_device *adev,
diff --git a/hal/audio_extn/utils.c b/hal/audio_extn/utils.c
index 73de0ab..79dd9e5 100644
--- a/hal/audio_extn/utils.c
+++ b/hal/audio_extn/utils.c
@@ -281,6 +281,10 @@
usecase->out_snd_device,
out->sample_rate,
sample_rate);
+ } else if (out->devices & AUDIO_DEVICE_OUT_ALL_A2DP) {
+ audio_extn_a2dp_get_sample_rate(sample_rate);
+ ALOGI("%s: Using sample rate %d for A2DP CoPP", __func__,
+ *sample_rate);
}
app_type_cfg->mode = flags_to_mode(0 /*playback*/, out->flags);
diff --git a/hal/audio_hw.c b/hal/audio_hw.c
index 8e04bf8..3312cbe 100644
--- a/hal/audio_hw.c
+++ b/hal/audio_hw.c
@@ -225,7 +225,7 @@
#define AFE_PROXY_CHANNEL_COUNT 2
#define AFE_PROXY_SAMPLING_RATE 48000
-#define AFE_PROXY_PLAYBACK_PERIOD_SIZE 768
+#define AFE_PROXY_PLAYBACK_PERIOD_SIZE 256
#define AFE_PROXY_PLAYBACK_PERIOD_COUNT 4
struct pcm_config pcm_config_afe_proxy_playback = {
@@ -239,7 +239,7 @@
.avail_min = AFE_PROXY_PLAYBACK_PERIOD_SIZE,
};
-#define AFE_PROXY_RECORD_PERIOD_SIZE 768
+#define AFE_PROXY_RECORD_PERIOD_SIZE 256
#define AFE_PROXY_RECORD_PERIOD_COUNT 4
struct pcm_config pcm_config_afe_proxy_record = {
@@ -615,7 +615,7 @@
struct audio_usecase *usecase)
{
snd_device_t snd_device;
- char mixer_path[50];
+ char mixer_path[MIXER_PATH_MAX_LENGTH];
if (usecase == NULL)
return -EINVAL;
@@ -628,8 +628,13 @@
snd_device = usecase->out_snd_device;
audio_extn_utils_send_app_type_cfg(adev, usecase);
audio_extn_utils_send_audio_calibration(adev, usecase);
- strcpy(mixer_path, use_case_table[usecase->id]);
+
+ // we shouldn't truncate mixer_path
+ ALOGW_IF(strlcpy(mixer_path, use_case_table[usecase->id], sizeof(mixer_path))
+ >= sizeof(mixer_path), "%s: truncation on mixer path", __func__);
+ // this also appends to mixer_path
platform_add_backend_name(adev->platform, mixer_path, snd_device);
+
audio_extn_sound_trigger_update_stream_status(usecase, ST_EVENT_STREAM_BUSY);
ALOGD("%s: usecase(%d) apply and update mixer path: %s", __func__, usecase->id, mixer_path);
audio_route_apply_and_update_path(adev->audio_route, mixer_path);
@@ -642,7 +647,7 @@
struct audio_usecase *usecase)
{
snd_device_t snd_device;
- char mixer_path[50];
+ char mixer_path[MIXER_PATH_MAX_LENGTH];
if (usecase == NULL)
return -EINVAL;
@@ -652,9 +657,14 @@
snd_device = usecase->in_snd_device;
else
snd_device = usecase->out_snd_device;
- strcpy(mixer_path, use_case_table[usecase->id]);
+
+ // we shouldn't truncate mixer_path
+ ALOGW_IF(strlcpy(mixer_path, use_case_table[usecase->id], sizeof(mixer_path))
+ >= sizeof(mixer_path), "%s: truncation on mixer path", __func__);
+ // this also appends to mixer_path
platform_add_backend_name(adev->platform, mixer_path, snd_device);
ALOGD("%s: usecase(%d) reset and update mixer path: %s", __func__, usecase->id, mixer_path);
+
audio_route_reset_and_update_path(adev->audio_route, mixer_path);
audio_extn_sound_trigger_update_stream_status(usecase, ST_EVENT_STREAM_FREE);
@@ -1134,8 +1144,14 @@
// audio_channel_in_mask_from_count() does the right conversion to either positional or
// indexed mask
for ( ; channel_count <= channels && num_masks < max_masks; channel_count++) {
- supported_channel_masks[num_masks++] =
+ const audio_channel_mask_t mask =
audio_channel_in_mask_from_count(channel_count);
+ supported_channel_masks[num_masks++] = mask;
+ const audio_channel_mask_t index_mask =
+ audio_channel_mask_for_index_assignment_from_count(channel_count);
+ if (mask != index_mask && num_masks < max_masks) { // ensure index mask added.
+ supported_channel_masks[num_masks++] = index_mask;
+ }
}
}
#ifdef NDEBUG
@@ -1500,7 +1516,7 @@
voice_set_sidetone(adev, out_snd_device, true);
}
- if (usecase == voip_usecase) {
+ if (usecase->type != PCM_CAPTURE && voip_usecase) {
struct stream_out *voip_out = voip_usecase->stream.out;
audio_extn_utils_send_app_type_gain(adev,
voip_out->app_type_cfg.app_type,
@@ -1994,6 +2010,9 @@
audio_low_latency_hint_end();
}
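+
+    /* The incall-music uplink stream is stopping: clear the device-mute flag
+     * so voice mute reverts to the regular mic mute path (see voice.c). */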
+ if (out->usecase == USECASE_INCALL_MUSIC_UPLINK)
+ voice_set_device_mute_flag(adev, false);
+
/* 1. Get and set stream specific mixer controls */
disable_audio_route(adev, uc_info);
@@ -2117,6 +2136,9 @@
audio_extn_extspk_update(adev->extspk);
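+    /* The incall-music uplink stream is starting: set the device-mute flag so
+     * a muted mic mutes only the voice Tx device and not the injected music
+     * (see voice_set_device_mute_flag in voice.c). */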
+ if (out->usecase == USECASE_INCALL_MUSIC_UPLINK)
+ voice_set_device_mute_flag(adev, true);
+
ALOGV("%s: Opening PCM device card_id(%d) device_id(%d) format(%#x)",
__func__, adev->snd_card, out->pcm_device_id, out->config.format);
if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
@@ -2546,10 +2568,13 @@
// otherwise audio is no longer played on the new usb devices.
// By forcing the stream in standby, the usb stack refcount drops to 0
// and the driver is closed.
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD && val == AUDIO_DEVICE_NONE &&
+ if (val == AUDIO_DEVICE_NONE &&
audio_is_usb_out_device(out->devices)) {
- ALOGD("%s() putting the usb device in standby after disconnection", __func__);
- out_standby_l(&out->stream.common);
+ if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
+ ALOGD("%s() putting the usb device in standby after disconnection", __func__);
+ out_standby_l(&out->stream.common);
+ }
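+            // New device is NONE while routed to USB: fall back to the speaker.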
+ val = AUDIO_DEVICE_OUT_SPEAKER;
}
pthread_mutex_lock(&adev->lock);
@@ -2712,7 +2737,7 @@
struct str_parms *reply,
audio_channel_mask_t *supported_channel_masks) {
int ret = -1;
- char value[256];
+ char value[ARRAY_SIZE(channels_name_to_enum_table) * 32 /* max channel name size */];
bool first = true;
size_t i, j;
@@ -5030,7 +5055,10 @@
devices,
flags,
source);
- ALOGV("%s: enter", __func__);
+ ALOGV("%s: enter: flags %#x, is_usb_dev %d, may_use_hifi_record %d,"
+ " sample_rate %u, channel_mask %#x, format %#x",
+ __func__, flags, is_usb_dev, may_use_hifi_record,
+ config->sample_rate, config->channel_mask, config->format);
*stream_in = NULL;
if (is_usb_dev && !is_usb_ready(adev, false /* is_playback */)) {
@@ -5085,7 +5113,7 @@
in->capture_handle = handle;
in->flags = flags;
- ALOGV("%s: source = %d, config->channel_mask = %d", __func__, source, config->channel_mask);
+ ALOGV("%s: source %d, config->channel_mask %#x", __func__, source, config->channel_mask);
if (source == AUDIO_SOURCE_VOICE_UPLINK ||
source == AUDIO_SOURCE_VOICE_DOWNLINK) {
/* Force channel config requested to mono if incall
diff --git a/hal/audio_hw.h b/hal/audio_hw.h
index 0b3b028..447f8ca 100644
--- a/hal/audio_hw.h
+++ b/hal/audio_hw.h
@@ -357,6 +357,7 @@
bool enable_voicerx;
bool enable_hfp;
bool mic_break_enabled;
+ bool use_voice_device_mute;
int snd_card;
void *platform;
diff --git a/hal/msm8916/platform.c b/hal/msm8916/platform.c
index 750d4e5..2bf552c 100644
--- a/hal/msm8916/platform.c
+++ b/hal/msm8916/platform.c
@@ -1651,6 +1651,7 @@
struct audio_device *adev = my_data->adev;
struct mixer_ctl *ctl;
const char *mixer_ctl_name = "Voice Rx Gain";
+ const char *mute_mixer_ctl_name = "Voice Rx Device Mute";
int vol_index = 0, ret = 0;
uint32_t set_values[ ] = {0,
ALL_SESSION_VSID,
@@ -1661,7 +1662,6 @@
// But this values don't changed in kernel. So, below change is need.
vol_index = (int)percent_to_index(volume, MIN_VOL_INDEX, my_data->max_vol_index);
set_values[0] = vol_index;
-
ctl = mixer_get_ctl_by_name(adev->mixer, mixer_ctl_name);
if (!ctl) {
ALOGE("%s: Could not get ctl for mixer cmd - %s",
@@ -1671,6 +1671,23 @@
ALOGV("Setting voice volume index: %d", set_values[0]);
ret = mixer_ctl_set_array(ctl, set_values, ARRAY_SIZE(set_values));
+    // Send the mute command when the volume index is at maximum, since
+    // indexes are inverted for these mixer controls.
+ if (vol_index == my_data->max_vol_index) {
+ set_values[0] = 1;
+    } else {
+ set_values[0] = 0;
+ }
+
+ ctl = mixer_get_ctl_by_name(adev->mixer, mute_mixer_ctl_name);
+ if (!ctl) {
+ ALOGE("%s: Could not get ctl for mixer cmd - %s",
+ __func__, mute_mixer_ctl_name);
+ return -EINVAL;
+ }
+ ALOGV("%s: Setting RX Device Mute to: %d", __func__, set_values[0]);
+ mixer_ctl_set_array(ctl, set_values, ARRAY_SIZE(set_values));
return ret;
}
diff --git a/hal/msm8974/platform.c b/hal/msm8974/platform.c
index 5a36f0c..acf6ffb 100644
--- a/hal/msm8974/platform.c
+++ b/hal/msm8974/platform.c
@@ -2512,7 +2512,7 @@
__func__, mixer_ctl_name);
return -EINVAL;
}
- ALOGV("Setting voice mute state: %d", state);
+ ALOGV("%s: Setting voice mute state: %d", __func__, state);
mixer_ctl_set_array(ctl, set_values, ARRAY_SIZE(set_values));
if (my_data->csd != NULL) {
diff --git a/hal/platform_info.c b/hal/platform_info.c
index f5fbe3f..c89020d 100644
--- a/hal/platform_info.c
+++ b/hal/platform_info.c
@@ -26,6 +26,14 @@
#include <platform.h>
#include <math.h>
+/*
+ * Mandatory microphone characteristics include: device_id, type, address, location, group,
+ * index_in_the_group, directionality, num_frequency_responses, frequencies and responses.
+ * MANDATORY_MICROPHONE_CHARACTERISTICS should be updated when mandatory microphone
+ * characteristics are changed.
+ */
+#define MANDATORY_MICROPHONE_CHARACTERISTICS ((1 << 10) - 1)
+
typedef enum {
ROOT,
ACDB,
@@ -145,17 +153,6 @@
AUDIO_MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DEFAULT),
};
-enum {
- AUDIO_MICROPHONE_CHARACTERISTIC_NONE = 0u, // 0x0
- AUDIO_MICROPHONE_CHARACTERISTIC_SENSITIVITY = 1u, // 0x1
- AUDIO_MICROPHONE_CHARACTERISTIC_MAX_SPL = 2u, // 0x2
- AUDIO_MICROPHONE_CHARACTERISTIC_MIN_SPL = 4u, // 0x4
- AUDIO_MICROPHONE_CHARACTERISTIC_ORIENTATION = 8u, // 0x8
- AUDIO_MICROPHONE_CHARACTERISTIC_GEOMETRIC_LOCATION = 16u, // 0x10
- AUDIO_MICROPHONE_CHARACTERISTIC_ALL = 31u, /* ((((SENSITIVITY | MAX_SPL) | MIN_SPL)
- | ORIENTATION) | GEOMETRIC_LOCATION) */
-};
-
static bool find_enum_by_string(const struct audio_string_to_enum * table, const char * name,
int32_t len, unsigned int *value)
{
@@ -458,221 +455,165 @@
static void process_microphone_characteristic(const XML_Char **attr) {
struct audio_microphone_characteristic_t microphone;
- uint32_t curIdx = 0;
+ uint32_t index = 0;
+ uint32_t found_mandatory_characteristics = 0;
+ uint32_t num_frequencies = 0;
+ uint32_t num_responses = 0;
+ microphone.sensitivity = AUDIO_MICROPHONE_SENSITIVITY_UNKNOWN;
+ microphone.max_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+ microphone.min_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
+ microphone.orientation.x = 0.0f;
+ microphone.orientation.y = 0.0f;
+ microphone.orientation.z = 0.0f;
+ microphone.geometric_location.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ microphone.geometric_location.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ microphone.geometric_location.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- if (strcmp(attr[curIdx++], "valid_mask")) {
- ALOGE("%s: valid_mask not found", __func__);
- goto done;
- }
- uint32_t valid_mask = atoi(attr[curIdx++]);
-
- if (strcmp(attr[curIdx++], "device_id")) {
- ALOGE("%s: device_id not found", __func__);
- goto done;
- }
- if (strlen(attr[curIdx]) > AUDIO_MICROPHONE_ID_MAX_LEN) {
- ALOGE("%s: device_id %s is too long", __func__, attr[curIdx]);
- goto done;
- }
- strcpy(microphone.device_id, attr[curIdx++]);
-
- if (strcmp(attr[curIdx++], "type")) {
- ALOGE("%s: device not found", __func__);
- goto done;
- }
- if (!find_enum_by_string(device_in_types, (char*)attr[curIdx++],
- ARRAY_SIZE(device_in_types), µphone.device)) {
- ALOGE("%s: type %s in %s not found!",
- __func__, attr[--curIdx], PLATFORM_INFO_XML_PATH);
- goto done;
- }
-
- if (strcmp(attr[curIdx++], "address")) {
- ALOGE("%s: address not found", __func__);
- goto done;
- }
- if (strlen(attr[curIdx]) > AUDIO_DEVICE_MAX_ADDRESS_LEN) {
- ALOGE("%s, address %s is too long", __func__, attr[curIdx]);
- goto done;
- }
- strcpy(microphone.address, attr[curIdx++]);
- if (strlen(microphone.address) == 0) {
- // If the address is empty, populate the address according to device type.
- if (microphone.device == AUDIO_DEVICE_IN_BUILTIN_MIC) {
- strcpy(microphone.address, AUDIO_BOTTOM_MICROPHONE_ADDRESS);
- } else if (microphone.device == AUDIO_DEVICE_IN_BACK_MIC) {
- strcpy(microphone.address, AUDIO_BACK_MICROPHONE_ADDRESS);
- }
- }
-
- if (strcmp(attr[curIdx++], "location")) {
- ALOGE("%s: location not found", __func__);
- goto done;
- }
- if (!find_enum_by_string(mic_locations, (char*)attr[curIdx++],
- AUDIO_MICROPHONE_LOCATION_CNT, µphone.location)) {
- ALOGE("%s: location %s in %s not found!",
- __func__, attr[--curIdx], PLATFORM_INFO_XML_PATH);
- goto done;
- }
-
- if (strcmp(attr[curIdx++], "group")) {
- ALOGE("%s: group not found", __func__);
- goto done;
- }
- microphone.group = atoi(attr[curIdx++]);
-
- if (strcmp(attr[curIdx++], "index_in_the_group")) {
- ALOGE("%s: index_in_the_group not found", __func__);
- goto done;
- }
- microphone.index_in_the_group = atoi(attr[curIdx++]);
-
- if (strcmp(attr[curIdx++], "directionality")) {
- ALOGE("%s: directionality not found", __func__);
- goto done;
- }
- if (!find_enum_by_string(mic_directionalities, (char*)attr[curIdx++],
- AUDIO_MICROPHONE_DIRECTIONALITY_CNT, µphone.directionality)) {
- ALOGE("%s: directionality %s in %s not found!",
- __func__, attr[--curIdx], PLATFORM_INFO_XML_PATH);
- goto done;
- }
-
- if (strcmp(attr[curIdx++], "num_frequency_responses")) {
- ALOGE("%s: num_frequency_responses not found", __func__);
- goto done;
- }
- microphone.num_frequency_responses = atoi(attr[curIdx++]);
- if (microphone.num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
- ALOGE("%s: num_frequency_responses is too large", __func__);
- goto done;
- }
- if (microphone.num_frequency_responses > 0) {
- if (strcmp(attr[curIdx++], "frequencies")) {
- ALOGE("%s: frequencies not found", __func__);
- goto done;
- }
- char *token = strtok((char *)attr[curIdx++], " ");
- uint32_t num_frequencies = 0;
- while (token) {
- microphone.frequency_responses[0][num_frequencies++] = atof(token);
- if (num_frequencies > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
- ALOGE("%s: num %u of frequency is too large", __func__, num_frequencies);
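+    /* Attributes may appear in any order; walk the name/value pairs and
+     * record each mandatory characteristic in the bitmask as it is found. */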
+ while (attr[index] != NULL) {
+ const char *attribute = attr[index++];
+ char value[strlen(attr[index]) + 1];
+ strcpy(value, attr[index++]);
+ if (strcmp(attribute, "device_id") == 0) {
+ if (strlen(value) > AUDIO_MICROPHONE_ID_MAX_LEN) {
+ ALOGE("%s: device_id %s is too long", __func__, value);
goto done;
}
- token = strtok(NULL, " ");
- }
-
- if (strcmp(attr[curIdx++], "responses")) {
- ALOGE("%s: responses not found", __func__);
- goto done;
- }
- token = strtok((char *)attr[curIdx++], " ");
- uint32_t num_responses = 0;
- while (token) {
- microphone.frequency_responses[1][num_responses++] = atof(token);
- if (num_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
- ALOGE("%s: num %u of response is too large", __func__, num_responses);
+ strcpy(microphone.device_id, value);
+ found_mandatory_characteristics |= 1;
+ } else if (strcmp(attribute, "type") == 0) {
+ if (!find_enum_by_string(device_in_types, value,
+ ARRAY_SIZE(device_in_types), µphone.device)) {
+ ALOGE("%s: type %s in %s not found!",
+ __func__, value, PLATFORM_INFO_XML_PATH);
goto done;
}
- token = strtok(NULL, " ");
- }
-
- if (num_frequencies != num_responses
- || num_frequencies != microphone.num_frequency_responses) {
- ALOGE("%s: num of frequency and response not match: %u, %u, %u",
- __func__, num_frequencies, num_responses, microphone.num_frequency_responses);
- goto done;
- }
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_SENSITIVITY) {
- if (strcmp(attr[curIdx++], "sensitivity")) {
- ALOGE("%s: sensitivity not found", __func__);
- goto done;
- }
- microphone.sensitivity = atof(attr[curIdx++]);
- } else {
- microphone.sensitivity = AUDIO_MICROPHONE_SENSITIVITY_UNKNOWN;
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_MAX_SPL) {
- if (strcmp(attr[curIdx++], "max_spl")) {
- ALOGE("%s: max_spl not found", __func__);
- goto done;
- }
- microphone.max_spl = atof(attr[curIdx++]);
- } else {
- microphone.max_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_MIN_SPL) {
- if (strcmp(attr[curIdx++], "min_spl")) {
- ALOGE("%s: min_spl not found", __func__);
- goto done;
- }
- microphone.min_spl = atof(attr[curIdx++]);
- } else {
- microphone.min_spl = AUDIO_MICROPHONE_SPL_UNKNOWN;
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_ORIENTATION) {
- if (strcmp(attr[curIdx++], "orientation")) {
- ALOGE("%s: orientation not found", __func__);
- goto done;
- }
- char *token = strtok((char *)attr[curIdx++], " ");
- float orientation[3];
- uint32_t idx = 0;
- while (token) {
- orientation[idx++] = atof(token);
- if (idx > 3) {
+ found_mandatory_characteristics |= (1 << 1);
+ } else if (strcmp(attribute, "address") == 0) {
+ if (strlen(value) > AUDIO_DEVICE_MAX_ADDRESS_LEN) {
+ ALOGE("%s, address %s is too long", __func__, value);
+ goto done;
+ }
+ strcpy(microphone.address, value);
+ if (strlen(microphone.address) == 0) {
+ // If the address is empty, populate the address according to device type.
+ if (microphone.device == AUDIO_DEVICE_IN_BUILTIN_MIC) {
+ strcpy(microphone.address, AUDIO_BOTTOM_MICROPHONE_ADDRESS);
+ } else if (microphone.device == AUDIO_DEVICE_IN_BACK_MIC) {
+ strcpy(microphone.address, AUDIO_BACK_MICROPHONE_ADDRESS);
+ }
+ }
+ found_mandatory_characteristics |= (1 << 2);
+ } else if (strcmp(attribute, "location") == 0) {
+ if (!find_enum_by_string(mic_locations, value,
+ AUDIO_MICROPHONE_LOCATION_CNT, µphone.location)) {
+ ALOGE("%s: location %s in %s not found!",
+ __func__, value, PLATFORM_INFO_XML_PATH);
+ goto done;
+ }
+ found_mandatory_characteristics |= (1 << 3);
+ } else if (strcmp(attribute, "group") == 0) {
+ microphone.group = atoi(value);
+ found_mandatory_characteristics |= (1 << 4);
+ } else if (strcmp(attribute, "index_in_the_group") == 0) {
+ microphone.index_in_the_group = atoi(value);
+ found_mandatory_characteristics |= (1 << 5);
+ } else if (strcmp(attribute, "directionality") == 0) {
+ if (!find_enum_by_string(mic_directionalities, value,
+ AUDIO_MICROPHONE_DIRECTIONALITY_CNT, µphone.directionality)) {
+ ALOGE("%s: directionality %s in %s not found!",
+                  __func__, value, PLATFORM_INFO_XML_PATH);
+ goto done;
+ }
+ found_mandatory_characteristics |= (1 << 6);
+ } else if (strcmp(attribute, "num_frequency_responses") == 0) {
+ microphone.num_frequency_responses = atoi(value);
+ if (microphone.num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+ ALOGE("%s: num_frequency_responses is too large", __func__);
+ goto done;
+ }
+ found_mandatory_characteristics |= (1 << 7);
+ } else if (strcmp(attribute, "frequencies") == 0) {
+ char *token = strtok(value, " ");
+ while (token) {
+ microphone.frequency_responses[0][num_frequencies++] = atof(token);
+ if (num_frequencies > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+ ALOGE("%s: num %u of frequency is too large", __func__, num_frequencies);
+ goto done;
+ }
+ token = strtok(NULL, " ");
+ }
+ found_mandatory_characteristics |= (1 << 8);
+ } else if (strcmp(attribute, "responses") == 0) {
+ char *token = strtok(value, " ");
+ while (token) {
+ microphone.frequency_responses[1][num_responses++] = atof(token);
+ if (num_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+ ALOGE("%s: num %u of response is too large", __func__, num_responses);
+ goto done;
+ }
+ token = strtok(NULL, " ");
+ }
+ found_mandatory_characteristics |= (1 << 9);
+ } else if (strcmp(attribute, "sensitivity") == 0) {
+ microphone.sensitivity = atof(value);
+ } else if (strcmp(attribute, "max_spl") == 0) {
+ microphone.max_spl = atof(value);
+ } else if (strcmp(attribute, "min_spl") == 0) {
+ microphone.min_spl = atof(value);
+ } else if (strcmp(attribute, "orientation") == 0) {
+ char *token = strtok(value, " ");
+ float orientation[3];
+ uint32_t idx = 0;
+ while (token) {
+ orientation[idx++] = atof(token);
+ if (idx > 3) {
+ ALOGE("%s: orientation invalid", __func__);
+ goto done;
+ }
+ token = strtok(NULL, " ");
+ }
+ if (idx != 3) {
ALOGE("%s: orientation invalid", __func__);
goto done;
}
- token = strtok(NULL, " ");
- }
- if (idx != 3) {
- ALOGE("%s: orientation invalid", __func__);
- goto done;
- }
- microphone.orientation.x = orientation[0];
- microphone.orientation.y = orientation[1];
- microphone.orientation.z = orientation[2];
- } else {
- microphone.orientation.x = 0.0f;
- microphone.orientation.y = 0.0f;
- microphone.orientation.z = 0.0f;
- }
-
- if (valid_mask & AUDIO_MICROPHONE_CHARACTERISTIC_GEOMETRIC_LOCATION) {
- if (strcmp(attr[curIdx++], "geometric_location")) {
- ALOGE("%s: geometric_location not found", __func__);
- goto done;
- }
- char *token = strtok((char *)attr[curIdx++], " ");
- float geometric_location[3];
- uint32_t idx = 0;
- while (token) {
- geometric_location[idx++] = atof(token);
- if (idx > 3) {
+ microphone.orientation.x = orientation[0];
+ microphone.orientation.y = orientation[1];
+ microphone.orientation.z = orientation[2];
+ } else if (strcmp(attribute, "geometric_location") == 0) {
+ char *token = strtok(value, " ");
+ float geometric_location[3];
+ uint32_t idx = 0;
+ while (token) {
+ geometric_location[idx++] = atof(token);
+ if (idx > 3) {
+ ALOGE("%s: geometric_location invalid", __func__);
+ goto done;
+ }
+ token = strtok(NULL, " ");
+ }
+ if (idx != 3) {
ALOGE("%s: geometric_location invalid", __func__);
goto done;
}
- token = strtok(NULL, " ");
+ microphone.geometric_location.x = geometric_location[0];
+ microphone.geometric_location.y = geometric_location[1];
+ microphone.geometric_location.z = geometric_location[2];
+ } else {
+ ALOGW("%s: unknown attribute of microphone characteristics: %s",
+ __func__, attribute);
}
- if (idx != 3) {
- ALOGE("%s: geometric_location invalid", __func__);
- goto done;
- }
- microphone.geometric_location.x = geometric_location[0];
- microphone.geometric_location.y = geometric_location[1];
- microphone.geometric_location.z = geometric_location[2];
- } else {
- microphone.geometric_location.x = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- microphone.geometric_location.y = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
- microphone.geometric_location.z = AUDIO_MICROPHONE_COORDINATE_UNKNOWN;
+ }
+
+ if (num_frequencies != num_responses
+ || num_frequencies != microphone.num_frequency_responses) {
+        ALOGE("%s: number of frequencies and responses do not match: %u, %u, %u",
+ __func__, num_frequencies, num_responses, microphone.num_frequency_responses);
+ goto done;
+ }
+
+ if (found_mandatory_characteristics != MANDATORY_MICROPHONE_CHARACTERISTICS) {
+        ALOGE("%s: some mandatory microphone characteristics are missing: %u",
+ __func__, found_mandatory_characteristics);
}
platform_set_microphone_characteristic(my_data.platform, microphone);
diff --git a/hal/voice.c b/hal/voice.c
index 09cb926..708ce6c 100644
--- a/hal/voice.c
+++ b/hal/voice.c
@@ -171,6 +171,7 @@
uc_info->devices = adev->current_call_output ->devices;
uc_info->in_snd_device = SND_DEVICE_NONE;
uc_info->out_snd_device = SND_DEVICE_NONE;
+ adev->use_voice_device_mute = false;
list_add_tail(&adev->usecase_list, &uc_info->list);
@@ -357,11 +358,19 @@
int voice_set_mic_mute(struct audio_device *adev, bool state)
{
int err = 0;
+ struct audio_usecase *usecase = NULL;
adev->voice.mic_mute = state;
if (adev->mode == AUDIO_MODE_IN_CALL ||
- adev->mode == AUDIO_MODE_IN_COMMUNICATION)
- err = platform_set_mic_mute(adev->platform, state);
+ adev->mode == AUDIO_MODE_IN_COMMUNICATION) {
+ /* Use device mute if incall music delivery usecase is in progress */
+ if (adev->use_voice_device_mute)
+ err = platform_set_device_mute(adev->platform, state, "tx");
+ else
+ err = platform_set_mic_mute(adev->platform, state);
+ ALOGV("%s: voice mute status=%d, use_voice_device_mute_flag=%d",
+ __func__, state, adev->use_voice_device_mute);
+ }
return err;
}
@@ -371,6 +380,25 @@
return adev->voice.mic_mute;
}
+// The following function is called when the incall music uplink usecase is
+// created or destroyed while the mic is muted. If the incall music uplink
+// usecase is active, apply voice device mute to mute only the voice Tx
+// path and not the mixed voice Tx + incall-music path. Revert to
+// voice stream mute once the incall music uplink usecase is inactive.
+void voice_set_device_mute_flag (struct audio_device *adev, bool state)
+{
+ if (adev->voice.mic_mute) {
+ if (state) {
+ platform_set_device_mute(adev->platform, true, "tx");
+ platform_set_mic_mute(adev->platform, false);
+ } else {
+ platform_set_mic_mute(adev->platform, true);
+ platform_set_device_mute(adev->platform, false, "tx");
+ }
+ }
+ adev->use_voice_device_mute = state;
+}
+
int voice_set_volume(struct audio_device *adev, float volume)
{
int vol, err = 0;
@@ -517,6 +545,8 @@
adev->voice.volume = 1.0f;
adev->voice.mic_mute = false;
adev->voice.in_call = false;
+ adev->use_voice_device_mute = false;
+
for (i = 0; i < MAX_VOICE_SESSIONS; i++) {
adev->voice.session[i].pcm_rx = NULL;
adev->voice.session[i].pcm_tx = NULL;
diff --git a/hal/voice.h b/hal/voice.h
index 469a3b5..71e096b 100644
--- a/hal/voice.h
+++ b/hal/voice.h
@@ -95,4 +95,6 @@
snd_device_t out_snd_device,
bool enable);
bool voice_is_call_state_active(struct audio_device *adev);
+void voice_set_device_mute_flag (struct audio_device *adev, bool state);
+
#endif //VOICE_H