Merge "config: Disable EAR PA Boost for SDM630 skush"
diff --git a/configs/msm8998/sound_trigger_mixer_paths_wcd9340.xml b/configs/msm8998/sound_trigger_mixer_paths_wcd9340.xml
index 2e75a8c..64350fc 100644
--- a/configs/msm8998/sound_trigger_mixer_paths_wcd9340.xml
+++ b/configs/msm8998/sound_trigger_mixer_paths_wcd9340.xml
@@ -240,13 +240,13 @@
<ctl name="DMIC MUX5" value="DMIC1" />
<ctl name="CDC_IF TX6 MUX" value="DEC6" />
<ctl name="ADC MUX6" value="DMIC" />
- <ctl name="DMIC MUX6" value="DMIC0" />
+ <ctl name="DMIC MUX6" value="DMIC5" />
<ctl name="CDC_IF TX7 MUX" value="DEC7" />
<ctl name="ADC MUX7" value="DMIC" />
<ctl name="DMIC MUX7" value="DMIC2" />
<ctl name="CDC_IF TX8 MUX" value="DEC8" />
<ctl name="ADC MUX8" value="DMIC" />
- <ctl name="DMIC MUX8" value="DMIC5" />
+ <ctl name="DMIC MUX8" value="DMIC0" />
</path>
<path name="echo-reference">
diff --git a/configs/sdm660/audio_platform_info.xml b/configs/sdm660/audio_platform_info.xml
index dd0d974..00d64c3 100644
--- a/configs/sdm660/audio_platform_info.xml
+++ b/configs/sdm660/audio_platform_info.xml
@@ -121,12 +121,12 @@
<device name="SND_DEVICE_OUT_ANC_FB_HEADSET" interface="INT0_MI2S_RX"/>
<device name="SND_DEVICE_OUT_VOICE_ANC_FB_HEADSET" interface="INT0_MI2S_RX"/>
<device name="SND_DEVICE_OUT_ANC_HANDSET" interface="INT4_MI2S_RX"/>
- <device name="SND_DEVICE_OUT_SPEAKER_PROTECTED" interface="INT5_MI2S_TX"/>
- <device name="SND_DEVICE_OUT_VOICE_SPEAKER_PROTECTED" interface="INT5_MI2S_TX"/>
- <device name="SND_DEVICE_OUT_VOICE_SPEAKER_2_PROTECTED" interface="INT5_MI2S_TX"/>
- <device name="SND_DEVICE_OUT_SPEAKER_PROTECTED_VBAT" interface="INT5_MI2S_TX"/>
- <device name="SND_DEVICE_OUT_VOICE_SPEAKER_PROTECTED_VBAT" interface="INT5_MI2S_TX"/>
- <device name="SND_DEVICE_OUT_VOICE_SPEAKER_2_PROTECTED_VBAT" interface="INT5_MI2S_TX"/>
+ <device name="SND_DEVICE_OUT_SPEAKER_PROTECTED" interface="INT4_MI2S_RX"/>
+ <device name="SND_DEVICE_OUT_VOICE_SPEAKER_PROTECTED" interface="INT4_MI2S_RX"/>
+ <device name="SND_DEVICE_OUT_VOICE_SPEAKER_2_PROTECTED" interface="INT4_MI2S_RX"/>
+ <device name="SND_DEVICE_OUT_SPEAKER_PROTECTED_VBAT" interface="INT4_MI2S_RX"/>
+ <device name="SND_DEVICE_OUT_VOICE_SPEAKER_PROTECTED_VBAT" interface="INT4_MI2S_RX"/>
+ <device name="SND_DEVICE_OUT_VOICE_SPEAKER_2_PROTECTED_VBAT" interface="INT4_MI2S_RX"/>
<device name="SND_DEVICE_OUT_SPEAKER_WSA" interface="INT4_MI2S_RX"/>
<device name="SND_DEVICE_OUT_VOICE_SPEAKER_WSA" interface="INT4_MI2S_RX"/>
<device name="SND_DEVICE_OUT_VOICE_SPEAKER_2_WSA" interface="INT4_MI2S_RX"/>
diff --git a/hal/audio_extn/passthru.c b/hal/audio_extn/passthru.c
index eaa8c0a..dd4d4d4 100644
--- a/hal/audio_extn/passthru.c
+++ b/hal/audio_extn/passthru.c
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -46,7 +46,8 @@
AUDIO_FORMAT_E_AC3,
AUDIO_FORMAT_E_AC3_JOC,
AUDIO_FORMAT_DTS,
- AUDIO_FORMAT_DTS_HD
+ AUDIO_FORMAT_DTS_HD,
+ AUDIO_FORMAT_DOLBY_TRUEHD
};
/*
@@ -216,6 +217,8 @@
bool passt = false;
switch (out->format) {
case AUDIO_FORMAT_E_AC3:
+ case AUDIO_FORMAT_DTS_HD:
+ case AUDIO_FORMAT_DOLBY_TRUEHD:
if (platform_is_edid_supported_format(adev->platform, out->format)) {
ALOGV("%s:PASSTHROUGH supported for format %x",
__func__, out->format);
@@ -249,13 +252,6 @@
passt = true;
}
break;
- case AUDIO_FORMAT_DTS_HD:
- if (platform_is_edid_supported_format(adev->platform, out->format)) {
- ALOGV("%s:PASSTHROUGH supported for format %x",
- __func__, out->format);
- passt = true;
- }
- break;
default:
ALOGV("%s:Passthrough not supported", __func__);
}
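
The passthru.c change above folds AUDIO_FORMAT_DTS_HD into the same EDID-gated case as AUDIO_FORMAT_E_AC3 and adds AUDIO_FORMAT_DOLBY_TRUEHD, so all three formats are passed through only when the sink's EDID advertises them. A minimal standalone sketch of that decision follows; the format enum and EDID lookup are stand-ins, not the HAL's real types or signatures.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the HAL's format enum and EDID lookup (assumptions). */
    typedef enum { FMT_AC3, FMT_E_AC3, FMT_DTS, FMT_DTS_HD, FMT_TRUEHD, FMT_PCM } fmt_t;

    static bool edid_supports(fmt_t f)
    {
        /* Pretend the sink only advertises E-AC3 and TrueHD. */
        return f == FMT_E_AC3 || f == FMT_TRUEHD;
    }

    static bool passthrough_supported(fmt_t f)
    {
        switch (f) {
        case FMT_E_AC3:
        case FMT_DTS_HD:
        case FMT_TRUEHD:
            /* All EDID-gated formats share one case after the patch. */
            return edid_supports(f);
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("TrueHD: %d\n", passthrough_supported(FMT_TRUEHD)); /* 1 */
        printf("DTS-HD: %d\n", passthrough_supported(FMT_DTS_HD)); /* 0: not in this EDID */
        return 0;
    }
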
diff --git a/hal/audio_extn/qaf.c b/hal/audio_extn/qaf.c
index ed66934..f16c365 100644
--- a/hal/audio_extn/qaf.c
+++ b/hal/audio_extn/qaf.c
@@ -36,6 +36,7 @@
#define ALOGVV(a...) do { } while(0)
#endif
+#define DEBUG_MSG_VV(arg,...) ALOGVV("%s: %d: " arg, __func__, __LINE__, ##__VA_ARGS__)
#define DEBUG_MSG(arg,...) ALOGV("%s: %d: " arg, __func__, __LINE__, ##__VA_ARGS__)
#define ERROR_MSG(arg,...) ALOGE("%s: %d: " arg, __func__, __LINE__, ##__VA_ARGS__)
@@ -58,6 +59,7 @@
#define QAF_MODULE_PCM_INPUT_BUFFER_LATENCY 32
#define MS12_PCM_OUT_FRAGMENT_SIZE 1536 //samples
+#define MS12_PCM_IN_FRAGMENT_SIZE 1536 //samples
#define DD_FRAME_SIZE 1536
#define DDP_FRAME_SIZE DD_FRAME_SIZE
@@ -147,9 +149,9 @@
typedef enum {
QAF_IN_MAIN = 0, /* Single PID Main/Primary or Dual-PID stream */
- QAF_IN_ASSOC, /* Associated/Secondary stream */
- QAF_IN_PCM, /* PCM stream. */
-
+ QAF_IN_ASSOC, /* Associated/Secondary stream */
+ QAF_IN_PCM, /* PCM stream. */
+ QAF_IN_MAIN_2, /* Single PID Main2 stream */
MAX_QAF_MODULE_IN
} mm_module_input_type;
@@ -305,6 +307,16 @@
}
}
+static bool is_main_active(struct qaf_module* qaf_mod)
+{
+ return (qaf_mod->stream_in[QAF_IN_MAIN] || qaf_mod->stream_in[QAF_IN_MAIN_2]);
+}
+
+static bool is_dual_main_active(struct qaf_module* qaf_mod)
+{
+ return (qaf_mod->stream_in[QAF_IN_MAIN] && qaf_mod->stream_in[QAF_IN_MAIN_2]);
+}
+
/* Gets the pcm output buffer size(in samples) for the mm module. */
static uint32_t get_pcm_output_buffer_size_samples(struct qaf_module *qaf_mod)
{
@@ -351,7 +363,8 @@
case AUDIO_FORMAT_AC3:
case AUDIO_FORMAT_E_AC3:
case AUDIO_FORMAT_DTS:
- case AUDIO_FORMAT_DTS_HD: {
+ case AUDIO_FORMAT_DTS_HD:
+ case AUDIO_FORMAT_DOLBY_TRUEHD: {
is_enabled = true;
break;
}
@@ -490,7 +503,7 @@
/* Sends a command to output stream offload thread. */
static int qaf_send_offload_cmd_l(struct stream_out* out, int command)
{
- DEBUG_MSG("command is %d", command);
+ DEBUG_MSG_VV("command is %d", command);
struct offload_cmd *cmd = (struct offload_cmd *)calloc(1, sizeof(struct offload_cmd));
@@ -636,7 +649,6 @@
{
int ret = -EINVAL;
struct qaf_module *qaf_mod = NULL;
- DEBUG_MSG("bytes = %d [%p]", bytes, out->qaf_stream_handle);
qaf_mod = get_qaf_module_for_input_stream(out);
if ((!qaf_mod) || (!qaf_mod->qaf_audio_stream_write)) {
@@ -656,7 +668,7 @@
struct audio_device *adev = out->dev;
ssize_t ret = 0;
- DEBUG_MSG("bytes = %d, usecase[%d] and flags[%x] for handle[%p]",
+ DEBUG_MSG_VV("bytes = %d, usecase[%d] and flags[%x] for handle[%p]",
(int)bytes, out->usecase, out->flags, out);
lock_output_stream(out);
@@ -690,7 +702,7 @@
}
ret = qaf_module_write_input_buffer(out, buffer, bytes);
- DEBUG_MSG("ret [%d]", (int)ret);
+ DEBUG_MSG_VV("ret [%d]", (int)ret);
if (ret >= 0) {
bytes = ret;
@@ -703,7 +715,7 @@
if (ret < 0) {
if (ret == -EAGAIN) {
- DEBUG_MSG("No space available in mm module, post msg to cb thread");
+ DEBUG_MSG_VV("No space available in mm module, post msg to cb thread");
ret = qaf_send_offload_cmd_l(out, OFFLOAD_CMD_WAIT_FOR_BUFFER);
bytes = 0;
} else if (ret == -ENOMEM || ret == -EPERM) {
@@ -718,11 +730,10 @@
return bytes;
}
-/* Gets PCM offload buffer size for QAF module output. */
-static uint32_t qaf_get_pcm_offload_buffer_size(struct qaf_module *qaf_mod,
- audio_offload_info_t* info)
+/* Gets PCM offload buffer size for a given config. */
+static uint32_t qaf_get_pcm_offload_buffer_size(audio_offload_info_t* info,
+ uint32_t samples_per_frame)
{
- uint32_t samples_per_frame = get_pcm_output_buffer_size_samples(qaf_mod);
uint32_t fragment_size = 0;
fragment_size = (samples_per_frame * (info->bit_width >> 3) * popcount(info->channel_mask));
@@ -738,11 +749,22 @@
fragment_size = ALIGN(fragment_size,
((info->bit_width >> 3) * popcount(info->channel_mask) * 32));
- ALOGI("Qaf PCM offload Fragment size to %d bytes", fragment_size);
+ ALOGI("Qaf PCM offload Fragment size is %d bytes", fragment_size);
return fragment_size;
}
+static uint32_t qaf_get_pcm_offload_input_buffer_size(audio_offload_info_t* info)
+{
+ return qaf_get_pcm_offload_buffer_size(info, MS12_PCM_IN_FRAGMENT_SIZE);
+}
+
+static uint32_t qaf_get_pcm_offload_output_buffer_size(struct qaf_module *qaf_mod,
+ audio_offload_info_t* info)
+{
+ return qaf_get_pcm_offload_buffer_size(info, get_pcm_output_buffer_size_samples(qaf_mod));
+}
+
/* Gets buffer latency in samples. */
static int get_buffer_latency(struct stream_out *out, uint32_t buffer_size, uint32_t *latency)
{
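
The refactor above separates the input fragment size (fixed at MS12_PCM_IN_FRAGMENT_SIZE samples) from the output fragment size (taken from get_pcm_output_buffer_size_samples()), with both wrappers funnelling into one calculation. A rough standalone sketch of the visible parts of that calculation is below; whatever the elided middle of the function does is not reproduced, and ALIGN is assumed to round up.

    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to a multiple of a; assumed equivalent to the HAL's ALIGN(). */
    #define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    /* Samples per frame times bytes per sample times channel count,
     * aligned to 32 samples worth of a full frame. */
    static uint32_t pcm_offload_fragment_size(uint32_t samples_per_frame,
                                              uint32_t bit_width,
                                              uint32_t channels)
    {
        uint32_t frag = samples_per_frame * (bit_width >> 3) * channels;
        return ALIGN_UP(frag, (bit_width >> 3) * channels * 32);
    }

    int main(void)
    {
        /* MS12 PCM input path: 1536 samples, 16-bit stereo -> 6144 bytes. */
        printf("in  fragment = %u bytes\n", pcm_offload_fragment_size(1536, 16, 2));
        /* Output path with a 1536-sample module buffer, 24-bit, 8ch -> 36864 bytes. */
        printf("out fragment = %u bytes\n", pcm_offload_fragment_size(1536, 24, 8));
        return 0;
    }
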
@@ -1046,7 +1068,7 @@
struct stream_out *out = (struct stream_out *)stream;
uint32_t latency = 0;
struct qaf_module *qaf_mod = NULL;
- DEBUG_MSG("Output Stream %p", out);
+ DEBUG_MSG_VV("Output Stream %p", out);
qaf_mod = get_qaf_module_for_input_stream(out);
if (!qaf_mod) {
@@ -1090,7 +1112,7 @@
}
}
- DEBUG_MSG("Latency %d", latency);
+ DEBUG_MSG_VV("Latency %d", latency);
return latency;
}
@@ -1138,12 +1160,10 @@
struct audio_stream_out *bt_stream = NULL;
int format;
- DEBUG_MSG("Device 0x%X, Event = 0x%X", device, event_id);
+ DEBUG_MSG_VV("Device 0x%X, Event = 0x%X, Bytes to write %d", device, event_id, size);
pthread_mutex_lock(&p_qaf->lock);
if (event_id == AUDIO_DATA_EVENT) {
- DEBUG_MSG("Device id 0x%X, bytes to write %d", device, size);
-
if (p_qaf->passthrough_out != NULL) {
//If QAF passthrough is active then all the module output will be dropped.
pthread_mutex_unlock(&p_qaf->lock);
@@ -1283,17 +1303,27 @@
qaf_mod->stream_out[QAF_OUT_OFFLOAD_MCH]->compr_config.fragments =
COMPRESS_OFFLOAD_NUM_FRAGMENTS;
qaf_mod->stream_out[QAF_OUT_OFFLOAD_MCH]->compr_config.fragment_size =
- qaf_get_pcm_offload_buffer_size(qaf_mod, &config.offload_info);
+ qaf_get_pcm_offload_output_buffer_size(qaf_mod, &config.offload_info);
p_qaf->mch_pcm_hdmi_enabled = true;
- if (qaf_mod->stream_in[QAF_IN_MAIN]
- && qaf_mod->stream_in[QAF_IN_MAIN]->client_callback != NULL) {
+ if ((qaf_mod->stream_in[QAF_IN_MAIN]
+ && qaf_mod->stream_in[QAF_IN_MAIN]->client_callback != NULL) ||
+ (qaf_mod->stream_in[QAF_IN_MAIN_2]
+ && qaf_mod->stream_in[QAF_IN_MAIN_2]->client_callback != NULL)) {
- qaf_mod->stream_out[QAF_OUT_OFFLOAD_MCH]->stream.set_callback(
+ if (qaf_mod->stream_in[QAF_IN_MAIN]) {
+ qaf_mod->stream_out[QAF_OUT_OFFLOAD_MCH]->stream.set_callback(
(struct audio_stream_out *)qaf_mod->stream_out[QAF_OUT_OFFLOAD_MCH],
qaf_mod->stream_in[QAF_IN_MAIN]->client_callback,
qaf_mod->stream_in[QAF_IN_MAIN]->client_cookie);
+ }
+ if (qaf_mod->stream_in[QAF_IN_MAIN_2]) {
+ qaf_mod->stream_out[QAF_OUT_OFFLOAD_MCH]->stream.set_callback(
+ (struct audio_stream_out *)qaf_mod->stream_out[QAF_OUT_OFFLOAD_MCH],
+ qaf_mod->stream_in[QAF_IN_MAIN_2]->client_callback,
+ qaf_mod->stream_in[QAF_IN_MAIN_2]->client_cookie);
+ }
} else if (qaf_mod->stream_in[QAF_IN_PCM]
&& qaf_mod->stream_in[QAF_IN_PCM]->client_callback != NULL) {
@@ -1306,6 +1336,8 @@
int index = -1;
if (qaf_mod->adsp_hdlr_config[QAF_IN_MAIN].adsp_hdlr_config_valid)
index = (int) QAF_IN_MAIN;
+ else if (qaf_mod->adsp_hdlr_config[QAF_IN_MAIN_2].adsp_hdlr_config_valid)
+ index = (int) QAF_IN_MAIN_2;
else if (qaf_mod->adsp_hdlr_config[QAF_IN_PCM].adsp_hdlr_config_valid)
index = (int) QAF_IN_PCM;
@@ -1388,13 +1420,23 @@
return;
}
- if (qaf_mod->stream_in[QAF_IN_MAIN]
- && qaf_mod->stream_in[QAF_IN_MAIN]->client_callback != NULL) {
+ if ((qaf_mod->stream_in[QAF_IN_MAIN]
+ && qaf_mod->stream_in[QAF_IN_MAIN]->client_callback != NULL) ||
+ (qaf_mod->stream_in[QAF_IN_MAIN_2]
+ && qaf_mod->stream_in[QAF_IN_MAIN_2]->client_callback != NULL)) {
- qaf_mod->stream_out[QAF_OUT_OFFLOAD]->stream.set_callback(
+ if (qaf_mod->stream_in[QAF_IN_MAIN]) {
+ qaf_mod->stream_out[QAF_OUT_OFFLOAD]->stream.set_callback(
(struct audio_stream_out *)qaf_mod->stream_out[QAF_OUT_OFFLOAD],
qaf_mod->stream_in[QAF_IN_MAIN]->client_callback,
qaf_mod->stream_in[QAF_IN_MAIN]->client_cookie);
+ }
+ if (qaf_mod->stream_in[QAF_IN_MAIN_2]) {
+ qaf_mod->stream_out[QAF_OUT_OFFLOAD]->stream.set_callback(
+ (struct audio_stream_out *)qaf_mod->stream_out[QAF_OUT_OFFLOAD],
+ qaf_mod->stream_in[QAF_IN_MAIN_2]->client_callback,
+ qaf_mod->stream_in[QAF_IN_MAIN_2]->client_cookie);
+ }
} else if (qaf_mod->stream_in[QAF_IN_PCM]
&& qaf_mod->stream_in[QAF_IN_PCM]->client_callback != NULL) {
@@ -1407,7 +1449,7 @@
qaf_mod->stream_out[QAF_OUT_OFFLOAD]->compr_config.fragments =
COMPRESS_OFFLOAD_NUM_FRAGMENTS;
qaf_mod->stream_out[QAF_OUT_OFFLOAD]->compr_config.fragment_size =
- qaf_get_pcm_offload_buffer_size(qaf_mod, &config.offload_info);
+ qaf_get_pcm_offload_output_buffer_size(qaf_mod, &config.offload_info);
if (qaf_mod->is_vol_set) {
DEBUG_MSG("Setting Volume Left[%f], Right[%f]", qaf_mod->vol_left, qaf_mod->vol_right);
@@ -1420,6 +1462,8 @@
int index = -1;
if (qaf_mod->adsp_hdlr_config[QAF_IN_MAIN].adsp_hdlr_config_valid)
index = (int) QAF_IN_MAIN;
+ else if (qaf_mod->adsp_hdlr_config[QAF_IN_MAIN_2].adsp_hdlr_config_valid)
+ index = (int) QAF_IN_MAIN_2;
else if (qaf_mod->adsp_hdlr_config[QAF_IN_PCM].adsp_hdlr_config_valid)
index = (int) QAF_IN_PCM;
if (index >= 0) {
@@ -1448,16 +1492,18 @@
size);
}
}
-
- DEBUG_MSG("Bytes written = %d", ret);
+ DEBUG_MSG_VV("Bytes written = %d", ret);
}
else if (event_id == AUDIO_EOS_MAIN_DD_DDP_EVENT
+ || event_id == AUDIO_EOS_MAIN_2_DD_DDP_EVENT
|| event_id == AUDIO_EOS_MAIN_AAC_EVENT
|| event_id == AUDIO_EOS_MAIN_AC4_EVENT
|| event_id == AUDIO_EOS_ASSOC_DD_DDP_EVENT) {
struct stream_out *out = qaf_mod->stream_in[QAF_IN_MAIN];
+ struct stream_out *out_main2 = qaf_mod->stream_in[QAF_IN_MAIN_2];
struct stream_out *out_assoc = qaf_mod->stream_in[QAF_IN_ASSOC];
bool *main_drain_received = &qaf_mod->drain_received[QAF_IN_MAIN];
+ bool *main2_drain_received = &qaf_mod->drain_received[QAF_IN_MAIN_2];
bool *assoc_drain_received = &qaf_mod->drain_received[QAF_IN_ASSOC];
/**
@@ -1473,6 +1519,15 @@
*assoc_drain_received = false;
unlock_output_stream(out_assoc);
DEBUG_MSG("sent associated DRAIN_READY");
+ } else if (event_id == AUDIO_EOS_MAIN_2_DD_DDP_EVENT
+ && (out_main2 != NULL)
+ && (*main2_drain_received)) {
+
+ lock_output_stream(out_main2);
+ out_main2->client_callback(STREAM_CBK_EVENT_DRAIN_READY, NULL, out_main2->client_cookie);
+ *main2_drain_received = false;
+ unlock_output_stream(out_main2);
+ DEBUG_MSG("sent main2 DRAIN_READY");
} else if ((out != NULL) && (*main_drain_received)) {
lock_output_stream(out);
out->client_callback(STREAM_CBK_EVENT_DRAIN_READY, NULL, out->client_cookie);
@@ -1717,44 +1772,61 @@
devices,
AUDIO_STREAM_SYSTEM_TONE);
qaf_mod->stream_in[QAF_IN_PCM] = out;
- } else {
- if (!qaf_mod->stream_in[QAF_IN_MAIN]) {
- if ((!(flags & AUDIO_OUTPUT_FLAG_MAIN)) && (flags & AUDIO_OUTPUT_FLAG_ASSOCIATED)) {
- ERROR_MSG("Error main input is not active.");
- return -EINVAL;
- }
-
- status = qaf_mod->qaf_audio_stream_open(qaf_mod->session_handle,
- &out->qaf_stream_handle,
- input_config,
- devices,
- AUDIO_STREAM_MAIN);
- if (status == 0) {
- DEBUG_MSG("Open stream for Input with Main stream contents with flag [%x] and stream handle [%p]",
- flags, out->qaf_stream_handle);
- qaf_mod->stream_in[QAF_IN_MAIN] = out;
- }
+ } else if ((flags & AUDIO_OUTPUT_FLAG_MAIN) && (flags & AUDIO_OUTPUT_FLAG_ASSOCIATED)) {
+ if (is_main_active(qaf_mod) || is_dual_main_active(qaf_mod)) {
+ ERROR_MSG("Dual Main or Main already active. So, Cannot open main and associated stream");
+ return -EINVAL;
} else {
- if (flags & AUDIO_OUTPUT_FLAG_MAIN) {
- ERROR_MSG("Error main input is already active");
- return -EINVAL;
- } else if ((flags & AUDIO_OUTPUT_FLAG_ASSOCIATED)
- && (!qaf_mod->stream_in[QAF_IN_ASSOC])) {
- status = qaf_mod->qaf_audio_stream_open(qaf_mod->session_handle,
- &out->qaf_stream_handle,
- input_config,
- devices,
- AUDIO_STREAM_ASSOCIATED);
- if (status == 0) {
- DEBUG_MSG("Open stream for Input with only Associated flag [%x] stream handle [%p]",
- flags, out->qaf_stream_handle);
- qaf_mod->stream_in[QAF_IN_ASSOC] = out;
+ status = qaf_mod->qaf_audio_stream_open(qaf_mod->session_handle, &out->qaf_stream_handle, input_config, devices, /*flags*/AUDIO_STREAM_MAIN);
+ if (status == 0) {
+ DEBUG_MSG("Open stream for Input with both Main and Associated stream contents with flag(%x) and stream_handle(%p)", flags, out->qaf_stream_handle);
+ qaf_mod->stream_in[QAF_IN_MAIN] = out;
+ } else {
+ ERROR_MSG("Stream Open FAILED !!!");
+ }
+ }
+ } else if ((flags & AUDIO_OUTPUT_FLAG_MAIN) || (!((flags & AUDIO_OUTPUT_FLAG_MAIN) && (flags & AUDIO_OUTPUT_FLAG_ASSOCIATED)))) {
+ /* Assume Main if no flag is set */
+ if (is_dual_main_active(qaf_mod)) {
+ ERROR_MSG("Dual Main already active. So, Cannot open main stream");
+ return -EINVAL;
+ } else if (is_main_active(qaf_mod) && qaf_mod->stream_in[QAF_IN_ASSOC]) {
+ ERROR_MSG("Main and Associated already active. So, Cannot open main stream");
+ return -EINVAL;
+ } else if (is_main_active(qaf_mod) && (mmtype != MS12)) {
+ ERROR_MSG("Main already active and Not an MS12 format. So, Cannot open another main stream");
+ return -EINVAL;
+ } else {
+ status = qaf_mod->qaf_audio_stream_open(qaf_mod->session_handle, &out->qaf_stream_handle, input_config, devices, /*flags*/AUDIO_STREAM_MAIN);
+ if (status == 0) {
+ DEBUG_MSG("Open stream for Input with only Main flag(%x) stream_handle(%p)", flags, out->qaf_stream_handle);
+ if(qaf_mod->stream_in[QAF_IN_MAIN]) {
+ qaf_mod->stream_in[QAF_IN_MAIN_2] = out;
+ } else {
+ qaf_mod->stream_in[QAF_IN_MAIN] = out;
}
} else {
- ERROR_MSG("Invalid flag or associated is already active");
- status = -EINVAL;
+ ERROR_MSG("Stream Open FAILED !!!");
}
}
+ } else if ((flags & AUDIO_OUTPUT_FLAG_ASSOCIATED)) {
+ if (is_dual_main_active(qaf_mod)) {
+ ERROR_MSG("Dual Main already active. So, Cannot open associated stream");
+ return -EINVAL;
+ } else if (!is_main_active(qaf_mod)) {
+ ERROR_MSG("Main not active. So, Cannot open associated stream");
+ return -EINVAL;
+ } else if (qaf_mod->stream_in[QAF_IN_ASSOC]) {
+ ERROR_MSG("Associated already active. So, Cannot open associated stream");
+ return -EINVAL;
+ }
+ status = qaf_mod->qaf_audio_stream_open(qaf_mod->session_handle, &out->qaf_stream_handle, input_config, devices, /*flags*/AUDIO_STREAM_ASSOCIATED);
+ if (status == 0) {
+ DEBUG_MSG("Open stream for Input with only Associated flag(%x) stream handle(%p)", flags, out->qaf_stream_handle);
+ qaf_mod->stream_in[QAF_IN_ASSOC] = out;
+ } else {
+ ERROR_MSG("Stream Open FAILED !!!");
+ }
}
if (status != 0) {
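
The rewritten open path above decides which mm-module input slot a new stream takes: dual-PID main+associated streams claim QAF_IN_MAIN, a second main is admitted into QAF_IN_MAIN_2 only for MS12, and associated-only streams require an already active main. A simplified sketch of that slot selection; the flags, state struct, and is_ms12 field are stand-ins for the HAL's structures, not its exact condition ordering.

    #include <stdio.h>

    enum slot { SLOT_MAIN, SLOT_MAIN_2, SLOT_ASSOC, SLOT_NONE };

    struct state { int main, main2, assoc, is_ms12; };
    #define FLAG_MAIN  0x1
    #define FLAG_ASSOC 0x2

    static enum slot choose_input_slot(int flags, struct state s)
    {
        int main_active = s.main || s.main2;   /* mirrors is_main_active()      */
        int dual_main   = s.main && s.main2;   /* mirrors is_dual_main_active() */

        if ((flags & FLAG_MAIN) && (flags & FLAG_ASSOC))
            /* Dual-PID main+associated: only if no main is active yet. */
            return main_active ? SLOT_NONE : SLOT_MAIN;

        if ((flags & FLAG_MAIN) || !(flags & FLAG_ASSOC)) {
            /* Main, or unflagged (assumed main): a second main is allowed only
             * for MS12 sessions and only while no associated stream is active. */
            if (dual_main || (main_active && s.assoc) || (main_active && !s.is_ms12))
                return SLOT_NONE;
            return s.main ? SLOT_MAIN_2 : SLOT_MAIN;
        }

        /* Associated only: requires an active (single) main and a free slot. */
        if (dual_main || !main_active || s.assoc)
            return SLOT_NONE;
        return SLOT_ASSOC;
    }

    int main(void)
    {
        struct state s = { .main = 1, .main2 = 0, .assoc = 0, .is_ms12 = 1 };
        printf("%d\n", choose_input_slot(FLAG_MAIN, s));  /* 1 -> SLOT_MAIN_2 */
        printf("%d\n", choose_input_slot(FLAG_ASSOC, s)); /* 2 -> SLOT_ASSOC  */
        return 0;
    }
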
@@ -1848,11 +1920,11 @@
stream_callback_event_t event;
bool send_callback = false;
- DEBUG_MSG("List Empty %d (1:TRUE, 0:FALSE)", list_empty(&out->qaf_offload_cmd_list));
+ DEBUG_MSG_VV("List Empty %d (1:TRUE, 0:FALSE)", list_empty(&out->qaf_offload_cmd_list));
if (list_empty(&out->qaf_offload_cmd_list)) {
- DEBUG_MSG("SLEEPING");
+ DEBUG_MSG_VV("SLEEPING");
pthread_cond_wait(&out->qaf_offload_cond, &out->lock);
- DEBUG_MSG("RUNNING");
+ DEBUG_MSG_VV("RUNNING");
continue;
}
@@ -1870,7 +1942,7 @@
send_callback = false;
switch (cmd->cmd) {
case OFFLOAD_CMD_WAIT_FOR_BUFFER: {
- DEBUG_MSG("wait for buffer availability");
+ DEBUG_MSG_VV("wait for buffer availability");
while (1) {
kvpairs = qaf_mod->qaf_audio_stream_get_param(out->qaf_stream_handle,
@@ -1880,12 +1952,12 @@
ret = str_parms_get_int(parms, "buf_available", &value);
if (ret >= 0) {
if (value >= (int)out->compr_config.fragment_size) {
- DEBUG_MSG("buffer available");
+ DEBUG_MSG_VV("buffer available");
str_parms_destroy(parms);
parms = NULL;
break;
} else {
- DEBUG_MSG("sleep");
+ DEBUG_MSG_VV("sleep");
str_parms_destroy(parms);
parms = NULL;
usleep(10000);
@@ -1977,11 +2049,7 @@
/* Setting new device information to the mm module input streams.
* This is needed if QAF module output streams are not created yet.
*/
- if (qaf_mod->stream_in[QAF_IN_MAIN] == out || qaf_mod->stream_in[QAF_IN_ASSOC] == out) {
- qaf_mod->stream_in[QAF_IN_MAIN]->devices = val;
- } else {
- out->devices = val;
- }
+ out->devices = val;
if (val == AUDIO_DEVICE_OUT_BLUETOOTH_A2DP) {
//If device is BT then open the BT stream if not already opened.
@@ -2213,7 +2281,10 @@
out->config.period_count = DEEP_BUFFER_OUTPUT_PERIOD_COUNT;
out->config.start_threshold = QAF_DEEP_BUFFER_OUTPUT_PERIOD_SIZE / 4;
out->config.avail_min = QAF_DEEP_BUFFER_OUTPUT_PERIOD_SIZE / 4;
+ } else if(out->flags & AUDIO_OUTPUT_FLAG_DIRECT_PCM) {
+ out->compr_config.fragment_size = qaf_get_pcm_offload_input_buffer_size(&(config->offload_info));
}
+
*stream_out = &out->stream;
if (out->flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
qaf_create_offload_callback_thread(out);
diff --git a/hal/audio_extn/utils.c b/hal/audio_extn/utils.c
index b8be7aa..27bbae8 100644
--- a/hal/audio_extn/utils.c
+++ b/hal/audio_extn/utils.c
@@ -70,6 +70,10 @@
#define BASE_TABLE_SIZE 64
#define MAX_BASEINDEX_LEN 256
+#ifndef SND_AUDIOCODEC_TRUEHD
+#define SND_AUDIOCODEC_TRUEHD 0x00000023
+#endif
+
#ifdef AUDIO_EXTERNAL_HDMI_ENABLED
#define PROFESSIONAL (1<<0) /* 0 = consumer, 1 = professional */
#define NON_LPCM (1<<1) /* 0 = audio, 1 = non-audio */
@@ -130,6 +134,7 @@
STRING_TO_ENUM(AUDIO_FORMAT_E_AC3),
STRING_TO_ENUM(AUDIO_FORMAT_DTS),
STRING_TO_ENUM(AUDIO_FORMAT_DTS_HD),
+ STRING_TO_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
#ifdef AUDIO_EXTN_FORMATS_ENABLED
STRING_TO_ENUM(AUDIO_FORMAT_E_AC3_JOC),
STRING_TO_ENUM(AUDIO_FORMAT_WMA),
@@ -900,7 +905,8 @@
app_type_cfg[len++] = app_type;
app_type_cfg[len++] = acdb_dev_id;
if (((usecase->stream.out->format == AUDIO_FORMAT_E_AC3) ||
- (usecase->stream.out->format == AUDIO_FORMAT_E_AC3_JOC))
+ (usecase->stream.out->format == AUDIO_FORMAT_E_AC3_JOC) ||
+ (usecase->stream.out->format == AUDIO_FORMAT_DOLBY_TRUEHD))
&& audio_extn_passthru_is_passthrough_stream(usecase->stream.out)) {
app_type_cfg[len++] = sample_rate * 4;
} else {
@@ -1246,6 +1252,9 @@
case AUDIO_FORMAT_DTS_HD:
id = SND_AUDIOCODEC_DTS;
break;
+ case AUDIO_FORMAT_DOLBY_TRUEHD:
+ id = SND_AUDIOCODEC_TRUEHD;
+ break;
case AUDIO_FORMAT_DSD:
id = SND_AUDIOCODEC_DSD;
break;
diff --git a/hal/audio_hw.c b/hal/audio_hw.c
index 68a552d..4fa42e8 100644
--- a/hal/audio_hw.c
+++ b/hal/audio_hw.c
@@ -293,6 +293,7 @@
STRING_TO_ENUM(AUDIO_FORMAT_AC3),
STRING_TO_ENUM(AUDIO_FORMAT_E_AC3),
STRING_TO_ENUM(AUDIO_FORMAT_E_AC3_JOC),
+ STRING_TO_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
STRING_TO_ENUM(AUDIO_FORMAT_DTS),
STRING_TO_ENUM(AUDIO_FORMAT_DTS_HD),
};
@@ -511,6 +512,7 @@
format == AUDIO_FORMAT_PCM_16_BIT ||
format == AUDIO_FORMAT_AC3 ||
format == AUDIO_FORMAT_E_AC3 ||
+ format == AUDIO_FORMAT_DOLBY_TRUEHD ||
format == AUDIO_FORMAT_DTS ||
format == AUDIO_FORMAT_DTS_HD ||
format == AUDIO_FORMAT_FLAC ||
@@ -1246,7 +1248,7 @@
/* Update voc calibration before enabling VoIP route */
if (usecase->type == VOIP_CALL)
status = platform_switch_voice_call_device_post(adev->platform,
- usecase->out_snd_device,
+ platform_get_output_snd_device(adev->platform, uc_info->stream.out),
usecase->in_snd_device);
enable_audio_route(adev, usecase);
}
@@ -1313,6 +1315,11 @@
out->supported_formats[i++] = AUDIO_FORMAT_E_AC3_JOC;
}
+ if (platform_is_edid_supported_format(out->dev->platform, AUDIO_FORMAT_DOLBY_TRUEHD)) {
+ ALOGV(":%s HDMI supports TRUE HD format", __func__);
+ out->supported_formats[i++] = AUDIO_FORMAT_DOLBY_TRUEHD;
+ }
+
if (platform_is_edid_supported_format(out->dev->platform, AUDIO_FORMAT_DTS)) {
ALOGV(":%s HDMI supports DTS format", __func__);
out->supported_formats[i++] = AUDIO_FORMAT_DTS;
diff --git a/hal/msm8916/platform.c b/hal/msm8916/platform.c
index 935bae3..79f6bc5 100644
--- a/hal/msm8916/platform.c
+++ b/hal/msm8916/platform.c
@@ -5457,12 +5457,20 @@
channels = max_supported_channels;
} else {
- /*During pass through set default bit width and channels*/
- channels = DEFAULT_HDMI_OUT_CHANNELS;
+ /*During pass through set default bit width */
+ if (usecase->stream.out->format == AUDIO_FORMAT_DOLBY_TRUEHD)
+ channels = 8;
+ else
+ channels = DEFAULT_HDMI_OUT_CHANNELS;
+
if ((usecase->stream.out->format == AUDIO_FORMAT_E_AC3) ||
- (usecase->stream.out->format == AUDIO_FORMAT_E_AC3_JOC))
+ (usecase->stream.out->format == AUDIO_FORMAT_E_AC3_JOC) ||
+ (usecase->stream.out->format == AUDIO_FORMAT_DOLBY_TRUEHD))
sample_rate = sample_rate * 4 ;
+ if (!edid_is_supported_sr(edid_info, sample_rate))
+ sample_rate = edid_get_highest_supported_sr(edid_info);
+
bit_width = CODEC_BACKEND_DEFAULT_BIT_WIDTH;
/* We force route so that the BE format can be set to Compr */
}
@@ -6301,6 +6309,10 @@
ALOGV("%s:E_AC3", __func__);
format = DOLBY_DIGITAL_PLUS;
break;
+ case AUDIO_FORMAT_DOLBY_TRUEHD:
+ ALOGV("%s:MAT", __func__);
+ format = MAT;
+ break;
case AUDIO_FORMAT_DTS:
ALOGV("%s:DTS", __func__);
format = DTS;
@@ -6327,9 +6339,18 @@
audio_offload_info_t* info)
{
uint32_t fragment_size = MIN_COMPRESS_PASSTHROUGH_FRAGMENT_SIZE;
+ char value[PROPERTY_VALUE_MAX] = {0};
+
+ if (((info->format == AUDIO_FORMAT_DOLBY_TRUEHD) ||
+ (info->format == AUDIO_FORMAT_IEC61937)) &&
+ property_get("audio.truehd.buffer.size.kb", value, "") &&
+ atoi(value)) {
+ fragment_size = atoi(value) * 1024;
+ goto done;
+ }
if (!info->has_video)
fragment_size = MIN_COMPRESS_PASSTHROUGH_FRAGMENT_SIZE;
-
+done:
return fragment_size;
}
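
In the backend-rate hunk above, E-AC3, E-AC3-JOC and TrueHD passthrough drive the HDMI interface at four times the stream rate (IEC61937 framing), then fall back to the highest EDID rate when the sink does not list the multiplied rate. A small sketch of that selection against a toy EDID table; the real HAL passes edid_info into edid_is_supported_sr()/edid_get_highest_supported_sr(), which are simplified here.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy EDID rate table (ascending), for illustration only. */
    static const uint32_t edid_rates[] = { 32000, 44100, 48000, 96000, 192000 };

    static bool edid_is_supported_sr(uint32_t sr)
    {
        for (unsigned i = 0; i < sizeof(edid_rates) / sizeof(edid_rates[0]); i++)
            if (edid_rates[i] == sr)
                return true;
        return false;
    }

    static uint32_t edid_get_highest_supported_sr(void)
    {
        return edid_rates[sizeof(edid_rates) / sizeof(edid_rates[0]) - 1];
    }

    /* 4x the stream rate for E-AC3/E-AC3-JOC/TrueHD, with EDID fallback. */
    static uint32_t passthrough_backend_rate(uint32_t stream_rate, bool is_4x_format)
    {
        uint32_t sr = is_4x_format ? stream_rate * 4 : stream_rate;
        if (!edid_is_supported_sr(sr))
            sr = edid_get_highest_supported_sr();
        return sr;
    }

    int main(void)
    {
        printf("%u\n", passthrough_backend_rate(48000, true)); /* 192000 */
        printf("%u\n", passthrough_backend_rate(44100, true)); /* 176400 -> 192000 fallback */
        return 0;
    }
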
diff --git a/hal/msm8974/platform.c b/hal/msm8974/platform.c
index 1d32409..47fce0e 100644
--- a/hal/msm8974/platform.c
+++ b/hal/msm8974/platform.c
@@ -5175,12 +5175,20 @@
channels = max_supported_channels;
} else {
- /*During pass through set default bit width and channels*/
- channels = DEFAULT_HDMI_OUT_CHANNELS;
+ /*During pass through set default bit width */
+ if (usecase->stream.out->format == AUDIO_FORMAT_DOLBY_TRUEHD)
+ channels = 8;
+ else
+ channels = DEFAULT_HDMI_OUT_CHANNELS;
+
if ((usecase->stream.out->format == AUDIO_FORMAT_E_AC3) ||
- (usecase->stream.out->format == AUDIO_FORMAT_E_AC3_JOC))
+ (usecase->stream.out->format == AUDIO_FORMAT_E_AC3_JOC) ||
+ (usecase->stream.out->format == AUDIO_FORMAT_DOLBY_TRUEHD))
sample_rate = sample_rate * 4 ;
+ if (!edid_is_supported_sr(edid_info, sample_rate))
+ sample_rate = edid_get_highest_supported_sr(edid_info);
+
bit_width = CODEC_BACKEND_DEFAULT_BIT_WIDTH;
/* We force route so that the BE format can be set to Compr */
}
@@ -5970,6 +5978,10 @@
ALOGV("%s:E_AC3", __func__);
format = DOLBY_DIGITAL_PLUS;
break;
+ case AUDIO_FORMAT_DOLBY_TRUEHD:
+ ALOGV("%s:MAT", __func__);
+ format = MAT;
+ break;
case AUDIO_FORMAT_DTS:
ALOGV("%s:DTS", __func__);
format = DTS;
@@ -5996,9 +6008,18 @@
audio_offload_info_t* info)
{
uint32_t fragment_size = MIN_COMPRESS_PASSTHROUGH_FRAGMENT_SIZE;
+ char value[PROPERTY_VALUE_MAX] = {0};
+
+ if (((info->format == AUDIO_FORMAT_DOLBY_TRUEHD) ||
+ (info->format == AUDIO_FORMAT_IEC61937)) &&
+ property_get("audio.truehd.buffer.size.kb", value, "") &&
+ atoi(value)) {
+ fragment_size = atoi(value) * 1024;
+ goto done;
+ }
if (!info->has_video)
fragment_size = MIN_COMPRESS_PASSTHROUGH_FRAGMENT_SIZE;
-
+done:
return fragment_size;
}
diff --git a/hal/voice_extn/compress_voip.c b/hal/voice_extn/compress_voip.c
index f23ff5b..43dedc5 100644
--- a/hal/voice_extn/compress_voip.c
+++ b/hal/voice_extn/compress_voip.c
@@ -288,7 +288,7 @@
ALOGV("%s: unexpected because out_stream_count=%d, in_stream_count=%d",
__func__, voip_data.out_stream_count, voip_data.in_stream_count);
uc_info = get_usecase_from_list(adev, USECASE_COMPRESS_VOIP_CALL);
- if (uc_info)
+ if (uc_info && !voip_data.out_stream_count)
uc_info->stream.out = adev->primary_output;
ret = -EINVAL;
}
diff --git a/policy_hal/AudioPolicyManager.cpp b/policy_hal/AudioPolicyManager.cpp
index 62dee22..8009784 100644
--- a/policy_hal/AudioPolicyManager.cpp
+++ b/policy_hal/AudioPolicyManager.cpp
@@ -1453,8 +1453,7 @@
audio_io_handle_t AudioPolicyManagerCustom::getOutputForDevice(
audio_devices_t device,
- audio_session_t session __unused,
- uid_t clientUid,
+ audio_session_t session,
audio_stream_type_t stream,
uint32_t samplingRate,
audio_format_t format,
@@ -1792,14 +1791,15 @@
if ((samplingRate == outputDesc->mSamplingRate) &&
audio_formats_match(format, outputDesc->mFormat) &&
(channelMask == outputDesc->mChannelMask)) {
- if (clientUid == outputDesc->mDirectClientUid) {
+ if (session == outputDesc->mDirectClientSession) {
outputDesc->mDirectOpenCount++;
- ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i));
+ ALOGV("getOutput() reusing direct output %d for session %d",
+ mOutputs.keyAt(i), session);
return mOutputs.keyAt(i);
} else {
- ALOGV("getOutput() do not reuse direct output because current client (%ld) "
- "is not the same as requesting client (%ld)",
- (long)outputDesc->mDirectClientUid, (long)clientUid);
+ ALOGV("getOutput() do not reuse direct output because current client (%d) "
+ "is not the same as requesting client (%d)",
+ outputDesc->mDirectClientSession, session);
goto non_direct_output;
}
}
@@ -1870,7 +1870,7 @@
outputDesc->mRefCount[stream] = 0;
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
- outputDesc->mDirectClientUid = clientUid;
+ outputDesc->mDirectClientSession = session;
audio_io_handle_t srcOutput = getOutputForEffect();
addOutput(output, outputDesc);
diff --git a/policy_hal/AudioPolicyManager.h b/policy_hal/AudioPolicyManager.h
index 45e090e..433380b 100644
--- a/policy_hal/AudioPolicyManager.h
+++ b/policy_hal/AudioPolicyManager.h
@@ -165,7 +165,6 @@
audio_io_handle_t getOutputForDevice(
audio_devices_t device,
audio_session_t session,
- uid_t client,
audio_stream_type_t stream,
uint32_t samplingRate,
audio_format_t format,
diff --git a/qahw_api/test/qahw_multi_record_test.c b/qahw_api/test/qahw_multi_record_test.c
index c9f8b03..f0720f2 100644
--- a/qahw_api/test/qahw_multi_record_test.c
+++ b/qahw_api/test/qahw_multi_record_test.c
@@ -89,6 +89,40 @@
static pthread_mutex_t sourcetrack_lock;
struct qahw_sound_focus_param sound_focus_data;
+static bool request_wake_lock(bool wakelock_acquired, bool enable)
+{
+ int system_ret;
+
+ if (enable) {
+ if (!wakelock_acquired) {
+ system_ret = system("echo audio_services > /sys/power/wake_lock");
+ if (system_ret < 0) {
+ fprintf(stderr, "%s.Failed to acquire audio_service lock\n", __func__);
+ fprintf(log_file, "%s.Failed to acquire audio_service lock\n", __func__);
+ } else {
+ wakelock_acquired = true;
+ fprintf(log_file, "%s.Success to acquire audio_service lock\n", __func__);
+ }
+ } else
+ fprintf(log_file, "%s.Lock is already acquired\n", __func__);
+ }
+
+ if (!enable) {
+ if (wakelock_acquired) {
+ system_ret = system("echo audio_services > /sys/power/wake_unlock");
+ if (system_ret < 0) {
+ fprintf(stderr, "%s.Failed to release audio_service lock\n", __func__);
+ fprintf(log_file, "%s.Failed to release audio_service lock\n", __func__);
+ } else {
+ wakelock_acquired = false;
+ fprintf(log_file, "%s.Success to release audio_service lock\n", __func__);
+ }
+ } else
+ fprintf(log_file, "%s.No Lock is acquired to release\n", __func__);
+ }
+ return wakelock_acquired;
+}
+
void stop_signal_handler(int signal __unused)
{
stop_record = true;
@@ -295,9 +329,12 @@
strlcat(param, params->profile, sizeof(param));
qahw_in_set_parameters(in_handle, param);
- fprintf(log_file, "\n Please speak into the microphone for %lf seconds, handle(%d)\n", params->record_length, params->handle);
+ /* Caution: Below ADL log shouldn't be altered without notifying automation APT since it is used for
+ * automation testing
+ */
+ fprintf(log_file, "\n ADL: Please speak into the microphone for %lf seconds, handle(%d)\n", params->record_length, params->handle);
if (log_file != stdout)
- fprintf(stdout, "\n Please speak into the microphone for %lf seconds, handle(%d)\n", params->record_length, params->handle);
+ fprintf(stdout, "\n ADL: Please speak into the microphone for %lf seconds, handle(%d)\n", params->record_length, params->handle);
snprintf(file_name + name_len, sizeof(file_name) - name_len, "%d.wav", (0x99A - params->handle));
FILE *fd = fopen(file_name,"w");
@@ -433,14 +470,17 @@
fprintf(stdout, "could not close input stream %d, handle(%d)\n",rc, params->handle);
}
- /* Print instructions to access the file. */
- fprintf(log_file, "\n\n The audio recording has been saved to %s. Please use adb pull to get "
+ /* Print instructions to access the file.
+ * Caution: Below ADL log shouldn't be altered without notifying automation APT since it is used for
+ * automation testing
+ */
+ fprintf(log_file, "\n\n ADL: The audio recording has been saved to %s. Please use adb pull to get "
"the file and play it using audacity. The audio data has the "
"following characteristics:\n Sample rate: %i\n Format: %d\n "
"Num channels: %i, handle(%d)\n\n",
file_name, params->config.sample_rate, params->config.format, params->channels, params->handle);
if (log_file != stdout)
- fprintf(stdout, "\n\n The audio recording has been saved to %s. Please use adb pull to get "
+ fprintf(stdout, "\n\n ADL: The audio recording has been saved to %s. Please use adb pull to get "
"the file and play it using audacity. The audio data has the "
"following characteristics:\n Sample rate: %i\n Format: %d\n "
"Num channels: %i, handle(%d)\n\n",
@@ -547,6 +587,7 @@
bool interactive_mode = false, source_tracking = false;
struct listnode param_list;
char log_filename[256] = "stdout";
+ bool wakelock_acquired = false;
log_file = stdout;
list_init(¶m_list);
@@ -624,6 +665,7 @@
}
}
+ wakelock_acquired = request_wake_lock(wakelock_acquired, true);
qahw_mod_handle = qahw_load_module(mod_name);
if(qahw_mod_handle == NULL) {
fprintf(log_file, " qahw_load_module failed");
@@ -857,10 +899,14 @@
fprintf(log_file, "could not unload hal %d \n",ret);
}
- fprintf(log_file, "\n Done with hal record test \n");
+ /* Caution: Below ADL log shouldn't be altered without notifying automation APT since it is used
+ * for automation testing
+ */
+ fprintf(log_file, "\n ADL: Done with hal record test \n");
if (log_file != stdout) {
- fprintf(stdout, "\n Done with hal record test \n");
+ fprintf(stdout, "\n ADL: Done with hal record test \n");
fclose(log_file);
}
+ wakelock_acquired = request_wake_lock(wakelock_acquired, false);
return 0;
}
diff --git a/qahw_api/test/qahw_playback_test.c b/qahw_api/test/qahw_playback_test.c
index 1f65368..dbd1042 100644
--- a/qahw_api/test/qahw_playback_test.c
+++ b/qahw_api/test/qahw_playback_test.c
@@ -58,6 +58,10 @@
#define KVPAIRS_MAX 100
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[1]))
+#define FORMAT_DESCRIPTOR_SIZE 12
+#define SUBCHUNK1_SIZE(x) ((8) + (x))
+#define SUBCHUNK2_SIZE 8
+
static int get_wav_header_length (FILE* file_stream);
static void init_streams(void);
@@ -77,7 +81,8 @@
FILE_EAC3_JOC,
FILE_DTS,
FILE_MP2,
- FILE_APTX
+ FILE_APTX,
+ FILE_TRUEHD
};
typedef enum {
@@ -237,6 +242,40 @@
"music_offload_wma_encode_option2=%d;" \
"music_offload_wma_format_tag=%d;"
+static bool request_wake_lock(bool wakelock_acquired, bool enable)
+{
+ int system_ret;
+
+ if (enable) {
+ if (!wakelock_acquired) {
+ system_ret = system("echo audio_services > /sys/power/wake_lock");
+ if (system_ret < 0) {
+ fprintf(stderr, "%s.Failed to acquire audio_service lock\n", __func__);
+ fprintf(log_file, "%s.Failed to acquire audio_service lock\n", __func__);
+ } else {
+ wakelock_acquired = true;
+ fprintf(log_file, "%s.Success to acquire audio_service lock\n", __func__);
+ }
+ } else
+ fprintf(log_file, "%s.Lock is already acquired\n", __func__);
+ }
+
+ if (!enable) {
+ if (wakelock_acquired) {
+ system_ret = system("echo audio_services > /sys/power/wake_unlock");
+ if (system_ret < 0) {
+ fprintf(stderr, "%s.Failed to release audio_service lock\n", __func__);
+ fprintf(log_file, "%s.Failed to release audio_service lock\n", __func__);
+ } else {
+ wakelock_acquired = false;
+ fprintf(log_file, "%s.Success to release audio_service lock\n", __func__);
+ }
+ } else
+ fprintf(log_file, "%s.No Lock is acquired to release\n", __func__);
+ }
+ return wakelock_acquired;
+}
+
void stop_signal_handler(int signal __unused)
{
stop_playback = true;
@@ -373,7 +412,7 @@
qahw_in_buffer_t in_buf;
char *buffer;
int rc = 0;
- int bytes_to_read, bytes_written = 0;
+ int bytes_to_read, bytes_written = 0, bytes_wrote = 0;
FILE *fp = NULL;
qahw_stream_handle_t* in_handle = nullptr;
@@ -415,7 +454,13 @@
while (!(params->acp.thread_exit)) {
rc = qahw_in_read(in_handle, &in_buf);
if (rc > 0) {
- bytes_written += fwrite((char *)(in_buf.buffer), sizeof(char), (int)in_buf.bytes, fp);
+ bytes_wrote = fwrite((char *)(in_buf.buffer), sizeof(char), (int)in_buf.bytes, fp);
+ bytes_written += bytes_wrote;
+ if(bytes_wrote < in_buf.bytes) {
+ stop_playback = true;
+ fprintf(log_file, "Error in fwrite due to no memory(%d)=%s\n",ferror(fp), strerror(ferror(fp)));
+ break;
+ }
}
}
params->hdr.data_sz = bytes_written;
@@ -507,8 +552,6 @@
pthread_t drift_query_thread;
struct drift_data drift_params;
- if (params->output_device & AUDIO_DEVICE_OUT_ALL_A2DP)
- params->output_device = AUDIO_DEVICE_OUT_PROXY;
rc = qahw_open_output_stream(params->qahw_out_hal_handle,
params->handle,
params->output_device,
@@ -679,9 +722,12 @@
qahw_out_drain(params->out_handle, QAHW_DRAIN_ALL);
pthread_cond_wait(¶ms->drain_cond, ¶ms->drain_lock);
fprintf(log_file, "stream %d: out of compress drain\n", params->stream_index);
- fprintf(log_file, "stream %d: playback completed successfully\n", params->stream_index);
pthread_mutex_unlock(¶ms->drain_lock);
}
+ /* Caution: Below ADL log shouldn't be altered without notifying automation APT since
+ * it is used for automation testing
+ */
+ fprintf(log_file, "ADL: stream %d: playback completed successfully\n", params->stream_index);
}
exit = true;
continue;
@@ -962,6 +1008,9 @@
case FILE_APTX:
stream_info->config.offload_info.format = AUDIO_FORMAT_APTX;
break;
+ case FILE_TRUEHD:
+ stream_info->config.offload_info.format = AUDIO_FORMAT_DOLBY_TRUEHD;
+ break;
default:
fprintf(log_file, "Does not support given filetype\n");
fprintf(stderr, "Does not support given filetype\n");
@@ -970,7 +1019,7 @@
}
stream_info->config.sample_rate = stream_info->config.offload_info.sample_rate;
stream_info->config.format = stream_info->config.offload_info.format;
- stream_info->config.channel_mask = stream_info->config.offload_info.channel_mask = audio_channel_in_mask_from_count(stream_info->channels);
+ stream_info->config.channel_mask = stream_info->config.offload_info.channel_mask = audio_channel_out_mask_from_count(stream_info->channels);
return;
}
@@ -1067,7 +1116,7 @@
event_payload.num_events = 1;
event_payload.event_id = 0x13236;
- event_payload.module_id = 0x10EEC;
+ event_payload.module_id = 0x10940;
event_payload.config_mask = 1;
payload.adsp_event_params.payload_length = sizeof(event_payload);
@@ -1141,6 +1190,9 @@
return -1;
parms = str_parms_create_str(kvpairs);
+ if (parms == NULL)
+ return -1;
+
if (str_parms_get_str(parms, key, value, KVPAIRS_MAX) < 0)
return -1;
@@ -1165,7 +1217,7 @@
/*
* for now we assume usb hal/pcm device announces support for one format ONLY
*/
- for (i = 0; i < sizeof(format_table); i++) {
+ for (i = 0; i < (sizeof(format_table)/sizeof(format_table[0])); i++) {
if(!strncmp(format_table[i].string, value, sizeof(value))) {
match = true;
break;
@@ -1303,8 +1355,8 @@
param_string = qahw_out_get_parameters(stream->out_handle, QAHW_PARAMETER_STREAM_SUP_CHANNELS);
if ((ch = get_channels(param_string)) <= 0) {
- fprintf(log_file, "Unable to extract channels =(%d) string(%s)\n", ch, param_string);
- fprintf(stderr, "Unable to extract channels =(%d) string(%s)\n", ch, param_string);
+ fprintf(log_file, "Unable to extract channels =(%d) string(%s)\n", ch, param_string == NULL ? "null":param_string);
+ fprintf(stderr, "Unable to extract channels =(%d) string(%s)\n", ch, param_string == NULL ? "null":param_string);
return -1;
}
stream->config.channel_mask = audio_channel_in_mask_from_count(ch);
@@ -1399,10 +1451,13 @@
printf(" hal_play_test -f /data/MateRani.mp3 -t 2 -d 2 -v 0.01 -r 44100 -c 2 \n");
printf(" -> plays MP3 stream(-t = 2) on speaker device(-d = 2)\n");
printf(" -> 2 channels and 44100 sample rate\n\n");
- printf(" hal_play_test -f /data/v1-CBR-32kHz-stereo-40kbps.mp3 -t 2 -d 128 -v 0.01 -r 32000 -c 2 -D /data/proxy_dump.wav\n");
- printf(" -> plays MP3 stream(-t = 2) on BT device(-d = 128)\n");
+ printf(" hal_play_test -f /data/v1-CBR-32kHz-stereo-40kbps.mp3 -t 2 -d 33554432 -v 0.01 -r 32000 -c 2 -D /data/proxy_dump.wav\n");
+ printf(" -> plays MP3 stream(-t = 2) on BT device in non-split path (-d = 33554432)\n");
printf(" -> 2 channels and 32000 sample rate\n");
printf(" -> dumps pcm data to file at /data/proxy_dump.wav\n\n");
+ printf(" hal_play_test -f /data/v1-CBR-32kHz-stereo-40kbps.mp3 -t 2 -d 128 -v 0.01 -r 32000 -c 2 \n");
+ printf(" -> plays MP3 stream(-t = 2) on BT device in split path (-d = 128)\n");
+ printf(" -> 2 channels and 32000 sample rate\n");
printf(" hal_play_test -f /data/AACLC-71-48000Hz-384000bps.aac -t 4 -d 2 -v 0.05 -r 48000 -c 2 -a 1 \n");
printf(" -> plays AAC-ADTS stream(-t = 4) on speaker device(-d = 2)\n");
printf(" -> AAC format type is LC(-a = 1)\n");
@@ -1472,20 +1527,7 @@
fprintf(log_file, "This is not a valid wav file \n");
fprintf(stderr, "This is not a valid wav file \n");
} else {
- switch (subchunk_size) {
- case 16:
- fprintf(log_file, "44-byte wav header \n");
- wav_header_len = 44;
- break;
- case 18:
- fprintf(log_file, "46-byte wav header \n");
- wav_header_len = 46;
- break;
- default:
- fprintf(log_file, "Header contains extra data and is larger than 46 bytes: subchunk_size=%d \n", subchunk_size);
- wav_header_len = subchunk_size;
- break;
- }
+ wav_header_len = FORMAT_DESCRIPTOR_SIZE + SUBCHUNK1_SIZE(subchunk_size) + SUBCHUNK2_SIZE;
}
return wav_header_len;
}
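
The replacement above derives the WAV header length from the RIFF layout instead of special-casing 44 and 46 bytes: a 12-byte format descriptor, an 8-byte fmt chunk header plus its payload, and an 8-byte data chunk header. A quick standalone check that the formula reproduces the removed hard-coded values:

    #include <stdio.h>

    #define FORMAT_DESCRIPTOR_SIZE 12          /* "RIFF" + size + "WAVE"        */
    #define SUBCHUNK1_SIZE(x)      ((8) + (x)) /* "fmt " + size + fmt payload   */
    #define SUBCHUNK2_SIZE         8           /* "data" + size                 */

    int main(void)
    {
        /* Plain PCM fmt chunk (16) and extended fmt chunk (18), matching the
         * 44- and 46-byte cases the old switch special-cased. */
        printf("%d\n", FORMAT_DESCRIPTOR_SIZE + SUBCHUNK1_SIZE(16) + SUBCHUNK2_SIZE); /* 44 */
        printf("%d\n", FORMAT_DESCRIPTOR_SIZE + SUBCHUNK1_SIZE(18) + SUBCHUNK2_SIZE); /* 46 */
        return 0;
    }
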
@@ -1574,6 +1616,7 @@
int j = 0;
kpi_mode = false;
event_trigger = false;
+ bool wakelock_acquired = false;
log_file = stdout;
proxy_params.acp.file_name = "/data/pcm_dump.wav";
@@ -1739,8 +1782,12 @@
}
}
+ wakelock_acquired = request_wake_lock(wakelock_acquired, true);
num_of_streams = i+1;
- fprintf(log_file, "Starting audio hal tests for streams : %d\n", num_of_streams);
+ /* Caution: Below ADL log shouldn't be altered without notifying automation APT since it is used
+ * for automation testing
+ */
+ fprintf(log_file, "ADL: Starting audio hal tests for streams : %d\n", num_of_streams);
if (kpi_mode == true && num_of_streams > 1) {
fprintf(log_file, "kpi-mode is not supported for multi-playback usecase\n");
@@ -1752,7 +1799,7 @@
goto exit;
}
- if (num_of_streams > 1 && stream_param[num_of_streams-1].output_device & AUDIO_DEVICE_OUT_ALL_A2DP) {
+ if (num_of_streams > 1 && stream_param[num_of_streams-1].output_device & AUDIO_DEVICE_OUT_PROXY) {
fprintf(log_file, "Proxy thread is not supported for multi-playback usecase\n");
fprintf(stderr, "Proxy thread is not supported for multi-playback usecase\n");
goto exit;
@@ -1823,13 +1870,14 @@
} else if (kpi_mode == true)
stream->config.format = stream->config.offload_info.format = AUDIO_FORMAT_PCM_16_BIT;
- if (stream->output_device & AUDIO_DEVICE_OUT_ALL_A2DP)
+ if (stream->output_device & AUDIO_DEVICE_OUT_PROXY)
fprintf(log_file, "Saving pcm data to file: %s\n", proxy_params.acp.file_name);
/* Set device connection state for HDMI */
- if (stream->output_device == AUDIO_DEVICE_OUT_AUX_DIGITAL) {
+ if ((stream->output_device == AUDIO_DEVICE_OUT_AUX_DIGITAL) ||
+ (stream->output_device == AUDIO_DEVICE_OUT_BLUETOOTH_A2DP)) {
char param[100] = {0};
- snprintf(param, sizeof(param), "%s=%d", "connect", AUDIO_DEVICE_OUT_AUX_DIGITAL);
+ snprintf(param, sizeof(param), "%s=%d", "connect", stream->output_device);
qahw_set_parameters(stream->qahw_out_hal_handle, param);
}
@@ -1888,16 +1936,17 @@
* reset device connection state for HDMI and close the file streams
*/
for (i = 0; i < num_of_streams; i++) {
- if (stream_param[i].output_device == AUDIO_DEVICE_OUT_AUX_DIGITAL) {
+ if ((stream_param[i].output_device == AUDIO_DEVICE_OUT_AUX_DIGITAL) ||
+ (stream_param[i].output_device == AUDIO_DEVICE_OUT_BLUETOOTH_A2DP)) {
char param[100] = {0};
- snprintf(param, sizeof(param), "%s=%d", "disconnect", AUDIO_DEVICE_OUT_AUX_DIGITAL);
+ snprintf(param, sizeof(param), "%s=%d", "disconnect", stream_param[i].output_device);
qahw_set_parameters(stream_param[i].qahw_out_hal_handle, param);
}
if (stream_param[i].file_stream != nullptr)
fclose(stream_param[i].file_stream);
else if (AUDIO_DEVICE_NONE != stream_param[i].input_device) {
- if (stream->in_handle) {
+ if (stream != NULL && stream->in_handle) {
rc = qahw_close_input_stream(stream->in_handle);
if (rc) {
fprintf(log_file, "input stream could not be closed\n");
@@ -1913,6 +1962,10 @@
if ((log_file != stdout) && (log_file != nullptr))
fclose(log_file);
- fprintf(log_file, "\nBYE BYE\n");
+ wakelock_acquired = request_wake_lock(wakelock_acquired, false);
+ /* Caution: Below ADL log shouldn't be altered without notifying automation APT since it is used
+ * for automation testing
+ */
+ fprintf(log_file, "\nADL: BYE BYE\n");
return 0;
}