Merge "hal: Add support for Volume Boost"
diff --git a/hal/audio_extn/audio_extn.c b/hal/audio_extn/audio_extn.c
index 3f457d2..80ce063 100644
--- a/hal/audio_extn/audio_extn.c
+++ b/hal/audio_extn/audio_extn.c
@@ -61,13 +61,6 @@
void audio_extn_hfp_set_parameters(struct audio_device *adev,
struct str_parms *parms);
#endif
-#ifndef SSR_ENABLED
-#define audio_extn_ssr_get_parameters(query, reply) (0)
-#else
-void audio_extn_ssr_get_parameters(struct str_parms *query,
-
- struct str_parms *reply);
-#endif
#ifndef ANC_HEADSET_ENABLED
#define audio_extn_set_anc_parameters(adev, parms) (0)
@@ -335,7 +328,6 @@
struct str_parms *reply)
{
audio_extn_get_afe_proxy_parameters(query, reply);
- audio_extn_ssr_get_parameters(query, reply);
ALOGD("%s: returns %s", __func__, str_parms_to_str(reply));
}
diff --git a/hal/audio_extn/audio_extn.h b/hal/audio_extn/audio_extn.h
index 6bd03ee..e17aa6b 100644
--- a/hal/audio_extn/audio_extn.h
+++ b/hal/audio_extn/audio_extn.h
@@ -70,14 +70,14 @@
#ifndef SSR_ENABLED
#define audio_extn_ssr_init(adev, in) (0)
#define audio_extn_ssr_deinit() (0)
-#define audio_extn_ssr_update_enabled(adev) (0)
+#define audio_extn_ssr_update_enabled() (0)
#define audio_extn_ssr_get_enabled() (0)
#define audio_extn_ssr_read(stream, buffer, bytes) (0)
#else
int32_t audio_extn_ssr_init(struct audio_device *adev,
struct stream_in *in);
int32_t audio_extn_ssr_deinit();
-int32_t audio_extn_ssr_update_enabled(struct audio_device *adev);
+void audio_extn_ssr_update_enabled();
bool audio_extn_ssr_get_enabled();
int32_t audio_extn_ssr_read(struct audio_stream_in *stream,
void *buffer, size_t bytes);
diff --git a/hal/audio_extn/listen.c b/hal/audio_extn/listen.c
index 65c0ae6..9166f8e 100644
--- a/hal/audio_extn/listen.c
+++ b/hal/audio_extn/listen.c
@@ -119,6 +119,12 @@
void audio_extn_listen_set_parameters(struct audio_device *adev,
struct str_parms *parms)
{
+ ALOGV("%s: enter: %s", __func__, str_parms_to_str(parms));
+
+ if (listen_dev) {
+ listen_dev->listen_set_parameters(&adev->device, str_parms_to_str(parms));
+ }
+
return;
}
diff --git a/hal/audio_extn/ssr.c b/hal/audio_extn/ssr.c
index efd92ea..ac6da8b 100644
--- a/hal/audio_extn/ssr.c
+++ b/hal/audio_extn/ssr.c
@@ -34,14 +34,13 @@
#include "surround_filters_interface.h"
#ifdef SSR_ENABLED
-#define COEFF_ARRAY_SIZE 4
-#define FILT_SIZE ((512+1)* 6) /* # ((FFT bins)/2+1)*numOutputs */
-#define SSR_FRAME_SIZE 512
-#define SSR_INPUT_FRAME_SIZE (SSR_FRAME_SIZE * 4)
-#define SSR_OUTPUT_FRAME_SIZE (SSR_FRAME_SIZE * 6)
-#define SSR_CHANNEL_COUNT 4
-#define SSR_PERIOD_SIZE 256
-#define SSR_PERIOD_COUNT 8
+#define COEFF_ARRAY_SIZE 4
+#define FILT_SIZE ((512+1)* 6) /* # ((FFT bins)/2+1)*numOutputs */
+#define SSR_CHANNEL_INPUT_NUM 4
+#define SSR_CHANNEL_OUTPUT_NUM 6
+#define SSR_PERIOD_COUNT 8
+#define SSR_PERIOD_SIZE 512
+#define SSR_INPUT_FRAME_SIZE (SSR_PERIOD_SIZE * SSR_PERIOD_COUNT)
#define SURROUND_FILE_1R "/system/etc/surround_sound/filter1r.pcm"
#define SURROUND_FILE_2R "/system/etc/surround_sound/filter2r.pcm"
@@ -52,7 +51,7 @@
#define SURROUND_FILE_2I "/system/etc/surround_sound/filter2i.pcm"
#define SURROUND_FILE_3I "/system/etc/surround_sound/filter3i.pcm"
#define SURROUND_FILE_4I "/system/etc/surround_sound/filter4i.pcm"
-#define AUDIO_PARAMETER_KEY_SSR "ssr"
+
#define LIB_SURROUND_PROC "libsurround_proc.so"
typedef int (*surround_filters_init_t)(void *, int, int, Word16 **,
@@ -67,11 +66,7 @@
int16_t **real_coeffs;
int16_t **imag_coeffs;
void *surround_obj;
-
- int16_t *surround_input_buffer;
- int16_t *surround_output_buffer;
- int surround_input_bufferIdx;
- int surround_output_bufferIdx;
+ int16_t *surround_raw_buffer;
bool is_ssr_enabled;
void *surround_filters_handle;
@@ -81,19 +76,13 @@
surround_filters_intl_process_t surround_filters_intl_process;
};
-static int32_t ssr_init_surround_sound_lib(unsigned long buffersize);
-static int32_t ssr_read_coeffs_from_file();
-
static struct ssr_module ssrmod = {
.fp_4ch = NULL,
- .fp_6ch= NULL,
+ .fp_6ch = NULL,
.real_coeffs = NULL,
.imag_coeffs = NULL,
.surround_obj = NULL,
- .surround_output_buffer = NULL,
- .surround_input_buffer = NULL,
- .surround_output_bufferIdx = 0,
- .surround_input_bufferIdx= 0,
+ .surround_raw_buffer = NULL,
.is_ssr_enabled = 0,
.surround_filters_handle = NULL,
@@ -230,32 +219,20 @@
int high_freq = 100;
int i, ret = 0;
- ssrmod.surround_input_bufferIdx = 0;
- ssrmod.surround_output_bufferIdx = 0;
-
if ( ssrmod.surround_obj ) {
ALOGE("%s: ola filter library is already initialized", __func__);
return 0;
}
/* Allocate memory for input buffer */
- ssrmod.surround_input_buffer = (Word16 *) calloc(2 * SSR_INPUT_FRAME_SIZE,
+ ssrmod.surround_raw_buffer = (Word16 *) calloc(buffersize,
sizeof(Word16));
- if ( !ssrmod.surround_input_buffer ) {
+ if ( !ssrmod.surround_raw_buffer ) {
ALOGE("%s: Memory allocation failure. Not able to allocate "
"memory for surroundInputBuffer", __func__);
goto init_fail;
}
- /* Allocate memory for output buffer */
- ssrmod.surround_output_buffer = (Word16 *) calloc(2 * SSR_OUTPUT_FRAME_SIZE,
- sizeof(Word16));
- if ( !ssrmod.surround_output_buffer ) {
- ALOGE("%s: Memory allocation failure. Not able to "
- "allocate memory for surroundOutputBuffer", __func__);
- goto init_fail;
- }
-
/* Allocate memory for real and imag coeffs array */
ssrmod.real_coeffs = (Word16 **) calloc(COEFF_ARRAY_SIZE, sizeof(Word16 *));
if ( !ssrmod.real_coeffs ) {
@@ -353,13 +330,9 @@
free(ssrmod.surround_obj);
ssrmod.surround_obj = NULL;
}
- if (ssrmod.surround_output_buffer) {
- free(ssrmod.surround_output_buffer);
- ssrmod.surround_output_buffer = NULL;
- }
- if (ssrmod.surround_input_buffer) {
- free(ssrmod.surround_input_buffer);
- ssrmod.surround_input_buffer = NULL;
+ if (ssrmod.surround_raw_buffer) {
+ free(ssrmod.surround_raw_buffer);
+ ssrmod.surround_raw_buffer = NULL;
}
if (ssrmod.real_coeffs){
for (i =0; i<COEFF_ARRAY_SIZE; i++ ) {
@@ -385,7 +358,7 @@
return -ENOMEM;
}
-int32_t audio_extn_ssr_update_enabled(struct audio_device *adev)
+void audio_extn_ssr_update_enabled()
{
char ssr_enabled[PROPERTY_VALUE_MAX] = "false";
@@ -397,7 +370,6 @@
ALOGD("%s: surround sound recording is not supported", __func__);
ssrmod.is_ssr_enabled = false;
}
- return 0;
}
bool audio_extn_ssr_get_enabled()
@@ -414,12 +386,13 @@
uint32_t buffer_size;
ALOGD("%s: ssr case ", __func__);
- in->config.channels = SSR_CHANNEL_COUNT;
+ in->config.channels = SSR_CHANNEL_INPUT_NUM;
in->config.period_size = SSR_PERIOD_SIZE;
in->config.period_count = SSR_PERIOD_COUNT;
- buffer_size = (SSR_PERIOD_SIZE)*(SSR_PERIOD_COUNT);
- ALOGD("%s: buffer_size: %d", __func__, buffer_size);
+    /* use hardcoded 4k buffer size for ssr */
+ buffer_size = SSR_INPUT_FRAME_SIZE;
+ ALOGV("%s: buffer_size: %d", __func__, buffer_size);
ret = ssr_init_surround_sound_lib(buffer_size);
if (0 != ret) {
@@ -429,16 +402,16 @@
}
property_get("ssr.pcmdump",c_multi_ch_dump,"0");
- if (0 == strncmp("true",c_multi_ch_dump, sizeof("ssr.dump-pcm"))) {
- /* Remember to change file system permission of data(e.g. chmod 777 data/),
- otherwise, fopen may fail */
- if ( !ssrmod.fp_4ch)
- ssrmod.fp_4ch = fopen("/data/media/0/4ch_ssr.pcm", "wb");
- if ( !ssrmod.fp_6ch)
- ssrmod.fp_6ch = fopen("/data/media/0/6ch_ssr.pcm", "wb");
- if ((!ssrmod.fp_4ch) || (!ssrmod.fp_6ch))
- ALOGE("%s: mfp_4ch or mfp_6ch open failed: mfp_4ch:%p mfp_6ch:%p",
- __func__, ssrmod.fp_4ch, ssrmod.fp_6ch);
+ if (0 == strncmp("true", c_multi_ch_dump, sizeof("ssr.dump-pcm"))) {
+ /* Remember to change file system permission of data(e.g. chmod 777 data/),
+ otherwise, fopen may fail */
+ if ( !ssrmod.fp_4ch)
+ ssrmod.fp_4ch = fopen("/data/4ch.pcm", "wb");
+ if ( !ssrmod.fp_6ch)
+ ssrmod.fp_6ch = fopen("/data/6ch.pcm", "wb");
+ if ((!ssrmod.fp_4ch) || (!ssrmod.fp_6ch))
+ ALOGE("%s: mfp_4ch or mfp_6ch open failed: mfp_4ch:%p mfp_6ch:%p",
+ __func__, ssrmod.fp_4ch, ssrmod.fp_6ch);
}
return 0;
@@ -449,7 +422,7 @@
int i;
if (ssrmod.surround_obj) {
- ALOGD("%s: entry", __func__);
+ ALOGV("%s: entry", __func__);
ssrmod.surround_filters_release(ssrmod.surround_obj);
if (ssrmod.surround_obj)
free(ssrmod.surround_obj);
@@ -474,25 +447,21 @@
free(ssrmod.imag_coeffs);
ssrmod.imag_coeffs = NULL;
}
- if (ssrmod.surround_output_buffer){
- free(ssrmod.surround_output_buffer);
- ssrmod.surround_output_buffer = NULL;
+ if (ssrmod.surround_raw_buffer) {
+ free(ssrmod.surround_raw_buffer);
+ ssrmod.surround_raw_buffer = NULL;
}
- if (ssrmod.surround_input_buffer) {
- free(ssrmod.surround_input_buffer);
- ssrmod.surround_input_buffer = NULL;
- }
-
- if ( ssrmod.fp_4ch ) fclose(ssrmod.fp_4ch);
- if ( ssrmod.fp_6ch ) fclose(ssrmod.fp_6ch);
+ if (ssrmod.fp_4ch)
+ fclose(ssrmod.fp_4ch);
+ if (ssrmod.fp_6ch)
+ fclose(ssrmod.fp_6ch);
}
- if(ssrmod.surround_filters_handle)
- {
+ if(ssrmod.surround_filters_handle) {
dlclose(ssrmod.surround_filters_handle);
ssrmod.surround_filters_handle = NULL;
}
- ALOGD("%s: exit", __func__);
+ ALOGV("%s: exit", __func__);
return 0;
}
@@ -500,120 +469,36 @@
int32_t audio_extn_ssr_read(struct audio_stream_in *stream,
void *buffer, size_t bytes)
{
- int processed = 0;
- int processed_pending;
- void *buffer_start = buffer;
- unsigned period_bytes;
- unsigned period_samples;
- int read_pending, n;
- size_t read_bytes = 0;
- int samples = bytes >> 1;
-
struct stream_in *in = (struct stream_in *)stream;
struct audio_device *adev = in->dev;
+    size_t period_bytes;
+ int32_t ret;
- period_bytes = in->config.period_size;
- ALOGD("%s: period_size: %d", __func__, in->config.period_size);
- period_samples = period_bytes >> 1;
+    /* convert the requested 6ch byte count to the equivalent 4ch byte count */
+    period_bytes = (bytes / SSR_CHANNEL_OUTPUT_NUM) * SSR_CHANNEL_INPUT_NUM;
- if (!ssrmod.surround_obj)
+ if (!ssrmod.surround_obj) {
+ ALOGE("%s: surround_obj not initialized", __func__);
return -ENOMEM;
-
- do {
- if (ssrmod.surround_output_bufferIdx > 0) {
- ALOGV("%s: copy processed output "
- "to buffer, surround_output_bufferIdx = %d",
- __func__, ssrmod.surround_output_bufferIdx);
- /* Copy processed output to buffer */
- processed_pending = ssrmod.surround_output_bufferIdx;
- if (processed_pending > (samples - processed)) {
- processed_pending = (samples - processed);
- }
- memcpy(buffer, ssrmod.surround_output_buffer, processed_pending * sizeof(Word16));
- buffer = (char*)buffer + processed_pending * sizeof(Word16);
- processed += processed_pending;
- if (ssrmod.surround_output_bufferIdx > processed_pending) {
- /* Shift leftover samples to beginning of the buffer */
- memcpy(&ssrmod.surround_output_buffer[0],
- &ssrmod.surround_output_buffer[processed_pending],
- (ssrmod.surround_output_bufferIdx - processed_pending) * sizeof(Word16));
- }
- ssrmod.surround_output_bufferIdx -= processed_pending;
- }
-
- if (processed >= samples) {
- ALOGV("%s: done processing buffer, "
- "processed = %d", __func__, processed);
- /* Done processing this buffer */
- break;
- }
-
- /* Fill input buffer until there is enough to process */
- read_pending = SSR_INPUT_FRAME_SIZE - ssrmod.surround_input_bufferIdx;
- read_bytes = ssrmod.surround_input_bufferIdx;
- while (in->pcm && read_pending > 0) {
- n = pcm_read(in->pcm, &ssrmod.surround_input_buffer[read_bytes],
- period_bytes);
- ALOGV("%s: pcm_read() returned n = %d buffer:%p size:%d", __func__,
- n, &ssrmod.surround_input_buffer[read_bytes], period_bytes);
- if (n && n != -EAGAIN) {
- /* Recovery part of pcm_read. TODO:split recovery */
- return (ssize_t)n;
- }
- else if (n < 0) {
- /* Recovery is part of pcm_write. TODO split is later */
- return (ssize_t)n;
- }
- else {
- read_pending -= period_samples;
- read_bytes += period_samples;
- }
- }
-
-
- if (ssrmod.fp_4ch) {
- fwrite( ssrmod.surround_input_buffer, 1,
- SSR_INPUT_FRAME_SIZE * sizeof(Word16), ssrmod.fp_4ch);
- }
-
- /* apply ssr libs to conver 4ch to 6ch */
- ssrmod.surround_filters_intl_process(ssrmod.surround_obj,
- &ssrmod.surround_output_buffer[ssrmod.surround_output_bufferIdx],
- (Word16 *)ssrmod.surround_input_buffer);
-
- /* Shift leftover samples to beginning of input buffer */
- if (read_pending < 0) {
- memcpy(&ssrmod.surround_input_buffer[0],
- &ssrmod.surround_input_buffer[SSR_INPUT_FRAME_SIZE],
- (-read_pending) * sizeof(Word16));
- }
- ssrmod.surround_input_bufferIdx = -read_pending;
-
- if (ssrmod.fp_6ch) {
- fwrite( &ssrmod.surround_output_buffer[ssrmod.surround_output_bufferIdx],
- 1, SSR_OUTPUT_FRAME_SIZE * sizeof(Word16), ssrmod.fp_6ch);
- }
-
- ssrmod.surround_output_bufferIdx += SSR_OUTPUT_FRAME_SIZE;
- ALOGV("%s: do_while loop: processed=%d, samples=%d\n", __func__, processed, samples);
- } while (in->pcm && processed < samples);
- read_bytes = processed * sizeof(Word16);
- buffer = buffer_start;
-
- return 0;
-}
-
-void audio_extn_ssr_get_parameters(struct str_parms *query,
- struct str_parms *reply)
-{
- int ret, val;
- char value[32]={0};
-
- ret = str_parms_get_str(query, AUDIO_PARAMETER_KEY_SSR, value, sizeof(value));
-
- if (ret >= 0) {
- memcpy(value, "true", 4);
- str_parms_add_str(reply, AUDIO_PARAMETER_KEY_SSR, value);
}
+
+    ret = pcm_read(in->pcm, ssrmod.surround_raw_buffer, period_bytes);
+    if (ret < 0) {
+        ALOGE("%s: %s ret:%d", __func__, pcm_get_error(in->pcm), ret);
+ return ret;
+ }
+
+    /* apply the ssr library to convert 4ch input to 6ch output */
+    ssrmod.surround_filters_intl_process(ssrmod.surround_obj,
+             buffer, ssrmod.surround_raw_buffer);
+
+    /* dump raw and processed pcm data if enabled */
+    if (ssrmod.fp_4ch)
+        fwrite(ssrmod.surround_raw_buffer, 1, period_bytes, ssrmod.fp_4ch);
+ if (ssrmod.fp_6ch)
+ fwrite(buffer, 1, bytes, ssrmod.fp_6ch);
+
+ return ret;
}
+
#endif /* SSR_ENABLED */
diff --git a/hal/audio_hw.c b/hal/audio_hw.c
index fc78610..fefbfa4 100644
--- a/hal/audio_hw.c
+++ b/hal/audio_hw.c
@@ -53,13 +53,16 @@
#include "voice_extn.h"
#include "sound/compress_params.h"
-
+#define MAX_COMPRESS_OFFLOAD_FRAGMENT_SIZE (256 * 1024)
+#define MIN_COMPRESS_OFFLOAD_FRAGMENT_SIZE (8 * 1024)
#define COMPRESS_OFFLOAD_FRAGMENT_SIZE (32 * 1024)
#define COMPRESS_OFFLOAD_NUM_FRAGMENTS 4
/* ToDo: Check and update a proper value in msec */
#define COMPRESS_OFFLOAD_PLAYBACK_LATENCY 96
#define COMPRESS_PLAYBACK_VOLUME_MAX 0x2000
+#define USECASE_AUDIO_PLAYBACK_PRIMARY USECASE_AUDIO_PLAYBACK_DEEP_BUFFER
+
struct pcm_config pcm_config_deep_buffer = {
.channels = 2,
.rate = DEFAULT_OUTPUT_SAMPLING_RATE,
@@ -148,6 +151,7 @@
static unsigned int audio_device_ref_count;
static int set_voice_volume_l(struct audio_device *adev, float volume);
+static uint32_t get_offload_buffer_size();
static bool is_supported_format(audio_format_t format)
{
@@ -852,9 +856,6 @@
struct stream_out *out = (struct stream_out *) context;
struct listnode *item;
- out->offload_state = OFFLOAD_STATE_IDLE;
- out->playback_started = 0;
-
setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_AUDIO);
set_sched_policy(0, SP_FOREGROUND);
prctl(PR_SET_NAME, (unsigned long)"Offload Callback", 0, 0, 0);
@@ -1998,6 +1999,13 @@
*stream_out = NULL;
out = (struct stream_out *)calloc(1, sizeof(struct stream_out));
+ if (!out) {
+ return -ENOMEM;
+ }
+
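+    /* initialize the stream lock and condition variable before any further setup */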
+ pthread_mutex_init(&out->lock, (const pthread_mutexattr_t *) NULL);
+ pthread_cond_init(&out->cond, (const pthread_condattr_t *) NULL);
+
if (devices == AUDIO_DEVICE_NONE)
devices = AUDIO_DEVICE_OUT_SPEAKER;
@@ -2083,7 +2091,7 @@
else
out->compr_config.codec->id =
get_snd_codec_id(config->offload_info.format);
- out->compr_config.fragment_size = COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+ out->compr_config.fragment_size = get_offload_buffer_size();
out->compr_config.fragments = COMPRESS_OFFLOAD_NUM_FRAGMENTS;
out->compr_config.codec->sample_rate =
compress_get_alsa_rate(config->offload_info.sample_rate);
@@ -2097,6 +2105,9 @@
out->non_blocking = 1;
out->send_new_metadata = 1;
+ out->offload_state = OFFLOAD_STATE_IDLE;
+ out->playback_started = 0;
+
create_offload_callback_thread(out);
ALOGV("%s: offloaded output offload_info version %04x bit rate %d",
__func__, config->offload_info.version,
@@ -2123,12 +2134,15 @@
out->config = pcm_config_low_latency;
out->sample_rate = out->config.rate;
} else {
- out->usecase = USECASE_AUDIO_PLAYBACK_DEEP_BUFFER;
+ /* primary path is the default path selected if no other outputs are available/suitable */
+ out->usecase = USECASE_AUDIO_PLAYBACK_PRIMARY;
out->config = pcm_config_deep_buffer;
out->sample_rate = out->config.rate;
}
- if (flags & AUDIO_OUTPUT_FLAG_PRIMARY) {
+ if ((out->usecase == USECASE_AUDIO_PLAYBACK_PRIMARY) ||
+ (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
+ /* Ensure the default output is not selected twice */
if(adev->primary_output == NULL)
adev->primary_output = out;
else {
@@ -2171,9 +2185,6 @@
/* out->muted = false; by calloc() */
/* out->written = 0; by calloc() */
- pthread_mutex_init(&out->lock, (const pthread_mutexattr_t *) NULL);
- pthread_cond_init(&out->cond, (const pthread_condattr_t *) NULL);
-
config->format = out->stream.common.get_format(&out->stream.common);
config->channel_mask = out->stream.common.get_channels(&out->stream.common);
config->sample_rate = out->stream.common.get_sample_rate(&out->stream.common);
@@ -2593,6 +2604,7 @@
free(adev);
ALOGE("%s: Failed to init platform data, aborting.", __func__);
*device = NULL;
+ pthread_mutex_unlock(&adev_init_lock);
return -EINVAL;
}
@@ -2638,6 +2650,28 @@
return 0;
}
+/* Read the offload fragment size in KB from a property,
+ * clamp it to the supported range and verify that the
+ * resulting ring buffer remains 4k aligned.
+ */
+static uint32_t get_offload_buffer_size()
+{
+ char value[PROPERTY_VALUE_MAX] = {0};
+ uint32_t fragment_size = COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+ if((property_get("audio.offload.buffer.size.kb", value, "")) &&
+ atoi(value)) {
+ fragment_size = atoi(value) * 1024;
+ //ring buffer size needs to be 4k aligned.
+ CHECK(!(fragment_size * COMPRESS_OFFLOAD_NUM_FRAGMENTS % 4096));
+ }
+ if(fragment_size < MIN_COMPRESS_OFFLOAD_FRAGMENT_SIZE)
+ fragment_size = MIN_COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+ else if(fragment_size > MAX_COMPRESS_OFFLOAD_FRAGMENT_SIZE)
+ fragment_size = MAX_COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+ ALOGVV("%s: fragment_size %d", __func__, fragment_size);
+ return fragment_size;
+}
+
static struct hw_module_methods_t hal_module_methods = {
.open = adev_open,
};
diff --git a/hal/audio_hw.h b/hal/audio_hw.h
index 3055470..73575e8 100644
--- a/hal/audio_hw.h
+++ b/hal/audio_hw.h
@@ -250,6 +250,12 @@
bool update_mixer);
struct audio_usecase *get_usecase_from_list(struct audio_device *adev,
audio_usecase_t uc_id);
+
+#define LITERAL_TO_STRING(x) #x
+#define CHECK(condition) LOG_ALWAYS_FATAL_IF(!(condition), "%s",\
+ __FILE__ ":" LITERAL_TO_STRING(__LINE__)\
+ " ASSERT_FATAL(" #condition ") failed.")
+
/*
* NOTE: when multiple mutexes have to be acquired, always take the
* stream_in or stream_out mutex first, followed by the audio_device mutex.
diff --git a/hal/msm8974/hw_info.c b/hal/msm8974/hw_info.c
index 58ca4dc..fd943ba 100644
--- a/hal/msm8974/hw_info.c
+++ b/hal/msm8974/hw_info.c
@@ -51,11 +51,6 @@
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-#define LITERAL_TO_STRING(x) #x
-#define CHECK(condition) LOG_ALWAYS_FATAL_IF(!(condition), "%s",\
- __FILE__ ":" LITERAL_TO_STRING(__LINE__)\
- " ASSERT_FATAL(" #condition ") failed.")
-
static const snd_device_t taiko_fluid_variant_devices[] = {
SND_DEVICE_OUT_SPEAKER,
SND_DEVICE_OUT_SPEAKER_AND_HEADPHONES,
diff --git a/hal/msm8974/platform.c b/hal/msm8974/platform.c
index d562a0c..9ba3b94 100644
--- a/hal/msm8974/platform.c
+++ b/hal/msm8974/platform.c
@@ -203,6 +203,7 @@
[SND_DEVICE_IN_CAMCORDER_MIC] = "camcorder-mic",
[SND_DEVICE_IN_VOICE_DMIC] = "voice-dmic-ef",
[SND_DEVICE_IN_VOICE_SPEAKER_DMIC] = "voice-speaker-dmic-ef",
+ [SND_DEVICE_IN_VOICE_SPEAKER_QMIC] = "voice-speaker-qmic",
[SND_DEVICE_IN_VOICE_TTY_FULL_HEADSET_MIC] = "voice-tty-full-headset-mic",
[SND_DEVICE_IN_VOICE_TTY_VCO_HANDSET_MIC] = "voice-tty-vco-handset-mic",
[SND_DEVICE_IN_VOICE_TTY_HCO_HEADSET_MIC] = "voice-tty-hco-headset-mic",
@@ -275,6 +276,7 @@
[SND_DEVICE_IN_CAMCORDER_MIC] = 4,
[SND_DEVICE_IN_VOICE_DMIC] = 41,
[SND_DEVICE_IN_VOICE_SPEAKER_DMIC] = 43,
+ [SND_DEVICE_IN_VOICE_SPEAKER_QMIC] = 19,
[SND_DEVICE_IN_VOICE_TTY_FULL_HEADSET_MIC] = 16,
[SND_DEVICE_IN_VOICE_TTY_VCO_HANDSET_MIC] = 36,
[SND_DEVICE_IN_VOICE_TTY_HCO_HEADSET_MIC] = 16,
@@ -586,7 +588,7 @@
audio_extn_usb_set_proxy_sound_card(adev->snd_card);
/* Read one time ssr property */
- audio_extn_ssr_update_enabled(adev);
+ audio_extn_ssr_update_enabled();
audio_extn_spkr_prot_init(adev);
return my_data;
}
@@ -1354,7 +1356,7 @@
{
struct platform_data *my_data = (struct platform_data *)platform;
char *str;
- char value[32];
+ char value[256] = {0};
int val;
int ret = 0;
@@ -1371,10 +1373,15 @@
}
}
- ret = str_parms_get_int(parms, AUDIO_PARAMETER_KEY_SLOWTALK, &val);
+ ret = str_parms_get_str(parms, AUDIO_PARAMETER_KEY_SLOWTALK, value, sizeof(value));
if (ret >= 0) {
+ bool state = false;
+ if (!strncmp("true", value, sizeof("true"))) {
+ state = true;
+ }
+
str_parms_del(parms, AUDIO_PARAMETER_KEY_SLOWTALK);
- ret = platform_set_slowtalk(my_data, val);
+ ret = platform_set_slowtalk(my_data, state);
if (ret)
ALOGE("%s: Failed to set slow talk err: %d", __func__, ret);
}
@@ -1515,8 +1522,8 @@
ret = str_parms_get_str(query, AUDIO_PARAMETER_KEY_SLOWTALK,
value, sizeof(value));
if (ret >= 0) {
- str_parms_add_int(reply, AUDIO_PARAMETER_KEY_SLOWTALK,
- my_data->slowtalk);
+ str_parms_add_str(reply, AUDIO_PARAMETER_KEY_SLOWTALK,
+                        my_data->slowtalk ? "true" : "false");
}
ret = str_parms_get_str(query, AUDIO_PARAMETER_KEY_VOLUME_BOOST,
diff --git a/hal/msm8974/platform.h b/hal/msm8974/platform.h
index a791445..e7980d6 100644
--- a/hal/msm8974/platform.h
+++ b/hal/msm8974/platform.h
@@ -168,8 +168,8 @@
#define FM_PLAYBACK_PCM_DEVICE 5
#define FM_CAPTURE_PCM_DEVICE 6
#define HFP_PCM_RX 5
-#define HFP_SCO_RX 22
-#define HFP_ASM_RX_TX 23
+#define HFP_SCO_RX 23
+#define HFP_ASM_RX_TX 24
#define INCALL_MUSIC_UPLINK_PCM_DEVICE 1
#define INCALL_MUSIC_UPLINK2_PCM_DEVICE 16
@@ -201,11 +201,16 @@
#define VOICE2_CALL_PCM_DEVICE 13
#define VOLTE_CALL_PCM_DEVICE 21
#define QCHAT_CALL_PCM_DEVICE 06
-#else
+#elif PLATFORM_MSM8610
#define VOICE_CALL_PCM_DEVICE 2
#define VOICE2_CALL_PCM_DEVICE 13
#define VOLTE_CALL_PCM_DEVICE 14
#define QCHAT_CALL_PCM_DEVICE 20
+#else
+#define VOICE_CALL_PCM_DEVICE 2
+#define VOICE2_CALL_PCM_DEVICE 22
+#define VOLTE_CALL_PCM_DEVICE 14
+#define QCHAT_CALL_PCM_DEVICE 20
#endif
#define LIB_CSD_CLIENT "libcsd-client.so"
diff --git a/hal_mpq/Android.mk b/hal_mpq/Android.mk
index 683de7a..1cb7e60 100644
--- a/hal_mpq/Android.mk
+++ b/hal_mpq/Android.mk
@@ -44,9 +44,9 @@
$(call include-path-for, audio-effects) \
$(LOCAL_PATH)/$(AUDIO_PLATFORM)
-ifeq ($(strip $(AUDIO_FEATURE_ENABLED_AUXPCM_BT)),true)
- LOCAL_CFLAGS += -DAUXPCM_BT_ENABLED
-endif
+
+LOCAL_C_INCLUDES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include
+LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
LOCAL_MODULE := audio.primary.$(TARGET_BOARD_PLATFORM)
diff --git a/hal_mpq/audio_bitstream_sm.c b/hal_mpq/audio_bitstream_sm.c
index 9f2564d..d2e4e6e 100644
--- a/hal_mpq/audio_bitstream_sm.c
+++ b/hal_mpq/audio_bitstream_sm.c
@@ -26,7 +26,7 @@
#include <math.h>
#define LOG_TAG "AudioBitstreamStateMachine"
-//#define LOG_NDEBUG 0
+#define LOG_NDEBUG 0
#define LOG_NDDEBUG 0
#include <utils/Log.h>
@@ -36,7 +36,6 @@
#include <platform.h>
// ----------------------------------------------------------------------------
-
/*
Initialize all input and output pointers
Allocate twice the max buffer size of input and output for sufficient buffering
@@ -45,52 +44,62 @@
{
bstream->buffering_factor = buffering_factor;
bstream->buffering_factor_cnt = 0;
+ bstream->inp_buf_size = SAMPLES_PER_CHANNEL *
+ MAX_INPUT_CHANNELS_SUPPORTED*
+ (bstream->buffering_factor+1);
+ bstream->inp_buf = (char *)malloc( bstream->inp_buf_size);
- bstream->inp_buf=(char *)malloc(SAMPLES_PER_CHANNEL*
- MAX_INPUT_CHANNELS_SUPPORTED*
- (bstream->buffering_factor+1));
// multiplied by 2 to convert to bytes
if(bstream->inp_buf != NULL) {
bstream->inp_buf_curr_ptr = bstream->inp_buf;
bstream->inp_buf_write_ptr = bstream->inp_buf;
} else {
ALOGE("MS11 input buffer not allocated");
+ bstream->inp_buf_size = 0;
return 0;
}
- bstream->enc_out_buf =(char *)malloc(SAMPLES_PER_CHANNEL*
- MAX_INPUT_CHANNELS_SUPPORTED*
- FACTOR_FOR_BUFFERING);
+ bstream->enc_out_buf_size = SAMPLES_PER_CHANNEL * MAX_INPUT_CHANNELS_SUPPORTED*
+ FACTOR_FOR_BUFFERING;
+ bstream->enc_out_buf =(char *)malloc(bstream->enc_out_buf_size);
+
if(bstream->enc_out_buf) {
bstream->enc_out_buf_write_ptr = bstream->enc_out_buf;
} else {
ALOGE("MS11 Enc output buffer not allocated");
+ bstream->enc_out_buf_size = 0;
return 0;
}
- bstream->pcm_2_out_buf =(char *)malloc(SAMPLES_PER_CHANNEL*STEREO_CHANNELS *
- FACTOR_FOR_BUFFERING);
+ bstream->pcm_2_out_buf_size = SAMPLES_PER_CHANNEL*STEREO_CHANNELS *
+ FACTOR_FOR_BUFFERING;
+ bstream->pcm_2_out_buf =(char *)malloc(bstream->pcm_2_out_buf_size);
if(bstream->pcm_2_out_buf) {
bstream->pcm_2_out_buf_write_ptr = bstream->pcm_2_out_buf;
} else {
ALOGE("MS11 PCM2Ch output buffer not allocated");
+ bstream->pcm_2_out_buf_size = 0;
return 0;
}
- bstream->pcm_mch_out_buf =(char *)malloc(SAMPLES_PER_CHANNEL *
- MAX_OUTPUT_CHANNELS_SUPPORTED *
- FACTOR_FOR_BUFFERING);
+ bstream->pcm_mch_out_buf_size = SAMPLES_PER_CHANNEL * MAX_OUTPUT_CHANNELS_SUPPORTED *
+ FACTOR_FOR_BUFFERING;
+
+ bstream->pcm_mch_out_buf =(char *)malloc(bstream->pcm_mch_out_buf_size);
if(bstream->pcm_mch_out_buf) {
bstream->pcm_mch_out_buf_write_ptr = bstream->pcm_mch_out_buf;
} else {
ALOGE("MS11 PCMMCh output buffer not allocated");
+ bstream->pcm_mch_out_buf_size = 0;
return 0;
}
- bstream->passt_out_buf =(char *)malloc(SAMPLES_PER_CHANNEL *
+ bstream->passt_out_buf_size =SAMPLES_PER_CHANNEL *
MAX_INPUT_CHANNELS_SUPPORTED *
- FACTOR_FOR_BUFFERING);
+ FACTOR_FOR_BUFFERING;
+ bstream->passt_out_buf =(char *)malloc(bstream->passt_out_buf_size);
if(bstream->passt_out_buf) {
bstream->passt_out_buf_write_ptr = bstream->passt_out_buf;
} else {
ALOGE("MS11 Enc output buffer not allocated");
+ bstream->passt_out_buf_size = 0;
return 0;
}
return 1;
@@ -159,9 +168,8 @@
struct audio_bitstream_sm *bstream,
char *buf_ptr, size_t bytes)
{
- int32_t bufLen = SAMPLES_PER_CHANNEL*MAX_INPUT_CHANNELS_SUPPORTED*(bstream->buffering_factor+1);
// flush the input buffer if input is not consumed
- if( (bstream->inp_buf_write_ptr+bytes) > (bstream->inp_buf+bufLen) ) {
+ if( (bstream->inp_buf_write_ptr+bytes) > (bstream->inp_buf+bstream->inp_buf_size) ) {
ALOGE("Input bitstream is not consumed");
return;
}
@@ -226,6 +234,38 @@
return bstream->inp_buf_write_ptr;
}
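+/*
+Advance the input buffer read (current) pointer by 'bytes' after verifying that
+the new position stays within the allocated input buffer
+*/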
+int audio_bitstream_set_input_buffer_ptr(
+ struct audio_bitstream_sm *bstream, int bytes)
+{
+ if(((bstream->inp_buf_curr_ptr + bytes) <=
+ (bstream->inp_buf + bstream->inp_buf_size)) &&
+ ((bstream->inp_buf_curr_ptr + bytes) >= bstream->inp_buf))
+
+ bstream->inp_buf_curr_ptr += bytes;
+ else {
+ ALOGE("Invalid input buffer size %d bytes", bytes);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
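+/*
+Advance the input buffer write pointer by 'bytes' after verifying that the new
+position stays within the allocated input buffer
+*/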
+int audio_bitstream_set_input_buffer_write_ptr(
+ struct audio_bitstream_sm *bstream, int bytes)
+{
+ if(((bstream->inp_buf_write_ptr + bytes) <=
+ (bstream->inp_buf + bstream->inp_buf_size)) &&
+ ((bstream->inp_buf_write_ptr + bytes) >= bstream->inp_buf))
+
+ bstream->inp_buf_write_ptr += bytes;
+ else {
+ ALOGE("Invalid input buffer size %d bytes", bytes);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
Get the output buffer start pointer to start rendering the pcm sampled to driver
*/
diff --git a/hal_mpq/audio_hw.h b/hal_mpq/audio_hw.h
index e1319a5..7814d95 100644
--- a/hal_mpq/audio_hw.h
+++ b/hal_mpq/audio_hw.h
@@ -24,7 +24,7 @@
#include <hardware/audio.h>
#include <tinyalsa/asoundlib.h>
#include <tinycompress/tinycompress.h>
-
+#include "sound/compress_params.h"
#include <audio_route/audio_route.h>
#define VISUALIZER_LIBRARY_PATH "/system/lib/soundfx/libqcomvisualizer.so"
@@ -45,6 +45,7 @@
#define DEFAULT_HDMI_OUT_CHANNELS 2
typedef int snd_device_t;
+#include <platform.h>
/* These are the supported use cases by the hardware.
* Each usecase is mapped to a specific PCM device.
@@ -56,11 +57,11 @@
USECASE_AUDIO_PLAYBACK_DEEP_BUFFER = 0,
USECASE_AUDIO_PLAYBACK_LOW_LATENCY,
USECASE_AUDIO_PLAYBACK_MULTI_CH,
- USECASE_AUDIO_PLAYBACK_OFFLOAD,
-
- /* FM usecase */
USECASE_AUDIO_PLAYBACK_FM,
-
+ USECASE_AUDIO_PLAYBACK_OFFLOAD,
+ USECASE_AUDIO_PLAYBACK_OFFLOAD1,
+ USECASE_AUDIO_PLAYBACK_OFFLOAD2,
+ USECASE_AUDIO_PLAYBACK_OFFLOAD3,
/* Capture usecases */
USECASE_AUDIO_RECORD,
USECASE_AUDIO_RECORD_COMPRESS,
@@ -88,6 +89,28 @@
AUDIO_USECASE_MAX
} audio_usecase_t;
+typedef enum {
+ DEEP_BUFFER_PLAYBACK_STREAM = 0,
+ LOW_LATENCY_PLAYBACK_STREAM,
+ MCH_PCM_PLAYBACK_STREAM,
+ OFFLOAD_PLAYBACK_STREAM,
+ LOW_LATENCY_RECORD_STREAM,
+ RECORD_STREAM,
+ VOICE_CALL_STREAM
+} audio_usecase_stream_type_t;
+
+#define STRING_TO_ENUM(string) { #string, string }
+struct string_to_enum {
+ const char *name;
+ uint32_t value;
+};
+
+static const struct string_to_enum out_channels_name_to_enum_table[] = {
+ STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO),
+ STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
+ STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+};
+
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/*
@@ -118,14 +141,43 @@
int data[];
};
+struct alsa_handle {
+
+ struct listnode list;
+    /* parameters of the stream */
+ struct pcm *pcm;
+ struct pcm_config config;
+
+ struct compress *compr;
+ struct compr_config compr_config;
+
+ struct stream_out *out;
+
+ audio_usecase_t usecase;
+ int device_id;
+ unsigned int sample_rate;
+ audio_channel_mask_t channel_mask;
+ audio_format_t input_format;
+ audio_format_t output_format;
+ audio_devices_t devices;
+
+ route_format_t route_format;
+ int decoder_type;
+
+    bool cmd_pending;
+};
+
struct stream_out {
struct audio_stream_out stream;
pthread_mutex_t lock; /* see note below on mutex acquisition order */
pthread_cond_t cond;
+ /* TODO remove this */
+ /*
struct pcm_config config;
struct compr_config compr_config;
struct pcm *pcm;
struct compress *compr;
+ */
int standby;
int pcm_device_id;
unsigned int sample_rate;
@@ -154,6 +206,54 @@
int send_new_metadata;
struct audio_device *dev;
+
+ /*devices configuration */
+ int left_volume;
+ int right_volume;
+ audio_usecase_stream_type_t uc_strm_type;
+ int hdmi_format;
+ int spdif_format;
+    int* device_formats; /* TODO: needs to come from AudioRoutingManager */
+ struct audio_config *config;
+
+ /* list of the session handles */
+ struct listnode session_list;
+
+ /* /MS11 instance */
+ int use_ms11_decoder;
+ void *ms11_decoder;
+ struct compr_config compr_config;
+
+ int channels;
+
+ /* Buffering utility */
+ struct audio_bitstream_sm *bitstrm;
+
+ int buffer_size;
+ int decoder_type;
+ bool dec_conf_set;
+ uint32_t min_bytes_req_to_dec;
+ bool is_m11_file_mode;
+ void *dec_conf_buf;
+ int32_t dec_conf_bufLength;
+ bool first_bitstrm_buf;
+
+ bool open_dec_route;
+ int dec_format_devices;
+ bool open_dec_mch_route;
+ int dec_mch_format_devices;
+ bool open_passt_route;
+ int passt_format_devices;
+ bool sw_open_trans_route;
+ int sw_trans_format_devices;
+ bool hw_open_trans_route;
+ int hw_trans_format_devices;
+ bool channel_status_set;
+ unsigned char channel_status[24];
+ int route_audio_to_a2dp;
+ int is_ms11_file_playback_mode;
+ char * write_temp_buf;
+ struct output_metadata output_meta_data;
};
struct stream_in {
@@ -194,6 +294,7 @@
snd_device_t out_snd_device;
snd_device_t in_snd_device;
union stream_ptr stream;
+ struct alsa_handle *handle;
};
struct audio_device {
@@ -225,6 +326,9 @@
[USECASE_AUDIO_PLAYBACK_LOW_LATENCY] = "low-latency-playback",
[USECASE_AUDIO_PLAYBACK_MULTI_CH] = "multi-channel-playback",
[USECASE_AUDIO_PLAYBACK_OFFLOAD] = "compress-offload-playback",
+ [USECASE_AUDIO_PLAYBACK_OFFLOAD1] = "compress-offload-playback1",
+ [USECASE_AUDIO_PLAYBACK_OFFLOAD2] = "compress-offload-playback2",
+ [USECASE_AUDIO_PLAYBACK_OFFLOAD3] = "compress-offload-playback3",
[USECASE_AUDIO_RECORD] = "audio-record",
[USECASE_AUDIO_RECORD_COMPRESS] = "audio-record-compress",
[USECASE_AUDIO_RECORD_LOW_LATENCY] = "low-latency-record",
@@ -245,6 +349,7 @@
[USECASE_AUDIO_SPKR_CALIB_TX] = "spkr-vi-record",
};
+
int adev_open_output_stream(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
diff --git a/hal_mpq/audio_stream_out.c b/hal_mpq/audio_stream_out.c
index 71eb2f1..7ef3255 100644
--- a/hal_mpq/audio_stream_out.c
+++ b/hal_mpq/audio_stream_out.c
@@ -49,12 +49,24 @@
#include <platform.h>
#include "sound/compress_params.h"
+#include "audio_bitstream_sm.h"
+
+//TODO: enable sw_decode if required
+#define USE_SWDECODE 0
+
+#if USE_SWDECODE
+#include "SoftMS11.h"
+#endif
#define COMPRESS_OFFLOAD_FRAGMENT_SIZE (32 * 1024)
#define COMPRESS_OFFLOAD_NUM_FRAGMENTS 4
/* ToDo: Check and update a proper value in msec */
#define COMPRESS_OFFLOAD_PLAYBACK_LATENCY 96
#define COMPRESS_PLAYBACK_VOLUME_MAX 0x2000
+#define STRING_LENGTH_OF_INTEGER 12
+
+static int send_offload_cmd_l(struct stream_out* out, int command);
+static int get_snd_codec_id(audio_format_t format);
struct pcm_config pcm_config_deep_buffer = {
.channels = 2,
@@ -89,24 +101,1246 @@
.avail_min = 0,
};
-#define STRING_TO_ENUM(string) { #string, string }
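+/* round n up to the next multiple of m; note this advances even when n is already a multiple */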
+inline int nextMultiple(int n, int m) {
+ return ((n/m) + 1) * m;
+}
-struct string_to_enum {
- const char *name;
- uint32_t value;
-};
+/*******************************************************************************
+Description: check for MS11 supported formats
+*******************************************************************************/
+//TODO: enable sw_decode if required
+#if USE_SWDECODE
+int is_ms11_supported_fromats(int format)
+{
+ ALOGVV("is_ms11_supported_fromats");
+ int main_format = format & AUDIO_FORMAT_MAIN_MASK;
+ if(((main_format == AUDIO_FORMAT_AAC) ||
+ (main_format == AUDIO_FORMAT_HE_AAC_V1) ||
+ (main_format == AUDIO_FORMAT_HE_AAC_V2) ||
+ (main_format == AUDIO_FORMAT_AC3) ||
+ (main_format == AUDIO_FORMAT_AC3_PLUS) ||
+ (main_format == AUDIO_FORMAT_EAC3))) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+#endif
-static const struct string_to_enum out_channels_name_to_enum_table[] = {
- STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO),
- STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
- STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
-};
+/*******************************************************************************
+Description: check if ac3 can played as pass through without MS11 decoder
+*******************************************************************************/
+//TODO: enable sw_decode if required
+#if USE_SWDECODE
+int can_ac3_passthrough_without_ms11(struct stream_out *out, int format)
+{
+ ALOGVV("can_ac3_passthrough_without_ms11");
+ int main_format = format & AUDIO_FORMAT_MAIN_MASK;
+ if(main_format == AUDIO_FORMAT_AC3) {
+ if(((out->hdmi_format == COMPRESSED) ||
+ (out->hdmi_format == AUTO_DEVICE_FORMAT) ||
+ (out->hdmi_format == COMPRESSED_CONVERT_EAC3_AC3) ||
+ (out->hdmi_format == COMPRESSED_CONVERT_ANY_AC3)) &&
+ ((out->spdif_format == COMPRESSED) ||
+ (out->spdif_format == AUTO_DEVICE_FORMAT) ||
+ (out->spdif_format == COMPRESSED_CONVERT_EAC3_AC3) ||
+ (out->spdif_format == COMPRESSED_CONVERT_ANY_AC3))) {
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+/*******************************************************************************
+Description: get levels of buffering, interms of number of buffers
+*******************************************************************************/
+int get_buffering_factor(struct stream_out *out)
+{
+ ALOGVV("get_buffering_factor");
+ if((out->format == AUDIO_FORMAT_PCM_16_BIT) ||
+ (out->format == AUDIO_FORMAT_PCM_24_BIT))
+ return 1;
+ else
+ return NUM_OF_PERIODS;
+}
+
+/*******************************************************************************
+Description: get the buffer size based on format and device format type
+*******************************************************************************/
+void get_fragment_size_and_format(struct stream_out *out, int routeFormat, int *fragment_size,
+ int *fragment_count, int *format)
+{
+ ALOGV("get_fragment_size_and_format");
+
+ int frame_size = 0;
+ *format = out->format;
+ *fragment_count = NUM_OF_PERIODS;
+ switch(out->format) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ frame_size = PCM_16_BITS_PER_SAMPLE * out->channels;
+ /*TODO: do we need below calculation */
+ *fragment_size = nextMultiple(((frame_size * out->sample_rate * TIME_PER_BUFFER)/1000) + MIN_SIZE_FOR_METADATA , frame_size * 32);
+ break;
+ case AUDIO_FORMAT_PCM_24_BIT:
+ frame_size = PCM_24_BITS_PER_SAMPLE * out->channels;
+ *fragment_size = nextMultiple(((frame_size * out->sample_rate * TIME_PER_BUFFER)/1000) + MIN_SIZE_FOR_METADATA, frame_size * 32);
+ break;
+ case AUDIO_FORMAT_AAC:
+ case AUDIO_FORMAT_HE_AAC_V1:
+ case AUDIO_FORMAT_HE_AAC_V2:
+ case AUDIO_FORMAT_AAC_ADIF:
+ case AUDIO_FORMAT_AC3:
+ case AUDIO_FORMAT_AC3_DM:
+ case AUDIO_FORMAT_EAC3:
+ case AUDIO_FORMAT_EAC3_DM:
+ if(routeFormat == ROUTE_UNCOMPRESSED_MCH) {
+ frame_size = PCM_16_BITS_PER_SAMPLE * out->channels;
+ *fragment_size = nextMultiple(AC3_PERIOD_SIZE * out->channels + MIN_SIZE_FOR_METADATA, frame_size * 32);
+ *format = AUDIO_FORMAT_PCM_16_BIT;
+ } else if(routeFormat == ROUTE_UNCOMPRESSED) {
+ frame_size = PCM_16_BITS_PER_SAMPLE * 2;
+ *fragment_size = nextMultiple(AC3_PERIOD_SIZE * 2 + MIN_SIZE_FOR_METADATA, frame_size * 32);
+ *format = AUDIO_FORMAT_PCM_16_BIT;
+ } else {
+ *fragment_size = PERIOD_SIZE_COMPR;
+ }
+ break;
+ case AUDIO_FORMAT_DTS:
+ case AUDIO_FORMAT_DTS_LBR:
+ case AUDIO_FORMAT_MP3:
+ case AUDIO_FORMAT_WMA:
+ case AUDIO_FORMAT_WMA_PRO:
+ case AUDIO_FORMAT_MP2:
+ *fragment_size = PERIOD_SIZE_COMPR;
+ break;
+ default:
+ *fragment_size = PERIOD_SIZE_COMPR;
+ *format = out->format;
+ }
+
+    /* TODO: remove this if the fragment size/count need to be decided based on the format */
+    *fragment_count = COMPRESS_OFFLOAD_NUM_FRAGMENTS;
+    *fragment_size = COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+
+ ALOGV("fragment_size: %d, fragment_count: %d", *fragment_size, *fragment_count);
+ return;
+}
+
+/*******************************************************************************
+Description: buffer length updated to player
+*******************************************************************************/
+int get_buffer_length(struct stream_out *out)
+{
+ /* TODO: Do we need below */
+ ALOGV("get_buffer_length");
+ int buffer_size = COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+ switch(out->format) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ buffer_size = ((PCM_16_BITS_PER_SAMPLE * out->channels * out->sample_rate * TIME_PER_BUFFER)/1000);
+ break;
+ case AUDIO_FORMAT_PCM_24_BIT:
+ buffer_size = ((PCM_24_BITS_PER_SAMPLE * out->channels * out->sample_rate * TIME_PER_BUFFER)/1000);
+ break;
+ case AUDIO_FORMAT_AAC:
+ case AUDIO_FORMAT_HE_AAC_V1:
+ case AUDIO_FORMAT_HE_AAC_V2:
+ case AUDIO_FORMAT_AAC_ADIF:
+ buffer_size = AAC_BLOCK_PER_CHANNEL_MS11 * out->channels;
+ break;
+ case AUDIO_FORMAT_AC3:
+ case AUDIO_FORMAT_AC3_DM:
+ case AUDIO_FORMAT_EAC3:
+ case AUDIO_FORMAT_EAC3_DM:
+ buffer_size = AC3_BUFFER_SIZE;
+ break;
+ case AUDIO_FORMAT_DTS:
+ case AUDIO_FORMAT_DTS_LBR:
+ case AUDIO_FORMAT_MP3:
+ case AUDIO_FORMAT_WMA:
+ case AUDIO_FORMAT_WMA_PRO:
+ case AUDIO_FORMAT_MP2:
+ buffer_size = COMPR_INPUT_BUFFER_SIZE;
+ break;
+ default:
+ buffer_size = COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+ }
+
+    /* TODO: remove this if the buffer size needs to be decided based on the format */
+ buffer_size = COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+ return buffer_size;
+}
+
+/* TODO: Uncomment this when enabling A2DP
+ TODO: add support for the 24 bit playback*/
+#if 0
+/*******************************************************************************
+Description: fix up devices for supporting A2DP playback
+*******************************************************************************/
+void fixUpDevicesForA2DPPlayback(struct stream_out *out)
+{
+ ALOGVV("fixUpDevicesForA2DPPlayback");
+ if(out->devices & AUDIO_DEVICE_OUT_ALL_A2DP) {
+ out->route_audio_to_a2dp = 1;
+ out->devices &= ~AUDIO_DEVICE_OUT_ALL_A2DP;
+ //TODO: add spdif and proxy
+ //out->devices &= ~AUDIO_DEVICE_OUT_SPDIF;
+ //out->devices |= AudioSystem::DEVICE_OUT_PROXY;
+ }
+}
+#endif
+
+/*******************************************************************************
+Description: open temp buffer so that meta data mode can be updated properly
+*******************************************************************************/
+int open_temp_buf_for_metadata(struct stream_out *out)
+{
+ ALOGV("%s", __func__);
+ if (out->write_temp_buf == NULL) {
+ /*Max Period size which is exposed by the compr driver
+ The value needs to be modified when the period size is modified*/
+ out->write_temp_buf = (char *) malloc(PLAYBACK_MAX_PERIOD_SIZE);
+ if (out->write_temp_buf == NULL) {
+ ALOGE("Memory allocation of temp buffer to write pcm to driver failed");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*******************************************************************************
+Description: get the handle from the session list that matches the given devices
+*******************************************************************************/
+struct alsa_handle * get_handle_based_on_devices(struct stream_out *out, int handleDevices)
+{
+ ALOGVV("get_handle_based_on_devices");
+ struct listnode *node;
+ struct alsa_handle *handle = NULL;
+
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if(handle->devices & handleDevices)
+ break;
+ }
+ return handle;
+}
+
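+/*******************************************************************************
+Description: reset the stream_out routing/decoder state fields to their defaults
+*******************************************************************************/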
+void reset_out_parameters(struct stream_out *out) {
+
+ out->hdmi_format = UNCOMPRESSED;
+ out->spdif_format = UNCOMPRESSED;
+ out->decoder_type = UNCOMPRESSED ;
+ out->dec_conf_set = false;
+ out->min_bytes_req_to_dec = 0;
+ out->is_m11_file_mode = false;
+ out->dec_conf_bufLength = 0;
+ out->first_bitstrm_buf = false;
+ out->open_dec_route = false;
+ out->dec_format_devices = AUDIO_DEVICE_NONE;
+ out->open_dec_mch_route = false;
+ out->dec_mch_format_devices =AUDIO_DEVICE_NONE;
+ out->open_passt_route = false;
+ out->passt_format_devices = AUDIO_DEVICE_NONE;
+ out->sw_open_trans_route = false;
+ out->sw_trans_format_devices = AUDIO_DEVICE_NONE;
+ out->hw_open_trans_route =false ;
+ out->hw_trans_format_devices = AUDIO_DEVICE_NONE;
+ out->channel_status_set = false;
+ out->route_audio_to_a2dp = false;
+ out->is_ms11_file_playback_mode = false;
+ out->write_temp_buf = NULL;
+ return;
+}
+
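+/*******************************************************************************
+Description: allocate and zero-initialize an alsa_handle
+*******************************************************************************/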
+struct alsa_handle *get_alsa_handle() {
+
+ struct alsa_handle *handle;
+ handle = (struct alsa_handle *)calloc(1, sizeof(struct alsa_handle));
+ if(handle == NULL) {
+ ALOGE("%s calloc failed for handle", __func__);
+ } else {
+ ALOGE("%s handle is 0x%x", __func__,(uint32_t)handle);
+ }
+
+ return handle;
+}
+
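+/*******************************************************************************
+Description: free an alsa_handle allocated by get_alsa_handle
+*******************************************************************************/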
+void free_alsa_handle(struct alsa_handle *handle) {
+
+    if(handle == NULL) {
+        ALOGE("%s Invalid handle", __func__);
+        return;
+    }
+    free(handle);
+
+ return;
+}
+
+
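+/*******************************************************************************
+Description: get the handle from the session list that matches the given route format
+*******************************************************************************/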
+struct alsa_handle *get_handle_by_route_format(struct stream_out *out,
+ int route_format)
+{
+ struct listnode *node;
+ struct alsa_handle *handle = NULL;
+ ALOGV("%s",__func__);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if(handle->route_format & route_format) {
+ ALOGV("%s found handle %x",__func__,(uint32_t)handle);
+ break;
+ }
+ }
+
+ return handle;
+}
+
+/*******************************************************************************
+Description: get the format index
+*******************************************************************************/
+int get_format_index(int format)
+{
+ ALOGVV("get_format_index");
+ int idx = 0,i;
+ for(i=0; i<NUM_SUPPORTED_CODECS; i++) {
+ if(format == format_index[i][0]) {
+ idx = format_index[i][1];
+ break;
+ }
+ }
+ return idx;
+}
+
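+/*******************************************************************************
+Description: return the space currently available for writing in the compress
+             driver ring buffer of the given handle
+*******************************************************************************/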
+int get_compress_available_space(struct alsa_handle *handle)
+{
+    int ret;
+    size_t avail = 0;
+    struct timespec tstamp;
+    ret = compress_get_hpointer(handle->compr, &avail, &tstamp);
+    if(ret != 0) {
+        ALOGE("cannot get available space");
+    } else
+        ret = (int)avail;
+ return ret;
+}
+
+
+/*******************************************************************************
+Description: validate if the decoder requires configuration to be set as first
+ buffer
+*******************************************************************************/
+int is_decoder_config_required(struct stream_out *out)
+{
+ ALOGVV("is_decoder_config_required");
+ int main_format = out->format & AUDIO_FORMAT_MAIN_MASK;
+ uint32_t i;
+ if(!out->is_ms11_file_playback_mode)
+ return 0;
+ for(i=0; i<sizeof(decodersRequireConfig)/sizeof(int); i++)
+ if(main_format == decodersRequireConfig[i])
+ return 1;
+ return 0;
+}
+
+/*******************************************************************************
+Description: query if input buffering mode require
+*******************************************************************************/
+int is_input_buffering_mode_reqd(struct stream_out *out)
+{
+ ALOGVV("is_input_buffering_mode_reqd");
+ if((out->decoder_type == SW_PASSTHROUGH) ||
+ (out->decoder_type == DSP_PASSTHROUGH))
+ return 1;
+ else
+ return 0;
+}
+
+
+
+/*******************************************************************************
+Description: update use case and routing flags
+*******************************************************************************/
+void update_decode_type_and_routing_states(struct stream_out *out)
+{
+ ALOGV("%s", __func__);
+
+ int format_index = get_format_index(out->format);
+ int decodeType, idx;
+
+ out->open_dec_route = false;
+ out->open_dec_mch_route = false;
+ out->open_passt_route = false;
+ out->sw_open_trans_route = false;
+ out->hw_open_trans_route = false;
+ out->dec_format_devices = out->devices;
+ out->dec_mch_format_devices = AUDIO_DEVICE_NONE;
+ out->passt_format_devices = AUDIO_DEVICE_NONE;
+ out->sw_trans_format_devices = AUDIO_DEVICE_NONE;
+ out->hw_trans_format_devices = AUDIO_DEVICE_NONE;
+ out->decoder_type = 0;
+
+//TODO: enable sw_decode if required
+#if USE_SWDECODE
+ if(is_ms11_supported_fromats(out->format))
+ out->use_ms11_decoder = true;
+#endif
+
+ ALOGV("format_index: %d devices %x", format_index,out->devices);
+ if(out->devices & AUDIO_DEVICE_OUT_SPDIF) {
+ decodeType = usecase_docode_hdmi_spdif[NUM_STATES_FOR_EACH_DEVICE_FMT*format_index]
+ [out->spdif_format];
+ ALOGV("SPDIF: decoderType: %d", decodeType);
+ out->decoder_type = decodeType;
+ for(idx=0; idx<NUM_DECODE_PATH; idx++) {
+ if(route_to_driver[idx][DECODER_TYPE_IDX] == decodeType) {
+ switch(route_to_driver[idx][ROUTE_FORMAT_IDX]) {
+ case ROUTE_UNCOMPRESSED:
+ ALOGVV("ROUTE_UNCOMPRESSED");
+ ALOGVV("SPDIF opened with stereo decode");
+ out->open_dec_route = true;
+ break;
+ case ROUTE_UNCOMPRESSED_MCH:
+ ALOGVV("ROUTE_UNCOMPRESSED_MCH");
+ ALOGVV("SPDIF opened with multichannel decode");
+ out->open_dec_mch_route = true;
+ out->dec_format_devices &= ~AUDIO_DEVICE_OUT_SPDIF;
+ out->dec_mch_format_devices |= AUDIO_DEVICE_OUT_SPDIF;
+ break;
+ case ROUTE_COMPRESSED:
+ ALOGVV("ROUTE_COMPRESSED");
+ out->open_passt_route = true;
+ out->dec_format_devices &= ~AUDIO_DEVICE_OUT_SPDIF;
+ out->passt_format_devices = AUDIO_DEVICE_OUT_SPDIF;
+ break;
+ case ROUTE_DSP_TRANSCODED_COMPRESSED:
+ ALOGVV("ROUTE_DSP_TRANSCODED_COMPRESSED");
+ out->hw_open_trans_route = true;
+ out->hw_trans_format_devices = AUDIO_DEVICE_OUT_SPDIF;
+ break;
+ case ROUTE_SW_TRANSCODED_COMPRESSED:
+ ALOGVV("ROUTE_SW_TRANSCODED_COMPRESSED");
+ out->sw_open_trans_route = true;
+ out->dec_format_devices &= ~AUDIO_DEVICE_OUT_SPDIF;
+ out->sw_trans_format_devices = AUDIO_DEVICE_OUT_SPDIF;
+ break;
+ default:
+ ALOGW("INVALID ROUTE for SPDIF, decoderType %d, routeFormat %d",
+ decodeType, route_to_driver[idx][ROUTE_FORMAT_IDX]);
+ break;
+ }
+ }
+ }
+ }
+ if(out->devices & AUDIO_DEVICE_OUT_AUX_DIGITAL) {
+ decodeType = usecase_docode_hdmi_spdif[NUM_STATES_FOR_EACH_DEVICE_FMT*format_index]
+ [out->hdmi_format];
+ ALOGV("HDMI: decoderType: %d", decodeType);
+ out->decoder_type |= decodeType;
+ for(idx=0; idx<NUM_DECODE_PATH; idx++) {
+ if(route_to_driver[idx][DECODER_TYPE_IDX] == decodeType) {
+ switch(route_to_driver[idx][ROUTE_FORMAT_IDX]) {
+ case ROUTE_UNCOMPRESSED:
+ ALOGVV("ROUTE_UNCOMPRESSED");
+ ALOGVV("HDMI opened with stereo decode");
+ out->open_dec_route = true;
+ break;
+ case ROUTE_UNCOMPRESSED_MCH:
+ ALOGVV("ROUTE_UNCOMPRESSED_MCH");
+ ALOGVV("HDMI opened with multichannel decode");
+ out->open_dec_mch_route = true;
+ out->dec_format_devices &= ~AUDIO_DEVICE_OUT_AUX_DIGITAL;
+ out->dec_mch_format_devices |= AUDIO_DEVICE_OUT_AUX_DIGITAL;
+ break;
+ case ROUTE_COMPRESSED:
+ ALOGVV("ROUTE_COMPRESSED");
+ out->open_passt_route = true;
+ out->dec_format_devices &= ~AUDIO_DEVICE_OUT_AUX_DIGITAL;
+ out->passt_format_devices |= AUDIO_DEVICE_OUT_AUX_DIGITAL;
+ break;
+ case ROUTE_DSP_TRANSCODED_COMPRESSED:
+ ALOGVV("ROUTE_DSP_TRANSCODED_COMPRESSED");
+ out->hw_open_trans_route = true;
+ out->hw_trans_format_devices |= AUDIO_DEVICE_OUT_AUX_DIGITAL;
+ break;
+ case ROUTE_SW_TRANSCODED_COMPRESSED:
+ ALOGVV("ROUTE_SW_TRANSCODED_COMPRESSED");
+ out->sw_open_trans_route = true;
+ out->dec_format_devices &= ~AUDIO_DEVICE_OUT_AUX_DIGITAL;
+ out->sw_trans_format_devices |= AUDIO_DEVICE_OUT_AUX_DIGITAL;
+ break;
+ default:
+ ALOGW("INVALID ROUTE for HDMI, decoderType %d, routeFormat %d",
+ decodeType, route_to_driver[idx][ROUTE_FORMAT_IDX]);
+ break;
+ }
+ }
+ }
+ }
+ if(out->devices & ~(AUDIO_DEVICE_OUT_AUX_DIGITAL |
+ AUDIO_DEVICE_OUT_SPDIF)) {
+ decodeType = usecase_decode_format[NUM_STATES_FOR_EACH_DEVICE_FMT*format_index];
+ ALOGV("Other Devices: decoderType: %d", decodeType);
+ out->decoder_type |= decodeType;
+ for(idx=0; idx<NUM_DECODE_PATH; idx++) {
+ if(route_to_driver[idx][DECODER_TYPE_IDX] == decodeType) {
+ switch(route_to_driver[idx][ROUTE_FORMAT_IDX]) {
+ case ROUTE_UNCOMPRESSED:
+ ALOGVV("ROUTE_UNCOMPRESSED");
+ ALOGVV("Other Devices opened with stereo decode");
+ out->open_dec_route = true;
+ break;
+ case ROUTE_UNCOMPRESSED_MCH:
+ ALOGVV("ROUTE_UNCOMPRESSED_MCH");
+ ALOGVV("Other Devices opened with multichannel decode");
+ out->open_dec_mch_route = true;
+ out->dec_format_devices &= ~(out->devices &
+ ~(AUDIO_DEVICE_OUT_SPDIF |
+ AUDIO_DEVICE_OUT_AUX_DIGITAL));
+ out->dec_mch_format_devices |= (out->devices &
+ ~(AUDIO_DEVICE_OUT_SPDIF |
+ AUDIO_DEVICE_OUT_AUX_DIGITAL));
+ break;
+ default:
+ ALOGW("INVALID ROUTE for Other Devices, decoderType %d, routeFormat %d",
+ decodeType, route_to_driver[idx][ROUTE_FORMAT_IDX]);
+ break;
+ }
+ }
+ }
+ }
+}
+
+/*******************************************************************************
+Description: update handle states
+*******************************************************************************/
+int update_alsa_handle_state(struct stream_out *out)
+{
+ ALOGV("%s", __func__);
+
+ struct alsa_handle *handle = NULL;
+ struct listnode *node;
+
+ if(out->open_dec_route) {
+ if((handle = get_alsa_handle())== NULL)
+ goto error;
+ list_add_tail(&out->session_list, &handle->list);
+ handle->route_format = ROUTE_UNCOMPRESSED;
+ handle->devices = out->dec_format_devices;
+ handle->usecase = platform_get_usecase(USECASE_AUDIO_PLAYBACK_OFFLOAD);
+ handle->out = out;
+ handle->cmd_pending = false;
+ ALOGD("open_dec_route: routeformat: %d, devices: 0x%x: "
+ ,handle->route_format, handle->devices);
+ }
+ if(out->open_dec_mch_route) {
+ if((handle = get_alsa_handle())== NULL)
+ goto error;
+ list_add_tail(&out->session_list, &handle->list);
+ handle->route_format = ROUTE_UNCOMPRESSED_MCH;
+ handle->devices = out->dec_mch_format_devices;
+ handle->usecase = platform_get_usecase(USECASE_AUDIO_PLAYBACK_OFFLOAD);
+ handle->out = out;
+ handle->cmd_pending = false;
+ ALOGD("OpenMCHDecodeRoute: routeformat: %d, devices: 0x%x: "
+ ,handle->route_format, handle->devices);
+ }
+ if(out->open_passt_route) {
+ if((handle = get_alsa_handle())== NULL)
+ goto error;
+ list_add_tail(&out->session_list, &handle->list);
+ handle->route_format = ROUTE_COMPRESSED;
+ handle->devices = out->passt_format_devices;
+ handle->usecase = platform_get_usecase(USECASE_AUDIO_PLAYBACK_OFFLOAD);
+ handle->out = out;
+ handle->cmd_pending = false;
+ ALOGD("open_passt_route: routeformat: %d, devices: 0x%x: "
+ ,handle->route_format, handle->devices);
+ }
+ if(out->sw_open_trans_route) {
+ if((handle = get_alsa_handle())== NULL)
+ goto error;
+ handle->route_format = ROUTE_SW_TRANSCODED_COMPRESSED;
+ handle->devices = out->sw_trans_format_devices;
+ handle->usecase = platform_get_usecase(USECASE_AUDIO_PLAYBACK_OFFLOAD);
+ handle->out = out;
+ handle->cmd_pending = false;
+ ALOGD("OpenTranscodeRoute: routeformat: %d, devices: 0x%x: "
+ ,handle->route_format, handle->devices);
+ }
+ if(out->hw_open_trans_route) {
+ if((handle = get_alsa_handle())== NULL)
+ goto error;
+ handle->route_format = ROUTE_DSP_TRANSCODED_COMPRESSED;
+ handle->devices = out->hw_trans_format_devices;
+ handle->usecase = platform_get_usecase(USECASE_AUDIO_PLAYBACK_OFFLOAD);
+ handle->out = out;
+ handle->cmd_pending = false;
+ ALOGD("OpenTranscodeRoute: routeformat: %d, devices: 0x%x: "
+ ,handle->route_format, handle->devices);
+ }
+
+return 0;
+
+error:
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ free_alsa_handle(handle);
+ }
+
+ return -ENOMEM;
+}
+
+/*******************************************************************************
+Description: setup input path
+*******************************************************************************/
+int allocate_internal_buffers(struct stream_out *out)
+{
+ ALOGV("%s",__func__);
+ int ret = 0;
+ int main_format = out->format & AUDIO_FORMAT_MAIN_MASK;
+
+ /*
+ setup the bitstream state machine
+ */
+    out->bitstrm = (struct audio_bitstream_sm *)calloc(1,
+                            sizeof(struct audio_bitstream_sm));
+    if(!out->bitstrm) {
+        ALOGE("%s Unable to allocate bitstream state machine",__func__);
+        return -ENOMEM;
+    }
+    if(!audio_bitstream_init(out->bitstrm, get_buffering_factor(out))) {
+        ALOGE("%s Unable to allocate bitstream buffering for MS11",__func__);
+        free(out->bitstrm);
+        out->bitstrm = NULL;
+        return -EINVAL;
+    }
+
+ if(is_input_buffering_mode_reqd(out))
+ audio_bitstream_start_input_buffering_mode(out->bitstrm);
+
+ /*
+ setup the buffering data required for decode to start
+ AAC_ADIF would require worst case frame size before decode starts
+       other decoder formats handle partial data, hence the threshold is zero.
+ */
+
+ if(main_format == AUDIO_FORMAT_AAC_ADIF)
+ out->min_bytes_req_to_dec = AAC_BLOCK_PER_CHANNEL_MS11*out->channels-1;
+ else
+ out->min_bytes_req_to_dec = 0;
+
+ ret = open_temp_buf_for_metadata(out);
+ if(ret < 0) {
+ free(out->bitstrm);
+ out->bitstrm = NULL;
+ }
+ out->buffer_size = get_buffer_length(out);
+
+ return ret;
+}
+
+/*******************************************************************************
+Description: free the internal buffers allocated for the stream
+*******************************************************************************/
+int free_internal_buffers(struct stream_out *out)
+{
+ if(out->bitstrm) {
+ free(out->bitstrm);
+ out->bitstrm = NULL;
+ }
+
+ if(out->write_temp_buf) {
+ free(out->write_temp_buf);
+ out->write_temp_buf = NULL;
+ }
+
+ if(out->dec_conf_buf) {
+ free(out->dec_conf_buf);
+ out->dec_conf_buf = NULL;
+ }
+ return 0;
+}
+
+/*******************************************************************************
+Description: open MS11 instance
+*******************************************************************************/
+//TODO: enable sw_decode if required
+#if USE_SWDECODE
+static int open_ms11_instance(struct stream_out *out)
+{
+ ALOGV("openMS11Instance");
+ int32_t formatMS11;
+ int main_format = out->format & AUDIO_FORMAT_MAIN_MASK;
+ out->ms11_decoder = get_soft_ms11();
+ if(!out->ms11_decoder) {
+ ALOGE("Could not resolve all symbols Required for MS11");
+ return -EINVAL;
+ }
+ /*
+ MS11 created
+ */
+ if(initialize_ms11_function_pointers(out->ms11_decoder) == false) {
+ ALOGE("Could not resolve all symbols Required for MS11");
+ free_soft_ms11(out->ms11_decoder);
+ return -EINVAL;
+ }
+ /*
+ update format
+ */
+ if((main_format == AUDIO_FORMAT_AC3) ||
+ (main_format == AUDIO_FORMAT_EAC3)) {
+ /*TODO: who wil setCOMPRESSED_CONVERT_AC3_ASSOC */
+ if (out->spdif_format == COMPRESSED_CONVERT_AC3_ASSOC)
+ formatMS11 = FORMAT_DOLBY_DIGITAL_PLUS_MAIN_ASSOC;
+ else
+ formatMS11 = FORMAT_DOLBY_DIGITAL_PLUS_MAIN;
+ } else
+ formatMS11 = FORMAT_DOLBY_PULSE_MAIN;
+ /*
+ set the use case to the MS11 decoder and open the stream for decoding
+ */
+ if(ms11_set_usecase_and_open_stream_with_mode(out->ms11_decoder,
+ formatMS11, out->channels, out->sample_rate,
+ out->is_m11_file_mode)) {
+ ALOGE("SetUseCaseAndOpen MS11 failed");
+ free_soft_ms11(out->ms11_decoder);
+ return EINVAL;
+ }
+ if(is_decoder_config_required(out) && out->dec_conf_buf && out->dec_conf_bufLength) {
+ if(ms11_set_aac_config(out->ms11_decoder, (unsigned char *)out->dec_conf_buf,
+ out->dec_conf_bufLength) == true) {
+ out->dec_conf_set = true;
+ }
+ }
+
+ return 0;
+}
+#endif
+/*******************************************************************************
+Description: copy input to internal buffer
+*******************************************************************************/
+void copy_bitstream_internal_buffer(struct audio_bitstream_sm *bitstrm,
+ char *buffer, size_t bytes)
+{
+ // copy bitstream to internal buffer
+ audio_bitstream_copy_to_internal_buffer(bitstrm, (char *)buffer, bytes);
+#ifdef DEBUG
+ dumpInputOutput(INPUT, buffer, bytes, 0);
+#endif
+}
+
+/*******************************************************************************
+Description: set decoder config
+*******************************************************************************/
+//TODO: enable sw_decode if required
+#if USE_SWDECODE
+int setDecodeConfig(struct stream_out *out, char *buffer, size_t bytes)
+{
+ ALOGV("%s ", __func__);
+
+ int main_format = out->format & AUDIO_FORMAT_MAIN_MASK;
+ if(!out->dec_conf_set) {
+ if(main_format == AUDIO_FORMAT_AAC ||
+ main_format == AUDIO_FORMAT_HE_AAC_V1 ||
+ main_format == AUDIO_FORMAT_AAC_ADIF ||
+ main_format == AUDIO_FORMAT_HE_AAC_V2) {
+ if(out->ms11_decoder != NULL) {
+ if(ms11_set_aac_config(out->ms11_decoder,(unsigned char *)buffer,
+ bytes) == false) {
+ ALOGE("AAC decoder config fail");
+ return 0;
+ }
+ }
+ }
+
+ out->dec_conf_bufLength = bytes;
+ if(out->dec_conf_buf)
+ free(out->dec_conf_buf);
+
+        out->dec_conf_buf = malloc(out->dec_conf_bufLength);
+        if(!out->dec_conf_buf) {
+            ALOGE("%s: failed to allocate decoder config buffer", __func__);
+            return 0;
+        }
+        memcpy(out->dec_conf_buf,
+               buffer,
+               out->dec_conf_bufLength);
+        out->dec_conf_set = true;
+    }
+ return bytes;
+}
+#endif
+
+//TODO: enable sw_decode if required
+#if USE_SWDECODE
+int validate_sw_free_space(struct stream_out* out, int bytes_consumed_in_decode, int *pcm_2ch_len,
+ int *pcm_mch_len, int *passthru_len, int *transcode_len, bool *wait_for_write_done) {
+
+ struct alsa_handle *handle = NULL;
+ char *bufPtr;
+ int copy_output_buffer_size;
+
+ *pcm_2ch_len = *pcm_mch_len = *passthru_len = *transcode_len = *wait_for_write_done = 0;
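+    /*
+     * For each active route, stage the decoder/pass-through output into the
+     * corresponding bitstream output buffer and check whether that route's
+     * compress device has enough free space; if not, mark the handle as
+     * cmd_pending so the caller can wait for a write-ready callback.
+     */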
+
+ if(out->decoder_type & SW_DECODE) {
+ bufPtr = audio_bitstream_get_output_buffer_write_ptr(out->bitstrm,
+ PCM_2CH_OUT);
+        /*TODO: there is a chance of illegal access if the ms11 output exceeds
+          the bitstream output buffer boundary */
+ copy_output_buffer_size = ms11_copy_output_from_ms11buf(out->ms11_decoder,
+ PCM_2CH_OUT,
+ bufPtr);
+ handle = get_handle_by_route_format(out, ROUTE_UNCOMPRESSED);
+ if(handle == NULL) {
+ ALOGE("%s Invalid handle", __func__);
+ return -EINVAL;
+ }
+ if(get_compress_available_space(handle) < copy_output_buffer_size) {
+ handle->cmd_pending = true;
+ *wait_for_write_done = true;
+ }
+ *pcm_2ch_len = copy_output_buffer_size;
+
+ }
+ if(out->decoder_type & SW_DECODE_MCH) {
+ bufPtr=audio_bitstream_get_output_buffer_write_ptr(out->bitstrm,
+ PCM_MCH_OUT);
+ copy_output_buffer_size = ms11_copy_output_from_ms11buf(out->ms11_decoder,
+ PCM_MCH_OUT,
+ bufPtr);
+ handle = get_handle_by_route_format(out, ROUTE_UNCOMPRESSED_MCH);
+ if(handle == NULL) {
+ ALOGE("%s Invalid handle", __func__);
+ return -EINVAL;
+ }
+
+ if(get_compress_available_space(handle) < copy_output_buffer_size) {
+ handle->cmd_pending = true;
+ *wait_for_write_done = true;
+ }
+ *pcm_mch_len = copy_output_buffer_size;
+ }
+ if(out->decoder_type & SW_PASSTHROUGH) {
+ bufPtr = audio_bitstream_get_output_buffer_write_ptr(out->bitstrm, COMPRESSED_OUT);
+ copy_output_buffer_size = bytes_consumed_in_decode;
+ memcpy(bufPtr, audio_bitstream_get_input_buffer_ptr(out->bitstrm), copy_output_buffer_size);
+
+ handle = get_handle_by_route_format(out, ROUTE_COMPRESSED);
+ if(handle == NULL) {
+ ALOGE("%s Invalid handle", __func__);
+ return -EINVAL;
+ }
+
+ if(get_compress_available_space(handle) < copy_output_buffer_size) {
+ handle->cmd_pending = true;
+ *wait_for_write_done = true;
+ }
+ *passthru_len = copy_output_buffer_size;
+ }
+ if(out->decoder_type & SW_TRANSCODE) {
+ bufPtr = audio_bitstream_get_output_buffer_write_ptr(out->bitstrm,
+ TRANSCODE_OUT);
+        copy_output_buffer_size = ms11_copy_output_from_ms11buf(out->ms11_decoder,
+ COMPRESSED_OUT,
+ bufPtr);
+ handle = get_handle_by_route_format(out, ROUTE_SW_TRANSCODED_COMPRESSED);
+ if(handle == NULL) {
+ ALOGE("%s Invalid handle", __func__);
+ return -EINVAL;
+ }
+ if(get_compress_available_space(handle) < copy_output_buffer_size) {
+ handle->cmd_pending = true;
+ *wait_for_write_done = true;
+ }
+ *transcode_len = copy_output_buffer_size;
+ }
+ return 0;
+}
+#endif
+
+int validate_hw_free_space(struct stream_out *out, int bytes_consumed_in_decode, int *pcm_2ch_len,
+ int *pcm_mch_len, int *passthru_len, int *transcode_len, bool *wait_for_write_done) {
+
+ struct alsa_handle *handle = NULL;
+ char *bufPtr;
+ int copy_output_buffer_size;
+ *pcm_2ch_len = *pcm_mch_len = *passthru_len = *transcode_len = *wait_for_write_done = 0;
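+    /*
+     * For DSP decode/passthrough the compressed input is copied as-is into the
+     * staging buffer and later written to the compress device, which decodes it
+     * on the DSP. If the device lacks free space, the input write pointer is
+     * rewound so the framework can resend the same data.
+     */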
+ if(out->decoder_type & DSP_DECODE) {
+ ALOGVV("DSP_DECODE");
+ bufPtr = audio_bitstream_get_output_buffer_write_ptr(out->bitstrm,
+ PCM_MCH_OUT);
+ copy_output_buffer_size = bytes_consumed_in_decode;
+ memcpy(bufPtr, audio_bitstream_get_input_buffer_ptr(out->bitstrm),
+ copy_output_buffer_size);
+        ALOGVV("%s: bytes_consumed %d, out bufPtr %p, pcm_mch_out_buf_size %d",
+               __func__, bytes_consumed_in_decode, bufPtr,
+ out->bitstrm->pcm_mch_out_buf_size);
+ handle = get_handle_by_route_format(out, ROUTE_UNCOMPRESSED);/*TODO: revisit */
+ if(handle == NULL) {
+ ALOGE("%s Invalid handle", __func__);
+ return -EINVAL;
+ }
+ if(get_compress_available_space(handle) < copy_output_buffer_size) {
+ handle->cmd_pending = true;
+ *wait_for_write_done = true;
+ /*reset input buffer pointer as flinger will resend the data back */
+ audio_bitstream_set_input_buffer_write_ptr(out->bitstrm,
+ -copy_output_buffer_size);
+ *pcm_mch_len = copy_output_buffer_size;
+ }
+ else
+ *pcm_mch_len = copy_output_buffer_size;
+ }
+ if(out->decoder_type & DSP_PASSTHROUGH) {
+ ALOGVV("DSP_PASSTHROUGH");
+ bufPtr = audio_bitstream_get_output_buffer_write_ptr(out->bitstrm, COMPRESSED_OUT);
+ copy_output_buffer_size = bytes_consumed_in_decode;
+ memcpy(bufPtr, audio_bitstream_get_input_buffer_ptr(out->bitstrm), copy_output_buffer_size);
+ handle = get_handle_by_route_format(out, ROUTE_COMPRESSED);
+ if(handle == NULL) {
+ ALOGE("%s Invalid handle", __func__);
+ return -EINVAL;
+ }
+ if(get_compress_available_space(handle) < copy_output_buffer_size) {
+ handle->cmd_pending = true;
+ *wait_for_write_done = true;
+ *passthru_len = copy_output_buffer_size;
+ /*reset input buffer pointer as flinger will resend the data back */
+ audio_bitstream_set_input_buffer_ptr(out->bitstrm, -copy_output_buffer_size);
+ }
+ else
+ *passthru_len = copy_output_buffer_size;
+ }
+ /*TODO: handle DSP Transcode usecase */
+ return 0;
+}
+
+int update_bitstrm_pointers(struct stream_out *out, int pcm_2ch_len,
+ int pcm_mch_len, int passthru_len, int transcode_len) {
+
+ if(out->decoder_type & SW_DECODE) {
+ audio_bitstream_set_output_buffer_write_ptr(out->bitstrm, PCM_2CH_OUT,
+ pcm_2ch_len);
+
+ }
+ if(out->decoder_type & SW_DECODE_MCH || out->decoder_type & DSP_DECODE) {
+ audio_bitstream_set_output_buffer_write_ptr(out->bitstrm, PCM_MCH_OUT, pcm_mch_len);
+ }
+ if(out->decoder_type & SW_PASSTHROUGH || out->decoder_type & DSP_PASSTHROUGH) {
+ audio_bitstream_set_output_buffer_write_ptr(out->bitstrm, COMPRESSED_OUT, passthru_len);
+ }
+ if(out->decoder_type & SW_TRANSCODE) {
+ audio_bitstream_set_output_buffer_write_ptr(out->bitstrm,
+ TRANSCODE_OUT,
+ transcode_len);
+ }
+ return 0;
+}
+
+/*TODO correct it */
+static int configure_compr(struct stream_out *out,
+ struct alsa_handle *handle) {
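+    /*
+     * Clone the stream level compress config into this handle so each route
+     * (stereo PCM, multichannel PCM, passthrough, transcode) can open its own
+     * compress device with the same clip properties.
+     */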
+ handle->compr_config.codec = (struct snd_codec *)
+ calloc(1, sizeof(struct snd_codec));
+ handle->compr_config.codec->id =
+ get_snd_codec_id(out->format); /*TODO: correct this based on format*/
+ handle->compr_config.fragment_size = COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+ handle->compr_config.fragments = COMPRESS_OFFLOAD_NUM_FRAGMENTS;
+ handle->compr_config.codec->sample_rate =
+ compress_get_alsa_rate(out->sample_rate);
+ handle->compr_config.codec->bit_rate = out->compr_config.codec->bit_rate;
+ handle->compr_config.codec->ch_in =
+ popcount(out->channel_mask);
+ handle->compr_config.codec->ch_out = handle->compr_config.codec->ch_in;
+ handle->compr_config.codec->format = out->compr_config.codec->format;
+ memcpy(&handle->compr_config.codec->options,
+ &out->compr_config.codec->options,
+ sizeof(union snd_codec_options));
+ return 0;
+}
+
+/*TODO: do we need to apply volume at the session open*/
+static int set_compress_volume(struct alsa_handle *handle, int left, int right)
+{
+
+ struct audio_device *adev = handle->out->dev;
+ struct mixer_ctl *ctl;
+ int volume[2];
+
+ char mixer_ctl_name[44]; // max length of name is 44 as defined
+ char device_id[STRING_LENGTH_OF_INTEGER+1];
+
+ memset(mixer_ctl_name, 0, sizeof(mixer_ctl_name));
+ strlcpy(mixer_ctl_name, "Compress Playback Volume", sizeof(mixer_ctl_name));
+
+ memset(device_id, 0, sizeof(device_id));
+    snprintf(device_id, sizeof(device_id), "%d", handle->device_id);
+
+ strlcat(mixer_ctl_name, device_id, sizeof(mixer_ctl_name));
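+    /* the control name becomes "Compress Playback Volume<device_id>" */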
+
+ ctl = mixer_get_ctl_by_name(adev->mixer, mixer_ctl_name);
+ if (!ctl) {
+ ALOGE("%s: Could not get ctl for mixer cmd - %s",
+ __func__, mixer_ctl_name);
+ return -EINVAL;
+ }
+ volume[0] = (int)(left * COMPRESS_PLAYBACK_VOLUME_MAX);
+ volume[1] = (int)(right * COMPRESS_PLAYBACK_VOLUME_MAX);
+ mixer_ctl_set_array(ctl, volume, sizeof(volume)/sizeof(volume[0]));
+
+ return 0;
+
+}
+
+/*******************************************************************************
+Description: software decode handling
+*******************************************************************************/
+//TODO: enable sw_decode if required
+#if USE_SWDECODE
+static int sw_decode(struct stream_out *out,
+ char *buffer,
+ size_t bytes,
+ size_t *bytes_consumed,
+ bool *continueDecode)
+{
+ /* bytes pending to be decoded in current buffer*/
+ bool wait_for_write_done = false;
+ int bytes_pending_for_decode = 0;
+ /* bytes consumed in current write buffer */
+ int total_bytes_consumed = 0;
+ size_t copyBytesMS11 = 0;
+ size_t bytes_consumed_in_decode = 0;
+ size_t copy_output_buffer_size = 0;
+ uint32_t outSampleRate = out->sample_rate;
+ uint32_t outChannels = out->channels;
+ char * bufPtr;
+ int pcm_2ch_len, pcm_mch_len, passthru_len, transcode_len;
+ struct alsa_handle *handle = NULL;
+
+ ALOGVV("sw Decode");
+ // eos handling
+ if(bytes == 0) {
+ if(out->format == AUDIO_FORMAT_AAC_ADIF)
+ audio_bitstream_append_silence_internal_buffer(out->bitstrm,
+ out->min_bytes_req_to_dec,0x0);
+ else
+ return false;
+ }
+ /*
+ check for sync word, if present then configure MS11 for fileplayback mode
+ OFF. This is specifically done to handle Widevine usecase, in which the
+ ADTS HEADER is not stripped off by the Widevine parser
+ */
+ if(out->first_bitstrm_buf == true) {
+        uint16_t uData = (*((unsigned char *)buffer) << 8) + *((unsigned char *)buffer + 1);
+ if(ADTS_HEADER_SYNC_RESULT == (uData & ADTS_HEADER_SYNC_MASK)) {
+ ALOGD("Sync word found hence configure MS11 in file_playback Mode OFF");
+ free_soft_ms11(out->ms11_decoder);
+ out->is_m11_file_mode = false;
+ open_ms11_instance(out);
+ }
+ out->first_bitstrm_buf = false;
+ }
+ //decode
+ if(out->decoder_type == SW_PASSTHROUGH) {
+ /*TODO: check if correct */
+ bytes_consumed_in_decode = audio_bitstream_get_size(out->bitstrm);
+ } else {
+ if(audio_bitstream_sufficient_buffer_to_decode(out->bitstrm,
+ out->min_bytes_req_to_dec) == true) {
+ bufPtr = audio_bitstream_get_input_buffer_ptr(out->bitstrm);
+ copyBytesMS11 = audio_bitstream_get_size(out->bitstrm);
+ ms11_copy_bitstream_to_ms11_inpbuf(out->ms11_decoder, bufPtr,copyBytesMS11);
+ bytes_consumed_in_decode = ms11_stream_decode(out->ms11_decoder,
+ &outSampleRate, &outChannels);
+ }
+ }
+
+ if((out->sample_rate != outSampleRate) || (out->channels != outChannels)) {
+ ALOGD("Change in sample rate. New sample rate: %d", outSampleRate);
+ out->sample_rate = outSampleRate;
+ out->channels = outChannels;
+ handle = get_handle_by_route_format(out, ROUTE_UNCOMPRESSED);
+ if(handle !=NULL) {
+ configure_compr(out, handle);
+ handle->compr = compress_open(SOUND_CARD, handle->device_id,
+ COMPRESS_IN, &handle->compr_config);
+ if (handle->compr && !is_compress_ready(handle->compr)) {
+ ALOGE("%s: %s", __func__, compress_get_error(handle->compr));
+ compress_close(handle->compr);
+ handle->compr = NULL;
+ }
+ if (out->offload_callback)
+ compress_nonblock(handle->compr, out->non_blocking);
+
+ set_compress_volume(handle, out->left_volume, out->right_volume);
+ }
+
+ handle = get_handle_by_route_format(out, ROUTE_UNCOMPRESSED_MCH);
+ if(handle !=NULL) {
+ configure_compr(out, handle);
+ handle->compr = compress_open(SOUND_CARD, handle->device_id,
+ COMPRESS_IN, &handle->compr_config);
+ if (handle->compr && !is_compress_ready(handle->compr)) {
+ ALOGE("%s: %s", __func__, compress_get_error(handle->compr));
+ compress_close(handle->compr);
+ handle->compr = NULL;
+ }
+ if (out->offload_callback)
+ compress_nonblock(handle->compr, out->non_blocking);
+ set_compress_volume(handle, out->left_volume, out->right_volume);
+ out->channel_status_set = false;
+ }
+ }
+
+
+ validate_sw_free_space(out, bytes_consumed_in_decode, &pcm_2ch_len, &pcm_mch_len,
+ &passthru_len, &transcode_len, &wait_for_write_done);
+
+ if(wait_for_write_done && out->non_blocking) {
+ send_offload_cmd_l(out, OFFLOAD_CMD_WAIT_FOR_BUFFER);
+ *continueDecode = false;
+ *bytes_consumed = 0;
+ return 0;
+ } else {
+ update_bitstrm_pointers(out, pcm_2ch_len, pcm_mch_len,
+ passthru_len, transcode_len);
+ audio_bitstream_copy_residue_to_start(out->bitstrm, bytes_consumed_in_decode);
+ *bytes_consumed = bytes_consumed_in_decode;
+ }
+
+ copy_output_buffer_size = pcm_2ch_len + pcm_mch_len + passthru_len + transcode_len;
+ if(copy_output_buffer_size &&
+ audio_bitstream_sufficient_buffer_to_decode(out->bitstrm, out->min_bytes_req_to_dec) == true) {
+ *continueDecode = true;
+ return 0;
+ }
+ return 0;
+}
+#endif
+
+/*******************************************************************************
+Description: dsp decode handling
+*******************************************************************************/
+static bool dsp_decode(struct stream_out *out, char *buffer, size_t bytes,
+ size_t *bytes_consumed, bool *continueDecode)
+{
+ char *bufPtr;
+ size_t bytes_consumed_in_decode = 0;
+
+ bool wait_for_write_done = false;
+ int pcm_2ch_len, pcm_mch_len, passthru_len, transcode_len;
+
+ ALOGVV("dsp_decode");
+ // decode
+ {
+ bytes_consumed_in_decode = audio_bitstream_get_size(out->bitstrm);
+ }
+ // handle change in sample rate
+ {
+ }
+ //TODO: check if the copy of the buffers can be avoided
+    /* can be removed as it's not required for the dsp decode usecase */
+ *continueDecode = false;
+ validate_hw_free_space(out, bytes_consumed_in_decode, &pcm_2ch_len, &pcm_mch_len,
+ &passthru_len, &transcode_len, &wait_for_write_done);
+
+ if(wait_for_write_done && out->non_blocking) {
+ send_offload_cmd_l(out, OFFLOAD_CMD_WAIT_FOR_BUFFER);
+ *bytes_consumed = 0;
+ return 0;
+ } else {
+ update_bitstrm_pointers(out, pcm_2ch_len, pcm_mch_len,
+ passthru_len, transcode_len);
+ audio_bitstream_copy_residue_to_start(out->bitstrm, bytes_consumed_in_decode);
+ *bytes_consumed = bytes_consumed_in_decode;
+ ALOGV("%s bytes_consumed_in_decode =%d",__func__,bytes_consumed_in_decode);
+ }
+
+ return 0;
+}
+
+static bool decode(struct stream_out *out, char * buffer, size_t bytes,
+ size_t *bytes_consumed, bool *continuedecode)
+{
+ ALOGV("decode");
+ bool continueDecode = false;
+ int ret = 0;
+
+ // TODO: enable software decode if required
+ /*if (out->use_ms11_decoder) {
+ ret = sw_decode(out, buffer, bytes,
+ bytes_consumed, continuedecode);
+
+ // set channel status
+ // Set the channel status after first frame decode/transcode
+ //TODO: set the SPDIF channel status bits
+ if(out->channel_status_set == false)
+ setSpdifchannel_status(
+ audio_bitstream_get_output_buffer_ptr(out->bitstrm, COMPRESSED_OUT),
+ bytes, AUDIO_PARSER_CODEC_AC3);
+
+ } else */{
+ ret = dsp_decode(out, buffer, bytes,
+ bytes_consumed, continuedecode);
+ // set channel status
+ // Set the channel status after first frame decode/transcode
+ //TODO: set the SPDIF channel status bits
+/* if(out->channel_status_set == false)
+ setSpdifchannel_status(
+ audio_bitstream_get_output_buffer_ptr(out->bitstrm, COMPRESSED_OUT),
+ bytes, AUDIO_PARSER_CODEC_DTS);
+*/
+ }
+ return ret;
+}
+
+/*******************************************************************************
+Description: fixup sample rate and channel info based on format
+*******************************************************************************/
+void fixupSampleRateChannelModeMS11Formats(struct stream_out *out)
+{
+ ALOGV("fixupSampleRateChannelModeMS11Formats");
+ int main_format = out->format & AUDIO_FORMAT_MAIN_MASK;
+ int subFormat = out->format & AUDIO_FORMAT_SUB_MASK;
+/*
+NOTE: For AAC, the output of MS11 is 48000 for the sample rates greater than
+      24000. Sample rates <= 24000 will be at their native sample rate.
+      For AC3, the PCM output is at its native sample rate if the decode is a
+      single-decode usecase for MS11.
+*/
+ if(main_format == AUDIO_FORMAT_AAC ||
+ main_format == AUDIO_FORMAT_HE_AAC_V1 ||
+ main_format == AUDIO_FORMAT_HE_AAC_V2 ||
+ main_format == AUDIO_FORMAT_AAC_ADIF) {
+ out->sample_rate = out->sample_rate > 24000 ? 48000 : out->sample_rate;
+ out->channels = 6;
+ } else if (main_format == AUDIO_FORMAT_AC3 ||
+ main_format == AUDIO_FORMAT_EAC3) {
+ /* transcode AC3/EAC3 44.1K to 48K AC3 for non dual-mono clips */
+ if (out->sample_rate == 44100 &&
+ (subFormat != AUDIO_FORMAT_DOLBY_SUB_DM) &&
+ (out->spdif_format == COMPRESSED ||
+ out->spdif_format == AUTO_DEVICE_FORMAT ||
+ out->spdif_format == COMPRESSED_CONVERT_EAC3_AC3) &&
+ (out->hdmi_format == UNCOMPRESSED ||
+ out->hdmi_format == UNCOMPRESSED_MCH)) {
+ out->sample_rate = 48000;
+ out->spdif_format = COMPRESSED_CONVERT_AC3_ASSOC;
+ } else if (out->sample_rate == 44100) {
+ out->spdif_format = UNCOMPRESSED;
+ }
+ out->channels = 6;
+ }
+ ALOGD("ms11 format fixup: out->spdif_format %d, out->hdmi_format %d",
+ out->spdif_format, out->hdmi_format);
+}
static bool is_supported_format(audio_format_t format)
{
- if (format == AUDIO_FORMAT_MP3 ||
- format == AUDIO_FORMAT_AAC)
+ switch (format) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ case AUDIO_FORMAT_MP3:
+ case AUDIO_FORMAT_AAC:
+ case AUDIO_FORMAT_WMA:
+ case AUDIO_FORMAT_WMA_PRO:
+ case AUDIO_FORMAT_MP2:
return true;
+ default:
+ ALOGE("%s: Unsupported audio format: %x", __func__, format);
+ break;
+ }
return false;
}
@@ -116,14 +1350,26 @@
int id = 0;
switch (format) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ id = SND_AUDIOCODEC_PCM;
+ break;
case AUDIO_FORMAT_MP3:
id = SND_AUDIOCODEC_MP3;
break;
case AUDIO_FORMAT_AAC:
id = SND_AUDIOCODEC_AAC;
break;
+ case AUDIO_FORMAT_WMA:
+ id = SND_AUDIOCODEC_WMA;
+ break;
+ case AUDIO_FORMAT_WMA_PRO:
+ id = SND_AUDIOCODEC_WMA_PRO;
+ break;
+ case AUDIO_FORMAT_MP2:
+ id = SND_AUDIOCODEC_MP2;
+ break;
default:
- ALOGE("%s: Unsupported audio format", __func__);
+ ALOGE("%s: Unsupported audio format %x", __func__, format);
}
return id;
@@ -173,21 +1419,33 @@
/* must be called iwth out->lock locked */
static void stop_compressed_output_l(struct stream_out *out)
{
+ struct listnode *node;
+ struct alsa_handle *handle;
+ bool is_compr_out = false;
+
+ ALOGV("%s", __func__);
out->offload_state = OFFLOAD_STATE_IDLE;
out->playback_started = 0;
out->send_new_metadata = 1;
- if (out->compr != NULL) {
- compress_stop(out->compr);
- while (out->offload_thread_blocked) {
- pthread_cond_wait(&out->cond, &out->lock);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if (handle->compr != NULL) {
+ compress_stop(handle->compr);
+ is_compr_out = true;
}
}
+ if (is_compr_out) {
+ while (out->offload_thread_blocked)
+ pthread_cond_wait(&out->cond, &out->lock);
+ }
}
static void *offload_thread_loop(void *context)
{
struct stream_out *out = (struct stream_out *) context;
struct listnode *item;
+ struct listnode *node;
+ struct alsa_handle *handle;
out->offload_state = OFFLOAD_STATE_IDLE;
out->playback_started = 0;
@@ -217,15 +1475,15 @@
cmd = node_to_item(item, struct offload_cmd, node);
list_remove(item);
- ALOGVV("%s STATE %d CMD %d out->compr %p",
- __func__, out->offload_state, cmd->cmd, out->compr);
+ ALOGVV("%s STATE %d CMD %d",
+ __func__, out->offload_state, cmd->cmd);
if (cmd->cmd == OFFLOAD_CMD_EXIT) {
free(cmd);
break;
}
- if (out->compr == NULL) {
+ if (list_empty(&out->session_list)) {
ALOGE("%s: Compress handle is NULL", __func__);
pthread_cond_signal(&out->cond);
continue;
@@ -235,18 +1493,34 @@
send_callback = false;
switch(cmd->cmd) {
case OFFLOAD_CMD_WAIT_FOR_BUFFER:
- compress_wait(out->compr, -1);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if (handle->compr && handle->cmd_pending) {
+ compress_wait(handle->compr, -1);
+ handle->cmd_pending = false;
+ }
+ }
send_callback = true;
event = STREAM_CBK_EVENT_WRITE_READY;
break;
case OFFLOAD_CMD_PARTIAL_DRAIN:
- compress_next_track(out->compr);
- compress_partial_drain(out->compr);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if (handle->compr) {
+ compress_next_track(handle->compr);
+ compress_partial_drain(handle->compr);
+ }
+ }
send_callback = true;
event = STREAM_CBK_EVENT_DRAIN_READY;
break;
case OFFLOAD_CMD_DRAIN:
- compress_drain(out->compr);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if (handle->compr) {
+ compress_drain(handle->compr);
+ }
+ }
send_callback = true;
event = STREAM_CBK_EVENT_DRAIN_READY;
break;
@@ -373,18 +1647,18 @@
return 0;
}
-static int stop_output_stream(struct stream_out *out)
+static int stop_output_stream(struct stream_out *out, struct alsa_handle *handle)
{
int i, ret = 0;
struct audio_usecase *uc_info;
struct audio_device *adev = out->dev;
ALOGV("%s: enter: usecase(%d: %s)", __func__,
- out->usecase, use_case_table[out->usecase]);
- uc_info = get_usecase_from_list(adev, out->usecase);
+ handle->usecase, use_case_table[handle->usecase]);
+ uc_info = get_usecase_from_list(adev, handle->usecase);
if (uc_info == NULL) {
ALOGE("%s: Could not find the usecase (%d) in the list",
- __func__, out->usecase);
+ __func__, handle->usecase);
return -EINVAL;
}
@@ -409,63 +1683,66 @@
return ret;
}
-int start_output_stream(struct stream_out *out)
+int start_output_stream(struct stream_out *out, struct alsa_handle *handle)
{
int ret = 0;
struct audio_usecase *uc_info;
struct audio_device *adev = out->dev;
ALOGV("%s: enter: usecase(%d: %s) devices(%#x)",
- __func__, out->usecase, use_case_table[out->usecase], out->devices);
- out->pcm_device_id = platform_get_pcm_device_id(out->usecase, PCM_PLAYBACK);
- if (out->pcm_device_id < 0) {
+ __func__, handle->usecase, use_case_table[handle->usecase], handle->devices);
+ handle->device_id = platform_get_pcm_device_id(handle->usecase, PCM_PLAYBACK);
+ if (handle->device_id < 0) {
ALOGE("%s: Invalid PCM device id(%d) for the usecase(%d)",
- __func__, out->pcm_device_id, out->usecase);
+ __func__, handle->device_id, handle->usecase);
ret = -EINVAL;
goto error_config;
}
uc_info = (struct audio_usecase *)calloc(1, sizeof(struct audio_usecase));
- uc_info->id = out->usecase;
+ uc_info->id = handle->usecase;
+ uc_info->handle = handle;
uc_info->type = PCM_PLAYBACK;
uc_info->stream.out = out;
- uc_info->devices = out->devices;
+ uc_info->devices = handle->devices;
uc_info->in_snd_device = SND_DEVICE_NONE;
uc_info->out_snd_device = SND_DEVICE_NONE;
/* This must be called before adding this usecase to the list */
- if (out->devices & AUDIO_DEVICE_OUT_AUX_DIGITAL)
- check_and_set_hdmi_channels(adev, out->config.channels);
+ //if (out->devices & AUDIO_DEVICE_OUT_AUX_DIGITAL)
+ // check_and_set_hdmi_channels(adev, out->config.channels);
list_add_tail(&adev->usecase_list, &uc_info->list);
- select_devices(adev, out->usecase);
+ select_devices(adev, handle->usecase);
ALOGV("%s: Opening PCM device card_id(%d) device_id(%d)",
- __func__, 0, out->pcm_device_id);
- if (out->usecase != USECASE_AUDIO_PLAYBACK_OFFLOAD) {
- out->pcm = pcm_open(SOUND_CARD, out->pcm_device_id,
- PCM_OUT | PCM_MONOTONIC, &out->config);
- if (out->pcm && !pcm_is_ready(out->pcm)) {
- ALOGE("%s: %s", __func__, pcm_get_error(out->pcm));
- pcm_close(out->pcm);
- out->pcm = NULL;
+ __func__, 0, handle->device_id);
+ if (out->uc_strm_type != OFFLOAD_PLAYBACK_STREAM) {
+ handle->compr = NULL;
+ handle->pcm = pcm_open(SOUND_CARD, handle->device_id,
+ PCM_OUT | PCM_MONOTONIC, &handle->config);
+ if (handle->pcm && !pcm_is_ready(handle->pcm)) {
+ ALOGE("%s: %s", __func__, pcm_get_error(handle->pcm));
+ pcm_close(handle->pcm);
+ handle->pcm = NULL;
ret = -EIO;
goto error_open;
}
} else {
- out->pcm = NULL;
- out->compr = compress_open(SOUND_CARD, out->pcm_device_id,
- COMPRESS_IN, &out->compr_config);
- if (out->compr && !is_compress_ready(out->compr)) {
- ALOGE("%s: %s", __func__, compress_get_error(out->compr));
- compress_close(out->compr);
- out->compr = NULL;
+ handle->pcm = NULL;
+ configure_compr(out, handle);
+ handle->compr = compress_open(SOUND_CARD, handle->device_id,
+ COMPRESS_IN, &handle->compr_config);
+ if (handle->compr && !is_compress_ready(handle->compr)) {
+ ALOGE("%s: %s", __func__, compress_get_error(handle->compr));
+ compress_close(handle->compr);
+ handle->compr = NULL;
ret = -EIO;
goto error_open;
}
if (out->offload_callback)
- compress_nonblock(out->compr, out->non_blocking);
+ compress_nonblock(handle->compr, out->non_blocking);
if (adev->visualizer_start_output != NULL)
adev->visualizer_start_output(out->handle);
@@ -473,7 +1750,7 @@
ALOGV("%s: exit", __func__);
return 0;
error_open:
- stop_output_stream(out);
+ stop_output_stream(out, handle);
error_config:
return ret;
}
@@ -494,10 +1771,12 @@
{
struct stream_out *out = (struct stream_out *)stream;
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD)
+ /*if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD)
return out->compr_config.fragment_size;
+ */
+ return (size_t)out->buffer_size;
- return out->config.period_size * audio_stream_frame_size(stream);
+ //return out->config.period_size * audio_stream_frame_size(stream);
}
static uint32_t out_get_channels(const struct audio_stream *stream)
@@ -523,6 +1802,8 @@
{
struct stream_out *out = (struct stream_out *)stream;
struct audio_device *adev = out->dev;
+ struct listnode *node;
+ struct alsa_handle *handle;
ALOGV("%s: enter: usecase(%d: %s)", __func__,
out->usecase, use_case_table[out->usecase]);
@@ -538,21 +1819,21 @@
pthread_mutex_lock(&adev->lock);
if (!out->standby) {
out->standby = true;
- if (out->usecase != USECASE_AUDIO_PLAYBACK_OFFLOAD) {
- if (out->pcm) {
- pcm_close(out->pcm);
- out->pcm = NULL;
+ stop_compressed_output_l(out);
+ out->gapless_mdata.encoder_delay = 0;
+ out->gapless_mdata.encoder_padding = 0;
+
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if (handle->compr != NULL) {
+ compress_close(handle->compr);
+ handle->compr = NULL;
+ } else if (handle->pcm) {
+ pcm_close(handle->pcm);
+ handle->pcm = NULL;
}
- } else {
- stop_compressed_output_l(out);
- out->gapless_mdata.encoder_delay = 0;
- out->gapless_mdata.encoder_padding = 0;
- if (out->compr != NULL) {
- compress_close(out->compr);
- out->compr = NULL;
- }
+ stop_output_stream(out, handle);
}
- stop_output_stream(out);
}
pthread_mutex_unlock(&adev->lock);
pthread_mutex_unlock(&out->lock);
@@ -570,6 +1851,7 @@
int ret = 0;
char value[32];
struct compr_gapless_mdata tmp_mdata;
+ bool gapless_meta_set = true;
if (!out || !parms) {
return -EINVAL;
@@ -579,21 +1861,61 @@
if (ret >= 0) {
tmp_mdata.encoder_delay = atoi(value); //whats a good limit check?
} else {
- return -EINVAL;
+ gapless_meta_set = false;
}
ret = str_parms_get_str(parms, AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES, value, sizeof(value));
if (ret >= 0) {
tmp_mdata.encoder_padding = atoi(value);
} else {
- return -EINVAL;
+ gapless_meta_set = false;
}
- out->gapless_mdata = tmp_mdata;
- out->send_new_metadata = 1;
- ALOGV("%s new encoder delay %u and padding %u", __func__,
- out->gapless_mdata.encoder_delay, out->gapless_mdata.encoder_padding);
+ if (gapless_meta_set) {
+ out->gapless_mdata = tmp_mdata;
+ out->send_new_metadata = 1;
+ ALOGV("%s new encoder delay %u and padding %u", __func__,
+ out->gapless_mdata.encoder_delay, out->gapless_mdata.encoder_padding);
+ }
+ if(out->format == AUDIO_FORMAT_WMA || out->format == AUDIO_FORMAT_WMA_PRO) {
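+        /*
+         * WMA clip properties arrive as key/value pairs through set_parameters;
+         * they are cached in the codec options so they get applied when the
+         * compress device is opened for this stream.
+         */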
+ ret = str_parms_get_str(parms, AUDIO_OFFLOAD_CODEC_WMA_FORMAT_TAG, value, sizeof(value));
+ if (ret >= 0) {
+ out->compr_config.codec->format = atoi(value);
+ }
+ ret = str_parms_get_str(parms, AUDIO_OFFLOAD_CODEC_WMA_BLOCK_ALIGN, value, sizeof(value));
+ if (ret >= 0) {
+ out->compr_config.codec->options.wma.super_block_align = atoi(value);
+ }
+ ret = str_parms_get_str(parms, AUDIO_OFFLOAD_CODEC_WMA_BIT_PER_SAMPLE, value, sizeof(value));
+ if (ret >= 0) {
+ out->compr_config.codec->options.wma.bits_per_sample = atoi(value);
+ }
+ ret = str_parms_get_str(parms, AUDIO_OFFLOAD_CODEC_WMA_CHANNEL_MASK, value, sizeof(value));
+ if (ret >= 0) {
+ out->compr_config.codec->options.wma.channelmask = atoi(value);
+ }
+ ret = str_parms_get_str(parms, AUDIO_OFFLOAD_CODEC_WMA_ENCODE_OPTION, value, sizeof(value));
+ if (ret >= 0) {
+ out->compr_config.codec->options.wma.encodeopt = atoi(value);
+ }
+ ret = str_parms_get_str(parms, AUDIO_OFFLOAD_CODEC_WMA_ENCODE_OPTION1, value, sizeof(value));
+ if (ret >= 0) {
+ out->compr_config.codec->options.wma.encodeopt1 = atoi(value);
+ }
+ ret = str_parms_get_str(parms, AUDIO_OFFLOAD_CODEC_WMA_ENCODE_OPTION2, value, sizeof(value));
+ if (ret >= 0) {
+ out->compr_config.codec->options.wma.encodeopt2 = atoi(value);
+ }
+ ALOGV("WMA params: fmt %x, balgn %x, sr %d, chmsk %x, encop %x, op1 %x, op2 %x",
+ out->compr_config.codec->format,
+ out->compr_config.codec->options.wma.super_block_align,
+ out->compr_config.codec->options.wma.bits_per_sample,
+ out->compr_config.codec->options.wma.channelmask,
+ out->compr_config.codec->options.wma.encodeopt,
+ out->compr_config.codec->options.wma.encodeopt1,
+ out->compr_config.codec->options.wma.encodeopt2);
+ }
return 0;
}
@@ -608,8 +1930,7 @@
int ret, val = 0;
bool select_new_device = false;
- ALOGD("%s: enter: usecase(%d: %s) kvpairs: %s",
- __func__, out->usecase, use_case_table[out->usecase], kvpairs);
+ ALOGD("%s: enter: kvpairs: %s", __func__, kvpairs);
parms = str_parms_create_str(kvpairs);
ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));
if (ret >= 0) {
@@ -653,13 +1974,64 @@
if (!out->standby)
select_devices(adev, out->usecase);
}
+//TODO:
+//Get the device and device format mapping from the RoutingManager.
+//Decide which streams need to be derouted and which need to opened/closed
+//Update the respective device in each of the handles
+#if 0
+ if (out->uc_strm_type == OFFLOAD_PLAYBACK_STREAM) {
+
+ /* TODO get format form routing manager */
+ update_decode_type_and_routing_states(out);
+
+ if(is_input_buffering_mode_reqd(out))
+ audio_bitstream_start_input_buffering_mode(out->bitstrm);
+ else
+ audio_bitstream_stop_input_buffering_mode(out->bitstrm);
+ /*
+ For the runtime format change, close the device first to avoid any
+ concurrent PCM + Compressed sessions on the same device.
+ */
+ close_handles_for_device_switch(out);
+ if(!out->mopen_dec_route)
+ handleCloseForDeviceSwitch(ROUTE_UNCOMPRESSED);
+
+ if(!out->mopen_dec_mch_route)
+ handleCloseForDeviceSwitch(ROUTE_UNCOMPRESSED_MCH);
+
+ if(!out->mopen_passt_route)
+ handleCloseForDeviceSwitch(ROUTE_COMPRESSED);
+
+ if(!msw_open_trans_route)
+ handleCloseForDeviceSwitch(ROUTE_SW_TRANSCODED_COMPRESSED);
+
+ if(!mhw_open_trans_route)
+ handleCloseForDeviceSwitch(ROUTE_DSP_TRANSCODED_COMPRESSED);
+
+ if(out->mopen_dec_route)
+ handleSwitchAndOpenForDeviceSwitch(mdec_format_devices,
+ ROUTE_UNCOMPRESSED);
+ if(out->mopen_dec_mch_route)
+ handleSwitchAndOpenForDeviceSwitch(mdec_mch_format_devices,
+ ROUTE_UNCOMPRESSED_MCH);
+ if(out->mopen_passt_route)
+ handleSwitchAndOpenForDeviceSwitch(mpasst_format_devices,
+ ROUTE_COMPRESSED);
+ if(out->msw_open_trans_route)
+ handleSwitchAndOpenForDeviceSwitch(msw_trans_format_devices,
+ ROUTE_SW_TRANSCODED_COMPRESSED);
+ if(out->mhw_open_trans_route)
+ handleSwitchAndOpenForDeviceSwitch(mhw_trans_format_devices,
+ ROUTE_DSP_TRANSCODED_COMPRESSED);
+ }
+#endif
pthread_mutex_unlock(&adev->lock);
pthread_mutex_unlock(&out->lock);
}
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
- parse_compress_metadata(out, parms);
+ if (out->uc_strm_type == OFFLOAD_PLAYBACK_STREAM) {
+ ret = parse_compress_metadata(out, parms);
}
str_parms_destroy(parms);
@@ -707,42 +2079,206 @@
static uint32_t out_get_latency(const struct audio_stream_out *stream)
{
struct stream_out *out = (struct stream_out *)stream;
+ struct listnode *item;
+ struct alsa_handle *handle;
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD)
+ //TODO: decide based on the clip properties
+ if (out->uc_strm_type == OFFLOAD_PLAYBACK_STREAM)
return COMPRESS_OFFLOAD_PLAYBACK_LATENCY;
- return (out->config.period_count * out->config.period_size * 1000) /
- (out->config.rate);
+ item = list_head(&out->session_list);
+ handle = node_to_item(item, struct alsa_handle, list);
+ if(!handle) {
+ ALOGE("%s: error pcm handle NULL", __func__);
+ return -EINVAL;
+ }
+
+ return (handle->config.period_count * handle->config.period_size * 1000) /
+ (handle->config.rate);
}
static int out_set_volume(struct audio_stream_out *stream, float left,
float right)
{
struct stream_out *out = (struct stream_out *)stream;
- int volume[2];
+ struct listnode *node;
+ struct alsa_handle *handle;
+ struct audio_device *adev = out->dev;
+ int ret = -ENOSYS;
+ ALOGV("%s", __func__);
+ pthread_mutex_lock(&out->lock);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if (handle->pcm && (out->usecase == USECASE_AUDIO_PLAYBACK_MULTI_CH)){
+ /* only take left channel into account: the API is for stereo anyway */
+ out->muted = (left == 0.0f);
+ ret = 0;
+ } else if (handle->compr) {
- if (out->usecase == USECASE_AUDIO_PLAYBACK_MULTI_CH) {
- /* only take left channel into account: the API is for stereo anyway */
- out->muted = (left == 0.0f);
- return 0;
- } else if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
- const char *mixer_ctl_name = "Compress Playback Volume";
- struct audio_device *adev = out->dev;
- struct mixer_ctl *ctl;
+ out->left_volume = left;
+ out->right_volume = right;
- ctl = mixer_get_ctl_by_name(adev->mixer, mixer_ctl_name);
- if (!ctl) {
- ALOGE("%s: Could not get ctl for mixer cmd - %s",
- __func__, mixer_ctl_name);
- return -EINVAL;
+ //ret = set_compress_volume(handle, left, right);
}
- volume[0] = (int)(left * COMPRESS_PLAYBACK_VOLUME_MAX);
- volume[1] = (int)(right * COMPRESS_PLAYBACK_VOLUME_MAX);
- mixer_ctl_set_array(ctl, volume, sizeof(volume)/sizeof(volume[0]));
- return 0;
+ }
+ pthread_mutex_unlock(&out->lock);
+
+ return ret;
+}
+
+static int write_data(struct stream_out *out, struct alsa_handle *handle,
+ const void *buffer, int bytes) {
+
+ int ret = 0;
+ if (out->uc_strm_type == OFFLOAD_PLAYBACK_STREAM) {
+ ALOGV("%s: writing buffer (%d bytes) to compress device", __func__, bytes);
+
+ ret = compress_write(handle->compr, buffer, bytes);
+ ALOGV("%s: writing buffer (%d bytes) to compress device returned %d",
+ __func__, bytes, ret);
+        /* TODO: disable this if ms12 */
+
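+        /*
+         * A short write means the compress driver's buffer is full; queue a
+         * WAIT_FOR_BUFFER command so the offload thread signals WRITE_READY
+         * once the DSP frees up space.
+         */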
+ if (ret >= 0 && ret < (ssize_t)bytes) {
+ send_offload_cmd_l(out, OFFLOAD_CMD_WAIT_FOR_BUFFER);
+ }
+ return ret;
+ } else {
+ if (handle->pcm) {
+ if (out->muted)
+ memset((void *)buffer, 0, bytes);
+ ALOGV("%s: writing buffer (%d bytes) to pcm device", __func__, bytes);
+ ret = pcm_write(handle->pcm, (void *)buffer, bytes);
+ }
}
- return -ENOSYS;
+ if (ret != 0) {
+ if ((handle && handle->pcm))
+ ALOGE("%s: error %d - %s", __func__, ret, pcm_get_error(handle->pcm));
+ out_standby(&out->stream.common);
+ usleep(bytes * 1000000 / audio_stream_frame_size(&out->stream.common) /
+ out_get_sample_rate(&out->stream.common));
+ }
+ return bytes;
+}
+
+/*******************************************************************************
+Description: render
+*******************************************************************************/
+size_t render_offload_data(struct stream_out *out, const void *buffer, size_t bytes)
+{
+ int ret =0;
+ uint32_t renderedPcmBytes = 0;
+ int fragment_size;
+ uint32_t availableSize;
+ int bytes_to_write = bytes;
+ int renderType;
+ /*int metadataLength = sizeof(out->output_meta_data);*/
+ struct listnode *node;
+ struct alsa_handle *handle;
+
+ ALOGV("%s", __func__);
+
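+    /*
+     * Write this buffer to every handle attached to the stream: each route
+     * (stereo PCM, multichannel PCM, passthrough, transcode) has its own
+     * compress device. Gapless metadata is re-sent before the first write of
+     * a new track.
+     */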
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if (out->send_new_metadata) {
+ ALOGVV("send new gapless metadata");
+ compress_set_gapless_metadata(handle->compr, &out->gapless_mdata);
+ }
+
+ switch(handle->route_format) {
+ case ROUTE_UNCOMPRESSED:
+ ALOGVV("ROUTE_UNCOMPRESSED");
+ renderType = PCM_2CH_OUT;
+ break;
+ case ROUTE_UNCOMPRESSED_MCH:
+ ALOGVV("ROUTE_UNCOMPRESSED_MCH");
+ renderType = PCM_MCH_OUT;
+ break;
+ case ROUTE_COMPRESSED:
+ ALOGVV("ROUTE_COMPRESSED");
+ renderType = COMPRESSED_OUT;
+ break;
+ case ROUTE_SW_TRANSCODED_COMPRESSED:
+ ALOGVV("ROUTE_SW_TRANSCODED_COMPRESSED");
+ renderType = TRANSCODE_OUT;
+ break;
+ case ROUTE_DSP_TRANSCODED_COMPRESSED:
+ ALOGVV("ROUTE_DSP_TRANSCODED_COMPRESSED");
+ continue;
+ default:
+ continue;
+ };
+
+ fragment_size = handle->compr_config.fragment_size;
+ /*TODO handle timestamp case */
+#if USE_SWDECODE
+ while(audio_bitstream_sufficient_sample_to_render(out->bitstrm,
+ renderType, 1) == true) {
+ availableSize = audio_bitstream_get_output_buffer_write_ptr(out->bitstrm, renderType) -
+ audio_bitstream_get_output_buffer_ptr(out->bitstrm, renderType);
+ buffer = audio_bitstream_get_output_buffer_ptr(out->bitstrm, renderType);
+ bytes_to_write = availableSize;
+
+            /* TODO: metadata is only needed for TS mode */
+ out->output_meta_data.metadataLength = metadataLength;
+ out->output_meta_data.bufferLength = (availableSize >=
+ (fragment_size - metadataLength)) ?
+ fragment_size - metadataLength :
+ availableSize;
+ bytes_to_write = metadataLength +out->output_meta_data.bufferLength;
+ out->output_meta_data.timestamp = 0;
+ memcpy(out->write_temp_buf, &out->output_meta_data, metadataLength);
+ memcpy(out->write_temp_buf+metadataLength,
+ audio_bitstream_get_output_buffer_ptr(out->bitstrm, renderType),
+ out->output_meta_data.bufferLength);
+ ret = write_data(out, handle, out->write_temp_buf, bytes_to_write);
+#endif
+
+ ret = write_data(out, handle, buffer, bytes_to_write);
+ ALOGD("write_data returned with %d", ret);
+ if(ret < 0) {
+ ALOGE("write_data returned ret < 0");
+ return ret;
+ } else {
+ if (!out->playback_started) {
+ compress_start(handle->compr);
+ }
+ /*TODO:Do we need this
+ if(renderType == ROUTE_UNCOMPRESSED ||
+ (renderType == ROUTE_UNCOMPRESSED_MCH && !out->open_dec_route)) {
+ mFrameCount++;
+ renderedPcmBytes += out->output_meta_data.bufferLength;
+ }*/
+ renderedPcmBytes += ret;
+#if USE_SWDECODE
+            /* TODO: enable for MS11
+            audio_bitstream_copy_residue_output_start(out->bitstrm, renderType,
+                bytes_to_write);
+            TODO: what if ret < bytes_to_write */
+#endif
+ }
+#if USE_SWDECODE
+ }
+#endif
+ }
+ out->playback_started = 1;
+ out->offload_state = OFFLOAD_STATE_PLAYING;
+ out->send_new_metadata = 0;
+ return renderedPcmBytes;
+}
+
+size_t render_pcm_data(struct stream_out *out, const void *buffer, size_t bytes)
+{
+ ALOGV("%s", __func__);
+ size_t ret = 0;
+ struct listnode *node;
+ struct alsa_handle *handle;
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ ALOGV("%s handle is 0x%x", __func__,(uint32_t)handle);
+ ret = write_data(out, handle, buffer, bytes);
+ }
+ return ret;
}
static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
@@ -751,21 +2287,66 @@
struct stream_out *out = (struct stream_out *)stream;
struct audio_device *adev = out->dev;
ssize_t ret = 0;
+ struct listnode *node;
+ bool continueDecode;
+ struct alsa_handle *handle;
+ size_t bytes_consumed;
+ size_t total_bytes_consumed = 0;
+
+ ALOGV("%s bytes =%d", __func__, bytes);
pthread_mutex_lock(&out->lock);
+
+//TODO: handle a2dp
+/* if (mRouteAudioToA2dp &&
+ mA2dpUseCase == AudioHardwareALSA::USECASE_NONE) {
+ a2dpRenderingControl(A2DP_RENDER_SETUP);
+ }
+*/
+    /* TODO: metadata comes in set_parameters and will be passed in compress_open
+       for all formats except the ms11 formats; for ms11 it is set in the
+       set-decode-config path while opening the ms11 instance, hence the below
+       piece of code is not required */
+ /*
+ if(!out->dec_conf_set && is_decoder_config_required(out)) {
+ if (setDecodeConfig(out, (char *)buffer, bytes))
+ ALOGD("decoder configuration set");
+ }
+ */
+
if (out->standby) {
out->standby = false;
- pthread_mutex_lock(&adev->lock);
- ret = start_output_stream(out);
- pthread_mutex_unlock(&adev->lock);
- /* ToDo: If use case is compress offload should return 0 */
- if (ret != 0) {
- out->standby = true;
- goto exit;
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ pthread_mutex_lock(&adev->lock);
+ ret = start_output_stream(out, handle);
+ pthread_mutex_unlock(&adev->lock);
+ /* ToDo: If use case is compress offload should return 0 */
+ if (ret != 0) {
+ out->standby = true;
+ goto exit;
+ }
}
}
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
+ if (out->uc_strm_type == OFFLOAD_PLAYBACK_STREAM) {
+#if USE_SWDECODE
+ //TODO: Enable for MS11
+ copy_bitstream_internal_buffer(out->bitstrm, (char *)buffer, bytes);
+        //TODO: check if timestamp mode handles partial buffers
+ do {
+
+ bytes_consumed = 0;
+ ret = decode(out, (char *)buffer, bytes, &bytes_consumed, &continueDecode);
+ if(ret < 0)
+ goto exit;
+ /*TODO: check for return size from write when ms11 is removed*/
+ render_offload_data(out, continueDecode);
+ total_bytes_consumed += bytes_consumed;
+
+ } while(continueDecode == true);
+#endif
+#if 0
ALOGVV("%s: writing buffer (%d bytes) to compress device", __func__, bytes);
if (out->send_new_metadata) {
ALOGVV("send new gapless metadata");
@@ -807,11 +2388,50 @@
out_get_sample_rate(&out->stream.common));
}
return bytes;
+#endif
+ ret = render_offload_data(out, buffer, bytes);
+ total_bytes_consumed = ret;
+ } else {
+ ret = render_pcm_data(out, buffer, bytes);
+ total_bytes_consumed = ret;
+ }
+
+exit:
+ pthread_mutex_unlock(&out->lock);
+ ALOGV("total_bytes_consumed %d",total_bytes_consumed);
+ return total_bytes_consumed;
}
static int out_get_render_position(const struct audio_stream_out *stream,
uint32_t *dsp_frames)
{
+ struct listnode *node;
+ struct alsa_handle *handle;
+ struct stream_out *out = (struct stream_out *)stream;
+ struct audio_device *adev = out->dev;
+ *dsp_frames = 0;
+ ALOGV("%s", __func__);
+ pthread_mutex_lock(&out->lock);
+ if ((out->uc_strm_type == OFFLOAD_PLAYBACK_STREAM) && (dsp_frames != NULL)) {
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if ((handle && handle->compr &&
+ handle->route_format != ROUTE_DSP_TRANSCODED_COMPRESSED)){
+ compress_get_tstamp(handle->compr, (unsigned long *)dsp_frames,
+ &out->sample_rate);
+ ALOGV("%s rendered frames %d sample_rate %d",
+ __func__, *dsp_frames, out->sample_rate);
+ }
+        }
+        pthread_mutex_unlock(&out->lock);
+        return 0;
+ }
+ else {
+ pthread_mutex_unlock(&out->lock);
+ return -EINVAL;
+ }
+ return 0;
+#if 0
struct stream_out *out = (struct stream_out *)stream;
*dsp_frames = 0;
if ((out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) && (dsp_frames != NULL)) {
@@ -826,6 +2446,7 @@
return 0;
} else
return -EINVAL;
+#endif
}
static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
@@ -847,6 +2468,46 @@
static int out_get_presentation_position(const struct audio_stream_out *stream,
uint64_t *frames, struct timespec *timestamp)
{
+ struct listnode *node;
+ struct alsa_handle *handle;
+ struct stream_out *out = (struct stream_out *)stream;
+ struct audio_device *adev = out->dev;
+    int ret = -EINVAL;
+    *frames = 0;
+ ALOGV("%s", __func__);
+ pthread_mutex_lock(&out->lock);
+ if ((frames != NULL)) {
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if ((handle && handle->compr &&
+ handle->route_format != ROUTE_DSP_TRANSCODED_COMPRESSED)){
+ compress_get_tstamp(handle->compr, (unsigned long *)frames,
+ &out->sample_rate);
+ clock_gettime(CLOCK_MONOTONIC, timestamp);
+ ALOGV("%s rendered frames %d sample_rate %d",
+ __func__, *frames, out->sample_rate);
+ }
+ else if (handle->pcm) {
+ size_t avail;
+ if (pcm_get_htimestamp(handle->pcm, &avail, timestamp) == 0) {
+ size_t kernel_buffer_size = handle->config.period_size * handle->config.period_count;
+ int64_t signed_frames = out->written - kernel_buffer_size + avail;
+ // This adjustment accounts for buffering after app processor.
+ // It is based on estimated DSP latency per use case, rather than exact.
+ signed_frames -=
+ (platform_render_latency(handle->usecase) * out->sample_rate / 1000000LL);
+
+ // It would be unusual for this value to be negative, but check just in case ...
+                    if (signed_frames >= 0) {
+                        *frames = signed_frames;
+                        ret = 0;
+                    }
+ }
+ }
+
+ }
+ }
+ pthread_mutex_unlock(&out->lock);
+    return ret;
+#if 0
struct stream_out *out = (struct stream_out *)stream;
int ret = -1;
unsigned long dsp_frames;
@@ -887,6 +2548,7 @@
pthread_mutex_unlock(&out->lock);
return ret;
+#endif
}
static int out_set_callback(struct audio_stream_out *stream,
@@ -904,64 +2566,84 @@
static int out_pause(struct audio_stream_out* stream)
{
+ struct listnode *node;
+ struct alsa_handle *handle;
struct stream_out *out = (struct stream_out *)stream;
int status = -ENOSYS;
ALOGV("%s", __func__);
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
- pthread_mutex_lock(&out->lock);
- if (out->compr != NULL && out->offload_state == OFFLOAD_STATE_PLAYING) {
- status = compress_pause(out->compr);
+ pthread_mutex_lock(&out->lock);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ if (handle->compr != NULL && out->offload_state ==
+ OFFLOAD_STATE_PLAYING) {
+ status = compress_pause(handle->compr);
out->offload_state = OFFLOAD_STATE_PAUSED;
}
- pthread_mutex_unlock(&out->lock);
}
+ pthread_mutex_unlock(&out->lock);
return status;
}
static int out_resume(struct audio_stream_out* stream)
{
+ struct listnode *node;
+ struct alsa_handle *handle;
struct stream_out *out = (struct stream_out *)stream;
int status = -ENOSYS;
ALOGV("%s", __func__);
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
- status = 0;
- pthread_mutex_lock(&out->lock);
- if (out->compr != NULL && out->offload_state == OFFLOAD_STATE_PAUSED) {
- status = compress_resume(out->compr);
- out->offload_state = OFFLOAD_STATE_PLAYING;
- }
- pthread_mutex_unlock(&out->lock);
+ pthread_mutex_lock(&out->lock);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ status = 0;
+ if (handle->compr != NULL && out->offload_state ==
+ OFFLOAD_STATE_PAUSED) {
+ status = compress_resume(handle->compr);
+ out->offload_state = OFFLOAD_STATE_PLAYING;
+ }
}
+ pthread_mutex_unlock(&out->lock);
return status;
}
static int out_drain(struct audio_stream_out* stream, audio_drain_type_t type )
{
+ struct listnode *node;
+ struct alsa_handle *handle;
struct stream_out *out = (struct stream_out *)stream;
int status = -ENOSYS;
ALOGV("%s", __func__);
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
- pthread_mutex_lock(&out->lock);
- if (type == AUDIO_DRAIN_EARLY_NOTIFY)
- status = send_offload_cmd_l(out, OFFLOAD_CMD_PARTIAL_DRAIN);
- else
- status = send_offload_cmd_l(out, OFFLOAD_CMD_DRAIN);
- pthread_mutex_unlock(&out->lock);
+ pthread_mutex_lock(&out->lock);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ status = 0;
+ if (handle->compr != NULL) {
+ if (type == AUDIO_DRAIN_EARLY_NOTIFY)
+ status = send_offload_cmd_l(out, OFFLOAD_CMD_PARTIAL_DRAIN);
+ else
+ status = send_offload_cmd_l(out, OFFLOAD_CMD_DRAIN);
+ }
}
+ pthread_mutex_unlock(&out->lock);
return status;
}
static int out_flush(struct audio_stream_out* stream)
{
+ struct listnode *node;
+ struct alsa_handle *handle;
struct stream_out *out = (struct stream_out *)stream;
+ int status = -ENOSYS;
ALOGV("%s", __func__);
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
- pthread_mutex_lock(&out->lock);
- stop_compressed_output_l(out);
- pthread_mutex_unlock(&out->lock);
- return 0;
+ pthread_mutex_lock(&out->lock);
+ list_for_each(node, &out->session_list) {
+ handle = node_to_item(node, struct alsa_handle, list);
+ status = 0;
+ if (handle->compr != NULL) {
+ stop_compressed_output_l(out);
+ }
}
- return -ENOSYS;
+ pthread_mutex_unlock(&out->lock);
+ return status;
}
int adev_open_output_stream(struct audio_hw_device *dev,
@@ -973,7 +2655,9 @@
{
struct audio_device *adev = (struct audio_device *)dev;
struct stream_out *out;
- int i, ret;
+ struct alsa_handle *device_handle = NULL;
+ int i, ret, channels;
+ struct listnode *item;
ALOGV("%s: enter: sample_rate(%d) channel_mask(%#x) devices(%#x) flags(%#x)",
__func__, config->sample_rate, config->channel_mask, devices, flags);
@@ -982,7 +2666,9 @@
if (devices == AUDIO_DEVICE_NONE)
devices = AUDIO_DEVICE_OUT_SPEAKER;
+ list_init(&out->session_list);
+ reset_out_parameters(out);
out->flags = flags;
out->devices = devices;
out->dev = adev;
@@ -990,109 +2676,13 @@
out->sample_rate = config->sample_rate;
out->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
out->supported_channel_masks[0] = AUDIO_CHANNEL_OUT_STEREO;
+ out->config = config;
out->handle = handle;
+/* TODO: get hdmi/spdif format/channels from routing manager and initialize out->spdif_format & out->hdmi_format */
/* Init use case and pcm_config */
- if (out->flags == AUDIO_OUTPUT_FLAG_DIRECT &&
- out->devices & AUDIO_DEVICE_OUT_AUX_DIGITAL) {
- pthread_mutex_lock(&adev->lock);
- ret = read_hdmi_channel_masks(out);
- pthread_mutex_unlock(&adev->lock);
- if (ret != 0)
- goto error_open;
-
- if (config->sample_rate == 0)
- config->sample_rate = DEFAULT_OUTPUT_SAMPLING_RATE;
- if (config->channel_mask == 0)
- config->channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
-
- out->channel_mask = config->channel_mask;
- out->sample_rate = config->sample_rate;
- out->usecase = USECASE_AUDIO_PLAYBACK_MULTI_CH;
- out->config = pcm_config_hdmi_multi;
- out->config.rate = config->sample_rate;
- out->config.channels = popcount(out->channel_mask);
- out->config.period_size = HDMI_MULTI_PERIOD_BYTES / (out->config.channels * 2);
- } else if (out->flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
- if (config->offload_info.version != AUDIO_INFO_INITIALIZER.version ||
- config->offload_info.size != AUDIO_INFO_INITIALIZER.size) {
- ALOGE("%s: Unsupported Offload information", __func__);
- ret = -EINVAL;
- goto error_open;
- }
- if (!is_supported_format(config->offload_info.format)) {
- ALOGE("%s: Unsupported audio format", __func__);
- ret = -EINVAL;
- goto error_open;
- }
-
- out->compr_config.codec = (struct snd_codec *)
- calloc(1, sizeof(struct snd_codec));
-
- out->usecase = USECASE_AUDIO_PLAYBACK_OFFLOAD;
- if (config->offload_info.channel_mask)
- out->channel_mask = config->offload_info.channel_mask;
- else if (config->channel_mask)
- out->channel_mask = config->channel_mask;
- out->format = config->offload_info.format;
- out->sample_rate = config->offload_info.sample_rate;
-
- out->stream.set_callback = out_set_callback;
- out->stream.pause = out_pause;
- out->stream.resume = out_resume;
- out->stream.drain = out_drain;
- out->stream.flush = out_flush;
-
- out->compr_config.codec->id =
- get_snd_codec_id(config->offload_info.format);
- out->compr_config.fragment_size = COMPRESS_OFFLOAD_FRAGMENT_SIZE;
- out->compr_config.fragments = COMPRESS_OFFLOAD_NUM_FRAGMENTS;
- out->compr_config.codec->sample_rate =
- compress_get_alsa_rate(config->offload_info.sample_rate);
- out->compr_config.codec->bit_rate =
- config->offload_info.bit_rate;
- out->compr_config.codec->ch_in =
- popcount(config->channel_mask);
- out->compr_config.codec->ch_out = out->compr_config.codec->ch_in;
-
- if (flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING)
- out->non_blocking = 1;
-
- out->send_new_metadata = 1;
- create_offload_callback_thread(out);
- ALOGV("%s: offloaded output offload_info version %04x bit rate %d",
- __func__, config->offload_info.version,
- config->offload_info.bit_rate);
- } else if (out->flags & AUDIO_OUTPUT_FLAG_FAST) {
- out->usecase = USECASE_AUDIO_PLAYBACK_LOW_LATENCY;
- out->config = pcm_config_low_latency;
- out->sample_rate = out->config.rate;
- } else {
- out->usecase = USECASE_AUDIO_PLAYBACK_DEEP_BUFFER;
- out->config = pcm_config_deep_buffer;
- out->sample_rate = out->config.rate;
- }
-
- if (flags & AUDIO_OUTPUT_FLAG_PRIMARY) {
- if(adev->primary_output == NULL)
- adev->primary_output = out;
- else {
- ALOGE("%s: Primary output is already opened", __func__);
- ret = -EEXIST;
- goto error_open;
- }
- }
-
- /* Check if this usecase is already existing */
- pthread_mutex_lock(&adev->lock);
- if (get_usecase_from_list(adev, out->usecase) != NULL) {
- ALOGE("%s: Usecase (%d) is already present", __func__, out->usecase);
- pthread_mutex_unlock(&adev->lock);
- ret = -EEXIST;
- goto error_open;
- }
- pthread_mutex_unlock(&adev->lock);
-
+ out->hdmi_format = UNCOMPRESSED;
+ out->spdif_format = UNCOMPRESSED;
out->stream.common.get_sample_rate = out_get_sample_rate;
out->stream.common.set_sample_rate = out_set_sample_rate;
out->stream.common.get_buffer_size = out_get_buffer_size;
@@ -1119,6 +2709,138 @@
pthread_mutex_init(&out->lock, (const pthread_mutexattr_t *) NULL);
pthread_cond_init(&out->cond, (const pthread_condattr_t *) NULL);
+ if (out->flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+ ALOGE("%s: Usecase is OFFLOAD", __func__);
+ if (config->offload_info.version != AUDIO_INFO_INITIALIZER.version ||
+ config->offload_info.size != AUDIO_INFO_INITIALIZER.size) {
+ ALOGE("%s: Unsupported Offload information", __func__);
+ ret = -EINVAL;
+ goto error_open;
+ }
+
+ if (!is_supported_format(config->offload_info.format)) {
+ ALOGE("%s: Unsupported audio format", __func__);
+ ret = -EINVAL;
+ goto error_open;
+ }
+ out->compr_config.codec = (struct snd_codec *)
+ calloc(1, sizeof(struct snd_codec));
+ //Session/clip config.
+ out->format = config->offload_info.format;
+ out->sample_rate = config->offload_info.sample_rate;
+ out->compr_config.codec->id =
+ get_snd_codec_id(config->offload_info.format);
+ out->compr_config.fragment_size = COMPRESS_OFFLOAD_FRAGMENT_SIZE;
+ out->compr_config.fragments = COMPRESS_OFFLOAD_NUM_FRAGMENTS;
+ out->compr_config.codec->sample_rate =
+ compress_get_alsa_rate(config->offload_info.sample_rate);
+ out->compr_config.codec->bit_rate =
+ config->offload_info.bit_rate;
+ out->compr_config.codec->ch_in =
+ popcount(config->channel_mask);
+ out->compr_config.codec->ch_out = out->compr_config.codec->ch_in;
+
+ if (config->offload_info.channel_mask)
+ out->channel_mask = config->offload_info.channel_mask;
+ else if (config->channel_mask)
+ out->channel_mask = config->channel_mask;
+ out->uc_strm_type = OFFLOAD_PLAYBACK_STREAM;
+
+ //Initialize the handles
+ /* ------------------------------------------------------------------------
+ Update use decoder type and routing flags and corresponding states
+ decoderType will cache the decode types such as decode/passthrough/transcode
+ and in s/w or dsp. Besides, the states to open decode/passthrough/transcode
+ handles with the corresponding devices and device formats are updated
+ -------------------------------------------------------------------------*/
+ update_decode_type_and_routing_states(out);
+
+ /* ------------------------------------------------------------------------
+ Update rxHandle states
+ Based on the states, we open the driver and store the handle at appropriate
+ index
+ -------------------------------------------------------------------------*/
+ update_alsa_handle_state(out);
+
+ /* ------------------------------------------------------------------------
+        allocate the internal buffers used by the offload path
+ -------------------------------------------------------------------------*/
+ ret = allocate_internal_buffers(out);
+ if(ret < 0) {
+ ALOGE("%s:Error %d",__func__, ret);
+ goto error_handle;
+ }
+
+ //Callbacks
+ out->stream.set_callback = out_set_callback;
+ out->stream.pause = out_pause;
+ out->stream.resume = out_resume;
+ out->stream.drain = out_drain;
+ out->stream.flush = out_flush;
+
+ if (flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING)
+ out->non_blocking = 1;
+
+ out->send_new_metadata = 1;
+ create_offload_callback_thread(out);
+ ALOGV("%s: offloaded output offload_info version %04x bit rate %d",
+ __func__, config->offload_info.version,
+ config->offload_info.bit_rate);
+ } else { //if (out->flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) {
+ ALOGE("%s: Usecase is DEEP_BUFFER", __func__);
+ if((device_handle = get_alsa_handle())== NULL)
+ goto error_handle;
+ list_add_tail(&out->session_list, &device_handle->list);
+ device_handle->usecase = USECASE_AUDIO_PLAYBACK_DEEP_BUFFER;
+ device_handle->config = pcm_config_deep_buffer;
+ device_handle->out = out;
+ device_handle->cmd_pending = false;
+ out->sample_rate = device_handle->config.rate;
+ out->uc_strm_type = DEEP_BUFFER_PLAYBACK_STREAM;
+ out->buffer_size = device_handle->config.period_size *
+ audio_stream_frame_size(&out->stream.common);
+ }/* else {
+ if((device_handle = get_alsa_handle())== NULL)
+ goto error_handle;
+ list_add_tail(&out->session_list, &device_handle->list);
+ device_handle->usecase = USECASE_AUDIO_PLAYBACK_LOW_LATENCY;
+ device_handle->config = pcm_config_low_latency;
+ device_handle->sample_rate = device_handle->config.rate;
+ device_handle->out = out;
+ device_handle->cmd_pending = false;
+ out->uc_strm_type = LOW_LATENCY_PLAYBACK_STREAM;
+ out->buffer_size = device_handle->config.period_size *
+ audio_stream_frame_size(&out->stream.common);
+ }*/
+
+ if (flags & AUDIO_OUTPUT_FLAG_PRIMARY) {
+ ALOGE("%s: Usecase is primary ", __func__);
+ if(adev->primary_output == NULL)
+ adev->primary_output = out;
+ else {
+ ALOGE("%s: Primary output is already opened", __func__);
+ ret = -EEXIST;
+ goto error_open;
+ }
+ }
+
+ /* Check if this usecase is already existing */
+ pthread_mutex_lock(&adev->lock);
+ if (out->uc_strm_type != OFFLOAD_PLAYBACK_STREAM) {
+ if (get_usecase_from_list(adev, device_handle->usecase) != NULL) {
+ ALOGE("%s: Usecase (%d) is already present", __func__,
+ device_handle->usecase);
+ pthread_mutex_unlock(&adev->lock);
+ ret = -EEXIST;
+ goto error_open;
+ }
+ }
+ pthread_mutex_unlock(&adev->lock);
+
+
+ /* out->muted = false; by calloc() */
+
+
config->format = out->stream.common.get_format(&out->stream.common);
config->channel_mask = out->stream.common.get_channels(&out->stream.common);
config->sample_rate = out->stream.common.get_sample_rate(&out->stream.common);
@@ -1127,6 +2849,17 @@
ALOGV("%s: exit", __func__);
return 0;
+error_handle:
+ ret = -EINVAL;
+ ALOGE("%s: exit: error handle %d", __func__, ret);
+ while (!list_empty(&out->session_list)) {
+ item = list_head(&out->session_list);
+ list_remove(item);
+ device_handle = node_to_item(item, struct alsa_handle, list);
+ platform_free_usecase(device_handle->usecase);
+ free_alsa_handle(device_handle);
+ }
+
error_open:
free(out);
*stream_out = NULL;
@@ -1139,17 +2872,28 @@
{
struct stream_out *out = (struct stream_out *)stream;
struct audio_device *adev = out->dev;
- int ret = 0;
+ struct listnode *item;
+ struct alsa_handle *handle;
- ALOGV("%s: enter", __func__);
+ ALOGV("%s", __func__);
+
out_standby(&stream->common);
-
- if (out->usecase == USECASE_AUDIO_PLAYBACK_OFFLOAD) {
+ if (out->uc_strm_type == OFFLOAD_PLAYBACK_STREAM) {
destroy_offload_callback_thread(out);
- if (out->compr_config.codec != NULL)
- free(out->compr_config.codec);
+ while (!list_empty(&out->session_list)) {
+ item = list_head(&out->session_list);
+ list_remove(item);
+ handle = node_to_item(item, struct alsa_handle, list);
+ if(handle->compr_config.codec != NULL)
+ free(handle->compr_config.codec);
+ platform_free_usecase(handle->usecase);
+ free_alsa_handle(handle);
+ }
+ free(out->compr_config.codec);
}
+
+ free_internal_buffers(out);
pthread_cond_destroy(&out->cond);
pthread_mutex_destroy(&out->lock);
free(stream);
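
For reference, the offload branch in the open path above only fills in out->compr_config (fragment size, fragment count and a heap-allocated struct snd_codec). Below is a minimal sketch of how such a config is normally handed to the standard tinycompress library; the card/device numbers, the MP3 codec id and the fragment values are placeholders, not values taken from this patch.

#include <string.h>
#include <sound/compress_params.h>
#include <tinycompress/tinycompress.h>

/* Sketch only: open a compressed playback stream with a config shaped like the
 * one built in the offload branch above. Card 0 / device 9 and the codec id
 * are illustrative placeholders; the HAL above derives the codec id with
 * get_snd_codec_id() and the rate with compress_get_alsa_rate(). */
static struct compress *open_offload_stream(unsigned int rate, unsigned int channels)
{
    static struct snd_codec codec;
    struct compr_config config;

    memset(&codec, 0, sizeof(codec));
    codec.id = SND_AUDIOCODEC_MP3;
    codec.sample_rate = rate;
    codec.ch_in = channels;
    codec.ch_out = channels;

    config.fragment_size = 32 * 1024;   /* stand-in for COMPRESS_OFFLOAD_FRAGMENT_SIZE */
    config.fragments = 4;               /* stand-in for COMPRESS_OFFLOAD_NUM_FRAGMENTS */
    config.codec = &codec;

    return compress_open(0, 9, COMPRESS_IN, &config);
}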
diff --git a/hal_mpq/mpq8092/platform.c b/hal_mpq/mpq8092/platform.c
index d7d67d5..3c1b4f7 100644
--- a/hal_mpq/mpq8092/platform.c
+++ b/hal_mpq/mpq8092/platform.c
@@ -91,20 +91,39 @@
struct csd_data *csd;
};
-static const int pcm_device_table[AUDIO_USECASE_MAX][2] = {
- [USECASE_AUDIO_PLAYBACK_DEEP_BUFFER] = {DEEP_BUFFER_PCM_DEVICE,
- DEEP_BUFFER_PCM_DEVICE},
- [USECASE_AUDIO_PLAYBACK_LOW_LATENCY] = {LOWLATENCY_PCM_DEVICE,
- LOWLATENCY_PCM_DEVICE},
- [USECASE_AUDIO_PLAYBACK_MULTI_CH] = {MULTIMEDIA2_PCM_DEVICE,
- MULTIMEDIA2_PCM_DEVICE},
+static int pcm_device_table[AUDIO_USECASE_MAX][4] = {
+ [USECASE_AUDIO_PLAYBACK_DEEP_BUFFER] = {USECASE_AUDIO_PLAYBACK_DEEP_BUFFER,
+ DEEP_BUFFER_PCM_DEVICE,
+ DEEP_BUFFER_PCM_DEVICE, 0},
+ [USECASE_AUDIO_PLAYBACK_LOW_LATENCY] = {USECASE_AUDIO_PLAYBACK_LOW_LATENCY,
+ LOWLATENCY_PCM_DEVICE,
+ LOWLATENCY_PCM_DEVICE, 0},
+ [USECASE_AUDIO_PLAYBACK_MULTI_CH] = {USECASE_AUDIO_PLAYBACK_MULTI_CH,
+ MULTIMEDIA2_PCM_DEVICE,
+ MULTIMEDIA2_PCM_DEVICE, 0},
+ [USECASE_AUDIO_PLAYBACK_MULTI_CH] = {USECASE_AUDIO_PLAYBACK_MULTI_CH,
+ MULTI_CHANNEL_PCM_DEVICE,
+ MULTI_CHANNEL_PCM_DEVICE, 0},
[USECASE_AUDIO_PLAYBACK_OFFLOAD] =
- {PLAYBACK_OFFLOAD_DEVICE, PLAYBACK_OFFLOAD_DEVICE},
- [USECASE_AUDIO_RECORD] = {AUDIO_RECORD_PCM_DEVICE, AUDIO_RECORD_PCM_DEVICE},
- [USECASE_AUDIO_RECORD_COMPRESS] = {COMPRESS_CAPTURE_DEVICE, COMPRESS_CAPTURE_DEVICE},
- [USECASE_AUDIO_RECORD_LOW_LATENCY] = {LOWLATENCY_PCM_DEVICE,
- LOWLATENCY_PCM_DEVICE},
- [USECASE_AUDIO_RECORD_FM_VIRTUAL] = {MULTIMEDIA2_PCM_DEVICE,
+ {USECASE_AUDIO_PLAYBACK_OFFLOAD, PLAYBACK_OFFLOAD_DEVICE1,
+ PLAYBACK_OFFLOAD_DEVICE1, 0},
+ [USECASE_AUDIO_PLAYBACK_OFFLOAD1] = {USECASE_AUDIO_PLAYBACK_OFFLOAD,
+ PLAYBACK_OFFLOAD_DEVICE2,
+ PLAYBACK_OFFLOAD_DEVICE2, 0},
+ [USECASE_AUDIO_PLAYBACK_OFFLOAD2] = {USECASE_AUDIO_PLAYBACK_OFFLOAD,
+ PLAYBACK_OFFLOAD_DEVICE3,
+ PLAYBACK_OFFLOAD_DEVICE3, 0},
+ [USECASE_AUDIO_PLAYBACK_OFFLOAD3] = {USECASE_AUDIO_PLAYBACK_OFFLOAD,
+ PLAYBACK_OFFLOAD_DEVICE4,
+ PLAYBACK_OFFLOAD_DEVICE4, 0},
+ [USECASE_AUDIO_RECORD] = {USECASE_AUDIO_RECORD, AUDIO_RECORD_PCM_DEVICE,
+ AUDIO_RECORD_PCM_DEVICE, 0},
+ [USECASE_AUDIO_RECORD_COMPRESS] = {USECASE_AUDIO_RECORD_COMPRESS, COMPRESS_CAPTURE_DEVICE,
+ COMPRESS_CAPTURE_DEVICE, 0},
+ [USECASE_AUDIO_RECORD_LOW_LATENCY] = {USECASE_AUDIO_RECORD_LOW_LATENCY,
+ LOWLATENCY_PCM_DEVICE,
+ LOWLATENCY_PCM_DEVICE, 0},
+ /* [USECASE_AUDIO_RECORD_FM_VIRTUAL] = {,
MULTIMEDIA2_PCM_DEVICE},
[USECASE_AUDIO_PLAYBACK_FM] = {FM_PLAYBACK_PCM_DEVICE, FM_CAPTURE_PCM_DEVICE},
[USECASE_VOICE_CALL] = {VOICE_CALL_PCM_DEVICE, VOICE_CALL_PCM_DEVICE},
@@ -124,6 +143,7 @@
INCALL_MUSIC_UPLINK2_PCM_DEVICE},
[USECASE_AUDIO_SPKR_CALIB_RX] = {SPKR_PROT_CALIB_RX_PCM_DEVICE, -1},
[USECASE_AUDIO_SPKR_CALIB_TX] = {-1, SPKR_PROT_CALIB_TX_PCM_DEVICE},
+*/
};
/* Array to store sound devices */
@@ -593,12 +613,39 @@
{
int device_id;
if (device_type == PCM_PLAYBACK)
- device_id = pcm_device_table[usecase][0];
- else
device_id = pcm_device_table[usecase][1];
+ else
+ device_id = pcm_device_table[usecase][2];
return device_id;
}
+audio_usecase_t platform_get_usecase(
+ audio_usecase_stream_type_t uc_type)
+{
+ int i = 0;
+ for(i =0;i<AUDIO_USECASE_MAX; i++)
+ if((pcm_device_table[i][0] == (int)uc_type) &&
+ (pcm_device_table[i][3] == 0)) {
+ pcm_device_table[i][3] = 1;
+ break;
+ }
+
+ if(i == AUDIO_USECASE_MAX)
+ return -EINVAL;
+ else
+ return (audio_usecase_t)i;
+}
+
+int platform_free_usecase(audio_usecase_t uc_id)
+{
+ if(uc_id >= AUDIO_USECASE_MAX) {
+ ALOGV("%s: invalid usecase(%d)", __func__, uc_id);
+ return -EINVAL;
+ }
+ pcm_device_table[uc_id][3] = 0;
+ return 0;
+}
+
int platform_send_audio_calibration(void *platform, snd_device_t snd_device)
{
struct platform_data *my_data = (struct platform_data *)platform;
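
platform_get_usecase()/platform_free_usecase() above treat column [3] of pcm_device_table as a busy flag: the getter scans for a row whose column [0] matches the requested type and is not yet reserved, marks it and returns its index, while the free call clears the flag. The sketch below shows the intended acquire/use/release pattern under the assumption that the HAL's platform headers are available; the argument value passed to platform_get_usecase() and the error handling are illustrative, not taken verbatim from this patch.

/* Sketch of the acquire/use/release pattern around the table above. */
static int open_offload_pcm_device(void)
{
    audio_usecase_t uc = platform_get_usecase(USECASE_AUDIO_PLAYBACK_OFFLOAD); /* assumed arg */
    if ((int)uc < 0)
        return -EBUSY;                  /* every matching row's busy flag (column [3]) is set */

    int pcm_dev = platform_get_pcm_device_id(uc, PCM_PLAYBACK);
    /* ... open the driver on pcm_dev and run the stream ... */

    platform_free_usecase(uc);          /* clears the busy flag so the slot can be reused */
    return 0;
}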
diff --git a/hal_mpq/mpq8092/platform.h b/hal_mpq/mpq8092/platform.h
index 7559258..2a81df5 100644
--- a/hal_mpq/mpq8092/platform.h
+++ b/hal_mpq/mpq8092/platform.h
@@ -26,6 +26,7 @@
FLUENCE_QUAD_MIC = 0x2,
};
+#include <hardware/audio.h>
/*
* Below are the devices for which is back end is same, SLIMBUS_0_RX.
* All these devices are handled by the internal HW codec. We can
@@ -34,6 +35,8 @@
#define AUDIO_DEVICE_OUT_ALL_CODEC_BACKEND \
(AUDIO_DEVICE_OUT_EARPIECE | AUDIO_DEVICE_OUT_SPEAKER | \
AUDIO_DEVICE_OUT_WIRED_HEADSET | AUDIO_DEVICE_OUT_WIRED_HEADPHONE)
+/* TODO: remove this once it is defined in audio.h */
+#define AUDIO_DEVICE_OUT_SPDIF 0x4000
/* Sound devices specific to the platform
* The DEVICE_OUT_* and DEVICE_IN_* should be mapped to these sound
@@ -154,6 +157,16 @@
#define LOW_LATENCY_OUTPUT_PERIOD_SIZE 240
#define LOW_LATENCY_OUTPUT_PERIOD_COUNT 2
+/*******************************************************************************
+ADTS HEADER PARSING
+*******************************************************************************/
+//Required for ADTS Header Parsing
+#define ADTS_HEADER_SYNC_RESULT 0xfff0
+#define ADTS_HEADER_SYNC_MASK 0xfff6
+/*******************************************************************************
+HDMI and SPDIF Device Output format control
+*******************************************************************************/
+
#define HDMI_MULTI_PERIOD_SIZE 336
#define HDMI_MULTI_PERIOD_COUNT 8
#define HDMI_MULTI_DEFAULT_CHANNEL_COUNT 6
@@ -174,8 +187,14 @@
#define INCALL_MUSIC_UPLINK2_PCM_DEVICE 16
#define SPKR_PROT_CALIB_RX_PCM_DEVICE 5
#define SPKR_PROT_CALIB_TX_PCM_DEVICE 22
-#define PLAYBACK_OFFLOAD_DEVICE 9
#define COMPRESS_VOIP_CALL_PCM_DEVICE 3
+#define MULTI_CHANNEL_PCM_DEVICE 1
+#define VOICE_CALL_PCM_DEVICE 2
+//TODO: update the device number as per the dai links
+#define PLAYBACK_OFFLOAD_DEVICE1 2
+#define PLAYBACK_OFFLOAD_DEVICE2 3
+#define PLAYBACK_OFFLOAD_DEVICE3 4
+#define PLAYBACK_OFFLOAD_DEVICE4 19
#define LOWLATENCY_PCM_DEVICE 15
#define COMPRESS_CAPTURE_DEVICE 19
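
ADTS_HEADER_SYNC_MASK/ADTS_HEADER_SYNC_RESULT above encode the usual ADTS sync test: all 12 syncword bits set and the two layer bits cleared, i.e. (header & 0xfff6) == 0xfff0. A minimal sketch of how the two macros are typically applied when scanning a byte stream for a frame header follows; the helper names and calling convention are assumptions, not part of this patch.

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Sketch only: locate an ADTS frame header in a byte stream, assuming the
 * ADTS_HEADER_SYNC_* macros defined in platform.h above. */
static bool is_adts_sync(const uint8_t *p)
{
    uint16_t hdr = (uint16_t)((p[0] << 8) | p[1]);
    /* syncword must be 0xFFF and the layer bits must be 0 */
    return (hdr & ADTS_HEADER_SYNC_MASK) == ADTS_HEADER_SYNC_RESULT;
}

static const uint8_t *find_adts_frame(const uint8_t *buf, size_t len)
{
    for (size_t i = 0; i + 1 < len; i++)
        if (is_adts_sync(buf + i))
            return buf + i;
    return NULL;
}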
@@ -235,11 +254,48 @@
void hw_info_append_hw_type(void *hw_info, snd_device_t snd_device,
char *device_name);
-#define SAMPLES_PER_CHANNEL 1536*2
+/*******************************************************************************
+USECASES AND THE CORRESPONDING DEVICE FORMATS THAT WE SUPPORT IN HAL
+*******************************************************************************/
+/*
+In general a maximum of 2 handles is needed for pass-through. One additional
+handle is required for transcode, since transcode always uses a PCM handle
+followed by a transcode handle. So an (AC3/EAC3) pass-through + transcode
+case requires 1 handle for pass-through, 1 for PCM and 1 for transcode.
+*/
+#define NUM_DEVICES_SUPPORT_COMPR_DATA 2+1
+#define NUM_SUPPORTED_CODECS 16
+#define NUM_COLUMN_FOR_INDEXING 2
+#define NUM_STATES_FOR_EACH_DEVICE_FMT 3
+#define DECODER_TYPE_IDX 0
+#define ROUTE_FORMAT_IDX 1
+
+#define MIN_SIZE_FOR_METADATA 64
+#define NUM_OF_PERIODS 8
+/* The period size must be a multiple of (channels * bytes per sample),
+so the minimum period size = LCM(1,2,...,8) * 4 = 840 * 4 = 3360 */
+#define PERIOD_SIZE_COMPR 3360
+#define MS11_INPUT_BUFFER_SIZE 1536
+/* Maximum period size exposed by the compress driver.
+Update this value whenever the driver's period size changes. */
+#define PLAYBACK_MAX_PERIOD_SIZE (160 * 1024)
+
+#define COMPR_INPUT_BUFFER_SIZE (PERIOD_SIZE_COMPR - MIN_SIZE_FOR_METADATA)
+#define PCM_16_BITS_PER_SAMPLE 2
+#define PCM_24_BITS_PER_SAMPLE 3
+#define AC3_PERIOD_SIZE 1536 * PCM_16_BITS_PER_SAMPLE
+#define TIME_PER_BUFFER 40 //Time duration in ms
+#define SAMPLES_PER_CHANNEL 32*1024 // 1536*2, TODO: correct this value
#define MAX_INPUT_CHANNELS_SUPPORTED 8
#define FACTOR_FOR_BUFFERING 2
#define STEREO_CHANNELS 2
#define MAX_OUTPUT_CHANNELS_SUPPORTED 8
+#define PCM_BLOCK_PER_CHANNEL_MS11 1536*2
+#define AAC_BLOCK_PER_CHANNEL_MS11 768
+#define NUMBER_BITS_IN_A_BYTE 8
+#define AC3_BUFFER_SIZE 1920*2
+
+#define MAX_OUTPUT_CHANNELS_SUPPORTED 8
#define PCM_2CH_OUT 0
#define PCM_MCH_OUT 1
@@ -248,6 +304,333 @@
#define TRANSCODE_OUT 3
#define FACTOR_FOR_BUFFERING 2
+#define NUM_DEVICES_SUPPORT_COMPR_DATA 2+1
+#define NUM_SUPPORTED_CODECS 16
+#define NUM_COLUMN_FOR_INDEXING 2
+#define NUM_STATES_FOR_EACH_DEVICE_FMT 3
+#define DECODER_TYPE_IDX 0
+#define ROUTE_FORMAT_IDX 1
+#define NUM_OF_PERIODS 8
+
+enum {
+ LPCM,
+ MULTI_CH_PCM,
+ COMPR,
+ TRANSCODE
+};
+
+
+/*
+List of indexes of the supported formats.
+Redundant formats such as AAC-LC and HE-AAC do not get their own index; they
+are handled under the AAC index.
+*/
+enum {
+ PCM_IDX = 0,
+ AAC_IDX,
+ AC3_IDX,
+ EAC3_IDX,
+ DTS_IDX,
+ DTS_LBR_IDX,
+ MP3_IDX,
+ WMA_IDX,
+ WMA_PRO_IDX,
+ MP2_IDX,
+ ALL_FORMATS_IDX
+};
+/*
+List of pass-throughs supported in the current use cases
+*/
+enum {
+ NO_PASSTHROUGH = 0,
+ AC3_PASSTHR,
+ EAC3_PASSTHR,
+ DTS_PASSTHR
+};
+/*
+List of transcoders supported in the current use cases
+*/
+enum {
+ NO_TRANSCODER = 0,
+ AC3_TRANSCODER,
+ DTS_TRANSCODER
+};
+/*
+End-device format requested by the user/app through set_parameters
+*/
+enum {
+ UNCOMPRESSED = 0,
+ COMPRESSED,
+ COMPRESSED_CONVERT_EAC3_AC3,
+ COMPRESSED_CONVERT_ANY_AC3,
+ COMPRESSED_CONVERT_ANY_DTS,
+ AUTO_DEVICE_FORMAT,
+ UNCOMPRESSED_MCH, /* not to be exposed, internal use only */
+ COMPRESSED_CONVERT_AC3_ASSOC, /* not to be exposed, internal use only */
+ ALL_DEVICE_FORMATS
+};
+/*
+Types of data routed to the end device
+*/
+typedef enum {
+ ROUTE_NONE = 0x0,
+ ROUTE_UNCOMPRESSED = 0x1,
+ ROUTE_COMPRESSED = 0x2,
+ ROUTE_SW_TRANSCODED = 0x10, //route sub-format, not to be used directly
+ ROUTE_DSP_TRANSCODED = 0x20, //route sub-format, not to be used directly
+ ROUTE_MCH = 0x40, //route sub-format, not to be used directly
+ ROUTE_UNCOMPRESSED_MCH = (ROUTE_UNCOMPRESSED | ROUTE_MCH),
+ ROUTE_SW_TRANSCODED_COMPRESSED = (ROUTE_COMPRESSED | ROUTE_SW_TRANSCODED),
+ ROUTE_DSP_TRANSCODED_COMPRESSED = (ROUTE_COMPRESSED | ROUTE_DSP_TRANSCODED)
+}route_format_t;
+/*
+List of end device formats
+*/
+enum {
+ FORMAT_INVALID = -1,
+ FORMAT_PCM,
+ FORMAT_COMPR
+};
+/*
+Below are the only decode types that we perform
+*/
+enum {
+ DSP_DECODE = 1, // render uncompressed
+ DSP_PASSTHROUGH = 2, // render compressed
+ DSP_TRANSCODE = 4, // render as compressed
+ SW_DECODE = 8, // render as uncompressed
+ SW_DECODE_MCH = 16, // render as uncompressed
+ SW_PASSTHROUGH = 32, // render compressed
+ SW_TRANSCODE = 64, // render compressed
+ NUM_DECODE_PATH = 7
+};
+/*
+Modes of buffering that we can support.
+As of now, we only support input buffering, to the extent specified by the use case.
+*/
+enum {
+ NO_BUFFERING_MODE = 0,
+ INPUT_BUFFERING_MODE,
+ OUTPUT_BUFFEING_MODE
+};
+/*
+playback controls
+*/
+enum {
+ PLAY = 1,
+ PAUSE = (1<<1),
+ RESUME = (1<<2),
+ SEEK = (1<<3),
+ EOS = (1<<4),
+ STOP = (1<<5),
+ STANDBY = (1<<6),
+ INIT = (1<<7),
+};
+/*
+Multiple instances of a use case
+*/
+enum {
+ STEREO_DRIVER = 0,
+ MULTI_CHANNEL_DRIVER,
+ COMRPESSED_DRIVER,
+};
+/*
+Instance bits
+*/
+enum {
+ MULTI_CHANNEL_1_BIT = 1<<4,
+ MULTI_CHANNEL_2_BIT = 1<<5,
+ MULTI_CHANNEL_3_BIT = 1<<6,
+ COMPRESSED_1_BIT = 1<<12,
+ COMPRESSED_2_BIT = 1<<13,
+ COMPRESSED_3_BIT = 1<<14,
+ COMPRESSED_4_BIT = 1<<15,
+ COMPRESSED_5_BIT = 1<<16,
+ COMPRESSED_6_BIT = 1<<17
+};
+
+/*
+List of supported formats configured from the framework
+*/
+static const int supportedFormats[NUM_SUPPORTED_CODECS] = {
+ AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_FORMAT_PCM_24_BIT,
+ AUDIO_FORMAT_AAC,
+ AUDIO_FORMAT_HE_AAC_V1,
+ AUDIO_FORMAT_HE_AAC_V2,
+ AUDIO_FORMAT_AAC_ADIF,
+ AUDIO_FORMAT_AC3,
+ AUDIO_FORMAT_AC3_DM,
+ AUDIO_FORMAT_EAC3,
+ AUDIO_FORMAT_EAC3_DM,
+ AUDIO_FORMAT_DTS,
+ AUDIO_FORMAT_DTS_LBR,
+ AUDIO_FORMAT_MP3,
+ AUDIO_FORMAT_WMA,
+ AUDIO_FORMAT_WMA_PRO,
+ AUDIO_FORMAT_MP2
+};
+/*
+Only a limited number of decoder types can be stored with bit masks.
+*/
+static const int route_to_driver[NUM_DECODE_PATH][NUM_COLUMN_FOR_INDEXING] = {
+ {DSP_DECODE, ROUTE_UNCOMPRESSED_MCH},
+ {DSP_PASSTHROUGH, ROUTE_COMPRESSED},
+ {DSP_TRANSCODE, ROUTE_DSP_TRANSCODED_COMPRESSED},
+ {SW_DECODE, ROUTE_UNCOMPRESSED},
+ {SW_DECODE_MCH, ROUTE_UNCOMPRESSED_MCH},
+ {SW_PASSTHROUGH, ROUTE_COMPRESSED},
+ {SW_TRANSCODE, ROUTE_SW_TRANSCODED_COMPRESSED}
+};
+/*
+Table to query the index for a given format
+*/
+static const int format_index[NUM_SUPPORTED_CODECS][NUM_COLUMN_FOR_INDEXING] = {
+/*---------------------------------------------
+| FORMAT | INDEX |
+----------------------------------------------*/
+ {AUDIO_FORMAT_PCM_16_BIT, PCM_IDX},
+ {AUDIO_FORMAT_PCM_24_BIT, PCM_IDX},
+ {AUDIO_FORMAT_AAC, AAC_IDX},
+ {AUDIO_FORMAT_HE_AAC_V1, AAC_IDX},
+ {AUDIO_FORMAT_HE_AAC_V2, AAC_IDX},
+ {AUDIO_FORMAT_AAC_ADIF, AAC_IDX},
+ {AUDIO_FORMAT_AC3, AC3_IDX},
+ {AUDIO_FORMAT_AC3_DM, AC3_IDX},
+ {AUDIO_FORMAT_EAC3, EAC3_IDX},
+ {AUDIO_FORMAT_EAC3_DM, EAC3_IDX},
+ {AUDIO_FORMAT_DTS, DTS_IDX},
+ {AUDIO_FORMAT_DTS_LBR, DTS_LBR_IDX},
+ {AUDIO_FORMAT_MP3, MP3_IDX},
+ {AUDIO_FORMAT_WMA, WMA_IDX},
+ {AUDIO_FORMAT_WMA_PRO, WMA_PRO_IDX},
+ {AUDIO_FORMAT_MP2, MP2_IDX}
+};
+
+/*
+Table to query, for non-HDMI/SPDIF devices, the states such as the type of
+decode, the type of data routed to the end device and the type of transcoding needed
+*/
+static const int usecase_decode_format[ALL_FORMATS_IDX*NUM_STATES_FOR_EACH_DEVICE_FMT] = {
+/*-----------------
+| UNCOMPR |
+-----------------*/
+/* PCM */
+ DSP_DECODE, //PCM_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER,//TRANSCODE_FORMAT
+/* PCM */
+ SW_DECODE, // AAC_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER,//TRANSCODE_FORMAT
+/* PCM */
+ SW_DECODE, //AC3_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER,//TRANSCODE_FORMAT
+/* PCM */
+ SW_DECODE, //EAC3_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER,//TRANSCODE_FORMAT
+/* PCM */
+ DSP_DECODE, //DTS_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER,//TRANSCODE_FORMAT
+/* PCM */
+ DSP_DECODE, //DTS_LBR_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER,//TRANSCODE_FORMAT
+/* PCM */
+ DSP_DECODE, //MP3_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER,//TRANSCODE_FORMAT
+/* PCM */
+ DSP_DECODE, //WMA_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER,//TRANSCODE_FORMAT
+/* PCM */
+ DSP_DECODE, //WMA_PRO_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER,//TRANSCODE_FORMAT
+/* PCM */
+ DSP_DECODE, //MP2_IDX
+ FORMAT_PCM, //ROUTE_FORMAT
+ NO_TRANSCODER//TRANSCODE_FORMAT
+};
+/*
+Table to query, for HDMI and SPDIF devices, the states such as the type of
+decode, the type of data routed to the end device and the type of transcoding needed
+*/
+static const int usecase_docode_hdmi_spdif[ALL_FORMATS_IDX*NUM_STATES_FOR_EACH_DEVICE_FMT]
+ [ALL_DEVICE_FORMATS] = {
+/*-------------------------------------------------------------------------------------------------------------------------------------------------------
+| UNCOMPRESSED | COMPR | COMPR_CONV | COMPR_CONV | COMPR_CONV | AUTO | UNCOMPR_MCH | AC3_AC3 |
+| | | EAC3_AC3 | ANY_AC3 | ANY_DTS | | | |
+--------------------------------------------------------------------------------------------------------------------------------------------------------*/
+/* PCM PCM PCM PCM PCM PCM PCM PCM */
+ {DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE|DSP_TRANSCODE, DSP_DECODE, DSP_DECODE, DSP_DECODE}, //PCM_IDX
+ {FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM}, //ROUTE_FORMAT
+ {NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, DTS_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER}, //TRANSCODE_FMT
+/* PCM PCM PCM AC3 PCM PCM PCM PCM */
+ {SW_DECODE, SW_DECODE, SW_DECODE, SW_TRANSCODE, DSP_DECODE, SW_DECODE, SW_DECODE_MCH, SW_DECODE}, //AAC_IDX
+ {FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM}, //ROUTE_FORMAT
+ {NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, AC3_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER}, //TRANSCODE_FMT
+/* PCM AC3 AC3 AC3 PCM AC3 PCM AC3 */
+ {SW_DECODE, SW_PASSTHROUGH, SW_PASSTHROUGH, SW_PASSTHROUGH, DSP_DECODE, SW_PASSTHROUGH, SW_DECODE_MCH, SW_TRANSCODE}, //AC3_IDX
+ {FORMAT_PCM, FORMAT_COMPR, FORMAT_COMPR, FORMAT_COMPR, FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_COMPR}, //ROUTE_FORMAT
+ {NO_TRANSCODER, AC3_PASSTHR, AC3_PASSTHR, AC3_PASSTHR, NO_TRANSCODER, AC3_PASSTHR, NO_TRANSCODER, AC3_TRANSCODER}, //TRANSCODE_FMT
+/* PCM EAC3 AC3 AC3 PCM EAC3 PCM PCM */
+ {SW_DECODE, SW_PASSTHROUGH, SW_TRANSCODE, SW_TRANSCODE, DSP_DECODE, SW_PASSTHROUGH, SW_DECODE_MCH, SW_TRANSCODE}, //EAC3_IDX
+ {FORMAT_PCM, FORMAT_COMPR, FORMAT_COMPR, FORMAT_COMPR, FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_COMPR}, //ROUTE_FORMAT
+ {NO_TRANSCODER, EAC3_PASSTHR, AC3_TRANSCODER, AC3_TRANSCODER, NO_TRANSCODER, EAC3_PASSTHR, NO_TRANSCODER, AC3_TRANSCODER}, //TRANSCODE_FMT
+/* PCM DTS PCM PCM DTS DTS PCM PCM */
+ {DSP_DECODE, DSP_PASSTHROUGH, DSP_DECODE, DSP_DECODE, DSP_PASSTHROUGH, DSP_PASSTHROUGH, DSP_DECODE, DSP_DECODE},//DTS_IDX
+ {FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM, FORMAT_COMPR, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM}, //ROUTE_FORMAT
+ {NO_TRANSCODER, DTS_PASSTHR, NO_TRANSCODER, NO_TRANSCODER, DTS_PASSTHR, DTS_PASSTHR, NO_TRANSCODER, NO_TRANSCODER}, //TRANSCODE_FMT
+/* PCM DTS_LBR PCM PCM DTS DTS PCM PCM */
+ {DSP_DECODE, DSP_PASSTHROUGH, DSP_DECODE, DSP_DECODE, DSP_PASSTHROUGH, DSP_PASSTHROUGH, DSP_DECODE, DSP_DECODE},//DTS_LBR_IDX
+ {FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM, FORMAT_COMPR, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM}, //ROUTE_FORMAT
+ {NO_TRANSCODER, DTS_PASSTHR, NO_TRANSCODER, NO_TRANSCODER, DTS_PASSTHR, DTS_PASSTHR, NO_TRANSCODER, NO_TRANSCODER}, //TRANSCODE_FMT
+/* PCM PCM PCM PCM DTS PCM PCM PCM */
+ {DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE|DSP_TRANSCODE, DSP_DECODE, DSP_DECODE, DSP_DECODE}, //MP3_IDX
+ {FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM}, //ROUTE_FORMAT
+ {NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, DTS_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER}, //TRANSCODE_FMT
+/* PCM PCM PCM PCM DTS PCM PCM PCM */
+ {DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE|DSP_TRANSCODE, DSP_DECODE, DSP_DECODE, DSP_DECODE}, //WMA_IDX
+ {FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM}, //ROUTE_FORMAT
+ {NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, DTS_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER}, //TRANSCODE_FMT
+/* PCM PCM PCM PCM DTS PCM PCM PCM */
+ {DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE|DSP_TRANSCODE, DSP_DECODE, DSP_DECODE, DSP_DECODE}, //WMA_PRO_IDX
+ {FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM}, //ROUTE_FORMAT
+ {NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, DTS_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER}, //TRANSCODE_FMT
+/* PCM PCM PCM PCM DTS PCM PCM PCM */
+ {DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE, DSP_DECODE|DSP_TRANSCODE, DSP_DECODE, DSP_DECODE, DSP_DECODE}, //MP2_IDX
+ {FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM, FORMAT_COMPR, FORMAT_PCM, FORMAT_PCM, FORMAT_PCM}, //ROUTE_FORMAT
+ {NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, DTS_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER, NO_TRANSCODER} //TRANSCODE_FMT
+};
+/*
+List of decoders which require the config as part of the first buffer
+*/
+static const int decodersRequireConfig[] = {
+ AUDIO_FORMAT_AAC,
+ AUDIO_FORMAT_HE_AAC_V1,
+ AUDIO_FORMAT_HE_AAC_V2,
+ AUDIO_FORMAT_AAC_ADIF,
+ AUDIO_FORMAT_WMA,
+ AUDIO_FORMAT_WMA_PRO
+};
+/*
+List of enums used by Broadcast.
+NOTE: to be removed once Broadcast is moved to the updated states above
+*/
+enum {
+ INVALID_FORMAT = -1,
+ PCM_FORMAT = 0,
+ COMPRESSED_FORMAT = 1,
+ COMPRESSED_FORCED_PCM_FORMAT = 2,
+ COMPRESSED_PASSTHROUGH_FORMAT = 3
+};
+
+
struct audio_bitstream_sm {
int buffering_factor;
int buffering_factor_cnt;
@@ -255,18 +638,33 @@
char *inp_buf;
char *inp_buf_curr_ptr;
char *inp_buf_write_ptr;
+ uint32_t inp_buf_size;
char *enc_out_buf;
char *enc_out_buf_write_ptr;
+ uint32_t enc_out_buf_size;
char *pcm_2_out_buf;
char *pcm_2_out_buf_write_ptr;
+ uint32_t pcm_2_out_buf_size;
char *pcm_mch_out_buf;
char *pcm_mch_out_buf_write_ptr;
+ uint32_t pcm_mch_out_buf_size;
char *passt_out_buf;
char *passt_out_buf_write_ptr;
+ uint32_t passt_out_buf_size;
+};
+
+/*
+Meta data structure for handling compressed output
+*/
+struct output_metadata {
+ uint32_t metadataLength;
+ uint32_t bufferLength;
+ uint64_t timestamp;
+ uint32_t reserved[12];
};
#endif // QCOM_AUDIO_PLATFORM_H
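
In the header above, format_index maps an audio_format_t to a compact index, and usecase_decode_format holds NUM_STATES_FOR_EACH_DEVICE_FMT consecutive states (decoder type, route format, transcode format) per index for non-HDMI/SPDIF devices. Below is a hedged sketch of a lookup built on those tables; the helper name and error handling are assumptions, not part of this patch.

#include <errno.h>

/* Sketch only: resolve the decode states for a format on a non-HDMI/SPDIF
 * device, assuming the tables and macros defined in platform.h above. */
static int get_decode_states(int audio_format, int *decoder, int *route, int *transcode)
{
    int idx = -1;

    for (int i = 0; i < NUM_SUPPORTED_CODECS; i++) {
        if (format_index[i][0] == audio_format) {
            idx = format_index[i][1];   /* INDEX column */
            break;
        }
    }
    if (idx < 0)
        return -EINVAL;

    const int *states = &usecase_decode_format[idx * NUM_STATES_FOR_EACH_DEVICE_FMT];
    *decoder   = states[DECODER_TYPE_IDX];  /* e.g. DSP_DECODE or SW_DECODE */
    *route     = states[ROUTE_FORMAT_IDX];  /* FORMAT_PCM or FORMAT_COMPR */
    *transcode = states[2];                 /* NO_TRANSCODER / AC3 / DTS transcoder */
    return 0;
}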
diff --git a/hal_mpq/platform_api.h b/hal_mpq/platform_api.h
index 44ad790..955aa3b 100644
--- a/hal_mpq/platform_api.h
+++ b/hal_mpq/platform_api.h
@@ -51,5 +51,7 @@
/* returns the latency for a usecase in Us */
int64_t platform_render_latency(audio_usecase_t usecase);
int platform_update_usecase_from_source(int source, audio_usecase_t usecase);
+audio_usecase_t platform_get_usecase(audio_usecase_stream_type_t uc_type);
+int platform_free_usecase(audio_usecase_t uc_id);
#endif // QCOM_AUDIO_PLATFORM_API_H
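
The output_metadata struct added to platform.h above is 4 + 4 + 8 + 12*4 = 64 bytes, matching MIN_SIZE_FOR_METADATA, which is why COMPR_INPUT_BUFFER_SIZE reserves that much of each period. The sketch below shows one way such a header could be prepended to a compressed payload before it is handed to the driver; the helper name and the write convention are assumptions, not something this patch defines.

#include <string.h>
#include <stdint.h>

/* Sketch only: prepend an output_metadata header to a compressed payload,
 * assuming struct output_metadata and MIN_SIZE_FOR_METADATA from platform.h. */
static size_t pack_with_metadata(char *dst, const char *payload,
                                 uint32_t payload_len, uint64_t timestamp_us)
{
    struct output_metadata meta;

    memset(&meta, 0, sizeof(meta));
    meta.metadataLength = sizeof(meta);   /* 64 bytes == MIN_SIZE_FOR_METADATA */
    meta.bufferLength   = payload_len;
    meta.timestamp      = timestamp_us;

    memcpy(dst, &meta, sizeof(meta));
    memcpy(dst + sizeof(meta), payload, payload_len);
    return sizeof(meta) + payload_len;
}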
diff --git a/mm-audio/aenc-aac/Android.mk b/mm-audio/aenc-aac/Android.mk
index 8698436..8ab45b3 100644
--- a/mm-audio/aenc-aac/Android.mk
+++ b/mm-audio/aenc-aac/Android.mk
@@ -21,6 +21,9 @@
ifeq ($(call is-board-platform,apq8084),true)
include $(AENC_AAC_PATH)/qdsp6/Android.mk
endif
+ifeq ($(call is-board-platform,mpq8092),true)
+include $(AENC_AAC_PATH)/qdsp6/Android.mk
+endif
endif
diff --git a/mm-audio/aenc-amrnb/Android.mk b/mm-audio/aenc-amrnb/Android.mk
index 6c2f458..2601ede 100644
--- a/mm-audio/aenc-amrnb/Android.mk
+++ b/mm-audio/aenc-amrnb/Android.mk
@@ -21,6 +21,9 @@
ifeq ($(call is-board-platform,apq8084),true)
include $(AENC_AMR_PATH)/qdsp6/Android.mk
endif
+ifeq ($(call is-board-platform,mpq8092),true)
+include $(AENC_AMR_PATH)/qdsp6/Android.mk
+endif
endif
diff --git a/mm-audio/aenc-evrc/Android.mk b/mm-audio/aenc-evrc/Android.mk
index c8ee1ed..2f42d6b 100644
--- a/mm-audio/aenc-evrc/Android.mk
+++ b/mm-audio/aenc-evrc/Android.mk
@@ -21,6 +21,9 @@
ifeq ($(call is-board-platform,apq8084),true)
include $(AENC_EVRC_PATH)/qdsp6/Android.mk
endif
+ifeq ($(call is-board-platform,mpq8092),true)
+include $(AENC_EVRC_PATH)/qdsp6/Android.mk
+endif
endif
diff --git a/mm-audio/aenc-qcelp13/Android.mk b/mm-audio/aenc-qcelp13/Android.mk
index 8f6985c..fe18efc 100644
--- a/mm-audio/aenc-qcelp13/Android.mk
+++ b/mm-audio/aenc-qcelp13/Android.mk
@@ -21,6 +21,8 @@
ifeq ($(call is-board-platform,apq8084),true)
include $(AENC_QCELP13_PATH)/qdsp6/Android.mk
endif
-
+ifeq ($(call is-board-platform,mpq8092),true)
+include $(AENC_QCELP13_PATH)/qdsp6/Android.mk
+endif
endif
diff --git a/policy_hal/AudioPolicyManager.cpp b/policy_hal/AudioPolicyManager.cpp
index 0716656..4fa2356 100644
--- a/policy_hal/AudioPolicyManager.cpp
+++ b/policy_hal/AudioPolicyManager.cpp
@@ -27,9 +27,6 @@
#define ALOGVV(a...) do { } while(0)
#endif
-// A device mask for all audio input devices that are considered "virtual" when evaluating
-// active inputs in getActiveInput()
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL AUDIO_DEVICE_IN_REMOTE_SUBMIX | AUDIO_DEVICE_IN_FM_RX_A2DP
// A device mask for all audio output devices that are considered "remote" when evaluating
// active output devices in isStreamActiveRemotely()
#define APM_AUDIO_OUT_DEVICE_REMOTE_ALL AUDIO_DEVICE_OUT_REMOTE_SUBMIX
@@ -739,16 +736,6 @@
return device;
}
-bool AudioPolicyManager::isVirtualInputDevice(audio_devices_t device)
-{
- if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
- device &= ~AUDIO_DEVICE_BIT_IN;
- if ((popcount(device) == 1) && ((device & ~APM_AUDIO_IN_DEVICE_VIRTUAL_ALL) == 0))
- return true;
- }
- return false;
-}
-
AudioPolicyManager::device_category AudioPolicyManager::getDeviceCategory(audio_devices_t device)
{
switch(getDeviceForVolume(device)) {
diff --git a/policy_hal/AudioPolicyManager.h b/policy_hal/AudioPolicyManager.h
index 2e0c6fb..7a8cfa9 100644
--- a/policy_hal/AudioPolicyManager.h
+++ b/policy_hal/AudioPolicyManager.h
@@ -68,8 +68,6 @@
// select input device corresponding to requested audio source
virtual audio_devices_t getDeviceForInputSource(int inputSource);
- static bool isVirtualInputDevice(audio_devices_t device);
-
// compute the actual volume for a given stream according to the requested index and a particular
// device
virtual float computeVolume(int stream, int index, audio_io_handle_t output, audio_devices_t device);