Merge "Modular DRM for MediaPlayer: L3 playback fails at OMX mmap; prepareDrm preparing" into oc-dev
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 1e327a3..361686c 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -76,6 +76,7 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/soundfx/libvisualizer.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/soundfx/libreverbwrapper.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/soundfx/libbundlewrapper.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/soundfx/libaudiopreprocessing.so)
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index f54954a..e56f675 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -25,6 +25,7 @@
#include <android/hardware/drm/1.0/IDrmPlugin.h>
#include <android/hardware/drm/1.0/types.h>
#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <hidl/ServiceManagement.h>
#include <media/DrmHal.h>
#include <media/DrmSessionClientInterface.h>
@@ -200,7 +201,7 @@
Vector<sp<IDrmFactory>> DrmHal::makeDrmFactories() {
Vector<sp<IDrmFactory>> factories;
- auto manager = ::IServiceManager::getService();
+ auto manager = hardware::defaultServiceManager();
if (manager != NULL) {
manager->listByInterface(IDrmFactory::descriptor,
@@ -957,11 +958,9 @@
void DrmHal::binderDied(const wp<IBinder> &the_late_who __unused)
{
- mEventLock.lock();
- mListener.clear();
- mEventLock.unlock();
-
Mutex::Autolock autoLock(mLock);
+ setListener(NULL);
+ mPlugin->setListener(NULL);
mPlugin.clear();
}
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
index 51a5a85..3e24f9f 100644
--- a/media/libaaudio/examples/input_monitor/jni/Android.mk
+++ b/media/libaaudio/examples/input_monitor/jni/Android.mk
@@ -4,32 +4,30 @@
LOCAL_MODULE_TAGS := tests
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
- frameworks/av/media/liboboe/include
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/src \
+ frameworks/av/media/libaaudio/examples/utils
-LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine.cpp
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
- libbinder libcutils libutils
-LOCAL_STATIC_LIBRARIES := libsndfile
-LOCAL_MODULE := write_sine_ndk
-LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+# NDK recommends using this kind of relative path instead of an absolute path.
+LOCAL_SRC_FILES:= ../src/input_monitor.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := input_monitor_ndk
include $(BUILD_EXECUTABLE)
include $(CLEAR_VARS)
LOCAL_MODULE_TAGS := tests
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
- frameworks/av/media/liboboe/include
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
-LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine_threaded.cpp
-LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
- libbinder libcutils libutils
-LOCAL_STATIC_LIBRARIES := libsndfile
-LOCAL_MODULE := write_sine_threaded_ndk
-LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := input_monitor_callback_ndk
include $(BUILD_EXECUTABLE)
include $(CLEAR_VARS)
-LOCAL_MODULE := liboboe_prebuilt
-LOCAL_SRC_FILES := liboboe.so
+LOCAL_MODULE := libaaudio_prebuilt
+LOCAL_SRC_FILES := libaaudio.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
-include $(PREBUILT_SHARED_LIBRARY)
+include $(PREBUILT_SHARED_LIBRARY)
\ No newline at end of file
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
index 0e14af0..715c5f8 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -22,46 +22,35 @@
#include <stdlib.h>
#include <math.h>
#include <aaudio/AAudio.h>
+#include "AAudioExampleUtils.h"
+#include "AAudioSimpleRecorder.h"
#define SAMPLE_RATE 48000
+
#define NUM_SECONDS 10
-#define NANOS_PER_MICROSECOND ((int64_t)1000)
-#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
-#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
-#define DECAY_FACTOR 0.999
#define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
-static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
- const char *modeText = "unknown";
- switch (mode) {
- case AAUDIO_SHARING_MODE_EXCLUSIVE:
- modeText = "EXCLUSIVE";
- break;
- case AAUDIO_SHARING_MODE_SHARED:
- modeText = "SHARED";
- break;
- default:
- break;
- }
- return modeText;
-}
-
int main(int argc, char **argv)
{
(void)argc; // unused
aaudio_result_t result;
-
+ AAudioSimpleRecorder recorder;
int actualSamplesPerFrame;
int actualSampleRate;
- const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
- aaudio_audio_format_t actualDataFormat;
+ const aaudio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
+ aaudio_format_t actualDataFormat;
+ const int requestedInputChannelCount = 1; // Can affect whether we get a FAST path.
+
+ //aaudio_performance_mode_t requestedPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ const aaudio_performance_mode_t requestedPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ //aaudio_performance_mode_t requestedPerformanceMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
aaudio_sharing_mode_t actualSharingMode;
- AAudioStreamBuilder *aaudioBuilder = nullptr;
AAudioStream *aaudioStream = nullptr;
aaudio_stream_state_t state;
int32_t framesPerBurst = 0;
@@ -79,22 +68,16 @@
printf("%s - Monitor input level using AAudio\n", argv[0]);
- // Use an AAudioStreamBuilder to contain requested parameters.
- result = AAudio_createStreamBuilder(&aaudioBuilder);
+ recorder.setPerformanceMode(requestedPerformanceMode);
+ recorder.setSharingMode(requestedSharingMode);
+
+ result = recorder.open(requestedInputChannelCount, 48000, requestedDataFormat,
+ nullptr, nullptr, nullptr);
if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
goto finish;
}
-
- // Request stream properties.
- AAudioStreamBuilder_setDirection(aaudioBuilder, AAUDIO_DIRECTION_INPUT);
- AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
- AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
-
- // Create an AAudioStream using the Builder.
- result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
- if (result != AAUDIO_OK) {
- goto finish;
- }
+ aaudioStream = recorder.getStream();
actualSamplesPerFrame = AAudioStream_getSamplesPerFrame(aaudioStream);
printf("SamplesPerFrame = %d\n", actualSamplesPerFrame);
@@ -117,13 +100,16 @@
while (framesPerRead < MIN_FRAMES_TO_READ) {
framesPerRead *= 2;
}
- printf("DataFormat: framesPerRead = %d\n",framesPerRead);
+ printf("DataFormat: framesPerRead = %d\n",framesPerRead);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+ printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
// TODO handle other data formats
assert(actualDataFormat == AAUDIO_FORMAT_PCM_I16);
+ printf("PerformanceMode: requested = %d, actual = %d\n", requestedPerformanceMode,
+ AAudioStream_getPerformanceMode(aaudioStream));
+
// Allocate a buffer for the audio data.
data = new(std::nothrow) int16_t[framesPerRead * actualSamplesPerFrame];
if (data == nullptr) {
@@ -133,37 +119,36 @@
}
// Start the stream.
- printf("call AAudioStream_requestStart()\n");
- result = AAudioStream_requestStart(aaudioStream);
+ result = recorder.start();
if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+ fprintf(stderr, "ERROR - recorder.start() returned %d\n", result);
goto finish;
}
state = AAudioStream_getState(aaudioStream);
printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
- // Play for a while.
+ // Record for a while.
framesToRecord = actualSampleRate * NUM_SECONDS;
framesLeft = framesToRecord;
while (framesLeft > 0) {
// Read audio data from the stream.
- int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
+ const int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
if (actual < 0) {
- fprintf(stderr, "ERROR - AAudioStream_read() returned %zd\n", actual);
+ fprintf(stderr, "ERROR - AAudioStream_read() returned %d\n", actual);
+ result = actual;
goto finish;
} else if (actual == 0) {
- fprintf(stderr, "WARNING - AAudioStream_read() returned %zd\n", actual);
+ fprintf(stderr, "WARNING - AAudioStream_read() returned %d\n", actual);
goto finish;
}
framesLeft -= actual;
- // Peak follower.
+ // Peak finder.
for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
- peakLevel *= DECAY_FACTOR;
if (sample > peakLevel) {
peakLevel = sample;
}
@@ -171,22 +156,22 @@
// Display level as stars, eg. "******".
if ((loopCounter++ % 10) == 0) {
- printf("%5.3f ", peakLevel);
- int numStars = (int)(peakLevel * 50);
- for (int i = 0; i < numStars; i++) {
- printf("*");
- }
- printf("\n");
+ displayPeakLevel(peakLevel);
+ peakLevel = 0.0;
}
}
xRunCount = AAudioStream_getXRunCount(aaudioStream);
printf("AAudioStream_getXRunCount %d\n", xRunCount);
+ result = recorder.stop();
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
finish:
+ recorder.close();
delete[] data;
- AAudioStream_close(aaudioStream);
- AAudioStreamBuilder_delete(aaudioBuilder);
printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
}
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
index 7c34252..9de2eb0 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -23,229 +23,18 @@
#include <math.h>
#include <time.h>
#include <aaudio/AAudio.h>
+#include "AAudioExampleUtils.h"
+#include "AAudioSimpleRecorder.h"
#define NUM_SECONDS 5
-#define NANOS_PER_MICROSECOND ((int64_t)1000)
-#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
-#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
-
-//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
-#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
-
-/**
- * Simple wrapper for AAudio that opens an input stream and then calls
- * a callback function to process the input data.
- */
-class SimpleAAudioRecorder {
-public:
- SimpleAAudioRecorder() {}
- ~SimpleAAudioRecorder() {
- close();
- };
-
- /**
- * Call this before calling open().
- * @param requestedSharingMode
- */
- void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
- mRequestedSharingMode = requestedSharingMode;
- }
-
- /**
- * Also known as "sample rate"
- * Only call this after open() has been called.
- */
- int32_t getFramesPerSecond() {
- if (mStream == nullptr) {
- return AAUDIO_ERROR_INVALID_STATE;
- }
- return AAudioStream_getSampleRate(mStream);;
- }
-
- /**
- * Only call this after open() has been called.
- */
- int32_t getSamplesPerFrame() {
- if (mStream == nullptr) {
- return AAUDIO_ERROR_INVALID_STATE;
- }
- return AAudioStream_getSamplesPerFrame(mStream);;
- }
- /**
- * Only call this after open() has been called.
- */
- int64_t getFramesRead() {
- if (mStream == nullptr) {
- return AAUDIO_ERROR_INVALID_STATE;
- }
- return AAudioStream_getFramesRead(mStream);;
- }
-
- /**
- * Open a stream
- */
- aaudio_result_t open(AAudioStream_dataCallback proc, void *userContext) {
- aaudio_result_t result = AAUDIO_OK;
-
- // Use an AAudioStreamBuilder to contain requested parameters.
- result = AAudio_createStreamBuilder(&mBuilder);
- if (result != AAUDIO_OK) return result;
-
- AAudioStreamBuilder_setDirection(mBuilder, AAUDIO_DIRECTION_INPUT);
- AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
- AAudioStreamBuilder_setDataCallback(mBuilder, proc, userContext);
- AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
-
- // Open an AAudioStream using the Builder.
- result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStreamBuilder_openStream() returned %d %s\n",
- result, AAudio_convertResultToText(result));
- goto finish1;
- }
-
- printf("AAudioStream_getFramesPerBurst() = %d\n",
- AAudioStream_getFramesPerBurst(mStream));
- printf("AAudioStream_getBufferSizeInFrames() = %d\n",
- AAudioStream_getBufferSizeInFrames(mStream));
- printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
- AAudioStream_getBufferCapacityInFrames(mStream));
- return result;
-
- finish1:
- AAudioStreamBuilder_delete(mBuilder);
- mBuilder = nullptr;
- return result;
- }
-
- aaudio_result_t close() {
- if (mStream != nullptr) {
- printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
- AAudioStream_close(mStream);
- mStream = nullptr;
- AAudioStreamBuilder_delete(mBuilder);
- mBuilder = nullptr;
- }
- return AAUDIO_OK;
- }
-
- // Write zero data to fill up the buffer and prevent underruns.
- aaudio_result_t prime() {
- int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
- const int numFrames = 32; // arbitrary
- float zeros[numFrames * samplesPerFrame];
- memset(zeros, 0, sizeof(zeros));
- aaudio_result_t result = numFrames;
- while (result == numFrames) {
- result = AAudioStream_write(mStream, zeros, numFrames, 0);
- }
- return result;
- }
-
- // Start the stream. AAudio will start calling your callback function.
- aaudio_result_t start() {
- aaudio_result_t result = AAudioStream_requestStart(mStream);
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
- result, AAudio_convertResultToText(result));
- }
- return result;
- }
-
- // Stop the stream. AAudio will stop calling your callback function.
- aaudio_result_t stop() {
- aaudio_result_t result = AAudioStream_requestStop(mStream);
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
- result, AAudio_convertResultToText(result));
- }
- return result;
- }
-
- // Pause the stream. AAudio will stop calling your callback function.
- aaudio_result_t pause() {
- aaudio_result_t result = AAudioStream_requestPause(mStream);
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - AAudioStream_requestPause() returned %d %s\n",
- result, AAudio_convertResultToText(result));
- }
- return result;
- }
-
-private:
- AAudioStreamBuilder *mBuilder = nullptr;
- AAudioStream *mStream = nullptr;
- aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
-};
-
-// Application data that gets passed to the callback.
-typedef struct PeakTrackerData {
- float peakLevel;
-} PeakTrackerData_t;
-
-#define DECAY_FACTOR 0.999
-
-// Callback function that fills the audio output buffer.
-aaudio_data_callback_result_t MyDataCallbackProc(
- AAudioStream *stream,
- void *userData,
- void *audioData,
- int32_t numFrames
- ) {
-
- PeakTrackerData_t *data = (PeakTrackerData_t *) userData;
- // printf("MyCallbackProc(): frameCount = %d\n", numFrames);
- int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
- float sample;
- // This code assume mono or stereo.
- switch (AAudioStream_getFormat(stream)) {
- case AAUDIO_FORMAT_PCM_I16: {
- int16_t *audioBuffer = (int16_t *) audioData;
- // Peak follower
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- sample = audioBuffer[frameIndex * samplesPerFrame] * (1.0/32768);
- data->peakLevel *= DECAY_FACTOR;
- if (sample > data->peakLevel) {
- data->peakLevel = sample;
- }
- }
- }
- break;
- case AAUDIO_FORMAT_PCM_FLOAT: {
- float *audioBuffer = (float *) audioData;
- // Peak follower
- for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
- sample = audioBuffer[frameIndex * samplesPerFrame];
- data->peakLevel *= DECAY_FACTOR;
- if (sample > data->peakLevel) {
- data->peakLevel = sample;
- }
- }
- }
- break;
- default:
- return AAUDIO_CALLBACK_RESULT_STOP;
- }
-
- return AAUDIO_CALLBACK_RESULT_CONTINUE;
-}
-
-void displayPeakLevel(float peakLevel) {
- printf("%5.3f ", peakLevel);
- const int maxStars = 50; // arbitrary, fits on one line
- int numStars = (int) (peakLevel * maxStars);
- for (int i = 0; i < numStars; i++) {
- printf("*");
- }
- printf("\n");
-}
int main(int argc, char **argv)
{
(void)argc; // unused
- SimpleAAudioRecorder recorder;
+ AAudioSimpleRecorder recorder;
PeakTrackerData_t myData = {0.0};
aaudio_result_t result;
+ aaudio_stream_state_t state;
const int displayRateHz = 20; // arbitrary
const int loopsNeeded = NUM_SECONDS * displayRateHz;
@@ -254,9 +43,8 @@
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
printf("%s - Display audio input using an AAudio callback\n", argv[0]);
- recorder.setSharingMode(SHARING_MODE);
-
- result = recorder.open(MyDataCallbackProc, &myData);
+ result = recorder.open(2, 48000, AAUDIO_FORMAT_PCM_I16,
+ SimpleRecorderDataCallbackProc, SimpleRecorderErrorCallbackProc, &myData);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
goto error;
@@ -278,6 +66,19 @@
(void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
printf("%08d: ", (int)recorder.getFramesRead());
displayPeakLevel(myData.peakLevel);
+
+ result = AAudioStream_waitForStateChange(recorder.getStream(),
+ AAUDIO_STREAM_STATE_CLOSED,
+ &state,
+ 0);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_waitForStateChange() returned %d\n", result);
+ goto error;
+ }
+ if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
+ printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
+ break;
+ }
}
printf("Woke up. Stop for a moment.\n");
@@ -285,7 +86,7 @@
if (result != AAUDIO_OK) {
goto error;
}
- sleep(1);
+ usleep(2000 * 1000);
result = recorder.start();
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.start() returned %d\n", result);
@@ -300,6 +101,12 @@
(void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
printf("%08d: ", (int)recorder.getFramesRead());
displayPeakLevel(myData.peakLevel);
+
+ state = AAudioStream_getState(recorder.getStream());
+ if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
+ printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
+ break;
+ }
}
printf("Woke up now.\n");
diff --git a/media/libaaudio/examples/input_monitor/static/Android.mk b/media/libaaudio/examples/input_monitor/static/Android.mk
index e83f179..61fc3b8 100644
--- a/media/libaaudio/examples/input_monitor/static/Android.mk
+++ b/media/libaaudio/examples/input_monitor/static/Android.mk
@@ -4,14 +4,15 @@
LOCAL_MODULE_TAGS := examples
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
# TODO reorganize folders to avoid using ../
LOCAL_SRC_FILES:= ../src/input_monitor.cpp
LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
libbinder libcutils libutils \
- libaudioclient liblog libtinyalsa
+ libaudioclient liblog libtinyalsa libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := input_monitor
@@ -22,13 +23,14 @@
LOCAL_MODULE_TAGS := tests
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
libbinder libcutils libutils \
- libaudioclient liblog
+ libaudioclient liblog libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := input_monitor_callback
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index bad21f7..9f06ee7 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -339,13 +339,13 @@
const int requestedOutputChannelCount = AAUDIO_UNSPECIFIED;
const int requestedSampleRate = SAMPLE_RATE;
int actualSampleRate = 0;
- const aaudio_audio_format_t requestedInputFormat = AAUDIO_FORMAT_PCM_I16;
- const aaudio_audio_format_t requestedOutputFormat = AAUDIO_FORMAT_PCM_FLOAT;
- aaudio_audio_format_t actualInputFormat;
- aaudio_audio_format_t actualOutputFormat;
+ const aaudio_format_t requestedInputFormat = AAUDIO_FORMAT_PCM_I16;
+ const aaudio_format_t requestedOutputFormat = AAUDIO_FORMAT_PCM_FLOAT;
+ aaudio_format_t actualInputFormat;
+ aaudio_format_t actualOutputFormat;
- //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
- const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+ //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_sharing_mode_t actualSharingMode;
AAudioStreamBuilder *builder = nullptr;
diff --git a/media/libaaudio/examples/utils/AAudioExampleUtils.h b/media/libaaudio/examples/utils/AAudioExampleUtils.h
new file mode 100644
index 0000000..66de25f
--- /dev/null
+++ b/media/libaaudio/examples/utils/AAudioExampleUtils.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_EXAMPLE_UTILS_H
+#define AAUDIO_EXAMPLE_UTILS_H
+
+#include <unistd.h>
+#include <sched.h>
+#include <aaudio/AAudio.h>
+
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
+
+static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+ const char *modeText = "unknown";
+ switch (mode) {
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
+ modeText = "EXCLUSIVE";
+ break;
+ case AAUDIO_SHARING_MODE_SHARED:
+ modeText = "SHARED";
+ break;
+ default:
+ break;
+ }
+ return modeText;
+}
+
+static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+ struct timespec time;
+ int result = clock_gettime(clockId, &time);
+ if (result < 0) {
+ return -errno;
+ }
+ return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
+}
+
+void displayPeakLevel(float peakLevel) {
+ printf("%5.3f ", peakLevel);
+ const int maxStars = 50; // arbitrary, fits on one line
+ int numStars = (int) (peakLevel * maxStars);
+ for (int i = 0; i < numStars; i++) {
+ printf("*");
+ }
+ printf("\n");
+}
+
+#endif // AAUDIO_EXAMPLE_UTILS_H
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
new file mode 100644
index 0000000..aaeb25f
--- /dev/null
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using an AAudio callback.
+
+#ifndef AAUDIO_SIMPLE_PLAYER_H
+#define AAUDIO_SIMPLE_PLAYER_H
+
+#include <unistd.h>
+#include <sched.h>
+
+#include <aaudio/AAudio.h>
+#include "SineGenerator.h"
+
+//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+#define PERFORMANCE_MODE AAUDIO_PERFORMANCE_MODE_NONE
+
+/**
+ * Simple wrapper for AAudio that opens an output stream either in callback or blocking write mode.
+ */
+class AAudioSimplePlayer {
+public:
+ AAudioSimplePlayer() {}
+ ~AAudioSimplePlayer() {
+ close();
+ };
+
+ /**
+ * Call this before calling open().
+ * @param requestedSharingMode
+ */
+ void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+ mRequestedSharingMode = requestedSharingMode;
+ }
+
+ /**
+ * Call this before calling open().
+ * @param requestedPerformanceMode
+ */
+ void setPerformanceMode(aaudio_performance_mode_t requestedPerformanceMode) {
+ mRequestedPerformanceMode = requestedPerformanceMode;
+ }
+
+ /**
+ * Also known as "sample rate"
+ * Only call this after open() has been called.
+ */
+ int32_t getFramesPerSecond() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return AAudioStream_getSampleRate(mStream);;
+ }
+
+ /**
+ * Only call this after open() has been called.
+ */
+ int32_t getChannelCount() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return AAudioStream_getChannelCount(mStream);;
+ }
+
+ /**
+ * Open a stream
+ */
+ aaudio_result_t open(int channelCount, int sampSampleRate, aaudio_format_t format,
+ AAudioStream_dataCallback dataProc, AAudioStream_errorCallback errorProc,
+ void *userContext) {
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&mBuilder);
+ if (result != AAUDIO_OK) return result;
+
+ //AAudioStreamBuilder_setSampleRate(mBuilder, 44100);
+ AAudioStreamBuilder_setPerformanceMode(mBuilder, mRequestedPerformanceMode);
+ AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+ if (dataProc != nullptr) {
+ AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
+ }
+ if (errorProc != nullptr) {
+ AAudioStreamBuilder_setErrorCallback(mBuilder, errorProc, userContext);
+ }
+ AAudioStreamBuilder_setChannelCount(mBuilder, channelCount);
+ AAudioStreamBuilder_setSampleRate(mBuilder, sampSampleRate);
+ AAudioStreamBuilder_setFormat(mBuilder, format);
+ //AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
+ AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, 48 * 8);
+
+ //aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ //aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
+ AAudioStreamBuilder_setPerformanceMode(mBuilder, perfMode);
+
+ // Open an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+ if (result != AAUDIO_OK) goto finish1;
+
+ printf("AAudioStream_getFramesPerBurst() = %d\n",
+ AAudioStream_getFramesPerBurst(mStream));
+ printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+ AAudioStream_getBufferSizeInFrames(mStream));
+ printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+ AAudioStream_getBufferCapacityInFrames(mStream));
+ printf("AAudioStream_getPerformanceMode() = %d, requested %d\n",
+ AAudioStream_getPerformanceMode(mStream), perfMode);
+
+ finish1:
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ return result;
+ }
+
+ aaudio_result_t close() {
+ if (mStream != nullptr) {
+ printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
+ AAudioStream_close(mStream);
+ mStream = nullptr;
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ }
+ return AAUDIO_OK;
+ }
+
+ // Write zero data to fill up the buffer and prevent underruns.
+ aaudio_result_t prime() {
+ int32_t samplesPerFrame = AAudioStream_getChannelCount(mStream);
+ const int numFrames = 32;
+ float zeros[numFrames * samplesPerFrame];
+ memset(zeros, 0, sizeof(zeros));
+ aaudio_result_t result = numFrames;
+ while (result == numFrames) {
+ result = AAudioStream_write(mStream, zeros, numFrames, 0);
+ }
+ return result;
+ }
+
+ // Start the stream. AAudio will start calling your callback function.
+ aaudio_result_t start() {
+ aaudio_result_t result = AAudioStream_requestStart(mStream);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_requestStart() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ // Stop the stream. AAudio will stop calling your callback function.
+ aaudio_result_t stop() {
+ aaudio_result_t result = AAudioStream_requestStop(mStream);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_requestStop() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+ return result;
+ }
+
+ AAudioStream *getStream() const {
+ return mStream;
+ }
+
+private:
+ AAudioStreamBuilder *mBuilder = nullptr;
+ AAudioStream *mStream = nullptr;
+ aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
+ aaudio_performance_mode_t mRequestedPerformanceMode = PERFORMANCE_MODE;
+};
+
+typedef struct SineThreadedData_s {
+ SineGenerator sineOsc1;
+ SineGenerator sineOsc2;
+ int scheduler;
+ bool schedulerChecked;
+} SineThreadedData_t;
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t SimplePlayerDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+ ) {
+
+ // should not happen but just in case...
+ if (userData == nullptr) {
+ fprintf(stderr, "ERROR - SimplePlayerDataCallbackProc needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
+
+ if (!sineData->schedulerChecked) {
+ sineData->scheduler = sched_getscheduler(gettid());
+ sineData->schedulerChecked = true;
+ }
+
+ int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
+ // This code only plays on the first one or two channels.
+ // TODO Support arbitrary number of channels.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Render sine waves as shorts to first channel.
+ sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Render sine waves as floats to first channel.
+ sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void SimplePlayerErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error)
+{
+ printf("Error Callback, error: %d\n",(int)error);
+}
+
+#endif //AAUDIO_SIMPLE_PLAYER_H
diff --git a/media/libaaudio/examples/utils/AAudioSimpleRecorder.h b/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
new file mode 100644
index 0000000..9e7c463
--- /dev/null
+++ b/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#ifndef AAUDIO_SIMPLE_RECORDER_H
+#define AAUDIO_SIMPLE_RECORDER_H
+
+#include <aaudio/AAudio.h>
+
+//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+#define PERFORMANCE_MODE AAUDIO_PERFORMANCE_MODE_NONE
+/**
+ * Simple wrapper for AAudio that opens an input stream either in callback or blocking read mode.
+ */
+class AAudioSimpleRecorder {
+public:
+ AAudioSimpleRecorder() {}
+ ~AAudioSimpleRecorder() {
+ close();
+ };
+
+ /**
+ * Call this before calling open().
+ * @param requestedSharingMode
+ */
+ void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+ mRequestedSharingMode = requestedSharingMode;
+ }
+
+ /**
+ * Call this before calling open().
+ * @param requestedPerformanceMode
+ */
+ void setPerformanceMode(aaudio_performance_mode_t requestedPerformanceMode) {
+ mRequestedPerformanceMode = requestedPerformanceMode;
+ }
+
+ /**
+ * Also known as "sample rate"
+ * Only call this after open() has been called.
+ */
+ int32_t getFramesPerSecond() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return AAudioStream_getSampleRate(mStream);;
+ }
+
+ /**
+ * Only call this after open() has been called.
+ */
+ int32_t getSamplesPerFrame() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return AAudioStream_getSamplesPerFrame(mStream);;
+ }
+ /**
+ * Only call this after open() has been called.
+ */
+ int64_t getFramesRead() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return AAudioStream_getFramesRead(mStream);;
+ }
+
+ /**
+ * Open a stream
+ */
+ aaudio_result_t open(int channelCount, int sampSampleRate, aaudio_format_t format,
+ AAudioStream_dataCallback dataProc, AAudioStream_errorCallback errorProc,
+ void *userContext) {
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&mBuilder);
+ if (result != AAUDIO_OK) return result;
+
+ AAudioStreamBuilder_setDirection(mBuilder, AAUDIO_DIRECTION_INPUT);
+ AAudioStreamBuilder_setPerformanceMode(mBuilder, mRequestedPerformanceMode);
+ AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+ if (dataProc != nullptr) {
+ AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
+ }
+ if (errorProc != nullptr) {
+ AAudioStreamBuilder_setErrorCallback(mBuilder, errorProc, userContext);
+ }
+ AAudioStreamBuilder_setChannelCount(mBuilder, channelCount);
+ AAudioStreamBuilder_setSampleRate(mBuilder, sampSampleRate);
+ AAudioStreamBuilder_setFormat(mBuilder, format);
+
+ // Open an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStreamBuilder_openStream() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ goto finish1;
+ }
+
+ printf("AAudioStream_getFramesPerBurst() = %d\n",
+ AAudioStream_getFramesPerBurst(mStream));
+ printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+ AAudioStream_getBufferSizeInFrames(mStream));
+ printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+ AAudioStream_getBufferCapacityInFrames(mStream));
+ return result;
+
+ finish1:
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ return result;
+ }
+
+ aaudio_result_t close() {
+ if (mStream != nullptr) {
+ printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
+ AAudioStream_close(mStream);
+ mStream = nullptr;
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ }
+ return AAUDIO_OK;
+ }
+
+ // Write zero data to fill up the buffer and prevent underruns.
+ aaudio_result_t prime() {
+ int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
+ const int numFrames = 32; // arbitrary
+ float zeros[numFrames * samplesPerFrame];
+ memset(zeros, 0, sizeof(zeros));
+ aaudio_result_t result = numFrames;
+ while (result == numFrames) {
+ result = AAudioStream_write(mStream, zeros, numFrames, 0);
+ }
+ return result;
+ }
+
+ // Start the stream. AAudio will start calling your callback function.
+ aaudio_result_t start() {
+ aaudio_result_t result = AAudioStream_requestStart(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ // Stop the stream. AAudio will stop calling your callback function.
+ aaudio_result_t stop() {
+ aaudio_result_t result = AAudioStream_requestStop(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ // Pause the stream. AAudio will stop calling your callback function.
+ aaudio_result_t pause() {
+ aaudio_result_t result = AAudioStream_requestPause(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestPause() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ AAudioStream *getStream() const {
+ return mStream;
+ }
+
+private:
+ AAudioStreamBuilder *mBuilder = nullptr;
+ AAudioStream *mStream = nullptr;
+ aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
+ aaudio_performance_mode_t mRequestedPerformanceMode = PERFORMANCE_MODE;
+};
+
+// Application data that gets passed to the callback.
+typedef struct PeakTrackerData {
+ float peakLevel;
+} PeakTrackerData_t;
+
+#define DECAY_FACTOR 0.999
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t SimpleRecorderDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+ ) {
+
+ // should not happen but just in case...
+ if (userData == nullptr) {
+ fprintf(stderr, "ERROR - SimpleRecorderDataCallbackProc needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ PeakTrackerData_t *data = (PeakTrackerData_t *) userData;
+ // printf("MyCallbackProc(): frameCount = %d\n", numFrames);
+ int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
+ float sample;
+    // This code assumes mono or stereo.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Peak follower
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ sample = audioBuffer[frameIndex * samplesPerFrame] * (1.0/32768);
+ data->peakLevel *= DECAY_FACTOR;
+ if (sample > data->peakLevel) {
+ data->peakLevel = sample;
+ }
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Peak follower
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ sample = audioBuffer[frameIndex * samplesPerFrame];
+ data->peakLevel *= DECAY_FACTOR;
+ if (sample > data->peakLevel) {
+ data->peakLevel = sample;
+ }
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void SimpleRecorderErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error)
+{
+ printf("Error Callback, error: %d\n",(int)error);
+}
+
+#endif //AAUDIO_SIMPLE_RECORDER_H
diff --git a/media/libaaudio/examples/write_sine/src/SineGenerator.h b/media/libaaudio/examples/utils/SineGenerator.h
similarity index 100%
rename from media/libaaudio/examples/write_sine/src/SineGenerator.h
rename to media/libaaudio/examples/utils/SineGenerator.h
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
index 0bda008..c306ed3 100644
--- a/media/libaaudio/examples/write_sine/jni/Android.mk
+++ b/media/libaaudio/examples/write_sine/jni/Android.mk
@@ -5,7 +5,8 @@
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src
+ frameworks/av/media/libaaudio/src \
+ frameworks/av/media/libaaudio/examples/utils
# NDK recommends using this kind of relative path instead of an absolute path.
LOCAL_SRC_FILES:= ../src/write_sine.cpp
@@ -17,7 +18,8 @@
LOCAL_MODULE_TAGS := tests
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
LOCAL_SHARED_LIBRARIES := libaaudio
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index 57a5273..6522ba4 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -20,58 +20,40 @@
#include <stdlib.h>
#include <math.h>
#include <aaudio/AAudio.h>
-#include "SineGenerator.h"
+#include <aaudio/AAudioTesting.h>
+#include "AAudioExampleUtils.h"
+#include "AAudioSimplePlayer.h"
#define SAMPLE_RATE 48000
-#define NUM_SECONDS 5
-#define NANOS_PER_MICROSECOND ((int64_t)1000)
-#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
-#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
+#define NUM_SECONDS 20
-#define REQUESTED_FORMAT AAUDIO_FORMAT_PCM_I16
-#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_SHARED
-//#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+#define MMAP_POLICY AAUDIO_UNSPECIFIED
+//#define MMAP_POLICY AAUDIO_POLICY_NEVER
+//#define MMAP_POLICY AAUDIO_POLICY_AUTO
+//#define MMAP_POLICY AAUDIO_POLICY_ALWAYS
-static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
- const char *modeText = "unknown";
- switch (mode) {
- case AAUDIO_SHARING_MODE_EXCLUSIVE:
- modeText = "EXCLUSIVE";
- break;
- case AAUDIO_SHARING_MODE_SHARED:
- modeText = "SHARED";
- break;
- default:
- break;
- }
- return modeText;
-}
+#define REQUESTED_FORMAT AAUDIO_FORMAT_PCM_I16
-// TODO move to a common utility library
-static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
- struct timespec time;
- int result = clock_gettime(clockId, &time);
- if (result < 0) {
- return -errno;
- }
- return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
-}
+#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+//#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+
int main(int argc, char **argv)
{
(void)argc; // unused
+ AAudioSimplePlayer player;
+ SineThreadedData_t myData;
aaudio_result_t result = AAUDIO_OK;
const int requestedChannelCount = 2;
int actualChannelCount = 0;
const int requestedSampleRate = SAMPLE_RATE;
int actualSampleRate = 0;
- aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;
-
+ aaudio_format_t requestedDataFormat = REQUESTED_FORMAT;
+ aaudio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
- AAudioStreamBuilder *aaudioBuilder = nullptr;
AAudioStream *aaudioStream = nullptr;
aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
int32_t framesPerBurst = 0;
@@ -83,36 +65,26 @@
float *floatData = nullptr;
int16_t *shortData = nullptr;
- SineGenerator sineOsc1;
- SineGenerator sineOsc2;
-
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
printf("%s - Play a sine wave using AAudio\n", argv[0]);
- // Use an AAudioStreamBuilder to contain requested parameters.
- result = AAudio_createStreamBuilder(&aaudioBuilder);
+ AAudio_setMMapPolicy(MMAP_POLICY);
+ printf("requested MMapPolicy = %d\n", AAudio_getMMapPolicy());
+
+ player.setSharingMode(REQUESTED_SHARING_MODE);
+
+ result = player.open(requestedChannelCount, requestedSampleRate, requestedDataFormat,
+ nullptr, nullptr, &myData);
if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.open() returned %d\n", result);
goto finish;
}
+ aaudioStream = player.getStream();
// Request stream properties.
- AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
- AAudioStreamBuilder_setChannelCount(aaudioBuilder, requestedChannelCount);
- AAudioStreamBuilder_setFormat(aaudioBuilder, REQUESTED_FORMAT);
- AAudioStreamBuilder_setSharingMode(aaudioBuilder, REQUESTED_SHARING_MODE);
-
- AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_NONE);
- //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
- //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_POWER_SAVING);
-
- // Create an AAudioStream using the Builder.
- result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
- if (result != AAUDIO_OK) {
- goto finish;
- }
state = AAudioStream_getState(aaudioStream);
printf("after open, state = %s\n", AAudio_convertStreamStateToText(state));
@@ -121,8 +93,8 @@
actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
printf("SampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
- sineOsc1.setup(440.0, actualSampleRate);
- sineOsc2.setup(660.0, actualSampleRate);
+ myData.sineOsc1.setup(440.0, actualSampleRate);
+ myData.sineOsc2.setup(660.0, actualSampleRate);
actualChannelCount = AAudioStream_getChannelCount(aaudioStream);
printf("ChannelCount: requested = %d, actual = %d\n",
@@ -136,7 +108,7 @@
// This is the number of frames that are read in one chunk by a DMA controller
// or a DSP or a mixer.
framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
- printf("Buffer: bufferSize = %d\n", AAudioStream_getBufferSizeInFrames(aaudioStream));
+ printf("Buffer: bufferSize = %d\n", AAudioStream_getBufferSizeInFrames(aaudioStream));
bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
printf("Buffer: bufferCapacity = %d, remainder = %d\n",
bufferCapacity, bufferCapacity % framesPerBurst);
@@ -150,11 +122,12 @@
printf("Buffer: framesPerBurst = %d\n",framesPerBurst);
printf("Buffer: framesPerWrite = %d\n",framesPerWrite);
- actualDataFormat = AAudioStream_getFormat(aaudioStream);
- printf("DataFormat: requested = %d, actual = %d\n", REQUESTED_FORMAT, actualDataFormat);
- // TODO handle other data formats
+ printf("PerformanceMode = %d\n", AAudioStream_getPerformanceMode(aaudioStream));
+ printf("is MMAP used? = %s\n", AAudioStream_isMMapUsed(aaudioStream) ? "yes" : "no");
- printf("PerformanceMode: %d\n", AAudioStream_getPerformanceMode(aaudioStream));
+ actualDataFormat = AAudioStream_getFormat(aaudioStream);
+ printf("DataFormat: requested = %d, actual = %d\n", REQUESTED_FORMAT, actualDataFormat);
+ // TODO handle other data formats
// Allocate a buffer for the audio data.
if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
@@ -167,8 +140,8 @@
}
// Start the stream.
- printf("call AAudioStream_requestStart()\n");
- result = AAudioStream_requestStart(aaudioStream);
+ printf("call player.start()\n");
+ result = player.start();
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
goto finish;
@@ -184,15 +157,15 @@
if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
// Render sine waves to left and right channels.
- sineOsc1.render(&floatData[0], actualChannelCount, framesPerWrite);
+ myData.sineOsc1.render(&floatData[0], actualChannelCount, framesPerWrite);
if (actualChannelCount > 1) {
- sineOsc2.render(&floatData[1], actualChannelCount, framesPerWrite);
+ myData.sineOsc2.render(&floatData[1], actualChannelCount, framesPerWrite);
}
} else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
// Render sine waves to left and right channels.
- sineOsc1.render(&shortData[0], actualChannelCount, framesPerWrite);
+ myData.sineOsc1.render(&shortData[0], actualChannelCount, framesPerWrite);
if (actualChannelCount > 1) {
- sineOsc2.render(&shortData[1], actualChannelCount, framesPerWrite);
+ myData.sineOsc2.render(&shortData[1], actualChannelCount, framesPerWrite);
}
}
@@ -240,11 +213,16 @@
xRunCount = AAudioStream_getXRunCount(aaudioStream);
printf("AAudioStream_getXRunCount %d\n", xRunCount);
+ printf("call stop()\n");
+ result = player.stop();
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
finish:
+ player.close();
delete[] floatData;
delete[] shortData;
- AAudioStream_close(aaudioStream);
- AAudioStreamBuilder_delete(aaudioBuilder);
printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
}
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 20a981b..69145aa 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -24,210 +24,18 @@
#include <math.h>
#include <time.h>
#include <aaudio/AAudio.h>
-#include "SineGenerator.h"
+#include "AAudioExampleUtils.h"
+#include "AAudioSimplePlayer.h"
#define NUM_SECONDS 5
-//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
-#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
-
-// TODO refactor common code into a single SimpleAAudio class
-/**
- * Simple wrapper for AAudio that opens a default stream and then calls
- * a callback function to fill the output buffers.
- */
-class SimpleAAudioPlayer {
-public:
- SimpleAAudioPlayer() {}
- ~SimpleAAudioPlayer() {
- close();
- };
-
- /**
- * Call this before calling open().
- * @param requestedSharingMode
- */
- void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
- mRequestedSharingMode = requestedSharingMode;
- }
-
- /**
- * Also known as "sample rate"
- * Only call this after open() has been called.
- */
- int32_t getFramesPerSecond() {
- if (mStream == nullptr) {
- return AAUDIO_ERROR_INVALID_STATE;
- }
- return AAudioStream_getSampleRate(mStream);;
- }
-
- /**
- * Only call this after open() has been called.
- */
- int32_t getChannelCount() {
- if (mStream == nullptr) {
- return AAUDIO_ERROR_INVALID_STATE;
- }
- return AAudioStream_getChannelCount(mStream);;
- }
-
- /**
- * Open a stream
- */
- aaudio_result_t open(AAudioStream_dataCallback dataProc, void *userContext) {
- aaudio_result_t result = AAUDIO_OK;
-
- // Use an AAudioStreamBuilder to contain requested parameters.
- result = AAudio_createStreamBuilder(&mBuilder);
- if (result != AAUDIO_OK) return result;
-
- //AAudioStreamBuilder_setSampleRate(mBuilder, 44100);
- AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
- AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
- AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
- //AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
- AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, 48 * 8);
-
- //aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_NONE;
- aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
- //aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
- AAudioStreamBuilder_setPerformanceMode(mBuilder, perfMode);
-
- // Open an AAudioStream using the Builder.
- result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
- if (result != AAUDIO_OK) goto finish1;
-
- printf("AAudioStream_getFramesPerBurst() = %d\n",
- AAudioStream_getFramesPerBurst(mStream));
- printf("AAudioStream_getBufferSizeInFrames() = %d\n",
- AAudioStream_getBufferSizeInFrames(mStream));
- printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
- AAudioStream_getBufferCapacityInFrames(mStream));
- printf("AAudioStream_getPerformanceMode() = %d, requested %d\n",
- AAudioStream_getPerformanceMode(mStream), perfMode);
-
- finish1:
- AAudioStreamBuilder_delete(mBuilder);
- mBuilder = nullptr;
- return result;
- }
-
- aaudio_result_t close() {
- if (mStream != nullptr) {
- printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
- AAudioStream_close(mStream);
- mStream = nullptr;
- AAudioStreamBuilder_delete(mBuilder);
- mBuilder = nullptr;
- }
- return AAUDIO_OK;
- }
-
- // Write zero data to fill up the buffer and prevent underruns.
- aaudio_result_t prime() {
- int32_t samplesPerFrame = AAudioStream_getChannelCount(mStream);
- const int numFrames = 32;
- float zeros[numFrames * samplesPerFrame];
- memset(zeros, 0, sizeof(zeros));
- aaudio_result_t result = numFrames;
- while (result == numFrames) {
- result = AAudioStream_write(mStream, zeros, numFrames, 0);
- }
- return result;
- }
-
- // Start the stream. AAudio will start calling your callback function.
- aaudio_result_t start() {
- aaudio_result_t result = AAudioStream_requestStart(mStream);
- if (result != AAUDIO_OK) {
- printf("ERROR - AAudioStream_requestStart() returned %d %s\n",
- result, AAudio_convertResultToText(result));
- }
- return result;
- }
-
- // Stop the stream. AAudio will stop calling your callback function.
- aaudio_result_t stop() {
- aaudio_result_t result = AAudioStream_requestStop(mStream);
- if (result != AAUDIO_OK) {
- printf("ERROR - AAudioStream_requestStop() returned %d %s\n",
- result, AAudio_convertResultToText(result));
- }
- int32_t xRunCount = AAudioStream_getXRunCount(mStream);
- printf("AAudioStream_getXRunCount %d\n", xRunCount);
- return result;
- }
-
- AAudioStream *getStream() const {
- return mStream;
- }
-
-private:
- AAudioStreamBuilder *mBuilder = nullptr;
- AAudioStream *mStream = nullptr;
- aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
-};
-
// Application data that gets passed to the callback.
#define MAX_FRAME_COUNT_RECORDS 256
-typedef struct SineThreadedData_s {
- SineGenerator sineOsc1;
- SineGenerator sineOsc2;
- int scheduler;
- bool schedulerChecked;
-} SineThreadedData_t;
-
-// Callback function that fills the audio output buffer.
-aaudio_data_callback_result_t MyDataCallbackProc(
- AAudioStream *stream,
- void *userData,
- void *audioData,
- int32_t numFrames
- ) {
-
- SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
-
- if (!sineData->schedulerChecked) {
- sineData->scheduler = sched_getscheduler(gettid());
- sineData->schedulerChecked = true;
- }
-
- int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
- // This code only plays on the first one or two channels.
- // TODO Support arbitrary number of channels.
- switch (AAudioStream_getFormat(stream)) {
- case AAUDIO_FORMAT_PCM_I16: {
- int16_t *audioBuffer = (int16_t *) audioData;
- // Render sine waves as shorts to first channel.
- sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
- // Render sine waves to second channel if there is one.
- if (samplesPerFrame > 1) {
- sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
- }
- }
- break;
- case AAUDIO_FORMAT_PCM_FLOAT: {
- float *audioBuffer = (float *) audioData;
- // Render sine waves as floats to first channel.
- sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
- // Render sine waves to second channel if there is one.
- if (samplesPerFrame > 1) {
- sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
- }
- }
- break;
- default:
- return AAUDIO_CALLBACK_RESULT_STOP;
- }
-
- return AAUDIO_CALLBACK_RESULT_CONTINUE;
-}
int main(int argc, char **argv)
{
(void)argc; // unused
- SimpleAAudioPlayer player;
+ AAudioSimplePlayer player;
SineThreadedData_t myData;
aaudio_result_t result;
@@ -236,11 +44,10 @@
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);
- player.setSharingMode(SHARING_MODE);
-
myData.schedulerChecked = false;
- result = player.open(MyDataCallbackProc, &myData);
+ result = player.open(2, 44100, AAUDIO_FORMAT_PCM_FLOAT,
+ SimplePlayerDataCallbackProc, SimplePlayerErrorCallbackProc, &myData);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - player.open() returned %d\n", result);
goto error;
diff --git a/media/libaaudio/examples/write_sine/static/Android.mk b/media/libaaudio/examples/write_sine/static/Android.mk
index 3fee08a..40dca34 100644
--- a/media/libaaudio/examples/write_sine/static/Android.mk
+++ b/media/libaaudio/examples/write_sine/static/Android.mk
@@ -5,14 +5,15 @@
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
frameworks/av/media/libaaudio/src \
- frameworks/av/media/libaaudio/include
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
# NDK recommends using this kind of relative path instead of an absolute path.
LOCAL_SRC_FILES:= ../src/write_sine.cpp
LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
libbinder libcutils libutils \
- libaudioclient liblog libtinyalsa
+ libaudioclient liblog libtinyalsa libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := write_sine
@@ -23,13 +24,14 @@
LOCAL_MODULE_TAGS := tests
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
libbinder libcutils libutils \
- libaudioclient liblog
+ libaudioclient liblog libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := write_sine_callback
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 4e36e84..f11f1fa 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -42,7 +42,6 @@
* and would accept whatever it was given.
*/
#define AAUDIO_UNSPECIFIED 0
-#define AAUDIO_DEVICE_UNSPECIFIED 0
enum {
AAUDIO_DIRECTION_OUTPUT,
@@ -58,12 +57,6 @@
};
typedef int32_t aaudio_format_t;
-/**
- * @deprecated use aaudio_format_t instead
- * TODO remove when tests and examples are updated
- */
-typedef int32_t aaudio_audio_format_t;
-
enum {
AAUDIO_OK,
AAUDIO_ERROR_BASE = -900, // TODO review
@@ -197,11 +190,11 @@
* Request an audio device identified device using an ID.
* On Android, for example, the ID could be obtained from the Java AudioManager.
*
- * The default, if you do not call this function, is AAUDIO_DEVICE_UNSPECIFIED,
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED,
* in which case the primary device will be used.
*
* @param builder reference provided by AAudio_createStreamBuilder()
- * @param deviceId device identifier or AAUDIO_DEVICE_UNSPECIFIED
+ * @param deviceId device identifier or AAUDIO_UNSPECIFIED
*/
AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* builder,
int32_t deviceId);
@@ -263,7 +256,7 @@
* @param format common formats are AAUDIO_FORMAT_PCM_FLOAT and AAUDIO_FORMAT_PCM_I16.
*/
AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
- aaudio_audio_format_t format);
+ aaudio_format_t format);
/**
* Request a mode for sharing the device.
@@ -754,7 +747,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual data format
*/
-AAUDIO_API aaudio_audio_format_t AAudioStream_getFormat(AAudioStream* stream);
+AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* stream);
/**
* Provide actual sharing mode.
diff --git a/media/libaaudio/include/aaudio/AAudioTesting.h b/media/libaaudio/include/aaudio/AAudioTesting.h
new file mode 100644
index 0000000..02ec411
--- /dev/null
+++ b/media/libaaudio/include/aaudio/AAudioTesting.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This is test support for AAudio.
+ */
+#ifndef AAUDIO_AAUDIO_TESTING_H
+#define AAUDIO_AAUDIO_TESTING_H
+
+#include <aaudio/AAudio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/************************************************************************************
+ * The definitions below are only for testing. Do not use them in an application.
+ * They may change or be removed at any time.
+ ************************************************************************************/
+
+enum {
+ /**
+ * Related feature is disabled and never used.
+ */
+ AAUDIO_POLICY_NEVER = 1,
+
+ /**
+ * If related feature works then use it. Otherwise fall back to something else.
+ */
+ AAUDIO_POLICY_AUTO,
+
+ /**
+ * Related feature must be used. If not available then fail.
+ */
+ AAUDIO_POLICY_ALWAYS
+};
+typedef int32_t aaudio_policy_t;
+
+/**
+ * Control whether AAudioStreamBuilder_openStream() will use the new MMAP data path
+ * or the older "Legacy" data path.
+ *
+ * This will only affect the current process.
+ *
+ * If unspecified then the policy will be based on system properties or configuration.
+ *
+ * @note This is only for testing. Do not use this in an application.
+ * It may change or be removed at any time.
+ *
+ * @param policy AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER, AAUDIO_POLICY_AUTO, or AAUDIO_POLICY_ALWAYS
+ * @return AAUDIO_OK or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudio_setMMapPolicy(aaudio_policy_t policy);
+
+/**
+ * Get the current MMAP policy set by AAudio_setMMapPolicy().
+ *
+ * @note This is only for testing. Do not use this in an application.
+ * It may change or be removed at any time.
+ *
+ * @return current policy
+ */
+AAUDIO_API aaudio_policy_t AAudio_getMMapPolicy();
+
+/**
+ * Return true if the stream uses the MMAP data path versus the legacy path.
+ *
+ * @note This is only for testing. Do not use this in an application.
+ * It may change or be removed at any time.
+ *
+ * @return true if the stream uses the MMAP data path
+ */
+AAUDIO_API bool AAudioStream_isMMapUsed(AAudioStream* stream);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //AAUDIO_AAUDIO_TESTING_H
+
+/** @} */
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index 8f74800..2ba5250 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -3,6 +3,8 @@
AAudio_convertResultToText;
AAudio_convertStreamStateToText;
AAudio_createStreamBuilder;
+ AAudio_getMMapPolicy;
+ AAudio_setMMapPolicy;
AAudioStreamBuilder_setPerformanceMode;
AAudioStreamBuilder_setDeviceId;
AAudioStreamBuilder_setDataCallback;
@@ -43,6 +45,7 @@
AAudioStream_getFramesWritten;
AAudioStream_getFramesRead;
AAudioStream_getTimestamp;
+ AAudioStream_isMMapUsed;
local:
*;
};
diff --git a/media/libaaudio/src/Android.mk b/media/libaaudio/src/Android.mk
index f43c0ad..28c4d7f 100644
--- a/media/libaaudio/src/Android.mk
+++ b/media/libaaudio/src/Android.mk
@@ -18,6 +18,7 @@
frameworks/native/media/libaaudio/include/include \
frameworks/av/media/libaaudio/include \
frameworks/native/include \
+ frameworks/av/media/libaudioclient/include \
$(LOCAL_PATH) \
$(LOCAL_PATH)/binding \
$(LOCAL_PATH)/client \
@@ -44,6 +45,8 @@
fifo/FifoControllerBase.cpp \
client/AudioEndpoint.cpp \
client/AudioStreamInternal.cpp \
+ client/AudioStreamInternalCapture.cpp \
+ client/AudioStreamInternalPlay.cpp \
client/IsochronousClockModel.cpp \
binding/AudioEndpointParcelable.cpp \
binding/AAudioBinderClient.cpp \
@@ -99,6 +102,8 @@
fifo/FifoControllerBase.cpp \
client/AudioEndpoint.cpp \
client/AudioStreamInternal.cpp \
+ client/AudioStreamInternalCapture.cpp \
+ client/AudioStreamInternalPlay.cpp \
client/IsochronousClockModel.cpp \
binding/AudioEndpointParcelable.cpp \
binding/AAudioBinderClient.cpp \
@@ -116,6 +121,6 @@
# AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
LOCAL_CFLAGS += -DAAUDIO_API='__attribute__((visibility("default")))'
-LOCAL_SHARED_LIBRARIES := libaudioclient liblog libcutils libutils libbinder
+LOCAL_SHARED_LIBRARIES := libaudioclient liblog libcutils libutils libbinder libaudiomanager
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libaaudio/src/binding/AAudioServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
index 2de560b..638544e 100644
--- a/media/libaaudio/src/binding/AAudioServiceDefinitions.h
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -77,8 +77,7 @@
// Set capacityInFrames to zero if Queue is unused.
RingBufferDescriptor upMessageQueueDescriptor; // server to client
RingBufferDescriptor downMessageQueueDescriptor; // client to server
- RingBufferDescriptor upDataQueueDescriptor; // eg. record
- RingBufferDescriptor downDataQueueDescriptor; // eg. playback
+ RingBufferDescriptor dataQueueDescriptor; // playback or capture
} EndpointDescriptor;
} // namespace aaudio
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 09eaa42..44edb1d 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -55,19 +55,16 @@
}
status_t AAudioStreamConfiguration::readFromParcel(const Parcel* parcel) {
- int32_t temp;
status_t status = parcel->readInt32(&mDeviceId);
if (status != NO_ERROR) goto error;
status = parcel->readInt32(&mSampleRate);
if (status != NO_ERROR) goto error;
status = parcel->readInt32(&mSamplesPerFrame);
if (status != NO_ERROR) goto error;
- status = parcel->readInt32(&temp);
+ status = parcel->readInt32(&mSharingMode);
if (status != NO_ERROR) goto error;
- mSharingMode = (aaudio_sharing_mode_t) temp;
- status = parcel->readInt32(&temp);
+ status = parcel->readInt32(&mAudioFormat);
if (status != NO_ERROR) goto error;
- mAudioFormat = (aaudio_audio_format_t) temp;
status = parcel->readInt32(&mBufferCapacity);
if (status != NO_ERROR) goto error;
return NO_ERROR;
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.h b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
index b1e4a7d..144595a 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.h
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
@@ -58,11 +58,11 @@
mSamplesPerFrame = samplesPerFrame;
}
- aaudio_audio_format_t getAudioFormat() const {
+ aaudio_format_t getAudioFormat() const {
return mAudioFormat;
}
- void setAudioFormat(aaudio_audio_format_t audioFormat) {
+ void setAudioFormat(aaudio_format_t audioFormat) {
mAudioFormat = audioFormat;
}
@@ -91,11 +91,11 @@
void dump() const;
private:
- int32_t mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
+ int32_t mDeviceId = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
- aaudio_audio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ aaudio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
};
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index ee92ee3..d05abb0 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -93,10 +93,8 @@
&descriptor->downMessageQueueDescriptor);
if (result != AAUDIO_OK) return result;
- result = mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
- if (result != AAUDIO_OK) return result;
result = mDownDataQueueParcelable.resolve(mSharedMemories,
- &descriptor->downDataQueueDescriptor);
+ &descriptor->dataQueueDescriptor);
return result;
}
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index 1094d9e..899eb04 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -62,6 +62,8 @@
return status;
}
if (mSizeInBytes > 0) {
+ // Keep the original FD until you are done with the mFd.
+ // If you close it in here then it will prevent mFd from working.
mOriginalFd = parcel->readFileDescriptor();
ALOGV("SharedMemoryParcelable::readFromParcel() LEAK? mOriginalFd = %d\n", mOriginalFd);
mFd = fcntl(mOriginalFd, F_DUPFD_CLOEXEC, 0);
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index e6751c49..5cb642b 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -32,7 +32,7 @@
#define RIDICULOUSLY_LARGE_FRAME_SIZE 4096
AudioEndpoint::AudioEndpoint()
- : mOutputFreeRunning(false)
+ : mFreeRunning(false)
, mDataReadCounter(0)
, mDataWriteCounter(0)
{
@@ -108,7 +108,7 @@
&pEndpointDescriptor->upMessageQueueDescriptor);
if (result == AAUDIO_OK) {
result = AudioEndpoint_validateQueueDescriptor("data",
- &pEndpointDescriptor->downDataQueueDescriptor);
+ &pEndpointDescriptor->dataQueueDescriptor);
}
return result;
}
@@ -144,11 +144,11 @@
);
// ============================ down data queue =============================
- descriptor = &pEndpointDescriptor->downDataQueueDescriptor;
+ descriptor = &pEndpointDescriptor->dataQueueDescriptor;
ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
ALOGV("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
- mOutputFreeRunning = descriptor->readCounterAddress == nullptr;
- ALOGV("AudioEndpoint::configure() mOutputFreeRunning = %d", mOutputFreeRunning ? 1 : 0);
+ mFreeRunning = descriptor->readCounterAddress == nullptr;
+ ALOGV("AudioEndpoint::configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
? &mDataReadCounter
: descriptor->readCounterAddress;
@@ -156,7 +156,7 @@
? &mDataWriteCounter
: descriptor->writeCounterAddress;
- mDownDataQueue = new FifoBuffer(
+ mDataQueue = new FifoBuffer(
descriptor->bytesPerFrame,
descriptor->capacityInFrames,
readCounterAddress,
@@ -164,7 +164,7 @@
descriptor->dataAddress
);
uint32_t threshold = descriptor->capacityInFrames / 2;
- mDownDataQueue->setThreshold(threshold);
+ mDataQueue->setThreshold(threshold);
return result;
}
@@ -175,44 +175,54 @@
aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
{
- return mDownDataQueue->write(buffer, numFrames);
+ return mDataQueue->write(buffer, numFrames);
}
-void AudioEndpoint::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
- mDownDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+void AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
+ mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
}
-int32_t AudioEndpoint::getEmptyFramesAvailable() {
- return mDownDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
+int32_t AudioEndpoint::getEmptyFramesAvailable()
+{
+ return mDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
+}
+
+void AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
+{
+ return mDataQueue->getFullDataAvailable(wrappingBuffer);
}
int32_t AudioEndpoint::getFullFramesAvailable()
{
- return mDownDataQueue->getFifoControllerBase()->getFullFramesAvailable();
+ return mDataQueue->getFifoControllerBase()->getFullFramesAvailable();
}
void AudioEndpoint::advanceWriteIndex(int32_t deltaFrames) {
- mDownDataQueue->getFifoControllerBase()->advanceWriteIndex(deltaFrames);
+ mDataQueue->getFifoControllerBase()->advanceWriteIndex(deltaFrames);
}
-void AudioEndpoint::setDownDataReadCounter(fifo_counter_t framesRead)
-{
- mDownDataQueue->setReadCounter(framesRead);
+void AudioEndpoint::advanceReadIndex(int32_t deltaFrames) {
+ mDataQueue->getFifoControllerBase()->advanceReadIndex(deltaFrames);
}
-fifo_counter_t AudioEndpoint::getDownDataReadCounter()
+void AudioEndpoint::setDataReadCounter(fifo_counter_t framesRead)
{
- return mDownDataQueue->getReadCounter();
+ mDataQueue->setReadCounter(framesRead);
}
-void AudioEndpoint::setDownDataWriteCounter(fifo_counter_t framesRead)
+fifo_counter_t AudioEndpoint::getDataReadCounter()
{
- mDownDataQueue->setWriteCounter(framesRead);
+ return mDataQueue->getReadCounter();
}
-fifo_counter_t AudioEndpoint::getDownDataWriteCounter()
+void AudioEndpoint::setDataWriteCounter(fifo_counter_t framesRead)
{
- return mDownDataQueue->getWriteCounter();
+ mDataQueue->setWriteCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDataWriteCounter()
+{
+ return mDataQueue->getWriteCounter();
}
int32_t AudioEndpoint::setBufferSizeInFrames(int32_t requestedFrames,
@@ -221,18 +231,18 @@
if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
}
- mDownDataQueue->setThreshold(requestedFrames);
- *actualFrames = mDownDataQueue->getThreshold();
+ mDataQueue->setThreshold(requestedFrames);
+ *actualFrames = mDataQueue->getThreshold();
return AAUDIO_OK;
}
int32_t AudioEndpoint::getBufferSizeInFrames() const
{
- return mDownDataQueue->getThreshold();
+ return mDataQueue->getThreshold();
}
int32_t AudioEndpoint::getBufferCapacityInFrames() const
{
- return (int32_t)mDownDataQueue->getBufferCapacityInFrames();
+ return (int32_t)mDataQueue->getBufferCapacityInFrames();
}
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 3a2099f..53ba033 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -54,29 +54,36 @@
*/
aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
- void getEmptyRoomAvailable(android::WrappingBuffer *wrappingBuffer);
+ void getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
int32_t getEmptyFramesAvailable();
+
+ void getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+
int32_t getFullFramesAvailable();
+ void advanceReadIndex(int32_t deltaFrames);
+
void advanceWriteIndex(int32_t deltaFrames);
/**
* Set the read index in the downData queue.
* This is needed if the reader is not updating the index itself.
*/
- void setDownDataReadCounter(android::fifo_counter_t framesRead);
- android::fifo_counter_t getDownDataReadCounter();
+ void setDataReadCounter(android::fifo_counter_t framesRead);
- void setDownDataWriteCounter(android::fifo_counter_t framesWritten);
- android::fifo_counter_t getDownDataWriteCounter();
+ android::fifo_counter_t getDataReadCounter();
+
+ void setDataWriteCounter(android::fifo_counter_t framesWritten);
+
+ android::fifo_counter_t getDataWriteCounter();
/**
* The result is not valid until after configure() is called.
*
* @return true if the output buffer read position is not updated, eg. DMA
*/
- bool isOutputFreeRunning() const { return mOutputFreeRunning; }
+ bool isFreeRunning() const { return mFreeRunning; }
int32_t setBufferSizeInFrames(int32_t requestedFrames,
int32_t *actualFrames);
@@ -86,8 +93,8 @@
private:
android::FifoBuffer *mUpCommandQueue;
- android::FifoBuffer *mDownDataQueue;
- bool mOutputFreeRunning;
+ android::FifoBuffer *mDataQueue;
+ bool mFreeRunning;
android::fifo_counter_t mDataReadCounter; // only used if free-running
android::fifo_counter_t mDataWriteCounter; // only used if free-running
};
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 143d4b7..f71404d 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -14,7 +14,9 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudio"
+// This file is used in both client and server processes.
+// This is needed to make sense of the logs more easily.
+#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -41,8 +43,6 @@
#include "AudioStreamInternal.h"
-#define LOG_TIMESTAMPS 0
-
using android::String16;
using android::Mutex;
using android::WrappingBuffer;
@@ -54,10 +54,7 @@
// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4
-//static int64_t s_logCounter = 0;
-//#define MYLOG_CONDITION (mInService == true && s_logCounter++ < 500)
-//#define MYLOG_CONDITION (s_logCounter++ < 500000)
-#define MYLOG_CONDITION (1)
+#define LOG_TIMESTAMPS 0
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
: AudioStream()
@@ -84,7 +81,7 @@
}
// We have to do volume scaling. So we prefer FLOAT format.
- if (getFormat() == AAUDIO_UNSPECIFIED) {
+ if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
setFormat(AAUDIO_FORMAT_PCM_FLOAT);
}
// Request FLOAT for the shared mixer.
@@ -106,7 +103,7 @@
mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
if (mServiceStreamHandle < 0) {
result = mServiceStreamHandle;
- ALOGE("AudioStreamInternal.open(): %s openStream() returned %d", getLocationName(), result);
+ ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
} else {
result = configuration.validate();
if (result != AAUDIO_OK) {
@@ -123,8 +120,6 @@
result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
if (result != AAUDIO_OK) {
- ALOGE("AudioStreamInternal.open(): %s getStreamDescriptor returns %d",
- getLocationName(), result);
mServiceInterface.closeStream(mServiceStreamHandle);
return result;
}
@@ -132,7 +127,6 @@
// resolve parcelable into a descriptor
result = mEndPointParcelable.resolve(&mEndpointDescriptor);
if (result != AAUDIO_OK) {
- ALOGE("AudioStreamInternal.open(): resolve() returns %d", result);
mServiceInterface.closeStream(mServiceStreamHandle);
return result;
}
@@ -140,11 +134,9 @@
// Configure endpoint based on descriptor.
mAudioEndpoint.configure(&mEndpointDescriptor);
- mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
- int32_t capacity = mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames;
+ mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+ int32_t capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.open() %s framesPerBurst = %d, capacity = %d",
- getLocationName(), mFramesPerBurst, capacity);
// Validate result from server.
if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
ALOGE("AudioStream::open(): framesPerBurst out of range = %d", mFramesPerBurst);
@@ -188,12 +180,12 @@
}
aaudio_result_t AudioStreamInternal::close() {
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
+ ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
mServiceStreamHandle);
if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
// Don't close a stream while it is running.
aaudio_stream_state_t currentState = getState();
- if (isPlaying()) {
+ if (isActive()) {
requestStop();
aaudio_stream_state_t nextState;
int64_t timeoutNanoseconds = MIN_TIMEOUT_NANOS;
@@ -217,53 +209,6 @@
}
-// Render audio in the application callback and then write the data to the stream.
-void *AudioStreamInternal::callbackLoop() {
- aaudio_result_t result = AAUDIO_OK;
- aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
- AAudioStream_dataCallback appCallback = getDataCallbackProc();
- if (appCallback == nullptr) return NULL;
-
- // result might be a frame count
- while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
- // Call application using the AAudio callback interface.
- callbackResult = (*appCallback)(
- (AAudioStream *) this,
- getDataCallbackUserData(),
- mCallbackBuffer,
- mCallbackFrames);
-
- if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
- // Write audio data to stream.
- int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
-
- // This is a BLOCKING WRITE!
- result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
- if ((result != mCallbackFrames)) {
- ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
- if (result >= 0) {
- // Only wrote some of the frames requested. Must have timed out.
- result = AAUDIO_ERROR_TIMEOUT;
- }
- if (getErrorCallbackProc() != nullptr) {
- (*getErrorCallbackProc())(
- (AAudioStream *) this,
- getErrorCallbackUserData(),
- result);
- }
- break;
- }
- } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
- ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
- break;
- }
- }
-
- ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
- result, (int) isPlaying());
- return NULL;
-}
-
static void *aaudio_callback_thread_proc(void *context)
{
AudioStreamInternal *stream = (AudioStreamInternal *)context;
@@ -278,14 +223,13 @@
aaudio_result_t AudioStreamInternal::requestStart()
{
int64_t startTime;
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): start()");
+ ALOGD("AudioStreamInternal(): start()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
startTime = AudioClock::getNanoseconds();
mClockModel.start(startTime);
- processTimestamp(0, startTime);
setState(AAUDIO_STREAM_STATE_STARTING);
aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);;
@@ -313,11 +257,15 @@
return timeoutNanoseconds;
}
+int64_t AudioStreamInternal::calculateReasonableTimeout() {
+ return calculateReasonableTimeout(getFramesPerBurst());
+}
+
aaudio_result_t AudioStreamInternal::stopCallback()
{
if (isDataCallbackActive()) {
mCallbackEnabled.store(false);
- return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
+ return joinThread(NULL);
} else {
return AAUDIO_OK;
}
@@ -338,18 +286,15 @@
aaudio_result_t AudioStreamInternal::requestPause()
{
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestPause()", getLocationName());
aaudio_result_t result = stopCallback();
if (result != AAUDIO_OK) {
return result;
}
result = requestPauseInternal();
- ALOGD("AudioStreamInternal(): requestPause() returns %d", result);
return result;
}
aaudio_result_t AudioStreamInternal::requestFlush() {
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): requestFlush()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
mServiceStreamHandle);
@@ -360,10 +305,11 @@
return mServiceInterface.flushStream(mServiceStreamHandle);
}
+// TODO for Play only
void AudioStreamInternal::onFlushFromServer() {
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
- int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
- int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+ ALOGD("AudioStreamInternal(): onFlushFromServer()");
+ int64_t readCounter = mAudioEndpoint.getDataReadCounter();
+ int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
// Bump offset so caller does not see the retrograde motion in getFramesRead().
int64_t framesFlushed = writeCounter - readCounter;
@@ -371,7 +317,7 @@
// Flush written frames by forcing writeCounter to readCounter.
// This is because we cannot move the read counter in the hardware.
- mAudioEndpoint.setDownDataWriteCounter(readCounter);
+ mAudioEndpoint.setDataWriteCounter(readCounter);
}
aaudio_result_t AudioStreamInternal::requestStopInternal()
@@ -389,13 +335,11 @@
aaudio_result_t AudioStreamInternal::requestStop()
{
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestStop()", getLocationName());
aaudio_result_t result = stopCallback();
if (result != AAUDIO_OK) {
return result;
}
result = requestStopInternal();
- ALOGD("AudioStreamInternal(): requestStop() returns %d", result);
return result;
}
@@ -422,7 +366,8 @@
// TODO Generate in server and pass to client. Return latest.
int64_t time = AudioClock::getNanoseconds();
*framePosition = mClockModel.convertTimeToPosition(time);
- *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
+ // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
+ *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
return AAUDIO_OK;
}
@@ -434,21 +379,21 @@
}
#if LOG_TIMESTAMPS
-static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
+static void AudioStreamInternal_logTimestamp(AAudioServiceMessage &command) {
static int64_t oldPosition = 0;
static int64_t oldTime = 0;
int64_t framePosition = command.timestamp.position;
int64_t nanoTime = command.timestamp.timestamp;
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
+ ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %lld",
(long long) framePosition,
(long long) nanoTime);
int64_t nanosDelta = nanoTime - oldTime;
if (nanosDelta > 0 && oldTime > 0) {
int64_t framesDelta = framePosition - oldPosition;
int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
+ ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+ ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+ ALOGD("AudioStreamInternal() - measured rate = %lld", (long long) rate);
}
oldPosition = framePosition;
oldTime = nanoTime;
@@ -456,38 +401,43 @@
#endif
aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
- int64_t framePosition = 0;
#if LOG_TIMESTAMPS
- AudioStreamInternal_LogTimestamp(command);
+ AudioStreamInternal_logTimestamp(*message);
#endif
- framePosition = message->timestamp.position;
- processTimestamp(framePosition, message->timestamp.timestamp);
+ processTimestamp(message->timestamp.position, message->timestamp.timestamp);
return AAUDIO_OK;
}
aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
aaudio_result_t result = AAUDIO_OK;
- ALOGD_IF(MYLOG_CONDITION, "processCommands() got event %d", message->event.event);
switch (message->event.event) {
case AAUDIO_SERVICE_EVENT_STARTED:
- ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
- setState(AAUDIO_STREAM_STATE_STARTED);
+ ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+ if (getState() == AAUDIO_STREAM_STATE_STARTING) {
+ setState(AAUDIO_STREAM_STATE_STARTED);
+ }
break;
case AAUDIO_SERVICE_EVENT_PAUSED:
- ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
- setState(AAUDIO_STREAM_STATE_PAUSED);
+ ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+ if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
+ setState(AAUDIO_STREAM_STATE_PAUSED);
+ }
break;
case AAUDIO_SERVICE_EVENT_STOPPED:
- ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
- setState(AAUDIO_STREAM_STATE_STOPPED);
+ ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
+ if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
+ setState(AAUDIO_STREAM_STATE_STOPPED);
+ }
break;
case AAUDIO_SERVICE_EVENT_FLUSHED:
- ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
- setState(AAUDIO_STREAM_STATE_FLUSHED);
- onFlushFromServer();
+ ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+ if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
+ setState(AAUDIO_STREAM_STATE_FLUSHED);
+ onFlushFromServer();
+ }
break;
case AAUDIO_SERVICE_EVENT_CLOSED:
- ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+ ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
setState(AAUDIO_STREAM_STATE_CLOSED);
break;
case AAUDIO_SERVICE_EVENT_DISCONNECTED:
@@ -497,7 +447,7 @@
break;
case AAUDIO_SERVICE_EVENT_VOLUME:
mVolumeRamp.setTarget((float) message->event.dataDouble);
- ALOGD_IF(MYLOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f",
+ ALOGD("processCommands() AAUDIO_SERVICE_EVENT_VOLUME %lf",
message->event.dataDouble);
break;
default:
@@ -513,7 +463,7 @@
aaudio_result_t result = AAUDIO_OK;
while (result == AAUDIO_OK) {
- //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
+ //ALOGD("AudioStreamInternal::processCommands() - looping, %d", result);
AAudioServiceMessage message;
if (mAudioEndpoint.readUpCommand(&message) != 1) {
break; // no command this time, no problem
@@ -537,15 +487,15 @@
return result;
}
-// Write the data, block if needed and timeoutMillis > 0
-aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
- int64_t timeoutNanoseconds)
+// Read or write the data, block if needed and timeoutMillis > 0
+aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
+ int64_t timeoutNanoseconds)
{
const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
ATRACE_BEGIN(traceName);
aaudio_result_t result = AAUDIO_OK;
int32_t loopCount = 0;
- uint8_t* source = (uint8_t*)buffer;
+ uint8_t* audioData = (uint8_t*)buffer;
int64_t currentTimeNanos = AudioClock::getNanoseconds();
int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
int32_t framesLeft = numFrames;
@@ -556,19 +506,19 @@
ATRACE_INT(traceName, fullFrames);
}
- // Write until all the data has been written or until a timeout occurs.
+ // Loop until all the data has been processed or until a timeout occurs.
while (framesLeft > 0) {
- // The call to writeNow() will not block. It will just write as much as it can.
+ // The call to processDataNow() will not block. It will just read as much as it can.
int64_t wakeTimeNanos = 0;
- aaudio_result_t framesWritten = writeNow(source, framesLeft,
- currentTimeNanos, &wakeTimeNanos);
- if (framesWritten < 0) {
- ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
- result = framesWritten;
+ aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
+ currentTimeNanos, &wakeTimeNanos);
+ if (framesProcessed < 0) {
+ ALOGE("AudioStreamInternal::processData() loop: framesProcessed = %d", framesProcessed);
+ result = framesProcessed;
break;
}
- framesLeft -= (int32_t) framesWritten;
- source += framesWritten * getBytesPerFrame();
+ framesLeft -= (int32_t) framesProcessed;
+ audioData += framesProcessed * getBytesPerFrame();
// Should we block?
if (timeoutNanoseconds == 0) {
@@ -580,8 +530,13 @@
}
if (wakeTimeNanos > deadlineNanos) {
// If we time out, just return the framesWritten so far.
- ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
+ // TODO remove after we fix the deadline bug
+ ALOGE("AudioStreamInternal::processData(): timed out after %lld nanos",
(long long) timeoutNanoseconds);
+ ALOGE("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
+ (long long) wakeTimeNanos, (long long) deadlineNanos);
+ ALOGE("AudioStreamInternal::processData(): past deadline by %d micros",
+ (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
break;
}
@@ -591,179 +546,14 @@
}
}
- // return error or framesWritten
+ // return error or framesProcessed
(void) loopCount;
ATRACE_END();
return (result < 0) ? result : numFrames - framesLeft;
}
-// Write as much data as we can without blocking.
-aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
- int64_t currentNanoTime, int64_t *wakeTimePtr) {
- aaudio_result_t result = processCommands();
- if (result != AAUDIO_OK) {
- return result;
- }
-
- if (mAudioEndpoint.isOutputFreeRunning()) {
- //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
- // Update data queue based on the timing model.
- int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
- mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
- }
- // TODO else query from endpoint cuz set by actual reader, maybe
-
- // If the read index passed the write index then consider it an underrun.
- if (mAudioEndpoint.getFullFramesAvailable() < 0) {
- mXRunCount++;
- }
-
- // Write some data to the buffer.
- //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
- int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
- //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
- // numFrames, framesWritten);
-
- // Calculate an ideal time to wake up.
- if (wakeTimePtr != nullptr && framesWritten >= 0) {
- // By default wake up a few milliseconds from now. // TODO review
- int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
- aaudio_stream_state_t state = getState();
- //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
- // AAudio_convertStreamStateToText(state));
- switch (state) {
- case AAUDIO_STREAM_STATE_OPEN:
- case AAUDIO_STREAM_STATE_STARTING:
- if (framesWritten != 0) {
- // Don't wait to write more data. Just prime the buffer.
- wakeTime = currentNanoTime;
- }
- break;
- case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
- {
- uint32_t burstSize = mFramesPerBurst;
- if (burstSize < 32) {
- burstSize = 32; // TODO review
- }
-
- uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
- wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
- }
- break;
- default:
- break;
- }
- *wakeTimePtr = wakeTime;
-
- }
-// ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
-// (unsigned long long)currentNanoTime,
-// (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
-// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
- return framesWritten;
-}
-
-
-aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
- int32_t numFrames) {
- // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)",
- // buffer, numFrames);
- WrappingBuffer wrappingBuffer;
- uint8_t *source = (uint8_t *) buffer;
- int32_t framesLeft = numFrames;
-
- mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);
-
- // Read data in one or two parts.
- int partIndex = 0;
- while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
- int32_t framesToWrite = framesLeft;
- int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- if (framesAvailable > 0) {
- if (framesToWrite > framesAvailable) {
- framesToWrite = framesAvailable;
- }
- int32_t numBytes = getBytesPerFrame() * framesToWrite;
- int32_t numSamples = framesToWrite * getSamplesPerFrame();
- // Data conversion.
- float levelFrom;
- float levelTo;
- bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
- &levelFrom, &levelTo);
- // The formats are validated when the stream is opened so we do not have to
- // check for illegal combinations here.
- if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
- if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- AAudio_linearRamp(
- (const float *) source,
- (float *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
- if (ramping) {
- AAudioConvert_floatToPcm16(
- (const float *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_floatToPcm16(
- (const float *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- numSamples,
- levelTo);
- }
- }
- } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
- if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- if (ramping) {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source,
- (float *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source,
- (float *) wrappingBuffer.data[partIndex],
- numSamples,
- levelTo);
- }
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
- AAudio_linearRamp(
- (const int16_t *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- }
- }
- source += numBytes;
- framesLeft -= framesToWrite;
- } else {
- break;
- }
- partIndex++;
- }
- int32_t framesWritten = numFrames - framesLeft;
- mAudioEndpoint.advanceWriteIndex(framesWritten);
-
- if (framesWritten > 0) {
- incrementFramesWritten(framesWritten);
- }
- // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
- return framesWritten;
-}
-
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
- mClockModel.processTimestamp( position, time);
+ mClockModel.processTimestamp(position, time);
}
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
@@ -775,8 +565,7 @@
}
aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::setBufferSize() %s req = %d => %d",
- getLocationName(), requestedFrames, actualFrames);
+ ALOGD("AudioStreamInternal::setBufferSize() req = %d => %d", requestedFrames, actualFrames);
if (result < 0) {
return result;
} else {
@@ -784,40 +573,18 @@
}
}
-int32_t AudioStreamInternal::getBufferSize() const
-{
+int32_t AudioStreamInternal::getBufferSize() const {
return mAudioEndpoint.getBufferSizeInFrames();
}
-int32_t AudioStreamInternal::getBufferCapacity() const
-{
+int32_t AudioStreamInternal::getBufferCapacity() const {
return mAudioEndpoint.getBufferCapacityInFrames();
}
-int32_t AudioStreamInternal::getFramesPerBurst() const
-{
- return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+int32_t AudioStreamInternal::getFramesPerBurst() const {
+ return mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
}
-int64_t AudioStreamInternal::getFramesRead()
-{
- int64_t framesRead =
- mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
- + mFramesOffsetFromService;
- // Prevent retrograde motion.
- if (framesRead < mLastFramesRead) {
- framesRead = mLastFramesRead;
- } else {
- mLastFramesRead = framesRead;
- }
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
- return framesRead;
-}
-
-int64_t AudioStreamInternal::getFramesWritten()
-{
- int64_t getFramesWritten = mAudioEndpoint.getDownDataWriteCounter()
- + mFramesOffsetFromService;
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten);
- return getFramesWritten;
+aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
+ return AudioStream::joinThread(returnArg, calculateReasonableTimeout(getFramesPerBurst()));
}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index ee602c1..a11f309 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -37,7 +37,7 @@
class AudioStreamInternal : public AudioStream {
public:
- AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService = false);
+ AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService);
virtual ~AudioStreamInternal();
// =========== Begin ABSTRACT methods ===========================
@@ -60,10 +60,6 @@
aaudio_result_t close() override;
- aaudio_result_t write(const void *buffer,
- int32_t numFrames,
- int64_t timeoutNanoseconds) override;
-
aaudio_result_t setBufferSize(int32_t requestedFrames) override;
int32_t getBufferSize() const override;
@@ -72,9 +68,6 @@
int32_t getFramesPerBurst() const override;
- int64_t getFramesRead() override;
- int64_t getFramesWritten() override;
-
int32_t getXRunCount() const override {
return mXRunCount;
}
@@ -83,16 +76,37 @@
aaudio_result_t unregisterThread() override;
+ aaudio_result_t joinThread(void** returnArg);
+
// Called internally from 'C'
- void *callbackLoop();
+ virtual void *callbackLoop() = 0;
bool isMMap() override {
return true;
}
+ // Calculate timeout based on framesPerBurst
+ int64_t calculateReasonableTimeout();
+
protected:
+ aaudio_result_t processData(void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds);
+
+/**
+ * Low level data processing that will not block. It will just read or write as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames processed or a negative error code.
+ */
+ virtual aaudio_result_t processDataNow(void *buffer,
+ int32_t numFrames,
+ int64_t currentTimeNanos,
+ int64_t *wakeTimePtr) = 0;
+
aaudio_result_t processCommands();
aaudio_result_t requestPauseInternal();
@@ -100,17 +114,6 @@
aaudio_result_t stopCallback();
-/**
- * Low level write that will not block. It will just write as much as it can.
- *
- * It passed back a recommended time to wake up if wakeTimePtr is not NULL.
- *
- * @return the number of frames written or a negative error code.
- */
- aaudio_result_t writeNow(const void *buffer,
- int32_t numFrames,
- int64_t currentTimeNanos,
- int64_t *wakeTimePtr);
void onFlushFromServer();
@@ -121,6 +124,24 @@
// Calculate timeout for an operation involving framesPerOperation.
int64_t calculateReasonableTimeout(int32_t framesPerOperation);
+ aaudio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+
+ IsochronousClockModel mClockModel; // timing model for chasing the HAL
+
+ AudioEndpoint mAudioEndpoint; // source for reads or sink for writes
+ aaudio_handle_t mServiceStreamHandle; // opaque handle returned from service
+
+ int32_t mFramesPerBurst; // frames per HAL transfer
+ int32_t mXRunCount = 0; // how many underrun events?
+
+ LinearRamp mVolumeRamp;
+
+ // Offset from underlying frame position.
+ int64_t mFramesOffsetFromService = 0; // offset for timestamps
+
+ uint8_t *mCallbackBuffer = nullptr;
+ int32_t mCallbackFrames = 0;
+
private:
/*
* Asynchronous write with data conversion.
@@ -130,38 +151,16 @@
*/
aaudio_result_t writeNowWithConversion(const void *buffer,
int32_t numFrames);
- void processTimestamp(uint64_t position, int64_t time);
-
-
- const char *getLocationName() const {
- return mInService ? "SERVICE" : "CLIENT";
- }
// Adjust timing model based on timestamp from service.
-
- IsochronousClockModel mClockModel; // timing model for chasing the HAL
- AudioEndpoint mAudioEndpoint; // sink for writes
- aaudio_handle_t mServiceStreamHandle; // opaque handle returned from service
+ void processTimestamp(uint64_t position, int64_t time);
AudioEndpointParcelable mEndPointParcelable; // description of the buffers filled by service
EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
-
- aaudio_audio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
-
- uint8_t *mCallbackBuffer = nullptr;
- int32_t mCallbackFrames = 0;
-
- // Offset from underlying frame position.
- int64_t mFramesOffsetFromService = 0; // offset for timestamps
- int64_t mLastFramesRead = 0; // used to prevent retrograde motion
- int32_t mFramesPerBurst; // frames per HAL transfer
- int32_t mXRunCount = 0; // how many underrun events?
- LinearRamp mVolumeRamp;
-
AAudioServiceInterface &mServiceInterface; // abstract interface to the service
// The service uses this for SHARED mode.
- bool mInService = false; // Are running in the client or the service?
+ bool mInService = false; // Is this running in the client or the service?
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
new file mode 100644
index 0000000..93693bd
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <aaudio/AAudio.h>
+
+#include "client/AudioStreamInternalCapture.h"
+#include "utility/AudioClock.h"
+
+using android::WrappingBuffer;
+
+using namespace aaudio;
+
+AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
+ bool inService)
+ : AudioStreamInternal(serviceInterface, inService) {
+
+}
+
+AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
+
+
+// Read the data, block if needed and timeoutNanoseconds > 0
+aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
+ int64_t timeoutNanoseconds)
+{
+ return processData(buffer, numFrames, timeoutNanoseconds);
+}
+
+// Read as much data as we can without blocking.
+aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
+ int64_t currentNanoTime, int64_t *wakeTimePtr) {
+ aaudio_result_t result = processCommands();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ if (mAudioEndpoint.isFreeRunning()) {
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
+ // Update data queue based on the timing model.
+ int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+ // TODO refactor, maybe use setRemoteCounter()
+ mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
+ }
+
+ // If the write index passed the read index then consider it an overrun.
+ if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
+ mXRunCount++;
+ }
+
+ // Read some data from the buffer.
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
+ int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
+ // numFrames, framesProcessed);
+
+ // Calculate an ideal time to wake up.
+ if (wakeTimePtr != nullptr && framesProcessed >= 0) {
+ // By default wake up a few milliseconds from now. // TODO review
+ int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
+ aaudio_stream_state_t state = getState();
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
+ // AAudio_convertStreamStateToText(state));
+ switch (state) {
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_STARTING:
+ break;
+ case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ {
+ uint32_t burstSize = mFramesPerBurst;
+ if (burstSize < 32) {
+ burstSize = 32; // TODO review
+ }
+
+ uint64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + burstSize;
+ wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ }
+ break;
+ default:
+ break;
+ }
+ *wakeTimePtr = wakeTime;
+
+ }
+// ALOGD("AudioStreamInternalCapture::readNow finished: now = %llu, read# = %llu, wrote# = %llu",
+// (unsigned long long)currentNanoTime,
+// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
+// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+ return framesProcessed;
+}
+
+aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
+ int32_t numFrames) {
+ // ALOGD("AudioStreamInternalCapture::readNowWithConversion(%p, %d)",
+ // buffer, numFrames);
+ WrappingBuffer wrappingBuffer;
+ uint8_t *destination = (uint8_t *) buffer;
+ int32_t framesLeft = numFrames;
+
+ mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);
+
+ // Read data in one or two parts.
+ for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
+ int32_t framesToProcess = framesLeft;
+ int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailable <= 0) break;
+
+ if (framesToProcess > framesAvailable) {
+ framesToProcess = framesAvailable;
+ }
+
+ int32_t numBytes = getBytesPerFrame() * framesToProcess;
+ int32_t numSamples = framesToProcess * getSamplesPerFrame();
+
+ // TODO factor this out into a utility function
+ if (mDeviceFormat == getFormat()) {
+ memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
+ && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) wrappingBuffer.data[partIndex],
+ (float *) destination,
+ numSamples,
+ 1.0f);
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
+ && getFormat() == AAUDIO_FORMAT_PCM_I16) {
+ AAudioConvert_floatToPcm16(
+ (const float *) wrappingBuffer.data[partIndex],
+ (int16_t *) destination,
+ numSamples,
+ 1.0f);
+ } else {
+ ALOGE("Format conversion not supported!");
+ return AAUDIO_ERROR_INVALID_FORMAT;
+ }
+ destination += numBytes;
+ framesLeft -= framesToProcess;
+ }
+
+ int32_t framesProcessed = numFrames - framesLeft;
+ mAudioEndpoint.advanceReadIndex(framesProcessed);
+ incrementFramesRead(framesProcessed);
+
+ //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
+ return framesProcessed;
+}
+
+int64_t AudioStreamInternalCapture::getFramesWritten()
+{
+ int64_t frames =
+ mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+ + mFramesOffsetFromService;
+ // Prevent retrograde motion.
+ if (frames < mLastFramesWritten) {
+ frames = mLastFramesWritten;
+ } else {
+ mLastFramesWritten = frames;
+ }
+ //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld", (long long)frames);
+ return frames;
+}
+
+int64_t AudioStreamInternalCapture::getFramesRead()
+{
+ int64_t frames = mAudioEndpoint.getDataWriteCounter()
+ + mFramesOffsetFromService;
+ //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
+ return frames;
+}
+
+// Read data from the stream and pass it to the callback for processing.
+void *AudioStreamInternalCapture::callbackLoop() {
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+ AAudioStream_dataCallback appCallback = getDataCallbackProc();
+ if (appCallback == nullptr) return NULL;
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
+
+ // Read audio data from stream.
+ int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
+
+ // This is a BLOCKING READ!
+ result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
+ if ((result != mCallbackFrames)) {
+ ALOGE("AudioStreamInternalCapture(): callbackLoop: read() returned %d", result);
+ if (result >= 0) {
+ // Only read some of the frames requested. Must have timed out.
+ result = AAUDIO_ERROR_TIMEOUT;
+ }
+ AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
+ if (errorCallback != nullptr) {
+ (*errorCallback)(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ result);
+ }
+ break;
+ }
+
+ // Call application using the AAudio callback interface.
+ callbackResult = (*appCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ mCallbackBuffer,
+ mCallbackFrames);
+
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+ ALOGD("AudioStreamInternalCapture(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ break;
+ }
+ }
+
+ ALOGD("AudioStreamInternalCapture(): callbackLoop() exiting, result = %d, isActive() = %d",
+ result, (int) isActive());
+ return NULL;
+}
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
new file mode 100644
index 0000000..17f37e8
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_CAPTURE_H
+#define ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_CAPTURE_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceInterface.h"
+#include "client/AudioStreamInternal.h"
+
+using android::sp;
+using android::IAAudioService;
+
+namespace aaudio {
+
+class AudioStreamInternalCapture : public AudioStreamInternal {
+public:
+ AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface, bool inService = false);
+ virtual ~AudioStreamInternalCapture();
+
+ aaudio_result_t read(void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds) override;
+
+ int64_t getFramesRead() override;
+ int64_t getFramesWritten() override;
+
+ void *callbackLoop() override;
+
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_INPUT;
+ }
+protected:
+
+/**
+ * Low level data processing that will not block. It will just read or write as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames processed or a negative error code.
+ */
+ aaudio_result_t processDataNow(void *buffer,
+ int32_t numFrames,
+ int64_t currentTimeNanos,
+ int64_t *wakeTimePtr) override;
+
+private:
+ /*
+ * Asynchronous read with data conversion.
+ * @param buffer
+ * @param numFrames
+ * @return frames read or negative error
+ */
+ aaudio_result_t readNowWithConversion(void *buffer, int32_t numFrames);
+
+ int64_t mLastFramesWritten = 0; // used to prevent retrograde motion
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_CAPTURE_H
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
new file mode 100644
index 0000000..fc9766f
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "client/AudioStreamInternalPlay.h"
+#include "utility/AudioClock.h"
+
+using android::WrappingBuffer;
+
+using namespace aaudio;
+
+AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
+ bool inService)
+ : AudioStreamInternal(serviceInterface, inService) {
+
+}
+
+AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
+
+
+// Write the data, block if needed and timeoutNanoseconds > 0
+aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
+ int64_t timeoutNanoseconds)
+
+{
+ return processData((void *)buffer, numFrames, timeoutNanoseconds);
+}
+
+// Write as much data as we can without blocking.
+aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
+ int64_t currentNanoTime, int64_t *wakeTimePtr) {
+ aaudio_result_t result = processCommands();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ if (mAudioEndpoint.isFreeRunning()) {
+ //ALOGD("AudioStreamInternal::processDataNow() - update read counter");
+ // Update data queue based on the timing model.
+ int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+ mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
+ }
+ // TODO else query from endpoint cuz set by actual reader, maybe
+
+ // If the read index passed the write index then consider it an underrun.
+ if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+ mXRunCount++;
+ }
+
+ // Write some data to the buffer.
+ //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
+ int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
+ //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
+ // numFrames, framesWritten);
+
+ // Calculate an ideal time to wake up.
+ if (wakeTimePtr != nullptr && framesWritten >= 0) {
+ // By default wake up a few milliseconds from now. // TODO review
+ int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
+ aaudio_stream_state_t state = getState();
+ //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
+ // AAudio_convertStreamStateToText(state));
+ switch (state) {
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_STARTING:
+ if (framesWritten != 0) {
+ // Don't wait to write more data. Just prime the buffer.
+ wakeTime = currentNanoTime;
+ }
+ break;
+ case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ {
+ uint32_t burstSize = mFramesPerBurst;
+ if (burstSize < 32) {
+ burstSize = 32; // TODO review
+ }
+
+ uint64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + burstSize;
+ wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ }
+ break;
+ default:
+ break;
+ }
+ *wakeTimePtr = wakeTime;
+
+ }
+// ALOGD("AudioStreamInternal::processDataNow finished: now = %llu, read# = %llu, wrote# = %llu",
+// (unsigned long long)currentNanoTime,
+// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
+// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+ return framesWritten;
+}
+
+
+aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
+ int32_t numFrames) {
+ // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
+ // buffer, numFrames);
+ WrappingBuffer wrappingBuffer;
+ uint8_t *source = (uint8_t *) buffer;
+ int32_t framesLeft = numFrames;
+
+ mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
+
+ // Read data in one or two parts.
+ int partIndex = 0;
+ while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+ int32_t framesToWrite = framesLeft;
+ int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailable > 0) {
+ if (framesToWrite > framesAvailable) {
+ framesToWrite = framesAvailable;
+ }
+ int32_t numBytes = getBytesPerFrame() * framesToWrite;
+ int32_t numSamples = framesToWrite * getSamplesPerFrame();
+ // Data conversion.
+ float levelFrom;
+ float levelTo;
+ bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
+ &levelFrom, &levelTo);
+ // The formats are validated when the stream is opened so we do not have to
+ // check for illegal combinations here.
+ // TODO factor this out into a utility function
+ if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ AAudio_linearRamp(
+ (const float *) source,
+ (float *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+ if (ramping) {
+ AAudioConvert_floatToPcm16(
+ (const float *) source,
+ (int16_t *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_floatToPcm16(
+ (const float *) source,
+ (int16_t *) wrappingBuffer.data[partIndex],
+ numSamples,
+ levelTo);
+ }
+ }
+ } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
+ if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (ramping) {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source,
+ (float *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source,
+ (float *) wrappingBuffer.data[partIndex],
+ numSamples,
+ levelTo);
+ }
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+ AAudio_linearRamp(
+ (const int16_t *) source,
+ (int16_t *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ }
+ }
+ source += numBytes;
+ framesLeft -= framesToWrite;
+ } else {
+ break;
+ }
+ partIndex++;
+ }
+ int32_t framesWritten = numFrames - framesLeft;
+ mAudioEndpoint.advanceWriteIndex(framesWritten);
+
+ if (framesWritten > 0) {
+ incrementFramesWritten(framesWritten);
+ }
+ // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
+ return framesWritten;
+}
+
+
+int64_t AudioStreamInternalPlay::getFramesRead()
+{
+ int64_t framesRead =
+ mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+ + mFramesOffsetFromService;
+ // Prevent retrograde motion.
+ if (framesRead < mLastFramesRead) {
+ framesRead = mLastFramesRead;
+ } else {
+ mLastFramesRead = framesRead;
+ }
+ ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+ return framesRead;
+}
+
+int64_t AudioStreamInternalPlay::getFramesWritten()
+{
+ int64_t getFramesWritten = mAudioEndpoint.getDataWriteCounter()
+ + mFramesOffsetFromService;
+ ALOGD("AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten);
+ return getFramesWritten;
+}
+
+
+// Render audio in the application callback and then write the data to the stream.
+void *AudioStreamInternalPlay::callbackLoop() {
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+ AAudioStream_dataCallback appCallback = getDataCallbackProc();
+ if (appCallback == nullptr) return NULL;
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
+ // Call application using the AAudio callback interface.
+ callbackResult = (*appCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ mCallbackBuffer,
+ mCallbackFrames);
+
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+ // Write audio data to stream.
+ int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
+
+ // This is a BLOCKING WRITE!
+ result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
+ if ((result != mCallbackFrames)) {
+ ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
+ if (result >= 0) {
+ // Only wrote some of the frames requested. Must have timed out.
+ result = AAUDIO_ERROR_TIMEOUT;
+ }
+ AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
+ if (errorCallback != nullptr) {
+ (*errorCallback)(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ result);
+ }
+ break;
+ }
+ } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+ ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ break;
+ }
+ }
+
+ ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
+ result, (int) isActive());
+ return NULL;
+}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
new file mode 100644
index 0000000..b043f67
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_PLAY_H
+#define ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_PLAY_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceInterface.h"
+#include "client/AudioStreamInternal.h"
+
+using android::sp;
+using android::IAAudioService;
+
+namespace aaudio {
+
+class AudioStreamInternalPlay : public AudioStreamInternal {
+public:
+ AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface, bool inService = false);
+ virtual ~AudioStreamInternalPlay();
+
+ aaudio_result_t write(const void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds) override;
+
+ int64_t getFramesRead() override;
+ int64_t getFramesWritten() override;
+
+ void *callbackLoop() override;
+
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_OUTPUT;
+ }
+
+protected:
+/**
+ * Low level write that will not block. It will just write as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames written or a negative error code.
+ */
+ aaudio_result_t processDataNow(void *buffer,
+ int32_t numFrames,
+ int64_t currentTimeNanos,
+ int64_t *wakeTimePtr) override;
+private:
+ /*
+ * Asynchronous write with data conversion.
+ * @param buffer
+ * @param numFrames
+ * @return frames written or negative error
+ */
+ aaudio_result_t writeNowWithConversion(const void *buffer,
+ int32_t numFrames);
+
+ int64_t mLastFramesRead = 0; // used to prevent retrograde motion
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_PLAY_H
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 21e3e70..1de33bb 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -41,41 +41,40 @@
IsochronousClockModel::~IsochronousClockModel() {
}
-void IsochronousClockModel::start(int64_t nanoTime)
-{
+void IsochronousClockModel::start(int64_t nanoTime) {
+ ALOGD("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
mMarkerNanoTime = nanoTime;
mState = STATE_STARTING;
}
-void IsochronousClockModel::stop(int64_t nanoTime)
-{
+void IsochronousClockModel::stop(int64_t nanoTime) {
+ ALOGD("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
mMarkerNanoTime = nanoTime;
mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
mState = STATE_STOPPED;
}
-void IsochronousClockModel::processTimestamp(int64_t framePosition,
- int64_t nanoTime) {
+void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
int64_t framesDelta = framePosition - mMarkerFramePosition;
int64_t nanosDelta = nanoTime - mMarkerNanoTime;
if (nanosDelta < 1000) {
return;
}
-// ALOGI("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
+// ALOGD("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
// (long long)mMarkerFramePosition,
// (long long)mMarkerNanoTime);
-// ALOGI("processTimestamp() - framePosition = %lld at nanoTime %llu",
+// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
// (long long)framePosition,
// (long long)nanoTime);
int64_t expectedNanosDelta = convertDeltaPositionToTime(framesDelta);
-// ALOGI("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
+// ALOGD("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
// (long long)expectedNanosDelta,
// (long long)nanosDelta);
-// ALOGI("processTimestamp() - mSampleRate = %d", mSampleRate);
-// ALOGI("processTimestamp() - mState = %d", mState);
+// ALOGD("processTimestamp() - mSampleRate = %d", mSampleRate);
+// ALOGD("processTimestamp() - mState = %d", mState);
switch (mState) {
case STATE_STOPPED:
break;
@@ -85,12 +84,12 @@
mState = STATE_SYNCING;
break;
case STATE_SYNCING:
- // This will handle a burst of rapid consumption in the beginning.
+ // This will handle a burst of rapid transfer at the beginning.
if (nanosDelta < expectedNanosDelta) {
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
} else {
- ALOGI("processTimestamp() - advance to STATE_RUNNING");
+// ALOGD("processTimestamp() - advance to STATE_RUNNING");
mState = STATE_RUNNING;
}
break;
@@ -101,15 +100,15 @@
// or we may be drifting due to a slow HW clock.
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
- ALOGV("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
- (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+// ALOGD("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
+// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
} else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
// Later than expected timestamp.
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
- ALOGV("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
- (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
- (int) (mMaxLatenessInNanos / 1000));
+// ALOGD("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
+// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
+// (int) (mMaxLatenessInNanos / 1000));
}
break;
default:
@@ -141,8 +140,7 @@
return (mSampleRate * nanosDelta) / AAUDIO_NANOS_PER_SECOND;
}
-int64_t IsochronousClockModel::convertPositionToTime(
- int64_t framePosition) const {
+int64_t IsochronousClockModel::convertPositionToTime(int64_t framePosition) const {
if (mState == STATE_STOPPED) {
return mMarkerNanoTime;
}
@@ -151,14 +149,13 @@
int64_t framesDelta = nextBurstPosition - mMarkerFramePosition;
int64_t nanosDelta = convertDeltaPositionToTime(framesDelta);
int64_t time = (int64_t) (mMarkerNanoTime + nanosDelta);
-// ALOGI("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
+// ALOGD("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
// (unsigned long long)framePosition,
// (unsigned long long)time);
return time;
}
-int64_t IsochronousClockModel::convertTimeToPosition(
- int64_t nanoTime) const {
+int64_t IsochronousClockModel::convertTimeToPosition(int64_t nanoTime) const {
if (mState == STATE_STOPPED) {
return mMarkerFramePosition;
}
@@ -167,10 +164,10 @@
int64_t nextBurstPosition = mMarkerFramePosition + framesDelta;
int64_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
int64_t position = nextBurstIndex * mFramesPerBurst;
-// ALOGI("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
+// ALOGD("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
// (unsigned long long)nanoTime,
// (unsigned long long)position);
-// ALOGI("IsochronousClockModel::convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
+// ALOGD("IsochronousClockModel::convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
// (long long) framesDelta, mFramesPerBurst);
return position;
}
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 59032d5..04bf2de 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -22,6 +22,7 @@
#include <pthread.h>
#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
#include "AudioStreamBuilder.h"
#include "AudioStream.h"
@@ -94,6 +95,13 @@
#undef AAUDIO_CASE_ENUM
+
+/******************************************
+ * Static globals.
+ */
+static aaudio_policy_t s_MMapPolicy = AAUDIO_UNSPECIFIED;
+
+
static AudioStream *convertAAudioStreamToAudioStream(AAudioStream* stream)
{
return (AudioStream*) stream;
@@ -157,7 +165,7 @@
}
AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
- aaudio_audio_format_t format)
+ aaudio_format_t format)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
streamBuilder->setFormat(format);
@@ -206,11 +214,12 @@
AAudioStream** streamPtr)
{
AudioStream *audioStream = nullptr;
- ALOGD("AAudioStreamBuilder_openStream() ----------------------------------------------");
+ // Please leave these logs because they are very helpful when debugging.
+ ALOGD("AAudioStreamBuilder_openStream() called ----------------------------------------");
AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
aaudio_result_t result = streamBuilder->build(&audioStream);
- ALOGD("AAudioStreamBuilder_openStream() returns %d -----------------------------------",
- result);
+ ALOGD("AAudioStreamBuilder_openStream() returns %d = %s for (%p) ----------------",
+ result, AAudio_convertResultToText(result), audioStream);
if (result == AAUDIO_OK) {
*streamPtr = (AAudioStream*) audioStream;
} else {
@@ -236,7 +245,6 @@
if (audioStream != nullptr) {
audioStream->close();
delete audioStream;
- ALOGD("AAudioStream_close() ----------------------------------------------");
return AAUDIO_OK;
}
return AAUDIO_ERROR_INVALID_HANDLE;
@@ -300,7 +308,6 @@
}
aaudio_result_t result = audioStream->read(buffer, numFrames, timeoutNanoseconds);
- // ALOGD("AAudioStream_read(): read returns %d", result);
return result;
}
@@ -316,7 +323,7 @@
}
// Don't allow writes when playing with a callback.
- if (audioStream->getDataCallbackProc() != nullptr && audioStream->isPlaying()) {
+ if (audioStream->getDataCallbackProc() != nullptr && audioStream->isActive()) {
ALOGE("Cannot write to a callback stream when running.");
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -328,7 +335,6 @@
}
aaudio_result_t result = audioStream->write(buffer, numFrames, timeoutNanoseconds);
- // ALOGD("AAudioStream_write(): write returns %d", result);
return result;
}
@@ -361,7 +367,7 @@
return audioStream->getState();
}
-AAUDIO_API aaudio_audio_format_t AAudioStream_getFormat(AAudioStream* stream)
+AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
return audioStream->getFormat();
@@ -456,3 +462,29 @@
return audioStream->getTimestamp(clockid, framePosition, timeNanoseconds);
}
+
+AAUDIO_API aaudio_policy_t AAudio_getMMapPolicy() {
+ return s_MMapPolicy;
+}
+
+AAUDIO_API aaudio_result_t AAudio_setMMapPolicy(aaudio_policy_t policy) {
+ aaudio_result_t result = AAUDIO_OK;
+ switch(policy) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_POLICY_NEVER:
+ case AAUDIO_POLICY_AUTO:
+ case AAUDIO_POLICY_ALWAYS:
+ s_MMapPolicy = policy;
+ break;
+ default:
+ result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ break;
+ }
+ return result;
+}
+
+AAUDIO_API bool AAudioStream_isMMapUsed(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->isMMap();
+}
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 7847661..03f1a5c 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -43,7 +43,6 @@
mSampleRate = builder.getSampleRate();
mDeviceId = builder.getDeviceId();
mFormat = builder.getFormat();
- mDirection = builder.getDirection();
mSharingMode = builder.getSharingMode();
mSharingModeMatchRequired = builder.isSharingModeMatchRequired();
@@ -56,9 +55,12 @@
mDataCallbackUserData = builder.getDataCallbackUserData();
mErrorCallbackUserData = builder.getErrorCallbackUserData();
- // This is very helpful for debugging in the future.
- ALOGI("AudioStream.open(): rate = %d, channels = %d, format = %d, sharing = %d",
- mSampleRate, mSamplesPerFrame, mFormat, mSharingMode);
+ // This is very helpful for debugging in the future. Please leave it in.
+ ALOGI("AudioStream::open() rate = %d, channels = %d, format = %d, sharing = %d, dir = %s",
+ mSampleRate, mSamplesPerFrame, mFormat, mSharingMode,
+ (getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "OUTPUT" : "INPUT");
+ ALOGI("AudioStream::open() device = %d, perfMode = %d, callbackFrames = %d",
+ mDeviceId, mPerformanceMode, mFramesPerDataCallback);
// Check for values that are ridiculously out of range to prevent math overflow exploits.
// The service will do a better check.
@@ -82,10 +84,6 @@
ALOGE("AudioStream::open(): mSampleRate out of range = %d", mSampleRate);
return AAUDIO_ERROR_INVALID_RATE;
}
- if (mDirection != AAUDIO_DIRECTION_INPUT && mDirection != AAUDIO_DIRECTION_OUTPUT) {
- ALOGE("AudioStream::open(): illegal direction %d", mDirection);
- return AAUDIO_ERROR_UNEXPECTED_VALUE;
- }
switch(mPerformanceMode) {
case AAUDIO_PERFORMANCE_MODE_NONE:
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index c49b46b..39c9f9c 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -92,7 +92,7 @@
aaudio_audio_thread_proc_t threadProc,
void *threadArg);
- virtual aaudio_result_t joinThread(void **returnArg, int64_t timeoutNanoseconds);
+ aaudio_result_t joinThread(void **returnArg, int64_t timeoutNanoseconds);
virtual aaudio_result_t registerThread() {
return AAUDIO_OK;
@@ -130,7 +130,7 @@
return AAUDIO_ERROR_UNIMPLEMENTED;
}
- bool isPlaying() const {
+ bool isActive() const {
return mState == AAUDIO_STREAM_STATE_STARTING || mState == AAUDIO_STREAM_STATE_STARTED;
}
@@ -142,7 +142,7 @@
return mSampleRate;
}
- aaudio_audio_format_t getFormat() const {
+ aaudio_format_t getFormat() const {
return mFormat;
}
@@ -170,9 +170,7 @@
return mSharingModeMatchRequired;
}
- aaudio_direction_t getDirection() const {
- return mDirection;
- }
+ virtual aaudio_direction_t getDirection() const = 0;
/**
* This is only valid after setSamplesPerFrame() and setFormat() have been called.
@@ -215,7 +213,7 @@
}
bool isDataCallbackActive() {
- return (mDataCallbackProc != nullptr) && isPlaying();
+ return (mDataCallbackProc != nullptr) && isActive();
}
// ============== I/O ===========================
@@ -266,7 +264,7 @@
/**
* This should not be called after the open() call.
*/
- void setFormat(aaudio_audio_format_t format) {
+ void setFormat(aaudio_format_t format) {
mFormat = format;
}
@@ -301,8 +299,7 @@
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
bool mSharingModeMatchRequired = false; // must match sharing mode requested
- aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
- aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
+ aaudio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 30e7eba..4262f27 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -22,9 +22,11 @@
#include <stdint.h>
#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
#include "binding/AAudioBinderClient.h"
-#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalCapture.h"
+#include "client/AudioStreamInternalPlay.h"
#include "core/AudioStream.h"
#include "core/AudioStreamBuilder.h"
#include "legacy/AudioStreamRecord.h"
@@ -32,6 +34,9 @@
using namespace aaudio;
+#define AAUDIO_MMAP_POLICY_DEFAULT AAUDIO_POLICY_NEVER
+#define AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT AAUDIO_POLICY_NEVER
+
/*
* AudioStreamBuilder
*/
@@ -51,17 +56,18 @@
switch (direction) {
case AAUDIO_DIRECTION_INPUT:
- if (sharingMode == AAUDIO_SHARING_MODE_SHARED) {
- *audioStreamPtr = new AudioStreamRecord();
+ if (tryMMap) {
+ *audioStreamPtr = new AudioStreamInternalCapture(AAudioBinderClient::getInstance(),
+ false);
} else {
- ALOGE("AudioStreamBuilder(): bad sharing mode = %d for input", sharingMode);
- result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ *audioStreamPtr = new AudioStreamRecord();
}
break;
case AAUDIO_DIRECTION_OUTPUT:
if (tryMMap) {
- *audioStreamPtr = new AudioStreamInternal(AAudioBinderClient::getInstance(), false);
+ *audioStreamPtr = new AudioStreamInternalPlay(AAudioBinderClient::getInstance(),
+ false);
} else {
*audioStreamPtr = new AudioStreamTrack();
}
@@ -74,27 +80,41 @@
return result;
}
-// Try to open using MMAP path if that is enabled.
-// Fall back to Legacy path is MMAP not available.
+// Try to open using MMAP path if that is allowed.
+// Fall back to Legacy path if MMAP not available.
+// Exact behavior is controlled by MMapPolicy.
aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
AudioStream *audioStream = nullptr;
*streamPtr = nullptr;
- int32_t mmapEnabled = AAudioProperty_getMMapEnabled();
- int32_t mmapExclusiveEnabled = AAudioProperty_getMMapExclusiveEnabled();
- ALOGD("AudioStreamBuilder(): mmapEnabled = %d, mmapExclusiveEnabled = %d",
- mmapEnabled, mmapExclusiveEnabled);
+ // The API setting is the highest priority.
+ aaudio_policy_t mmapPolicy = AAudio_getMMapPolicy();
+ // If not specified then get from a system property.
+ if (mmapPolicy == AAUDIO_UNSPECIFIED) {
+ mmapPolicy = AAudioProperty_getMMapPolicy();
+ }
+ // If still not specified then use the default.
+ if (mmapPolicy == AAUDIO_UNSPECIFIED) {
+ mmapPolicy = AAUDIO_MMAP_POLICY_DEFAULT;
+ }
+
+ int32_t mapExclusivePolicy = AAudioProperty_getMMapExclusivePolicy();
+ if (mapExclusivePolicy == AAUDIO_UNSPECIFIED) {
+ mapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
+ }
+ ALOGD("AudioStreamBuilder(): mmapPolicy = %d, mapExclusivePolicy = %d",
+ mmapPolicy, mapExclusivePolicy);
aaudio_sharing_mode_t sharingMode = getSharingMode();
if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
- && (mmapExclusiveEnabled == AAUDIO_USE_NEVER)) {
+ && (mapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
ALOGW("AudioStreamBuilder(): EXCLUSIVE sharing mode not supported. Use SHARED.");
sharingMode = AAUDIO_SHARING_MODE_SHARED;
setSharingMode(sharingMode);
}
- bool allowMMap = mmapEnabled != AAUDIO_USE_NEVER;
- bool allowLegacy = mmapEnabled != AAUDIO_USE_ALWAYS;
+ bool allowMMap = mmapPolicy != AAUDIO_POLICY_NEVER;
+ bool allowLegacy = mmapPolicy != AAUDIO_POLICY_ALWAYS;
aaudio_result_t result = builder_createStream(getDirection(), sharingMode,
allowMMap, &audioStream);
@@ -125,6 +145,5 @@
}
}
- ALOGD("AudioStreamBuilder(): returned %d", result);
return result;
}
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index 569ca63..fd416c4 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -64,11 +64,11 @@
return this;
}
- aaudio_audio_format_t getFormat() const {
+ aaudio_format_t getFormat() const {
return mFormat;
}
- AudioStreamBuilder *setFormat(aaudio_audio_format_t format) {
+ AudioStreamBuilder *setFormat(aaudio_format_t format) {
mFormat = format;
return this;
}
@@ -168,10 +168,10 @@
private:
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
- int32_t mDeviceId = AAUDIO_DEVICE_UNSPECIFIED;
+ int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
bool mSharingModeMatchRequired = false; // must match sharing mode requested
- aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ aaudio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index f89234a..dfac4fb 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -30,7 +30,7 @@
using namespace aaudio;
AudioStreamLegacy::AudioStreamLegacy()
- : AudioStream() {
+ : AudioStream(), mDeviceCallback(new StreamDeviceCallback(this)) {
}
AudioStreamLegacy::~AudioStreamLegacy() {
@@ -60,44 +60,54 @@
void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
aaudio_data_callback_result_t callbackResult;
+
+ if (!mCallbackEnabled.load()) {
+ return;
+ }
+
switch (opcode) {
case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
- // Note that this code assumes an AudioTrack::Buffer is the same as AudioRecord::Buffer
- // TODO define our own AudioBuffer and pass it from the subclasses.
- AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
- if (audioBuffer->frameCount == 0) return;
+ if (getState() != AAUDIO_STREAM_STATE_DISCONNECTED) {
+ // Note that this code assumes an AudioTrack::Buffer is the same as
+ // AudioRecord::Buffer
+ // TODO define our own AudioBuffer and pass it from the subclasses.
+ AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
+ if (audioBuffer->frameCount == 0) return;
- // If the caller specified an exact size then use a block size adapter.
- if (mBlockAdapter != nullptr) {
- int32_t byteCount = audioBuffer->frameCount * getBytesPerFrame();
- callbackResult = mBlockAdapter->processVariableBlock((uint8_t *) audioBuffer->raw,
- byteCount);
- } else {
- // Call using the AAudio callback interface.
- callbackResult = (*getDataCallbackProc())(
- (AAudioStream *) this,
- getDataCallbackUserData(),
- audioBuffer->raw,
- audioBuffer->frameCount
- );
- }
- if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
- audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
- incrementClientFrameCounter(audioBuffer->frameCount);
- } else {
- audioBuffer->size = 0;
+ // If the caller specified an exact size then use a block size adapter.
+ if (mBlockAdapter != nullptr) {
+ int32_t byteCount = audioBuffer->frameCount * getBytesPerFrame();
+ callbackResult = mBlockAdapter->processVariableBlock(
+ (uint8_t *) audioBuffer->raw, byteCount);
+ } else {
+ // Call using the AAudio callback interface.
+ callbackResult = (*getDataCallbackProc())(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ audioBuffer->raw,
+ audioBuffer->frameCount
+ );
+ }
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+ audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
+ incrementClientFrameCounter(audioBuffer->frameCount);
+ } else {
+ audioBuffer->size = 0;
+ }
+ break;
}
}
- break;
+ /// FALL THROUGH
// Stream got rerouted so we disconnect.
case AAUDIO_CALLBACK_OPERATION_DISCONNECTED: {
- ALOGD("AudioStreamAAudio(): callbackLoop() stream disconnected");
+ setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ ALOGD("processCallbackCommon() stream disconnected");
if (getErrorCallbackProc() != nullptr) {
(*getErrorCallbackProc())(
(AAudioStream *) this,
getErrorCallbackUserData(),
- AAUDIO_OK
+ AAUDIO_ERROR_DISCONNECTED
);
}
mCallbackEnabled.store(false);
@@ -129,3 +139,22 @@
status_t status = extendedTimestamp->getBestTimestamp(framePosition, timeNanoseconds, timebase);
return AAudioConvert_androidToAAudioResult(status);
}
+
+void AudioStreamLegacy::onAudioDeviceUpdate(audio_port_handle_t deviceId)
+{
+ ALOGD("onAudioDeviceUpdate() deviceId %d", (int)deviceId);
+ if (getDeviceId() != AAUDIO_UNSPECIFIED && getDeviceId() != deviceId &&
+ getState() != AAUDIO_STREAM_STATE_DISCONNECTED) {
+ setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ // if we have a data callback and the stream is active, send the error callback from
+ // data callback thread when it sees the DISCONNECTED state
+ if (!isDataCallbackActive() && getErrorCallbackProc() != nullptr) {
+ (*getErrorCallbackProc())(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ AAUDIO_ERROR_DISCONNECTED
+ );
+ }
+ }
+ setDeviceId(deviceId);
+}
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 38f1a56..0ded8e1 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -18,6 +18,7 @@
#define LEGACY_AUDIO_STREAM_LEGACY_H
#include <media/AudioTimestamp.h>
+#include <media/AudioSystem.h>
#include <aaudio/AAudio.h>
@@ -75,14 +76,37 @@
protected:
+ class StreamDeviceCallback : public android::AudioSystem::AudioDeviceCallback
+ {
+ public:
+
+ StreamDeviceCallback(AudioStreamLegacy *parent) : mParent(parent) {}
+ virtual ~StreamDeviceCallback() {}
+
+ virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo __unused,
+ audio_port_handle_t deviceId) {
+ if (mParent != nullptr) {
+ mParent->onAudioDeviceUpdate(deviceId);
+ }
+ }
+
+ AudioStreamLegacy *mParent;
+ };
+
aaudio_result_t getBestTimestamp(clockid_t clockId,
int64_t *framePosition,
int64_t *timeNanoseconds,
android::ExtendedTimestamp *extendedTimestamp);
+ void onAudioDeviceUpdate(audio_port_handle_t deviceId);
+
+ void onStart() { mCallbackEnabled.store(true); }
+ void onStop() { mCallbackEnabled.store(false); }
+
FixedBlockAdapter *mBlockAdapter = nullptr;
aaudio_wrapping_frames_t mPositionWhenStarting = 0;
int32_t mCallbackBufferSize = 0;
+ const android::sp<StreamDeviceCallback> mDeviceCallback;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index b3d01d6..156e83d 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AudioStreamRecord"
+#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -64,12 +64,13 @@
: builder.getBufferCapacity();
// TODO implement an unspecified Android format then use that.
- audio_format_t format = (getFormat() == AAUDIO_UNSPECIFIED)
+ audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
? AUDIO_FORMAT_PCM_FLOAT
: AAudioConvert_aaudioToAndroidDataFormat(getFormat());
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
- switch(getPerformanceMode()) {
+ aaudio_performance_mode_t perfMode = getPerformanceMode();
+ switch (perfMode) {
case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
flags = (audio_input_flags_t) (AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW);
break;
@@ -95,16 +96,24 @@
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
+ ALOGD("AudioStreamRecord::open(), request notificationFrames = %u, frameCount = %u",
+ notificationFrames, (uint)frameCount);
mAudioRecord = new AudioRecord(
+ mOpPackageName // const String16& opPackageName TODO does not compile
+ );
+ if (getDeviceId() != AAUDIO_UNSPECIFIED) {
+ mAudioRecord->setInputDevice(getDeviceId());
+ }
+ mAudioRecord->set(
AUDIO_SOURCE_VOICE_RECOGNITION,
getSampleRate(),
format,
channelMask,
- mOpPackageName, // const String16& opPackageName TODO does not compile
frameCount,
callback,
callbackData,
notificationFrames,
+ false /*threadCanCallJava*/,
AUDIO_SESSION_ALLOCATE,
streamTransferType,
flags
@@ -123,9 +132,14 @@
// Get the actual rate.
setSampleRate(mAudioRecord->getSampleRate());
- setSamplesPerFrame(mAudioRecord->channelCount());
setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
+ int32_t actualSampleRate = mAudioRecord->getSampleRate();
+ ALOGW_IF(actualSampleRate != getSampleRate(),
+ "AudioStreamRecord::open() sampleRate changed from %d to %d",
+ getSampleRate(), actualSampleRate);
+ setSampleRate(actualSampleRate);
+
// We may need to pass the data through a block size adapter to guarantee constant size.
if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
@@ -135,7 +149,27 @@
mBlockAdapter = nullptr;
}
+ // Update performance mode based on the actual stream.
+ // For example, if the sample rate does not match native then you won't get a FAST track.
+ audio_input_flags_t actualFlags = mAudioRecord->getFlags();
+ aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ // FIXME Some platforms do not advertise RAW mode for low latency inputs.
+ if ((actualFlags & (AUDIO_INPUT_FLAG_FAST))
+ == (AUDIO_INPUT_FLAG_FAST)) {
+ actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ }
+ setPerformanceMode(actualPerformanceMode);
+ // Log warning if we did not get what we asked for.
+ ALOGW_IF(actualFlags != flags,
+ "AudioStreamRecord::open() flags changed from 0x%08X to 0x%08X",
+ flags, actualFlags);
+ ALOGW_IF(actualPerformanceMode != perfMode,
+ "AudioStreamRecord::open() perfMode changed from %d to %d",
+ perfMode, actualPerformanceMode);
+
setState(AAUDIO_STREAM_STATE_OPEN);
+ setDeviceId(mAudioRecord->getRoutedDeviceId());
+ mAudioRecord->addAudioDeviceCallback(mDeviceCallback);
return AAUDIO_OK;
}
@@ -152,8 +186,6 @@
}
void AudioStreamRecord::processCallback(int event, void *info) {
-
- ALOGD("AudioStreamRecord::processCallback(), event %d", event);
switch (event) {
case AudioRecord::EVENT_MORE_DATA:
processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
@@ -185,6 +217,7 @@
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
} else {
+ onStart();
setState(AAUDIO_STREAM_STATE_STARTING);
}
return AAUDIO_OK;
@@ -206,6 +239,7 @@
if (mAudioRecord.get() == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
}
+ onStop();
setState(AAUDIO_STREAM_STATE_STOPPING);
incrementFramesWritten(getFramesRead() - getFramesWritten()); // TODO review
mAudioRecord->stop();
@@ -250,12 +284,22 @@
return result;
}
+ if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
+
// TODO add timeout to AudioRecord
bool blocking = (timeoutNanoseconds > 0);
ssize_t bytesRead = mAudioRecord->read(buffer, numBytes, blocking);
if (bytesRead == WOULD_BLOCK) {
return 0;
} else if (bytesRead < 0) {
+ // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
+ // AudioRecord invalidation
+ if (bytesRead == DEAD_OBJECT) {
+ setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
return AAudioConvert_androidToAAudioResult(bytesRead);
}
int32_t framesRead = (int32_t)(bytesRead / bytesPerFrame);
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 0af6457..90000fc 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -65,6 +65,10 @@
aaudio_result_t updateStateWhileWaiting() override;
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_INPUT;
+ }
+
// This is public so it can be called from the C callback function.
void processCallback(int event, void *info) override;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 9c433cd..7e39908 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AudioStreamTrack"
+#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -58,15 +58,11 @@
return result;
}
- ALOGD("AudioStreamTrack::open = %p", this);
-
// Try to create an AudioTrack
// Use stereo if unspecified.
int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
? 2 : getSamplesPerFrame();
audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(samplesPerFrame);
- ALOGD("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
- samplesPerFrame, channelMask);
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
aaudio_performance_mode_t perfMode = getPerformanceMode();
@@ -87,8 +83,7 @@
break;
}
- int32_t frameCount = builder.getBufferCapacity();
- ALOGD("AudioStreamTrack::open(), requested buffer capacity %d", frameCount);
+ size_t frameCount = (size_t)builder.getBufferCapacity();
int32_t notificationFrames = 0;
@@ -118,8 +113,13 @@
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
- ALOGD("AudioStreamTrack::open(), notificationFrames = %d", notificationFrames);
- mAudioTrack = new AudioTrack(
+ ALOGD("AudioStreamTrack::open(), request notificationFrames = %d, frameCount = %u",
+ notificationFrames, (uint)frameCount);
+ mAudioTrack = new AudioTrack();
+ if (getDeviceId() != AAUDIO_UNSPECIFIED) {
+ mAudioTrack->setOutputDevice(getDeviceId());
+ }
+ mAudioTrack->set(
(audio_stream_type_t) AUDIO_STREAM_MUSIC,
getSampleRate(),
format,
@@ -129,26 +129,35 @@
callback,
callbackData,
notificationFrames,
+ 0 /*sharedBuffer*/,
+ false /*threadCanCallJava*/,
AUDIO_SESSION_ALLOCATE,
streamTransferType
);
// Did we get a valid track?
status_t status = mAudioTrack->initCheck();
- ALOGD("AudioStreamTrack::open(), initCheck() returned %d", status);
if (status != NO_ERROR) {
close();
ALOGE("AudioStreamTrack::open(), initCheck() returned %d", status);
return AAudioConvert_androidToAAudioResult(status);
}
+ //TrackPlayerBase init
+ init(mAudioTrack.get(), PLAYER_TYPE_AAUDIO, AUDIO_USAGE_MEDIA);
+
// Get the actual values from the AudioTrack.
setSamplesPerFrame(mAudioTrack->channelCount());
- setSampleRate(mAudioTrack->getSampleRate());
- aaudio_audio_format_t aaudioFormat =
+ aaudio_format_t aaudioFormat =
AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format());
setFormat(aaudioFormat);
+ int32_t actualSampleRate = mAudioTrack->getSampleRate();
+ ALOGW_IF(actualSampleRate != getSampleRate(),
+ "AudioStreamTrack::open() sampleRate changed from %d to %d",
+ getSampleRate(), actualSampleRate);
+ setSampleRate(actualSampleRate);
+
// We may need to pass the data through a block size adapter to guarantee constant size.
if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
@@ -160,6 +169,7 @@
setState(AAUDIO_STREAM_STATE_OPEN);
setDeviceId(mAudioTrack->getRoutedDeviceId());
+ mAudioTrack->addAudioDeviceCallback(mDeviceCallback);
// Update performance mode based on the actual stream.
// For example, if the sample rate is not allowed then you won't get a FAST track.
@@ -187,7 +197,7 @@
aaudio_result_t AudioStreamTrack::close()
{
if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
- mAudioTrack.clear();
+ destroy();
setState(AAUDIO_STREAM_STATE_CLOSED);
}
mFixedBlockReader.close();
@@ -225,10 +235,11 @@
return AAudioConvert_androidToAAudioResult(err);
}
- err = mAudioTrack->start();
+ err = startWithStatus();
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
} else {
+ onStart();
setState(AAUDIO_STREAM_STATE_STARTING);
}
return AAUDIO_OK;
@@ -246,8 +257,9 @@
AAudio_convertStreamStateToText(getState()));
return AAUDIO_ERROR_INVALID_STATE;
}
+ onStop();
setState(AAUDIO_STREAM_STATE_PAUSING);
- mAudioTrack->pause();
+ pause();
status_t err = mAudioTrack->getPosition(&mPositionWhenPausing);
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
@@ -276,9 +288,10 @@
if (mAudioTrack.get() == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
}
+ onStop();
setState(AAUDIO_STREAM_STATE_STOPPING);
incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
- mAudioTrack->stop();
+ stop();
mFramesWritten.reset32();
return AAUDIO_OK;
}
@@ -339,6 +352,10 @@
return result;
}
+ if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
+
// TODO add timeout to AudioTrack
bool blocking = timeoutNanoseconds > 0;
ssize_t bytesWritten = mAudioTrack->write(buffer, numBytes, blocking);
@@ -346,6 +363,12 @@
return 0;
} else if (bytesWritten < 0) {
ALOGE("invalid write, returned %d", (int)bytesWritten);
+ // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
+ // AudioTrack invalidation
+ if (bytesWritten == DEAD_OBJECT) {
+ setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
return AAudioConvert_androidToAAudioResult(bytesWritten);
}
int32_t framesWritten = (int32_t)(bytesWritten / bytesPerFrame);
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 186a08e..ff429ea 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -18,7 +18,7 @@
#define LEGACY_AUDIO_STREAM_TRACK_H
#include <math.h>
-#include <media/AudioTrack.h>
+#include <media/TrackPlayerBase.h>
#include <aaudio/AAudio.h>
#include "AudioStreamBuilder.h"
@@ -32,7 +32,7 @@
/**
* Internal stream that uses the legacy AudioTrack path.
*/
-class AudioStreamTrack : public AudioStreamLegacy {
+class AudioStreamTrack : public AudioStreamLegacy, public android::TrackPlayerBase {
public:
AudioStreamTrack();
@@ -63,6 +63,10 @@
int64_t getFramesRead() override;
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_OUTPUT;
+ }
+
aaudio_result_t updateStateWhileWaiting() override;
// This is public so it can be called from the C callback function.
@@ -74,7 +78,6 @@
private:
- android::sp<android::AudioTrack> mAudioTrack;
// adapts between variable sized blocks and fixed size blocks
FixedBlockReader mFixedBlockReader;
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 168ed86..2c7634e 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -24,7 +24,9 @@
#include <utils/Errors.h>
#include "aaudio/AAudio.h"
-#include "AAudioUtilities.h"
+#include <aaudio/AAudioTesting.h>
+
+#include "utility/AAudioUtilities.h"
using namespace android;
@@ -33,7 +35,7 @@
#define MAX_HEADROOM (1.41253754f)
#define MIN_HEADROOM (0 - MAX_HEADROOM)
-int32_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format) {
+int32_t AAudioConvert_formatToSizeInBytes(aaudio_format_t format) {
int32_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
switch (format) {
case AAUDIO_FORMAT_PCM_I16:
@@ -275,7 +277,7 @@
return result;
}
-audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_audio_format_t aaudioFormat) {
+audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_format_t aaudioFormat) {
audio_format_t androidFormat;
switch (aaudioFormat) {
case AAUDIO_FORMAT_PCM_I16:
@@ -292,8 +294,8 @@
return androidFormat;
}
-aaudio_audio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t androidFormat) {
- aaudio_audio_format_t aaudioFormat = AAUDIO_FORMAT_INVALID;
+aaudio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t androidFormat) {
+ aaudio_format_t aaudioFormat = AAUDIO_FORMAT_INVALID;
switch (androidFormat) {
case AUDIO_FORMAT_PCM_16_BIT:
aaudioFormat = AAUDIO_FORMAT_PCM_I16;
@@ -327,11 +329,12 @@
static int32_t AAudioProperty_getMMapProperty(const char *propName,
int32_t defaultValue,
const char * caller) {
- int32_t prop = property_get_int32(AAUDIO_PROP_MMAP_ENABLED, defaultValue);
+ int32_t prop = property_get_int32(propName, defaultValue);
switch (prop) {
- case AAUDIO_USE_NEVER:
- case AAUDIO_USE_ALWAYS:
- case AAUDIO_USE_AUTO:
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_POLICY_NEVER:
+ case AAUDIO_POLICY_ALWAYS:
+ case AAUDIO_POLICY_AUTO:
break;
default:
ALOGE("%s: invalid = %d", caller, prop);
@@ -341,20 +344,20 @@
return prop;
}
-int32_t AAudioProperty_getMMapEnabled() {
- return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_ENABLED,
- AAUDIO_USE_NEVER, __func__);
+int32_t AAudioProperty_getMMapPolicy() {
+ return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_POLICY,
+ AAUDIO_UNSPECIFIED, __func__);
}
-int32_t AAudioProperty_getMMapExclusiveEnabled() {
- return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_ENABLED,
- AAUDIO_USE_NEVER, __func__);
+int32_t AAudioProperty_getMMapExclusivePolicy() {
+ return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY,
+ AAUDIO_UNSPECIFIED, __func__);
}
int32_t AAudioProperty_getMixerBursts() {
- const int32_t defaultBursts = 2; // arbitrary
+ const int32_t defaultBursts = 2; // arbitrary, use 2 for double buffered
const int32_t maxBursts = 1024; // arbitrary
- int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts); // use 2 for double buffered
+ int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts);
if (prop < 1 || prop > maxBursts) {
ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
prop = defaultBursts;
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 7c383c7..f894bc0 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -161,42 +161,33 @@
int32_t bytesPerFrame,
int32_t *sizeInBytes);
-audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_audio_format_t aaudio_format);
+audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_format_t aaudio_format);
-aaudio_audio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t format);
+aaudio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t format);
/**
* @return the size of a sample of the given format in bytes or AAUDIO_ERROR_ILLEGAL_ARGUMENT
*/
-int32_t AAudioConvert_formatToSizeInBytes(aaudio_audio_format_t format);
+int32_t AAudioConvert_formatToSizeInBytes(aaudio_format_t format);
// Note that this code may be replaced by Settings or by some other system configuration tool.
-enum : int32_t {
- // Related feature is disabled
- AAUDIO_USE_NEVER = 0,
- // If related feature works then use it. Otherwise fall back to something else.
- AAUDIO_USE_AUTO = 1,
- // Related feature must be used. If not available then fail.
- AAUDIO_USE_ALWAYS = 2
-};
-
-#define AAUDIO_PROP_MMAP_ENABLED "aaudio.mmap_enabled"
+#define AAUDIO_PROP_MMAP_POLICY "aaudio.mmap_policy"
/**
* Read system property.
- * @return AAUDIO_USE_NEVER or AAUDIO_USE_AUTO or AAUDIO_USE_ALWAYS
+ * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
*/
-int32_t AAudioProperty_getMMapEnabled();
+int32_t AAudioProperty_getMMapPolicy();
-#define AAUDIO_PROP_MMAP_EXCLUSIVE_ENABLED "aaudio.mmap_exclusive_enabled"
+#define AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY "aaudio.mmap_exclusive_policy"
/**
* Read system property.
- * @return AAUDIO_USE_NEVER or AAUDIO_USE_AUTO or AAUDIO_USE_ALWAYS
+ * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
*/
-int32_t AAudioProperty_getMMapExclusiveEnabled();
+int32_t AAudioProperty_getMMapExclusivePolicy();
#define AAUDIO_PROP_MIXER_BURSTS "aaudio.mixer_bursts"
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
index fba81f2..afcdebf 100644
--- a/media/libaaudio/tests/Android.mk
+++ b/media/libaaudio/tests/Android.mk
@@ -7,7 +7,7 @@
frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_handle_tracker.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
- libcutils liblog libmedia libutils
+ libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_handle_tracker
include $(BUILD_NATIVE_TEST)
@@ -19,7 +19,7 @@
frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_marshalling.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
- libcutils liblog libmedia libutils
+ libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_aaudio_marshalling
include $(BUILD_NATIVE_TEST)
@@ -31,7 +31,7 @@
frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_block_adapter.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
- libcutils liblog libmedia libutils
+ libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_block_adapter
include $(BUILD_NATIVE_TEST)
@@ -43,7 +43,7 @@
frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_linear_ramp.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
- libcutils liblog libmedia libutils
+ libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_linear_ramp
include $(BUILD_NATIVE_TEST)
@@ -55,7 +55,7 @@
frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_open_params.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
- libcutils liblog libmedia libutils
+ libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_open_params
include $(BUILD_NATIVE_TEST)
diff --git a/media/libaaudio/tests/test_open_params.cpp b/media/libaaudio/tests/test_open_params.cpp
index 5125653..01b8799 100644
--- a/media/libaaudio/tests/test_open_params.cpp
+++ b/media/libaaudio/tests/test_open_params.cpp
@@ -57,7 +57,7 @@
static void testOpenOptions(aaudio_direction_t direction,
int32_t channelCount,
int32_t sampleRate,
- aaudio_audio_format_t format) {
+ aaudio_format_t format) {
aaudio_result_t result = AAUDIO_OK;
@@ -66,7 +66,7 @@
int32_t actualChannelCount = 0;
int32_t actualSampleRate = 0;
- aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ aaudio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_direction_t actualDirection;
@@ -134,7 +134,8 @@
//void foo() { // for tricking the Android Studio formatter
TEST(test_open_params, aaudio_open_all) {
aaudio_direction_t directions[] = {AAUDIO_DIRECTION_OUTPUT, AAUDIO_DIRECTION_INPUT};
- aaudio_audio_format_t formats[] = {AAUDIO_FORMAT_UNSPECIFIED, AAUDIO_FORMAT_PCM_I16, AAUDIO_FORMAT_PCM_FLOAT};
+ aaudio_format_t formats[] = {AAUDIO_FORMAT_UNSPECIFIED,
+ AAUDIO_FORMAT_PCM_I16, AAUDIO_FORMAT_PCM_FLOAT};
int32_t rates[] = {AAUDIO_UNSPECIFIED, 22050, 32000, 44100, 48000, 88200, 96000, 37913, 59132};
// Make printf print immediately so that debug info is not stuck
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 166534f..d853946 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -22,6 +22,8 @@
"IEffect.cpp",
"IEffectClient.cpp",
"ToneGenerator.cpp",
+ "PlayerBase.cpp",
+ "TrackPlayerBase.cpp",
],
shared_libs: [
"liblog",
@@ -30,6 +32,7 @@
"libbinder",
"libdl",
"libaudioutils",
+ "libaudiomanager",
],
export_shared_lib_headers: ["libbinder"],
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 5c54bb2..e749ac4 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -481,12 +481,14 @@
AutoMutex lock(mLock);
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
- // stop capture so that audio policy manager does not reject the new instance start request
- // as only one capture can be active at a time.
- if (mAudioRecord != 0 && mActive) {
- mAudioRecord->stop();
+ if (mStatus == NO_ERROR) {
+ // stop capture so that audio policy manager does not reject the new instance start request
+ // as only one capture can be active at a time.
+ if (mAudioRecord != 0 && mActive) {
+ mAudioRecord->stop();
+ }
+ android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
}
- android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
}
return NO_ERROR;
}
@@ -576,10 +578,17 @@
// Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
bool useCaseAllowed =
- // either of these use cases:
+ // any of these use cases:
// use case 1: callback transfer mode
(mTransfer == TRANSFER_CALLBACK) ||
- // use case 2: obtain/release mode
+ // use case 2: blocking read mode
+ // The default buffer capacity at 48 kHz is 2048 frames, or ~42.6 ms.
+ // That's enough for double-buffering with our standard 20 ms rule of thumb for
+ // the minimum period of a non-SCHED_FIFO thread.
+ // This is needed so that AAudio apps can do a low latency non-blocking read from a
+ // callback running with SCHED_FIFO.
+ (mTransfer == TRANSFER_SYNC) ||
+ // use case 3: obtain/release mode
(mTransfer == TRANSFER_OBTAIN);
// sample rates must also match
bool fastAllowed = useCaseAllowed && (mSampleRate == afSampleRate);
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 5cd2789..9ef1db7 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include <binder/IServiceManager.h>
+#include <binder/ProcessState.h>
#include <media/AudioSystem.h>
#include <media/IAudioFlinger.h>
#include <media/IAudioPolicyService.h>
@@ -68,6 +69,8 @@
gAudioFlinger = interface_cast<IAudioFlinger>(binder);
LOG_ALWAYS_FATAL_IF(gAudioFlinger == 0);
afc = gAudioFlingerClient;
+ // Make sure callbacks can be received by gAudioFlingerClient
+ ProcessState::self()->startThreadPool();
}
af = gAudioFlinger;
}
@@ -711,6 +714,8 @@
gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
LOG_ALWAYS_FATAL_IF(gAudioPolicyService == 0);
apc = gAudioPolicyServiceClient;
+ // Make sure callbacks can be received by gAudioPolicyServiceClient
+ ProcessState::self()->startThreadPool();
}
ap = gAudioPolicyService;
}
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 38d90bc..ffb7703 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -1209,7 +1209,9 @@
AutoMutex lock(mLock);
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
- android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+ if (mStatus == NO_ERROR) {
+ android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+ }
}
return NO_ERROR;
}
diff --git a/media/libaudioclient/PlayerBase.cpp b/media/libaudioclient/PlayerBase.cpp
new file mode 100644
index 0000000..cbef1b3
--- /dev/null
+++ b/media/libaudioclient/PlayerBase.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binder/IServiceManager.h>
+#include <media/PlayerBase.h>
+
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+namespace android {
+
+//--------------------------------------------------------------------------------------------------
+PlayerBase::PlayerBase() : BnPlayer(),
+ mPanMultiplierL(1.0f), mPanMultiplierR(1.0f),
+ mVolumeMultiplierL(1.0f), mVolumeMultiplierR(1.0f),
+ mPIId(PLAYER_PIID_INVALID), mLastReportedEvent(PLAYER_STATE_UNKNOWN)
+{
+ ALOGD("PlayerBase::PlayerBase()");
+ // use checkService() to avoid blocking if audio service is not up yet
+ sp<IBinder> binder = defaultServiceManager()->checkService(String16("audio"));
+ if (binder == 0) {
+ ALOGE("PlayerBase(): binding to audio service failed, service up?");
+ } else {
+ mAudioManager = interface_cast<IAudioManager>(binder);
+ }
+}
+
+
+PlayerBase::~PlayerBase() {
+ ALOGD("PlayerBase::~PlayerBase()");
+ baseDestroy();
+}
+
+void PlayerBase::init(player_type_t playerType, audio_usage_t usage) {
+ if (mAudioManager == 0) {
+ ALOGE("AudioPlayer realize: no audio service, player will not be registered");
+ } else {
+ mPIId = mAudioManager->trackPlayer(playerType, usage, AUDIO_CONTENT_TYPE_UNKNOWN, this);
+ }
+}
+
+void PlayerBase::baseDestroy() {
+ serviceReleasePlayer();
+ if (mAudioManager != 0) {
+ mAudioManager.clear();
+ }
+}
+
+//------------------------------------------------------------------------------
+void PlayerBase::servicePlayerEvent(player_state_t event) {
+ if (mAudioManager != 0) {
+ // only report state change
+ Mutex::Autolock _l(mPlayerStateLock);
+ if (event != mLastReportedEvent
+ && mPIId != PLAYER_PIID_INVALID) {
+ mLastReportedEvent = event;
+ mAudioManager->playerEvent(mPIId, event);
+ }
+ }
+}
+
+void PlayerBase::serviceReleasePlayer() {
+ if (mAudioManager != 0
+ && mPIId != PLAYER_PIID_INVALID) {
+ mAudioManager->releasePlayer(mPIId);
+ }
+}
+
+//FIXME temporary method while some AudioTrack state is outside of this class
+void PlayerBase::reportEvent(player_state_t event) {
+ servicePlayerEvent(event);
+}
+
+status_t PlayerBase::startWithStatus() {
+ status_t status = playerStart();
+ if (status == NO_ERROR) {
+ ALOGD("PlayerBase::start() from IPlayer");
+ servicePlayerEvent(PLAYER_STATE_STARTED);
+ } else {
+ ALOGD("PlayerBase::start() no AudioTrack to start from IPlayer");
+ }
+ return status;
+}
+
+//------------------------------------------------------------------------------
+// Implementation of IPlayer
+void PlayerBase::start() {
+ (void)startWithStatus();
+}
+
+void PlayerBase::pause() {
+ if (playerPause() == NO_ERROR) {
+ ALOGD("PlayerBase::pause() from IPlayer");
+ servicePlayerEvent(PLAYER_STATE_PAUSED);
+ } else {
+ ALOGD("PlayerBase::pause() no AudioTrack to pause from IPlayer");
+ }
+}
+
+
+void PlayerBase::stop() {
+ if (playerStop() == NO_ERROR) {
+ ALOGD("PlayerBase::stop() from IPlayer");
+ servicePlayerEvent(PLAYER_STATE_STOPPED);
+ } else {
+ ALOGD("PlayerBase::stop() no AudioTrack to stop from IPlayer");
+ }
+}
+
+void PlayerBase::setVolume(float vol) {
+ {
+ Mutex::Autolock _l(mSettingsLock);
+ mVolumeMultiplierL = vol;
+ mVolumeMultiplierR = vol;
+ }
+ if (playerSetVolume() == NO_ERROR) {
+ ALOGD("PlayerBase::setVolume() from IPlayer");
+ } else {
+ ALOGD("PlayerBase::setVolume() no AudioTrack for volume control from IPlayer");
+ }
+}
+
+void PlayerBase::setPan(float pan) {
+ {
+ Mutex::Autolock _l(mSettingsLock);
+ pan = min(max(-1.0f, pan), 1.0f);
+ if (pan >= 0.0f) {
+ mPanMultiplierL = 1.0f - pan;
+ mPanMultiplierR = 1.0f;
+ } else {
+ mPanMultiplierL = 1.0f;
+ mPanMultiplierR = 1.0f + pan;
+ }
+ }
+ if (playerSetVolume() == NO_ERROR) {
+ ALOGD("PlayerBase::setPan() from IPlayer");
+ } else {
+ ALOGD("PlayerBase::setPan() no AudioTrack for volume control from IPlayer");
+ }
+}
+
+void PlayerBase::setStartDelayMs(int32_t delayMs __unused) {
+ ALOGW("setStartDelay() is not supported");
+}
+
+void PlayerBase::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration __unused,
+ const sp<VolumeShaper::Operation>& operation __unused) {
+ ALOGW("applyVolumeShaper() is not supported");
+}
+
+status_t PlayerBase::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ return BnPlayer::onTransact(code, data, reply, flags);
+}
+
+} // namespace android
diff --git a/media/libaudioclient/TrackPlayerBase.cpp b/media/libaudioclient/TrackPlayerBase.cpp
new file mode 100644
index 0000000..48cd803
--- /dev/null
+++ b/media/libaudioclient/TrackPlayerBase.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/TrackPlayerBase.h>
+
+namespace android {
+
+//--------------------------------------------------------------------------------------------------
+TrackPlayerBase::TrackPlayerBase() : PlayerBase(),
+ mPlayerVolumeL(1.0f), mPlayerVolumeR(1.0f)
+{
+ ALOGD("TrackPlayerBase::TrackPlayerBase()");
+}
+
+
+TrackPlayerBase::~TrackPlayerBase() {
+ ALOGD("TrackPlayerBase::~TrackPlayerBase()");
+ doDestroy();
+}
+
+void TrackPlayerBase::init(AudioTrack* pat, player_type_t playerType, audio_usage_t usage) {
+ PlayerBase::init(playerType, usage);
+ mAudioTrack = pat;
+}
+
+void TrackPlayerBase::destroy() {
+ doDestroy();
+ baseDestroy();
+}
+
+void TrackPlayerBase::doDestroy() {
+ if (mAudioTrack != 0) {
+ mAudioTrack->stop();
+ // Note that there may still be another reference in post-unlock phase of SetPlayState
+ mAudioTrack.clear();
+ }
+}
+
+void TrackPlayerBase::setPlayerVolume(float vl, float vr) {
+ {
+ Mutex::Autolock _l(mSettingsLock);
+ mPlayerVolumeL = vl;
+ mPlayerVolumeR = vr;
+ }
+ doSetVolume();
+}
+
+//------------------------------------------------------------------------------
+// Implementation of IPlayer
+status_t TrackPlayerBase::playerStart() {
+ status_t status = NO_INIT;
+ if (mAudioTrack != 0) {
+ status = mAudioTrack->start();
+ }
+ return status;
+}
+
+status_t TrackPlayerBase::playerPause() {
+ status_t status = NO_INIT;
+ if (mAudioTrack != 0) {
+ mAudioTrack->pause();
+ status = NO_ERROR;
+ }
+ return status;
+}
+
+
+status_t TrackPlayerBase::playerStop() {
+ status_t status = NO_INIT;
+ if (mAudioTrack != 0) {
+ mAudioTrack->stop();
+ status = NO_ERROR;
+ }
+ return status;
+}
+
+status_t TrackPlayerBase::playerSetVolume() {
+ return doSetVolume();
+}
+
+status_t TrackPlayerBase::doSetVolume() {
+ status_t status = NO_INIT;
+ if (mAudioTrack != 0) {
+ float tl = mPlayerVolumeL * mPanMultiplierL * mVolumeMultiplierL;
+ float tr = mPlayerVolumeR * mPanMultiplierR * mVolumeMultiplierR;
+ mAudioTrack->setVolume(tl, tr);
+ status = NO_ERROR;
+ }
+ return status;
+}
+
+
+void TrackPlayerBase::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) {
+ if (mAudioTrack != 0) {
+ ALOGD("TrackPlayerBase::applyVolumeShaper() from IPlayer");
+ VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
+ if (status < 0) { // a non-negative value is the volume shaper id.
+ ALOGE("TrackPlayerBase::applyVolumeShaper() failed with status %d", status);
+ }
+ } else {
+ ALOGD("TrackPlayerBase::applyVolumeShaper()"
+ " no AudioTrack for volume control from IPlayer");
+ }
+}
+
+} // namespace android
diff --git a/media/libaudioclient/include/media/PlayerBase.h b/media/libaudioclient/include/media/PlayerBase.h
new file mode 100644
index 0000000..fe1db7b
--- /dev/null
+++ b/media/libaudioclient/include/media/PlayerBase.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANDROID_PLAYER_BASE_H__
+#define __ANDROID_PLAYER_BASE_H__
+
+#include <audiomanager/IPlayer.h>
+#include <audiomanager/AudioManager.h>
+#include <audiomanager/IAudioManager.h>
+
+
+namespace android {
+
+class PlayerBase : public BnPlayer
+{
+public:
+ explicit PlayerBase();
+ virtual ~PlayerBase();
+
+ virtual void destroy() = 0;
+
+ //IPlayer implementation
+ virtual void start();
+ virtual void pause();
+ virtual void stop();
+ virtual void setVolume(float vol);
+ virtual void setPan(float pan);
+ virtual void setStartDelayMs(int32_t delayMs);
+ virtual void applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) override;
+
+ virtual status_t onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
+
+
+ status_t startWithStatus();
+
+ //FIXME temporary method while some player state is outside of this class
+ void reportEvent(player_state_t event);
+
+protected:
+
+ void init(player_type_t playerType, audio_usage_t usage);
+ void baseDestroy();
+
+ //IPlayer methods handlers for derived classes
+ virtual status_t playerStart() { return NO_ERROR; }
+ virtual status_t playerPause() { return NO_ERROR; }
+ virtual status_t playerStop() { return NO_ERROR; }
+ virtual status_t playerSetVolume() { return NO_ERROR; }
+
+ // mutex for IPlayer volume and pan, and player-specific volume
+ Mutex mSettingsLock;
+
+ // volume multipliers coming from the IPlayer volume and pan controls
+ float mPanMultiplierL, mPanMultiplierR;
+ float mVolumeMultiplierL, mVolumeMultiplierR;
+
+private:
+ // report events to AudioService
+ void servicePlayerEvent(player_state_t event);
+ void serviceReleasePlayer();
+
+ // native interface to AudioService
+ android::sp<android::IAudioManager> mAudioManager;
+
+ // player interface ID, uniquely identifies the player in the system
+ audio_unique_id_t mPIId;
+
+ // Mutex for state reporting
+ Mutex mPlayerStateLock;
+ player_state_t mLastReportedEvent;
+};
+
+} // namespace android
+
+#endif /* __ANDROID_PLAYER_BASE_H__ */
diff --git a/media/libaudioclient/include/media/TrackPlayerBase.h b/media/libaudioclient/include/media/TrackPlayerBase.h
new file mode 100644
index 0000000..2d113c0
--- /dev/null
+++ b/media/libaudioclient/include/media/TrackPlayerBase.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANDROID_TRACK_PLAYER_BASE_H__
+#define __ANDROID_TRACK_PLAYER_BASE_H__
+
+#include <media/AudioTrack.h>
+#include <media/PlayerBase.h>
+
+namespace android {
+
+class TrackPlayerBase : public PlayerBase
+{
+public:
+ explicit TrackPlayerBase();
+ virtual ~TrackPlayerBase();
+
+ void init(AudioTrack* pat, player_type_t playerType, audio_usage_t usage);
+ virtual void destroy();
+
+ //IPlayer implementation
+ virtual void applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation);
+
+ //FIXME move to protected field, so far made public to minimize changes to AudioTrack logic
+ sp<AudioTrack> mAudioTrack;
+
+ void setPlayerVolume(float vl, float vr);
+
+protected:
+
+ //PlayerBase virtuals
+ virtual status_t playerStart();
+ virtual status_t playerPause();
+ virtual status_t playerStop();
+ virtual status_t playerSetVolume();
+
+private:
+ void doDestroy();
+ status_t doSetVolume();
+
+ // volume coming from the player volume API
+ float mPlayerVolumeL, mPlayerVolumeR;
+};
+
+} // namespace android
+
+#endif /* __ANDROID_TRACK_PLAYER_BASE_H__ */
diff --git a/media/libeffects/data/audio_effects.conf b/media/libeffects/data/audio_effects.conf
index c3c4b67..14a171b 100644
--- a/media/libeffects/data/audio_effects.conf
+++ b/media/libeffects/data/audio_effects.conf
@@ -10,33 +10,33 @@
# the HW and SW effects
#proxy {
- #path /system/lib/soundfx/libeffectproxy.so
+ #path /vendor/lib/soundfx/libeffectproxy.so
#}
# This is the SW implementation library of the effect
#libSW {
- #path /system/lib/soundfx/libswwrapper.so
+ #path /vendor/lib/soundfx/libswwrapper.so
#}
# This is the HW implementation library for the effect
#libHW {
- #path /system/lib/soundfx/libhwwrapper.so
+ #path /vendor/lib/soundfx/libhwwrapper.so
#}
bundle {
- path /system/lib/soundfx/libbundlewrapper.so
+ path /vendor/lib/soundfx/libbundlewrapper.so
}
reverb {
- path /system/lib/soundfx/libreverbwrapper.so
+ path /vendor/lib/soundfx/libreverbwrapper.so
}
visualizer {
- path /system/lib/soundfx/libvisualizer.so
+ path /vendor/lib/soundfx/libvisualizer.so
}
downmix {
- path /system/lib/soundfx/libdownmix.so
+ path /vendor/lib/soundfx/libdownmix.so
}
loudness_enhancer {
- path /system/lib/soundfx/libldnhncr.so
+ path /vendor/lib/soundfx/libldnhncr.so
}
}
@@ -44,7 +44,7 @@
# audio HAL implements support for default software audio pre-processing effects
#
# pre_processing {
-# path /system/lib/soundfx/libaudiopreprocessing.so
+# path /vendor/lib/soundfx/libaudiopreprocessing.so
# }
# list of effects to load. Each effect element must contain a "library" and a "uuid" element.
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index 523b6e1..37c0bb7 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -510,34 +510,81 @@
return 0;
}
+#ifdef __LP64__
+// audio_effects.conf always specifies 32 bit lib path: convert to 64 bit path if needed
+static const char *kLibraryPathRoot[] =
+ {"/odm/lib64/soundfx", "/vendor/lib64/soundfx", "/system/lib64/soundfx"};
+#else
+static const char *kLibraryPathRoot[] =
+ {"/odm/lib/soundfx", "/vendor/lib/soundfx", "/system/lib/soundfx"};
+#endif
+
+static const int kLibraryPathRootSize =
+ (sizeof(kLibraryPathRoot) / sizeof(kLibraryPathRoot[0]));
+
+// Checks if the library path passed as lib_path_in can be opened and if not
+// tries in standard effect library directories with just the library name and returns correct path
+// in lib_path_out
+int checkLibraryPath(const char *lib_path_in, char *lib_path_out) {
+ char *str;
+ const char *lib_name;
+ size_t len;
+
+ if (lib_path_in == NULL || lib_path_out == NULL) {
+ return -EINVAL;
+ }
+
+ strlcpy(lib_path_out, lib_path_in, PATH_MAX);
+
+ // Try exact path first
+ str = strstr(lib_path_out, "/lib/soundfx/");
+ if (str == NULL) {
+ return -EINVAL;
+ }
+
+ // Extract library name from input path
+ len = str - lib_path_out;
+ lib_name = lib_path_in + len + strlen("/lib/soundfx/");
+
+ // Then try with library name and standard path names in order of preference
+ for (int i = 0; i < kLibraryPathRootSize; i++) {
+ char path[PATH_MAX];
+
+ snprintf(path,
+ PATH_MAX,
+ "%s/%s",
+ kLibraryPathRoot[i],
+ lib_name);
+ if (F_OK == access(path, 0)) {
+ strcpy(lib_path_out, path);
+ ALOGW_IF(strncmp(lib_path_out, lib_path_in, PATH_MAX) != 0,
+ "checkLibraryPath() corrected library path %s to %s", lib_path_in, lib_path_out);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+
+
int loadLibrary(cnode *root, const char *name)
{
cnode *node;
- void *hdl;
+ void *hdl = NULL;
audio_effect_library_t *desc;
list_elem_t *e;
lib_entry_t *l;
char path[PATH_MAX];
- char *str;
- size_t len;
node = config_find(root, PATH_TAG);
if (node == NULL) {
return -EINVAL;
}
- // audio_effects.conf always specifies 32 bit lib path: convert to 64 bit path if needed
- strlcpy(path, node->value, PATH_MAX);
-#ifdef __LP64__
- str = strstr(path, "/lib/");
- if (str == NULL)
- return -EINVAL;
- len = str - path;
- path[len] = '\0';
- strlcat(path, "/lib64/", PATH_MAX);
- strlcat(path, node->value + len + strlen("/lib/"), PATH_MAX);
-#endif
- if (strlen(path) >= PATH_MAX - 1)
- return -EINVAL;
+
+ if (checkLibraryPath((const char *)node->value, path) != 0) {
+ ALOGW("loadLibrary() could not find library %s", path);
+ goto error;
+ }
hdl = dlopen(path, RTLD_NOW);
if (hdl == NULL) {
diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk
index 60030ac..06d8237 100644
--- a/media/libeffects/preprocessing/Android.mk
+++ b/media/libeffects/preprocessing/Android.mk
@@ -7,6 +7,7 @@
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_RELATIVE_PATH := soundfx
+LOCAL_VENDOR_MODULE := true
LOCAL_SRC_FILES:= \
PreProcessing.cpp
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index a4a5861..8fe255b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -79,6 +79,8 @@
mIsAudio(true),
mIsVideoAVC(false),
mIsSecure(false),
+ mIsEncrypted(false),
+ mIsEncryptedObservedEarlier(false),
mFormatChangePending(false),
mTimeChangePending(false),
mFrameRateTotal(kDefaultVideoFrameRateTotal),
@@ -330,6 +332,10 @@
pCrypto = NULL;
}
sp<ICrypto> crypto = (ICrypto*)pCrypto;
+ // non-encrypted source won't have a crypto
+ mIsEncrypted = (crypto != NULL);
+ // configure is called once; still using OR in case the behavior changes.
+ mIsEncryptedObservedEarlier = mIsEncryptedObservedEarlier || mIsEncrypted;
ALOGV("onConfigure mCrypto: %p (%d) mIsSecure: %d",
crypto.get(), (crypto != NULL ? crypto->getStrongCount() : 0), mIsSecure);
@@ -611,6 +617,9 @@
sp<AMessage> response = new AMessage;
response->setInt32("status", status);
+ // Clearing the state as it's tied to crypto. mIsEncryptedObservedEarlier is sticky though
+ // and lasts for the lifetime of this codec. See its use in fetchInputData.
+ mIsEncrypted = false;
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
@@ -878,7 +887,20 @@
}
dropAccessUnit = false;
- if (!mIsAudio && !mIsSecure) {
+ if (!mIsAudio && !mIsEncrypted) {
+ // Extra safeguard if higher-level behavior changes. Otherwise, not required now.
+ // Preventing the buffer from being processed (and sent to codec) if this is a later
+ // round of playback but this time without prepareDrm. Or if there is a race between
+ // stop (which is not blocking) and releaseDrm allowing buffers being processed after
+ // Crypto has been released (GenericSource currently prevents this race though).
+ // Particularly doing this check before IsAVCReferenceFrame call to prevent parsing
+ // of encrypted data.
+ if (mIsEncryptedObservedEarlier) {
+ ALOGE("fetchInputData: mismatched mIsEncrypted/mIsEncryptedObservedEarlier (0/1)");
+
+ return INVALID_OPERATION;
+ }
+
int32_t layerId = 0;
bool haveLayerId = accessUnit->meta()->findInt32("temporal-layer-id", &layerId);
if (mRenderer->getVideoLateByUs() > 100000ll
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index de21379..3da2f0b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -99,6 +99,8 @@
bool mIsAudio;
bool mIsVideoAVC;
bool mIsSecure;
+ bool mIsEncrypted;
+ bool mIsEncryptedObservedEarlier;
bool mFormatChangePending;
bool mTimeChangePending;
float mFrameRateTotal;
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 0bb4dbb..bbdcf0b 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -2967,8 +2967,10 @@
int32_t delay, padding;
if (sscanf(mLastCommentData,
" %*x %x %x %*x", &delay, &padding) == 2) {
- if (mLastTrack == NULL)
+ if (mLastTrack == NULL) {
+ delete[] buffer;
return ERROR_MALFORMED;
+ }
mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
mLastTrack->meta->setInt32(kKeyEncoderPadding, padding);
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index 6e7ef35..7193435 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -484,11 +484,18 @@
bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit) {
const uint8_t *data = accessUnit->data();
size_t size = accessUnit->size();
+ if (data == NULL) {
+ ALOGE("IsAVCReferenceFrame: called on NULL data (%p, %zu)", accessUnit.get(), size);
+ return false;
+ }
const uint8_t *nalStart;
size_t nalSize;
while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
- CHECK_GT(nalSize, 0u);
+ if (nalSize == 0) {
+ ALOGE("IsAVCReferenceFrame: invalid nalSize: 0 (%p, %zu)", accessUnit.get(), size);
+ return false;
+ }
unsigned nalType = nalStart[0] & 0x1f;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 415fdf5..c2b71a2 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -250,9 +250,13 @@
conflicting.emplace(String8(cost.conflictingDevices[i].c_str()));
}
- Mutex::Autolock lock(mCameraStatesLock);
- mCameraStates.emplace(id8,
- std::make_shared<CameraState>(id8, cost.resourceCost, conflicting));
+ {
+ Mutex::Autolock lock(mCameraStatesLock);
+ mCameraStates.emplace(id8,
+ std::make_shared<CameraState>(id8, cost.resourceCost, conflicting));
+ }
+
+ onDeviceStatusChanged(id8, CameraDeviceStatus::PRESENT);
if (mFlashlight->hasFlashUnit(id8)) {
mTorchStatusMap.add(id8, TorchModeStatus::AVAILABLE_OFF);
@@ -301,7 +305,12 @@
std::shared_ptr<CameraState> state = getCameraState(id);
if (state == nullptr) {
- ALOGE("%s: Bad camera ID %s", __FUNCTION__, id.string());
+ if (newStatus == StatusInternal::PRESENT) {
+ ALOGW("%s: Unknown camera ID %s, probably newly registered?",
+ __FUNCTION__, id.string());
+ } else {
+ ALOGE("%s: Bad camera ID %s", __FUNCTION__, id.string());
+ }
return;
}
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index b9d6843..5addaf1 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -527,9 +527,6 @@
__FUNCTION__, device.c_str(), strerror(-res), res);
continue;
}
- if (listener != nullptr) {
- listener->onDeviceStatusChanged(String8(id.c_str()), CameraDeviceStatus::PRESENT);
- }
}
for (auto& device : mDevices) {
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
index d93b331..8c8b97a 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
@@ -128,11 +128,9 @@
// De-list all the buffers associated with this stream first.
StreamSet& currentSet = mStreamSetMap.editValueFor(streamSetId);
- BufferList& freeBufs = currentSet.freeBuffers;
BufferCountMap& handOutBufferCounts = currentSet.handoutBufferCountMap;
BufferCountMap& attachedBufferCounts = currentSet.attachedBufferCountMap;
InfoMap& infoMap = currentSet.streamInfoMap;
- removeBuffersFromBufferListLocked(freeBufs, streamId);
handOutBufferCounts.removeItem(streamId);
attachedBufferCounts.removeItem(streamId);
@@ -151,13 +149,93 @@
currentSet.allocatedBufferWaterMark = 0;
// Remove this stream set if all its streams have been removed.
- if (freeBufs.size() == 0 && handOutBufferCounts.size() == 0 && infoMap.size() == 0) {
+ if (handOutBufferCounts.size() == 0 && infoMap.size() == 0) {
mStreamSetMap.removeItem(streamSetId);
}
return OK;
}
+void Camera3BufferManager::notifyBufferRemoved(int streamId, int streamSetId) {
+ Mutex::Autolock l(mLock);
+ StreamSet &streamSet = mStreamSetMap.editValueFor(streamSetId);
+ size_t& attachedBufferCount =
+ streamSet.attachedBufferCountMap.editValueFor(streamId);
+ attachedBufferCount--;
+}
+
+status_t Camera3BufferManager::checkAndFreeBufferOnOtherStreamsLocked(
+ int streamId, int streamSetId) {
+ StreamId firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
+ StreamSet &streamSet = mStreamSetMap.editValueFor(streamSetId);
+ if (streamSet.streamInfoMap.size() == 1) {
+ ALOGV("StreamSet %d has no other stream available to free", streamSetId);
+ return OK;
+ }
+
+ bool freeBufferIsAttached = false;
+ for (size_t i = 0; i < streamSet.streamInfoMap.size(); i++) {
+ firstOtherStreamId = streamSet.streamInfoMap[i].streamId;
+ if (firstOtherStreamId != streamId) {
+
+ size_t otherBufferCount =
+ streamSet.handoutBufferCountMap.valueFor(firstOtherStreamId);
+ size_t otherAttachedBufferCount =
+ streamSet.attachedBufferCountMap.valueFor(firstOtherStreamId);
+ if (otherAttachedBufferCount > otherBufferCount) {
+ freeBufferIsAttached = true;
+ break;
+ }
+ }
+ firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
+ }
+ if (firstOtherStreamId == CAMERA3_STREAM_ID_INVALID || !freeBufferIsAttached) {
+ ALOGV("StreamSet %d has no buffer available to free", streamSetId);
+ return OK;
+ }
+
+
+ // This will drop the reference to one free buffer, which will effectively free one
+ // buffer (from the free buffer list) for the inactive streams.
+ size_t totalAllocatedBufferCount = 0;
+ for (size_t i = 0; i < streamSet.attachedBufferCountMap.size(); i++) {
+ totalAllocatedBufferCount += streamSet.attachedBufferCountMap[i];
+ }
+ if (totalAllocatedBufferCount > streamSet.allocatedBufferWaterMark) {
+ ALOGV("Stream %d: Freeing buffer: detach", firstOtherStreamId);
+ sp<Camera3OutputStream> stream =
+ mStreamMap.valueFor(firstOtherStreamId).promote();
+ if (stream == nullptr) {
+ ALOGE("%s: unable to promote stream %d to detach buffer", __FUNCTION__,
+ firstOtherStreamId);
+ return INVALID_OPERATION;
+ }
+
+ // Detach and then drop the buffer.
+ //
+ // Need to unlock because the stream may also be calling
+ // into the buffer manager in parallel to signal buffer
+ // release, or acquire a new buffer.
+ bool bufferFreed = false;
+ {
+ mLock.unlock();
+ sp<GraphicBuffer> buffer;
+ stream->detachBuffer(&buffer, /*fenceFd*/ nullptr);
+ mLock.lock();
+ if (buffer.get() != nullptr) {
+ bufferFreed = true;
+ }
+ }
+ if (bufferFreed) {
+ size_t& otherAttachedBufferCount =
+ streamSet.attachedBufferCountMap.editValueFor(firstOtherStreamId);
+ otherAttachedBufferCount--;
+ }
+ }
+
+ return OK;
+}
+
status_t Camera3BufferManager::getBufferForStream(int streamId, int streamSetId,
sp<GraphicBuffer>* gb, int* fenceFd) {
ATRACE_CALL();
@@ -191,31 +269,25 @@
}
ALOGV("Stream %d set %d: Get buffer for stream: Allocate new", streamId, streamSetId);
- GraphicBufferEntry buffer =
- getFirstBufferFromBufferListLocked(streamSet.freeBuffers, streamId);
-
if (mGrallocVersion < HARDWARE_DEVICE_API_VERSION(1,0)) {
- // Allocate one if there is no free buffer available.
- if (buffer.graphicBuffer == nullptr) {
- const StreamInfo& info = streamSet.streamInfoMap.valueFor(streamId);
- buffer.fenceFd = -1;
+ const StreamInfo& info = streamSet.streamInfoMap.valueFor(streamId);
+ GraphicBufferEntry buffer;
+ buffer.fenceFd = -1;
+ buffer.graphicBuffer = new GraphicBuffer(
+ info.width, info.height, PixelFormat(info.format), info.combinedUsage,
+ std::string("Camera3BufferManager pid [") +
+ std::to_string(getpid()) + "]");
+ status_t res = buffer.graphicBuffer->initCheck();
- buffer.graphicBuffer = new GraphicBuffer(
- info.width, info.height, PixelFormat(info.format), info.combinedUsage,
- std::string("Camera3BufferManager pid [") +
- std::to_string(getpid()) + "]");
- status_t res = buffer.graphicBuffer->initCheck();
-
- ALOGV("%s: allocating a new graphic buffer (%dx%d, format 0x%x) %p with handle %p",
- __FUNCTION__, info.width, info.height, info.format,
- buffer.graphicBuffer.get(), buffer.graphicBuffer->handle);
- if (res < 0) {
- ALOGE("%s: graphic buffer allocation failed: (error %d %s) ",
- __FUNCTION__, res, strerror(-res));
- return res;
- }
- ALOGV("%s: allocation done", __FUNCTION__);
+ ALOGV("%s: allocating a new graphic buffer (%dx%d, format 0x%x) %p with handle %p",
+ __FUNCTION__, info.width, info.height, info.format,
+ buffer.graphicBuffer.get(), buffer.graphicBuffer->handle);
+ if (res < 0) {
+ ALOGE("%s: graphic buffer allocation failed: (error %d %s) ",
+ __FUNCTION__, res, strerror(-res));
+ return res;
}
+ ALOGV("%s: allocation done", __FUNCTION__);
// Increase the hand-out and attached buffer counts for tracking purposes.
bufferCount++;
@@ -236,69 +308,15 @@
// in returnBufferForStream() if we want to free buffer more quickly.
// TODO: probably should find out all the inactive stream IDs, and free the firstly found
// buffers for them.
- StreamId firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
- if (streamSet.streamInfoMap.size() > 1) {
- bool freeBufferIsAttached = false;
- for (size_t i = 0; i < streamSet.streamInfoMap.size(); i++) {
- firstOtherStreamId = streamSet.streamInfoMap[i].streamId;
- if (firstOtherStreamId != streamId) {
-
- size_t otherBufferCount =
- streamSet.handoutBufferCountMap.valueFor(firstOtherStreamId);
- size_t otherAttachedBufferCount =
- streamSet.attachedBufferCountMap.valueFor(firstOtherStreamId);
- if (otherAttachedBufferCount > otherBufferCount) {
- freeBufferIsAttached = true;
- break;
- }
- if (hasBufferForStreamLocked(streamSet.freeBuffers, firstOtherStreamId)) {
- freeBufferIsAttached = false;
- break;
- }
- }
- firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
- }
- if (firstOtherStreamId == CAMERA3_STREAM_ID_INVALID) {
- return OK;
- }
-
- // This will drop the reference to one free buffer, which will effectively free one
- // buffer (from the free buffer list) for the inactive streams.
- size_t totalAllocatedBufferCount = streamSet.freeBuffers.size();
- for (size_t i = 0; i < streamSet.attachedBufferCountMap.size(); i++) {
- totalAllocatedBufferCount += streamSet.attachedBufferCountMap[i];
- }
- if (totalAllocatedBufferCount > streamSet.allocatedBufferWaterMark) {
- ALOGV("%s: free a buffer from stream %d", __FUNCTION__, firstOtherStreamId);
- if (freeBufferIsAttached) {
- ALOGV("Stream %d: Freeing buffer: detach", firstOtherStreamId);
- sp<Camera3OutputStream> stream =
- mStreamMap.valueFor(firstOtherStreamId).promote();
- if (stream == nullptr) {
- ALOGE("%s: unable to promote stream %d to detach buffer", __FUNCTION__,
- firstOtherStreamId);
- return INVALID_OPERATION;
- }
-
- // Detach and then drop the buffer.
- //
- // Need to unlock because the stream may also be calling
- // into the buffer manager in parallel to signal buffer
- // release, or acquire a new buffer.
- {
- mLock.unlock();
- sp<GraphicBuffer> buffer;
- stream->detachBuffer(&buffer, /*fenceFd*/ nullptr);
- mLock.lock();
- }
- size_t& otherAttachedBufferCount =
- streamSet.attachedBufferCountMap.editValueFor(firstOtherStreamId);
- otherAttachedBufferCount--;
- } else {
- // Droppable buffer is in the free buffer list, grab and drop
- getFirstBufferFromBufferListLocked(streamSet.freeBuffers, firstOtherStreamId);
- }
- }
+ res = checkAndFreeBufferOnOtherStreamsLocked(streamId, streamSetId);
+ if (res != OK) {
+ return res;
+ }
+ // Since we just allocated one new buffer above, try to free one more buffer from other streams
+ // to prevent total buffer count from growing
+ res = checkAndFreeBufferOnOtherStreamsLocked(streamId, streamSetId);
+ if (res != OK) {
+ return res;
}
} else {
// TODO: implement this.
@@ -308,11 +326,18 @@
return OK;
}
-status_t Camera3BufferManager::onBufferReleased(int streamId, int streamSetId) {
+status_t Camera3BufferManager::onBufferReleased(
+ int streamId, int streamSetId, bool* shouldFreeBuffer) {
ATRACE_CALL();
- Mutex::Autolock l(mLock);
+ if (shouldFreeBuffer == nullptr) {
+ ALOGE("%s: shouldFreeBuffer is null", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock l(mLock);
ALOGV("Stream %d set %d: Buffer released", streamId, streamSetId);
+ *shouldFreeBuffer = false;
if (!checkIfStreamRegisteredLocked(streamId, streamSetId)){
ALOGV("%s: signaling buffer release for an already unregistered stream "
@@ -327,6 +352,36 @@
bufferCount--;
ALOGV("%s: Stream %d set %d: Buffer count now %zu", __FUNCTION__, streamId, streamSetId,
bufferCount);
+
+ size_t totalAllocatedBufferCount = 0;
+ size_t totalHandOutBufferCount = 0;
+ for (size_t i = 0; i < streamSet.attachedBufferCountMap.size(); i++) {
+ totalAllocatedBufferCount += streamSet.attachedBufferCountMap[i];
+ totalHandOutBufferCount += streamSet.handoutBufferCountMap[i];
+ }
+
+ size_t newWaterMark = totalHandOutBufferCount + BUFFER_WATERMARK_DEC_THRESHOLD;
+ if (totalAllocatedBufferCount > newWaterMark &&
+ streamSet.allocatedBufferWaterMark > newWaterMark) {
+ // BufferManager got more than enough buffers, so decrease watermark
+ // to trigger more buffers free operation.
+ streamSet.allocatedBufferWaterMark = newWaterMark;
+ ALOGV("%s: Stream %d set %d: watermark--; now %zu",
+ __FUNCTION__, streamId, streamSetId, streamSet.allocatedBufferWaterMark);
+ }
+
+ size_t attachedBufferCount = streamSet.attachedBufferCountMap.valueFor(streamId);
+ if (attachedBufferCount <= bufferCount) {
+ ALOGV("%s: stream %d has no buffer available to free.", __FUNCTION__, streamId);
+ }
+
+ bool freeBufferIsAttached = (attachedBufferCount > bufferCount);
+ if (freeBufferIsAttached &&
+ totalAllocatedBufferCount > streamSet.allocatedBufferWaterMark &&
+ attachedBufferCount > bufferCount + BUFFER_FREE_THRESHOLD) {
+ ALOGV("%s: free a buffer from stream %d", __FUNCTION__, streamId);
+ *shouldFreeBuffer = true;
+ }
} else {
// TODO: implement gralloc V1 support
return BAD_VALUE;
@@ -335,40 +390,42 @@
return OK;
}
-status_t Camera3BufferManager::returnBufferForStream(int streamId,
- int streamSetId, const sp<GraphicBuffer>& buffer, int fenceFd) {
+status_t Camera3BufferManager::onBuffersRemoved(int streamId, int streamSetId, size_t count) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
- ALOGV_IF(buffer != 0, "%s: return buffer (%p) with handle (%p) for stream %d and stream set %d",
- __FUNCTION__, buffer.get(), buffer->handle, streamId, streamSetId);
+
+ ALOGV("Stream %d set %d: Buffer removed", streamId, streamSetId);
if (!checkIfStreamRegisteredLocked(streamId, streamSetId)){
- ALOGV("%s: returning buffer for an already unregistered stream (stream %d with set id %d),"
- "buffer will be dropped right away!", __FUNCTION__, streamId, streamSetId);
+ ALOGV("%s: signaling buffer removal for an already unregistered stream "
+ "(stream %d with set id %d)", __FUNCTION__, streamId, streamSetId);
return OK;
}
if (mGrallocVersion < HARDWARE_DEVICE_API_VERSION(1,0)) {
- // Add to the freeBuffer list.
StreamSet& streamSet = mStreamSetMap.editValueFor(streamSetId);
- if (buffer != 0) {
- BufferEntry entry;
- entry.add(streamId, GraphicBufferEntry(buffer, fenceFd));
- status_t res = addBufferToBufferListLocked(streamSet.freeBuffers, entry);
- if (res != OK) {
- ALOGE("%s: add buffer to free buffer list failed", __FUNCTION__);
- return res;
- }
+ BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
+ size_t& totalHandoutCount = handOutBufferCounts.editValueFor(streamId);
+ BufferCountMap& attachedBufferCounts = streamSet.attachedBufferCountMap;
+ size_t& totalAttachedCount = attachedBufferCounts.editValueFor(streamId);
+
+ if (count > totalHandoutCount) {
+ ALOGE("%s: Removed buffer count %zu greater than current handout count %zu",
+ __FUNCTION__, count, totalHandoutCount);
+ return BAD_VALUE;
+ }
+ if (count > totalAttachedCount) {
+ ALOGE("%s: Removed buffer count %zu greater than current attached count %zu",
+ __FUNCTION__, count, totalAttachedCount);
+ return BAD_VALUE;
}
- // Update the handed out and attached buffer count for this buffer.
- BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
- size_t& bufferCount = handOutBufferCounts.editValueFor(streamId);
- bufferCount--;
- size_t& attachedBufferCount = streamSet.attachedBufferCountMap.editValueFor(streamId);
- attachedBufferCount--;
+ totalHandoutCount -= count;
+ totalAttachedCount -= count;
+ ALOGV("%s: Stream %d set %d: Buffer count now %zu, attached buffer count now %zu",
+ __FUNCTION__, streamId, streamSetId, totalHandoutCount, totalAttachedCount);
} else {
- // TODO: implement this.
+ // TODO: implement gralloc V1 support
return BAD_VALUE;
}
@@ -404,17 +461,6 @@
lines.appendFormat(" stream id: %d, attached buffer count: %zu.\n",
streamId, bufferCount);
}
-
- lines.appendFormat(" Free buffer count: %zu\n",
- mStreamSetMap[i].freeBuffers.size());
- for (auto& bufEntry : mStreamSetMap[i].freeBuffers) {
- for (size_t m = 0; m < bufEntry.size(); m++) {
- const sp<GraphicBuffer>& buffer = bufEntry.valueAt(m).graphicBuffer;
- int streamId = bufEntry.keyAt(m);
- lines.appendFormat(" stream id: %d, buffer: %p, handle: %p.\n",
- streamId, buffer.get(), buffer->handle);
- }
- }
}
write(fd, lines.string(), lines.size());
}
@@ -444,67 +490,5 @@
return true;
}
-status_t Camera3BufferManager::addBufferToBufferListLocked(BufferList& bufList,
- const BufferEntry& buffer) {
- // TODO: need add some sanity check here.
- bufList.push_back(buffer);
-
- return OK;
-}
-
-status_t Camera3BufferManager::removeBuffersFromBufferListLocked(BufferList& bufferList,
- int streamId) {
- BufferList::iterator i = bufferList.begin();
- while (i != bufferList.end()) {
- ssize_t idx = i->indexOfKey(streamId);
- if (idx != NAME_NOT_FOUND) {
- ALOGV("%s: Remove a buffer for stream %d, free buffer total count: %zu",
- __FUNCTION__, streamId, bufferList.size());
- i->removeItem(streamId);
- if (i->isEmpty()) {
- i = bufferList.erase(i);
- }
- } else {
- i++;
- }
- }
-
- return OK;
-}
-
-bool Camera3BufferManager::hasBufferForStreamLocked(BufferList& buffers, int streamId) {
- BufferList::iterator i = buffers.begin();
- while (i != buffers.end()) {
- ssize_t idx = i->indexOfKey(streamId);
- if (idx != NAME_NOT_FOUND) {
- return true;
- }
- i++;
- }
-
- return false;
-}
-
-Camera3BufferManager::GraphicBufferEntry Camera3BufferManager::getFirstBufferFromBufferListLocked(
- BufferList& buffers, int streamId) {
- // Try to get the first buffer from the free buffer list if there is one.
- GraphicBufferEntry entry;
- BufferList::iterator i = buffers.begin();
- while (i != buffers.end()) {
- ssize_t idx = i->indexOfKey(streamId);
- if (idx != NAME_NOT_FOUND) {
- entry = GraphicBufferEntry(i->valueAt(idx));
- i = buffers.erase(i);
- break;
- } else {
- i++;
- }
- }
-
- ALOGV_IF(entry.graphicBuffer == 0, "%s: Unable to find free buffer for stream %d",
- __FUNCTION__, streamId);
- return entry;
-}
-
} // namespace camera3
} // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.h b/services/camera/libcameraservice/device3/Camera3BufferManager.h
index d1d7a6f..025062e 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.h
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.h
@@ -137,41 +137,41 @@
* buffer has been reused. The manager will call detachBuffer on the stream
* if it needs the released buffer otherwise.
*
+ * When shouldFreeBuffer is set to true, caller must detach and free one buffer from the
+ * buffer queue, and then call notifyBufferRemoved to update the manager.
+ *
* Return values:
*
* OK: Buffer release was processed succesfully
* BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID
* combination doesn't match what was registered, or this stream wasn't registered
- * to this buffer manager before.
+ * to this buffer manager before, or shouldFreeBuffer is null.
*/
- status_t onBufferReleased(int streamId, int streamSetId);
+ status_t onBufferReleased(int streamId, int streamSetId, /*out*/bool* shouldFreeBuffer);
/**
- * This method returns a buffer for a stream to this buffer manager.
+ * This method notifies the manager that certain buffers have been removed from the
+ * buffer queue by detachBuffer from the consumer.
*
- * When a buffer is returned, it is treated as a free buffer and may either be reused for future
- * getBufferForStream() calls, or freed if there total number of outstanding allocated buffers
- * is too large. The latter only applies to the case where the buffer are physically shared
- * between streams in the same stream set. A physically shared buffer is the buffer that has one
- * physical back store but multiple handles. Multiple stream can access the same physical memory
- * with their own handles. Physically shared buffer can only be supported by Gralloc HAL V1.
- * See hardware/libhardware/include/hardware/gralloc1.h for more details.
+ * The notification lets the manager update its internal handout buffer count and
+ * attached buffer counts accordingly. When buffers are detached from
+ * consumer, both handout and attached counts are decremented.
*
+ * Return values:
*
- * This call takes the ownership of the returned buffer if it was allocated by this buffer
- * manager; clients should not use this buffer after this call. Attempting to access this buffer
- * after this call will have undefined behavior. Holding a reference to this buffer after this
- * call may cause memory leakage. If a BufferQueue is used to track the buffers handed out by
- * this buffer queue, it is recommended to call detachNextBuffer() from the buffer queue after
- * BufferQueueProducer onBufferReleased callback is fired, and return it to this buffer manager.
- *
- * OK: Buffer return for this stream was successful.
- * BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID combination
- * doesn't match what was registered, or this stream wasn't registered to this
- * buffer manager before.
+ * OK: Buffer removal was processed successfully
+ * BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID
+ * combination doesn't match what was registered, or this stream wasn't registered
+ * to this buffer manager before, or the removed buffer count is larger than
+ * current total handoutCount or attachedCount.
*/
- status_t returnBufferForStream(int streamId, int streamSetId, const sp<GraphicBuffer>& buffer,
- int fenceFd);
+ status_t onBuffersRemoved(int streamId, int streamSetId, size_t count);
+
+ /**
+ * This method notifies the manager that a buffer is freed from the buffer queue, usually
+ * because onBufferReleased signals the caller to free a buffer via the shouldFreeBuffer flag.
+ */
+ void notifyBufferRemoved(int streamId, int streamSetId);
/**
* Dump the buffer manager statistics.
@@ -179,6 +179,18 @@
void dump(int fd, const Vector<String16> &args) const;
private:
+ // allocatedBufferWaterMark will be decreased when:
+ // numAllocatedBuffersThisSet > numHandoutBuffersThisSet + BUFFER_WATERMARK_DEC_THRESHOLD
+ // This allows the watermark go back down after a burst of buffer requests
+ static const int BUFFER_WATERMARK_DEC_THRESHOLD = 3;
+
+ // onBufferReleased will set shouldFreeBuffer to true when:
+ // numAllocatedBuffersThisSet > allocatedBufferWaterMark AND
+ // numAllocatedBuffersThisStream > numHandoutBuffersThisStream + BUFFER_FREE_THRESHOLD
+ // So after a burst of buffer requests and back to steady state, the buffer queue should have
+ // (BUFFER_FREE_THRESHOLD + steady state handout buffer count) buffers.
+ static const int BUFFER_FREE_THRESHOLD = 3;
+
/**
* Lock to synchronize the access to the methods of this class.
*/
@@ -256,11 +268,6 @@
*/
InfoMap streamInfoMap;
/**
- * The free buffer list for all the buffers belong to this set. The free buffers are
- * returned by the returnBufferForStream() call, and available for reuse.
- */
- BufferList freeBuffers;
- /**
* The count of the buffers that were handed out to the streams of this set.
*/
BufferCountMap handoutBufferCountMap;
@@ -294,37 +301,10 @@
bool checkIfStreamRegisteredLocked(int streamId, int streamSetId) const;
/**
- * Add a buffer entry to the BufferList. This method needs to be called with mLock held.
+ * Check if other streams in the stream set have an extra buffer available to be freed, and
+ * free one if so.
*/
- status_t addBufferToBufferListLocked(BufferList &bufList, const BufferEntry &buffer);
-
- /**
- * Remove all buffers from the BufferList.
- *
- * Note that this doesn't mean that the buffers are freed after this call. A buffer is freed
- * only if all other references to it are dropped.
- *
- * This method needs to be called with mLock held.
- */
- status_t removeBuffersFromBufferListLocked(BufferList &bufList, int streamId);
-
- /**
- * Get the first available buffer from the buffer list for this stream. The graphicBuffer inside
- * this entry will be NULL if there is no any GraphicBufferEntry found. After this call, the
- * GraphicBufferEntry will be removed from the BufferList if a GraphicBufferEntry is found.
- *
- * This method needs to be called with mLock held.
- *
- */
- GraphicBufferEntry getFirstBufferFromBufferListLocked(BufferList& buffers, int streamId);
-
- /**
- * Check if there is any buffer associated with this stream in the given buffer list.
- *
- * This method needs to be called with mLock held.
- *
- */
- bool inline hasBufferForStreamLocked(BufferList& buffers, int streamId);
+ status_t checkAndFreeBufferOnOtherStreamsLocked(int streamId, int streamSetId);
};
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index e46d55e..ec0f508 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -550,6 +550,10 @@
res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
if (res == OK) {
onBuffersRemovedLocked(removedBuffers);
+
+ if (mUseBufferManager && removedBuffers.size() > 0) {
+ mBufferManager->onBuffersRemoved(getId(), getStreamSetId(), removedBuffers.size());
+ }
}
}
@@ -687,13 +691,24 @@
}
ALOGV("Stream %d: Buffer released", stream->getId());
+ bool shouldFreeBuffer = false;
status_t res = stream->mBufferManager->onBufferReleased(
- stream->getId(), stream->getStreamSetId());
+ stream->getId(), stream->getStreamSetId(), &shouldFreeBuffer);
if (res != OK) {
ALOGE("%s: signaling buffer release to buffer manager failed: %s (%d).", __FUNCTION__,
strerror(-res), res);
stream->mState = STATE_ERROR;
}
+
+ if (shouldFreeBuffer) {
+ sp<GraphicBuffer> buffer;
+ // Detach and free a buffer (when buffer goes out of scope)
+ stream->detachBufferLocked(&buffer, /*fenceFd*/ nullptr);
+ if (buffer.get() != nullptr) {
+ stream->mBufferManager->notifyBufferRemoved(
+ stream->getId(), stream->getStreamSetId());
+ }
+ }
}
void Camera3OutputStream::onBuffersRemovedLocked(
@@ -708,7 +723,10 @@
status_t Camera3OutputStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
Mutex::Autolock l(mLock);
+ return detachBufferLocked(buffer, fenceFd);
+}
+status_t Camera3OutputStream::detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd) {
ALOGV("Stream %d: detachBuffer", getId());
if (buffer == nullptr) {
return BAD_VALUE;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 86676e4..98ffb73 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -263,7 +263,11 @@
virtual status_t getEndpointUsage(uint32_t *usage) const;
+ /**
+ * Private methods
+ */
void onBuffersRemovedLocked(const std::vector<sp<GraphicBuffer>>&);
+ status_t detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd);
}; // class Camera3OutputStream
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
index b8a5e90..52658d1 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -65,3 +65,5 @@
getegid32: 1
getgroups32: 1
recvmsg: 1
+getpid: 1
+gettid: 1
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy
index 7e8af1a..23d349d 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy
@@ -59,3 +59,5 @@
getdents64: 1
pipe2: 1
ppoll: 1
+getpid: 1
+gettid: 1
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
index aa8be5b..42e0d75 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
@@ -50,3 +50,5 @@
getdents64: 1
pipe2: 1
ppoll: 1
+getpid: 1
+gettid: 1
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy b/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy
index b5a6503..76403f2 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy
@@ -60,3 +60,5 @@
# Required by AddressSanitizer
gettid: 1
sched_yield: 1
+getpid: 1
+gettid: 1
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 65b17bc..3dc1feb 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -21,10 +21,8 @@
#include <assert.h>
#include <map>
#include <mutex>
-#include <utils/Singleton.h>
#include "AAudioEndpointManager.h"
-#include "AAudioServiceEndpoint.h"
using namespace android;
using namespace aaudio;
@@ -43,7 +41,6 @@
std::lock_guard<std::mutex> lock(mLock);
// Try to find an existing endpoint.
- ALOGD("AAudioEndpointManager::openEndpoint(), device = %d, dir = %d", deviceId, direction);
switch (direction) {
case AAUDIO_DIRECTION_INPUT:
endpoint = mInputs[deviceId];
@@ -55,32 +52,37 @@
assert(false); // There are only two possible directions.
break;
}
+ ALOGD("AAudioEndpointManager::openEndpoint(), found %p for device = %d, dir = %d",
+ endpoint, deviceId, (int)direction);
- // If we can't find an existing one then open one.
- ALOGD("AAudioEndpointManager::openEndpoint(), found %p", endpoint);
+ // If we can't find an existing one then open a new one.
if (endpoint == nullptr) {
- endpoint = new AAudioServiceEndpoint(audioService);
- if (endpoint->open(deviceId, direction) != AAUDIO_OK) {
- ALOGE("AAudioEndpointManager::findEndpoint(), open failed");
- delete endpoint;
- endpoint = nullptr;
- } else {
- switch(direction) {
- case AAUDIO_DIRECTION_INPUT:
- mInputs[deviceId] = endpoint;
- break;
- case AAUDIO_DIRECTION_OUTPUT:
- mOutputs[deviceId] = endpoint;
- break;
+ if (direction == AAUDIO_DIRECTION_INPUT) {
+ AAudioServiceEndpointCapture *capture = new AAudioServiceEndpointCapture(audioService);
+ if (capture->open(deviceId) != AAUDIO_OK) {
+ ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+ delete capture;
+ } else {
+ mInputs[deviceId] = capture;
+ endpoint = capture;
+ }
+ } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
+ AAudioServiceEndpointPlay *player = new AAudioServiceEndpointPlay(audioService);
+ if (player->open(deviceId) != AAUDIO_OK) {
+ ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+ delete player;
+ } else {
+ mOutputs[deviceId] = player;
+ endpoint = player;
}
}
+
}
if (endpoint != nullptr) {
// Increment the reference count under this lock.
endpoint->setReferenceCount(endpoint->getReferenceCount() + 1);
}
-
return endpoint;
}
@@ -105,6 +107,7 @@
mOutputs.erase(deviceId);
break;
}
+
serviceEndpoint->close();
delete serviceEndpoint;
}
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index bbcfc1d..db1103d 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -23,6 +23,8 @@
#include "binding/AAudioServiceMessage.h"
#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceEndpointCapture.h"
+#include "AAudioServiceEndpointPlay.h"
namespace aaudio {
@@ -49,10 +51,8 @@
std::mutex mLock;
- // We need separate inputs and outputs because they may both have device==0.
- // TODO review
- std::map<int32_t, AAudioServiceEndpoint *> mInputs;
- std::map<int32_t, AAudioServiceEndpoint *> mOutputs;
+ std::map<int32_t, AAudioServiceEndpointCapture *> mInputs;
+ std::map<int32_t, AAudioServiceEndpointPlay *> mOutputs;
};
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 816d5ab..c9b9065 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -63,7 +63,6 @@
}
if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE) {
- ALOGD("AAudioService::openStream(), sharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE");
serviceStream = new AAudioServiceStreamMMAP();
result = serviceStream->open(request, configurationOutput);
if (result != AAUDIO_OK) {
@@ -79,7 +78,6 @@
// if SHARED requested or if EXCLUSIVE failed
if (sharingMode == AAUDIO_SHARING_MODE_SHARED
|| (serviceStream == nullptr && !sharingModeMatchRequired)) {
- ALOGD("AAudioService::openStream(), try AAUDIO_SHARING_MODE_SHARED");
serviceStream = new AAudioServiceStreamShared(*this);
result = serviceStream->open(request, configurationOutput);
configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_SHARED);
@@ -91,7 +89,7 @@
return result;
} else {
aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
- ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
+ ALOGV("AAudioService::openStream(): handle = 0x%08X", handle);
if (handle < 0) {
ALOGE("AAudioService::openStream(): handle table full");
delete serviceStream;
@@ -104,7 +102,7 @@
AAudioServiceStreamBase *serviceStream = (AAudioServiceStreamBase *)
mHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM,
streamHandle);
- ALOGD("AAudioService.closeStream(0x%08X)", streamHandle);
+ ALOGV("AAudioService.closeStream(0x%08X)", streamHandle);
if (serviceStream != nullptr) {
serviceStream->close();
delete serviceStream;
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index a2e6d33..d8ae284 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -44,43 +44,23 @@
// This is the maximum size in frames. The effective size can be tuned smaller at runtime.
#define DEFAULT_BUFFER_CAPACITY (48 * 8)
-// The mStreamInternal will use a service interface that does not go through Binder.
-AAudioServiceEndpoint::AAudioServiceEndpoint(AAudioService &audioService)
- : mStreamInternal(audioService, true)
- {
-}
-
-AAudioServiceEndpoint::~AAudioServiceEndpoint() {
-}
-
// Set up an EXCLUSIVE MMAP stream that will be shared.
-aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId, aaudio_direction_t direction) {
+aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId) {
+ mStreamInternal = getStreamInternal();
+
AudioStreamBuilder builder;
builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
// Don't fall back to SHARED because that would cause recursion.
builder.setSharingModeMatchRequired(true);
builder.setDeviceId(deviceId);
- builder.setDirection(direction);
+ builder.setDirection(getDirection());
builder.setBufferCapacity(DEFAULT_BUFFER_CAPACITY);
- aaudio_result_t result = mStreamInternal.open(builder);
- if (result == AAUDIO_OK) {
- mMixer.allocate(mStreamInternal.getSamplesPerFrame(), mStreamInternal.getFramesPerBurst());
-
- int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
- if (burstsPerBuffer == 0) {
- mLatencyTuningEnabled = true;
- burstsPerBuffer = 2;
- }
- ALOGD("AAudioServiceEndpoint(): burstsPerBuffer = %d", burstsPerBuffer);
- int32_t desiredBufferSize = burstsPerBuffer * mStreamInternal.getFramesPerBurst();
- mStreamInternal.setBufferSize(desiredBufferSize);
- }
- return result;
+ return getStreamInternal()->open(builder);
}
aaudio_result_t AAudioServiceEndpoint::close() {
- return mStreamInternal.close();
+ return getStreamInternal()->close();
}
// TODO, maybe use an interface to reduce exposure
@@ -102,96 +82,49 @@
std::lock_guard<std::mutex> lock(mLockStreams);
mRunningStreams.push_back(sharedStream);
if (mRunningStreams.size() == 1) {
- startMixer_l();
+ startSharingThread_l();
}
return AAUDIO_OK;
}
aaudio_result_t AAudioServiceEndpoint::stopStream(AAudioServiceStreamShared *sharedStream) {
- std::lock_guard<std::mutex> lock(mLockStreams);
- mRunningStreams.erase(std::remove(mRunningStreams.begin(), mRunningStreams.end(), sharedStream),
- mRunningStreams.end());
- if (mRunningStreams.size() == 0) {
- stopMixer_l();
+ int numRunningStreams = 0;
+ {
+ std::lock_guard<std::mutex> lock(mLockStreams);
+ mRunningStreams.erase(
+ std::remove(mRunningStreams.begin(), mRunningStreams.end(), sharedStream),
+ mRunningStreams.end());
+ numRunningStreams = mRunningStreams.size();
+ }
+ if (numRunningStreams == 0) {
+ // Don't call this under a lock because the callbackLoop also uses the lock.
+ stopSharingThread();
}
return AAUDIO_OK;
}
-static void *aaudio_mixer_thread_proc(void *context) {
- AAudioServiceEndpoint *stream = (AAudioServiceEndpoint *) context;
- if (stream != NULL) {
- return stream->callbackLoop();
+static void *aaudio_endpoint_thread_proc(void *context) {
+ AAudioServiceEndpoint *endpoint = (AAudioServiceEndpoint *) context;
+ if (endpoint != NULL) {
+ return endpoint->callbackLoop();
} else {
return NULL;
}
}
-// Render audio in the application callback and then write the data to the stream.
-void *AAudioServiceEndpoint::callbackLoop() {
- ALOGD("AAudioServiceEndpoint(): callbackLoop() entering");
- int32_t underflowCount = 0;
-
- aaudio_result_t result = mStreamInternal.requestStart();
-
- // result might be a frame count
- while (mCallbackEnabled.load() && mStreamInternal.isPlaying() && (result >= 0)) {
- // Mix data from each active stream.
- {
- mMixer.clear();
- std::lock_guard<std::mutex> lock(mLockStreams);
- for(AAudioServiceStreamShared *sharedStream : mRunningStreams) {
- FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
- float volume = 0.5; // TODO get from system
- bool underflowed = mMixer.mix(fifo, volume);
- underflowCount += underflowed ? 1 : 0;
- // TODO log underflows in each stream
- sharedStream->markTransferTime(AudioClock::getNanoseconds());
- }
- }
-
- // Write audio data to stream using a blocking write.
- int64_t timeoutNanos = calculateReasonableTimeout(mStreamInternal.getFramesPerBurst());
- result = mStreamInternal.write(mMixer.getOutputBuffer(), getFramesPerBurst(), timeoutNanos);
- if (result == AAUDIO_ERROR_DISCONNECTED) {
- disconnectRegisteredStreams();
- break;
- } else if (result != getFramesPerBurst()) {
- ALOGW("AAudioServiceEndpoint(): callbackLoop() wrote %d / %d",
- result, getFramesPerBurst());
- break;
- }
- }
-
- result = mStreamInternal.requestStop();
-
- ALOGD("AAudioServiceEndpoint(): callbackLoop() exiting, %d underflows", underflowCount);
- return NULL; // TODO review
-}
-
-aaudio_result_t AAudioServiceEndpoint::startMixer_l() {
+aaudio_result_t AAudioServiceEndpoint::startSharingThread_l() {
// Launch the callback loop thread.
- int64_t periodNanos = mStreamInternal.getFramesPerBurst()
+ int64_t periodNanos = getStreamInternal()->getFramesPerBurst()
* AAUDIO_NANOS_PER_SECOND
/ getSampleRate();
mCallbackEnabled.store(true);
- return mStreamInternal.createThread(periodNanos, aaudio_mixer_thread_proc, this);
+ return getStreamInternal()->createThread(periodNanos, aaudio_endpoint_thread_proc, this);
}
-aaudio_result_t AAudioServiceEndpoint::stopMixer_l() {
+aaudio_result_t AAudioServiceEndpoint::stopSharingThread() {
mCallbackEnabled.store(false);
- return mStreamInternal.joinThread(NULL, calculateReasonableTimeout(mStreamInternal.getFramesPerBurst()));
-}
-
-// TODO Call method in AudioStreamInternal when that callback CL is merged.
-int64_t AAudioServiceEndpoint::calculateReasonableTimeout(int32_t framesPerOperation) {
-
- // Wait for at least a second or some number of callbacks to join the thread.
- int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
- / getSampleRate();
- if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
- timeoutNanoseconds = MIN_TIMEOUT_NANOS;
- }
- return timeoutNanoseconds;
+ aaudio_result_t result = getStreamInternal()->joinThread(NULL);
+ return result;
}
void AAudioServiceEndpoint::disconnectRegisteredStreams() {
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index d0c2f53..50bf049 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -23,6 +23,7 @@
#include <vector>
#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalPlay.h"
#include "binding/AAudioServiceMessage.h"
#include "AAudioServiceStreamShared.h"
#include "AAudioServiceStreamMMAP.h"
@@ -33,14 +34,13 @@
class AAudioServiceEndpoint {
public:
- explicit AAudioServiceEndpoint(android::AAudioService &audioService);
- virtual ~AAudioServiceEndpoint();
+ virtual ~AAudioServiceEndpoint() = default;
- aaudio_result_t open(int32_t deviceId, aaudio_direction_t direction);
+ virtual aaudio_result_t open(int32_t deviceId);
- int32_t getSampleRate() const { return mStreamInternal.getSampleRate(); }
- int32_t getSamplesPerFrame() const { return mStreamInternal.getSamplesPerFrame(); }
- int32_t getFramesPerBurst() const { return mStreamInternal.getFramesPerBurst(); }
+ int32_t getSampleRate() const { return mStreamInternal->getSampleRate(); }
+ int32_t getSamplesPerFrame() const { return mStreamInternal->getSamplesPerFrame(); }
+ int32_t getFramesPerBurst() const { return mStreamInternal->getFramesPerBurst(); }
aaudio_result_t registerStream(AAudioServiceStreamShared *sharedStream);
aaudio_result_t unregisterStream(AAudioServiceStreamShared *sharedStream);
@@ -48,13 +48,13 @@
aaudio_result_t stopStream(AAudioServiceStreamShared *sharedStream);
aaudio_result_t close();
- int32_t getDeviceId() const { return mStreamInternal.getDeviceId(); }
+ int32_t getDeviceId() const { return mStreamInternal->getDeviceId(); }
- aaudio_direction_t getDirection() const { return mStreamInternal.getDirection(); }
+ aaudio_direction_t getDirection() const { return mStreamInternal->getDirection(); }
void disconnectRegisteredStreams();
- void *callbackLoop();
+ virtual void *callbackLoop() = 0;
// This should only be called from the AAudioEndpointManager under a mutex.
int32_t getReferenceCount() const {
@@ -66,23 +66,21 @@
mReferenceCount = count;
}
-private:
- aaudio_result_t startMixer_l();
- aaudio_result_t stopMixer_l();
-
- int64_t calculateReasonableTimeout(int32_t framesPerOperation);
-
- AudioStreamInternal mStreamInternal;
- AAudioMixer mMixer;
+ virtual AudioStreamInternal *getStreamInternal() = 0;
std::atomic<bool> mCallbackEnabled;
- int32_t mReferenceCount = 0;
- bool mLatencyTuningEnabled = false; // TODO implement tuning
std::mutex mLockStreams;
+
std::vector<AAudioServiceStreamShared *> mRegisteredStreams;
std::vector<AAudioServiceStreamShared *> mRunningStreams;
+private:
+ aaudio_result_t startSharingThread_l();
+ aaudio_result_t stopSharingThread();
+
+ AudioStreamInternal *mStreamInternal = nullptr;
+ int32_t mReferenceCount = 0;
};
} /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
new file mode 100644
index 0000000..29d6cb9
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
+
+#include "core/AudioStreamBuilder.h"
+#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceEndpointCapture.h"
+
+using namespace android; // TODO just import names needed
+using namespace aaudio; // TODO just import names needed
+
+AAudioServiceEndpointCapture::AAudioServiceEndpointCapture(AAudioService &audioService)
+ : mStreamInternalCapture(audioService, true) {
+}
+
+AAudioServiceEndpointCapture::~AAudioServiceEndpointCapture() {
+    delete[] mDistributionBuffer;
+}
+
+aaudio_result_t AAudioServiceEndpointCapture::open(int32_t deviceId) {
+ aaudio_result_t result = AAudioServiceEndpoint::open(deviceId);
+ if (result == AAUDIO_OK) {
+        delete[] mDistributionBuffer;
+ int distributionBufferSizeBytes = getStreamInternal()->getFramesPerBurst()
+ * getStreamInternal()->getBytesPerFrame();
+ mDistributionBuffer = new uint8_t[distributionBufferSizeBytes];
+ }
+ return result;
+}
+
+// Read data from the shared MMAP stream and then distribute it to the client streams.
+void *AAudioServiceEndpointCapture::callbackLoop() {
+ ALOGD("AAudioServiceEndpointCapture(): callbackLoop() entering");
+ int32_t underflowCount = 0;
+
+ aaudio_result_t result = getStreamInternal()->requestStart();
+
+ int64_t timeoutNanos = getStreamInternal()->calculateReasonableTimeout();
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && getStreamInternal()->isActive() && (result >= 0)) {
+ // Read audio data from stream using a blocking read.
+ result = getStreamInternal()->read(mDistributionBuffer, getFramesPerBurst(), timeoutNanos);
+ if (result == AAUDIO_ERROR_DISCONNECTED) {
+ disconnectRegisteredStreams();
+ break;
+ } else if (result != getFramesPerBurst()) {
+ ALOGW("AAudioServiceEndpointCapture(): callbackLoop() read %d / %d",
+ result, getFramesPerBurst());
+ break;
+ }
+
+ // Distribute data to each active stream.
+ { // use lock guard
+ std::lock_guard <std::mutex> lock(mLockStreams);
+ for (AAudioServiceStreamShared *sharedStream : mRunningStreams) {
+ FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
+ if (fifo->getFifoControllerBase()->getEmptyFramesAvailable() <
+ getFramesPerBurst()) {
+ underflowCount++;
+ } else {
+ fifo->write(mDistributionBuffer, getFramesPerBurst());
+ }
+ sharedStream->markTransferTime(AudioClock::getNanoseconds());
+ }
+ }
+ }
+
+ result = getStreamInternal()->requestStop();
+
+ ALOGD("AAudioServiceEndpointCapture(): callbackLoop() exiting, %d underflows", underflowCount);
+ return NULL; // TODO review
+}
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.h b/services/oboeservice/AAudioServiceEndpointCapture.h
new file mode 100644
index 0000000..35857d1
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointCapture.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SERVICE_ENDPOINT_CAPTURE_H
+#define AAUDIO_SERVICE_ENDPOINT_CAPTURE_H
+
+#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalCapture.h"
+
+namespace aaudio {
+
+class AAudioServiceEndpointCapture : public AAudioServiceEndpoint {
+public:
+ explicit AAudioServiceEndpointCapture(android::AAudioService &audioService);
+ virtual ~AAudioServiceEndpointCapture();
+
+ aaudio_result_t open(int32_t deviceId) override;
+
+ AudioStreamInternal *getStreamInternal() override {
+ return &mStreamInternalCapture;
+ }
+
+ void *callbackLoop() override;
+
+private:
+ AudioStreamInternalCapture mStreamInternalCapture;
+ uint8_t *mDistributionBuffer = nullptr;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SERVICE_ENDPOINT_CAPTURE_H
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
new file mode 100644
index 0000000..cc09cc3
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
+#include <algorithm>
+#include <mutex>
+#include <vector>
+
+#include "core/AudioStreamBuilder.h"
+#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceEndpointPlay.h"
+
+using namespace android; // TODO just import names needed
+using namespace aaudio; // TODO just import names needed
+
+#define BURSTS_PER_BUFFER_DEFAULT 2
+
+AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService &audioService)
+ : mStreamInternalPlay(audioService, true) {
+}
+
+AAudioServiceEndpointPlay::~AAudioServiceEndpointPlay() {
+}
+
+aaudio_result_t AAudioServiceEndpointPlay::open(int32_t deviceId) {
+ aaudio_result_t result = AAudioServiceEndpoint::open(deviceId);
+ if (result == AAUDIO_OK) {
+ mMixer.allocate(getStreamInternal()->getSamplesPerFrame(),
+ getStreamInternal()->getFramesPerBurst());
+
+ int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
+ if (burstsPerBuffer == 0) {
+ mLatencyTuningEnabled = true;
+ burstsPerBuffer = BURSTS_PER_BUFFER_DEFAULT;
+ }
+        ALOGD("AAudioServiceEndpointPlay(): burstsPerBuffer = %d", burstsPerBuffer);
+ int32_t desiredBufferSize = burstsPerBuffer * getStreamInternal()->getFramesPerBurst();
+ getStreamInternal()->setBufferSize(desiredBufferSize);
+ }
+ return result;
+}
+
+// Mix data from each application stream and write result to the shared MMAP stream.
+void *AAudioServiceEndpointPlay::callbackLoop() {
+ ALOGD("AAudioServiceEndpointPlay(): callbackLoop() entering");
+ int32_t underflowCount = 0;
+
+ aaudio_result_t result = getStreamInternal()->requestStart();
+
+ int64_t timeoutNanos = getStreamInternal()->calculateReasonableTimeout();
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && getStreamInternal()->isActive() && (result >= 0)) {
+ // Mix data from each active stream.
+ mMixer.clear();
+ { // use lock guard
+ std::lock_guard <std::mutex> lock(mLockStreams);
+ for (AAudioServiceStreamShared *sharedStream : mRunningStreams) {
+ FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
+ float volume = 0.5; // TODO get from system
+ bool underflowed = mMixer.mix(fifo, volume);
+ underflowCount += underflowed ? 1 : 0;
+ // TODO log underflows in each stream
+ sharedStream->markTransferTime(AudioClock::getNanoseconds());
+ }
+ }
+
+ // Write mixer output to stream using a blocking write.
+ result = getStreamInternal()->write(mMixer.getOutputBuffer(),
+ getFramesPerBurst(), timeoutNanos);
+ if (result == AAUDIO_ERROR_DISCONNECTED) {
+ disconnectRegisteredStreams();
+ break;
+ } else if (result != getFramesPerBurst()) {
+            ALOGW("AAudioServiceEndpointPlay(): callbackLoop() wrote %d / %d",
+ result, getFramesPerBurst());
+ break;
+ }
+ }
+
+ result = getStreamInternal()->requestStop();
+
+ ALOGD("AAudioServiceEndpointPlay(): callbackLoop() exiting, %d underflows", underflowCount);
+ return NULL; // TODO review
+}
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.h b/services/oboeservice/AAudioServiceEndpointPlay.h
new file mode 100644
index 0000000..b977960
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointPlay.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SERVICE_ENDPOINT_PLAY_H
+#define AAUDIO_SERVICE_ENDPOINT_PLAY_H
+
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalPlay.h"
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioMixer.h"
+#include "AAudioService.h"
+
+namespace aaudio {
+
+class AAudioServiceEndpointPlay : public AAudioServiceEndpoint {
+public:
+ explicit AAudioServiceEndpointPlay(android::AAudioService &audioService);
+ virtual ~AAudioServiceEndpointPlay();
+
+ aaudio_result_t open(int32_t deviceId) override;
+
+ AudioStreamInternal *getStreamInternal() override {
+ return &mStreamInternalPlay;
+ }
+
+ void *callbackLoop() override;
+
+private:
+ AudioStreamInternalPlay mStreamInternalPlay; // for playing output of mixer
+ bool mLatencyTuningEnabled = false; // TODO implement tuning
+ AAudioMixer mMixer; //
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SERVICE_ENDPOINT_PLAY_H
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 8248f8b..8f0abc2 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -64,7 +64,6 @@
}
aaudio_result_t AAudioServiceStreamBase::start() {
- ALOGD("AAudioServiceStreamBase::start() send AAUDIO_SERVICE_EVENT_STARTED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
mState = AAUDIO_STREAM_STATE_STARTED;
mThreadEnabled.store(true);
@@ -80,7 +79,6 @@
processError();
return result;
}
- ALOGD("AAudioServiceStreamBase::pause() send AAUDIO_SERVICE_EVENT_PAUSED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
mState = AAUDIO_STREAM_STATE_PAUSED;
return result;
@@ -95,20 +93,18 @@
processError();
return result;
}
- ALOGD("AAudioServiceStreamBase::stop() send AAUDIO_SERVICE_EVENT_STOPPED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_STOPPED);
mState = AAUDIO_STREAM_STATE_STOPPED;
return result;
}
aaudio_result_t AAudioServiceStreamBase::flush() {
- ALOGD("AAudioServiceStreamBase::flush() send AAUDIO_SERVICE_EVENT_FLUSHED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
mState = AAUDIO_STREAM_STATE_FLUSHED;
return AAUDIO_OK;
}
-// implement Runnable
+// implement Runnable, periodically send timestamps to client
void AAudioServiceStreamBase::run() {
ALOGD("AAudioServiceStreamBase::run() entering ----------------");
TimestampScheduler timestampScheduler;
@@ -162,18 +158,18 @@
aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
AAudioServiceMessage command;
- //ALOGD("sendCurrentTimestamp() called");
aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
&command.timestamp.timestamp);
if (result == AAUDIO_OK) {
- //ALOGD("sendCurrentTimestamp(): position %d", (int) command.timestamp.position);
+ // ALOGD("sendCurrentTimestamp(): position = %lld, nanos = %lld",
+ // (long long) command.timestamp.position,
+ // (long long) command.timestamp.timestamp);
command.what = AAudioServiceMessage::code::TIMESTAMP;
result = writeUpMessageQueue(&command);
}
return result;
}
-
/**
* Get an immutable description of the in-memory queues
* used to communicate with the underlying HAL or Service.
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 9318c2e..ee52c39 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -133,7 +133,7 @@
// This is used by one thread to tell another thread to exit. So it must be atomic.
std::atomic<bool> mThreadEnabled;
- aaudio_audio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ aaudio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
int32_t mFramesPerBurst = 0;
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 78a1583..97b9937 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -53,7 +53,6 @@
}
aaudio_result_t AAudioServiceStreamMMAP::close() {
- ALOGD("AAudioServiceStreamMMAP::close() called, %p", mMmapStream.get());
mMmapStream.clear(); // TODO review. Is that all we have to do?
// Apparently the above close is asynchronous. An attempt to open a new device
// right after a close can fail. Also some callbacks may still be in flight!
@@ -61,8 +60,6 @@
AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
if (mAudioDataFileDescriptor != -1) {
- ALOGV("AAudioServiceStreamMMAP: LEAK? close(mAudioDataFileDescriptor = %d)\n",
- mAudioDataFileDescriptor);
::close(mAudioDataFileDescriptor);
mAudioDataFileDescriptor = -1;
}
@@ -76,7 +73,7 @@
const audio_attributes_t attributes = {
.content_type = AUDIO_CONTENT_TYPE_MUSIC,
.usage = AUDIO_USAGE_MEDIA,
- .source = AUDIO_SOURCE_DEFAULT,
+ .source = AUDIO_SOURCE_VOICE_RECOGNITION,
.flags = AUDIO_FLAG_LOW_LATENCY,
.tags = ""
};
@@ -91,17 +88,13 @@
const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
audio_port_handle_t deviceId = configurationInput.getDeviceId();
- // ALOGI("open request dump()");
- // request.dump();
-
mMmapClient.clientUid = request.getUserId();
mMmapClient.clientPid = request.getProcessId();
aaudio_direction_t direction = request.getDirection();
// Fill in config
- aaudio_audio_format_t aaudioFormat = configurationInput.getAudioFormat();
+ aaudio_format_t aaudioFormat = configurationInput.getAudioFormat();
if (aaudioFormat == AAUDIO_UNSPECIFIED || aaudioFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- ALOGI("open forcing use of AAUDIO_FORMAT_PCM_I16");
aaudioFormat = AAUDIO_FORMAT_PCM_I16;
}
config.format = AAudioConvert_aaudioToAndroidDataFormat(aaudioFormat);
@@ -132,9 +125,6 @@
MmapStreamInterface::stream_direction_t streamDirection = (direction == AAUDIO_DIRECTION_OUTPUT)
? MmapStreamInterface::DIRECTION_OUTPUT : MmapStreamInterface::DIRECTION_INPUT;
- ALOGD("AAudioServiceStreamMMAP::open() request devId = %d, sRate = %d",
- deviceId, config.sample_rate);
-
// Open HAL stream.
status_t status = MmapStreamInterface::openMmapStream(streamDirection,
&attributes,
@@ -171,8 +161,6 @@
: audio_channel_count_from_in_mask(config.channel_mask);
mAudioDataFileDescriptor = mMmapBufferinfo.shared_memory_fd;
- ALOGV("AAudioServiceStreamMMAP::open LEAK? mAudioDataFileDescriptor = %d\n",
- mAudioDataFileDescriptor);
mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
mCapacityInFrames = mMmapBufferinfo.buffer_size_frames;
mAudioFormat = AAudioConvert_androidToAAudioDataFormat(config.format);
@@ -193,9 +181,6 @@
ALOGD("AAudioServiceStreamMMAP::open() original burst = %d, minMicros = %d, final burst = %d\n",
mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
- ALOGD("AAudioServiceStreamMMAP::open() got devId = %d, sRate = %d",
- deviceId, config.sample_rate);
-
// Fill in AAudioStreamConfiguration
configurationOutput.setSampleRate(mSampleRate);
configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
@@ -205,16 +190,17 @@
return AAUDIO_OK;
}
-
/**
* Start the flow of data.
*/
aaudio_result_t AAudioServiceStreamMMAP::start() {
if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
- aaudio_result_t result = mMmapStream->start(mMmapClient, &mPortHandle);
- if (result != AAUDIO_OK) {
- ALOGE("AAudioServiceStreamMMAP::start() mMmapStream->start() returned %d", result);
+ aaudio_result_t result;
+ status_t status = mMmapStream->start(mMmapClient, &mPortHandle);
+ if (status != OK) {
+ ALOGE("AAudioServiceStreamMMAP::start() mMmapStream->start() returned %d", status);
processError();
+ result = AAudioConvert_androidToAAudioResult(status);
} else {
result = AAudioServiceStreamBase::start();
}
@@ -228,18 +214,18 @@
if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
aaudio_result_t result1 = AAudioServiceStreamBase::pause();
- aaudio_result_t result2 = mMmapStream->stop(mPortHandle);
+ status_t status = mMmapStream->stop(mPortHandle);
mFramesRead.reset32();
- return (result1 != AAUDIO_OK) ? result1 : result2;
+ return (result1 != AAUDIO_OK) ? result1 : AAudioConvert_androidToAAudioResult(status);
}
aaudio_result_t AAudioServiceStreamMMAP::stop() {
if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
aaudio_result_t result1 = AAudioServiceStreamBase::stop();
- aaudio_result_t result2 = mMmapStream->stop(mPortHandle);
+    status_t status = mMmapStream->stop(mPortHandle);
mFramesRead.reset32();
- return (result1 != AAUDIO_OK) ? result1 : result2;
+ return (result1 != AAUDIO_OK) ? result1 : AAudioConvert_androidToAAudioResult(status);
}
/**
@@ -248,7 +234,6 @@
aaudio_result_t AAudioServiceStreamMMAP::flush() {
if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
// TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
- ALOGD("AAudioServiceStreamMMAP::flush() send AAUDIO_SERVICE_EVENT_FLUSHED");
sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
mState = AAUDIO_STREAM_STATE_FLUSHED;
return AAudioServiceStreamBase::flush();;
@@ -276,7 +261,7 @@
}
void AAudioServiceStreamMMAP::onTearDown() {
- ALOGD("AAudioServiceStreamMMAP::onTearDown() called - TODO");
+ ALOGE("AAudioServiceStreamMMAP::onTearDown() called - TODO");
};
void AAudioServiceStreamMMAP::onVolumeChanged(audio_channel_mask_t channels,
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 713d1f8..494b18e 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -59,11 +59,10 @@
int32_t deviceId = configurationInput.getDeviceId();
aaudio_direction_t direction = request.getDirection();
- ALOGD("AAudioServiceStreamShared::open(), direction = %d", direction);
AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService, deviceId, direction);
- ALOGD("AAudioServiceStreamShared::open(), mServiceEndPoint = %p", mServiceEndpoint);
if (mServiceEndpoint == nullptr) {
+ ALOGE("AAudioServiceStreamShared::open(), mServiceEndPoint = %p", mServiceEndpoint);
return AAUDIO_ERROR_UNAVAILABLE;
}
@@ -77,7 +76,7 @@
}
mSampleRate = configurationInput.getSampleRate();
- if (mSampleRate == AAUDIO_FORMAT_UNSPECIFIED) {
+ if (mSampleRate == AAUDIO_UNSPECIFIED) {
mSampleRate = mServiceEndpoint->getSampleRate();
} else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need %d",
@@ -86,7 +85,7 @@
}
mSamplesPerFrame = configurationInput.getSamplesPerFrame();
- if (mSamplesPerFrame == AAUDIO_FORMAT_UNSPECIFIED) {
+ if (mSamplesPerFrame == AAUDIO_UNSPECIFIED) {
mSamplesPerFrame = mServiceEndpoint->getSamplesPerFrame();
} else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
ALOGE("AAudioServiceStreamShared::open(), mSamplesPerFrame = %d, need %d",
@@ -134,7 +133,7 @@
if (endpoint == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
}
- // Add this stream to the mixer.
+ // For output streams, this will add the stream to the mixer.
aaudio_result_t result = endpoint->startStream(this);
if (result != AAUDIO_OK) {
ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index b981387..dfdbbb3 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -100,7 +100,7 @@
private:
android::AAudioService &mAudioService;
AAudioServiceEndpoint *mServiceEndpoint = nullptr;
- SharedRingBuffer *mAudioDataQueue;
+ SharedRingBuffer *mAudioDataQueue = nullptr;
int64_t mMarkedPosition = 0;
int64_t mMarkedTime = 0;
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index afb477e..b447725 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -29,6 +29,8 @@
AAudioMixer.cpp \
AAudioService.cpp \
AAudioServiceEndpoint.cpp \
+ AAudioServiceEndpointCapture.cpp \
+ AAudioServiceEndpointPlay.cpp \
AAudioServiceStreamBase.cpp \
AAudioServiceStreamMMAP.cpp \
AAudioServiceStreamShared.cpp \
diff --git a/services/radio/RadioService.cpp b/services/radio/RadioService.cpp
index f7a73c3..beb7c09 100644
--- a/services/radio/RadioService.cpp
+++ b/services/radio/RadioService.cpp
@@ -34,6 +34,7 @@
#include <binder/IServiceManager.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
+#include <binder/PermissionCache.h>
#include <hardware/radio.h>
#include <media/AudioSystem.h>
#include "RadioService.h"
@@ -43,6 +44,8 @@
static const char kRadioTunerAudioDeviceName[] = "Radio tuner source";
+static const String16 RADIO_PERMISSION("android.permission.ACCESS_FM_RADIO");
+
RadioService::RadioService()
: BnRadioService(), mNextUniqueId(1)
{
@@ -84,6 +87,9 @@
status_t RadioService::listModules(struct radio_properties *properties,
uint32_t *numModules)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
ALOGV("listModules");
AutoMutex lock(mServiceLock);
@@ -104,6 +110,9 @@
bool withAudio,
sp<IRadio>& radio)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
ALOGV("%s %d config %p withAudio %d", __FUNCTION__, handle, config, withAudio);
AutoMutex lock(mServiceLock);
@@ -717,6 +726,9 @@
status_t RadioService::ModuleClient::setConfiguration(const struct radio_band_config *config)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
status_t status = NO_ERROR;
ALOGV("%s locked", __FUNCTION__);
@@ -738,6 +750,9 @@
status_t RadioService::ModuleClient::getConfiguration(struct radio_band_config *config)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
status_t status = NO_ERROR;
ALOGV("%s locked", __FUNCTION__);
@@ -756,6 +771,9 @@
status_t RadioService::ModuleClient::setMute(bool mute)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
sp<Module> module;
{
Mutex::Autolock _l(mLock);
@@ -774,6 +792,9 @@
status_t RadioService::ModuleClient::getMute(bool *mute)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
sp<Module> module;
{
Mutex::Autolock _l(mLock);
@@ -788,6 +809,9 @@
status_t RadioService::ModuleClient::scan(radio_direction_t direction, bool skipSubChannel)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
@@ -801,6 +825,9 @@
status_t RadioService::ModuleClient::step(radio_direction_t direction, bool skipSubChannel)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
@@ -814,6 +841,9 @@
status_t RadioService::ModuleClient::tune(uint32_t channel, uint32_t subChannel)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
@@ -827,6 +857,9 @@
status_t RadioService::ModuleClient::cancel()
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
@@ -840,6 +873,9 @@
status_t RadioService::ModuleClient::getProgramInformation(struct radio_program_info *info)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
@@ -854,6 +890,9 @@
status_t RadioService::ModuleClient::hasControl(bool *hasControl)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
Mutex::Autolock lock(mLock);
ALOGV("%s locked", __FUNCTION__);
*hasControl = mTuner != 0;