Define audio HAL

Modeled after hardware/audio.h, with the following changes:

 - names changed to satisfy HAL style guide;

 - defined getter / setter methods for properties, and interfaces
   for devices where needed;

 - stream out callback changed to be used over RPC;

 - 'dump' method is already defined by BBinder, so in HAL
   interfaces it is replaced by 'debugDump'.

Note that audio data is currently transferred using a byte buffer,
which is not efficient due to the memory copy and the HwBinder
transaction involved. The transfer method will be changed to
FastMessageQueue.
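
For illustration, a rough client-side sketch of the current byte-buffer
path, assuming the C++ proxy that hidl-gen generates for the IStreamOut
interface below ('writeChunk' is a made-up helper, not part of the HAL):

    #include <android/hardware/audio/2.0/IStreamOut.h>

    #include <cstdint>
    #include <sys/types.h>

    using ::android::sp;
    using ::android::hardware::hidl_vec;
    using ::android::hardware::Return;
    using ::android::hardware::audio::V2_0::IStreamOut;
    using ::android::hardware::audio::V2_0::Result;

    // Each write wraps 'buffer' in a hidl_vec, and the data is then copied
    // again into the HwBinder transaction; an FMQ would avoid this
    // per-write copy.
    ssize_t writeChunk(const sp<IStreamOut>& stream,
                       const uint8_t* buffer, size_t bytes) {
        hidl_vec<uint8_t> data;
        data.setToExternal(const_cast<uint8_t*>(buffer), bytes);
        Result retval = Result::OK;
        uint64_t written = 0;
        Return<void> ret = stream->write(data, [&](Result r, uint64_t w) {
            retval = r;
            written = w;
        });
        if (!ret.isOk() || retval != Result::OK) {
            return -1;  // transport error or HAL-reported failure
        }
        return static_cast<ssize_t>(written);
    }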

Bug: 30222631
Test: make

Change-Id: Ibb3bd940a91820e81d1a2b53b38d63b9e3de148a
diff --git a/audio/2.0/IStreamOut.hal b/audio/2.0/IStreamOut.hal
new file mode 100644
index 0000000..adce538
--- /dev/null
+++ b/audio/2.0/IStreamOut.hal
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.audio@2.0;
+
+import android.hardware.audio.common@2.0;
+import IStream;
+import IStreamOutCallback;
+
+interface IStreamOut extends IStream {
+    typedef android.hardware.audio@2.0::Result Result;
+
+    /*
+     * Return the audio hardware driver's estimated latency in milliseconds.
+     *
+     * @return latencyMs latency in milliseconds.
+     */
+    getLatency() generates (uint32_t latencyMs);
+
+    /*
+     * This method is used in situations where audio mixing is done in the
+     * hardware. It provides a direct interface to the hardware, allowing the
+     * volume to be set directly as opposed to via the framework. The stream
+     * may feed multiple PCM outputs or hardware accelerated codecs, such as
+     * MP3 or AAC.
+     *
+     * @param left left channel attenuation, 1.0f is unity, 0.0f is zero.
+     * @param right right channel attenuation, 1.0f is unity, 0.0f is zero.
+     * @return retval operation completion status.
+     */
+    setVolume(float left, float right) generates (Result retval);
+
+    /*
+     * Write an audio buffer to the driver. On success, sets 'retval' to 'OK'
+     * and returns the number of bytes written. If at least one frame was
+     * written successfully prior to the error, it is suggested that the
+     * driver return that successful (short) byte count and then return an
+     * error in the subsequent call.
+     *
+     * If 'setCallback' has previously been called to enable non-blocking mode
+     * then 'write' is not allowed to block. It must write only the number of
+     * bytes that currently fit in the driver/hardware buffer and then return
+     * this byte count. If this is less than the requested write size the
+     * callback function must be called when more space is available in the
+     * driver/hardware buffer.
+     *
+     * @param data audio data.
+     * @return retval operation completion status.
+     * @return written number of bytes written.
+     */
+    // TODO(mnaganov): Replace with FMQ version.
+    write(vec<uint8_t> data) generates (Result retval, uint64_t written);
+
+    /*
+     * Return the number of audio frames written by the audio DSP to the DAC
+     * since the output last exited standby.
+     *
+     * @return retval operation completion status.
+     * @return dspFrames number of audio frames written.
+     */
+    getRenderPosition() generates (Result retval, uint32_t dspFrames);
+
+    /*
+     * Get the local time at which the next write to the audio driver will be
+     * presented. The units are microseconds, where the epoch is decided by the
+     * local audio HAL.
+     *
+     * @return retval operation completion status.
+     * @return timestampUs time of the next write.
+     */
+    getNextWriteTimestamp() generates (Result retval, int64_t timestampUs);
+
+    /*
+     * Set the callback interface for notifying completion of non-blocking
+     * write and drain.
+     *
+     * Calling this function implies that all future 'write' and 'drain' calls
+     * must be non-blocking and use the callback to signal completion.
+     *
+     * @return retval operation completion status.
+     */
+    setCallback(IStreamOutCallback callback) generates (Result retval);
+
+    /*
+     * Returns whether the HAL supports pausing and resuming of streams.
+     *
+     * @return supportsPause true if pausing is supported.
+     * @return supportsResume true if resume is supported.
+     */
+    supportsPauseAndResume()
+            generates (bool supportsPause, bool supportsResume);
+
+    /*
+     * Notifies the audio driver to stop playback; the queued buffers are
+     * retained by the hardware. Useful for implementing pause/resume. An
+     * empty implementation is acceptable if pause is not supported; however,
+     * it must be implemented for hardware with non-trivial latency. In the
+     * pause state, some audio hardware may still be using power. Client code
+     * may consider calling 'suspend' after a timeout to prevent that excess
+     * power usage.
+     *
+     * Implementation of this function is mandatory for offloaded playback.
+     *
+     * @return retval operation completion status.
+     */
+    pause() generates (Result retval);
+
+    /*
+     * Notifies the audio driver to resume playback following a pause.
+     * Returns error INVALID_STATE if called without a matching pause.
+     *
+     * Implementation of this function is mandatory for offloaded playback.
+     *
+     * @return retval operation completion status.
+     */
+    resume() generates (Result retval);
+
+    /*
+     * Returns whether the HAL supports draining of streams.
+     *
+     * @return supports true if draining is supported.
+     */
+    supportsDrain() generates (bool supports);
+
+    /*
+     * Requests notification when data buffered by the driver/hardware has been
+     * played. If 'setCallback' has previously been called to enable
+     * non-blocking mode, then 'drain' must not block, instead it must return
+     * quickly and completion of the drain is notified through the callback. If
+     * 'setCallback' has not been called, then 'drain' must block until
+     * completion.
+     *
+     * If 'type' is 'AUDIO_DRAIN_ALL', the drain completes when all previously
+     * written data has been played.
+     *
+     * If 'type' is 'AUDIO_DRAIN_EARLY_NOTIFY', the drain completes shortly
+     * before all data for the current track has played to allow time for the
+     * framework to perform a gapless track switch.
+     *
+     * Drain must return immediately on 'stop' and 'flush' calls.
+     *
+     * Implementation of this function is mandatory for offloaded playback.
+     *
+     * @param type type of drain.
+     * @return retval operation completion status.
+     */
+    drain(AudioDrain type) generates (Result retval);
+
+    /*
+     * Notifies the audio driver to flush the queued data. The stream must
+     * already be paused before calling 'flush'.
+     *
+     * Implementation of this function is mandatory for offloaded playback.
+     *
+     * @return retval operation completion status.
+     */
+    flush() generates (Result retval);
+
+    /*
+     * Return a recent count of the number of audio frames presented to an
+     * external observer. This excludes frames which have been written but are
+     * still in the pipeline. The count is not reset to zero when output enters
+     * standby. Also returns the value of CLOCK_MONOTONIC as of this
+     * presentation count. The returned count is expected to be 'recent', but
+     * does not need to be the most recent possible value. However, the
+     * associated time must correspond to whatever count is returned.
+     *
+     * Example: assume that N+M frames have been presented, where M is a 'small'
+     * number. Then it is permissible to return N instead of N+M, and the
+     * timestamp must correspond to N rather than N+M. The terms 'recent' and
+     * 'small' are not defined. They reflect the quality of the implementation.
+     *
+     * @return retval operation completion status.
+     * @return frames count of presented audio frames.
+     * @return timeStamp associated clock time.
+     */
+    getPresentationPosition()
+            generates (Result retval, uint64_t frames, TimeSpec timeStamp);
+};
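
For reference, a rough client-side sketch of the pause/flush and
presentation-position usage documented above, again assuming the C++
proxy that hidl-gen generates for this interface ('succeeded',
'pauseAndFlush' and 'presentedFrames' are illustrative helpers, not part
of the HAL; TimeSpec is defined in types.hal, outside this hunk):

    #include <android/hardware/audio/2.0/IStreamOut.h>

    #include <cstdint>

    using ::android::sp;
    using ::android::hardware::Return;
    using ::android::hardware::audio::V2_0::IStreamOut;
    using ::android::hardware::audio::V2_0::Result;

    // Unwrap a Return<Result>, treating a HwBinder transport failure as an
    // error.
    static bool succeeded(Return<Result>&& ret) {
        return ret.isOk() && static_cast<Result>(ret) == Result::OK;
    }

    // 'flush' requires the stream to be paused first, and pausing only makes
    // sense if the HAL reports support for it.
    static bool pauseAndFlush(const sp<IStreamOut>& stream) {
        bool supportsPause = false;
        auto supported = stream->supportsPauseAndResume(
                [&](bool pause, bool /*resume*/) { supportsPause = pause; });
        if (!supported.isOk() || !supportsPause) return false;
        return succeeded(stream->pause()) && succeeded(stream->flush());
    }

    // Count of frames actually presented to an external observer; the
    // associated CLOCK_MONOTONIC timestamp is ignored in this sketch.
    static uint64_t presentedFrames(const sp<IStreamOut>& stream) {
        uint64_t frames = 0;
        auto ret = stream->getPresentationPosition(
                [&](Result r, uint64_t f, const auto& /*timeStamp*/) {
                    if (r == Result::OK) frames = f;
                });
        return ret.isOk() ? frames : 0;
    }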