/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.audio@6.0;

import android.hardware.audio.common@6.0;
import IStream;
import IStreamOutCallback;
import IStreamOutEventCallback;

interface IStreamOut extends IStream {
    /**
     * Return the audio hardware driver estimated latency in milliseconds.
     *
     * @return latencyMs latency in milliseconds.
     */
    getLatency() generates (uint32_t latencyMs);

    /**
     * This method is used in situations where audio mixing is done in
     * hardware. It serves as a direct interface to the hardware, allowing
     * the volume to be set directly rather than through the framework.
     * The stream might produce multiple PCM outputs or use hardware
     * accelerated codecs, such as MP3 or AAC.
     * Optional method
     *
     * @param left left channel attenuation, 1.0f is unity, 0.0f is zero.
     * @param right right channel attenuation, 1.0f is unity, 0.0f is zero.
     * @return retval operation completion status.
     *         If a volume is outside [0,1], INVALID_ARGUMENTS must be returned.
     */
    setVolume(float left, float right) generates (Result retval);

    /**
     * Commands that can be executed on the driver writer thread.
     */
    enum WriteCommand : int32_t {
        WRITE,
        GET_PRESENTATION_POSITION,
        GET_LATENCY
    };

    /**
     * Data structure passed back to the client via the status message queue
     * of the 'write' operation.
     *
     * Possible values of 'retval' field:
     *  - OK, write operation was successful;
     *  - INVALID_ARGUMENTS, stream was not configured properly;
     *  - INVALID_STATE, stream is in a state that doesn't allow writes;
     *  - INVALID_OPERATION, retrieving presentation position isn't supported.
     */
    struct WriteStatus {
        Result retval;
        WriteCommand replyTo;  // discriminator
        union Reply {
            uint64_t written;  // WRITE command, number of bytes written, >= 0.
            struct PresentationPosition {  // same as generated by
                uint64_t frames;           // getPresentationPosition.
                TimeSpec timeStamp;
            } presentationPosition;
            uint32_t latencyMs;  // Same as generated by getLatency.
        } reply;
    };
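
    /*
     * Illustration only (not part of the interface): a minimal sketch of how
     * a client might dispatch on the 'replyTo' discriminator after reading a
     * WriteStatus from the status queue. It assumes the generated C++ types
     * mirror the HIDL declarations above; 'status' is a hypothetical value
     * already read from the status message queue, and the handlers are
     * hypothetical client-side functions.
     *
     *   void handleWriteStatus(const WriteStatus& status) {
     *       if (status.retval != Result::OK) return;  // operation failed
     *       switch (status.replyTo) {
     *           case WriteCommand::WRITE:
     *               // number of bytes consumed from the data queue
     *               onBytesWritten(status.reply.written);
     *               break;
     *           case WriteCommand::GET_PRESENTATION_POSITION:
     *               onPosition(status.reply.presentationPosition.frames,
     *                          status.reply.presentationPosition.timeStamp);
     *               break;
     *           case WriteCommand::GET_LATENCY:
     *               onLatency(status.reply.latencyMs);
     *               break;
     *       }
     *   }
     */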

    /**
     * Called when the metadata of the stream's source has been changed.
     * @param sourceMetadata Description of the audio that is played by the clients.
     */
    updateSourceMetadata(SourceMetadata sourceMetadata);

    /**
     * Set up required transports for passing audio buffers to the driver.
     *
     * The transport consists of three message queues:
     *  -- command queue is used to instruct the writer thread what operation
     *     to perform;
     *  -- data queue is used for passing audio data from the client
     *     to the driver;
     *  -- status queue is used for reporting operation status
     *     (e.g. the number of bytes actually written or an error code).
     *
     * The driver operates on a dedicated thread. The client must ensure that
     * the thread is given an appropriate priority and assigned to the correct
     * scheduler and cgroup. For this purpose, the method returns the
     * identifiers of the driver thread.
     *
     * @param frameSize the size of a single frame, in bytes.
     * @param framesCount the number of frames in a buffer.
     * @return retval OK if the message queues were created successfully.
     *                INVALID_STATE if the method was already called.
     *                INVALID_ARGUMENTS if there was a problem setting up
     *                                  the queues.
     * @return commandMQ a message queue used for passing commands.
     * @return dataMQ a message queue used for passing audio data in the format
     *                specified at the stream opening.
     * @return statusMQ a message queue used for passing status from the driver
     *                  using WriteStatus structures.
     * @return threadInfo identifiers of the driver's dedicated thread.
     */
    prepareForWriting(uint32_t frameSize, uint32_t framesCount)
            generates (
                Result retval,
                fmq_sync<WriteCommand> commandMQ,
                fmq_sync<uint8_t> dataMQ,
                fmq_sync<WriteStatus> statusMQ,
                ThreadInfo threadInfo);
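
    /*
     * Illustration only (not part of the interface): a rough sketch of how a
     * client might wrap the returned descriptors using libfmq, assuming the
     * generated C++ proxy and android::hardware::MessageQueue. Names such as
     * 'stream', 'frameSize', 'framesCount', and 'buffer' are placeholders,
     * and details such as the event flag typically used to wake the writer
     * thread are omitted.
     *
     *   using CommandMQ = MessageQueue<WriteCommand, kSynchronizedReadWrite>;
     *   using DataMQ = MessageQueue<uint8_t, kSynchronizedReadWrite>;
     *   using StatusMQ = MessageQueue<WriteStatus, kSynchronizedReadWrite>;
     *
     *   std::unique_ptr<CommandMQ> commandMQ;
     *   std::unique_ptr<DataMQ> dataMQ;
     *   std::unique_ptr<StatusMQ> statusMQ;
     *   stream->prepareForWriting(
     *           frameSize, framesCount,
     *           [&](Result r, const auto& commandDesc, const auto& dataDesc,
     *               const auto& statusDesc, const ThreadInfo& info) {
     *               if (r != Result::OK) return;
     *               commandMQ.reset(new CommandMQ(commandDesc));
     *               dataMQ.reset(new DataMQ(dataDesc));
     *               statusMQ.reset(new StatusMQ(statusDesc));
     *               // 'info' can be used to adjust the writer thread's
     *               // priority and cgroup, as described above.
     *           });
     *
     *   // One WRITE round trip: push data, issue the command, read status.
     *   dataMQ->write(buffer, bufferSizeBytes);
     *   WriteCommand cmd = WriteCommand::WRITE;
     *   commandMQ->write(&cmd);
     *   WriteStatus status;
     *   statusMQ->read(&status);  // see WriteStatus above for 'reply' layout
     */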

    /**
     * Return the number of audio frames written by the audio DSP to the DAC
     * since the output has exited standby.
     * Optional method
     *
     * @return retval operation completion status.
     * @return dspFrames number of audio frames written.
     */
    getRenderPosition() generates (Result retval, uint32_t dspFrames);

    /**
     * Get the local time at which the next write to the audio driver will be
     * presented. The units are microseconds, where the epoch is decided by the
     * local audio HAL.
     * Optional method
     *
     * @return retval operation completion status.
     * @return timestampUs time of the next write.
     */
    getNextWriteTimestamp() generates (Result retval, int64_t timestampUs);

    /**
     * Set the callback interface for notifying completion of non-blocking
     * write and drain.
     *
     * Calling this function implies that all future 'write' and 'drain' calls
     * must be non-blocking and use the callback to signal completion.
     *
     * The 'clearCallback' method must be called in order to release the local
     * callback proxy on the server side and thus dereference the callback
     * implementation on the client side.
     *
     * @return retval operation completion status.
     */
    setCallback(IStreamOutCallback callback) generates (Result retval);
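
    /*
     * Illustration only (not part of the interface): a minimal sketch of a
     * client-side callback implementation, assuming IStreamOutCallback
     * declares the oneway notifications onWriteReady, onDrainReady, and
     * onError (see IStreamOutCallback.hal) and that the generated C++
     * methods return Return<void>. 'stream' is a placeholder for an
     * IStreamOut proxy.
     *
     *   struct StreamCallback : public IStreamOutCallback {
     *       Return<void> onWriteReady() override {
     *           // more room is available in the data queue; resume writing
     *           return Void();
     *       }
     *       Return<void> onDrainReady() override {
     *           // a previously requested drain has completed
     *           return Void();
     *       }
     *       Return<void> onError() override {
     *           // the stream hit an unrecoverable error
     *           return Void();
     *       }
     *   };
     *
     *   stream->setCallback(new StreamCallback());
     */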

    /**
     * Clears the callback previously set via the 'setCallback' method.
     *
     * Warning: failure to call this method results in the callback
     * implementation on the client side being held until the HAL server
     * terminates.
     *
     * If no callback was previously set, the method should be a no-op
     * and return OK.
     *
     * @return retval operation completion status: OK or NOT_SUPPORTED.
     */
    clearCallback() generates (Result retval);

    /**
     * Set the callback interface for notifying about an output stream event.
     *
     * Calling this method with a null pointer results in releasing
     * the local callback proxy on the server side and thus dereferencing
     * the callback implementation on the client side.
     *
     * @return retval operation completion status.
     */
    setEventCallback(IStreamOutEventCallback callback)
            generates (Result retval);

    /**
     * Returns whether the HAL supports pausing and resuming of streams.
     *
     * @return supportsPause true if pausing is supported.
     * @return supportsResume true if resuming is supported.
     */
    supportsPauseAndResume()
            generates (bool supportsPause, bool supportsResume);

    /**
     * Notifies the audio driver to stop playback; the queued buffers are
     * retained by the hardware. Useful for implementing pause/resume. May be
     * an empty implementation if not supported; however, it must be
     * implemented for hardware with non-trivial latency. In the paused state,
     * some audio hardware may still be using power. Client code may consider
     * calling 'suspend' after a timeout to avoid this excess power usage.
     *
     * Implementation of this function is mandatory for offloaded playback.
     *
     * @return retval operation completion status.
     */
    pause() generates (Result retval);

    /**
     * Notifies the audio driver to resume playback following a pause.
     * Returns INVALID_STATE if called without a matching pause.
     *
     * Implementation of this function is mandatory for offloaded playback.
     *
     * @return retval operation completion status.
     */
    resume() generates (Result retval);
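
    /*
     * Illustration only (not part of the interface): a brief sketch of
     * guarding pause/resume behind the capability query above, assuming the
     * generated C++ proxy; 'stream' is a placeholder for an IStreamOut proxy.
     *
     *   bool canPause = false, canResume = false;
     *   stream->supportsPauseAndResume([&](bool pauseSupported, bool resumeSupported) {
     *       canPause = pauseSupported;
     *       canResume = resumeSupported;
     *   });
     *   if (canPause) stream->pause();
     *   // ... later ...
     *   if (canResume) stream->resume();
     */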

    /**
     * Returns whether the HAL supports draining of streams.
     *
     * @return supports true if draining is supported.
     */
    supportsDrain() generates (bool supports);

    /**
     * Requests notification when data buffered by the driver/hardware has been
     * played. If 'setCallback' has previously been called to enable
     * non-blocking mode, then 'drain' must not block; instead it must return
     * quickly, and completion of the drain is notified through the callback.
     * If 'setCallback' has not been called, then 'drain' must block until
     * completion.
     *
     * If 'type' is 'ALL', the drain completes when all previously written data
     * has been played.
     *
     * If 'type' is 'EARLY_NOTIFY', the drain completes shortly before all data
     * for the current track has played, to allow time for the framework to
     * perform a gapless track switch.
     *
     * Drain must return immediately on 'stop' and 'flush' calls.
     *
     * Implementation of this function is mandatory for offloaded playback.
     *
     * @param type type of drain.
     * @return retval operation completion status.
     */
    drain(AudioDrain type) generates (Result retval);
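
    /*
     * Illustration only (not part of the interface): a sketch of a gapless
     * track switch using non-blocking drain, assuming a callback has been
     * registered via 'setCallback' (see the sketch above); 'stream' is a
     * placeholder.
     *
     *   // After queueing the last buffer of the current track:
     *   stream->drain(AudioDrain::EARLY_NOTIFY);  // returns quickly in callback mode
     *   // onDrainReady() fires shortly before the current track finishes,
     *   // giving the framework time to start writing the next track's data
     *   // so the transition happens without a gap.
     */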

    /**
     * Notifies the audio driver to flush (discard) the queued data. The stream
     * must already be paused before 'flush' is called.
     * Optional method
     *
     * Implementation of this function is mandatory for offloaded playback.
     *
     * @return retval operation completion status.
     */
    flush() generates (Result retval);

    /**
     * Return a recent count of the number of audio frames presented to an
     * external observer. This excludes frames which have been written but are
     * still in the pipeline. The count is not reset to zero when output enters
     * standby. Also returns the value of CLOCK_MONOTONIC as of this
     * presentation count. The returned count is expected to be 'recent', but
     * does not need to be the most recent possible value. However, the
     * associated time must correspond to whatever count is returned.
     *
     * Example: assume that N+M frames have been presented, where M is a 'small'
     * number. Then it is permissible to return N instead of N+M, and the
     * timestamp must correspond to N rather than N+M. The terms 'recent' and
     * 'small' are not defined. They reflect the quality of the implementation.
     *
     * Optional method
     *
     * @return retval operation completion status.
     * @return frames count of presented audio frames.
     * @return timeStamp associated clock time.
     */
    getPresentationPosition()
            generates (Result retval, uint64_t frames, TimeSpec timeStamp);
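
    /*
     * Illustration only (not part of the interface): a sketch of how a client
     * might estimate the playback position "now" from the returned pair,
     * assuming TimeSpec carries CLOCK_MONOTONIC time in its tvSec/tvNSec
     * fields; 'stream' and 'sampleRate' are placeholders.
     *
     *   stream->getPresentationPosition(
     *           [&](Result r, uint64_t frames, const TimeSpec& ts) {
     *               if (r != Result::OK) return;
     *               timespec now;
     *               clock_gettime(CLOCK_MONOTONIC, &now);
     *               int64_t elapsedNs =
     *                       (now.tv_sec - (int64_t)ts.tvSec) * 1000000000LL +
     *                       (now.tv_nsec - (int64_t)ts.tvNSec);
     *               // Extrapolate the frame count to the current time.
     *               uint64_t framesNow =
     *                       frames + (uint64_t)(elapsedNs * sampleRate / 1000000000LL);
     *           });
     */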

    /**
     * Selects a presentation for decoding from a next generation media stream
     * (as defined per ETSI TS 103 190-2) and a program within the presentation.
     * Optional method
     *
     * @param presentationId selected audio presentation.
     * @param programId refinement for the presentation.
     * @return retval operation completion status.
     */
    selectPresentation(int32_t presentationId, int32_t programId)
            generates (Result retval);

    /**
     * Returns the Dual Mono mode presentation setting.
     *
     * Optional method
     *
     * @return retval operation completion status.
     * @return mode current setting of Dual Mono mode.
     */
    getDualMonoMode() generates (Result retval, DualMonoMode mode);

    /**
     * Sets the Dual Mono mode presentation on the output device.
     *
     * The Dual Mono mode is generally applied to stereo audio streams
     * where the left and right channels come from separate sources.
     *
     * Optional method
     *
     * @param mode selected Dual Mono mode.
     * @return retval operation completion status.
     */
    setDualMonoMode(DualMonoMode mode) generates (Result retval);

    /**
     * Returns the Audio Description Mix level in dB.
     *
     * The level is applied to streams incorporating a secondary Audio
     * Description stream. It specifies the level of the Audio Description
     * relative to the Main Audio.
     *
     * Optional method
     *
     * The value of the relative level is in the range from negative infinity
     * to +48.
     *
     * @return retval operation completion status.
     * @return leveldB the current Audio Description Mix Level in dB.
     */
    getAudioDescriptionMixLevel() generates (Result retval, float leveldB);

    /**
     * Sets the Audio Description Mix level in dB.
     *
     * For streams incorporating a secondary Audio Description stream,
     * the relative level of mixing of the Audio Description with the Main
     * Audio is controlled by this method.
     *
     * Optional method
     *
     * The value of the relative level must be in the range from negative
     * infinity to +48.
     *
     * @param leveldB Audio Description Mix Level in dB
     * @return retval operation completion status.
     */
    setAudioDescriptionMixLevel(float leveldB) generates (Result retval);

    /**
     * Retrieves current playback rate parameters.
     *
     * Optional method
     *
     * @return retval operation completion status.
     * @return playbackRate current playback parameters
     */
    getPlaybackRateParameters()
            generates (Result retval, PlaybackRate playbackRate);

    /**
     * Sets the playback rate parameters that control playback behavior.
     * This is normally used when playing encoded content and decoding
     * is performed in hardware. Otherwise, the framework can apply
     * necessary transformations.
     *
     * Optional method
     *
     * If the HAL supports setting the playback rate, it is recommended
     * to support speed and pitch values at least in the range
     * from 0.5f to 2.0f, inclusive (see the definition of PlaybackRate struct).
     *
     * @param playbackRate playback parameters
     * @return retval operation completion status.
     */
    setPlaybackRateParameters(PlaybackRate playbackRate)
            generates (Result retval);
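
    /*
     * Illustration only (not part of the interface): a minimal sketch of
     * adjusting the playback rate, assuming the PlaybackRate struct exposes
     * 'speed' and 'pitch' fields as referenced above; any other fields are
     * left at their defaults, and 'stream' is a placeholder for an
     * IStreamOut proxy.
     *
     *   PlaybackRate rate = {};
     *   rate.speed = 1.5f;   // play 1.5x faster
     *   rate.pitch = 1.0f;   // keep the original pitch
     *   Result r = stream->setPlaybackRateParameters(rate);
     *   if (r == Result::NOT_SUPPORTED) {
     *       // fall back to applying the transformation in the framework
     *   }
     */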
};