diff options
36 files changed, 1527 insertions, 1312 deletions
diff --git a/core/java/android/animation/Animator.java b/core/java/android/animation/Animator.java index b6c4763dd404..db8b9eba5fb8 100755 --- a/core/java/android/animation/Animator.java +++ b/core/java/android/animation/Animator.java @@ -28,8 +28,13 @@ import java.util.ArrayList; * This class provides a simple timing engine for running animations * which calculate animated values and set them on target objects. * - * There is a single timing pulse that all animations use. It runs in a - * custom handler to ensure that property changes happen on the UI thread. + * <p>There is a single timing pulse that all animations use. It runs in a + * custom handler to ensure that property changes happen on the UI thread.</p> + * + * <p>By default, Animator uses non-linear time interpolation, via the + * {@link AccelerateDecelerateInterpolator} class, which accelerates into and decelerates + * out of an animation. This behavior can be changed by calling + * {@link Animator#setInterpolator(Interpolator)}.</p> */ public class Animator extends Animatable { diff --git a/core/java/android/animation/Sequencer.java b/core/java/android/animation/Sequencer.java index 00ab6a3342da..e73ac88313db 100644 --- a/core/java/android/animation/Sequencer.java +++ b/core/java/android/animation/Sequencer.java @@ -27,7 +27,7 @@ import java.util.HashMap; * either the {@link Sequencer#playTogether(Animatable...) playTogether()} or * {@link Sequencer#playSequentially(Animatable...) 
playSequentially()} methods can be called to add * a set of animations all at once, or the {@link Sequencer#play(Animatable)} can be - * used in conjunction with methods in the {@link android.animation.Sequencer.Builder Builder} + * class to add animations * one by one.</p> * @@ -83,6 +83,13 @@ public final class Sequencer extends Animatable { private SequencerAnimatableListener mSequenceListener = null; /** + * Flag indicating that the Sequencer has been canceled (by calling cancel() or end()). + * This flag is used to avoid starting other animations when currently-playing + * child animations of this Sequencer end. + */ + boolean mCanceled = false; + + /** + * Sets up this Sequencer to play all of the supplied animations at the same time. * * @param sequenceItems The animations that will be started simultaneously. @@ -161,6 +168,7 @@ public final class Sequencer extends Animatable { @SuppressWarnings("unchecked") @Override public void cancel() { + mCanceled = true; if (mListeners != null) { ArrayList<AnimatableListener> tmpListeners = (ArrayList<AnimatableListener>) mListeners.clone(); @@ -168,11 +176,10 @@ public final class Sequencer extends Animatable { listener.onAnimationCancel(this); } } - if (mPlayingSet.size() > 0) { - for (Animatable item : mPlayingSet) { - item.cancel(); + if (mSortedNodes.size() > 0) { + for (Node node : mSortedNodes) { + node.animation.cancel(); } - mPlayingSet.clear(); } } @@ -184,11 +191,11 @@ public final class Sequencer extends Animatable { */ @Override public void end() { - if (mPlayingSet.size() > 0) { - for (Animatable item : mPlayingSet) { - item.end(); + mCanceled = true; + if (mSortedNodes.size() > 0) { + for (Node node : mSortedNodes) { + node.animation.end(); } - mPlayingSet.clear(); } } @@ -202,6 +209,8 @@ public final class Sequencer extends Animatable { @SuppressWarnings("unchecked") @Override public void start() { + mCanceled 
= false; + // First, sort the nodes (if necessary). This will ensure that sortedNodes // contains the animation nodes in the correct order. sortNodes(); @@ -221,7 +230,7 @@ public final class Sequencer extends Animatable { } else { for (Dependency dependency : node.dependencies) { dependency.node.animation.addListener( - new DependencyListener(node, dependency.rule)); + new DependencyListener(this, node, dependency.rule)); } node.tmpDependencies = (ArrayList<Dependency>) node.dependencies.clone(); } @@ -247,6 +256,8 @@ public final class Sequencer extends Animatable { */ private static class DependencyListener implements AnimatableListener { + private Sequencer mSequencer; + // The node upon which the dependency is based. private Node mNode; @@ -254,27 +265,18 @@ public final class Sequencer extends Animatable { // the node private int mRule; - public DependencyListener(Node node, int rule) { + public DependencyListener(Sequencer sequencer, Node node, int rule) { + this.mSequencer = sequencer; this.mNode = node; this.mRule = rule; } /** - * If an animation that is being listened for is canceled, then this removes - * the listener on that animation, to avoid triggering further animations down - * the line when the animation ends. + * Ignore cancel events for now. We may want to handle this eventually, + * to prevent follow-on animations from running when some dependency + * animation is canceled. */ public void onAnimationCancel(Animatable animation) { - Dependency dependencyToRemove = null; - for (Dependency dependency : mNode.tmpDependencies) { - if (dependency.node.animation == animation) { - // animation canceled - remove the dependency and listener - dependencyToRemove = dependency; - animation.removeListener(this); - break; - } - } - mNode.tmpDependencies.remove(dependencyToRemove); } /** @@ -308,6 +310,10 @@ public final class Sequencer extends Animatable { * @param dependencyAnimation the animation that sent the event. 
*/ private void startIfReady(Animatable dependencyAnimation) { + if (mSequencer.mCanceled) { + // if the parent Sequencer was canceled, then don't start any dependent anims + return; + } Dependency dependencyToRemove = null; for (Dependency dependency : mNode.tmpDependencies) { if (dependency.rule == mRule && @@ -405,6 +411,7 @@ public final class Sequencer extends Animatable { } } } + roots.clear(); roots.addAll(tmpRoots); tmpRoots.clear(); } diff --git a/core/java/android/app/ApplicationErrorReport.java b/core/java/android/app/ApplicationErrorReport.java index 6981cd6872ba..48cbd46d19bf 100644 --- a/core/java/android/app/ApplicationErrorReport.java +++ b/core/java/android/app/ApplicationErrorReport.java @@ -179,7 +179,7 @@ public class ApplicationErrorReport implements Parcelable { /** * Return activity in receiverPackage that handles ACTION_APP_ERROR. * - * @param pm PackageManager isntance + * @param pm PackageManager instance * @param errorPackage package which caused the error * @param receiverPackage candidate package to receive the error * @return activity component within receiverPackage which handles diff --git a/core/java/android/bluetooth/ScoSocket.java b/core/java/android/bluetooth/ScoSocket.java deleted file mode 100644 index b65a99a048e3..000000000000 --- a/core/java/android/bluetooth/ScoSocket.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright (C) 2008 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package android.bluetooth; - -import android.os.Handler; -import android.os.Message; -import android.os.PowerManager; -import android.os.PowerManager.WakeLock; -import android.util.Log; - -/** - * The Android Bluetooth API is not finalized, and *will* change. Use at your - * own risk. - * - * Simple SCO Socket. - * Currently in Android, there is no support for sending data over a SCO - * socket - this is managed by the hardware link to the Bluetooth Chip. This - * class is instead intended for management of the SCO socket lifetime, - * and is tailored for use with the headset / handsfree profiles. - * @hide - */ -public class ScoSocket { - private static final String TAG = "ScoSocket"; - private static final boolean DBG = true; - private static final boolean VDBG = false; // even more logging - - public static final int STATE_READY = 1; // Ready for use. No threads or sockets - public static final int STATE_ACCEPT = 2; // accept() thread running - public static final int STATE_CONNECTING = 3; // connect() thread running - public static final int STATE_CONNECTED = 4; // connected, waiting for close() - public static final int STATE_CLOSED = 5; // was connected, now closed. 
- - private int mState; - private int mNativeData; - private Handler mHandler; - private int mAcceptedCode; - private int mConnectedCode; - private int mClosedCode; - - private WakeLock mWakeLock; // held while in STATE_CONNECTING - - static { - classInitNative(); - } - private native static void classInitNative(); - - public ScoSocket(PowerManager pm, Handler handler, int acceptedCode, int connectedCode, - int closedCode) { - initNative(); - mState = STATE_READY; - mHandler = handler; - mAcceptedCode = acceptedCode; - mConnectedCode = connectedCode; - mClosedCode = closedCode; - mWakeLock = pm.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, "ScoSocket"); - mWakeLock.setReferenceCounted(false); - if (VDBG) log(this + " SCO OBJECT CTOR"); - } - private native void initNative(); - - protected void finalize() throws Throwable { - try { - if (VDBG) log(this + " SCO OBJECT DTOR"); - destroyNative(); - releaseWakeLockNow(); - } finally { - super.finalize(); - } - } - private native void destroyNative(); - - /** Connect this SCO socket to the given BT address. - * Does not block. - */ - public synchronized boolean connect(String address, String name) { - if (DBG) log("connect() " + this); - if (mState != STATE_READY) { - if (DBG) log("connect(): Bad state"); - return false; - } - acquireWakeLock(); - if (connectNative(address, name)) { - mState = STATE_CONNECTING; - return true; - } else { - mState = STATE_CLOSED; - releaseWakeLockNow(); - return false; - } - } - private native boolean connectNative(String address, String name); - - /** Accept incoming SCO connections. - * Does not block. 
- */ - public synchronized boolean accept() { - if (VDBG) log("accept() " + this); - if (mState != STATE_READY) { - if (DBG) log("Bad state"); - return false; - } - if (acceptNative()) { - mState = STATE_ACCEPT; - return true; - } else { - mState = STATE_CLOSED; - return false; - } - } - private native boolean acceptNative(); - - public synchronized void close() { - if (DBG) log(this + " SCO OBJECT close() mState = " + mState); - acquireWakeLock(); - mState = STATE_CLOSED; - closeNative(); - releaseWakeLock(); - } - private native void closeNative(); - - public synchronized int getState() { - return mState; - } - - private synchronized void onConnected(int result) { - if (VDBG) log(this + " onConnected() mState = " + mState + " " + this); - if (mState != STATE_CONNECTING) { - if (DBG) log("Strange state, closing " + mState + " " + this); - return; - } - if (result >= 0) { - mState = STATE_CONNECTED; - } else { - mState = STATE_CLOSED; - } - mHandler.obtainMessage(mConnectedCode, mState, -1, this).sendToTarget(); - releaseWakeLockNow(); - } - - private synchronized void onAccepted(int result) { - if (VDBG) log("onAccepted() " + this); - if (mState != STATE_ACCEPT) { - if (DBG) log("Strange state " + this); - return; - } - if (result >= 0) { - mState = STATE_CONNECTED; - } else { - mState = STATE_CLOSED; - } - mHandler.obtainMessage(mAcceptedCode, mState, -1, this).sendToTarget(); - } - - private synchronized void onClosed() { - if (DBG) log("onClosed() " + this); - if (mState != STATE_CLOSED) { - mState = STATE_CLOSED; - mHandler.obtainMessage(mClosedCode, mState, -1, this).sendToTarget(); - releaseWakeLock(); - } - } - - private void acquireWakeLock() { - if (!mWakeLock.isHeld()) { - mWakeLock.acquire(); - if (VDBG) log("mWakeLock.acquire() " + this); - } - } - - private void releaseWakeLock() { - if (mWakeLock.isHeld()) { - // Keep apps processor awake for a further 2 seconds. 
- // This is a hack to resolve issue http://b/1616263 - in which - // we are left in a 80 mA power state when remotely terminating a - // call while connected to BT headset "HTC BH S100 " with A2DP and - // HFP profiles. - if (VDBG) log("mWakeLock.release() in 2 sec" + this); - mWakeLock.acquire(2000); - } - } - - private void releaseWakeLockNow() { - if (mWakeLock.isHeld()) { - if (VDBG) log("mWakeLock.release() now" + this); - mWakeLock.release(); - } - } - - private void log(String msg) { - Log.d(TAG, msg); - } -} diff --git a/core/java/android/os/Debug.java b/core/java/android/os/Debug.java index d23b16144785..a58e70b1e1e8 100644 --- a/core/java/android/os/Debug.java +++ b/core/java/android/os/Debug.java @@ -94,7 +94,8 @@ public final class Debug /** * Default trace file path and file */ - private static final String DEFAULT_TRACE_PATH_PREFIX = "/sdcard/"; + private static final String DEFAULT_TRACE_PATH_PREFIX = + Environment.getExternalStorageDirectory().getPath() + "/"; private static final String DEFAULT_TRACE_BODY = "dmtrace"; private static final String DEFAULT_TRACE_EXTENSION = ".trace"; private static final String DEFAULT_TRACE_FILE_PATH = @@ -127,7 +128,7 @@ public final class Debug public int otherPrivateDirty; /** The shared dirty pages used by everything else. */ public int otherSharedDirty; - + public MemoryInfo() { } @@ -137,21 +138,21 @@ public final class Debug public int getTotalPss() { return dalvikPss + nativePss + otherPss; } - + /** * Return total private dirty memory usage in kB. */ public int getTotalPrivateDirty() { return dalvikPrivateDirty + nativePrivateDirty + otherPrivateDirty; } - + /** * Return total shared dirty memory usage in kB. 
*/ public int getTotalSharedDirty() { return dalvikSharedDirty + nativeSharedDirty + otherSharedDirty; } - + public int describeContents() { return 0; } @@ -179,7 +180,7 @@ public final class Debug otherPrivateDirty = source.readInt(); otherSharedDirty = source.readInt(); } - + public static final Creator<MemoryInfo> CREATOR = new Creator<MemoryInfo>() { public MemoryInfo createFromParcel(Parcel source) { return new MemoryInfo(source); @@ -460,7 +461,7 @@ href="{@docRoot}guide/developing/tools/traceview.html">Traceview: A Graphical Lo * Like startMethodTracing(String, int, int), but taking an already-opened * FileDescriptor in which the trace is written. The file name is also * supplied simply for logging. Makes a dup of the file descriptor. - * + * * Not exposed in the SDK unless we are really comfortable with supporting * this and find it would be useful. * @hide @@ -1090,7 +1091,7 @@ href="{@docRoot}guide/developing/tools/traceview.html">Traceview: A Graphical Lo * static { * // Sets all the fields * Debug.setFieldsOn(MyDebugVars.class); - * + * * // Sets only the fields annotated with @Debug.DebugProperty * // Debug.setFieldsOn(MyDebugVars.class, true); * } diff --git a/core/java/android/provider/Telephony.java b/core/java/android/provider/Telephony.java index bf9e8549aaaf..d271e934db77 100644 --- a/core/java/android/provider/Telephony.java +++ b/core/java/android/provider/Telephony.java @@ -25,6 +25,7 @@ import android.content.Intent; import android.database.Cursor; import android.database.sqlite.SqliteWrapper; import android.net.Uri; +import android.os.Environment; import android.telephony.SmsMessage; import android.text.TextUtils; import android.util.Config; @@ -1526,7 +1527,8 @@ public final class Telephony { * which streams the captured image to the uri. Internally we write the media content * to this file. It's named '.temp.jpg' so Gallery won't pick it up. 
*/ - public static final String SCRAP_FILE_PATH = "/sdcard/mms/scrapSpace/.temp.jpg"; + public static final String SCRAP_FILE_PATH = + Environment.getExternalStorageDirectory().getPath() + "/mms/scrapSpace/.temp.jpg"; } public static final class Intents { diff --git a/core/java/com/android/internal/app/ActionBarImpl.java b/core/java/com/android/internal/app/ActionBarImpl.java index f37021b7ede2..63dbdb4fdbde 100644 --- a/core/java/com/android/internal/app/ActionBarImpl.java +++ b/core/java/com/android/internal/app/ActionBarImpl.java @@ -16,8 +16,10 @@ package com.android.internal.app; -import com.android.internal.view.menu.ActionMenu; -import com.android.internal.view.menu.ActionMenuItem; +import com.android.internal.view.menu.MenuBuilder; +import com.android.internal.view.menu.MenuItemImpl; +import com.android.internal.view.menu.MenuPopupHelper; +import com.android.internal.view.menu.SubMenuBuilder; import com.android.internal.widget.ActionBarContextView; import com.android.internal.widget.ActionBarView; @@ -196,9 +198,7 @@ public class ActionBarImpl extends ActionBar { @Override public void startContextMode(ContextModeCallback callback) { - if (mContextMode != null) { - mContextMode.finish(); - } + finishContextMode(); // Don't wait for the close context mode animation to finish. 
if (mClosingContext) { @@ -207,15 +207,16 @@ public class ActionBarImpl extends ActionBar { mCloseContext.run(); } - mContextMode = new ContextMode(callback); - if (callback.onCreateContextMode(mContextMode, mContextMode.getMenu())) { - mContextMode.invalidate(); - mUpperContextView.initForMode(mContextMode); + ContextMode mode = new ContextMode(callback); + if (callback.onCreateContextMode(mode, mode.getMenu())) { + mode.invalidate(); + mUpperContextView.initForMode(mode); mAnimatorView.setDisplayedChild(CONTEXT_VIEW); if (mLowerContextView != null) { // TODO animate this mLowerContextView.setVisibility(View.VISIBLE); } + mContextMode = mode; } } @@ -336,14 +337,15 @@ public class ActionBarImpl extends ActionBar { /** * @hide */ - public class ContextMode extends ActionBar.ContextMode { + public class ContextMode extends ActionBar.ContextMode implements MenuBuilder.Callback { private ContextModeCallback mCallback; - private ActionMenu mMenu; + private MenuBuilder mMenu; private WeakReference<View> mCustomView; public ContextMode(ContextModeCallback callback) { mCallback = callback; - mMenu = new ActionMenu(mActionView.getContext()); + mMenu = new MenuBuilder(mActionView.getContext()); + mMenu.setCallback(this); } @Override @@ -405,12 +407,27 @@ public class ActionBarImpl extends ActionBar { return mCustomView != null ? 
mCustomView.get() : null; } - public void dispatchOnContextItemClicked(MenuItem item) { - ActionMenuItem actionItem = (ActionMenuItem) item; - if (!actionItem.invoke()) { - mCallback.onContextItemClicked(this, item); + public boolean onMenuItemSelected(MenuBuilder menu, MenuItem item) { + return mCallback.onContextItemClicked(this, item); + } + + public void onCloseMenu(MenuBuilder menu, boolean allMenusAreClosing) { + } + + public boolean onSubMenuSelected(SubMenuBuilder subMenu) { + if (!subMenu.hasVisibleItems()) { + return true; } - } + + new MenuPopupHelper(mActivity, subMenu).show(); + return true; + } + + public void onCloseSubMenu(SubMenuBuilder menu) { + } + + public void onMenuModeChange(MenuBuilder menu) { + } } /** diff --git a/core/java/com/android/internal/view/menu/ActionMenuView.java b/core/java/com/android/internal/view/menu/ActionMenuView.java index 7024a2748cd8..e2815368ecba 100644 --- a/core/java/com/android/internal/view/menu/ActionMenuView.java +++ b/core/java/com/android/internal/view/menu/ActionMenuView.java @@ -71,6 +71,10 @@ public class ActionMenuView extends LinearLayout implements MenuBuilder.ItemInvo return mReserveOverflow; } + public void setOverflowReserved(boolean reserveOverflow) { + mReserveOverflow = reserveOverflow; + } + @Override protected boolean checkLayoutParams(ViewGroup.LayoutParams p) { if (p instanceof LayoutParams) { diff --git a/core/java/com/android/internal/widget/ActionBarContextView.java b/core/java/com/android/internal/widget/ActionBarContextView.java index cd9832f0659f..de3162a84b20 100644 --- a/core/java/com/android/internal/widget/ActionBarContextView.java +++ b/core/java/com/android/internal/widget/ActionBarContextView.java @@ -16,7 +16,8 @@ package com.android.internal.widget; import com.android.internal.R; -import com.android.internal.app.ActionBarImpl; +import com.android.internal.view.menu.ActionMenuView; +import com.android.internal.view.menu.MenuBuilder; import android.app.ActionBar; import 
android.content.Context; @@ -24,12 +25,8 @@ import android.content.res.TypedArray; import android.graphics.drawable.Drawable; import android.util.AttributeSet; import android.view.LayoutInflater; -import android.view.Menu; -import android.view.MenuItem; import android.view.View; import android.view.ViewGroup; -import android.view.View.MeasureSpec; -import android.view.ViewGroup.LayoutParams; import android.widget.ImageButton; import android.widget.LinearLayout; import android.widget.TextView; @@ -43,6 +40,7 @@ public class ActionBarContextView extends ViewGroup { private int mItemPadding; private int mItemMargin; + private int mActionSpacing; private int mContentHeight; private CharSequence mTitle; @@ -54,6 +52,7 @@ public class ActionBarContextView extends ViewGroup { private TextView mTitleView; private TextView mSubtitleView; private Drawable mCloseDrawable; + private ActionMenuView mMenuView; public ActionBarContextView(Context context) { this(context, null, 0); @@ -137,8 +136,6 @@ public class ActionBarContextView extends ViewGroup { } public void initForMode(final ActionBar.ContextMode mode) { - final ActionBarImpl.ContextMode implMode = (ActionBarImpl.ContextMode) mode; - if (mCloseButton == null) { mCloseButton = new ImageButton(getContext()); mCloseButton.setImageDrawable(mCloseDrawable); @@ -151,34 +148,17 @@ public class ActionBarContextView extends ViewGroup { } addView(mCloseButton); - final Context context = getContext(); - final Menu menu = mode.getMenu(); - final int itemCount = menu.size(); - for (int i = 0; i < itemCount; i++) { - final MenuItem item = menu.getItem(i); - final ImageButton button = new ImageButton(context, null, - com.android.internal.R.attr.actionButtonStyle); - button.setClickable(true); - button.setFocusable(true); - button.setImageDrawable(item.getIcon()); - button.setId(item.getItemId()); - button.setVisibility(item.isVisible() ? 
VISIBLE : GONE); - button.setEnabled(item.isEnabled()); - - button.setOnClickListener(new OnClickListener() { - public void onClick(View v) { - implMode.dispatchOnContextItemClicked(item); - } - }); - - addView(button); - } - requestLayout(); + final MenuBuilder menu = (MenuBuilder) mode.getMenu(); + mMenuView = (ActionMenuView) menu.getMenuView(MenuBuilder.TYPE_ACTION_BUTTON, this); + mMenuView.setOverflowReserved(true); + mMenuView.updateChildren(false); + addView(mMenuView); } public void closeMode() { removeAllViews(); mCustomView = null; + mMenuView = null; } @Override @@ -266,15 +246,10 @@ public class ActionBarContextView extends ViewGroup { } x = r - l - getPaddingRight(); - - final int childCount = getChildCount(); - for (int i = 0; i < childCount; i++) { - final View child = getChildAt(i); - if (child == mCloseButton || child == mTitleLayout || child == mCustomView) { - continue; - } - x -= positionChildInverse(child, x, y, contentHeight) + itemMargin; + if (mMenuView != null) { + x -= positionChildInverse(mMenuView, x + mActionSpacing, y, contentHeight) + - mActionSpacing; } } diff --git a/core/jni/Android.mk b/core/jni/Android.mk index c1921aad6b78..fea5ae302158 100644 --- a/core/jni/Android.mk +++ b/core/jni/Android.mk @@ -124,7 +124,6 @@ LOCAL_SRC_FILES:= \ android_bluetooth_common.cpp \ android_bluetooth_BluetoothAudioGateway.cpp \ android_bluetooth_BluetoothSocket.cpp \ - android_bluetooth_ScoSocket.cpp \ android_server_BluetoothService.cpp \ android_server_BluetoothEventLoop.cpp \ android_server_BluetoothA2dpService.cpp \ diff --git a/core/jni/AndroidRuntime.cpp b/core/jni/AndroidRuntime.cpp index d70f64f78a41..3b91710be969 100644 --- a/core/jni/AndroidRuntime.cpp +++ b/core/jni/AndroidRuntime.cpp @@ -152,7 +152,6 @@ extern int register_android_opengl_classes(JNIEnv *env); extern int register_android_bluetooth_HeadsetBase(JNIEnv* env); extern int register_android_bluetooth_BluetoothAudioGateway(JNIEnv* env); extern int 
register_android_bluetooth_BluetoothSocket(JNIEnv *env); -extern int register_android_bluetooth_ScoSocket(JNIEnv *env); extern int register_android_server_BluetoothService(JNIEnv* env); extern int register_android_server_BluetoothEventLoop(JNIEnv *env); extern int register_android_server_BluetoothA2dpService(JNIEnv* env); @@ -1286,7 +1285,6 @@ static const RegJNIRec gRegJNI[] = { REG_JNI(register_android_bluetooth_HeadsetBase), REG_JNI(register_android_bluetooth_BluetoothAudioGateway), REG_JNI(register_android_bluetooth_BluetoothSocket), - REG_JNI(register_android_bluetooth_ScoSocket), REG_JNI(register_android_server_BluetoothService), REG_JNI(register_android_server_BluetoothEventLoop), REG_JNI(register_android_server_BluetoothA2dpService), diff --git a/core/jni/android_bluetooth_ScoSocket.cpp b/core/jni/android_bluetooth_ScoSocket.cpp deleted file mode 100644 index 94e4409a4aba..000000000000 --- a/core/jni/android_bluetooth_ScoSocket.cpp +++ /dev/null @@ -1,689 +0,0 @@ -/* -** Copyright 2008, The Android Open Source Project -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. 
-*/ - -#define LOG_TAG "bluetooth_ScoSocket.cpp" - -#include "android_bluetooth_common.h" -#include "android_runtime/AndroidRuntime.h" -#include "JNIHelp.h" -#include "jni.h" -#include "utils/Log.h" -#include "utils/misc.h" - -#include <stdio.h> -#include <string.h> -#include <stdlib.h> -#include <errno.h> -#include <unistd.h> -#include <pthread.h> -#include <sys/socket.h> -#include <sys/types.h> -#include <sys/uio.h> -#include <sys/poll.h> - -#ifdef HAVE_BLUETOOTH -#include <bluetooth/bluetooth.h> -#include <bluetooth/sco.h> -#include <bluetooth/hci.h> - -#define MAX_LINE 255 - -/* - * Defines the module strings used in the blacklist file. - * These are used by consumers of the blacklist file to see if the line is - * used by that module. - */ -#define SCO_BLACKLIST_MODULE_NAME "scoSocket" - - -/* Define the type strings used in the blacklist file. */ -#define BLACKLIST_BY_NAME "name" -#define BLACKLIST_BY_PARTIAL_NAME "partial_name" -#define BLACKLIST_BY_OUI "vendor_oui" - -#endif - -/* Ideally, blocking I/O on a SCO socket would return when another thread - * calls close(). However it does not right now, in fact close() on a SCO - * socket has strange behavior (returns a bogus value) when other threads - * are performing blocking I/O on that socket. So, to workaround, we always - * call close() from the same thread that does blocking I/O. This requires the - * use of a socketpair to signal the blocking I/O to abort. - * - * Unfortunately I don't know a way to abort connect() yet, but at least this - * times out after the BT page timeout (10 seconds currently), so the thread - * will die eventually. The fact that the thread can outlive - * the Java object forces us to use a mutex in destoryNative(). - * - * The JNI API is entirely async. - * - * Also note this class deals only with SCO connections, not with data - * transmission. 
- */ -namespace android { -#ifdef HAVE_BLUETOOTH - -static JavaVM *jvm; -static jfieldID field_mNativeData; -static jmethodID method_onAccepted; -static jmethodID method_onConnected; -static jmethodID method_onClosed; - -struct thread_data_t; -static void *work_thread(void *arg); -static int connect_work(const char *address, uint16_t sco_pkt_type); -static int accept_work(int signal_sk); -static void wait_for_close(int sk, int signal_sk); -static void closeNative(JNIEnv *env, jobject object); - -static void parseBlacklist(void); -static uint16_t getScoType(char *address, const char *name); - -#define COMPARE_STRING(key, s) (!strncmp(key, s, strlen(s))) - -/* Blacklist data */ -typedef struct scoBlacklist { - int fieldType; - char *value; - uint16_t scoType; - struct scoBlacklist *next; -} scoBlacklist_t; - -#define BL_TYPE_NAME 1 // Field type is name string - -static scoBlacklist_t *blacklist = NULL; - -/* shared native data - protected by mutex */ -typedef struct { - pthread_mutex_t mutex; - int signal_sk; // socket to signal blocked I/O to unblock - jobject object; // JNI global ref to the Java object - thread_data_t *thread_data; // pointer to thread local data - // max 1 thread per sco socket -} native_data_t; - -/* thread local data */ -struct thread_data_t { - native_data_t *nat; - bool is_accept; // accept (listening) or connect (outgoing) thread - int signal_sk; // socket for thread to listen for unblock signal - char address[BTADDR_SIZE]; // BT addres as string - uint16_t sco_pkt_type; // SCO packet types supported -}; - -static inline native_data_t * get_native_data(JNIEnv *env, jobject object) { - return (native_data_t *)(env->GetIntField(object, field_mNativeData)); -} - -static uint16_t str2scoType (char *key) { - LOGV("%s: key = %s", __FUNCTION__, key); - if (COMPARE_STRING(key, "ESCO_HV1")) - return ESCO_HV1; - if (COMPARE_STRING(key, "ESCO_HV2")) - return ESCO_HV2; - if (COMPARE_STRING(key, "ESCO_HV3")) - return ESCO_HV3; - if (COMPARE_STRING(key, 
"ESCO_EV3")) - return ESCO_EV3; - if (COMPARE_STRING(key, "ESCO_EV4")) - return ESCO_EV4; - if (COMPARE_STRING(key, "ESCO_EV5")) - return ESCO_EV5; - if (COMPARE_STRING(key, "ESCO_2EV3")) - return ESCO_2EV3; - if (COMPARE_STRING(key, "ESCO_3EV3")) - return ESCO_3EV3; - if (COMPARE_STRING(key, "ESCO_2EV5")) - return ESCO_2EV5; - if (COMPARE_STRING(key, "ESCO_3EV5")) - return ESCO_3EV5; - if (COMPARE_STRING(key, "SCO_ESCO_MASK")) - return SCO_ESCO_MASK; - if (COMPARE_STRING(key, "EDR_ESCO_MASK")) - return EDR_ESCO_MASK; - if (COMPARE_STRING(key, "ALL_ESCO_MASK")) - return ALL_ESCO_MASK; - LOGE("Unknown SCO Type (%s) skipping",key); - return 0; -} - -static void parseBlacklist(void) { - const char *filename = "/etc/bluetooth/blacklist.conf"; - char line[MAX_LINE]; - scoBlacklist_t *list = NULL; - scoBlacklist_t *newelem; - - LOGV(__FUNCTION__); - - /* Open file */ - FILE *fp = fopen(filename, "r"); - if(!fp) { - LOGE("Error(%s)opening blacklist file", strerror(errno)); - return; - } - - while (fgets(line, MAX_LINE, fp) != NULL) { - if ((COMPARE_STRING(line, "//")) || (!strcmp(line, ""))) - continue; - char *module = strtok(line,":"); - if (COMPARE_STRING(module, SCO_BLACKLIST_MODULE_NAME)) { - newelem = (scoBlacklist_t *)calloc(1, sizeof(scoBlacklist_t)); - if (newelem == NULL) { - LOGE("%s: out of memory!", __FUNCTION__); - return; - } - // parse line - char *type = strtok(NULL, ","); - char *valueList = strtok(NULL, ","); - char *paramList = strtok(NULL, ","); - if (COMPARE_STRING(type, BLACKLIST_BY_NAME)) { - // Extract Name from Value list - newelem->fieldType = BL_TYPE_NAME; - newelem->value = (char *)calloc(1, strlen(valueList)); - if (newelem->value == NULL) { - LOGE("%s: out of memory!", __FUNCTION__); - continue; - } - valueList++; // Skip open quote - strncpy(newelem->value, valueList, strlen(valueList) - 1); - - // Get Sco Settings from Parameters - char *param = strtok(paramList, ";"); - uint16_t scoTypes = 0; - while (param != NULL) { - uint16_t sco; - if 
(param[0] == '-') { - param++; - sco = str2scoType(param); - if (sco != 0) - scoTypes &= ~sco; - } else if (param[0] == '+') { - param++; - sco = str2scoType(param); - if (sco != 0) - scoTypes |= sco; - } else if (param[0] == '=') { - param++; - sco = str2scoType(param); - if (sco != 0) - scoTypes = sco; - } else { - LOGE("Invalid SCO type must be =, + or -"); - } - param = strtok(NULL, ";"); - } - newelem->scoType = scoTypes; - } else { - LOGE("Unknown SCO type entry in Blacklist file"); - continue; - } - if (list) { - list->next = newelem; - list = newelem; - } else { - blacklist = list = newelem; - } - LOGI("Entry name = %s ScoTypes = 0x%x", newelem->value, - newelem->scoType); - } - } - fclose(fp); - return; -} -static uint16_t getScoType(char *address, const char *name) { - uint16_t ret = 0; - scoBlacklist_t *list = blacklist; - - while (list != NULL) { - if (list->fieldType == BL_TYPE_NAME) { - if (COMPARE_STRING(name, list->value)) { - ret = list->scoType; - break; - } - } - list = list->next; - } - LOGI("%s %s - 0x%x", __FUNCTION__, name, ret); - return ret; -} -#endif - -static void classInitNative(JNIEnv* env, jclass clazz) { - LOGV(__FUNCTION__); -#ifdef HAVE_BLUETOOTH - if (env->GetJavaVM(&jvm) < 0) { - LOGE("Could not get handle to the VM"); - } - field_mNativeData = get_field(env, clazz, "mNativeData", "I"); - method_onAccepted = env->GetMethodID(clazz, "onAccepted", "(I)V"); - method_onConnected = env->GetMethodID(clazz, "onConnected", "(I)V"); - method_onClosed = env->GetMethodID(clazz, "onClosed", "()V"); - - /* Read the blacklist file in here */ - parseBlacklist(); -#endif -} - -/* Returns false if a serious error occured */ -static jboolean initNative(JNIEnv* env, jobject object) { - LOGV(__FUNCTION__); -#ifdef HAVE_BLUETOOTH - - native_data_t *nat = (native_data_t *) calloc(1, sizeof(native_data_t)); - if (nat == NULL) { - LOGE("%s: out of memory!", __FUNCTION__); - return JNI_FALSE; - } - - pthread_mutex_init(&nat->mutex, NULL); - 
env->SetIntField(object, field_mNativeData, (jint)nat); - nat->signal_sk = -1; - nat->object = NULL; - nat->thread_data = NULL; - -#endif - return JNI_TRUE; -} - -static void destroyNative(JNIEnv* env, jobject object) { - LOGV(__FUNCTION__); -#ifdef HAVE_BLUETOOTH - native_data_t *nat = get_native_data(env, object); - - closeNative(env, object); - - pthread_mutex_lock(&nat->mutex); - if (nat->thread_data != NULL) { - nat->thread_data->nat = NULL; - } - pthread_mutex_unlock(&nat->mutex); - pthread_mutex_destroy(&nat->mutex); - - free(nat); -#endif -} - -static jboolean acceptNative(JNIEnv *env, jobject object) { - LOGV(__FUNCTION__); -#ifdef HAVE_BLUETOOTH - native_data_t *nat = get_native_data(env, object); - int signal_sks[2]; - pthread_t thread; - struct thread_data_t *data = NULL; - - pthread_mutex_lock(&nat->mutex); - if (nat->signal_sk != -1) { - pthread_mutex_unlock(&nat->mutex); - return JNI_FALSE; - } - - // setup socketpair to pass messages between threads - if (socketpair(AF_UNIX, SOCK_STREAM, 0, signal_sks) < 0) { - LOGE("%s: socketpair() failed: %s", __FUNCTION__, strerror(errno)); - pthread_mutex_unlock(&nat->mutex); - return JNI_FALSE; - } - nat->signal_sk = signal_sks[0]; - nat->object = env->NewGlobalRef(object); - - data = (thread_data_t *)calloc(1, sizeof(thread_data_t)); - if (data == NULL) { - LOGE("%s: out of memory", __FUNCTION__); - pthread_mutex_unlock(&nat->mutex); - return JNI_FALSE; - } - nat->thread_data = data; - pthread_mutex_unlock(&nat->mutex); - - data->signal_sk = signal_sks[1]; - data->nat = nat; - data->is_accept = true; - - if (pthread_create(&thread, NULL, &work_thread, (void *)data) < 0) { - LOGE("%s: pthread_create() failed: %s", __FUNCTION__, strerror(errno)); - return JNI_FALSE; - } - return JNI_TRUE; - -#endif - return JNI_FALSE; -} - -static jboolean connectNative(JNIEnv *env, jobject object, jstring address, - jstring name) { - - LOGV(__FUNCTION__); -#ifdef HAVE_BLUETOOTH - native_data_t *nat = get_native_data(env, 
object); - int signal_sks[2]; - pthread_t thread; - struct thread_data_t *data; - const char *c_address; - const char *c_name; - - pthread_mutex_lock(&nat->mutex); - if (nat->signal_sk != -1) { - pthread_mutex_unlock(&nat->mutex); - return JNI_FALSE; - } - - // setup socketpair to pass messages between threads - if (socketpair(AF_UNIX, SOCK_STREAM, 0, signal_sks) < 0) { - LOGE("%s: socketpair() failed: %s\n", __FUNCTION__, strerror(errno)); - pthread_mutex_unlock(&nat->mutex); - return JNI_FALSE; - } - nat->signal_sk = signal_sks[0]; - nat->object = env->NewGlobalRef(object); - - data = (thread_data_t *)calloc(1, sizeof(thread_data_t)); - if (data == NULL) { - LOGE("%s: out of memory", __FUNCTION__); - pthread_mutex_unlock(&nat->mutex); - return JNI_FALSE; - } - pthread_mutex_unlock(&nat->mutex); - - data->signal_sk = signal_sks[1]; - data->nat = nat; - c_address = env->GetStringUTFChars(address, NULL); - strlcpy(data->address, c_address, BTADDR_SIZE); - env->ReleaseStringUTFChars(address, c_address); - data->is_accept = false; - - if (name == NULL) { - LOGE("%s: Null pointer passed in for device name", __FUNCTION__); - data->sco_pkt_type = 0; - } else { - c_name = env->GetStringUTFChars(name, NULL); - /* See if this device is in the black list */ - data->sco_pkt_type = getScoType(data->address, c_name); - env->ReleaseStringUTFChars(name, c_name); - } - if (pthread_create(&thread, NULL, &work_thread, (void *)data) < 0) { - LOGE("%s: pthread_create() failed: %s", __FUNCTION__, strerror(errno)); - return JNI_FALSE; - } - return JNI_TRUE; - -#endif - return JNI_FALSE; -} - -static void closeNative(JNIEnv *env, jobject object) { - LOGV(__FUNCTION__); -#ifdef HAVE_BLUETOOTH - native_data_t *nat = get_native_data(env, object); - int signal_sk; - - pthread_mutex_lock(&nat->mutex); - signal_sk = nat->signal_sk; - nat->signal_sk = -1; - env->DeleteGlobalRef(nat->object); - nat->object = NULL; - pthread_mutex_unlock(&nat->mutex); - - if (signal_sk >= 0) { - LOGV("%s: 
signal_sk = %d", __FUNCTION__, signal_sk); - unsigned char dummy; - write(signal_sk, &dummy, sizeof(dummy)); - close(signal_sk); - } -#endif -} - -#ifdef HAVE_BLUETOOTH -/* thread entry point */ -static void *work_thread(void *arg) { - JNIEnv* env; - thread_data_t *data = (thread_data_t *)arg; - int sk; - - LOGV(__FUNCTION__); - if (jvm->AttachCurrentThread(&env, NULL) != JNI_OK) { - LOGE("%s: AttachCurrentThread() failed", __FUNCTION__); - return NULL; - } - - /* connect the SCO socket */ - if (data->is_accept) { - LOGV("SCO OBJECT %p ACCEPT #####", data->nat->object); - sk = accept_work(data->signal_sk); - LOGV("SCO OBJECT %p END ACCEPT *****", data->nat->object); - } else { - sk = connect_work(data->address, data->sco_pkt_type); - } - - /* callback with connection result */ - if (data->nat == NULL) { - LOGV("%s: object destroyed!", __FUNCTION__); - goto done; - } - pthread_mutex_lock(&data->nat->mutex); - if (data->nat->object == NULL) { - pthread_mutex_unlock(&data->nat->mutex); - LOGV("%s: callback cancelled", __FUNCTION__); - goto done; - } - if (data->is_accept) { - env->CallVoidMethod(data->nat->object, method_onAccepted, sk); - } else { - env->CallVoidMethod(data->nat->object, method_onConnected, sk); - } - pthread_mutex_unlock(&data->nat->mutex); - - if (sk < 0) { - goto done; - } - - LOGV("SCO OBJECT %p %d CONNECTED +++ (%s)", data->nat->object, sk, - data->is_accept ? 
"in" : "out"); - - /* wait for the socket to close */ - LOGV("wait_for_close()..."); - wait_for_close(sk, data->signal_sk); - LOGV("wait_for_close() returned"); - - /* callback with close result */ - if (data->nat == NULL) { - LOGV("%s: object destroyed!", __FUNCTION__); - goto done; - } - pthread_mutex_lock(&data->nat->mutex); - if (data->nat->object == NULL) { - LOGV("%s: callback cancelled", __FUNCTION__); - } else { - env->CallVoidMethod(data->nat->object, method_onClosed); - } - pthread_mutex_unlock(&data->nat->mutex); - -done: - if (sk >= 0) { - close(sk); - LOGV("SCO OBJECT %p %d CLOSED --- (%s)", data->nat->object, sk, data->is_accept ? "in" : "out"); - } - if (data->signal_sk >= 0) { - close(data->signal_sk); - } - LOGV("SCO socket closed"); - - if (data->nat != NULL) { - pthread_mutex_lock(&data->nat->mutex); - env->DeleteGlobalRef(data->nat->object); - data->nat->object = NULL; - data->nat->thread_data = NULL; - pthread_mutex_unlock(&data->nat->mutex); - } - - free(data); - if (jvm->DetachCurrentThread() != JNI_OK) { - LOGE("%s: DetachCurrentThread() failed", __FUNCTION__); - } - - LOGV("work_thread() done"); - return NULL; -} - -static int accept_work(int signal_sk) { - LOGV(__FUNCTION__); - int sk; - int nsk; - int addr_sz; - int max_fd; - fd_set fds; - struct sockaddr_sco addr; - - sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO); - if (sk < 0) { - LOGE("%s socket() failed: %s", __FUNCTION__, strerror(errno)); - return -1; - } - - memset(&addr, 0, sizeof(addr)); - addr.sco_family = AF_BLUETOOTH; - memcpy(&addr.sco_bdaddr, BDADDR_ANY, sizeof(bdaddr_t)); - if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) { - LOGE("%s bind() failed: %s", __FUNCTION__, strerror(errno)); - goto error; - } - - if (listen(sk, 1)) { - LOGE("%s: listen() failed: %s", __FUNCTION__, strerror(errno)); - goto error; - } - - memset(&addr, 0, sizeof(addr)); - addr_sz = sizeof(addr); - - FD_ZERO(&fds); - FD_SET(sk, &fds); - FD_SET(signal_sk, &fds); - - max_fd = (sk > 
signal_sk) ? sk : signal_sk; - LOGI("Listening SCO socket..."); - while (select(max_fd + 1, &fds, NULL, NULL, NULL) < 0) { - if (errno != EINTR) { - LOGE("%s: select() failed: %s", __FUNCTION__, strerror(errno)); - goto error; - } - LOGV("%s: select() EINTR, retrying", __FUNCTION__); - } - LOGV("select() returned"); - if (FD_ISSET(signal_sk, &fds)) { - // signal to cancel listening - LOGV("cancelled listening socket, closing"); - goto error; - } - if (!FD_ISSET(sk, &fds)) { - LOGE("error: select() returned >= 0 with no fds set"); - goto error; - } - - nsk = accept(sk, (struct sockaddr *)&addr, &addr_sz); - if (nsk < 0) { - LOGE("%s: accept() failed: %s", __FUNCTION__, strerror(errno)); - goto error; - } - LOGI("Connected SCO socket (incoming)"); - close(sk); // The listening socket - - return nsk; - -error: - close(sk); - - return -1; -} - -static int connect_work(const char *address, uint16_t sco_pkt_type) { - LOGV(__FUNCTION__); - struct sockaddr_sco addr; - int sk = -1; - - sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO); - if (sk < 0) { - LOGE("%s: socket() failed: %s", __FUNCTION__, strerror(errno)); - return -1; - } - - /* Bind to local address */ - memset(&addr, 0, sizeof(addr)); - addr.sco_family = AF_BLUETOOTH; - memcpy(&addr.sco_bdaddr, BDADDR_ANY, sizeof(bdaddr_t)); - if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) { - LOGE("%s: bind() failed: %s", __FUNCTION__, strerror(errno)); - goto error; - } - - memset(&addr, 0, sizeof(addr)); - addr.sco_family = AF_BLUETOOTH; - get_bdaddr(address, &addr.sco_bdaddr); - addr.sco_pkt_type = sco_pkt_type; - LOGI("Connecting to socket"); - while (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) { - if (errno != EINTR) { - LOGE("%s: connect() failed: %s", __FUNCTION__, strerror(errno)); - goto error; - } - LOGV("%s: connect() EINTR, retrying", __FUNCTION__); - } - LOGI("SCO socket connected (outgoing)"); - - return sk; - -error: - if (sk >= 0) close(sk); - return -1; -} - -static void 
wait_for_close(int sk, int signal_sk) { - LOGV(__FUNCTION__); - pollfd p[2]; - - memset(p, 0, 2 * sizeof(pollfd)); - p[0].fd = sk; - p[1].fd = signal_sk; - p[1].events = POLLIN | POLLPRI; - - LOGV("poll..."); - - while (poll(p, 2, -1) < 0) { // blocks - if (errno != EINTR) { - LOGE("%s: poll() failed: %s", __FUNCTION__, strerror(errno)); - break; - } - LOGV("%s: poll() EINTR, retrying", __FUNCTION__); - } - - LOGV("poll() returned"); -} -#endif - -static JNINativeMethod sMethods[] = { - {"classInitNative", "()V", (void*)classInitNative}, - {"initNative", "()V", (void *)initNative}, - {"destroyNative", "()V", (void *)destroyNative}, - {"connectNative", "(Ljava/lang/String;Ljava/lang/String;)Z", (void *)connectNative}, - {"acceptNative", "()Z", (void *)acceptNative}, - {"closeNative", "()V", (void *)closeNative}, -}; - -int register_android_bluetooth_ScoSocket(JNIEnv *env) { - return AndroidRuntime::registerNativeMethods(env, - "android/bluetooth/ScoSocket", sMethods, NELEM(sMethods)); -} - -} /* namespace android */ diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h index 194f23aa5dc8..9fd905fc9ebc 100644 --- a/include/media/AudioSystem.h +++ b/include/media/AudioSystem.h @@ -168,6 +168,15 @@ public: TX_DISABLE = 0 }; + // special audio session values + enum audio_sessions { + SESSION_OUTPUT_STAGE = -1, // session for effects attached to a particular output stream + // (value must be less than 0) + SESSION_OUTPUT_MIX = 0, // session for effects applied to output mix. 
These effects can + // be moved by audio policy manager to another output stream + // (value must be 0) + }; + /* These are static methods to control the system-wide AudioFlinger * only privileged processes can have access to them */ @@ -353,8 +362,12 @@ public: uint32_t format = FORMAT_DEFAULT, uint32_t channels = CHANNEL_OUT_STEREO, output_flags flags = OUTPUT_FLAG_INDIRECT); - static status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream); - static status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream); + static status_t startOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session = 0); + static status_t stopOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session = 0); static void releaseOutput(audio_io_handle_t output); static audio_io_handle_t getInput(int inputSource, uint32_t samplingRate = 0, @@ -370,6 +383,16 @@ public: static status_t setStreamVolumeIndex(stream_type stream, int index); static status_t getStreamVolumeIndex(stream_type stream, int *index); + static uint32_t getStrategyForStream(stream_type stream); + + static audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc); + static status_t registerEffect(effect_descriptor_t *desc, + audio_io_handle_t output, + uint32_t strategy, + int session, + int id); + static status_t unregisterEffect(int id); + static const sp<IAudioPolicyService>& get_audio_policy_service(); // ---------------------------------------------------------------------------- diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h index 5814fd615acd..70e505eb342e 100644 --- a/include/media/IAudioFlinger.h +++ b/include/media/IAudioFlinger.h @@ -161,6 +161,8 @@ public: status_t *status, int *id, int *enabled) = 0; + + virtual status_t moveEffects(int session, int srcOutput, int dstOutput) = 0; }; diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h index 
4804bbd20a39..49eee59040e1 100644 --- a/include/media/IAudioPolicyService.h +++ b/include/media/IAudioPolicyService.h @@ -53,8 +53,12 @@ public: uint32_t format = AudioSystem::FORMAT_DEFAULT, uint32_t channels = 0, AudioSystem::output_flags flags = AudioSystem::OUTPUT_FLAG_INDIRECT) = 0; - virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream) = 0; - virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream) = 0; + virtual status_t startOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session = 0) = 0; + virtual status_t stopOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session = 0) = 0; virtual void releaseOutput(audio_io_handle_t output) = 0; virtual audio_io_handle_t getInput(int inputSource, uint32_t samplingRate = 0, @@ -69,6 +73,14 @@ public: int indexMax) = 0; virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index) = 0; virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index) = 0; + virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream) = 0; + virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc) = 0; + virtual status_t registerEffect(effect_descriptor_t *desc, + audio_io_handle_t output, + uint32_t strategy, + int session, + int id) = 0; + virtual status_t unregisterEffect(int id) = 0; }; diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h index 9af587121b94..9a09586be68e 100644 --- a/include/media/stagefright/AudioPlayer.h +++ b/include/media/stagefright/AudioPlayer.h @@ -86,6 +86,10 @@ private: bool mStarted; + bool mIsFirstBuffer; + status_t mFirstBufferResult; + MediaBuffer *mFirstBuffer; + sp<MediaPlayerBase::AudioSink> mAudioSink; static void AudioCallback(int event, void *user, void *info); diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h index 
fd30ba58d4e9..ed5f09f8eda8 100644 --- a/include/media/stagefright/CameraSource.h +++ b/include/media/stagefright/CameraSource.h @@ -22,7 +22,6 @@ #include <media/stagefright/MediaSource.h> #include <utils/List.h> #include <utils/RefBase.h> -#include <utils/threads.h> namespace android { @@ -35,10 +34,6 @@ public: static CameraSource *Create(); static CameraSource *CreateFromCamera(const sp<Camera> &camera); - void enableTimeLapseMode( - int64_t timeBetweenTimeLapseFrameCaptureUs, int32_t videoFrameRate); - void disableTimeLapseMode(); - virtual ~CameraSource(); virtual status_t start(MetaData *params = NULL); @@ -51,12 +46,34 @@ public: virtual void signalBufferReturned(MediaBuffer* buffer); -private: - friend class CameraSourceListener; - +protected: sp<Camera> mCamera; sp<MetaData> mMeta; + int64_t mStartTimeUs; + int32_t mNumFramesReceived; + int64_t mLastFrameTimestampUs; + bool mStarted; + + CameraSource(const sp<Camera> &camera); + + virtual void startCameraRecording(); + virtual void stopCameraRecording(); + virtual void releaseRecordingFrame(const sp<IMemory>& frame); + + // Returns true if need to skip the current frame. + // Called from dataCallbackTimestamp. + virtual bool skipCurrentFrame(int64_t timestampUs) {return false;} + + // Callback called when still camera raw data is available. 
+ virtual void dataCallback(int32_t msgType, const sp<IMemory> &data) {} + + virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType, + const sp<IMemory> &data); + +private: + friend class CameraSourceListener; + Mutex mLock; Condition mFrameAvailableCondition; Condition mFrameCompleteCondition; @@ -64,29 +81,12 @@ private: List<sp<IMemory> > mFramesBeingEncoded; List<int64_t> mFrameTimes; - int64_t mStartTimeUs; int64_t mFirstFrameTimeUs; - int64_t mLastFrameTimestampUs; - int32_t mNumFramesReceived; int32_t mNumFramesEncoded; int32_t mNumFramesDropped; int32_t mNumGlitches; int64_t mGlitchDurationThresholdUs; bool mCollectStats; - bool mStarted; - - // Time between capture of two frames during time lapse recording - // Negative value indicates that timelapse is disabled. - int64_t mTimeBetweenTimeLapseFrameCaptureUs; - // Time between two frames in final video (1/frameRate) - int64_t mTimeBetweenTimeLapseVideoFramesUs; - // Real timestamp of the last encoded time lapse frame - int64_t mLastTimeLapseFrameRealTimestampUs; - - CameraSource(const sp<Camera> &camera); - - void dataCallbackTimestamp( - int64_t timestampUs, int32_t msgType, const sp<IMemory> &data); void releaseQueuedFrames(); void releaseOneRecordingFrame(const sp<IMemory>& frame); diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h new file mode 100644 index 000000000000..f153f095fd78 --- /dev/null +++ b/include/media/stagefright/CameraSourceTimeLapse.h @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef CAMERA_SOURCE_TIME_LAPSE_H_ + +#define CAMERA_SOURCE_TIME_LAPSE_H_ + +#include <pthread.h> + +#include <utils/RefBase.h> +#include <utils/threads.h> + +namespace android { + +class ICamera; +class IMemory; +class Camera; + +class CameraSourceTimeLapse : public CameraSource { +public: + static CameraSourceTimeLapse *Create(bool useStillCameraForTimeLapse, + int64_t timeBetweenTimeLapseFrameCaptureUs, + int32_t videoFrameRate); + + static CameraSourceTimeLapse *CreateFromCamera(const sp<Camera> &camera, + bool useStillCameraForTimeLapse, + int64_t timeBetweenTimeLapseFrameCaptureUs, + int32_t videoFrameRate); + + virtual ~CameraSourceTimeLapse(); + +private: + // If true, will use still camera takePicture() for time lapse frames + // If false, will use the videocamera frames instead. + bool mUseStillCameraForTimeLapse; + + // Time between capture of two frames during time lapse recording + // Negative value indicates that timelapse is disabled. + int64_t mTimeBetweenTimeLapseFrameCaptureUs; + + // Time between two frames in final video (1/frameRate) + int64_t mTimeBetweenTimeLapseVideoFramesUs; + + // Real timestamp of the last encoded time lapse frame + int64_t mLastTimeLapseFrameRealTimestampUs; + + // Thread id of thread which takes still picture and sleeps in a loop. + pthread_t mThreadTimeLapse; + + // Variable set in dataCallbackTimestamp() to help skipCurrentFrame() + // to know if current frame needs to be skipped. + bool mSkipCurrentFrame; + + // True if camera is in preview mode and ready for takePicture(). 
+ bool mCameraIdle; + + CameraSourceTimeLapse(const sp<Camera> &camera, + bool useStillCameraForTimeLapse, + int64_t timeBetweenTimeLapseFrameCaptureUs, + int32_t videoFrameRate); + + // For still camera case starts a thread which calls camera's takePicture() + // in a loop. For video camera case, just starts the camera's video recording. + virtual void startCameraRecording(); + + // For still camera case joins the thread created in startCameraRecording(). + // For video camera case, just stops the camera's video recording. + virtual void stopCameraRecording(); + + // For still camera case don't need to do anything as memory is locally + // allocated with refcounting. + // For video camera case just tell the camera to release the frame. + virtual void releaseRecordingFrame(const sp<IMemory>& frame); + + // mSkipCurrentFrame is set to true in dataCallbackTimestamp() if the current + // frame needs to be skipped and this function just returns the value of mSkipCurrentFrame. + virtual bool skipCurrentFrame(int64_t timestampUs); + + // Handles the callback to handle raw frame data from the still camera. + // Creates a copy of the frame data as the camera can reuse the frame memory + // once this callback returns. The function also sets a new timestamp corresponding + // to one frame time ahead of the last encoded frame's time stamp. It then + // calls dataCallbackTimestamp() of the base class with the copied data and the + // modified timestamp, which will think that it received the frame from a video + // camera and proceed as usual. + virtual void dataCallback(int32_t msgType, const sp<IMemory> &data); + + // In the video camera case calls skipFrameAndModifyTimeStamp() to modify + // timestamp and set mSkipCurrentFrame. 
+ // Then it calls the base CameraSource::dataCallbackTimestamp() + virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType, + const sp<IMemory> &data); + + // When video camera is used for time lapse capture, returns true + // until enough time has passed for the next time lapse frame. When + // the frame needs to be encoded, it returns false and also modifies + // the time stamp to be one frame time ahead of the last encoded + // frame's time stamp. + bool skipFrameAndModifyTimeStamp(int64_t *timestampUs); + + // Wrapper to enter threadTimeLapseEntry() + static void *ThreadTimeLapseWrapper(void *me); + + // Runs a loop which sleeps until a still picture is required + // and then calls mCamera->takePicture() to take the still picture. + // Used only in the case mUseStillCameraForTimeLapse = true. + void threadTimeLapseEntry(); + + // Wrapper to enter threadStartPreview() + static void *ThreadStartPreviewWrapper(void *me); + + // Starts the camera's preview. + void threadStartPreview(); + + // Starts thread ThreadStartPreviewWrapper() for restarting preview. + // Needs to be done in a thread so that dataCallback() which calls this function + // can return, and the camera can know that takePicture() is done. + void restartPreview(); + + // Creates a copy of source_data into a new memory of final type MemoryBase. 
+ sp<IMemory> createIMemoryCopy(const sp<IMemory> &source_data); + + CameraSourceTimeLapse(const CameraSourceTimeLapse &); + CameraSourceTimeLapse &operator=(const CameraSourceTimeLapse &); +}; + +} // namespace android + +#endif // CAMERA_SOURCE_TIME_LAPSE_H_ diff --git a/libs/rs/rsType.cpp b/libs/rs/rsType.cpp index 52e0d52efa40..79cfd412c145 100644 --- a/libs/rs/rsType.cpp +++ b/libs/rs/rsType.cpp @@ -145,6 +145,10 @@ uint32_t Type::getLODOffset(uint32_t lod, uint32_t x, uint32_t y, uint32_t z) co void Type::makeGLComponents() { + if(getElement()->getFieldCount() >= RS_MAX_ATTRIBS) { + return; + } + uint32_t userNum = 0; for (uint32_t ct=0; ct < getElement()->getFieldCount(); ct++) { diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c index edd6184ff63d..0be280c2324e 100644 --- a/media/libeffects/factory/EffectsFactory.c +++ b/media/libeffects/factory/EffectsFactory.c @@ -31,7 +31,7 @@ static list_elem_t *gCurLib; // current library in enumeration process static list_elem_t *gCurEffect; // current effect in enumeration process static uint32_t gCurEffectIdx; // current effect index in enumeration process -static const char * const gEffectLibPath = "/system/lib/soundfx"; // path to built-in effect libraries +const char * const gEffectLibPath = "/system/lib/soundfx"; // path to built-in effect libraries static int gInitDone; // true is global initialization has been preformed static int gNextLibId; // used by loadLibrary() to allocate unique library handles static int gCanQueryEffect; // indicates that call to EffectQueryEffect() is valid, i.e. 
that the list of effects diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp index 3bbcf55a0a34..9e39e79006c8 100644 --- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp +++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp @@ -18,7 +18,7 @@ #define LOG_TAG "Bundle" #define ARRAY_SIZE(array) (sizeof array / sizeof array[0]) #define LVM_BUNDLE // Include all the bundle code -#define LOG_NDEBUG 0 +//#define LOG_NDEBUG 0 #include <cutils/log.h> #include <assert.h> diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp index 372a92774ff7..9c2a8ba3ab3c 100644 --- a/media/libmedia/AudioSystem.cpp +++ b/media/libmedia/AudioSystem.cpp @@ -590,18 +590,22 @@ audio_io_handle_t AudioSystem::getOutput(stream_type stream, return output; } -status_t AudioSystem::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream) +status_t AudioSystem::startOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); if (aps == 0) return PERMISSION_DENIED; - return aps->startOutput(output, stream); + return aps->startOutput(output, stream, session); } -status_t AudioSystem::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream) +status_t AudioSystem::stopOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); if (aps == 0) return PERMISSION_DENIED; - return aps->stopOutput(output, stream); + return aps->stopOutput(output, stream, session); } void AudioSystem::releaseOutput(audio_io_handle_t output) @@ -666,6 +670,38 @@ status_t AudioSystem::getStreamVolumeIndex(stream_type stream, int *index) return aps->getStreamVolumeIndex(stream, index); } +uint32_t AudioSystem::getStrategyForStream(AudioSystem::stream_type stream) +{ + const sp<IAudioPolicyService>& aps 
= AudioSystem::get_audio_policy_service(); + if (aps == 0) return 0; + return aps->getStrategyForStream(stream); +} + +audio_io_handle_t AudioSystem::getOutputForEffect(effect_descriptor_t *desc) +{ + const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); + if (aps == 0) return PERMISSION_DENIED; + return aps->getOutputForEffect(desc); +} + +status_t AudioSystem::registerEffect(effect_descriptor_t *desc, + audio_io_handle_t output, + uint32_t strategy, + int session, + int id) +{ + const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); + if (aps == 0) return PERMISSION_DENIED; + return aps->registerEffect(desc, output, strategy, session, id); +} + +status_t AudioSystem::unregisterEffect(int id) +{ + const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); + if (aps == 0) return PERMISSION_DENIED; + return aps->unregisterEffect(id); +} + // --------------------------------------------------------------------------- void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) { diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp index 7d6a5d347666..3a89e250d793 100644 --- a/media/libmedia/IAudioFlinger.cpp +++ b/media/libmedia/IAudioFlinger.cpp @@ -69,7 +69,8 @@ enum { QUERY_NUM_EFFECTS, QUERY_EFFECT, GET_EFFECT_DESCRIPTOR, - CREATE_EFFECT + CREATE_EFFECT, + MOVE_EFFECTS }; class BpAudioFlinger : public BpInterface<IAudioFlinger> @@ -676,6 +677,17 @@ public: return effect; } + + virtual status_t moveEffects(int session, int srcOutput, int dstOutput) + { + Parcel data, reply; + data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); + data.writeInt32(session); + data.writeInt32(srcOutput); + data.writeInt32(dstOutput); + remote()->transact(MOVE_EFFECTS, data, &reply); + return reply.readInt32(); + } }; IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger"); @@ -1024,6 +1036,14 @@ status_t BnAudioFlinger::onTransact( 
reply->write(&desc, sizeof(effect_descriptor_t)); return NO_ERROR; } break; + case MOVE_EFFECTS: { + CHECK_INTERFACE(IAudioFlinger, data, reply); + int session = data.readInt32(); + int srcOutput = data.readInt32(); + int dstOutput = data.readInt32(); + reply->writeInt32(moveEffects(session, srcOutput, dstOutput)); + return NO_ERROR; + } break; default: return BBinder::onTransact(code, data, reply, flags); } diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp index 18dd173bfc64..950c2131824f 100644 --- a/media/libmedia/IAudioPolicyService.cpp +++ b/media/libmedia/IAudioPolicyService.cpp @@ -44,7 +44,11 @@ enum { RELEASE_INPUT, INIT_STREAM_VOLUME, SET_STREAM_VOLUME, - GET_STREAM_VOLUME + GET_STREAM_VOLUME, + GET_STRATEGY_FOR_STREAM, + GET_OUTPUT_FOR_EFFECT, + REGISTER_EFFECT, + UNREGISTER_EFFECT }; class BpAudioPolicyService : public BpInterface<IAudioPolicyService> @@ -137,22 +141,28 @@ public: return static_cast <audio_io_handle_t> (reply.readInt32()); } - virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream) + virtual status_t startOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session) { Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); data.writeInt32(output); data.writeInt32(stream); + data.writeInt32(session); remote()->transact(START_OUTPUT, data, &reply); return static_cast <status_t> (reply.readInt32()); } - virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream) + virtual status_t stopOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session) { Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); data.writeInt32(output); data.writeInt32(stream); + data.writeInt32(session); remote()->transact(STOP_OUTPUT, data, &reply); return static_cast <status_t> (reply.readInt32()); } @@ -242,6 +252,51 @@ public: if (index) *index = 
lIndex; return static_cast <status_t> (reply.readInt32()); } + + virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream) + { + Parcel data, reply; + data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); + data.writeInt32(static_cast <uint32_t>(stream)); + remote()->transact(GET_STRATEGY_FOR_STREAM, data, &reply); + return reply.readInt32(); + } + + virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc) + { + Parcel data, reply; + data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); + data.write(desc, sizeof(effect_descriptor_t)); + remote()->transact(GET_OUTPUT_FOR_EFFECT, data, &reply); + return static_cast <audio_io_handle_t> (reply.readInt32()); + } + + virtual status_t registerEffect(effect_descriptor_t *desc, + audio_io_handle_t output, + uint32_t strategy, + int session, + int id) + { + Parcel data, reply; + data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); + data.write(desc, sizeof(effect_descriptor_t)); + data.writeInt32(output); + data.writeInt32(strategy); + data.writeInt32(session); + data.writeInt32(id); + remote()->transact(REGISTER_EFFECT, data, &reply); + return static_cast <status_t> (reply.readInt32()); + } + + virtual status_t unregisterEffect(int id) + { + Parcel data, reply; + data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); + data.writeInt32(id); + remote()->transact(UNREGISTER_EFFECT, data, &reply); + return static_cast <status_t> (reply.readInt32()); + } + }; IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService"); @@ -255,18 +310,24 @@ status_t BnAudioPolicyService::onTransact( switch(code) { case SET_DEVICE_CONNECTION_STATE: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - AudioSystem::audio_devices device = static_cast <AudioSystem::audio_devices>(data.readInt32()); - AudioSystem::device_connection_state state = static_cast <AudioSystem::device_connection_state>(data.readInt32()); + 
AudioSystem::audio_devices device = + static_cast <AudioSystem::audio_devices>(data.readInt32()); + AudioSystem::device_connection_state state = + static_cast <AudioSystem::device_connection_state>(data.readInt32()); const char *device_address = data.readCString(); - reply->writeInt32(static_cast <uint32_t>(setDeviceConnectionState(device, state, device_address))); + reply->writeInt32(static_cast<uint32_t> (setDeviceConnectionState(device, + state, + device_address))); return NO_ERROR; } break; case GET_DEVICE_CONNECTION_STATE: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - AudioSystem::audio_devices device = static_cast <AudioSystem::audio_devices>(data.readInt32()); + AudioSystem::audio_devices device = + static_cast<AudioSystem::audio_devices> (data.readInt32()); const char *device_address = data.readCString(); - reply->writeInt32(static_cast <uint32_t>(getDeviceConnectionState(device, device_address))); + reply->writeInt32(static_cast<uint32_t> (getDeviceConnectionState(device, + device_address))); return NO_ERROR; } break; @@ -287,7 +348,8 @@ status_t BnAudioPolicyService::onTransact( case SET_FORCE_USE: { CHECK_INTERFACE(IAudioPolicyService, data, reply); AudioSystem::force_use usage = static_cast <AudioSystem::force_use>(data.readInt32()); - AudioSystem::forced_config config = static_cast <AudioSystem::forced_config>(data.readInt32()); + AudioSystem::forced_config config = + static_cast <AudioSystem::forced_config>(data.readInt32()); reply->writeInt32(static_cast <uint32_t>(setForceUse(usage, config))); return NO_ERROR; } break; @@ -301,11 +363,13 @@ status_t BnAudioPolicyService::onTransact( case GET_OUTPUT: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32()); + AudioSystem::stream_type stream = + static_cast <AudioSystem::stream_type>(data.readInt32()); uint32_t samplingRate = data.readInt32(); uint32_t format = data.readInt32(); uint32_t channels = 
data.readInt32(); - AudioSystem::output_flags flags = static_cast <AudioSystem::output_flags>(data.readInt32()); + AudioSystem::output_flags flags = + static_cast <AudioSystem::output_flags>(data.readInt32()); audio_io_handle_t output = getOutput(stream, samplingRate, @@ -320,7 +384,10 @@ status_t BnAudioPolicyService::onTransact( CHECK_INTERFACE(IAudioPolicyService, data, reply); audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32()); uint32_t stream = data.readInt32(); - reply->writeInt32(static_cast <uint32_t>(startOutput(output, (AudioSystem::stream_type)stream))); + int session = data.readInt32(); + reply->writeInt32(static_cast <uint32_t>(startOutput(output, + (AudioSystem::stream_type)stream, + session))); return NO_ERROR; } break; @@ -328,7 +395,10 @@ status_t BnAudioPolicyService::onTransact( CHECK_INTERFACE(IAudioPolicyService, data, reply); audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32()); uint32_t stream = data.readInt32(); - reply->writeInt32(static_cast <uint32_t>(stopOutput(output, (AudioSystem::stream_type)stream))); + int session = data.readInt32(); + reply->writeInt32(static_cast <uint32_t>(stopOutput(output, + (AudioSystem::stream_type)stream, + session))); return NO_ERROR; } break; @@ -345,7 +415,8 @@ status_t BnAudioPolicyService::onTransact( uint32_t samplingRate = data.readInt32(); uint32_t format = data.readInt32(); uint32_t channels = data.readInt32(); - AudioSystem::audio_in_acoustics acoustics = static_cast <AudioSystem::audio_in_acoustics>(data.readInt32()); + AudioSystem::audio_in_acoustics acoustics = + static_cast <AudioSystem::audio_in_acoustics>(data.readInt32()); audio_io_handle_t input = getInput(inputSource, samplingRate, format, @@ -378,7 +449,8 @@ status_t BnAudioPolicyService::onTransact( case INIT_STREAM_VOLUME: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32()); + 
AudioSystem::stream_type stream = + static_cast <AudioSystem::stream_type>(data.readInt32()); int indexMin = data.readInt32(); int indexMax = data.readInt32(); reply->writeInt32(static_cast <uint32_t>(initStreamVolume(stream, indexMin,indexMax))); @@ -387,7 +459,8 @@ status_t BnAudioPolicyService::onTransact( case SET_STREAM_VOLUME: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32()); + AudioSystem::stream_type stream = + static_cast <AudioSystem::stream_type>(data.readInt32()); int index = data.readInt32(); reply->writeInt32(static_cast <uint32_t>(setStreamVolumeIndex(stream, index))); return NO_ERROR; @@ -395,7 +468,8 @@ status_t BnAudioPolicyService::onTransact( case GET_STREAM_VOLUME: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32()); + AudioSystem::stream_type stream = + static_cast <AudioSystem::stream_type>(data.readInt32()); int index; status_t status = getStreamVolumeIndex(stream, &index); reply->writeInt32(index); @@ -403,6 +477,46 @@ status_t BnAudioPolicyService::onTransact( return NO_ERROR; } break; + case GET_STRATEGY_FOR_STREAM: { + CHECK_INTERFACE(IAudioPolicyService, data, reply); + AudioSystem::stream_type stream = + static_cast <AudioSystem::stream_type>(data.readInt32()); + reply->writeInt32(getStrategyForStream(stream)); + return NO_ERROR; + } break; + + case GET_OUTPUT_FOR_EFFECT: { + CHECK_INTERFACE(IAudioPolicyService, data, reply); + effect_descriptor_t desc; + data.read(&desc, sizeof(effect_descriptor_t)); + audio_io_handle_t output = getOutputForEffect(&desc); + reply->writeInt32(static_cast <int>(output)); + return NO_ERROR; + } break; + + case REGISTER_EFFECT: { + CHECK_INTERFACE(IAudioPolicyService, data, reply); + effect_descriptor_t desc; + data.read(&desc, sizeof(effect_descriptor_t)); + audio_io_handle_t output = data.readInt32(); + uint32_t 
strategy = data.readInt32(); + int session = data.readInt32(); + int id = data.readInt32(); + reply->writeInt32(static_cast <int32_t>(registerEffect(&desc, + output, + strategy, + session, + id))); + return NO_ERROR; + } break; + + case UNREGISTER_EFFECT: { + CHECK_INTERFACE(IAudioPolicyService, data, reply); + int id = data.readInt32(); + reply->writeInt32(static_cast <int32_t>(unregisterEffect(id))); + return NO_ERROR; + } break; + default: return BBinder::onTransact(code, data, reply, flags); } diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp index 24b0e7b77971..c6f1a676ee63 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.cpp +++ b/media/libmediaplayerservice/StagefrightRecorder.cpp @@ -24,6 +24,7 @@ #include <media/stagefright/AudioSource.h> #include <media/stagefright/AMRWriter.h> #include <media/stagefright/CameraSource.h> +#include <media/stagefright/CameraSourceTimeLapse.h> #include <media/stagefright/MPEG4Writer.h> #include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> @@ -895,11 +896,10 @@ status_t StagefrightRecorder::setupVideoEncoder(const sp<MediaWriter>& writer) { status_t err = setupCameraSource(); if (err != OK) return err; - sp<CameraSource> cameraSource = CameraSource::CreateFromCamera(mCamera); + sp<CameraSource> cameraSource = (mCaptureTimeLapse) ? + CameraSourceTimeLapse::CreateFromCamera(mCamera, true, 3E6, mFrameRate): + CameraSource::CreateFromCamera(mCamera); CHECK(cameraSource != NULL); - if(mCaptureTimeLapse) { - cameraSource->enableTimeLapseMode(1E6, mFrameRate); - } sp<MetaData> enc_meta = new MetaData; enc_meta->setInt32(kKeyBitRate, mVideoBitRate); @@ -949,9 +949,11 @@ status_t StagefrightRecorder::setupVideoEncoder(const sp<MediaWriter>& writer) { OMXClient client; CHECK_EQ(client.connect(), OK); + uint32_t encoder_flags = (mCaptureTimeLapse) ? 
OMXCodec::kPreferSoftwareCodecs : 0; sp<MediaSource> encoder = OMXCodec::Create( client.interface(), enc_meta, - true /* createEncoder */, cameraSource); + true /* createEncoder */, cameraSource, + NULL, encoder_flags); if (encoder == NULL) { return UNKNOWN_ERROR; } diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk index 89bfc1fd92b9..bf5643d00e86 100644 --- a/media/libstagefright/Android.mk +++ b/media/libstagefright/Android.mk @@ -10,6 +10,7 @@ LOCAL_SRC_FILES:= \ AudioSource.cpp \ AwesomePlayer.cpp \ CameraSource.cpp \ + CameraSourceTimeLapse.cpp \ DataSource.cpp \ ESDS.cpp \ FileSource.cpp \ diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp index b79ba13a9ba3..b7bde6bdb097 100644 --- a/media/libstagefright/AudioPlayer.cpp +++ b/media/libstagefright/AudioPlayer.cpp @@ -41,6 +41,9 @@ AudioPlayer::AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink) mReachedEOS(false), mFinalStatus(OK), mStarted(false), + mIsFirstBuffer(false), + mFirstBufferResult(OK), + mFirstBuffer(NULL), mAudioSink(audioSink) { } @@ -68,6 +71,24 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) { } } + // We allow an optional INFO_FORMAT_CHANGED at the very beginning + // of playback, if there is one, getFormat below will retrieve the + // updated format, if there isn't, we'll stash away the valid buffer + // of data to be used on the first audio callback. 
+ + CHECK(mFirstBuffer == NULL); + + mFirstBufferResult = mSource->read(&mFirstBuffer); + if (mFirstBufferResult == INFO_FORMAT_CHANGED) { + LOGV("INFO_FORMAT_CHANGED!!!"); + + CHECK(mFirstBuffer == NULL); + mFirstBufferResult = OK; + mIsFirstBuffer = false; + } else { + mIsFirstBuffer = true; + } + sp<MetaData> format = mSource->getFormat(); const char *mime; bool success = format->findCString(kKeyMIMEType, &mime); @@ -87,6 +108,11 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) { DEFAULT_AUDIOSINK_BUFFERCOUNT, &AudioPlayer::AudioSinkCallback, this); if (err != OK) { + if (mFirstBuffer != NULL) { + mFirstBuffer->release(); + mFirstBuffer = NULL; + } + if (!sourceAlreadyStarted) { mSource->stop(); } @@ -110,6 +136,11 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) { delete mAudioTrack; mAudioTrack = NULL; + if (mFirstBuffer != NULL) { + mFirstBuffer->release(); + mFirstBuffer = NULL; + } + if (!sourceAlreadyStarted) { mSource->stop(); } @@ -163,6 +194,12 @@ void AudioPlayer::stop() { // Make sure to release any buffer we hold onto so that the // source is able to stop(). 
+ + if (mFirstBuffer != NULL) { + mFirstBuffer->release(); + mFirstBuffer = NULL; + } + if (mInputBuffer != NULL) { LOGV("AudioPlayer releasing input buffer."); @@ -247,6 +284,14 @@ size_t AudioPlayer::fillBuffer(void *data, size_t size) { Mutex::Autolock autoLock(mLock); if (mSeeking) { + if (mIsFirstBuffer) { + if (mFirstBuffer != NULL) { + mFirstBuffer->release(); + mFirstBuffer = NULL; + } + mIsFirstBuffer = false; + } + options.setSeekTo(mSeekTimeUs); if (mInputBuffer != NULL) { @@ -259,7 +304,17 @@ size_t AudioPlayer::fillBuffer(void *data, size_t size) { } if (mInputBuffer == NULL) { - status_t err = mSource->read(&mInputBuffer, &options); + status_t err; + + if (mIsFirstBuffer) { + mInputBuffer = mFirstBuffer; + mFirstBuffer = NULL; + err = mFirstBufferResult; + + mIsFirstBuffer = false; + } else { + err = mSource->read(&mInputBuffer, &options); + } CHECK((err == OK && mInputBuffer != NULL) || (err != OK && mInputBuffer == NULL)); diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp index bb53d97b3b4f..aa0893c9637a 100644 --- a/media/libstagefright/CameraSource.cpp +++ b/media/libstagefright/CameraSource.cpp @@ -65,6 +65,11 @@ void CameraSourceListener::notify(int32_t msgType, int32_t ext1, int32_t ext2) { void CameraSourceListener::postData(int32_t msgType, const sp<IMemory> &dataPtr) { LOGV("postData(%d, ptr:%p, size:%d)", msgType, dataPtr->pointer(), dataPtr->size()); + + sp<CameraSource> source = mSource.promote(); + if (source.get() != NULL) { + source->dataCallback(msgType, dataPtr); + } } void CameraSourceListener::postDataTimestamp( @@ -116,33 +121,17 @@ CameraSource *CameraSource::CreateFromCamera(const sp<Camera> &camera) { return new CameraSource(camera); } -void CameraSource::enableTimeLapseMode( - int64_t timeBetweenTimeLapseFrameCaptureUs, int32_t videoFrameRate) { - LOGV("starting time lapse mode"); - mTimeBetweenTimeLapseFrameCaptureUs = timeBetweenTimeLapseFrameCaptureUs; - 
mTimeBetweenTimeLapseVideoFramesUs = (1E6/videoFrameRate); -} - -void CameraSource::disableTimeLapseMode() { - LOGV("stopping time lapse mode"); - mTimeBetweenTimeLapseFrameCaptureUs = -1; - mTimeBetweenTimeLapseVideoFramesUs = 0; -} - CameraSource::CameraSource(const sp<Camera> &camera) : mCamera(camera), - mFirstFrameTimeUs(0), - mLastFrameTimestampUs(0), mNumFramesReceived(0), + mLastFrameTimestampUs(0), + mStarted(false), + mFirstFrameTimeUs(0), mNumFramesEncoded(0), mNumFramesDropped(0), mNumGlitches(0), mGlitchDurationThresholdUs(200000), - mCollectStats(false), - mStarted(false), - mTimeBetweenTimeLapseFrameCaptureUs(-1), - mTimeBetweenTimeLapseVideoFramesUs(0), - mLastTimeLapseFrameRealTimestampUs(0) { + mCollectStats(false) { int64_t token = IPCThreadState::self()->clearCallingIdentity(); String8 s = mCamera->getParameters(); @@ -177,7 +166,6 @@ CameraSource::CameraSource(const sp<Camera> &camera) mMeta->setInt32(kKeyHeight, height); mMeta->setInt32(kKeyStride, stride); mMeta->setInt32(kKeySliceHeight, sliceHeight); - } CameraSource::~CameraSource() { @@ -186,6 +174,10 @@ CameraSource::~CameraSource() { } } +void CameraSource::startCameraRecording() { + CHECK_EQ(OK, mCamera->startRecording()); +} + status_t CameraSource::start(MetaData *meta) { CHECK(!mStarted); @@ -203,13 +195,17 @@ status_t CameraSource::start(MetaData *meta) { int64_t token = IPCThreadState::self()->clearCallingIdentity(); mCamera->setListener(new CameraSourceListener(this)); - CHECK_EQ(OK, mCamera->startRecording()); + startCameraRecording(); IPCThreadState::self()->restoreCallingIdentity(token); mStarted = true; return OK; } +void CameraSource::stopCameraRecording() { + mCamera->stopRecording(); +} + status_t CameraSource::stop() { LOGV("stop"); Mutex::Autolock autoLock(mLock); @@ -218,7 +214,7 @@ status_t CameraSource::stop() { int64_t token = IPCThreadState::self()->clearCallingIdentity(); mCamera->setListener(NULL); - mCamera->stopRecording(); + stopCameraRecording(); 
releaseQueuedFrames(); while (!mFramesBeingEncoded.empty()) { LOGI("Waiting for outstanding frames being encoded: %d", @@ -238,11 +234,15 @@ status_t CameraSource::stop() { return OK; } +void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) { + mCamera->releaseRecordingFrame(frame); +} + void CameraSource::releaseQueuedFrames() { List<sp<IMemory> >::iterator it; while (!mFramesReceived.empty()) { it = mFramesReceived.begin(); - mCamera->releaseRecordingFrame(*it); + releaseRecordingFrame(*it); mFramesReceived.erase(it); ++mNumFramesDropped; } @@ -254,7 +254,7 @@ sp<MetaData> CameraSource::getFormat() { void CameraSource::releaseOneRecordingFrame(const sp<IMemory>& frame) { int64_t token = IPCThreadState::self()->clearCallingIdentity(); - mCamera->releaseRecordingFrame(frame); + releaseRecordingFrame(frame); IPCThreadState::self()->restoreCallingIdentity(token); } @@ -263,7 +263,6 @@ void CameraSource::signalBufferReturned(MediaBuffer *buffer) { for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin(); it != mFramesBeingEncoded.end(); ++it) { if ((*it)->pointer() == buffer->data()) { - releaseOneRecordingFrame((*it)); mFramesBeingEncoded.erase(it); ++mNumFramesEncoded; @@ -332,33 +331,11 @@ void CameraSource::dataCallbackTimestamp(int64_t timestampUs, ++mNumGlitches; } - // time lapse - if(mTimeBetweenTimeLapseFrameCaptureUs >= 0) { - if(mLastTimeLapseFrameRealTimestampUs == 0) { - // First time lapse frame. Initialize mLastTimeLapseFrameRealTimestampUs - // to current time (timestampUs) and save frame data. - LOGV("dataCallbackTimestamp timelapse: initial frame"); - - mLastTimeLapseFrameRealTimestampUs = timestampUs; - } else if (timestampUs < - (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenTimeLapseFrameCaptureUs)) { - // Skip all frames from last encoded frame until - // sufficient time (mTimeBetweenTimeLapseFrameCaptureUs) has passed. - // Tell the camera to release its recording frame and return. 
- LOGV("dataCallbackTimestamp timelapse: skipping intermediate frame"); - - releaseOneRecordingFrame(data); - return; - } else { - // Desired frame has arrived after mTimeBetweenTimeLapseFrameCaptureUs time: - // - Reset mLastTimeLapseFrameRealTimestampUs to current time. - // - Artificially modify timestampUs to be one frame time (1/framerate) ahead - // of the last encoded frame's time stamp. - LOGV("dataCallbackTimestamp timelapse: got timelapse frame"); - - mLastTimeLapseFrameRealTimestampUs = timestampUs; - timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs; - } + // May need to skip frame or modify timestamp. Currently implemented + // by the subclass CameraSourceTimeLapse. + if(skipCurrentFrame(timestampUs)) { + releaseOneRecordingFrame(data); + return; } mLastFrameTimestampUs = timestampUs; diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp new file mode 100644 index 000000000000..30ed1436cc44 --- /dev/null +++ b/media/libstagefright/CameraSourceTimeLapse.cpp @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "CameraSourceTimeLapse" + +#include <binder/IPCThreadState.h> +#include <binder/MemoryBase.h> +#include <binder/MemoryHeapBase.h> +#include <media/stagefright/CameraSource.h> +#include <media/stagefright/CameraSourceTimeLapse.h> +#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/MetaData.h> +#include <camera/Camera.h> +#include <camera/CameraParameters.h> +#include <utils/String8.h> + +namespace android { + +// static +CameraSourceTimeLapse *CameraSourceTimeLapse::Create(bool useStillCameraForTimeLapse, + int64_t timeBetweenTimeLapseFrameCaptureUs, + int32_t videoFrameRate) { + sp<Camera> camera = Camera::connect(0); + + if (camera.get() == NULL) { + return NULL; + } + + return new CameraSourceTimeLapse(camera, useStillCameraForTimeLapse, + timeBetweenTimeLapseFrameCaptureUs, videoFrameRate); +} + +// static +CameraSourceTimeLapse *CameraSourceTimeLapse::CreateFromCamera(const sp<Camera> &camera, + bool useStillCameraForTimeLapse, + int64_t timeBetweenTimeLapseFrameCaptureUs, + int32_t videoFrameRate) { + if (camera.get() == NULL) { + return NULL; + } + + return new CameraSourceTimeLapse(camera, useStillCameraForTimeLapse, + timeBetweenTimeLapseFrameCaptureUs, videoFrameRate); +} + +CameraSourceTimeLapse::CameraSourceTimeLapse(const sp<Camera> &camera, + bool useStillCameraForTimeLapse, + int64_t timeBetweenTimeLapseFrameCaptureUs, + int32_t videoFrameRate) + : CameraSource(camera), + mUseStillCameraForTimeLapse(useStillCameraForTimeLapse), + mTimeBetweenTimeLapseFrameCaptureUs(timeBetweenTimeLapseFrameCaptureUs), + mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate), + mLastTimeLapseFrameRealTimestampUs(0), + mSkipCurrentFrame(false) { + + LOGV("starting time lapse mode"); + if(mUseStillCameraForTimeLapse) { + // Currently hardcoded the picture size. Will need to choose + // automatically or pass in from the app. 
+ int32_t width, height; + width = 1024; + height = 768; + mMeta->setInt32(kKeyWidth, width); + mMeta->setInt32(kKeyHeight, height); + } +} + +CameraSourceTimeLapse::~CameraSourceTimeLapse() { +} + +// static +void *CameraSourceTimeLapse::ThreadTimeLapseWrapper(void *me) { + CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me); + source->threadTimeLapseEntry(); + return NULL; +} + +void CameraSourceTimeLapse::threadTimeLapseEntry() { + while(mStarted) { + if(mCameraIdle) { + LOGV("threadTimeLapseEntry: taking picture"); + CHECK_EQ(OK, mCamera->takePicture()); + mCameraIdle = false; + sleep(mTimeBetweenTimeLapseFrameCaptureUs/1E6); + } else { + LOGV("threadTimeLapseEntry: camera busy with old takePicture. Sleeping a little."); + sleep(.01); + } + } +} + +void CameraSourceTimeLapse::startCameraRecording() { + if(mUseStillCameraForTimeLapse) { + LOGV("start time lapse recording using still camera"); + + int32_t width; + int32_t height; + mMeta->findInt32(kKeyWidth, &width); + mMeta->findInt32(kKeyHeight, &height); + + int64_t token = IPCThreadState::self()->clearCallingIdentity(); + String8 s = mCamera->getParameters(); + IPCThreadState::self()->restoreCallingIdentity(token); + + CameraParameters params(s); + params.setPictureSize(width, height); + mCamera->setParameters(params.flatten()); + mCameraIdle = true; + + // create a thread which takes pictures in a loop + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); + + pthread_create(&mThreadTimeLapse, &attr, ThreadTimeLapseWrapper, this); + pthread_attr_destroy(&attr); + } else { + LOGV("start time lapse recording using video camera"); + CHECK_EQ(OK, mCamera->startRecording()); + } +} + +void CameraSourceTimeLapse::stopCameraRecording() { + if(mUseStillCameraForTimeLapse) { + void *dummy; + pthread_join(mThreadTimeLapse, &dummy); + } else { + mCamera->stopRecording(); + } +} + +void CameraSourceTimeLapse::releaseRecordingFrame(const 
sp<IMemory>& frame) { + if(!mUseStillCameraForTimeLapse) { + mCamera->releaseRecordingFrame(frame); + } +} + +sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy(const sp<IMemory> &source_data) { + size_t source_size = source_data->size(); + void* source_pointer = source_data->pointer(); + + sp<MemoryHeapBase> newMemoryHeap = new MemoryHeapBase(source_size); + sp<MemoryBase> newMemory = new MemoryBase(newMemoryHeap, 0, source_size); + memcpy(newMemory->pointer(), source_pointer, source_size); + return newMemory; +} + +// static +void *CameraSourceTimeLapse::ThreadStartPreviewWrapper(void *me) { + CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me); + source->threadStartPreview(); + return NULL; +} + +void CameraSourceTimeLapse::threadStartPreview() { + CHECK_EQ(OK, mCamera->startPreview()); + mCameraIdle = true; +} + +void CameraSourceTimeLapse::restartPreview() { + // Start this in a different thread, so that the dataCallback can return + LOGV("restartPreview"); + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + + pthread_t threadPreview; + pthread_create(&threadPreview, &attr, ThreadStartPreviewWrapper, this); + pthread_attr_destroy(&attr); +} + +void CameraSourceTimeLapse::dataCallback(int32_t msgType, const sp<IMemory> &data) { + if(msgType == CAMERA_MSG_COMPRESSED_IMAGE) { + // takePicture will complete after this callback, so restart preview. 
+ restartPreview(); + } + if(msgType != CAMERA_MSG_RAW_IMAGE) { + return; + } + + LOGV("dataCallback for timelapse still frame"); + CHECK_EQ(true, mUseStillCameraForTimeLapse); + + int64_t timestampUs; + if (mNumFramesReceived == 0) { + timestampUs = mStartTimeUs; + } else { + timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs; + } + sp<IMemory> dataCopy = createIMemoryCopy(data); + dataCallbackTimestamp(timestampUs, msgType, dataCopy); +} + +bool CameraSourceTimeLapse::skipCurrentFrame(int64_t timestampUs) { + if(mSkipCurrentFrame) { + mSkipCurrentFrame = false; + return true; + } else { + return false; + } +} + +bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) { + if(!mUseStillCameraForTimeLapse) { + if(mLastTimeLapseFrameRealTimestampUs == 0) { + // First time lapse frame. Initialize mLastTimeLapseFrameRealTimestampUs + // to current time (timestampUs) and save frame data. + LOGV("dataCallbackTimestamp timelapse: initial frame"); + + mLastTimeLapseFrameRealTimestampUs = *timestampUs; + } else if (*timestampUs < + (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenTimeLapseFrameCaptureUs)) { + // Skip all frames from last encoded frame until + // sufficient time (mTimeBetweenTimeLapseFrameCaptureUs) has passed. + // Tell the camera to release its recording frame and return. + LOGV("dataCallbackTimestamp timelapse: skipping intermediate frame"); + return true; + } else { + // Desired frame has arrived after mTimeBetweenTimeLapseFrameCaptureUs time: + // - Reset mLastTimeLapseFrameRealTimestampUs to current time. + // - Artificially modify timestampUs to be one frame time (1/framerate) ahead + // of the last encoded frame's time stamp. 
+ LOGV("dataCallbackTimestamp timelapse: got timelapse frame"); + + mLastTimeLapseFrameRealTimestampUs = *timestampUs; + *timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs; + } + } + return false; +} + +void CameraSourceTimeLapse::dataCallbackTimestamp(int64_t timestampUs, int32_t msgType, + const sp<IMemory> &data) { + if(!mUseStillCameraForTimeLapse) { + mSkipCurrentFrame = skipFrameAndModifyTimeStamp(×tampUs); + } + CameraSource::dataCallbackTimestamp(timestampUs, msgType, data); +} + +} // namespace android diff --git a/media/libstagefright/codecs/aacdec/AACDecoder.cpp b/media/libstagefright/codecs/aacdec/AACDecoder.cpp index f3b281f65187..8ae1135b704a 100644 --- a/media/libstagefright/codecs/aacdec/AACDecoder.cpp +++ b/media/libstagefright/codecs/aacdec/AACDecoder.cpp @@ -99,15 +99,6 @@ status_t AACDecoder::initCheck() { != MP4AUDEC_SUCCESS) { return ERROR_UNSUPPORTED; } - - // Check on the sampling rate to see whether it is changed. - int32_t sampleRate; - CHECK(mMeta->findInt32(kKeySampleRate, &sampleRate)); - if (mConfig->samplingRate != sampleRate) { - mMeta->setInt32(kKeySampleRate, mConfig->samplingRate); - LOGW("Sample rate was %d, but now is %d", - sampleRate, mConfig->samplingRate); - } } return OK; } @@ -215,6 +206,19 @@ status_t AACDecoder::read( Int decoderErr = PVMP4AudioDecodeFrame(mConfig, mDecoderBuf); + // Check on the sampling rate to see whether it is changed. 
+ int32_t sampleRate; + CHECK(mMeta->findInt32(kKeySampleRate, &sampleRate)); + if (mConfig->samplingRate != sampleRate) { + mMeta->setInt32(kKeySampleRate, mConfig->samplingRate); + LOGW("Sample rate was %d, but now is %d", + sampleRate, mConfig->samplingRate); + buffer->release(); + mInputBuffer->release(); + mInputBuffer = NULL; + return INFO_FORMAT_CHANGED; + } + size_t numOutBytes = mConfig->frameLength * sizeof(int16_t) * mConfig->desiredChannels; if (mConfig->aacPlusUpsamplingFactor == 2) { diff --git a/media/mtp/Android.mk b/media/mtp/Android.mk index cb1c0d3b57a9..7502f6e890ad 100644 --- a/media/mtp/Android.mk +++ b/media/mtp/Android.mk @@ -14,10 +14,10 @@ # limitations under the License. # -ifneq ($(TARGET_SIMULATOR),true) - LOCAL_PATH:= $(call my-dir) +ifneq ($(TARGET_SIMULATOR),true) + include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp index b38a5c8d4877..b88e69d88580 100644 --- a/services/audioflinger/AudioFlinger.cpp +++ b/services/audioflinger/AudioFlinger.cpp @@ -63,6 +63,8 @@ // ---------------------------------------------------------------------------- +extern const char * const gEffectLibPath; + namespace android { static const char* kDeadlockedString = "AudioFlinger may be deadlocked\n"; @@ -127,8 +129,7 @@ static bool settingsAllowed() { AudioFlinger::AudioFlinger() : BnAudioFlinger(), - mAudioHardware(0), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1), - mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0) + mAudioHardware(0), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1) { mHardwareStatus = AUDIO_HW_IDLE; @@ -321,13 +322,19 @@ sp<IAudioTrack> AudioFlinger::createTrack( mClients.add(pid, client); } - // If no audio session id is provided, create one here - // TODO: enforce same stream type for all tracks in same audio session? 
- // TODO: prevent same audio session on different output threads LOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId); - if (sessionId != NULL && *sessionId != 0) { + if (sessionId != NULL && *sessionId != AudioSystem::SESSION_OUTPUT_MIX) { + // prevent same audio session on different output threads + for (size_t i = 0; i < mPlaybackThreads.size(); i++) { + if (mPlaybackThreads.keyAt(i) != output && + mPlaybackThreads.valueAt(i)->hasAudioSession(*sessionId)) { + lStatus = BAD_VALUE; + goto Exit; + } + } lSessionId = *sessionId; } else { + // if no audio session id is provided, create one here lSessionId = nextUniqueId(); if (sessionId != NULL) { *sessionId = lSessionId; @@ -1141,6 +1148,23 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTra { // scope for mLock Mutex::Autolock _l(mLock); + + // all tracks in same audio session must share the same routing strategy otherwise + // conflicts will happen when tracks are moved from one output to another by audio policy + // manager + uint32_t strategy = + AudioSystem::getStrategyForStream((AudioSystem::stream_type)streamType); + for (size_t i = 0; i < mTracks.size(); ++i) { + sp<Track> t = mTracks[i]; + if (t != 0) { + if (sessionId == t->sessionId() && + strategy != AudioSystem::getStrategyForStream((AudioSystem::stream_type)t->type())) { + lStatus = BAD_VALUE; + goto Exit; + } + } + } + track = new Track(this, client, streamType, sampleRate, format, channelCount, frameCount, sharedBuffer, sessionId); if (track->getCblk() == NULL || track->name() < 0) { @@ -1153,6 +1177,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTra if (chain != 0) { LOGV("createTrack_l() setting main buffer %p", chain->inBuffer()); track->setMainBuffer(chain->inBuffer()); + chain->setStrategy(AudioSystem::getStrategyForStream((AudioSystem::stream_type)track->type())); } } lStatus = NO_ERROR; @@ -1344,7 +1369,16 @@ void 
AudioFlinger::PlaybackThread::readOutputParameters() mMixBuffer = new int16_t[mFrameCount * 2]; memset(mMixBuffer, 0, mFrameCount * 2 * sizeof(int16_t)); - //TODO handle effects reconfig + // force reconfiguration of effect chains and engines to take new buffer size and audio + // parameters into account + // Note that mLock is not held when readOutputParameters() is called from the constructor + // but in this case nothing is done below as no audio sessions have effect yet so it doesn't + // matter. + // create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains + Vector< sp<EffectChain> > effectChains = mEffectChains; + for (size_t i = 0; i < effectChains.size(); i ++) { + mAudioFlinger->moveEffectChain_l(effectChains[i]->sessionId(), this, this); + } } status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames) @@ -1369,7 +1403,8 @@ bool AudioFlinger::PlaybackThread::hasAudioSession(int sessionId) for (size_t i = 0; i < mTracks.size(); ++i) { sp<Track> track = mTracks[i]; - if (sessionId == track->sessionId()) { + if (sessionId == track->sessionId() && + !(track->mCblk->flags & CBLK_INVALID_MSK)) { return true; } } @@ -1377,6 +1412,23 @@ bool AudioFlinger::PlaybackThread::hasAudioSession(int sessionId) return false; } +uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId) +{ + // session AudioSystem::SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that + // it is moved to correct output by audio policy manager when A2DP is connected or disconnected + if (sessionId == AudioSystem::SESSION_OUTPUT_MIX) { + return AudioSystem::getStrategyForStream(AudioSystem::MUSIC); + } + for (size_t i = 0; i < mTracks.size(); i++) { + sp<Track> track = mTracks[i]; + if (sessionId == track->sessionId() && + !(track->mCblk->flags & CBLK_INVALID_MSK)) { + return AudioSystem::getStrategyForStream((AudioSystem::stream_type) track->type()); + } + } + return 
AudioSystem::getStrategyForStream(AudioSystem::MUSIC); +} + sp<AudioFlinger::EffectChain> AudioFlinger::PlaybackThread::getEffectChain(int sessionId) { Mutex::Autolock _l(mLock); @@ -1503,8 +1555,7 @@ bool AudioFlinger::MixerThread::threadLoop() // prevent any changes in effect chain list and in each effect chain // during mixing and effect process as the audio buffers could be deleted // or modified if an effect is created or deleted - lockEffectChains_l(); - effectChains = mEffectChains; + lockEffectChains_l(effectChains); } if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) { @@ -1540,7 +1591,7 @@ bool AudioFlinger::MixerThread::threadLoop() effectChains[i]->process_l(); } // enable changes in effect chain - unlockEffectChains(); + unlockEffectChains(effectChains); #ifdef LVMX int audioOutputType = LifeVibes::getMixerType(mId, mType); if (LifeVibes::audioOutputTypeIsLifeVibes(audioOutputType)) { @@ -1571,7 +1622,7 @@ bool AudioFlinger::MixerThread::threadLoop() mStandby = false; } else { // enable changes in effect chain - unlockEffectChains(); + unlockEffectChains(effectChains); usleep(sleepTime); } @@ -1625,7 +1676,7 @@ uint32_t AudioFlinger::MixerThread::prepareTracks_l(const SortedVector< wp<Track } #endif // Delegate master volume control to effect in output mix effect chain if needed - sp<EffectChain> chain = getEffectChain_l(0); + sp<EffectChain> chain = getEffectChain_l(AudioSystem::SESSION_OUTPUT_MIX); if (chain != 0) { uint32_t v = (uint32_t)(masterVolume * (1 << 24)); chain->setVolume_l(&v, &v); @@ -1814,8 +1865,10 @@ uint32_t AudioFlinger::MixerThread::prepareTracks_l(const SortedVector< wp<Track void AudioFlinger::MixerThread::invalidateTracks(int streamType) { - LOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d", this, streamType, mTracks.size()); + LOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d", + this, streamType, mTracks.size()); Mutex::Autolock _l(mLock); + size_t size = 
mTracks.size(); for (size_t i = 0; i < size; i++) { sp<Track> t = mTracks[i]; @@ -2070,7 +2123,6 @@ bool AudioFlinger::DirectOutputThread::threadLoop() // hardware resources as soon as possible nsecs_t standbyDelay = microseconds(activeSleepTime*2); - while (!exitPending()) { bool rampVolume; @@ -2246,7 +2298,8 @@ bool AudioFlinger::DirectOutputThread::threadLoop() if (UNLIKELY(trackToRemove != 0)) { mActiveTracks.remove(trackToRemove); if (!effectChains.isEmpty()) { - LOGV("stopping track on chain %p for session Id: %d", effectChains[0].get(), trackToRemove->sessionId()); + LOGV("stopping track on chain %p for session Id: %d", effectChains[0].get(), + trackToRemove->sessionId()); effectChains[0]->stopTrack(); } if (trackToRemove->isTerminated()) { @@ -2255,7 +2308,7 @@ bool AudioFlinger::DirectOutputThread::threadLoop() } } - lockEffectChains_l(); + lockEffectChains_l(effectChains); } if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) { @@ -2301,7 +2354,7 @@ bool AudioFlinger::DirectOutputThread::threadLoop() for (size_t i = 0; i < effectChains.size(); i ++) { effectChains[i]->process_l(); } - unlockEffectChains(); + unlockEffectChains(effectChains); mLastWriteTime = systemTime(); mInWrite = true; @@ -2312,7 +2365,7 @@ bool AudioFlinger::DirectOutputThread::threadLoop() mInWrite = false; mStandby = false; } else { - unlockEffectChains(); + unlockEffectChains(effectChains); usleep(sleepTime); } @@ -2505,8 +2558,7 @@ bool AudioFlinger::DuplicatingThread::threadLoop() // prevent any changes in effect chain list and in each effect chain // during mixing and effect process as the audio buffers could be deleted // or modified if an effect is created or deleted - lockEffectChains_l(); - effectChains = mEffectChains; + lockEffectChains_l(effectChains); } if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) { @@ -2547,7 +2599,7 @@ bool AudioFlinger::DuplicatingThread::threadLoop() effectChains[i]->process_l(); } // enable changes in effect chain - unlockEffectChains(); + 
unlockEffectChains(effectChains); standbyTime = systemTime() + kStandbyTimeInNsecs; for (size_t i = 0; i < outputTracks.size(); i++) { @@ -2557,7 +2609,7 @@ bool AudioFlinger::DuplicatingThread::threadLoop() mBytesWritten += mixBufferSize; } else { // enable changes in effect chain - unlockEffectChains(); + unlockEffectChains(effectChains); usleep(sleepTime); } @@ -2859,7 +2911,9 @@ void AudioFlinger::PlaybackThread::Track::destroy() if (thread != 0) { if (!isOutputTrack()) { if (mState == ACTIVE || mState == RESUMING) { - AudioSystem::stopOutput(thread->id(), (AudioSystem::stream_type)mStreamType); + AudioSystem::stopOutput(thread->id(), + (AudioSystem::stream_type)mStreamType, + mSessionId); } AudioSystem::releaseOutput(thread->id()); } @@ -2966,7 +3020,9 @@ status_t AudioFlinger::PlaybackThread::Track::start() if (!isOutputTrack() && state != ACTIVE && state != RESUMING) { thread->mLock.unlock(); - status = AudioSystem::startOutput(thread->id(), (AudioSystem::stream_type)mStreamType); + status = AudioSystem::startOutput(thread->id(), + (AudioSystem::stream_type)mStreamType, + mSessionId); thread->mLock.lock(); } if (status == NO_ERROR) { @@ -2999,7 +3055,9 @@ void AudioFlinger::PlaybackThread::Track::stop() } if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) { thread->mLock.unlock(); - AudioSystem::stopOutput(thread->id(), (AudioSystem::stream_type)mStreamType); + AudioSystem::stopOutput(thread->id(), + (AudioSystem::stream_type)mStreamType, + mSessionId); thread->mLock.lock(); } } @@ -3016,7 +3074,9 @@ void AudioFlinger::PlaybackThread::Track::pause() LOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get()); if (!isOutputTrack()) { thread->mLock.unlock(); - AudioSystem::stopOutput(thread->id(), (AudioSystem::stream_type)mStreamType); + AudioSystem::stopOutput(thread->id(), + (AudioSystem::stream_type)mStreamType, + mSessionId); thread->mLock.lock(); } } @@ -3585,7 +3645,7 @@ sp<IAudioRecord> AudioFlinger::openRecord( } // If no 
audio session id is provided, create one here - if (sessionId != NULL && *sessionId != 0) { + if (sessionId != NULL && *sessionId != AudioSystem::SESSION_OUTPUT_MIX) { lSessionId = *sessionId; } else { lSessionId = nextUniqueId(); @@ -4416,8 +4476,8 @@ status_t AudioFlinger::setStreamOutput(uint32_t stream, int output) thread->type() != PlaybackThread::DIRECT) { MixerThread *srcThread = (MixerThread *)thread; srcThread->invalidateTracks(stream); - } } + } return NO_ERROR; } @@ -4472,12 +4532,26 @@ int AudioFlinger::nextUniqueId() status_t AudioFlinger::loadEffectLibrary(const char *libPath, int *handle) { + // check calling permissions + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + // only allow libraries loaded from /system/lib/soundfx for now + if (strncmp(gEffectLibPath, libPath, strlen(gEffectLibPath)) != 0) { + return PERMISSION_DENIED; + } + Mutex::Autolock _l(mLock); return EffectLoadLibrary(libPath, handle); } status_t AudioFlinger::unloadEffectLibrary(int handle) { + // check calling permissions + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + Mutex::Autolock _l(mLock); return EffectUnloadLibrary(handle); } @@ -4522,7 +4596,8 @@ sp<IEffect> AudioFlinger::createEffect(pid_t pid, sp<Client> client; wp<Client> wclient; - LOGV("createEffect pid %d, client %p, priority %d, sessionId %d, output %d", pid, effectClient.get(), priority, sessionId, output); + LOGV("createEffect pid %d, client %p, priority %d, sessionId %d, output %d", + pid, effectClient.get(), priority, sessionId, output); if (pDesc == NULL) { lStatus = BAD_VALUE; @@ -4577,7 +4652,7 @@ sp<IEffect> AudioFlinger::createEffect(pid_t pid, // an auxiliary version of this effect type is available found = true; memcpy(&d, &desc, sizeof(effect_descriptor_t)); - if (sessionId != 0 || + if (sessionId != AudioSystem::SESSION_OUTPUT_MIX || (desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { break; } @@ -4590,22 +4665,23 @@ sp<IEffect> 
AudioFlinger::createEffect(pid_t pid, } // For same effect type, chose auxiliary version over insert version if // connect to output mix (Compliance to OpenSL ES) - if (sessionId == 0 && + if (sessionId == AudioSystem::SESSION_OUTPUT_MIX && (d.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_AUXILIARY) { memcpy(&desc, &d, sizeof(effect_descriptor_t)); } } // Do not allow auxiliary effects on a session different from 0 (output mix) - if (sessionId != 0 && + if (sessionId != AudioSystem::SESSION_OUTPUT_MIX && (desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { lStatus = INVALID_OPERATION; goto Exit; } - // Session -1 is reserved for output stage effects that can only be created - // by audio policy manager (running in same process) - if (sessionId == -1 && getpid() != IPCThreadState::self()->getCallingPid()) { + // Session AudioSystem::SESSION_OUTPUT_STAGE is reserved for output stage effects + // that can only be created by audio policy manager (running in same process) + if (sessionId == AudioSystem::SESSION_OUTPUT_STAGE && + getpid() != IPCThreadState::self()->getCallingPid()) { lStatus = INVALID_OPERATION; goto Exit; } @@ -4617,13 +4693,14 @@ sp<IEffect> AudioFlinger::createEffect(pid_t pid, // output threads. // TODO: allow attachment of effect to inputs if (output == 0) { - if (sessionId <= 0) { - // default to first output - // TODO: define criteria to choose output when not specified. 
Or - // receive output from audio policy manager - if (mPlaybackThreads.size() != 0) { - output = mPlaybackThreads.keyAt(0); - } + if (sessionId == AudioSystem::SESSION_OUTPUT_STAGE) { + // output must be specified by AudioPolicyManager when using session + // AudioSystem::SESSION_OUTPUT_STAGE + lStatus = BAD_VALUE; + goto Exit; + } else if (sessionId == AudioSystem::SESSION_OUTPUT_MIX) { + output = AudioSystem::getOutputForEffect(&desc); + LOGV("createEffect() got output %d for effect %s", output, desc.name); } else { // look for the thread where the specified audio session is present for (size_t i = 0; i < mPlaybackThreads.size(); i++) { @@ -4636,7 +4713,7 @@ sp<IEffect> AudioFlinger::createEffect(pid_t pid, } PlaybackThread *thread = checkPlaybackThread_l(output); if (thread == NULL) { - LOGE("unknown output thread"); + LOGE("createEffect() unknown output thread"); lStatus = BAD_VALUE; goto Exit; } @@ -4651,7 +4728,8 @@ sp<IEffect> AudioFlinger::createEffect(pid_t pid, } // create effect on selected output trhead - handle = thread->createEffect_l(client, effectClient, priority, sessionId, &desc, enabled, &lStatus); + handle = thread->createEffect_l(client, effectClient, priority, sessionId, + &desc, enabled, &lStatus); if (handle != 0 && id != NULL) { *id = handle->id(); } @@ -4664,31 +4742,64 @@ Exit: return handle; } -status_t AudioFlinger::registerEffectResource_l(effect_descriptor_t *desc) { - if (mTotalEffectsCpuLoad + desc->cpuLoad > MAX_EFFECTS_CPU_LOAD) { - LOGW("registerEffectResource() CPU Load limit exceeded for Fx %s, CPU %f MIPS", - desc->name, (float)desc->cpuLoad/10); - return INVALID_OPERATION; +status_t AudioFlinger::moveEffects(int session, int srcOutput, int dstOutput) +{ + LOGV("moveEffects() session %d, srcOutput %d, dstOutput %d", + session, srcOutput, dstOutput); + Mutex::Autolock _l(mLock); + if (srcOutput == dstOutput) { + LOGW("moveEffects() same dst and src outputs %d", dstOutput); + return NO_ERROR; } - if (mTotalEffectsMemory + 
desc->memoryUsage > MAX_EFFECTS_MEMORY) { - LOGW("registerEffectResource() memory limit exceeded for Fx %s, Memory %d KB", - desc->name, desc->memoryUsage); - return INVALID_OPERATION; + PlaybackThread *srcThread = checkPlaybackThread_l(srcOutput); + if (srcThread == NULL) { + LOGW("moveEffects() bad srcOutput %d", srcOutput); + return BAD_VALUE; + } + PlaybackThread *dstThread = checkPlaybackThread_l(dstOutput); + if (dstThread == NULL) { + LOGW("moveEffects() bad dstOutput %d", dstOutput); + return BAD_VALUE; } - mTotalEffectsCpuLoad += desc->cpuLoad; - mTotalEffectsMemory += desc->memoryUsage; - LOGV("registerEffectResource_l() effect %s, CPU %d, memory %d", - desc->name, desc->cpuLoad, desc->memoryUsage); - LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory); + + Mutex::Autolock _dl(dstThread->mLock); + Mutex::Autolock _sl(srcThread->mLock); + moveEffectChain_l(session, srcThread, dstThread); + return NO_ERROR; } -void AudioFlinger::unregisterEffectResource_l(effect_descriptor_t *desc) { - mTotalEffectsCpuLoad -= desc->cpuLoad; - mTotalEffectsMemory -= desc->memoryUsage; - LOGV("unregisterEffectResource_l() effect %s, CPU %d, memory %d", - desc->name, desc->cpuLoad, desc->memoryUsage); - LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory); +// moveEffectChain_l mustbe called with both srcThread and dstThread mLocks held +status_t AudioFlinger::moveEffectChain_l(int session, + AudioFlinger::PlaybackThread *srcThread, + AudioFlinger::PlaybackThread *dstThread) +{ + LOGV("moveEffectChain_l() session %d from thread %p to thread %p", + session, srcThread, dstThread); + + sp<EffectChain> chain = srcThread->getEffectChain_l(session); + if (chain == 0) { + LOGW("moveEffectChain_l() effect chain for session %d not on source thread %p", + session, srcThread); + return INVALID_OPERATION; + } + + // remove chain first. 
This is usefull only if reconfiguring effect chain on same output thread, + // so that a new chain is created with correct parameters when first effect is added. This is + // otherwise unecessary as removeEffect_l() will remove the chain when last effect is + // removed. + srcThread->removeEffectChain_l(chain); + + // transfer all effects one by one so that new effect chain is created on new thread with + // correct buffer sizes and audio parameters and effect engines reconfigured accordingly + sp<EffectModule> effect = chain->getEffectFromId_l(0); + while (effect != 0) { + srcThread->removeEffect_l(effect); + dstThread->addEffect_l(effect); + effect = chain->getEffectFromId_l(0); + } + + return NO_ERROR; } // PlaybackThread::createEffect_l() must be called with AudioFlinger::mLock held @@ -4707,6 +4818,7 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::PlaybackThread::createEffect_l( status_t lStatus; sp<Track> track; sp<EffectChain> chain; + bool chainCreated = false; bool effectCreated = false; bool effectRegistered = false; @@ -4718,16 +4830,18 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::PlaybackThread::createEffect_l( // Do not allow auxiliary effect on session other than 0 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY && - sessionId != 0) { - LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d", desc->name, sessionId); + sessionId != AudioSystem::SESSION_OUTPUT_MIX) { + LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d", + desc->name, sessionId); lStatus = BAD_VALUE; goto Exit; } // Do not allow effects with session ID 0 on direct output or duplicating threads // TODO: add rule for hw accelerated effects on direct outputs with non PCM format - if (sessionId == 0 && mType != MIXER) { - LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d", desc->name, sessionId); + if (sessionId == AudioSystem::SESSION_OUTPUT_MIX && mType != MIXER) { + LOGW("createEffect_l() Cannot add auxiliary 
effect %s to session %d", + desc->name, sessionId); lStatus = BAD_VALUE; goto Exit; } @@ -4744,6 +4858,8 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::PlaybackThread::createEffect_l( LOGV("createEffect_l() new effect chain for session %d", sessionId); chain = new EffectChain(this, sessionId); addEffectChain_l(chain); + chain->setStrategy(getStrategyForSession_l(sessionId)); + chainCreated = true; } else { effect = chain->getEffectFromDesc_l(desc); } @@ -4751,14 +4867,15 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::PlaybackThread::createEffect_l( LOGV("createEffect_l() got effect %p on chain %p", effect == 0 ? 0 : effect.get(), chain.get()); if (effect == 0) { + int id = mAudioFlinger->nextUniqueId(); // Check CPU and memory usage - lStatus = mAudioFlinger->registerEffectResource_l(desc); + lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id); if (lStatus != NO_ERROR) { goto Exit; } effectRegistered = true; // create a new effect module if none present in the chain - effect = new EffectModule(this, chain, desc, mAudioFlinger->nextUniqueId(), sessionId); + effect = new EffectModule(this, chain, desc, id, sessionId); lStatus = effect->status(); if (lStatus != NO_ERROR) { goto Exit; @@ -4782,14 +4899,15 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::PlaybackThread::createEffect_l( Exit: if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) { + Mutex::Autolock _l(mLock); if (effectCreated) { - Mutex::Autolock _l(mLock); - if (chain->removeEffect_l(effect) == 0) { - removeEffectChain_l(chain); - } + chain->removeEffect_l(effect); } if (effectRegistered) { - mAudioFlinger->unregisterEffectResource_l(desc); + AudioSystem::unregisterEffect(effect->id()); + } + if (chainCreated) { + removeEffectChain_l(chain); } handle.clear(); } @@ -4800,26 +4918,71 @@ Exit: return handle; } -void AudioFlinger::PlaybackThread::disconnectEffect(const sp< EffectModule>& effect, - const wp<EffectHandle>& handle) { +// PlaybackThread::addEffect_l() must be 
called with AudioFlinger::mLock and +// PlaybackThread::mLock held +status_t AudioFlinger::PlaybackThread::addEffect_l(const sp<EffectModule>& effect) +{ + // check for existing effect chain with the requested audio session + int sessionId = effect->sessionId(); + sp<EffectChain> chain = getEffectChain_l(sessionId); + bool chainCreated = false; + + if (chain == 0) { + // create a new chain for this session + LOGV("addEffect_l() new effect chain for session %d", sessionId); + chain = new EffectChain(this, sessionId); + addEffectChain_l(chain); + chain->setStrategy(getStrategyForSession_l(sessionId)); + chainCreated = true; + } + LOGV("addEffect_l() %p chain %p effect %p", this, chain.get(), effect.get()); + + if (chain->getEffectFromId_l(effect->id()) != 0) { + LOGW("addEffect_l() %p effect %s already present in chain %p", + this, effect->desc().name, chain.get()); + return BAD_VALUE; + } + + status_t status = chain->addEffect_l(effect); + if (status != NO_ERROR) { + if (chainCreated) { + removeEffectChain_l(chain); + } + return status; + } + + effect->setDevice(mDevice); + effect->setMode(mAudioFlinger->getMode()); + return NO_ERROR; +} + +void AudioFlinger::PlaybackThread::removeEffect_l(const sp<EffectModule>& effect) { + + LOGV("removeEffect_l() %p effect %p", this, effect.get()); effect_descriptor_t desc = effect->desc(); + if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { + detachAuxEffect_l(effect->id()); + } + + sp<EffectChain> chain = effect->chain().promote(); + if (chain != 0) { + // remove effect chain if removing last effect + if (chain->removeEffect_l(effect) == 0) { + removeEffectChain_l(chain); + } + } else { + LOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get()); + } +} + +void AudioFlinger::PlaybackThread::disconnectEffect(const sp<EffectModule>& effect, + const wp<EffectHandle>& handle) { Mutex::Autolock _l(mLock); + LOGV("disconnectEffect() %p effect %p", this, effect.get()); // delete the 
effect module if removing last handle on it if (effect->removeHandle(handle) == 0) { - if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { - detachAuxEffect_l(effect->id()); - } - sp<EffectChain> chain = effect->chain().promote(); - if (chain != 0) { - // remove effect chain if remove last effect - if (chain->removeEffect_l(effect) == 0) { - removeEffectChain_l(chain); - } - } - mLock.unlock(); - mAudioFlinger->mLock.lock(); - mAudioFlinger->unregisterEffectResource_l(&desc); - mAudioFlinger->mLock.unlock(); + removeEffect_l(effect); + AudioSystem::unregisterEffect(effect->id()); } } @@ -4863,13 +5026,16 @@ status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& c chain->setInBuffer(buffer, ownsBuffer); chain->setOutBuffer(mMixBuffer); - // Effect chain for session -1 is inserted at end of effect chains list - // in order to be processed last as it contains output stage effects - // Effect chain for session 0 is inserted before session -1 to be processed + // Effect chain for session AudioSystem::SESSION_OUTPUT_STAGE is inserted at end of effect + // chains list in order to be processed last as it contains output stage effects + // Effect chain for session AudioSystem::SESSION_OUTPUT_MIX is inserted before + // session AudioSystem::SESSION_OUTPUT_STAGE to be processed // after track specific effects and before output stage - // Effect chain for session other than 0 is inserted at beginning of effect - // chains list to be processed before output mix effects. Relative order between - // sessions other than 0 is not important + // It is therefore mandatory that AudioSystem::SESSION_OUTPUT_MIX == 0 and + // that AudioSystem::SESSION_OUTPUT_STAGE < AudioSystem::SESSION_OUTPUT_MIX + // Effect chain for other sessions are inserted at beginning of effect + // chains list to be processed before output mix effects. 
Relative order between other + // sessions is not important size_t size = mEffectChains.size(); size_t i = 0; for (i = 0; i < size; i++) { @@ -4896,26 +5062,30 @@ size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& track->setMainBuffer(mMixBuffer); } } + break; } } return mEffectChains.size(); } -void AudioFlinger::PlaybackThread::lockEffectChains_l() +void AudioFlinger::PlaybackThread::lockEffectChains_l( + Vector<sp <AudioFlinger::EffectChain> >& effectChains) { + effectChains = mEffectChains; for (size_t i = 0; i < mEffectChains.size(); i++) { mEffectChains[i]->lock(); } } -void AudioFlinger::PlaybackThread::unlockEffectChains() +void AudioFlinger::PlaybackThread::unlockEffectChains( + Vector<sp <AudioFlinger::EffectChain> >& effectChains) { - Mutex::Autolock _l(mLock); - for (size_t i = 0; i < mEffectChains.size(); i++) { - mEffectChains[i]->unlock(); + for (size_t i = 0; i < effectChains.size(); i++) { + effectChains[i]->unlock(); } } + sp<AudioFlinger::EffectModule> AudioFlinger::PlaybackThread::getEffect_l(int sessionId, int effectId) { sp<EffectModule> effect; @@ -4927,21 +5097,23 @@ sp<AudioFlinger::EffectModule> AudioFlinger::PlaybackThread::getEffect_l(int ses return effect; } -status_t AudioFlinger::PlaybackThread::attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId) +status_t AudioFlinger::PlaybackThread::attachAuxEffect( + const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId) { Mutex::Autolock _l(mLock); return attachAuxEffect_l(track, EffectId); } -status_t AudioFlinger::PlaybackThread::attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId) +status_t AudioFlinger::PlaybackThread::attachAuxEffect_l( + const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId) { status_t status = NO_ERROR; if (EffectId == 0) { track->setAuxBuffer(0, NULL); } else { - // Auxiliary effects are always in audio session 0 - sp<EffectModule> effect = getEffect_l(0, 
EffectId); + // Auxiliary effects are always in audio session AudioSystem::SESSION_OUTPUT_MIX + sp<EffectModule> effect = getEffect_l(AudioSystem::SESSION_OUTPUT_MIX, EffectId); if (effect != 0) { if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer()); @@ -5137,7 +5309,7 @@ void AudioFlinger::EffectModule::process() if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { AudioMixer::ditherAndClamp(mConfig.inputCfg.buffer.s32, mConfig.inputCfg.buffer.s32, - mConfig.inputCfg.buffer.frameCount); + mConfig.inputCfg.buffer.frameCount/2); } // do the actual processing in the effect engine @@ -5214,7 +5386,8 @@ status_t AudioFlinger::EffectModule::configure() mConfig.outputCfg.bufferProvider.releaseBuffer = NULL; mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ; // Insert effect: - // - in session 0 or -1, always overwrites output buffer: input buffer == output buffer + // - in session AudioSystem::SESSION_OUTPUT_MIX or AudioSystem::SESSION_OUTPUT_STAGE, + // always overwrites output buffer: input buffer == output buffer // - in other sessions: // last effect in the chain accumulates in output buffer: input buffer != output buffer // other effect: overwrites output buffer: input buffer == output buffer @@ -5231,6 +5404,9 @@ status_t AudioFlinger::EffectModule::configure() mConfig.inputCfg.buffer.frameCount = thread->frameCount(); mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount; + LOGV("configure() %p thread %p buffer %p framecount %d", + this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount); + status_t cmdStatus; int size = sizeof(int); status_t status = (*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_CONFIGURE, sizeof(effect_config_t), &mConfig, &size, &cmdStatus); @@ -5753,7 +5929,7 @@ AudioFlinger::EffectChain::EffectChain(const wp<ThreadBase>& wThread, mVolumeCtrlIdx(-1), 
mLeftVolume(0), mRightVolume(0), mNewLeftVolume(0), mNewRightVolume(0) { - + mStrategy = AudioSystem::getStrategyForStream(AudioSystem::MUSIC); } AudioFlinger::EffectChain::~EffectChain() @@ -5786,7 +5962,8 @@ sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromId_l(int size_t size = mEffects.size(); for (size_t i = 0; i < size; i++) { - if (mEffects[i]->id() == id) { + // by convention, return first effect if id provided is 0 (0 is never a valid id) + if (id == 0 || mEffects[i]->id() == id) { effect = mEffects[i]; break; } @@ -5816,21 +5993,24 @@ void AudioFlinger::EffectChain::process_l() } // addEffect_l() must be called with PlaybackThread::mLock held -status_t AudioFlinger::EffectChain::addEffect_l(sp<EffectModule>& effect) +status_t AudioFlinger::EffectChain::addEffect_l(const sp<EffectModule>& effect) { effect_descriptor_t desc = effect->desc(); uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK; Mutex::Autolock _l(mLock); + effect->setChain(this); + sp<ThreadBase> thread = mThread.promote(); + if (thread == 0) { + return NO_INIT; + } + effect->setThread(thread); if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { // Auxiliary effects are inserted at the beginning of mEffects vector as // they are processed first and accumulated in chain input buffer mEffects.insertAt(effect, 0); - sp<ThreadBase> thread = mThread.promote(); - if (thread == 0) { - return NO_INIT; - } + // the input buffer for auxiliary effect contains mono samples in // 32 bit format. This is to avoid saturation in AudoMixer // accumulation stage. 
Saturation is done in EffectModule::process() before diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h index 99816f93b65e..a8c9a92da919 100644 --- a/services/audioflinger/AudioFlinger.h +++ b/services/audioflinger/AudioFlinger.h @@ -168,8 +168,7 @@ public: int *id, int *enabled); - status_t registerEffectResource_l(effect_descriptor_t *desc); - void unregisterEffectResource_l(effect_descriptor_t *desc); + virtual status_t moveEffects(int session, int srcOutput, int dstOutput); enum hardware_call_state { AUDIO_HW_IDLE = 0, @@ -619,15 +618,22 @@ private: sp<EffectChain> getEffectChain_l(int sessionId); status_t addEffectChain_l(const sp<EffectChain>& chain); size_t removeEffectChain_l(const sp<EffectChain>& chain); - void lockEffectChains_l(); - void unlockEffectChains(); + void lockEffectChains_l(Vector<sp <EffectChain> >& effectChains); + void unlockEffectChains(Vector<sp <EffectChain> >& effectChains); sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId); void detachAuxEffect_l(int effectId); - status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId); - status_t attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId); + status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track, + int EffectId); + status_t attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track, + int EffectId); void setMode(uint32_t mode); + status_t addEffect_l(const sp< EffectModule>& effect); + void removeEffect_l(const sp< EffectModule>& effect); + + uint32_t getStrategyForSession_l(int sessionId); + struct stream_type_t { stream_type_t() : volume(1.0f), @@ -690,7 +696,10 @@ private: class MixerThread : public PlaybackThread { public: - MixerThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device); + MixerThread (const sp<AudioFlinger>& audioFlinger, + AudioStreamOut* output, + int id, + uint32_t 
device); virtual ~MixerThread(); // Thread virtuals @@ -701,7 +710,8 @@ private: virtual status_t dumpInternals(int fd, const Vector<String16>& args); protected: - uint32_t prepareTracks_l(const SortedVector< wp<Track> >& activeTracks, Vector< sp<Track> > *tracksToRemove); + uint32_t prepareTracks_l(const SortedVector< wp<Track> >& activeTracks, + Vector< sp<Track> > *tracksToRemove); virtual int getTrackName_l(); virtual void deleteTrackName_l(int name); virtual uint32_t activeSleepTimeUs(); @@ -764,6 +774,9 @@ private: void audioConfigChanged_l(int event, int ioHandle, void *param2); int nextUniqueId(); + status_t moveEffectChain_l(int session, + AudioFlinger::PlaybackThread *srcThread, + AudioFlinger::PlaybackThread *dstThread); friend class AudioBuffer; @@ -931,6 +944,9 @@ private: uint32_t status() { return mStatus; } + int sessionId() { + return mSessionId; + } status_t setEnabled(bool enabled); bool isEnabled(); @@ -938,6 +954,8 @@ private: int16_t *inBuffer() { return mConfig.inputCfg.buffer.s16; } void setOutBuffer(int16_t *buffer) { mConfig.outputCfg.buffer.s16 = buffer; } int16_t *outBuffer() { return mConfig.outputCfg.buffer.s16; } + void setChain(const wp<EffectChain>& chain) { mChain = chain; } + void setThread(const wp<ThreadBase>& thread) { mThread = thread; } status_t addHandle(sp<EffectHandle>& handle); void disconnect(const wp<EffectHandle>& handle); @@ -1061,19 +1079,19 @@ private: mLock.unlock(); } - status_t addEffect_l(sp<EffectModule>& handle); + status_t addEffect_l(const sp<EffectModule>& handle); size_t removeEffect_l(const sp<EffectModule>& handle); int sessionId() { return mSessionId; } + sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor); sp<EffectModule> getEffectFromId_l(int id); bool setVolume_l(uint32_t *left, uint32_t *right); void setDevice_l(uint32_t device); void setMode_l(uint32_t mode); - void setInBuffer(int16_t *buffer, bool ownsBuffer = false) { mInBuffer = buffer; mOwnInBuffer = ownsBuffer; @@ -1092,6 
+1110,10 @@ private: void stopTrack() {mActiveTrackCnt--;} int activeTracks() { return mActiveTrackCnt;} + uint32_t strategy() { return mStrategy; } + void setStrategy(uint32_t strategy) + { mStrategy = strategy; } + status_t dump(int fd, const Vector<String16>& args); protected: @@ -1112,7 +1134,7 @@ private: uint32_t mRightVolume; // previous volume on right channel uint32_t mNewLeftVolume; // new volume on left channel uint32_t mNewRightVolume; // new volume on right channel - + uint32_t mStrategy; // strategy for this effect chain }; friend class RecordThread; @@ -1142,12 +1164,6 @@ private: #endif uint32_t mMode; - // Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units - static const uint32_t MAX_EFFECTS_CPU_LOAD = 1000; - // Maximum memory allocated to audio effects in KB - static const uint32_t MAX_EFFECTS_MEMORY = 512; - uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects - uint32_t mTotalEffectsMemory; // current memory used by effects }; // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioPolicyManagerBase.cpp b/services/audioflinger/AudioPolicyManagerBase.cpp index 549d66118302..4614c8d551b6 100644 --- a/services/audioflinger/AudioPolicyManagerBase.cpp +++ b/services/audioflinger/AudioPolicyManagerBase.cpp @@ -538,9 +538,11 @@ audio_io_handle_t AudioPolicyManagerBase::getOutput(AudioSystem::stream_type str return output; } -status_t AudioPolicyManagerBase::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream) +status_t AudioPolicyManagerBase::startOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session) { - LOGV("startOutput() output %d, stream %d", output, stream); + LOGV("startOutput() output %d, stream %d, session %d", output, stream, session); ssize_t index = mOutputs.indexOfKey(output); if (index < 0) { LOGW("startOutput() unknow output %d", output); @@ -574,9 +576,11 @@ status_t 
AudioPolicyManagerBase::startOutput(audio_io_handle_t output, AudioSyst return NO_ERROR; } -status_t AudioPolicyManagerBase::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream) +status_t AudioPolicyManagerBase::stopOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session) { - LOGV("stopOutput() output %d, stream %d", output, stream); + LOGV("stopOutput() output %d, stream %d, session %d", output, stream, session); ssize_t index = mOutputs.indexOfKey(output); if (index < 0) { LOGW("stopOutput() unknow output %d", output); @@ -602,8 +606,12 @@ status_t AudioPolicyManagerBase::stopOutput(audio_io_handle_t output, AudioSyste setOutputDevice(output, getNewDevice(output)); #ifdef WITH_A2DP - if (mA2dpOutput != 0 && !a2dpUsedForSonification() && strategy == STRATEGY_SONIFICATION) { - setStrategyMute(STRATEGY_MEDIA, false, mA2dpOutput, mOutputs.valueFor(mHardwareOutput)->mLatency*2); + if (mA2dpOutput != 0 && !a2dpUsedForSonification() && + strategy == STRATEGY_SONIFICATION) { + setStrategyMute(STRATEGY_MEDIA, + false, + mA2dpOutput, + mOutputs.valueFor(mHardwareOutput)->mLatency*2); } #endif if (output != mHardwareOutput) { @@ -826,6 +834,85 @@ status_t AudioPolicyManagerBase::getStreamVolumeIndex(AudioSystem::stream_type s return NO_ERROR; } +audio_io_handle_t AudioPolicyManagerBase::getOutputForEffect(effect_descriptor_t *desc) +{ + LOGV("getOutputForEffect()"); + // apply simple rule where global effects are attached to the same output as MUSIC streams + return getOutput(AudioSystem::MUSIC); +} + +status_t AudioPolicyManagerBase::registerEffect(effect_descriptor_t *desc, + audio_io_handle_t output, + uint32_t strategy, + int session, + int id) +{ + ssize_t index = mOutputs.indexOfKey(output); + if (index < 0) { + LOGW("registerEffect() unknown output %d", output); + return INVALID_OPERATION; + } + + if (mTotalEffectsCpuLoad + desc->cpuLoad > getMaxEffectsCpuLoad()) { + LOGW("registerEffect() CPU Load limit exceeded for Fx %s, 
CPU %f MIPS", + desc->name, (float)desc->cpuLoad/10); + return INVALID_OPERATION; + } + if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) { + LOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB", + desc->name, desc->memoryUsage); + return INVALID_OPERATION; + } + mTotalEffectsCpuLoad += desc->cpuLoad; + mTotalEffectsMemory += desc->memoryUsage; + LOGV("registerEffect() effect %s, output %d, strategy %d session %d id %d", + desc->name, output, strategy, session, id); + + LOGV("registerEffect() CPU %d, memory %d", desc->cpuLoad, desc->memoryUsage); + LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory); + + EffectDescriptor *pDesc = new EffectDescriptor(); + memcpy (&pDesc->mDesc, desc, sizeof(effect_descriptor_t)); + pDesc->mOutput = output; + pDesc->mStrategy = (routing_strategy)strategy; + pDesc->mSession = session; + mEffects.add(id, pDesc); + + return NO_ERROR; +} + +status_t AudioPolicyManagerBase::unregisterEffect(int id) +{ + ssize_t index = mEffects.indexOfKey(id); + if (index < 0) { + LOGW("unregisterEffect() unknown effect ID %d", id); + return INVALID_OPERATION; + } + + EffectDescriptor *pDesc = mEffects.valueAt(index); + + if (mTotalEffectsCpuLoad < pDesc->mDesc.cpuLoad) { + LOGW("unregisterEffect() CPU load %d too high for total %d", + pDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad); + pDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad; + } + mTotalEffectsCpuLoad -= pDesc->mDesc.cpuLoad; + if (mTotalEffectsMemory < pDesc->mDesc.memoryUsage) { + LOGW("unregisterEffect() memory %d too big for total %d", + pDesc->mDesc.memoryUsage, mTotalEffectsMemory); + pDesc->mDesc.memoryUsage = mTotalEffectsMemory; + } + mTotalEffectsMemory -= pDesc->mDesc.memoryUsage; + LOGV("unregisterEffect() effect %s, ID %d, CPU %d, memory %d", + pDesc->mDesc.name, id, pDesc->mDesc.cpuLoad, pDesc->mDesc.memoryUsage); + LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory); + + 
mEffects.removeItem(id); + delete pDesc; + + return NO_ERROR; +} + status_t AudioPolicyManagerBase::dump(int fd) { const size_t SIZE = 256; @@ -890,6 +977,19 @@ status_t AudioPolicyManagerBase::dump(int fd) write(fd, buffer, strlen(buffer)); } + snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n", + (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory); + write(fd, buffer, strlen(buffer)); + + snprintf(buffer, SIZE, "Registered effects:\n"); + write(fd, buffer, strlen(buffer)); + for (size_t i = 0; i < mEffects.size(); i++) { + snprintf(buffer, SIZE, "- Effect %d dump:\n", mEffects.keyAt(i)); + write(fd, buffer, strlen(buffer)); + mEffects.valueAt(i)->dump(fd); + } + + return NO_ERROR; } @@ -903,7 +1003,7 @@ AudioPolicyManagerBase::AudioPolicyManagerBase(AudioPolicyClientInterface *clien Thread(false), #endif //AUDIO_POLICY_TEST mPhoneState(AudioSystem::MODE_NORMAL), mRingerMode(0), mMusicStopTime(0), mLimitRingtoneVolume(false), - mLastVoiceVolume(-1.0f) + mLastVoiceVolume(-1.0f), mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0) { mpClientInterface = clientInterface; @@ -939,6 +1039,7 @@ AudioPolicyManagerBase::AudioPolicyManagerBase(AudioPolicyClientInterface *clien } else { addOutput(mHardwareOutput, outputDesc); setOutputDevice(mHardwareOutput, (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER, true); + //TODO: configure audio effect output stage here } updateDeviceForStrategy(); @@ -1153,6 +1254,9 @@ status_t AudioPolicyManagerBase::handleA2dpConnection(AudioSystem::audio_devices if (mA2dpOutput) { // add A2DP output descriptor addOutput(mA2dpOutput, outputDesc); + + //TODO: configure audio effect output stage here + // set initial stream volume for A2DP device applyStreamVolumes(mA2dpOutput, device); if (a2dpUsedForSonification()) { @@ -1270,6 +1374,7 @@ void AudioPolicyManagerBase::closeA2dpOutputs() AudioParameter param; param.add(String8("closing"), String8("true")); mpClientInterface->setParameters(mA2dpOutput, param.toString()); + 
mpClientInterface->closeOutput(mA2dpOutput); delete mOutputs.valueFor(mA2dpOutput); mOutputs.removeItem(mA2dpOutput); @@ -1283,48 +1388,54 @@ void AudioPolicyManagerBase::checkOutputForStrategy(routing_strategy strategy, u uint32_t curDevice = getDeviceForStrategy(strategy, false); bool a2dpWasUsed = AudioSystem::isA2dpDevice((AudioSystem::audio_devices)(prevDevice & ~AudioSystem::DEVICE_OUT_SPEAKER)); bool a2dpIsUsed = AudioSystem::isA2dpDevice((AudioSystem::audio_devices)(curDevice & ~AudioSystem::DEVICE_OUT_SPEAKER)); - AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mHardwareOutput); - AudioOutputDescriptor *a2dpOutputDesc; + audio_io_handle_t srcOutput = 0; + audio_io_handle_t dstOutput = 0; if (a2dpWasUsed && !a2dpIsUsed) { bool dupUsed = a2dpUsedForSonification() && a2dpWasUsed && (AudioSystem::popCount(prevDevice) == 2); - + dstOutput = mHardwareOutput; if (dupUsed) { - LOGV("checkOutputForStrategy() moving strategy %d to duplicated", strategy); - a2dpOutputDesc = mOutputs.valueFor(mDuplicatedOutput); + LOGV("checkOutputForStrategy() moving strategy %d from duplicated", strategy); + srcOutput = mDuplicatedOutput; } else { - LOGV("checkOutputForStrategy() moving strategy %d to a2dp", strategy); - a2dpOutputDesc = mOutputs.valueFor(mA2dpOutput); + LOGV("checkOutputForStrategy() moving strategy %d from a2dp", strategy); + srcOutput = mA2dpOutput; } - for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) { - if (getStrategy((AudioSystem::stream_type)i) == strategy) { - mpClientInterface->setStreamOutput((AudioSystem::stream_type)i, mHardwareOutput); - } - } // do not change newDevice if it was already set before this call by a previous call to // getNewDevice() or checkOutputForStrategy() for a strategy with higher priority - if (newDevice == 0 && hwOutputDesc->isUsedByStrategy(strategy)) { + if (newDevice == 0 && mOutputs.valueFor(mHardwareOutput)->isUsedByStrategy(strategy)) { newDevice = getDeviceForStrategy(strategy, false); } } if 
(a2dpIsUsed && !a2dpWasUsed) { bool dupUsed = a2dpUsedForSonification() && a2dpIsUsed && (AudioSystem::popCount(curDevice) == 2); - audio_io_handle_t a2dpOutput; - + srcOutput = mHardwareOutput; if (dupUsed) { - LOGV("checkOutputForStrategy() moving strategy %d from duplicated", strategy); - a2dpOutputDesc = mOutputs.valueFor(mDuplicatedOutput); - a2dpOutput = mDuplicatedOutput; + LOGV("checkOutputForStrategy() moving strategy %d to duplicated", strategy); + dstOutput = mDuplicatedOutput; } else { - LOGV("checkOutputForStrategy() moving strategy %d from a2dp", strategy); - a2dpOutputDesc = mOutputs.valueFor(mA2dpOutput); - a2dpOutput = mA2dpOutput; + LOGV("checkOutputForStrategy() moving strategy %d to a2dp", strategy); + dstOutput = mA2dpOutput; } + } + if (srcOutput != 0 && dstOutput != 0) { + // Move effects associated to this strategy from previous output to new output + for (size_t i = 0; i < mEffects.size(); i++) { + EffectDescriptor *desc = mEffects.valueAt(i); + if (desc->mSession != AudioSystem::SESSION_OUTPUT_STAGE && + desc->mStrategy == strategy && + desc->mOutput == srcOutput) { + LOGV("checkOutputForStrategy() moving effect %d to output %d", mEffects.keyAt(i), dstOutput); + mpClientInterface->moveEffects(desc->mSession, srcOutput, dstOutput); + desc->mOutput = dstOutput; + } + } + // Move tracks associated to this strategy from previous output to new output for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) { if (getStrategy((AudioSystem::stream_type)i) == strategy) { - mpClientInterface->setStreamOutput((AudioSystem::stream_type)i, a2dpOutput); + mpClientInterface->setStreamOutput((AudioSystem::stream_type)i, dstOutput); } } } @@ -1372,8 +1483,12 @@ uint32_t AudioPolicyManagerBase::getNewDevice(audio_io_handle_t output, bool fro return device; } -AudioPolicyManagerBase::routing_strategy AudioPolicyManagerBase::getStrategy(AudioSystem::stream_type stream) -{ +uint32_t AudioPolicyManagerBase::getStrategyForStream(AudioSystem::stream_type 
stream) { + return (uint32_t)getStrategy(stream); +} + +AudioPolicyManagerBase::routing_strategy AudioPolicyManagerBase::getStrategy( + AudioSystem::stream_type stream) { // stream to strategy mapping switch (stream) { case AudioSystem::VOICE_CALL: @@ -1846,6 +1961,16 @@ bool AudioPolicyManagerBase::needsDirectOuput(AudioSystem::stream_type stream, (format !=0 && !AudioSystem::isLinearPCM(format))); } +uint32_t AudioPolicyManagerBase::getMaxEffectsCpuLoad() +{ + return MAX_EFFECTS_CPU_LOAD; +} + +uint32_t AudioPolicyManagerBase::getMaxEffectsMemory() +{ + return MAX_EFFECTS_MEMORY; +} + // --- AudioOutputDescriptor class implementation AudioPolicyManagerBase::AudioOutputDescriptor::AudioOutputDescriptor() @@ -1979,5 +2104,27 @@ void AudioPolicyManagerBase::StreamDescriptor::dump(char* buffer, size_t size) mCanBeMuted); } +// --- EffectDescriptor class implementation + +status_t AudioPolicyManagerBase::EffectDescriptor::dump(int fd) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, " Output: %d\n", mOutput); + result.append(buffer); + snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy); + result.append(buffer); + snprintf(buffer, SIZE, " Session: %d\n", mSession); + result.append(buffer); + snprintf(buffer, SIZE, " Name: %s\n", mDesc.name); + result.append(buffer); + write(fd, result.string(), result.size()); + + return NO_ERROR; +} + + }; // namespace android diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp index bb3905c34c64..f24e08e4dc88 100644 --- a/services/audioflinger/AudioPolicyService.cpp +++ b/services/audioflinger/AudioPolicyService.cpp @@ -119,7 +119,8 @@ status_t AudioPolicyService::setDeviceConnectionState(AudioSystem::audio_devices if (!AudioSystem::isOutputDevice(device) && !AudioSystem::isInputDevice(device)) { return BAD_VALUE; } - if (state != AudioSystem::DEVICE_STATE_AVAILABLE && state != AudioSystem::DEVICE_STATE_UNAVAILABLE) { + if (state != 
AudioSystem::DEVICE_STATE_AVAILABLE && + state != AudioSystem::DEVICE_STATE_UNAVAILABLE) { return BAD_VALUE; } @@ -128,8 +129,9 @@ status_t AudioPolicyService::setDeviceConnectionState(AudioSystem::audio_devices return mpPolicyManager->setDeviceConnectionState(device, state, device_address); } -AudioSystem::device_connection_state AudioPolicyService::getDeviceConnectionState(AudioSystem::audio_devices device, - const char *device_address) +AudioSystem::device_connection_state AudioPolicyService::getDeviceConnectionState( + AudioSystem::audio_devices device, + const char *device_address) { if (mpPolicyManager == NULL) { return AudioSystem::DEVICE_STATE_UNAVAILABLE; @@ -175,7 +177,8 @@ status_t AudioPolicyService::setRingerMode(uint32_t mode, uint32_t mask) return NO_ERROR; } -status_t AudioPolicyService::setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config) +status_t AudioPolicyService::setForceUse(AudioSystem::force_use usage, + AudioSystem::forced_config config) { if (mpPolicyManager == NULL) { return NO_INIT; @@ -223,24 +226,28 @@ audio_io_handle_t AudioPolicyService::getOutput(AudioSystem::stream_type stream, return mpPolicyManager->getOutput(stream, samplingRate, format, channels, flags); } -status_t AudioPolicyService::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream) +status_t AudioPolicyService::startOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session) { if (mpPolicyManager == NULL) { return NO_INIT; } LOGV("startOutput() tid %d", gettid()); Mutex::Autolock _l(mLock); - return mpPolicyManager->startOutput(output, stream); + return mpPolicyManager->startOutput(output, stream, session); } -status_t AudioPolicyService::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream) +status_t AudioPolicyService::stopOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session) { if (mpPolicyManager == NULL) { return NO_INIT; } LOGV("stopOutput() tid %d", 
gettid()); Mutex::Autolock _l(mLock); - return mpPolicyManager->stopOutput(output, stream); + return mpPolicyManager->stopOutput(output, stream, session); } void AudioPolicyService::releaseOutput(audio_io_handle_t output) @@ -339,8 +346,46 @@ status_t AudioPolicyService::getStreamVolumeIndex(AudioSystem::stream_type strea return mpPolicyManager->getStreamVolumeIndex(stream, index); } +uint32_t AudioPolicyService::getStrategyForStream(AudioSystem::stream_type stream) +{ + if (mpPolicyManager == NULL) { + return 0; + } + return mpPolicyManager->getStrategyForStream(stream); +} + +audio_io_handle_t AudioPolicyService::getOutputForEffect(effect_descriptor_t *desc) +{ + if (mpPolicyManager == NULL) { + return NO_INIT; + } + Mutex::Autolock _l(mLock); + return mpPolicyManager->getOutputForEffect(desc); +} + +status_t AudioPolicyService::registerEffect(effect_descriptor_t *desc, + audio_io_handle_t output, + uint32_t strategy, + int session, + int id) +{ + if (mpPolicyManager == NULL) { + return NO_INIT; + } + return mpPolicyManager->registerEffect(desc, output, strategy, session, id); +} + +status_t AudioPolicyService::unregisterEffect(int id) +{ + if (mpPolicyManager == NULL) { + return NO_INIT; + } + return mpPolicyManager->unregisterEffect(id); +} + void AudioPolicyService::binderDied(const wp<IBinder>& who) { - LOGW("binderDied() %p, tid %d, calling tid %d", who.unsafe_get(), gettid(), IPCThreadState::self()->getCallingPid()); + LOGW("binderDied() %p, tid %d, calling tid %d", who.unsafe_get(), gettid(), + IPCThreadState::self()->getCallingPid()); } static bool tryLock(Mutex& mutex) @@ -447,10 +492,16 @@ audio_io_handle_t AudioPolicyService::openOutput(uint32_t *pDevices, return 0; } - return af->openOutput(pDevices, pSamplingRate, (uint32_t *)pFormat, pChannels, pLatencyMs, flags); + return af->openOutput(pDevices, + pSamplingRate, + (uint32_t *)pFormat, + pChannels, + pLatencyMs, + flags); } -audio_io_handle_t 
AudioPolicyService::openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) +audio_io_handle_t AudioPolicyService::openDuplicateOutput(audio_io_handle_t output1, + audio_io_handle_t output2) { sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); if (af == 0) { @@ -514,12 +565,16 @@ status_t AudioPolicyService::closeInput(audio_io_handle_t input) return af->closeInput(input); } -status_t AudioPolicyService::setStreamVolume(AudioSystem::stream_type stream, float volume, audio_io_handle_t output, int delayMs) +status_t AudioPolicyService::setStreamVolume(AudioSystem::stream_type stream, + float volume, + audio_io_handle_t output, + int delayMs) { return mAudioCommandThread->volumeCommand((int)stream, volume, (int)output, delayMs); } -status_t AudioPolicyService::setStreamOutput(AudioSystem::stream_type stream, audio_io_handle_t output) +status_t AudioPolicyService::setStreamOutput(AudioSystem::stream_type stream, + audio_io_handle_t output) { sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); if (af == 0) return PERMISSION_DENIED; @@ -527,8 +582,18 @@ status_t AudioPolicyService::setStreamOutput(AudioSystem::stream_type stream, au return af->setStreamOutput(stream, output); } +status_t AudioPolicyService::moveEffects(int session, audio_io_handle_t srcOutput, + audio_io_handle_t dstOutput) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) return PERMISSION_DENIED; + + return af->moveEffects(session, (int)srcOutput, (int)dstOutput); +} -void AudioPolicyService::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs) +void AudioPolicyService::setParameters(audio_io_handle_t ioHandle, + const String8& keyValuePairs, + int delayMs) { mAudioCommandThread->parametersCommand((int)ioHandle, keyValuePairs, delayMs); } @@ -539,7 +604,8 @@ String8 AudioPolicyService::getParameters(audio_io_handle_t ioHandle, const Stri return result; } -status_t 
AudioPolicyService::startTone(ToneGenerator::tone_type tone, AudioSystem::stream_type stream) +status_t AudioPolicyService::startTone(ToneGenerator::tone_type tone, + AudioSystem::stream_type stream) { mTonePlaybackThread->startToneCommand(tone, stream); return NO_ERROR; @@ -623,8 +689,11 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() }break; case SET_VOLUME: { VolumeData *data = (VolumeData *)command->mParam; - LOGV("AudioCommandThread() processing set volume stream %d, volume %f, output %d", data->mStream, data->mVolume, data->mIO); - command->mStatus = AudioSystem::setStreamVolume(data->mStream, data->mVolume, data->mIO); + LOGV("AudioCommandThread() processing set volume stream %d, \ + volume %f, output %d", data->mStream, data->mVolume, data->mIO); + command->mStatus = AudioSystem::setStreamVolume(data->mStream, + data->mVolume, + data->mIO); if (command->mWaitStatus) { command->mCond.signal(); mWaitWorkCV.wait(mLock); @@ -633,7 +702,8 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() }break; case SET_PARAMETERS: { ParametersData *data = (ParametersData *)command->mParam; - LOGV("AudioCommandThread() processing set parameters string %s, io %d", data->mKeyValuePairs.string(), data->mIO); + LOGV("AudioCommandThread() processing set parameters string %s, io %d", + data->mKeyValuePairs.string(), data->mIO); command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs); if (command->mWaitStatus) { command->mCond.signal(); @@ -643,7 +713,8 @@ bool AudioPolicyService::AudioCommandThread::threadLoop() }break; case SET_VOICE_VOLUME: { VoiceVolumeData *data = (VoiceVolumeData *)command->mParam; - LOGV("AudioCommandThread() processing set voice volume volume %f", data->mVolume); + LOGV("AudioCommandThread() processing set voice volume volume %f", + data->mVolume); command->mStatus = AudioSystem::setVoiceVolume(data->mVolume); if (command->mWaitStatus) { command->mCond.signal(); @@ -734,7 +805,10 @@ void 
AudioPolicyService::AudioCommandThread::stopToneCommand() mWaitWorkCV.signal(); } -status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream, float volume, int output, int delayMs) +status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream, + float volume, + int output, + int delayMs) { status_t status = NO_ERROR; @@ -752,7 +826,8 @@ status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream, float } Mutex::Autolock _l(mLock); insertCommand_l(command, delayMs); - LOGV("AudioCommandThread() adding set volume stream %d, volume %f, output %d", stream, volume, output); + LOGV("AudioCommandThread() adding set volume stream %d, volume %f, output %d", + stream, volume, output); mWaitWorkCV.signal(); if (command->mWaitStatus) { command->mCond.wait(mLock); @@ -762,7 +837,9 @@ status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream, float return status; } -status_t AudioPolicyService::AudioCommandThread::parametersCommand(int ioHandle, const String8& keyValuePairs, int delayMs) +status_t AudioPolicyService::AudioCommandThread::parametersCommand(int ioHandle, + const String8& keyValuePairs, + int delayMs) { status_t status = NO_ERROR; @@ -779,7 +856,8 @@ status_t AudioPolicyService::AudioCommandThread::parametersCommand(int ioHandle, } Mutex::Autolock _l(mLock); insertCommand_l(command, delayMs); - LOGV("AudioCommandThread() adding set parameter string %s, io %d ,delay %d", keyValuePairs.string(), ioHandle, delayMs); + LOGV("AudioCommandThread() adding set parameter string %s, io %d ,delay %d", + keyValuePairs.string(), ioHandle, delayMs); mWaitWorkCV.signal(); if (command->mWaitStatus) { command->mCond.wait(mLock); @@ -840,7 +918,8 @@ void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *comma ParametersData *data = (ParametersData *)command->mParam; ParametersData *data2 = (ParametersData *)command2->mParam; if (data->mIO != data2->mIO) break; - LOGV("Comparing parameter command %s to new 
command %s", data2->mKeyValuePairs.string(), data->mKeyValuePairs.string()); + LOGV("Comparing parameter command %s to new command %s", + data2->mKeyValuePairs.string(), data->mKeyValuePairs.string()); AudioParameter param = AudioParameter(data->mKeyValuePairs); AudioParameter param2 = AudioParameter(data2->mKeyValuePairs); for (size_t j = 0; j < param.size(); j++) { @@ -872,7 +951,8 @@ void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *comma VolumeData *data2 = (VolumeData *)command2->mParam; if (data->mIO != data2->mIO) break; if (data->mStream != data2->mStream) break; - LOGV("Filtering out volume command on output %d for stream %d", data->mIO, data->mStream); + LOGV("Filtering out volume command on output %d for stream %d", + data->mIO, data->mStream); removedCommands.add(command2); } break; case START_TONE: @@ -896,7 +976,8 @@ void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *comma removedCommands.clear(); // insert command at the right place according to its time stamp - LOGV("inserting command: %d at index %d, num commands %d", command->mCommand, (int)i+1, mAudioCommands.size()); + LOGV("inserting command: %d at index %d, num commands %d", + command->mCommand, (int)i+1, mAudioCommands.size()); mAudioCommands.insertAt(command, i + 1); } diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h index a13d0bdce327..558f455e50f1 100644 --- a/services/audioflinger/AudioPolicyService.h +++ b/services/audioflinger/AudioPolicyService.h @@ -28,7 +28,8 @@ class String8; // ---------------------------------------------------------------------------- -class AudioPolicyService: public BnAudioPolicyService, public AudioPolicyClientInterface, public IBinder::DeathRecipient +class AudioPolicyService: public BnAudioPolicyService, public AudioPolicyClientInterface, + public IBinder::DeathRecipient { public: @@ -43,8 +44,9 @@ public: virtual status_t 
setDeviceConnectionState(AudioSystem::audio_devices device, AudioSystem::device_connection_state state, const char *device_address); - virtual AudioSystem::device_connection_state getDeviceConnectionState(AudioSystem::audio_devices device, - const char *device_address); + virtual AudioSystem::device_connection_state getDeviceConnectionState( + AudioSystem::audio_devices device, + const char *device_address); virtual status_t setPhoneState(int state); virtual status_t setRingerMode(uint32_t mode, uint32_t mask); virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config); @@ -53,15 +55,21 @@ public: uint32_t samplingRate = 0, uint32_t format = AudioSystem::FORMAT_DEFAULT, uint32_t channels = 0, - AudioSystem::output_flags flags = AudioSystem::OUTPUT_FLAG_INDIRECT); - virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream); - virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream); + AudioSystem::output_flags flags = + AudioSystem::OUTPUT_FLAG_INDIRECT); + virtual status_t startOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session = 0); + virtual status_t stopOutput(audio_io_handle_t output, + AudioSystem::stream_type stream, + int session = 0); virtual void releaseOutput(audio_io_handle_t output); virtual audio_io_handle_t getInput(int inputSource, uint32_t samplingRate = 0, uint32_t format = AudioSystem::FORMAT_DEFAULT, uint32_t channels = 0, - AudioSystem::audio_in_acoustics acoustics = (AudioSystem::audio_in_acoustics)0); + AudioSystem::audio_in_acoustics acoustics = + (AudioSystem::audio_in_acoustics)0); virtual status_t startInput(audio_io_handle_t input); virtual status_t stopInput(audio_io_handle_t input); virtual void releaseInput(audio_io_handle_t input); @@ -71,6 +79,16 @@ public: virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index); virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, 
int *index); + virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream); + + virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc); + virtual status_t registerEffect(effect_descriptor_t *desc, + audio_io_handle_t output, + uint32_t strategy, + int session, + int id); + virtual status_t unregisterEffect(int id); + virtual status_t onTransact( uint32_t code, const Parcel& data, @@ -89,7 +107,8 @@ public: uint32_t *pChannels, uint32_t *pLatencyMs, AudioSystem::output_flags flags); - virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2); + virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, + audio_io_handle_t output2); virtual status_t closeOutput(audio_io_handle_t output); virtual status_t suspendOutput(audio_io_handle_t output); virtual status_t restoreOutput(audio_io_handle_t output); @@ -99,13 +118,21 @@ public: uint32_t *pChannels, uint32_t acoustics); virtual status_t closeInput(audio_io_handle_t input); - virtual status_t setStreamVolume(AudioSystem::stream_type stream, float volume, audio_io_handle_t output, int delayMs = 0); + virtual status_t setStreamVolume(AudioSystem::stream_type stream, + float volume, + audio_io_handle_t output, + int delayMs = 0); virtual status_t setStreamOutput(AudioSystem::stream_type stream, audio_io_handle_t output); - virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0); + virtual void setParameters(audio_io_handle_t ioHandle, + const String8& keyValuePairs, + int delayMs = 0); virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys); virtual status_t startTone(ToneGenerator::tone_type tone, AudioSystem::stream_type stream); virtual status_t stopTone(); virtual status_t setVoiceVolume(float volume, int delayMs = 0); + virtual status_t moveEffects(int session, + audio_io_handle_t srcOutput, + audio_io_handle_t dstOutput); private: AudioPolicyService(); |