-rw-r--r--  api/current.txt                              |   1
-rw-r--r--  api/system-current.txt                       |   1
-rw-r--r--  api/test-current.txt                         |   1
-rw-r--r--  core/jni/android_media_AudioRecord.cpp       | 230
-rw-r--r--  core/jni/android_media_AudioTrack.cpp        | 296
-rw-r--r--  media/java/android/media/AudioRecord.java    |  30
-rw-r--r--  media/java/android/media/AudioRouting.java   |   8
-rw-r--r--  media/java/android/media/AudioTrack.java     |  25
8 files changed, 357 insertions, 235 deletions
diff --git a/api/current.txt b/api/current.txt
index 3245c0a61dee..c987ce186793 100644
--- a/api/current.txt
+++ b/api/current.txt
@@ -20021,6 +20021,7 @@ package android.media {
public abstract interface AudioRouting {
method public abstract void addOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener, android.os.Handler);
method public abstract android.media.AudioDeviceInfo getPreferredDevice();
+ method public abstract android.media.AudioDeviceInfo getRoutedDevice();
method public abstract void removeOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener);
method public abstract boolean setPreferredDevice(android.media.AudioDeviceInfo);
}
diff --git a/api/system-current.txt b/api/system-current.txt
index 9a221016051d..afbc667319da 100644
--- a/api/system-current.txt
+++ b/api/system-current.txt
@@ -21529,6 +21529,7 @@ package android.media {
public abstract interface AudioRouting {
method public abstract void addOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener, android.os.Handler);
method public abstract android.media.AudioDeviceInfo getPreferredDevice();
+ method public abstract android.media.AudioDeviceInfo getRoutedDevice();
method public abstract void removeOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener);
method public abstract boolean setPreferredDevice(android.media.AudioDeviceInfo);
}
diff --git a/api/test-current.txt b/api/test-current.txt
index 0b7914bd211d..58cf13b4ba54 100644
--- a/api/test-current.txt
+++ b/api/test-current.txt
@@ -20030,6 +20030,7 @@ package android.media {
public abstract interface AudioRouting {
method public abstract void addOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener, android.os.Handler);
method public abstract android.media.AudioDeviceInfo getPreferredDevice();
+ method public abstract android.media.AudioDeviceInfo getRoutedDevice();
method public abstract void removeOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener);
method public abstract boolean setPreferredDevice(android.media.AudioDeviceInfo);
}
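The getRoutedDevice() method is surfaced identically in the public, system, and test API files above. A minimal, hypothetical caller-side sketch of the new query in Java (the helper class below is illustrative and not part of this change; per the javadoc added further down, the method returns null when the track is not playing):

    import android.media.AudioDeviceInfo;
    import android.media.AudioTrack;
    import android.util.Log;

    final class RoutedDeviceQuerySample {
        private static final String TAG = "RoutedDeviceQuery";

        // Logs where a playing AudioTrack is currently routed; returns quietly
        // when no routing information is available (e.g. the track is stopped).
        static void logCurrentRoute(AudioTrack track) {
            AudioDeviceInfo routed = track.getRoutedDevice();
            if (routed == null) {
                Log.d(TAG, "No routed device (track not playing?)");
                return;
            }
            Log.d(TAG, "Routed to " + routed.getProductName()
                    + " (type=" + routed.getType() + ")");
        }
    }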
diff --git a/core/jni/android_media_AudioRecord.cpp b/core/jni/android_media_AudioRecord.cpp
index 6904fda1bef8..3e4e3522d5dd 100644
--- a/core/jni/android_media_AudioRecord.cpp
+++ b/core/jni/android_media_AudioRecord.cpp
@@ -181,57 +181,14 @@ static sp<AudioRecord> setAudioRecord(JNIEnv* env, jobject thiz, const sp<AudioR
static jint
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
jobject jaa, jintArray jSampleRate, jint channelMask, jint channelIndexMask,
- jint audioFormat, jint buffSizeInBytes, jintArray jSession, jstring opPackageName)
+ jint audioFormat, jint buffSizeInBytes, jintArray jSession, jstring opPackageName,
+ jlong nativeRecordInJavaObj)
{
- jint elements[1];
- env->GetIntArrayRegion(jSampleRate, 0, 1, elements);
- int sampleRateInHertz = elements[0];
-
//ALOGV(">> Entering android_media_AudioRecord_setup");
- //ALOGV("sampleRate=%d, audioFormat=%d, channel mask=%x, buffSizeInBytes=%d",
- // sampleRateInHertz, audioFormat, channelMask, buffSizeInBytes);
-
- if (jaa == 0) {
- ALOGE("Error creating AudioRecord: invalid audio attributes");
- return (jint) AUDIO_JAVA_ERROR;
- }
-
- // channel index mask takes priority over channel position masks.
- if (channelIndexMask) {
- // Java channel index masks need the representation bits set.
- channelMask = audio_channel_mask_from_representation_and_bits(
- AUDIO_CHANNEL_REPRESENTATION_INDEX,
- channelIndexMask);
- }
- // Java channel position masks map directly to the native definition
-
- if (!audio_is_input_channel(channelMask)) {
- ALOGE("Error creating AudioRecord: channel mask %#x is not valid.", channelMask);
- return (jint) AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK;
- }
- uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
-
- // compare the format against the Java constants
- audio_format_t format = audioFormatToNative(audioFormat);
- if (format == AUDIO_FORMAT_INVALID) {
- ALOGE("Error creating AudioRecord: unsupported audio format %d.", audioFormat);
- return (jint) AUDIORECORD_ERROR_SETUP_INVALIDFORMAT;
- }
-
- size_t bytesPerSample = audio_bytes_per_sample(format);
-
- if (buffSizeInBytes == 0) {
- ALOGE("Error creating AudioRecord: frameCount is 0.");
- return (jint) AUDIORECORD_ERROR_SETUP_ZEROFRAMECOUNT;
- }
- size_t frameSize = channelCount * bytesPerSample;
- size_t frameCount = buffSizeInBytes / frameSize;
-
- jclass clazz = env->GetObjectClass(thiz);
- if (clazz == NULL) {
- ALOGE("Can't find %s when setting up callback.", kClassPathName);
- return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
- }
+ //ALOGV("sampleRate=%d, audioFormat=%d, channel mask=%x, buffSizeInBytes=%d "
+ // "nativeRecordInJavaObj=0x%llX",
+ // sampleRateInHertz, audioFormat, channelMask, buffSizeInBytes, nativeRecordInJavaObj);
+ audio_channel_mask_t localChanMask = inChannelMaskToNative(channelMask);
if (jSession == NULL) {
ALOGE("Error creating AudioRecord: invalid session ID pointer");
@@ -247,55 +204,132 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
nSession = NULL;
- ScopedUtfChars opPackageNameStr(env, opPackageName);
+ audio_attributes_t *paa = NULL;
+ sp<AudioRecord> lpRecorder = 0;
+ audiorecord_callback_cookie *lpCallbackData = NULL;
+
+ jclass clazz = env->GetObjectClass(thiz);
+ if (clazz == NULL) {
+ ALOGE("Can't find %s when setting up callback.", kClassPathName);
+ return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
+ }
- // create an uninitialized AudioRecord object
- sp<AudioRecord> lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));
+ // if we pass in an existing *Native* AudioRecord, we don't need to create/initialize one.
+ if (nativeRecordInJavaObj == 0) {
+ if (jaa == 0) {
+ ALOGE("Error creating AudioRecord: invalid audio attributes");
+ return (jint) AUDIO_JAVA_ERROR;
+ }
- audio_attributes_t *paa = NULL;
- // read the AudioAttributes values
- paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
- const jstring jtags =
- (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);
- const char* tags = env->GetStringUTFChars(jtags, NULL);
- // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it
- strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
- env->ReleaseStringUTFChars(jtags, tags);
- paa->source = (audio_source_t) env->GetIntField(jaa, javaAudioAttrFields.fieldRecSource);
- paa->flags = (audio_flags_mask_t)env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);
- ALOGV("AudioRecord_setup for source=%d tags=%s flags=%08x", paa->source, paa->tags, paa->flags);
-
- audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
- if (paa->flags & AUDIO_FLAG_HW_HOTWORD) {
- flags = AUDIO_INPUT_FLAG_HW_HOTWORD;
- }
- // create the callback information:
- // this data will be passed with every AudioRecord callback
- audiorecord_callback_cookie *lpCallbackData = new audiorecord_callback_cookie;
- lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
- // we use a weak reference so the AudioRecord object can be garbage collected.
- lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
- lpCallbackData->busy = false;
-
- const status_t status = lpRecorder->set(paa->source,
- sampleRateInHertz,
- format, // word length, PCM
- channelMask,
- frameCount,
- recorderCallback,// callback_t
- lpCallbackData,// void* user
- 0, // notificationFrames,
- true, // threadCanCallJava
- sessionId,
- AudioRecord::TRANSFER_DEFAULT,
- flags,
- -1, -1, // default uid, pid
- paa);
-
- if (status != NO_ERROR) {
- ALOGE("Error creating AudioRecord instance: initialization check failed with status %d.",
- status);
- goto native_init_failure;
+ if (jSampleRate == 0) {
+ ALOGE("Error creating AudioRecord: invalid sample rates");
+ return (jint) AUDIO_JAVA_ERROR;
+ }
+ jint elements[1];
+ env->GetIntArrayRegion(jSampleRate, 0, 1, elements);
+ int sampleRateInHertz = elements[0];
+
+ // channel index mask takes priority over channel position masks.
+ if (channelIndexMask) {
+ // Java channel index masks need the representation bits set.
+ localChanMask = audio_channel_mask_from_representation_and_bits(
+ AUDIO_CHANNEL_REPRESENTATION_INDEX,
+ channelIndexMask);
+ }
+ // Java channel position masks map directly to the native definition
+
+ if (!audio_is_input_channel(localChanMask)) {
+ ALOGE("Error creating AudioRecord: channel mask %#x is not valid.", localChanMask);
+ return (jint) AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK;
+ }
+ uint32_t channelCount = audio_channel_count_from_in_mask(localChanMask);
+
+ // compare the format against the Java constants
+ audio_format_t format = audioFormatToNative(audioFormat);
+ if (format == AUDIO_FORMAT_INVALID) {
+ ALOGE("Error creating AudioRecord: unsupported audio format %d.", audioFormat);
+ return (jint) AUDIORECORD_ERROR_SETUP_INVALIDFORMAT;
+ }
+
+ size_t bytesPerSample = audio_bytes_per_sample(format);
+
+ if (buffSizeInBytes == 0) {
+ ALOGE("Error creating AudioRecord: frameCount is 0.");
+ return (jint) AUDIORECORD_ERROR_SETUP_ZEROFRAMECOUNT;
+ }
+ size_t frameSize = channelCount * bytesPerSample;
+ size_t frameCount = buffSizeInBytes / frameSize;
+
+ ScopedUtfChars opPackageNameStr(env, opPackageName);
+
+ // create an uninitialized AudioRecord object
+ lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));
+
+ // read the AudioAttributes values
+ paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
+ const jstring jtags =
+ (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);
+ const char* tags = env->GetStringUTFChars(jtags, NULL);
+ // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it
+ strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+ env->ReleaseStringUTFChars(jtags, tags);
+ paa->source = (audio_source_t) env->GetIntField(jaa, javaAudioAttrFields.fieldRecSource);
+ paa->flags = (audio_flags_mask_t)env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);
+ ALOGV("AudioRecord_setup for source=%d tags=%s flags=%08x", paa->source, paa->tags, paa->flags);
+
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
+ if (paa->flags & AUDIO_FLAG_HW_HOTWORD) {
+ flags = AUDIO_INPUT_FLAG_HW_HOTWORD;
+ }
+ // create the callback information:
+ // this data will be passed with every AudioRecord callback
+ lpCallbackData = new audiorecord_callback_cookie;
+ lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
+ // we use a weak reference so the AudioRecord object can be garbage collected.
+ lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
+ lpCallbackData->busy = false;
+
+ const status_t status = lpRecorder->set(paa->source,
+ sampleRateInHertz,
+ format, // word length, PCM
+ localChanMask,
+ frameCount,
+ recorderCallback,// callback_t
+ lpCallbackData,// void* user
+ 0, // notificationFrames,
+ true, // threadCanCallJava
+ sessionId,
+ AudioRecord::TRANSFER_DEFAULT,
+ flags,
+ -1, -1, // default uid, pid
+ paa);
+
+ if (status != NO_ERROR) {
+ ALOGE("Error creating AudioRecord instance: initialization check failed with status %d.",
+ status);
+ goto native_init_failure;
+ }
+    } else {    // end if (nativeRecordInJavaObj == 0)
+ lpRecorder = (AudioRecord*)nativeRecordInJavaObj;
+ // TODO: We need to find out which members of the Java AudioRecord might need to be
+ // initialized from the Native AudioRecord
+ // these are directly returned from getters:
+ // mSampleRate
+ // mRecordSource
+ // mAudioFormat
+ // mChannelMask
+ // mChannelCount
+ // mState (?)
+ // mRecordingState (?)
+ // mPreferredDevice
+
+ // create the callback information:
+ // this data will be passed with every AudioRecord callback
+ lpCallbackData = new audiorecord_callback_cookie;
+ lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
+ // we use a weak reference so the AudioRecord object can be garbage collected.
+ lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
+ lpCallbackData->busy = false;
}
nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
@@ -726,8 +760,8 @@ static const JNINativeMethod gMethods[] = {
// name, signature, funcPtr
{"native_start", "(II)I", (void *)android_media_AudioRecord_start},
{"native_stop", "()V", (void *)android_media_AudioRecord_stop},
- {"native_setup", "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;)I",
- (void *)android_media_AudioRecord_setup},
+ {"native_setup", "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;J)I",
+ (void *)android_media_AudioRecord_setup},
{"native_finalize", "()V", (void *)android_media_AudioRecord_finalize},
{"native_release", "()V", (void *)android_media_AudioRecord_release},
{"native_read_in_byte_array",
diff --git a/core/jni/android_media_AudioTrack.cpp b/core/jni/android_media_AudioTrack.cpp
index 84cc185d13e2..660cbdcb4546 100644
--- a/core/jni/android_media_AudioTrack.cpp
+++ b/core/jni/android_media_AudioTrack.cpp
@@ -213,55 +213,17 @@ static inline audio_channel_mask_t nativeChannelMaskFromJavaChannelMasks(
// ----------------------------------------------------------------------------
static jint
-android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
- jobject jaa,
+android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,
- jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession) {
+ jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,
+ jlong nativeAudioTrack) {
- jint elements[1];
- env->GetIntArrayRegion(jSampleRate, 0, 1, elements);
- int sampleRateInHertz = elements[0];
+ ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d"
+ "nativeAudioTrack=0x%llX",
+ jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
+ nativeAudioTrack);
- ALOGV("sampleRate=%d, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d",
- sampleRateInHertz, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes);
-
- if (jaa == 0) {
- ALOGE("Error creating AudioTrack: invalid audio attributes");
- return (jint) AUDIO_JAVA_ERROR;
- }
-
- // Invalid channel representations are caught by !audio_is_output_channel() below.
- audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
- channelPositionMask, channelIndexMask);
- if (!audio_is_output_channel(nativeChannelMask)) {
- ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);
- return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
- }
-
- uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);
-
- // check the format.
- // This function was called from Java, so we compare the format against the Java constants
- audio_format_t format = audioFormatToNative(audioFormat);
- if (format == AUDIO_FORMAT_INVALID) {
- ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);
- return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
- }
-
- // compute the frame count
- size_t frameCount;
- if (audio_has_proportional_frames(format)) {
- const size_t bytesPerSample = audio_bytes_per_sample(format);
- frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
- } else {
- frameCount = buffSizeInBytes;
- }
-
- jclass clazz = env->GetObjectClass(thiz);
- if (clazz == NULL) {
- ALOGE("Can't find %s when setting up callback.", kClassPathName);
- return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
- }
+ sp<AudioTrack> lpTrack = 0;
if (jSession == NULL) {
ALOGE("Error creating AudioTrack: invalid session ID pointer");
@@ -277,91 +239,168 @@ android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
nSession = NULL;
- // create the native AudioTrack object
- sp<AudioTrack> lpTrack = new AudioTrack();
+ AudioTrackJniStorage* lpJniStorage = NULL;
audio_attributes_t *paa = NULL;
- // read the AudioAttributes values
- paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
- const jstring jtags =
- (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);
- const char* tags = env->GetStringUTFChars(jtags, NULL);
- // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it
- strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
- env->ReleaseStringUTFChars(jtags, tags);
- paa->usage = (audio_usage_t) env->GetIntField(jaa, javaAudioAttrFields.fieldUsage);
- paa->content_type =
- (audio_content_type_t) env->GetIntField(jaa, javaAudioAttrFields.fieldContentType);
- paa->flags = env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);
-
- ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
- paa->usage, paa->content_type, paa->flags, paa->tags);
-
- // initialize the callback information:
- // this data will be passed with every AudioTrack callback
- AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();
- lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
- // we use a weak reference so the AudioTrack object can be garbage collected.
- lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
- lpJniStorage->mCallbackData.busy = false;
-
- // initialize the native AudioTrack object
- status_t status = NO_ERROR;
- switch (memoryMode) {
- case MODE_STREAM:
-
- status = lpTrack->set(
- AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
- sampleRateInHertz,
- format,// word length, PCM
- nativeChannelMask,
- frameCount,
- AUDIO_OUTPUT_FLAG_NONE,
- audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
- 0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
- 0,// shared mem
- true,// thread can call Java
- sessionId,// audio session ID
- AudioTrack::TRANSFER_SYNC,
- NULL, // default offloadInfo
- -1, -1, // default uid, pid values
- paa);
- break;
- case MODE_STATIC:
- // AudioTrack is using shared memory
+ jclass clazz = env->GetObjectClass(thiz);
+ if (clazz == NULL) {
+ ALOGE("Can't find %s when setting up callback.", kClassPathName);
+ return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
+ }
+
+ // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
+ if (nativeAudioTrack == 0) {
+ if (jaa == 0) {
+ ALOGE("Error creating AudioTrack: invalid audio attributes");
+ return (jint) AUDIO_JAVA_ERROR;
+ }
- if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
- ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
- goto native_init_failure;
+ if (jSampleRate == 0) {
+ ALOGE("Error creating AudioTrack: invalid sample rates");
+ return (jint) AUDIO_JAVA_ERROR;
}
- status = lpTrack->set(
- AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
- sampleRateInHertz,
- format,// word length, PCM
- nativeChannelMask,
- frameCount,
- AUDIO_OUTPUT_FLAG_NONE,
- audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
- 0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
- lpJniStorage->mMemBase,// shared mem
- true,// thread can call Java
- sessionId,// audio session ID
- AudioTrack::TRANSFER_SHARED,
- NULL, // default offloadInfo
- -1, -1, // default uid, pid values
- paa);
- break;
+ int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
+ int sampleRateInHertz = sampleRates[0];
+ env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);
- default:
- ALOGE("Unknown mode %d", memoryMode);
- goto native_init_failure;
- }
+ // Invalid channel representations are caught by !audio_is_output_channel() below.
+ audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
+ channelPositionMask, channelIndexMask);
+ if (!audio_is_output_channel(nativeChannelMask)) {
+ ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);
+ return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
+ }
- if (status != NO_ERROR) {
- ALOGE("Error %d initializing AudioTrack", status);
- goto native_init_failure;
+ uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);
+
+ // check the format.
+ // This function was called from Java, so we compare the format against the Java constants
+ audio_format_t format = audioFormatToNative(audioFormat);
+ if (format == AUDIO_FORMAT_INVALID) {
+ ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);
+ return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
+ }
+
+ // compute the frame count
+ size_t frameCount;
+ if (audio_is_linear_pcm(format)) {
+ const size_t bytesPerSample = audio_bytes_per_sample(format);
+ frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
+ } else {
+ frameCount = buffSizeInBytes;
+ }
+
+ // create the native AudioTrack object
+ lpTrack = new AudioTrack();
+
+ // read the AudioAttributes values
+ paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
+ const jstring jtags =
+ (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);
+ const char* tags = env->GetStringUTFChars(jtags, NULL);
+ // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it
+ strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+ env->ReleaseStringUTFChars(jtags, tags);
+ paa->usage = (audio_usage_t) env->GetIntField(jaa, javaAudioAttrFields.fieldUsage);
+ paa->content_type =
+ (audio_content_type_t) env->GetIntField(jaa, javaAudioAttrFields.fieldContentType);
+ paa->flags = env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);
+
+ ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
+ paa->usage, paa->content_type, paa->flags, paa->tags);
+
+ // initialize the callback information:
+ // this data will be passed with every AudioTrack callback
+ lpJniStorage = new AudioTrackJniStorage();
+ lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
+ // we use a weak reference so the AudioTrack object can be garbage collected.
+ lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
+ lpJniStorage->mCallbackData.busy = false;
+
+ // initialize the native AudioTrack object
+ status_t status = NO_ERROR;
+ switch (memoryMode) {
+ case MODE_STREAM:
+
+ status = lpTrack->set(
+ AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
+ sampleRateInHertz,
+ format,// word length, PCM
+ nativeChannelMask,
+ frameCount,
+ AUDIO_OUTPUT_FLAG_NONE,
+ audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
+ 0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
+ 0,// shared mem
+ true,// thread can call Java
+ sessionId,// audio session ID
+ AudioTrack::TRANSFER_SYNC,
+ NULL, // default offloadInfo
+ -1, -1, // default uid, pid values
+ paa);
+ break;
+
+ case MODE_STATIC:
+ // AudioTrack is using shared memory
+
+ if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
+ ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
+ goto native_init_failure;
+ }
+
+ status = lpTrack->set(
+ AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
+ sampleRateInHertz,
+ format,// word length, PCM
+ nativeChannelMask,
+ frameCount,
+ AUDIO_OUTPUT_FLAG_NONE,
+ audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
+ 0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
+ lpJniStorage->mMemBase,// shared mem
+ true,// thread can call Java
+ sessionId,// audio session ID
+ AudioTrack::TRANSFER_SHARED,
+ NULL, // default offloadInfo
+ -1, -1, // default uid, pid values
+ paa);
+ break;
+
+ default:
+ ALOGE("Unknown mode %d", memoryMode);
+ goto native_init_failure;
+ }
+
+ if (status != NO_ERROR) {
+ ALOGE("Error %d initializing AudioTrack", status);
+ goto native_init_failure;
+ }
+ } else { // end if (nativeAudioTrack == 0)
+ lpTrack = (AudioTrack*)nativeAudioTrack;
+ // TODO: We need to find out which members of the Java AudioTrack might
+ // need to be initialized from the Native AudioTrack
+ // these are directly returned from getters:
+ // mSampleRate
+ // mAudioFormat
+ // mStreamType
+ // mChannelConfiguration
+ // mChannelCount
+ // mState (?)
+ // mPlayState (?)
+ // these may be used internally (Java AudioTrack.audioParamCheck():
+ // mChannelMask
+ // mChannelIndexMask
+ // mDataLoadMode
+
+ // initialize the callback information:
+ // this data will be passed with every AudioTrack callback
+ lpJniStorage = new AudioTrackJniStorage();
+ lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
+ // we use a weak reference so the AudioTrack object can be garbage collected.
+ lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
+ lpJniStorage->mCallbackData.busy = false;
}
nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
@@ -394,9 +433,11 @@ android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
// since we had audio attributes, the stream type was derived from them during the
// creation of the native AudioTrack: push the same value to the Java object
env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());
- // audio attributes were copied in AudioTrack creation
- free(paa);
- paa = NULL;
+ if (paa != NULL) {
+ // audio attributes were copied in AudioTrack creation
+ free(paa);
+ paa = NULL;
+ }
return (jint) AUDIO_JAVA_SUCCESS;
@@ -418,7 +459,6 @@ native_init_failure:
return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
-
// ----------------------------------------------------------------------------
static void
android_media_AudioTrack_start(JNIEnv *env, jobject thiz)
@@ -1123,7 +1163,7 @@ static const JNINativeMethod gMethods[] = {
{"native_stop", "()V", (void *)android_media_AudioTrack_stop},
{"native_pause", "()V", (void *)android_media_AudioTrack_pause},
{"native_flush", "()V", (void *)android_media_AudioTrack_flush},
- {"native_setup", "(Ljava/lang/Object;Ljava/lang/Object;[IIIIII[I)I",
+ {"native_setup", "(Ljava/lang/Object;Ljava/lang/Object;[IIIIII[IJ)I",
(void *)android_media_AudioTrack_setup},
{"native_finalize", "()V", (void *)android_media_AudioTrack_finalize},
{"native_release", "()V", (void *)android_media_AudioTrack_release},
diff --git a/media/java/android/media/AudioRecord.java b/media/java/android/media/AudioRecord.java
index 8f6b17877fd4..d84523a6523a 100644
--- a/media/java/android/media/AudioRecord.java
+++ b/media/java/android/media/AudioRecord.java
@@ -374,7 +374,7 @@ public class AudioRecord implements AudioRouting
int initResult = native_setup( new WeakReference<AudioRecord>(this),
mAudioAttributes, sampleRate, mChannelMask, mChannelIndexMask,
mAudioFormat, mNativeBufferSizeInBytes,
- session, ActivityThread.currentOpPackageName());
+ session, ActivityThread.currentOpPackageName(), 0 /*nativeRecordInJavaObj*/);
if (initResult != SUCCESS) {
loge("Error code "+initResult+" when initializing native AudioRecord object.");
return; // with mState == STATE_UNINITIALIZED
@@ -390,12 +390,31 @@ public class AudioRecord implements AudioRouting
* A constructor which explicitly connects a Native (C++) AudioRecord. For use by
* the AudioRecordRoutingProxy subclass.
* @param nativeRecordInJavaObj A C/C++ pointer to a native AudioRecord
- * (associated with an OpenSL ES recorder).
+ * (associated with an OpenSL ES recorder). Note: the caller must ensure a correct
+ * value here as no error checking is or can be done.
*/
/*package*/ AudioRecord(long nativeRecordInJavaObj) {
- mNativeRecorderInJavaObj = nativeRecordInJavaObj;
+ int[] session = { 0 };
+ //TODO: update native initialization when information about hardware init failure
+ // due to capture device already open is available.
+ // Note that for this native_setup, we are providing an already created/initialized
+ // *Native* AudioRecord, so the attributes parameters to native_setup() are ignored.
+ int initResult = native_setup(new WeakReference<AudioRecord>(this),
+ null /*mAudioAttributes*/,
+ null /*mSampleRates*/,
+ 0 /*mChannelMask*/,
+ 0 /*mChannelIndexMask*/,
+ 0 /*mAudioFormat*/,
+ 0 /*mNativeBufferSizeInBytes*/,
+ session,
+ ActivityThread.currentOpPackageName(),
+ mNativeRecorderInJavaObj);
+ if (initResult != SUCCESS) {
+ loge("Error code "+initResult+" when initializing native AudioRecord object.");
+ return; // with mState == STATE_UNINITIALIZED
+ }
- // other initialization here...
+ mSessionId = session[0];
mState = STATE_INITIALIZED;
}
@@ -1712,7 +1731,8 @@ public class AudioRecord implements AudioRouting
private native final int native_setup(Object audiorecord_this,
Object /*AudioAttributes*/ attributes,
int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
- int buffSizeInBytes, int[] sessionId, String opPackageName);
+ int buffSizeInBytes, int[] sessionId, String opPackageName,
+ long nativeRecordInJavaObj);
// TODO remove: implementation calls directly into implementation of native_release()
private native final void native_finalize();
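The new package-private constructor above exists so a proxy class can wrap an AudioRecord that was already created natively (for example by an OpenSL ES recorder). A rough sketch of how the AudioRecordRoutingProxy subclass mentioned in the javadoc would use it; this is an assumption about its shape, not the actual platform source:

    package android.media;

    // Hypothetical shape of the routing proxy: it only forwards the native
    // pointer, and native_setup() adopts that AudioRecord instead of creating one.
    class AudioRecordRoutingProxySketch extends AudioRecord {
        AudioRecordRoutingProxySketch(long nativeRecordInJavaObj) {
            super(nativeRecordInJavaObj); // a value of 0 would mean "create a new native recorder"
        }
    }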
diff --git a/media/java/android/media/AudioRouting.java b/media/java/android/media/AudioRouting.java
index 2161cf3ee7ab..41f92d49e39f 100644
--- a/media/java/android/media/AudioRouting.java
+++ b/media/java/android/media/AudioRouting.java
@@ -41,6 +41,14 @@ public interface AudioRouting {
public AudioDeviceInfo getPreferredDevice();
/**
+ * Returns an {@link AudioDeviceInfo} identifying the current routing of this
+ * AudioTrack/AudioRecord.
+ * Note: The query is only valid if the AudioTrack/AudioRecord is currently playing.
+ * If it is not, <code>getRoutedDevice()</code> will return null.
+ */
+ public AudioDeviceInfo getRoutedDevice();
+
+ /**
* Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
* changes on this AudioTrack/AudioRecord.
* @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
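Taken together with addOnRoutingListener(), the new getRoutedDevice() supports a re-query-on-change pattern. A hypothetical wiring sketch (it assumes the listener's single callback is onRoutingChanged(AudioRouting), which is not shown in this hunk):

    import android.media.AudioDeviceInfo;
    import android.media.AudioRouting;
    import android.os.Handler;

    final class RoutingWatchSample {
        // Re-queries the routed device every time the framework reports a routing change.
        static void watch(AudioRouting router, Handler handler) {
            router.addOnRoutingListener(new AudioRouting.OnRoutingChangedListener() {
                @Override
                public void onRoutingChanged(AudioRouting r) {
                    AudioDeviceInfo device = r.getRoutedDevice(); // may be null when not playing
                    // react to the new route here
                }
            }, handler);
        }
    }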
diff --git a/media/java/android/media/AudioTrack.java b/media/java/android/media/AudioTrack.java
index 708768cea19b..f78a2deb5ac4 100644
--- a/media/java/android/media/AudioTrack.java
+++ b/media/java/android/media/AudioTrack.java
@@ -505,7 +505,7 @@ public class AudioTrack implements AudioRouting
// native initialization
int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
- mNativeBufferSizeInBytes, mDataLoadMode, session);
+ mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
if (initResult != SUCCESS) {
loge("Error code "+initResult+" when initializing AudioTrack.");
return; // with mState == STATE_UNINITIALIZED
@@ -528,8 +528,6 @@ public class AudioTrack implements AudioRouting
* (associated with an OpenSL ES player).
*/
/*package*/ AudioTrack(long nativeTrackInJavaObj) {
- mNativeTrackInJavaObj = nativeTrackInJavaObj;
-
// "final"s
mAttributes = null;
mAppOps = null;
@@ -542,6 +540,25 @@ public class AudioTrack implements AudioRouting
mInitializationLooper = looper;
// other initialization...
+ // Note that for this native_setup, we are providing an already created/initialized
+ // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
+ int[] session = { 0 };
+ int initResult = native_setup(new WeakReference<AudioTrack>(this),
+ null /*mAttributes - NA*/,
+ null /*sampleRate - NA*/,
+ 0 /*mChannelMask - NA*/,
+ 0 /*mChannelIndexMask - NA*/,
+ 0 /*mAudioFormat - NA*/,
+ 0 /*mNativeBufferSizeInBytes - NA*/,
+ 0 /*mDataLoadMode - NA*/,
+ session,
+ mNativeTrackInJavaObj);
+ if (initResult != SUCCESS) {
+ loge("Error code "+initResult+" when initializing AudioTrack.");
+ return; // with mState == STATE_UNINITIALIZED
+ }
+
+ mSessionId = session[0];
mState = STATE_INITIALIZED;
}
@@ -2773,7 +2790,7 @@ public class AudioTrack implements AudioRouting
private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
Object /*AudioAttributes*/ attributes,
int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
- int buffSizeInBytes, int mode, int[] sessionId);
+ int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack);
private native final void native_finalize();
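Both wrapping constructors, AudioRecord(long) and AudioTrack(long), retrieve the session id through the same out-parameter idiom: a one-element int array that native_setup() fills in. A self-contained illustration of that idiom, with a stand-in for the native call:

    final class SessionOutParamSketch {
        // Stand-in for native_setup(): writes the allocated session id into sessionOut[0].
        static int fakeNativeSetup(int[] sessionOut) {
            sessionOut[0] = 42;
            return 0; // SUCCESS
        }

        public static void main(String[] args) {
            int[] session = { 0 };
            if (fakeNativeSetup(session) == 0) {
                System.out.println("session id = " + session[0]); // 42
            }
        }
    }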