exynos: Import sthal
* From https://gitlab.com/Linaro/96boards/e850-96/platform/hardware/samsung_slsi/-/tree/3e8336d6f04504fb2377a52909c98cd38ac1d368/exynos/libaudio/sthal
Change-Id: I1c9ec7f159e8184ff23f1dc771630ad5f12423c6
Signed-off-by: Francescodario Cuzzocrea <bosconovic@gmail.com>
diff --git a/.gitignore b/.gitignore
index 75c0c37..7875233 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,4 @@
libaudio/audiohal_comv2
libaudio/audiohal_comv2_vv
-libaudio/sthal
libaudio/sthal_vv
diff --git a/libaudio/sthal/Android.mk b/libaudio/sthal/Android.mk
new file mode 100644
index 0000000..8ca09c2
--- /dev/null
+++ b/libaudio/sthal/Android.mk
@@ -0,0 +1,59 @@
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Primary SoundTrigger HAL module
+#
+ifeq ($(BOARD_USE_SOUNDTRIGGER_HAL),true)
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_PROPRIETARY_MODULE := true
+
+DEVICE_BASE_PATH := $(TOP)/device/samsung
+
+LOCAL_SRC_FILES := \
+ sound_trigger_hw.c
+
+LOCAL_C_INCLUDES += \
+ external/tinyalsa/include \
+ $(DEVICE_BASE_PATH)/$(TARGET_BOOTLOADER_BOARD_NAME)/conf
+
+LOCAL_SHARED_LIBRARIES := \
+ liblog libcutils libtinyalsa libhardware
+
+ifeq ($(BOARD_USE_SOUNDTRIGGER_HAL_MMAP),true)
+LOCAL_CFLAGS += -DMMAP_INTERFACE_ENABLED
+endif
+
+LOCAL_MODULE := sound_trigger.primary.$(TARGET_SOC)
+
+ifeq ($(BOARD_USES_VENDORIMAGE), true)
+LOCAL_PROPRIETARY_MODULE := true
+endif
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
+
+else
+
+ifeq ($(BOARD_USE_SOUNDTRIGGER_HAL), STHAL_TEST)
+LOCAL_PATH := $(call my-dir)
+include $(call all-makefiles-under,$(LOCAL_PATH))
+endif
+
+endif
diff --git a/libaudio/sthal/sound_trigger_hw.c b/libaudio/sthal/sound_trigger_hw.c
new file mode 100644
index 0000000..fe0a613
--- /dev/null
+++ b/libaudio/sthal/sound_trigger_hw.c
@@ -0,0 +1,1848 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * @file : sound_trigger_hw.c
+ * @brief : Sound Trigger primary HAL implementation
+ * @author : Palli Satish Kumar Reddy (palli.satish@samsung.com)
+ * @version : 1.0
+ * @history
+ * 2017.02.20 : Create
+ */
+
+#define LOG_TAG "soundtrigger_hw_primary"
+#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <poll.h>
+#include <sys/prctl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <log/log.h>
+#include <cutils/uevent.h>
+#include <dlfcn.h>
+#include <cutils/str_parms.h>
+
+#include"sound_trigger_hw.h"
+#ifdef MMAP_INTERFACE_ENABLED
+#include"vts.h"
+#endif
+
+/* Note: odmword_uuid should be updated */
+static sound_trigger_uuid_t odmword_uuid = { 0x1817de20, 0xfa3b, 0x11e5, 0xbef2, { 0x00, 0x03, 0xa6, 0xd6, 0xc6, 0x1c } };
+static sound_trigger_uuid_t hotword_uuid = { 0x7038ddc8, 0x30f2, 0x11e6, 0xb0ac, { 0x40, 0xa8, 0xf0, 0x3d, 0x3f, 0x15 } };
+
+
+// A few function declarations to have some sort of logical ordering of code below.
+static void *callback_thread_loop(void *context);
+static void stdev_close_callback_thread_sockets(struct sound_trigger_device *stdev);
+static void stdev_join_callback_thread(struct sound_trigger_device *stdev,
+ bool keep_vts_powered);
+static int stdev_stop_recognition(const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t handle);
+static int stdev_stop_recognition_l(struct sound_trigger_device *stdev,
+ sound_model_handle_t handle);
+static void handle_stop_recognition_l(struct sound_trigger_device *stdev);
+
+
+// Since there's only ever one sound_trigger_device, keep it as a global so that other people can
+// dlopen this lib to get at the streaming audio.
+static struct sound_trigger_device g_stdev = { .lock = PTHREAD_MUTEX_INITIALIZER };
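+
+/*
+ * Usage sketch, for illustration only: how a client such as the primary audio
+ * HAL could dlopen this library and resolve the streaming entry points
+ * exported below. The library name used here is a hypothetical example; a
+ * real consumer resolves the actual vendor library path itself.
+ *
+ *   #include <dlfcn.h>
+ *
+ *   void *lib = dlopen("sound_trigger.primary.exynos850.so", RTLD_NOW);
+ *   if (lib) {
+ *       int (*open_streaming)(void) =
+ *           (int (*)(void))dlsym(lib, "sound_trigger_open_for_streaming");
+ *       size_t (*read_samples)(int, void *, size_t) =
+ *           (size_t (*)(int, void *, size_t))dlsym(lib, "sound_trigger_read_samples");
+ *       int (*close_streaming)(int) =
+ *           (int (*)(int))dlsym(lib, "sound_trigger_close_for_streaming");
+ *       if (open_streaming && read_samples && close_streaming) {
+ *           int handle = open_streaming();      // returns 1 on success
+ *           char buf[3840];                     // 120ms of 16kHz/16-bit mono
+ *           if (handle > 0 && read_samples(handle, buf, sizeof(buf)) == 0) {
+ *               // consume buf ...
+ *           }
+ *           if (handle > 0)
+ *               close_streaming(handle);
+ *       }
+ *       dlclose(lib);
+ *   }
+ */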
+
+// Utility function for configuring MIC mixer controls
+int set_mixer_ctrls(
+ struct sound_trigger_device *stdev,
+ char *path_name[],
+ int *path_ctlvalue,
+ int ctrl_count,
+ bool reverse)
+{
+ int i = (reverse ? (ctrl_count - 1): 0);
+ int ret = 0;
+ struct mixer_ctl *mixerctl = NULL;
+
+ ALOGV("%s, path: %s", __func__, path_name[0]);
+
+ if (stdev->mixer) {
+ //for (i=0; i < ctrl_count; i++) {
+ while(ctrl_count) {
+ ALOGVV("%s, ctrl_count: %d Loop index: %d", __func__, ctrl_count, i);
+ /* Get required control from mixer */
+ mixerctl = mixer_get_ctl_by_name(stdev->mixer, path_name[i]);
+ if (mixerctl) {
+ /* Enable the control */
+ if (path_ctlvalue)
+ ret = mixer_ctl_set_value(mixerctl, 0, path_ctlvalue[i]);
+ else
+ ret = mixer_ctl_set_value(mixerctl, 0, 0);
+
+ if (ret) {
+ ALOGE("%s: %s Failed to configure\n", __func__, path_name[i]);
+ ret = -EINVAL;
+ break;
+ } else {
+ ALOGV("%s: %s configured value: %d\n", __func__, path_name[i],
+ (path_ctlvalue ? path_ctlvalue[i] : 0));
+ }
+ } else {
+ ALOGE("%s: %s control doesn't exist\n", __func__, path_name[i]);
+ ret = -EINVAL;
+ break;
+ }
+ ctrl_count--;
+ if (reverse)
+ i--;
+ else
+ i++;
+ }
+ } else {
+ ALOGE("%s: Failed to open mixer\n", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
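+
+/*
+ * Usage sketch, for illustration only: set_mixer_ctrls() walks two parallel
+ * arrays of equal length (control names and values) and applies them
+ * front-to-back, or back-to-front when 'reverse' is set (typically for
+ * tear-down). Passing NULL for the value array writes 0 to every control.
+ * The control names below are hypothetical placeholders; the real tables
+ * come from soundtrigger_conf.h (e.g. main_mic_ctlname / main_mic_ctlvalue).
+ *
+ *   char *example_ctlname[]  = { "VTS Example Switch", "VTS Example Gain" };
+ *   int   example_ctlvalue[] = { 1, 4 };
+ *
+ *   // enable in listed order ...
+ *   set_mixer_ctrls(stdev, example_ctlname, example_ctlvalue, 2, false);
+ *   // ... later, disable in reverse order by writing 0 to each control
+ *   set_mixer_ctrls(stdev, example_ctlname, NULL, 2, true);
+ */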
+
+#ifdef MMAP_INTERFACE_ENABLED
+// Utility function for loading model binary to kernel through mmap interface
+static int load_modelbinary(
+ struct sound_trigger_device *stdev,
+ int model_index,
+ char *data,
+ int len)
+{
+ unsigned long ioctl_cmd = 0;
+ int ret = 0;
+
+ ALOGV("%s: Actual size %d\n", __func__, len);
+
+ /* Clamp to the mapped buffer size first to avoid overflowing it */
+ if (len > VTSDRV_MISC_MODEL_BIN_MAXSZ) {
+ ALOGW("%s: Model size[%d] > Mapped bufsize[%d], truncating",
+ __func__, len, VTSDRV_MISC_MODEL_BIN_MAXSZ);
+ len = VTSDRV_MISC_MODEL_BIN_MAXSZ;
+ }
+
+ /* Copy model binary to VTS mapped address */
+ memcpy(stdev->mapped_addr, data, len);
+
+ ALOGV("%s: %s Model loaded size[%d]", __func__,
+ (model_index == HOTWORD_INDEX ? "Google" : "ODMVoice"), len);
+
+ if (model_index == HOTWORD_INDEX)
+ ioctl_cmd = VTSDRV_MISC_IOCTL_WRITE_GOOGLE;
+ else
+ ioctl_cmd = VTSDRV_MISC_IOCTL_WRITE_ODMVOICE;
+
+ /* Update model binary information to VTS misc driver */
+ if (ioctl(stdev->vtsdev_fd, ioctl_cmd, &len) < 0) {
+ ALOGE("%s: VTS device IOCTL failed", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+#else
+// Utility function for loading model binary to kernel through sysfs interface
+static int sysfs_write(
+ const char *path,
+ char *data,
+ int len)
+{
+ char buf[80];
+ int fd = open(path, O_WRONLY);
+ int tmp = 0, written = 0;
+ int ret = 0;
+
+ ALOGV("%s: Actual size %d\n", __func__, len);
+
+ if (fd < 0) {
+ strerror_r(errno, buf, sizeof(buf));
+ ALOGE("Error opening %s: %s\n", path, buf);
+ return fd;
+ }
+
+ while (len) {
+ tmp = write(fd, data+written, len);
+ if (tmp < 0) {
+ strerror_r(errno, buf, sizeof(buf));
+ ALOGE("Error writing to %s: %s\n", path, buf);
+ ret = tmp;
+ break;
+ }
+ len -= tmp;
+ written += tmp;
+ //ALOGV("%s: current written %d Actual %d Total written %d\n", __func__, tmp, len, written);
+ }
+
+ ALOGV("%s: Total written %d\n", __func__, written);
+
+ close(fd);
+ return ret;
+}
+#endif
+
+// Utility function for allocating a hotword recognition event. Caller receives ownership of
+// allocated struct.
+static struct sound_trigger_recognition_event *sound_trigger_hotword_event_alloc(
+ struct sound_trigger_device *stdev)
+{
+ struct sound_trigger_phrase_recognition_event *event =
+ (struct sound_trigger_phrase_recognition_event *)calloc(
+ 1, sizeof(struct sound_trigger_phrase_recognition_event));
+ if (!event) {
+ return NULL;
+ }
+
+ event->common.status = RECOGNITION_STATUS_SUCCESS;
+ event->common.type = SOUND_MODEL_TYPE_KEYPHRASE;
+ event->common.model = stdev->model_handles[HOTWORD_INDEX];
+
+ if (stdev->configs[HOTWORD_INDEX]) {
+ unsigned int i;
+
+ event->num_phrases = stdev->configs[HOTWORD_INDEX]->num_phrases;
+ if (event->num_phrases > SOUND_TRIGGER_MAX_PHRASES)
+ event->num_phrases = SOUND_TRIGGER_MAX_PHRASES;
+ for (i=0; i < event->num_phrases; i++)
+ memcpy(&event->phrase_extras[i], &stdev->configs[HOTWORD_INDEX]->phrases[i],
+ sizeof(struct sound_trigger_phrase_recognition_extra));
+ }
+
+ event->num_phrases = 1;
+ event->phrase_extras[0].confidence_level = 100;
+ event->phrase_extras[0].num_levels = 1;
+ event->phrase_extras[0].levels[0].level = 100;
+ event->phrase_extras[0].levels[0].user_id = 0;
+ // Signify that all the data is coming through streaming, not through the
+ // buffer.
+ event->common.capture_available = true;
+ event->common.trigger_in_data = false;
+
+ event->common.audio_config = AUDIO_CONFIG_INITIALIZER;
+ event->common.audio_config.sample_rate = 16000;
+ event->common.audio_config.channel_mask = AUDIO_CHANNEL_IN_MONO;
+ event->common.audio_config.format = AUDIO_FORMAT_PCM_16_BIT;
+
+ return &event->common;
+}
+
+// Utility function for allocating an ODMVoice recognition event. Caller receives ownership of
+// allocated struct.
+static struct sound_trigger_recognition_event *sound_trigger_odmvoice_event_alloc(
+ struct sound_trigger_device* stdev)
+{
+#if 0 //Generic recognition event
+ struct sound_trigger_generic_recognition_event *event =
+ (struct sound_trigger_generic_recognition_event *)calloc(
+ 1, sizeof(struct sound_trigger_generic_recognition_event));
+ if (!event) {
+ return NULL;
+ }
+
+ event->common.status = RECOGNITION_STATUS_SUCCESS;
+ event->common.type = SOUND_MODEL_TYPE_GENERIC;
+ event->common.model = stdev->model_handles[ODMVOICE_INDEX];
+ // Signify that all the data is coming through streaming, not through the
+ // buffer.
+ event->common.capture_available = true;
+
+ event->common.audio_config = AUDIO_CONFIG_INITIALIZER;
+ event->common.audio_config.sample_rate = 16000;
+ event->common.audio_config.channel_mask = AUDIO_CHANNEL_IN_MONO;
+ event->common.audio_config.format = AUDIO_FORMAT_PCM_16_BIT;
+#else //as ODMVoice model
+ struct sound_trigger_phrase_recognition_event *event =
+ (struct sound_trigger_phrase_recognition_event *)calloc(
+ 1, sizeof(struct sound_trigger_phrase_recognition_event));
+ if (!event) {
+ return NULL;
+ }
+
+ event->common.status = RECOGNITION_STATUS_SUCCESS;
+ event->common.type = SOUND_MODEL_TYPE_KEYPHRASE;
+ event->common.model = stdev->model_handles[ODMVOICE_INDEX];
+
+ if (stdev->configs[ODMVOICE_INDEX]) {
+ unsigned int i;
+
+ event->num_phrases = stdev->configs[ODMVOICE_INDEX]->num_phrases;
+ if (event->num_phrases > SOUND_TRIGGER_MAX_PHRASES)
+ event->num_phrases = SOUND_TRIGGER_MAX_PHRASES;
+ for (i=0; i < event->num_phrases; i++)
+ memcpy(&event->phrase_extras[i], &stdev->configs[ODMVOICE_INDEX]->phrases[i],
+ sizeof(struct sound_trigger_phrase_recognition_extra));
+ }
+
+ event->num_phrases = 1;
+ event->phrase_extras[0].confidence_level = 100;
+ event->phrase_extras[0].num_levels = 1;
+ event->phrase_extras[0].levels[0].level = 100;
+ event->phrase_extras[0].levels[0].user_id = 0;
+ // Signify that all the data is coming through streaming, not through the
+ // buffer.
+ event->common.capture_available = true;
+ event->common.trigger_in_data = false;
+
+ event->common.audio_config = AUDIO_CONFIG_INITIALIZER;
+ event->common.audio_config.sample_rate = 16000;
+ event->common.audio_config.channel_mask = AUDIO_CHANNEL_IN_MONO;
+ event->common.audio_config.format = AUDIO_FORMAT_PCM_16_BIT;
+#endif
+ return &event->common;
+}
+
+static int stdev_get_properties(
+ const struct sound_trigger_hw_device *dev,
+ struct sound_trigger_properties *properties)
+{
+ struct sound_trigger_device *stdev = (struct sound_trigger_device *)dev;
+
+ ALOGI("%s", __func__);
+ if (stdev == NULL || properties == NULL) {
+ return -EINVAL;
+ }
+ memcpy(properties, &hw_properties, sizeof(struct sound_trigger_properties));
+ return 0;
+}
+
+// Parses extra config structure data for ODMVoice-specific information
+static void ParseExtraConfigData(
+ struct sound_trigger_device *stdev,
+ const char *keyValuePairs)
+{
+ ALOGV("%s : %s", __func__, keyValuePairs);
+ struct str_parms *parms = str_parms_create_str(keyValuePairs);
+ int value;
+ int ret;
+
+ if (!parms) {
+ ALOGE("%s: str_params NULL", __func__);
+ return;
+ }
+
+ /* Note: if any extra config data is defined by the ODM,
+ * the parsing strings should be updated to match the ODM-specific requirements.
+ * Information that can be set in the extra data:
+ * 1. backlog_size: how many ms of data preceding the trigger word to capture
+ * 2. voice_trigger_mode: ODM specific; otherwise the default trigger mode is used
+ */
+
+ // get backlog_size
+ ret = str_parms_get_int(parms, "backlog_size", &value);
+ if (ret >= 0) {
+ ALOGV("backlog_size = (%d)", value);
+ stdev->backlog_size = value;
+ str_parms_del(parms, "backlog_size");
+ }
+
+ /* ODM-specific voice trigger mode, if any.
+ * Currently 3 modes: reserved1, reserved2 and the default ODM Voice trigger mode.
+ */
+ ret = str_parms_get_int(parms, "voice_trigger_mode", &value);
+ if (ret >= 0) {
+ ALOGV("voice_trigger_mode = (%d)", value);
+ stdev->odmvoicemodel_mode = value;
+ str_parms_del(parms, "voice_trigger_mode");
+ }
+ str_parms_destroy(parms);
+}
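+
+/*
+ * Usage sketch, for illustration only: the extra config blob parsed above is a
+ * str_parms-style key/value string placed directly after
+ * struct sound_trigger_recognition_config. The keys are "backlog_size" (ms of
+ * audio preceding the trigger to keep) and "voice_trigger_mode" (ODM-specific
+ * mode id); the values below are examples, not requirements.
+ *
+ *   const char *extra = "backlog_size=2000;voice_trigger_mode=1";
+ *   size_t extra_len = strlen(extra) + 1;
+ *   struct sound_trigger_recognition_config *cfg =
+ *       calloc(1, sizeof(*cfg) + extra_len);
+ *   if (cfg) {
+ *       cfg->data_offset = sizeof(*cfg);
+ *       cfg->data_size = extra_len;
+ *       memcpy((char *)cfg + cfg->data_offset, extra, extra_len);
+ *       // cfg is then handed to start_recognition(); stdev_start_recognition()
+ *       // forwards the string at ((char *)cfg + sizeof(*cfg)) to
+ *       // ParseExtraConfigData().
+ *   }
+ */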
+
+// If enable_mic = 0, then the VTS MIC controls are disabled.
+// Must be called with the stdev->lock held.
+static void stdev_vts_set_mic(
+ struct sound_trigger_device *stdev,
+ int enable_mic)
+{
+ char **active_mic_ctrls = NULL;
+ int *ctrl_values = NULL;
+
+ if (enable_mic != 0) {
+ /* Check whether MIC controls are configured or not, if not configure first */
+ /* FIXME: add condition to check whether MAIN or HEADSET MIC should be configured */
+ if (!stdev->is_mic_configured) {
+ active_mic_ctrls = main_mic_ctlname;
+ ctrl_values = main_mic_ctlvalue;
+
+ if (stdev->active_mic == VTS_HEADSET_MIC) {
+ active_mic_ctrls = headset_mic_ctlname;
+ ctrl_values = headset_mic_ctlvalue;
+ }
+
+ if (set_mixer_ctrls(stdev, active_mic_ctrls, ctrl_values, MAIN_MIC_CONTROL_COUNT, false)) {
+ ALOGW("%s: Enabling MIC control configuration Failed", __func__);
+ }
+ stdev->is_mic_configured = 1;
+ ALOGD("%s: Enable MIC Controls ", __func__);
+ }
+ } else {
+ active_mic_ctrls = main_mic_ctlname;
+
+ if (stdev->active_mic == VTS_HEADSET_MIC)
+ active_mic_ctrls = headset_mic_ctlname;
+
+ /* Reset MIC controls for disabling VTS */
+ if (stdev->is_mic_configured) {
+ if (set_mixer_ctrls(stdev, active_mic_ctrls, NULL, MAIN_MIC_CONTROL_COUNT, true)) {
+ ALOGW("%s: Enabling MIC control configuration Failed", __func__);
+ }
+ stdev->is_mic_configured = 0;
+ ALOGD("%s: Disable MIC Controls ", __func__);
+ }
+ }
+
+ return;
+}
+
+// If enabled_algorithms = 0, then the VTS will be turned off. Otherwise, treated as a bit mask for
+// which algorithms should be enabled on the VTS. Must be called with the stdev->lock held.
+static void stdev_vts_set_power(
+ struct sound_trigger_device *stdev,
+ int enabled_algorithms)
+{
+ ALOGV("%s enabled: %d", __func__, enabled_algorithms);
+
+ stdev->is_streaming = 0;
+
+ if (stdev->streaming_pcm) {
+ ALOGW("%s: Streaming PCM node is not closed", __func__);
+ pcm_close(stdev->streaming_pcm);
+ stdev->streaming_pcm = NULL;
+ }
+ stdev->is_seamless_recording = false;
+
+ if (enabled_algorithms != 0) {
+ /* Configure MIC controls first */
+ stdev_vts_set_mic(stdev, true);
+
+ /* Start recognition of bit masked algorithms */
+ if (enabled_algorithms & (0x1 << HOTWORD_INDEX)) {
+ ALOGV("%s: Google Model recognization start", __func__);
+ if (set_mixer_ctrls(stdev, model_recognize_start_ctlname,
+ hotword_recognize_start_ctlvalue, MODEL_CONTROL_COUNT, false)) {
+ ALOGE("%s: Google Model recognization start Failed", __func__);
+ goto exit;
+ }
+ stdev->recognize_started |= (0x1 << HOTWORD_INDEX);
+
+ if(stdev->notify_sthal_status)
+ stdev->notify_sthal_status(MODEL_RECOGNIZE_STARTED);
+ ALOGD("%s: Google Model recognization started & Notified to AudioHAL", __func__);
+ }
+
+ if (enabled_algorithms & (0x1 << ODMVOICE_INDEX)) {
+ int *ctrl_values = NULL;
+
+ if (stdev->odmvoicemodel_mode == ODMVOICE_RESERVED1_MODE)
+ ctrl_values = odmvoice_reserved1recognize_start_ctlvalue;
+ else if (stdev->odmvoicemodel_mode == ODMVOICE_RESERVED2_MODE)
+ ctrl_values = odmvoice_reserved2recognize_start_ctlvalue;
+ else if (stdev->odmvoicemodel_mode == ODMVOICE_TRIGGER_MODE)
+ ctrl_values = odmvoice_triggerrecognize_start_ctlvalue;
+ else {
+ ALOGE("%s: Unknown ODMVoice recognition mode to start, set default ODMVoice Trigger mode", __func__);
+ ctrl_values = odmvoice_triggerrecognize_start_ctlvalue;
+ }
+
+ ALOGV("%s: ODMVoice Model [%s] recognization start", __func__,
+ ((stdev->odmvoicemodel_mode == ODMVOICE_RESERVED2_MODE) ?
+ "ODM Reserved2 mode" : ((stdev->odmvoicemodel_mode == ODMVOICE_RESERVED1_MODE) ?
+ "ODM Reserved1 mode" : "ODMVoice trigger mode")));
+ if (set_mixer_ctrls(stdev, model_recognize_start_ctlname,
+ ctrl_values, MODEL_CONTROL_COUNT, false)) {
+ ALOGE("%s: ODMVoice Model recognization start Failed", __func__);
+ goto exit;
+ }
+
+ /* handle backlog control size */
+ if ((stdev->odmvoicemodel_mode == ODMVOICE_RESERVED1_MODE ||
+ stdev->odmvoicemodel_mode == ODMVOICE_TRIGGER_MODE) &&
+ stdev->backlog_size) {
+ if (set_mixer_ctrls(stdev, model_backlog_size_ctlname,
+ &stdev->backlog_size, MODEL_BACKLOG_CONTROL_COUNT, false)) {
+ ALOGE("%s: ODMVoice Model backlog size configuration Failed", __func__);
+ goto exit;
+ }
+ ALOGD("%s: ODMVoice Model Backlog size [%d] configured", __func__, stdev->backlog_size);
+ }
+ stdev->recognize_started |= (0x1 << ODMVOICE_INDEX);
+ ALOGD("%s: ODMVoice Model [%s] recognization started", __func__,
+ ((stdev->odmvoicemodel_mode == ODMVOICE_RESERVED2_MODE) ?
+ "ODM Reserved2 mode" : ((stdev->odmvoicemodel_mode == ODMVOICE_RESERVED1_MODE) ?
+ "ODM Reserved1 mode" : "ODMVoice trigger mode")));
+ }
+ } else {
+ /* Stop recognition of previous started models */
+ if (stdev->recognize_started & (0x1 << HOTWORD_INDEX)) {
+ if (set_mixer_ctrls(stdev, model_recognize_stop_ctlname,
+ hotword_recognize_stop_ctlvalue, MODEL_CONTROL_COUNT, false)) {
+ ALOGE("%s: Google Model recognization stop Failed", __func__);
+ goto exit;
+ }
+ stdev->recognize_started &= ~(0x1 << HOTWORD_INDEX);
+
+ if(stdev->notify_sthal_status)
+ stdev->notify_sthal_status(MODEL_RECOGNIZE_STOPPED);
+ ALOGD("%s: Google Model recognization stopped & Notified to AudioHAL", __func__);
+ }
+
+ if (stdev->recognize_started & (0x1 << ODMVOICE_INDEX)) {
+ int *ctrl_values = NULL;
+
+ if (stdev->odmvoicemodel_mode == ODMVOICE_RESERVED1_MODE)
+ ctrl_values = odmvoice_reserved1recognize_stop_ctlvalue;
+ else if (stdev->odmvoicemodel_mode == ODMVOICE_RESERVED2_MODE)
+ ctrl_values = odmvoice_reserved2recognize_stop_ctlvalue;
+ else if (stdev->odmvoicemodel_mode == ODMVOICE_TRIGGER_MODE)
+ ctrl_values = odmvoice_triggerrecognize_stop_ctlvalue;
+ else {
+ ALOGE("%s: Unknown ODMVoice recognition mode to stop, use default ODMVoice Trigger mode", __func__);
+ ctrl_values = odmvoice_triggerrecognize_stop_ctlvalue;
+ }
+
+ if (set_mixer_ctrls(stdev, model_recognize_stop_ctlname,
+ ctrl_values, MODEL_CONTROL_COUNT, false)) {
+ ALOGE("%s: ODMVoice Model recognization stop Failed", __func__);
+ goto exit;
+ }
+ stdev->recognize_started &= ~(0x1 << ODMVOICE_INDEX);
+ ALOGD("%s: ODMVoice Model [%s] recognization stopped", __func__,
+ ((stdev->odmvoicemodel_mode == ODMVOICE_RESERVED2_MODE) ?
+ "ODM Reserved2 mode" : ((stdev->odmvoicemodel_mode == ODMVOICE_RESERVED1_MODE) ?
+ "ODM Reserved1 mode" : "ODMVoice trigger mode")));
+ }
+
+ if (!stdev->recognize_started && !stdev->is_recording) {
+ /* Reset MIC controls for disabling VTS */
+ stdev_vts_set_mic(stdev, false);
+ }
+ }
+
+exit:
+ return;
+}
+
+static int stdev_init_mixer(struct sound_trigger_device *stdev)
+{
+ int ret = -1;
+
+ ALOGV("%s", __func__);
+
+ stdev->mixer = mixer_open(VTS_MIXER_CARD);
+ if (!stdev->mixer) {
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (stdev->mixer) {
+ mixer_close(stdev->mixer);
+ }
+ return ret;
+}
+
+static void stdev_close_mixer(struct sound_trigger_device *stdev)
+{
+ ALOGV("%s", __func__);
+
+ stdev_vts_set_power(stdev, 0);
+ stdev_join_callback_thread(stdev, false);
+ mixer_close(stdev->mixer);
+}
+
+// Starts the callback thread if not already running. Returns 0 on success, or a negative error code
+// otherwise. Must be called with the stdev->lock held.
+static int stdev_start_callback_thread(struct sound_trigger_device *stdev)
+{
+ ALOGV("%s", __func__);
+
+ if (stdev->callback_thread_active) {
+ ALOGV("%s callback thread is already running", __func__);
+ return 0;
+ }
+ int ret = 0;
+
+ // If we have existing sockets to communicate with the thread, close them and make new ones.
+ stdev_close_callback_thread_sockets(stdev);
+
+ int thread_sockets[2];
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, thread_sockets) == -1) {
+ ALOGE("%s: Failed to create socket pair", __func__);
+ ret = errno;
+ goto err;
+ }
+ stdev->send_socket = thread_sockets[0];
+ stdev->term_socket = thread_sockets[1];
+
+ stdev->uevent_socket = uevent_open_socket(64*1024, true);
+ if (stdev->uevent_socket == -1) {
+ ALOGE("%s: Failed to open uevent socket", __func__);
+ ret = errno;
+ goto err;
+ }
+
+ stdev->callback_thread_active = true;
+ ret = pthread_create(&stdev->callback_thread, (const pthread_attr_t *) NULL,
+ callback_thread_loop, stdev);
+ if (ret) {
+ goto err;
+ }
+ return 0;
+
+err:
+ stdev->callback_thread_active = false;
+ stdev_close_callback_thread_sockets(stdev);
+ return -ret;
+}
+
+// Helper function to close (and mark as closed) all sockets used by the callback thread. Must be
+// called with the stdev->lock held.
+static void stdev_close_callback_thread_sockets(struct sound_trigger_device *stdev)
+{
+ ALOGV("%s", __func__);
+
+ if (stdev->send_socket >=0) {
+ close(stdev->send_socket);
+ stdev->send_socket = -1;
+ }
+ if (stdev->term_socket >=0) {
+ close(stdev->term_socket);
+ stdev->term_socket = -1;
+ }
+ if (stdev->uevent_socket >= 0) {
+ close(stdev->uevent_socket);
+ stdev->uevent_socket = -1;
+ }
+}
+
+// If the callback thread is active, stops it and joins the thread. Also closes all resources needed
+// to talk to the thread. If keep_vts_powered is false, then the VTS will be shut down after the
+// thread joins. Must be called with the stdev->lock held.
+static void stdev_join_callback_thread(
+ struct sound_trigger_device *stdev,
+ bool keep_vts_powered)
+{
+ ALOGV("%s", __func__);
+
+ if (stdev->callback_thread_active) {
+ // If the thread is active, send the termination signal and join up with it. Also, turn off
+ // the VTS, since we're no longer listening for events. callback_thread_active will be set
+ // to false when the thread joins.
+ write(stdev->send_socket, "T", 1);
+ pthread_mutex_unlock(&stdev->lock);
+
+ pthread_join(stdev->callback_thread, (void **)NULL);
+
+ pthread_mutex_lock(&stdev->lock);
+ }
+ if (!keep_vts_powered) {
+ stdev_vts_set_power(stdev, 0);
+ }
+ stdev_close_callback_thread_sockets(stdev);
+}
+
+// Loads a model file into the kernel. Must be called with the stdev->lock held.
+static int vts_load_sound_model(
+ struct sound_trigger_device *stdev __unused,
+ char *buffer,
+ size_t buffer_len,
+ int model_index)
+{
+ int ret = 0;
+
+ ALOGV("%s model_index: %d", __func__, model_index);
+
+ if (model_index == HOTWORD_INDEX) {
+ /* load Hotword model binary */
+#ifdef MMAP_INTERFACE_ENABLED
+ ret = load_modelbinary(stdev, model_index, buffer, buffer_len);
+#else
+ ret = sysfs_write(VTS_HOTWORD_MODEL, buffer, buffer_len);
+#endif
+ } else if (model_index == ODMVOICE_INDEX) {
+ /* load ODMVoice model binary */
+#ifdef MMAP_INTERFACE_ENABLED
+ ret = load_modelbinary(stdev, model_index, buffer, buffer_len);
+#else
+ ret = sysfs_write(VTS_SVOICE_MODEL, buffer, buffer_len);
+#endif
+ } else {
+ ALOGE("Can't determine model write ioctl %d", model_index);
+ return -ENOSYS;
+ }
+
+ if (ret) {
+ ALOGE("Error in VTS sysfs write : %d", ret);
+ }
+
+ return ret;
+}
+
+// Returns a bitmask where each bit is set if there is a recognition callback function set for that
+// index. Must be called with the stdev->lock held.
+static inline int stdev_active_callback_bitmask(struct sound_trigger_device* stdev)
+{
+ int bitmask = 0;
+ int i;
+
+ ALOGV("%s", __func__);
+
+ for (i = 0; i < MAX_SOUND_MODELS; ++i) {
+ if (stdev->recognition_callbacks[i] != NULL) {
+ bitmask |= (1 << i);
+ }
+ }
+ return bitmask;
+}
+
+static void *callback_thread_loop(void *context)
+{
+ char msg[UEVENT_MSG_LEN];
+ struct sound_trigger_device *stdev =
+ (struct sound_trigger_device *)context;
+ struct pollfd fds[2];
+ int err = 0;
+ int i, n;
+
+ ALOGI("%s", __func__);
+ prctl(PR_SET_NAME, (unsigned long)"sound trigger callback", 0, 0, 0);
+
+ pthread_mutex_lock(&stdev->lock);
+
+ fds[0].events = POLLIN;
+ fds[0].fd = stdev->uevent_socket;
+ fds[1].events = POLLIN;
+ fds[1].fd = stdev->term_socket;
+
+ stdev->recog_cbstate = RECOG_CB_NONE;
+ pthread_mutex_unlock(&stdev->lock);
+
+ while (1) {
+ err = poll(fds, 2, -1);
+ pthread_mutex_lock(&stdev->lock);
+ stdev->recog_cbstate = RECOG_CB_STARTED;
+ if (err < 0) {
+ ALOGE_IF(err < 0, "Error in poll: %d", err);
+ break;
+ }
+
+ if (fds[0].revents & POLLIN) {
+ n = uevent_kernel_multicast_recv(fds[0].fd, msg, UEVENT_MSG_LEN);
+ if (n <= 0) {
+ stdev->recog_cbstate = RECOG_CB_NONE;
+ pthread_mutex_unlock(&stdev->lock);
+ continue;
+ }
+ for (i = 0; i < n;) {
+ if (strstr(msg + i, "VOICE_WAKEUP_WORD_ID=1") ||
+ strstr(msg + i, "VOICE_WAKEUP_WORD_ID=2") ||
+ strstr(msg + i, "VOICE_WAKEUP_WORD_ID=3")) {
+ struct sound_trigger_recognition_event *event = NULL;
+ int trigger_index = 0;
+
+ if (strstr(msg + i, "VOICE_WAKEUP_WORD_ID=1")||
+ strstr(msg + i, "VOICE_WAKEUP_WORD_ID=3")) {
+ event = sound_trigger_odmvoice_event_alloc(stdev);
+ trigger_index = ODMVOICE_INDEX;
+ ALOGI("%s ODMVOICE Event Triggerred %d", __func__, trigger_index);
+ } else {
+ event = sound_trigger_hotword_event_alloc(stdev);
+ trigger_index = HOTWORD_INDEX;
+ ALOGI("%s HOTWORD Event Triggered --%d", __func__, trigger_index);
+ }
+
+ if (event) {
+ if (stdev->model_execstate[trigger_index] == MODEL_STATE_RUNNING &&
+ stdev->recognition_callbacks[trigger_index] != NULL) {
+ ALOGI("%s send callback model %d", __func__, trigger_index);
+ stdev->recog_cbstate = RECOG_CB_CALLED;
+ stdev->recognition_callbacks[trigger_index](
+ event, stdev->recognition_cookies[trigger_index]);
+ stdev->recognition_callbacks[trigger_index] = NULL;
+ } else {
+ ALOGE("%s no callback for model %d", __func__, trigger_index);
+ }
+ free(event);
+ // Start reading data from the VTS while the upper levels do their thing.
+ if (stdev->model_execstate[trigger_index] == MODEL_STATE_RUNNING &&
+ stdev->configs[trigger_index] &&
+ stdev->configs[trigger_index]->capture_requested) {
+ ALOGI("%s Streaming Enabled for %s", __func__, (trigger_index ?
+ "ODMVOICE" : "HOTWORD"));
+ stdev->is_streaming = (0x1 << trigger_index);
+ goto exit;
+ } else {
+ /* Error handling if models stop-recognition failed */
+ if ((stdev->model_stopfailedhandles[HOTWORD_INDEX] != HANDLE_NONE) ||
+ (stdev->model_stopfailedhandles[ODMVOICE_INDEX] != HANDLE_NONE) ||
+ stdev->model_execstate[trigger_index] == MODEL_STATE_STOPABORT) {
+ handle_stop_recognition_l(stdev);
+ stdev->model_execstate[trigger_index] = MODEL_STATE_NONE;
+ ALOGI("%s stop-recognition error state handled", __func__);
+ }
+
+ // If we're not supposed to capture data, power cycle the VTS and start
+ // whatever algorithms are still active.
+ int active_bitmask = stdev_active_callback_bitmask(stdev);
+ if (active_bitmask) {
+ stdev_vts_set_power(stdev, 0);
+ stdev_vts_set_power(stdev, active_bitmask);
+ } else {
+ goto exit;
+ }
+ }
+ } else {
+ ALOGE("%s: invalid trigger or out of memory", __func__);
+ goto exit;
+ }
+ }
+ i += strlen(msg + i) + 1;
+ }
+ } else if (fds[1].revents & POLLIN) {
+ read(fds[1].fd, &n, sizeof(n)); /* clear the socket */
+ ALOGI("%s: Termination message", __func__);
+ break;
+ } else {
+ ALOGI("%s: Message to ignore", __func__);
+ }
+ stdev->recog_cbstate = RECOG_CB_NONE;
+ pthread_mutex_unlock(&stdev->lock);
+ }
+
+exit:
+ if (!stdev->is_streaming) {
+ stdev_vts_set_power(stdev, 0);
+ }
+
+ stdev->callback_thread_active = false;
+
+ stdev->recog_cbstate = RECOG_CB_NONE;
+ pthread_mutex_unlock(&stdev->lock);
+
+ return (void *)(long)err;
+}
+
+static int stdev_load_sound_model(
+ const struct sound_trigger_hw_device *dev,
+ struct sound_trigger_sound_model *sound_model,
+ sound_model_callback_t callback,
+ void *cookie,
+ sound_model_handle_t *handle)
+{
+ struct sound_trigger_device *stdev = (struct sound_trigger_device *)dev;
+ int ret = 0;
+ int model_index = -1;
+
+ ALOGI("%s", __func__);
+ pthread_mutex_lock(&stdev->lock);
+ if (handle == NULL || sound_model == NULL) {
+ ALOGE("%s: handle or sound_model pointer NULL error", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (sound_model->data_size == 0 ||
+ sound_model->data_offset < sizeof(struct sound_trigger_sound_model)) {
+ ALOGE("%s: Model data size [%d] or data offset [%d] expected offset [%zu] invalid",
+ __func__, sound_model->data_size, sound_model->data_offset,
+ sizeof(struct sound_trigger_sound_model));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* TODO: Figure out what the model type is by looking at the UUID? */
+ if (sound_model->type == SOUND_MODEL_TYPE_KEYPHRASE) {
+ if (!memcmp(&sound_model->vendor_uuid, &odmword_uuid, sizeof(sound_trigger_uuid_t))) {
+ model_index = ODMVOICE_INDEX;
+ ALOGV("%s ODMVOICE_INDEX Sound Model", __func__);
+ } else if (!memcmp(&sound_model->vendor_uuid, &hotword_uuid, sizeof(sound_trigger_uuid_t))) {
+ model_index = HOTWORD_INDEX;
+ ALOGV("%s HOTWORD_INDEX Sound Model", __func__);
+ } else {
+ ALOGE("%s Invalid UUID: {0x%x, 0x%x, 0x%x, 0x%x \n {0x%x 0x%x 0x%x 0x%x 0x%x 0x%x}}", __func__,
+ sound_model->vendor_uuid.timeLow, sound_model->vendor_uuid.timeMid,
+ sound_model->vendor_uuid.timeHiAndVersion, sound_model->vendor_uuid.clockSeq,
+ sound_model->vendor_uuid.node[0], sound_model->vendor_uuid.node[1],
+ sound_model->vendor_uuid.node[2], sound_model->vendor_uuid.node[3],
+ sound_model->vendor_uuid.node[4], sound_model->vendor_uuid.node[5]);
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else if (sound_model->type == SOUND_MODEL_TYPE_GENERIC) {
+ if (!memcmp(&sound_model->vendor_uuid, &odmword_uuid, sizeof(sound_trigger_uuid_t))) {
+ model_index = ODMVOICE_INDEX;
+ ALOGV("%s ODMVOICE_INDEX Sound Model", __func__);
+ } else {
+ ALOGE("%s Generic Invalid UUID: {0x%x, 0x%x, 0x%x, 0x%x \n {0x%x 0x%x 0x%x 0x%x 0x%x 0x%x}}",
+ __func__, sound_model->vendor_uuid.timeLow, sound_model->vendor_uuid.timeMid,
+ sound_model->vendor_uuid.timeHiAndVersion, sound_model->vendor_uuid.clockSeq,
+ sound_model->vendor_uuid.node[0], sound_model->vendor_uuid.node[1],
+ sound_model->vendor_uuid.node[2], sound_model->vendor_uuid.node[3],
+ sound_model->vendor_uuid.node[4], sound_model->vendor_uuid.node[5]);
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ ALOGE("%s: Could not determine model type", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (model_index < 0 || stdev->model_handles[model_index] != -1) {
+ ALOGE("%s: unknown Model type or already running", __func__);
+ ret = -ENOSYS;
+ goto exit;
+ }
+
+ ret = vts_load_sound_model(stdev, ((char *)sound_model) + sound_model->data_offset,
+ sound_model->data_size, model_index);
+ if (ret) {
+ goto exit;
+ }
+
+ stdev->model_handles[model_index] = model_index;
+ stdev->sound_model_callbacks[model_index] = callback;
+ stdev->sound_model_cookies[model_index] = cookie;
+ *handle = stdev->model_handles[model_index];
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+static int stdev_unload_sound_model(
+ const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t handle)
+{
+ struct sound_trigger_device *stdev = (struct sound_trigger_device *)dev;
+ int ret = 0;
+
+ ALOGI("%s handle: %d", __func__, handle);
+ pthread_mutex_lock(&stdev->lock);
+ if (handle < 0 || handle >= MAX_SOUND_MODELS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (stdev->model_handles[handle] != handle) {
+ ret = -ENOSYS;
+ goto exit;
+ }
+
+ // If we still have a recognition callback, that means we should cancel the
+ // recognition first.
+ if (stdev->recognition_callbacks[handle] != NULL ||
+ (stdev->is_streaming & (0x1 << handle))) {
+ ret = stdev_stop_recognition_l(stdev, handle);
+ }
+ stdev->model_handles[handle] = -1;
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+static int stdev_start_recognition(
+ const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t handle,
+ const struct sound_trigger_recognition_config *config,
+ recognition_callback_t callback,
+ void *cookie)
+{
+ struct sound_trigger_device *stdev = (struct sound_trigger_device *)dev;
+ int ret = 0;
+
+ ALOGI("%s Handle %d", __func__, handle);
+ pthread_mutex_lock(&stdev->lock);
+ if (handle < 0 || handle >= MAX_SOUND_MODELS || stdev->model_handles[handle] != handle) {
+ ALOGE("%s: Handle doesn't match", __func__);
+ ret = -ENOSYS;
+ goto exit;
+ }
+ if (stdev->recognition_callbacks[handle] != NULL) {
+ ALOGW("%s:model recognition is already started Checking for error state", __func__);
+ /* Error handling if models stop-recognition failed */
+ if ((stdev->model_stopfailedhandles[HOTWORD_INDEX] != HANDLE_NONE) ||
+ (stdev->model_stopfailedhandles[ODMVOICE_INDEX] != HANDLE_NONE)) {
+ handle_stop_recognition_l(stdev);
+ ALOGI("%s stop-recognition error state handled", __func__);
+ }
+ }
+
+ if (stdev->voicecall_state == VOICECALL_STARTED) {
+ ALOGI("%s VoiceCall in progress", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ // Copy the config for this handle.
+ if (config) {
+ if (stdev->configs[handle]) {
+ free(stdev->configs[handle]);
+ }
+ stdev->configs[handle] = malloc(sizeof(*config));
+ if (!stdev->configs[handle]) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ memcpy(stdev->configs[handle], config, sizeof(*config));
+
+ /* Check whether config has extra information */
+ if (config->data_size > 0) {
+ char *params = (char*)config + sizeof(*config);
+ // reset & update user data
+ stdev->backlog_size = 0;
+ stdev->odmvoicemodel_mode = ODMVOICE_UNKNOWN_MODE;
+ if (params)
+ ParseExtraConfigData(stdev, params);
+ }
+ }
+
+ ret = stdev_start_callback_thread(stdev);
+ if (ret) {
+ goto exit;
+ }
+
+ stdev->recognition_callbacks[handle] = callback;
+ stdev->recognition_cookies[handle] = cookie;
+
+ // Reconfigure the VTS to run any algorithm that have a callback.
+ if (!stdev->is_streaming ||
+ (stdev->is_streaming & (0x1 << handle))) {
+ ALOGI("Starting VTS Recognition\n");
+ stdev_vts_set_power(stdev, 0);
+ stdev_vts_set_power(stdev, stdev_active_callback_bitmask(stdev));
+ }
+
+ stdev->model_stopfailedhandles[handle] = HANDLE_NONE;
+ stdev->model_execstate[handle] = MODEL_STATE_RUNNING;
+ ALOGI("%s Handle Exit %d", __func__, handle);
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+static int stdev_stop_recognition(
+ const struct sound_trigger_hw_device *dev,
+ sound_model_handle_t handle)
+{
+ struct sound_trigger_device *stdev = (struct sound_trigger_device *)dev;
+ int ret = 0;
+
+ ALOGI("%s Handle %d", __func__, handle);
+
+ /* Error handling to avoid ST HWservice Framework deadlock situation */
+ if (pthread_mutex_trylock(&stdev->lock)) {
+ int retry_count = 0;
+ do {
+ retry_count++;
+ if (stdev->recog_cbstate == RECOG_CB_CALLED) {
+ if (stdev->voicecall_state == VOICECALL_STARTED) {
+ ALOGI("%s VoiceCall in progress", __func__);
+ return 0;
+ }
+
+ if (handle < 0 || handle >= MAX_SOUND_MODELS || stdev->model_handles[handle] != handle) {
+ ALOGE("%s: Recognition Even CallBack function called - Handle doesn't match", __func__);
+ return -ENOSYS;
+ }
+ ALOGI("%s Handle %d Recognition Even CallBack function called",
+ __func__, handle);
+ stdev->model_stopfailedhandles[handle] = handle;
+ stdev->model_execstate[handle] = MODEL_STATE_STOPABORT;
+ return -EBUSY;
+ }
+
+ if (!(retry_count % 25))
+ ALOGI("%s Handle %d Trylock retry count %d!!", __func__, handle, retry_count);
+
+ if (retry_count > 100) {
+ ALOGE("%s Handle %d Unable to Acquire Lock", __func__, handle);
+ stdev->model_stopfailedhandles[handle] = handle;
+ stdev->model_execstate[handle] = MODEL_STATE_STOPABORT;
+ return -ENOSYS;
+ }
+ usleep(1000); // wait for 1msec before retrying
+ } while (pthread_mutex_trylock(&stdev->lock));
+ } else
+ ALOGV("%s Handle %d Trylock acquired successfully", __func__, handle);
+
+ ret = stdev_stop_recognition_l(stdev, handle);
+
+ pthread_mutex_unlock(&stdev->lock);
+ ALOGI("%s Handle Exit %d", __func__, handle);
+ return ret;
+}
+
+static int stdev_stop_recognition_l(
+ struct sound_trigger_device *stdev,
+ sound_model_handle_t handle)
+{
+ ALOGV("%s", __func__);
+
+ if (stdev->voicecall_state == VOICECALL_STARTED) {
+ ALOGI("%s VoiceCall in progress", __func__);
+ return 0;
+ }
+
+ if (handle < 0 || handle >= MAX_SOUND_MODELS || stdev->model_handles[handle] != handle) {
+ ALOGE("%s: Handle doesn't match", __func__);
+ return -ENOSYS;
+ }
+ if (stdev->recognition_callbacks[handle] == NULL &&
+ !(stdev->is_streaming & (0x1 << handle))) {
+ ALOGE("%s:model recognition is already stopped", __func__);
+ return -ENOSYS;
+ }
+ free(stdev->configs[handle]);
+ stdev->configs[handle] = NULL;
+ stdev->recognition_callbacks[handle] = NULL;
+
+ // If we're streaming, then we shouldn't touch the VTS's current state.
+ if (!stdev->is_streaming ||
+ (stdev->is_streaming & (0x1 << handle))) {
+ // Only stop when it's the last one, otherwise, turn off the VTS and reconfigure it for the
+ // new list of algorithms with callbacks.
+ int active_bitmask = stdev_active_callback_bitmask(stdev);
+ if (active_bitmask == 0) {
+ stdev_join_callback_thread(stdev, false);
+ } else {
+ // Callback thread should already be running, but make sure.
+ // stdev_start_callback_thread(stdev);
+ stdev_vts_set_power(stdev, 0);
+ stdev_vts_set_power(stdev, active_bitmask);
+ }
+ }
+
+ stdev->model_stopfailedhandles[handle] = HANDLE_NONE;
+ stdev->model_execstate[handle] = MODEL_STATE_NONE;
+ return 0;
+}
+
+/* calling function should acquire stdev lock */
+static void handle_stop_recognition_l(struct sound_trigger_device *stdev)
+{
+ int i;
+
+ ALOGV("%s", __func__);
+
+ for (i = 0; i < MAX_SOUND_MODELS; ++i) {
+ if (stdev->model_execstate[i] == MODEL_STATE_STOPABORT &&
+ stdev->model_stopfailedhandles[i] == i) {
+ if (stdev->recognition_callbacks[i] == NULL &&
+ !(stdev->is_streaming & (0x1 << i))) {
+ ALOGI("%s:model recognition is already stopped", __func__);
+ } else {
+ if (stdev->configs[i])
+ free(stdev->configs[i]);
+ stdev->configs[i] = NULL;
+ stdev->recognition_callbacks[i] = NULL;
+ ALOGI("%s:model recognition callback released", __func__);
+ }
+
+ // If we're streaming, then we shouldn't touch the VTS's current state.
+ if (!stdev->is_streaming ||
+ (stdev->is_streaming & (0x1 << i))) {
+ // force stop recognition
+ stdev_vts_set_power(stdev, 0);
+ ALOGI("%s:model recognition force stopped", __func__);
+ // reconfigure if other model is running.
+ int active_bitmask = stdev_active_callback_bitmask(stdev);
+ if (active_bitmask) {
+ stdev_vts_set_power(stdev, active_bitmask);
+ }
+ }
+ }
+
+ stdev->model_execstate[i] = MODEL_STATE_NONE;
+ stdev->model_stopfailedhandles[i] = HANDLE_NONE;
+ }
+
+ return;
+}
+
+__attribute__ ((visibility ("default")))
+int sound_trigger_open_for_streaming()
+{
+ struct sound_trigger_device *stdev = &g_stdev;
+ int ret = 0;
+ char fn[256];
+
+ ALOGV("%s", __func__);
+
+ pthread_mutex_lock(&stdev->lock);
+
+ if (!stdev->sthal_opened) {
+ ALOGE("%s: stdev has not been opened", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (stdev->voicecall_state == VOICECALL_STARTED) {
+ ALOGI("%s VoiceCall in progress", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (!stdev->is_streaming) {
+ ALOGE("%s: VTS is not streaming currently", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (stdev->is_seamless_recording) {
+ ALOGE("%s: VTS is already seamless recording currently", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ snprintf(fn, sizeof(fn), "/dev/snd/pcmC%uD%u%c", VTS_SOUND_CARD, VTS_TRICAP_DEVICE_NODE, 'c');
+ ALOGI("%s: Opening PCM Device %s", __func__, fn);
+
+ /* open vts streaming PCM node */
+ stdev->streaming_pcm = pcm_open(VTS_SOUND_CARD, VTS_TRICAP_DEVICE_NODE, PCM_IN, &pcm_config_vt_capture);
+ if (stdev->streaming_pcm && !pcm_is_ready(stdev->streaming_pcm)) {
+ ALOGE("%s: failed to open streaming PCM (%s)", __func__, pcm_get_error(stdev->streaming_pcm));
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ stdev->is_seamless_recording = true;
+ ret = 1;
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+__attribute__ ((visibility ("default")))
+size_t sound_trigger_read_samples(
+ int audio_handle,
+ void *buffer,
+ size_t buffer_len)
+{
+ struct sound_trigger_device *stdev = &g_stdev;
+// int i;
+ size_t ret = 0;
+
+ //ALOGV("%s", __func__);
+
+ if (audio_handle <= 0) {
+ ALOGE("%s: invalid audio handle", __func__);
+ return -EINVAL;
+ }
+
+ pthread_mutex_lock(&stdev->lock);
+
+ if (!stdev->sthal_opened) {
+ ALOGE("%s: stdev has not been opened", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (stdev->voicecall_state == VOICECALL_STARTED) {
+ ALOGI("%s VoiceCall in progress", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (!stdev->is_streaming) {
+ ALOGE("%s: VTS is not streaming currently", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if(stdev->streaming_pcm)
+ ret = pcm_read(stdev->streaming_pcm, buffer, buffer_len);
+ if (ret == 0) {
+ ALOGVV("%s: --Sent %zu bytes to buffer", __func__, buffer_len);
+ } else {
+ ALOGE("%s: Read Fail = %s", __func__, pcm_get_error(stdev->streaming_pcm));
+ }
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+__attribute__ ((visibility ("default")))
+int sound_trigger_close_for_streaming(int audio_handle __unused)
+{
+ struct sound_trigger_device *stdev = &g_stdev;
+// int i;
+ size_t ret = 0;
+
+ ALOGV("%s", __func__);
+
+ if (audio_handle <= 0) {
+ ALOGE("%s: invalid audio handle", __func__);
+ return -EINVAL;
+ }
+
+ pthread_mutex_lock(&stdev->lock);
+
+ if (!stdev->sthal_opened) {
+ ALOGE("%s: stdev has not been opened", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (stdev->voicecall_state == VOICECALL_STARTED) {
+ ALOGI("%s VoiceCall in progress", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (!stdev->is_seamless_recording) {
+ ALOGE("%s: VTS Seamless Recording PCM Node is not opened", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (!stdev->is_streaming) {
+ ALOGE("%s: VTS is not currently streaming", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* close streaming pcm node */
+ pcm_close(stdev->streaming_pcm);
+ stdev->streaming_pcm = NULL;
+
+ stdev->is_seamless_recording = false;
+ // Power off the VTS, but then re-enable any algorithms that have callbacks.
+ int active_bitmask = stdev_active_callback_bitmask(stdev);
+ stdev_vts_set_power(stdev, 0);
+ if (active_bitmask) {
+ stdev_start_callback_thread(stdev);
+ stdev_vts_set_power(stdev, active_bitmask);
+ }
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+/* VTS recording sthal interface */
+__attribute__ ((visibility ("default")))
+int sound_trigger_open_recording()
+{
+ struct sound_trigger_device *stdev = &g_stdev;
+ int ret = 0;
+ char fn[256];
+
+ ALOGV("%s", __func__);
+
+ pthread_mutex_lock(&stdev->lock);
+
+ if (!stdev->sthal_opened) {
+ ALOGE("%s: stdev has not been opened", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (stdev->voicecall_state == VOICECALL_STARTED) {
+ ALOGI("%s VoiceCall in progress", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (stdev->is_recording) {
+ ALOGW("%s: VTS is already recording currently", __func__);
+
+ /* workaround to forcefully close current execution */
+ /* close streaming pcm node */
+ if(stdev->recording_pcm) {
+ pcm_close(stdev->recording_pcm);
+ stdev->recording_pcm = NULL;
+ }
+
+ /* disable VTS MIC controls */
+ int active_bitmask = stdev_active_callback_bitmask(stdev);
+ if (!active_bitmask && !stdev->is_streaming) {
+ stdev_vts_set_mic(stdev, false);
+ }
+ stdev->is_recording = false;
+ ALOGI("%s: Forcefully closed current recording", __func__);
+ }
+
+ /* Check & enable VTS MIC controls */
+ stdev_vts_set_mic(stdev, true);
+
+ snprintf(fn, sizeof(fn), "/dev/snd/pcmC%uD%u%c", VTS_SOUND_CARD, VTS_RECORD_DEVICE_NODE, 'c');
+ ALOGI("%s: Opening PCM Device %s", __func__, fn);
+
+ /* open VTS recording PCM node */
+ stdev->recording_pcm = pcm_open(VTS_SOUND_CARD, VTS_RECORD_DEVICE_NODE, PCM_IN, &pcm_config_vt_capture);
+ if (stdev->recording_pcm && !pcm_is_ready(stdev->recording_pcm)) {
+ ALOGE("%s: failed to open recording PCM (%s)", __func__, pcm_get_error(stdev->recording_pcm));
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ stdev->is_recording = true;
+ ret = 1;
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+__attribute__ ((visibility ("default")))
+size_t sound_trigger_read_recording_samples(
+ void *buffer,
+ size_t buffer_len)
+{
+ struct sound_trigger_device *stdev = &g_stdev;
+// int i;
+ size_t ret = 0;
+
+ //ALOGV("%s", __func__);
+
+ pthread_mutex_lock(&stdev->lock);
+
+ if (!stdev->sthal_opened) {
+ ALOGE("%s: stdev has not been opened", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (stdev->voicecall_state == VOICECALL_STARTED) {
+ ALOGI("%s VoiceCall in progress", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (!stdev->is_recording) {
+ ALOGE("%s: VTS Recording PCM Node is not opened", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if(stdev->recording_pcm)
+ ret = pcm_read(stdev->recording_pcm, buffer, buffer_len);
+ if (ret == 0) {
+ ALOGVV("%s: --Sent %zu bytes to buffer", __func__, buffer_len);
+ } else {
+ ALOGE("%s: Read Fail = %s", __func__, pcm_get_error(stdev->recording_pcm));
+ }
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+__attribute__ ((visibility ("default")))
+int sound_trigger_close_recording()
+{
+ struct sound_trigger_device *stdev = &g_stdev;
+// int i;
+ size_t ret = 0;
+
+ ALOGV("%s", __func__);
+
+ pthread_mutex_lock(&stdev->lock);
+
+ if (!stdev->sthal_opened) {
+ ALOGE("%s: stdev has not been opened", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (stdev->voicecall_state == VOICECALL_STARTED) {
+ ALOGI("%s VoiceCall in progress", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Error handling if models stop-recognition failed */
+ if ((stdev->model_stopfailedhandles[HOTWORD_INDEX] != HANDLE_NONE) ||
+ (stdev->model_stopfailedhandles[ODMVOICE_INDEX] != HANDLE_NONE)) {
+ handle_stop_recognition_l(stdev);
+ ALOGI("%s stop-recognition error state handled", __func__);
+ }
+
+ if (!stdev->is_recording) {
+ ALOGE("%s: VTS Recording PCM Node is not opened", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* close streaming pcm node */
+ pcm_close(stdev->recording_pcm);
+ stdev->recording_pcm = NULL;
+
+ /* disable VTS MIC controls */
+ int active_bitmask = stdev_active_callback_bitmask(stdev);
+ if (!active_bitmask && !stdev->is_streaming) {
+ stdev_vts_set_mic(stdev, false);
+ }
+
+ stdev->is_recording = false;
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+__attribute__ ((visibility ("default")))
+int sound_trigger_headset_status(int is_connected)
+{
+ struct sound_trigger_device *stdev = &g_stdev;
+ int active_bitmask = 0;
+ int ret = 0;
+
+ pthread_mutex_lock(&stdev->lock);
+
+ if (!stdev->sthal_opened) {
+ ALOGE("%s: stdev has not been opened", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ /* check whether vts mic is configured or not */
+ if (stdev->is_mic_configured) {
+ int tmp_streaming = stdev->is_streaming;
+ int tmp_recording = stdev->is_recording;
+ int tmp_seamlessrecording = stdev->is_seamless_recording;
+ //Check if recording is in progress
+ if (stdev->is_recording) {
+ ALOGI("%s: Close VTS Record PCM to reconfigure active Mic", __func__);
+ // Close record PCM before changing MIC
+ if (stdev->recording_pcm) {
+ pcm_close(stdev->recording_pcm);
+ stdev->recording_pcm = NULL;
+ }
+ stdev->is_recording = false;
+ }
+
+ //Check if seamless capture is in progress
+ if (stdev->is_streaming) {
+ ALOGI("%s: Close VTS Seamless PCM to reconfigure active Mic", __func__);
+ // Close seamless PCM before changing MIC
+ if (stdev->streaming_pcm) {
+ pcm_close(stdev->streaming_pcm);
+ stdev->streaming_pcm = NULL;
+ }
+ stdev->is_seamless_recording = false;
+ }
+
+ // Power off the VTS, but then re-enable any algorithms that have callbacks.
+ active_bitmask = stdev_active_callback_bitmask(stdev);
+ stdev_vts_set_power(stdev, 0);
+ /* update active mic only after disabling previous mic configuration */
+ if (is_connected)
+ stdev->active_mic = VTS_HEADSET_MIC;
+ else
+ stdev->active_mic = VTS_MAIN_MIC;
+
+ ALOGI("%s: Active MIC Changed to [%s] Active Models: 0x%x", __func__,
+ (is_connected ? "HEADSET MIC" : "MAIN MIC"), active_bitmask);
+
+ // Restore recording status
+ stdev->is_recording = tmp_recording;
+
+ if (active_bitmask || tmp_streaming) {
+ active_bitmask |= tmp_streaming;
+ ALOGI("%s: Re-started Models: 0x%x", __func__, active_bitmask);
+ stdev_vts_set_power(stdev, active_bitmask);
+ stdev->is_streaming = tmp_streaming;
+ stdev->is_seamless_recording = tmp_seamlessrecording;
+ }
+
+ //Check if recording enabled then start again
+ if (stdev->is_recording) {
+ ALOGI("%s: Re-route active Mic for recording", __func__);
+ /* Check & enable VTS MIC controls */
+ stdev_vts_set_mic(stdev, true);
+
+ /* open VTS recording PCM node */
+ if (!stdev->recording_pcm) {
+ stdev->recording_pcm = pcm_open(VTS_SOUND_CARD, VTS_RECORD_DEVICE_NODE, PCM_IN, &pcm_config_vt_capture);
+ if (stdev->recording_pcm && !pcm_is_ready(stdev->recording_pcm)) {
+ ALOGE("%s: failed to open recording PCM", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+ }
+ ALOGI("%s: VTS Record reconfiguration & open PCM Completed", __func__);
+ }
+
+ //Check if seamless capture enable then start again
+ if (stdev->is_streaming) {
+ /* open vts streaming PCM node */
+ if (stdev->is_seamless_recording && !stdev->streaming_pcm) {
+ stdev->streaming_pcm = pcm_open(VTS_SOUND_CARD, VTS_TRICAP_DEVICE_NODE, PCM_IN, &pcm_config_vt_capture);
+ if (stdev->streaming_pcm && !pcm_is_ready(stdev->streaming_pcm)) {
+ ALOGE("%s: failed to open streaming PCM", __func__);
+ if (stdev->recording_pcm) {
+ pcm_close(stdev->recording_pcm);
+ stdev->recording_pcm = NULL;
+ }
+ ret = -EFAULT;
+ goto exit;
+ }
+ }
+ }
+ } else {
+ /* update the active mic information */
+ if (is_connected)
+ stdev->active_mic = VTS_HEADSET_MIC;
+ else
+ stdev->active_mic = VTS_MAIN_MIC;
+ }
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+__attribute__ ((visibility ("default")))
+int sound_trigger_voicecall_status(int callstate)
+{
+ struct sound_trigger_device *stdev = &g_stdev;
+ int active_bitmask = 0;
+ int ret = 0;
+
+ pthread_mutex_lock(&stdev->lock);
+
+ if (!stdev->sthal_opened) {
+ ALOGE("%s: stdev has not been opened", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ /* check whether vts mic is configured or not */
+ if (callstate == VOICECALL_STARTED) {
+ if (stdev->is_mic_configured) {
+ //Check if recording is in progress
+ if (stdev->is_recording) {
+ ALOGI("%s: Close VTS Record PCM to reconfigure active Mic", __func__);
+ // Close record PCM before changing MIC
+ if (stdev->recording_pcm) {
+ pcm_close(stdev->recording_pcm);
+ stdev->recording_pcm = NULL;
+ }
+ stdev->is_recording = false;
+ }
+
+ //Check if seamless capture is in progress
+ if (stdev->is_streaming) {
+ ALOGI("%s: Close VTS Seamless PCM to reconfigure active Mic", __func__);
+ // Close seamless PCM before changing MIC
+ if (stdev->streaming_pcm) {
+ pcm_close(stdev->streaming_pcm);
+ stdev->streaming_pcm = NULL;
+ }
+ stdev->is_seamless_recording = false;
+ stdev->is_streaming = false;
+ }
+
+ // Power off the VTS, but then re-enable any algorithms that have callbacks.
+ active_bitmask = stdev_active_callback_bitmask(stdev);
+ stdev_vts_set_power(stdev, 0);
+ stdev->recognition_callbacks[HOTWORD_INDEX] = NULL;
+ stdev->recognition_callbacks[ODMVOICE_INDEX] = NULL;
+ }
+ /* update voicecall status */
+ stdev->voicecall_state = VOICECALL_STARTED;
+ ALOGI("%s: VoiceCall START notification received", __func__);
+ } else {
+ stdev->voicecall_state = VOICECALL_STOPPED;
+ ALOGI("%s: VoiceCall STOP notification received", __func__);
+ }
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+static int stdev_close(hw_device_t *device)
+{
+ struct sound_trigger_device *stdev = (struct sound_trigger_device *)device;
+ int ret = 0, i;
+
+ ALOGV("%s", __func__);
+
+ pthread_mutex_lock(&stdev->lock);
+ if (!stdev->sthal_opened) {
+ ALOGE("%s: device already closed", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+ stdev_join_callback_thread(stdev, false);
+ stdev_close_mixer(stdev);
+#ifdef MMAP_INTERFACE_ENABLED
+ if (munmap(stdev->mapped_addr,VTSDRV_MISC_MODEL_BIN_MAXSZ) < 0) {
+ ALOGE("%s: munmap failed %s", __func__, strerror(errno));
+ }
+
+ close(stdev->vtsdev_fd);
+ stdev->vtsdev_fd = -1;
+#endif
+ memset(stdev->recognition_callbacks, 0, sizeof(stdev->recognition_callbacks));
+ memset(stdev->sound_model_callbacks, 0, sizeof(stdev->sound_model_callbacks));
+ for (i = 0; i < MAX_SOUND_MODELS; ++i) {
+ if (stdev->configs[i]) {
+ free(stdev->configs[i]);
+ stdev->configs[i] = NULL;
+ }
+ }
+
+ stdev->sthal_opened = false;
+
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+ return ret;
+}
+
+static int stdev_open(
+ const hw_module_t *module,
+ const char *name,
+ hw_device_t **device)
+{
+ struct sound_trigger_device *stdev;
+ int ret = -EINVAL;
+ int forcereset = 1;
+
+ ALOGV("%s", __func__);
+
+ if (strcmp(name, SOUND_TRIGGER_HARDWARE_INTERFACE) != 0)
+ return -EINVAL;
+
+ stdev = &g_stdev;
+ pthread_mutex_lock(&stdev->lock);
+
+ if (stdev->sthal_opened) {
+ ALOGE("%s: Only one sountrigger can be opened at a time", __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+#ifdef MMAP_INTERFACE_ENABLED
+ /* Open VTS Misc device for loading Model binary through MMAP interface */
+ stdev->vtsdev_fd = open("/dev/vts_fio_dev", O_RDWR);
+ if (stdev->vtsdev_fd < 0) {
+ ALOGE("%s: Failed to open VTS-Misc device %s", __func__, strerror(errno));
+ goto exit;
+ }
+
+ /* memory map VTS misc driver */
+ stdev->mapped_addr = mmap(NULL, VTSDRV_MISC_MODEL_BIN_MAXSZ,
+ PROT_READ | PROT_WRITE, MAP_SHARED, stdev->vtsdev_fd, 0);
+ if (stdev->mapped_addr == MAP_FAILED) {
+ ALOGE("%s: VTS Device MMAP failed", __func__);
+ close(stdev->vtsdev_fd);
+ goto exit;
+ }
+ ALOGI("%s: VTS device opened Successfully for MMAP Interface", __func__);
+#endif
+
+ ret = stdev_init_mixer(stdev);
+ if (ret) {
+ ALOGE("Error mixer init");
+ goto exit;
+ }
+
+ stdev->device.common.tag = HARDWARE_DEVICE_TAG;
+ stdev->device.common.version = SOUND_TRIGGER_DEVICE_API_VERSION_1_0;
+ stdev->device.common.module = (struct hw_module_t *)module;
+ stdev->device.common.close = stdev_close;
+ stdev->device.get_properties = stdev_get_properties;
+ stdev->device.load_sound_model = stdev_load_sound_model;
+ stdev->device.unload_sound_model = stdev_unload_sound_model;
+ stdev->device.start_recognition = stdev_start_recognition;
+ stdev->device.stop_recognition = stdev_stop_recognition;
+ stdev->send_socket = stdev->term_socket = stdev->uevent_socket = -1;
+ stdev->streaming_pcm = NULL;
+ stdev->is_seamless_recording = false;
+ stdev->sthal_opened = true;
+ stdev->recognize_started = 0;
+ stdev->is_mic_configured = 0;
+ stdev->active_mic = VTS_MAIN_MIC;
+ stdev->is_recording = false;
+ stdev->recording_pcm = NULL;
+ stdev->voicecall_state = VOICECALL_STOPPED;
+ stdev->recog_cbstate = RECOG_CB_NONE;
+
+ int i;
+ for (i = 0; i < MAX_SOUND_MODELS; ++i) {
+ stdev->model_handles[i] = -1;
+ stdev->model_execstate[i] = MODEL_STATE_NONE;
+ stdev->model_stopfailedhandles[i] = HANDLE_NONE;
+ }
+
+ *device = &stdev->device.common; // same address as stdev
+
+ if (access(AUDIO_PRIMARY_HAL_LIBRARY_PATH, R_OK) == 0) {
+ stdev->audio_primary_lib = dlopen(AUDIO_PRIMARY_HAL_LIBRARY_PATH, RTLD_NOW);
+ if (stdev->audio_primary_lib == NULL) {
+ ALOGE("%s: DLOPEN failed for %s", __func__, AUDIO_PRIMARY_HAL_LIBRARY_PATH);
+ goto hal_exit;
+ } else {
+ ALOGV("%s: DLOPEN successful for %s", __func__, AUDIO_PRIMARY_HAL_LIBRARY_PATH);
+ stdev->notify_sthal_status =
+ (int (*)(int))dlsym(stdev->audio_primary_lib,
+ "notify_sthal_status");
+ if (!stdev->notify_sthal_status) {
+ ALOGE("%s: Error in grabbing function from %s", __func__, AUDIO_PRIMARY_HAL_LIBRARY_PATH);
+ stdev->notify_sthal_status = 0;
+ ret = -EINVAL;
+ goto notify_exit;
+ }
+ }
+ }
+
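+ /* Assert the "VTS Force Reset" control so the VTS block starts from a known state */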
+ if (set_mixer_ctrls(stdev, vts_forcereset_ctlname, &forcereset, 1, false)) {
+ ALOGE("%s: VTS Force Reset configuration Failed", __func__);
+ goto exit;
+ }
+
+ pthread_mutex_unlock(&stdev->lock);
+ return 0;
+
+notify_exit:
+ if(stdev->audio_primary_lib)
+ dlclose(stdev->audio_primary_lib);
+hal_exit:
+ stdev_close_mixer(stdev);
+ stdev->sthal_opened = false;
+exit:
+ pthread_mutex_unlock(&stdev->lock);
+
+ ALOGI("%s: failed to open SoundTrigger HW Device", __func__);
+ return ret;
+}
+
+static struct hw_module_methods_t hal_module_methods = {
+ .open = stdev_open,
+};
+
+struct sound_trigger_module HAL_MODULE_INFO_SYM = {
+ .common = {
+ .tag = HARDWARE_MODULE_TAG,
+ .module_api_version = SOUND_TRIGGER_MODULE_API_VERSION_1_0,
+ .hal_api_version = HARDWARE_HAL_API_VERSION,
+ .id = SOUND_TRIGGER_HARDWARE_MODULE_ID,
+ .name = "Exynos Primary SoundTrigger HAL",
+ .author = "Samsung SLSI",
+ .methods = &hal_module_methods,
+ },
+};
diff --git a/libaudio/sthal/sound_trigger_hw.h b/libaudio/sthal/sound_trigger_hw.h
new file mode 100644
index 0000000..ce22870
--- /dev/null
+++ b/libaudio/sthal/sound_trigger_hw.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __EXYNOS_SOUNDTRIGGERHAL_H__
+#define __EXYNOS_SOUNDTRIGGERHAL_H__
+
+#include <pthread.h>
+#include <tinyalsa/asoundlib.h>
+
+#include <hardware/hardware.h>
+#include <system/sound_trigger.h>
+#include <hardware/sound_trigger.h>
+
+#include"soundtrigger_conf.h"
+
+#define UEVENT_MSG_LEN 64*1024
+/* Model index */
+#define MAX_SOUND_MODELS 2
+#define HOTWORD_INDEX 0
+#define ODMVOICE_INDEX 1
+#define HANDLE_NONE -1
+
+#define MODEL_CONTROL_COUNT 3
+#define MODEL_BACKLOG_CONTROL_COUNT 1
+
+static const struct sound_trigger_properties hw_properties = {
+ "Samsung SLSI", // implementor
+ "Exynos Primary SoundTrigger HAL, OK Google and ODMVoice", // description
+ 1, // version
+ { 0x1817de20, 0xfa3b, 0x11e5, 0xbef2, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // uuid
+ 2, // max_sound_models
+ 1, // max_key_phrases
+ 1, // max_users
+ RECOGNITION_MODE_VOICE_TRIGGER, // recognition_modes
+ true, // capture_transition
+ 0, // max_buffer_ms
+ false, // concurrent_capture
+ false, // trigger_in_event
+ 0 // power_consumption_mw
+};
+
+typedef enum {
+ VTS_MAIN_MIC = 0, //Main mic
+ VTS_HEADSET_MIC = 1, //Headset mic
+}VTS_MIC_CONF;
+
+typedef enum {
+ VTS_RECOGNIZE_STOP = 0, //Stop recognition
+ VTS_RECOGNIZE_START = 1, //Start recognition
+}VTS_RECOGNIZE_STATE;
+
+typedef enum {
+ ODMVOICE_UNKNOWN_MODE = 0, //None
+ ODMVOICE_RESERVED1_MODE = 1, //Reserved1 mode
+ ODMVOICE_RESERVED2_MODE = 2, //Reserved2 mode
+ ODMVOICE_TRIGGER_MODE = 3, //ODM voice trigger mode
+}ODMVOICE_MODEL_MODE;
+
+typedef enum {
+ VTS_MODE_OFF = 0, //default is off
+ VTS_MODE_RESERVED1_ON = 1, // ODM specific trigger mode1
+ VTS_MODE_RESERVED2_ON = 2, //ODM specific trigger mode2
+ VTS_MODE_ODMVOICE_TRIGGER_ON = 3, //ODM key phrase Detection (Trigger)
+ VTS_MODE_GOOGLE_TRIGGER_ON = 4, //Google key phrase Detection (Trigger)
+ VTS_MODE_SENSORY_TRIGGER_ON = 5, //sensory key phrase Detection (Trigger)
+ VTS_MODE_RESERVED1_OFF = 6, //OFF: ODM specific trigger mode1
+ VTS_MODE_RESERVED2_OFF = 7, //OFF: ODM specific trigger mode2
+ VTS_MODE_ODMVOICE_TRIGGER_OFF = 8, //OFF: ODM key phrase Detection (Trigger)
+ VTS_MODE_GOOGLE_TRIGGER_OFF = 9, //OFF: Google key phrase Detection
+ VTS_MODE_SENSORY_TRIGGER_OFF = 10, //OFF: sensory key phrase Detection
+ VTS_MODE_COUNT,
+}VTS_EXECUTING_MODE;
+
+typedef enum {
+ MODEL_RECOGNIZE_STOPPED = 0, //Voice Recognition stopped
+ MODEL_RECOGNIZE_STARTED = 1, //Voice Recognition started
+}MODEL_RUNNING_STATE;
+
+typedef enum {
+ VOICECALL_STOPPED = 0, //Voice call stopped
+ VOICECALL_STARTED = 1, //Voice call started
+}VOICECALL_STATE;
+
+typedef enum {
+ RECOG_CB_NONE = 0, // Recognition event callback function not called
+ RECOG_CB_STARTED = 1, // Recognition event received
+ RECOG_CB_CALLED = 2, //Recognition event callback of STHW Service Called
+}RECOG_CBSTATE;
+
+typedef enum {
+ MODEL_STATE_NONE = 0, // Model is not started
+ MODEL_STATE_RUNNING = 1, // Model is running
+ MODEL_STATE_STOPABORT = 2, // Model stop request failed/was aborted
+}MODEL_EXECSTATE;
+
+/* VTS Force Reset control name */
+char *vts_forcereset_ctlname[] = {
+ "VTS Force Reset",
+};
+
+/* Backlog size control name */
+char *model_backlog_size_ctlname[] = {
+ "VTS VoiceTrigger Value",
+};
+
+/* VTS Model recognition start/stop controls */
+char *model_recognize_start_ctlname[] = {
+ "VTS Active Keyphrase",
+ "VTS VoiceRecognization Mode",
+ "VTS VoiceTrigger Value",
+};
+
+char *model_recognize_stop_ctlname[] = {
+ "VTS Active Keyphrase",
+ "VTS VoiceTrigger Value",
+ "VTS VoiceRecognization Mode",
+};
+
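+/*
+ * The value arrays below are positional: *_start_ctlvalue entries follow
+ * model_recognize_start_ctlname order (keyphrase, mode, backlog), while
+ * *_stop_ctlvalue entries follow model_recognize_stop_ctlname order
+ * (keyphrase, backlog, mode).
+ */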
+int odmvoice_reserved1recognize_start_ctlvalue[] = {
+ 0, //"VTS Active Keyphrase",
+ VTS_MODE_RESERVED1_ON, //"VTS Execution Mode",
+ 1800, //back log size from trigger point
+};
+
+int odmvoice_reserved1recognize_stop_ctlvalue[] = {
+ 0, //"VTS Active Keyphrase",
+ 0, //back log size from trigger point
+ VTS_MODE_RESERVED1_OFF, //"VTS Execution Mode",
+};
+
+int odmvoice_reserved2recognize_start_ctlvalue[] = {
+ 0, //"VTS Active Keyphrase",
+ VTS_MODE_RESERVED2_ON, //"VTS Execution Mode",
+ 0, //back log size from trigger point
+};
+
+int odmvoice_reserved2recognize_stop_ctlvalue[] = {
+ 0, //"VTS Active Keyphrase",
+ 0, //back log size from trigger point
+ VTS_MODE_RESERVED2_OFF, //"VTS Execution Mode",
+};
+
+int odmvoice_triggerrecognize_start_ctlvalue[] = {
+ 0, //"VTS Active Keyphrase",
+ VTS_MODE_ODMVOICE_TRIGGER_ON, //"VTS Execution Mode",
+ 1800, //back log size from trigger point
+};
+
+int odmvoice_triggerrecognize_stop_ctlvalue[] = {
+ 0, //"VTS Active Keyphrase",
+ 0, //back log size from trigger point
+ VTS_MODE_ODMVOICE_TRIGGER_ON, //"VTS Execution Mode",
+};
+
+int hotword_recognize_start_ctlvalue[] = {
+ 1, //"VTS Active Keyphrase",
+ VTS_MODE_GOOGLE_TRIGGER_ON, //"VTS Execution Mode",
+ 2000, //back log size from trigger point
+};
+
+int hotword_recognize_stop_ctlvalue[] = {
+ 1, //"VTS Active Keyphrase",
+ 0, //back log size from trigger point
+ VTS_MODE_GOOGLE_TRIGGER_OFF, //"VTS Execution Mode",
+};
+
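+/* PCM configuration used for voice-trigger capture from the VTS device */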
+struct pcm_config pcm_config_vt_capture = {
+ .channels = DEFAULT_VTS_CHANNELS,
+ .rate = DEFAULT_VTS_SAMPLING_RATE,
+ .period_size = PRIMARY_VTS_PERIOD_SIZE,
+ .period_count = PRIMARY_VTS_PERIOD_COUNT,
+ .format = PCM_FORMAT_S16_LE,
+};
+
+struct sound_trigger_device {
+ struct sound_trigger_hw_device device;
+ sound_model_handle_t model_handles[MAX_SOUND_MODELS];
+ recognition_callback_t recognition_callbacks[MAX_SOUND_MODELS];
+ void *recognition_cookies[MAX_SOUND_MODELS];
+
+ sound_model_callback_t sound_model_callbacks[MAX_SOUND_MODELS];
+ void *sound_model_cookies[MAX_SOUND_MODELS];
+ pthread_t callback_thread;
+ pthread_mutex_t lock;
+ int send_socket;
+ int term_socket;
+ int uevent_socket;
+ struct sound_trigger_recognition_config *configs[MAX_SOUND_MODELS];
+ struct mixer *mixer;
+#ifdef MMAP_INTERFACE_ENABLED
+ int vtsdev_fd;
+ void *mapped_addr;
+#endif
+ int is_streaming;
+ int triggered_model;
+ int sthal_opened;
+ bool callback_thread_active;
+
+ int is_seamless_recording;
+ struct pcm *streaming_pcm;
+ int recognize_started;
+ int active_mic; //Mic to be configured
+ int is_mic_configured;
+ void *audio_primary_lib;
+ int (*notify_sthal_status)(int);
+
+ int is_recording;
+ struct pcm *recording_pcm;
+
+ int backlog_size;
+ int odmvoicemodel_mode;
+
+ int voicecall_state;
+ int recog_cbstate;
+ int model_execstate[MAX_SOUND_MODELS];
+ sound_model_handle_t model_stopfailedhandles[MAX_SOUND_MODELS];
+};
+
+#endif // __EXYNOS_SOUNDTRIGGERHAL_H__
diff --git a/libaudio/sthal/test/Android.mk b/libaudio/sthal/test/Android.mk
new file mode 100644
index 0000000..052b1fc
--- /dev/null
+++ b/libaudio/sthal/test/Android.mk
@@ -0,0 +1,49 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# VTS Unit Test Console application
+#
+ifeq ($(BOARD_USE_SOUNDTRIGGER_HAL), STHAL_TEST)
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := vtshw-test
+
+LOCAL_SRC_FILES := \
+ vts_hw_test.c
+
+LOCAL_C_INCLUDES += \
+ external/tinyalsa/include \
+ $(TOP)/hardware/samsung_slsi-linaro/exynos/libaudio/sthal
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/device/samsung/$(TARGET_BOOTLOADER_BOARD_NAME)/conf
+
+LOCAL_SHARED_LIBRARIES := \
+ liblog \
+ libcutils \
+ libtinyalsa
+
+LOCAL_CFLAGS += -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function
+
+ifeq ($(BOARD_USE_SOUNDTRIGGER_HAL_MMAP),true)
+LOCAL_CFLAGS += -DMMAP_INTERFACE_ENABLED
+endif
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_EXECUTABLE)
+endif
diff --git a/libaudio/sthal/test/vts_hw_test.c b/libaudio/sthal/test/vts_hw_test.c
new file mode 100644
index 0000000..e14c0fe
--- /dev/null
+++ b/libaudio/sthal/test/vts_hw_test.c
@@ -0,0 +1,953 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * @file : vts_hw_test.c
+ * @brief : Voice Trigger Subsystem IP Unit test case application
+ * @author : Palli Satish Kumar Reddy (palli.satish@samsung.com)
+ * @version : 1.0
+ * @history
+ * 2016.05.16 : Create
+ * 2018.06.07 : Updated to make it a generic unit test application which
+ * can support all possible SoCs
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <malloc.h>
+#include <poll.h>
+#include <pthread.h>
+#include <sys/prctl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <errno.h>
+#include <cutils/uevent.h>
+#include <tinyalsa/asoundlib.h>
+
+#include"sound_trigger_hw.h"
+#ifdef MMAP_INTERFACE_ENABLED
+#include"vts.h"
+#endif
+
+#define UEVENT_MSG_LEN 64*1024
+
+typedef enum {
+ VTS_RECOGNITION_STOPPED_STATE,
+ VTS_RECOGNITION_STARTED_STATE,
+} VTS_HW_STATE;
+
+typedef enum {
+ VTS_RECORD_STOPPED_STATE,
+ VTS_RECORD_STARTED_STATE,
+} VTS_RECORD_STATE;
+
+typedef enum {
+ VTS_RESERVED2_STOPPED_STATE,
+ VTS_RESERVED2_STARTED_STATE,
+} VTS_ODMRSVD2_STATE;
+
+typedef enum {
+ VTS_TRIGGERED_CAPTURE,
+ VTS_NORMAL_CAPTURE,
+} VTS_CAP_MODE;
+
+/* Sound model binaries */
+#define SOUND_MODEL_OKGOOGLE_BINARY "/data/voice_dva_okgoogle.bin"
+#define SOUND_MODEL_ODMVOICE_BINARY "/data/voice_dva_odmvoice.bin"
+
+#define VTS_TRIGGER_CAPTURE_OUTPUT "/sdcard/vts-trigger-cap.wav"
+#define VTS_NORMAL_CAPTURE_OUTPUT "/sdcard/vts-normal-cap.wav"
+
+#define VTS_STREAMING_BUFFER_SIZE 4800 // bytes fetched per pcm_read
+#define NUM_OF_SAMPLES_TO_CAPTURE 100
+
+struct vts_hw_device {
+ VTS_HW_STATE vts_state;
+ VTS_RECORD_STATE vts_rec_state;
+ VTS_ODMRSVD2_STATE vts_odmrsvd2_state;
+ pthread_mutex_t lock;
+ int send_sock;
+ int term_sock;
+ bool thread_exit;
+ bool rec_thread_exit;
+ bool odmrsvd2_thread_exit;
+ pthread_t callback_thread;
+ pthread_t rec_thread;
+ pthread_t odmrsvd2_thread;
+ bool model_loaded;
+};
+
+#define ID_RIFF 0x46464952
+#define ID_WAVE 0x45564157
+#define ID_FMT 0x20746d66
+#define ID_DATA 0x61746164
+
+#define FORMAT_PCM 1
+
+struct wav_header {
+ uint32_t riff_id;
+ uint32_t riff_sz;
+ uint32_t riff_fmt;
+ uint32_t fmt_id;
+ uint32_t fmt_sz;
+ uint16_t audio_format;
+ uint16_t num_channels;
+ uint32_t sample_rate;
+ uint32_t byte_rate;
+ uint16_t block_align;
+ uint16_t bits_per_sample;
+ uint32_t data_id;
+ uint32_t data_sz;
+};
+
+static struct vts_hw_device g_vtsdev = { .lock = PTHREAD_MUTEX_INITIALIZER };
+struct mixer *vtsMixerHandle = NULL;
+struct mixer_ctl *vtsMixerCtl = NULL;
+#ifdef MMAP_INTERFACE_ENABLED
+int vtsdev_fd = -1;
+void *mapped_addr = NULL;
+#endif
+
+void vts_recognition_stop(struct vts_hw_device *vts_dev);
+void vts_odmrsvd2_stop(struct vts_hw_device *vts_dev);
+void vts_record_stop(struct vts_hw_device *vts_dev);
+
+static int dmic_usagecnt = 0;
+
+// Utility function for configuring MIC mixer controls
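+// When 'reverse' is true the controls are written from last to first, so callers can tear down in the opposite order of setup.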
+int set_mixer_ctrls(
+ struct mixer *mixer_handle,
+ char *path_name[],
+ int *path_ctlvalue,
+ int ctrl_count,
+ bool reverse)
+{
+ int i = (reverse ? (ctrl_count - 1): 0);
+ int ret = 0;
+ struct mixer_ctl *mixerctl = NULL;
+
+ printf("%s, path: %s", __func__, path_name[0]);
+
+ if (mixer_handle) {
+ //for (i=0; i < ctrl_count; i++) {
+ while(ctrl_count) {
+ //printf("%s, ctrl_count: %d Loop index: %d", __func__, ctrl_count, i);
+ /* Get required control from mixer */
+ mixerctl = mixer_get_ctl_by_name(mixer_handle, path_name[i]);
+ if (mixerctl) {
+ /* Enable the control */
+ if (path_ctlvalue)
+ ret = mixer_ctl_set_value(mixerctl, 0, path_ctlvalue[i]);
+ else
+ ret = mixer_ctl_set_value(mixerctl, 0, 0);
+
+ if (ret) {
+ printf("%s: %s Failed to configure\n", __func__, path_name[i]);
+ ret = -EINVAL;
+ break;
+ } else {
+ printf("%s: %s configured value: %d\n", __func__, path_name[i],
+ (path_ctlvalue ? path_ctlvalue[i] : 0));
+ }
+ } else {
+ printf("%s: %s control doesn't exist\n", __func__, path_name[i]);
+ ret = -EINVAL;
+ break;
+ }
+ ctrl_count--;
+ if (reverse)
+ i--;
+ else
+ i++;
+ }
+ } else{
+ printf("%s: Failed to open mixer\n", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+#ifdef MMAP_INTERFACE_ENABLED
+static void load_modelbinary(char *data, int len)
+{
+ printf("%s: Size: %d\n", __func__, len);
+
+ /* Copy model binary to VTS mapped address */
+ memcpy(mapped_addr, data, len);
+
+ printf("%s: Model binary copied to Mapped memory Size: %d\n", __func__, len);
+
+ if (len > VTSDRV_MISC_MODEL_BIN_MAXSZ) {
+ printf("%s: MMAP buffer overflow Model Binary size greater then mapped size !!!\n", __func__);
+ }
+
+ /* Update model binary inforation to VTS misc driver */
+ if (ioctl(vtsdev_fd, VTSDRV_MISC_IOCTL_WRITE_ODMVOICE, &len) < 0) {
+ printf("%s: Failed to update model binary size\n", __func__);
+ }
+
+ return;
+}
+#endif
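+/* Write 'len' bytes from 'data' to the given sysfs node, looping until everything is written or an error occurs */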
+static void sysfs_write(const char *path, char *data, int len)
+{
+ char buf[80];
+ int fd = open(path, O_WRONLY);
+ int tmp = 0, written = 0;
+
+ if (fd < 0) {
+ strerror_r(errno, buf, sizeof(buf));
+ printf("Error opening %s: %s\n", path, buf);
+ return;
+ }
+
+ while (len) {
+ tmp = write(fd, data+written, len);
+ if (tmp < 0) {
+ strerror_r(errno, buf, sizeof(buf));
+ printf("Error writing to %s: %s\n", path, buf);
+ break;
+ }
+ len -= tmp;
+ written += tmp;
+ printf("%s: current written %d Actual %d Total written %d\n",__func__, tmp, len, written);
+ }
+
+ close(fd);
+ return;
+}
+
+int set_dmic_ctrls(int flag)
+{
+ int i;
+ int ret = EXIT_SUCCESS;
+ char **active_mic_ctrls = NULL;
+ int *ctrl_values = NULL;
+
+ if (vtsMixerHandle) {
+ if (!flag) {
+ if (dmic_usagecnt) {
+ dmic_usagecnt--;
+ printf("Dmic Disabled usage count %d \n", dmic_usagecnt);
+ } else {
+ printf("Dmic usage count is Zero \n");
+ return ret;
+ }
+ }
+
+ if (!dmic_usagecnt) {
+ active_mic_ctrls = main_mic_ctlname;
+ ctrl_values = main_mic_ctlvalue;
+
+ if (set_mixer_ctrls(vtsMixerHandle, active_mic_ctrls, ctrl_values, MAIN_MIC_CONTROL_COUNT, !flag)) {
+ printf("%s: %s MIC control configuration Failed", __func__,
+ flag ? "Enabling" : "Disabling");
+ mixer_close(vtsMixerHandle);
+ vtsMixerHandle = NULL;
+ return -EINVAL;
+ }
+ printf("%s: %s MIC Controls ", __func__, flag ? "Enable" : "Disable");
+ }
+
+ if (flag) {
+ dmic_usagecnt++;
+ printf("Dmic Enabled usage count %d \n", dmic_usagecnt);
+ }
+ } else{
+ printf("%s: Failed to open mixer \n", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void check_vts_state(struct vts_hw_device *vts_dev)
+{
+ if (vts_dev->vts_state == VTS_RECOGNITION_STARTED_STATE) {
+ vts_recognition_stop(vts_dev);
+ } else if (vts_dev->vts_odmrsvd2_state == VTS_RESERVED2_STARTED_STATE) {
+ vts_odmrsvd2_stop(vts_dev);
+ } else if (vts_dev->vts_rec_state == VTS_RECORD_STARTED_STATE) {
+ vts_record_stop(vts_dev);
+ }
+
+ printf("%s: Exit \n", __func__);
+ return;
+}
+
+static void vts_close_term_sock(struct vts_hw_device *vts_dev)
+{
+ if (vts_dev->send_sock >=0) {
+ close(vts_dev->send_sock);
+ vts_dev->send_sock = -1;
+ }
+ if (vts_dev->term_sock >=0) {
+ close(vts_dev->term_sock);
+ vts_dev->term_sock = -1;
+ }
+
+ return;
+}
+
+// The vts_dev should be locked when you call this function.
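+// Captures up to NUM_OF_SAMPLES_TO_CAPTURE buffers from the selected VTS PCM node and dumps them to a WAV file on /sdcard.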
+static int fetch_streaming_buffer(struct vts_hw_device *vts_dev, int cap_mode)
+{
+ int ret = 0;
+ unsigned int flags, frames;
+ int i = 0;
+ struct pcm *vcap_pcm = NULL;
+ FILE *out_vcap_fp = NULL;
+ char *streaming_buf;
+ unsigned int pcmnode = (cap_mode == VTS_TRIGGERED_CAPTURE ? VTS_TRICAP_DEVICE_NODE : VTS_RECORD_DEVICE_NODE);
+ struct wav_header header;
+ struct pcm_config pcmconfig = {
+ .channels = 1,
+ .rate = 16000,
+ .period_size = 160,
+ .period_count = 1024,
+ .format = PCM_FORMAT_S16_LE,
+ };
+
+ streaming_buf = malloc(VTS_STREAMING_BUFFER_SIZE);
+ if (!streaming_buf) {
+ printf("Failed to malloc streaming buffer!!\n");
+ goto out;
+ }
+
+ printf("%s: Fetching bytes \n", __func__);
+ if (!vcap_pcm) {
+ /* Open vts capture pcm node */
+ pcmnode = (cap_mode == VTS_TRIGGERED_CAPTURE ? VTS_TRICAP_DEVICE_NODE : VTS_RECORD_DEVICE_NODE);
+ flags = PCM_IN;
+ vcap_pcm = pcm_open(VTS_SOUND_CARD, pcmnode, flags, &pcmconfig);
+ if (!vcap_pcm || !pcm_is_ready(vcap_pcm)) {
+ printf("%s - FAILED to open VTS PCM Node : %d\n", __func__, pcmnode);
+ goto out;
+ }
+
+ sysfs_write("/sys/power/wake_lock", "vtshw-test", sizeof("vtshw-test"));
+ printf("%s - Wake Lock Acquired\n", __func__);
+ /* initialize the Wav Header information */
+ header.riff_id = ID_RIFF;
+ header.riff_sz = 0;
+ header.riff_fmt = ID_WAVE;
+ header.fmt_id = ID_FMT;
+ header.fmt_sz = 16;
+ header.audio_format = FORMAT_PCM;
+ header.num_channels = pcmconfig.channels;
+ header.sample_rate = pcmconfig.rate;
+
+ header.bits_per_sample = pcm_format_to_bits(PCM_FORMAT_S16_LE);
+ header.byte_rate = (header.bits_per_sample / 8) * pcmconfig.channels * pcmconfig.rate;
+ header.block_align = pcmconfig.channels * (header.bits_per_sample / 8);
+ header.data_id = ID_DATA;
+
+ /* open an output file to dump capture vts voice commands */
+ out_vcap_fp = fopen((cap_mode == VTS_TRIGGERED_CAPTURE ? VTS_TRIGGER_CAPTURE_OUTPUT :
+ VTS_NORMAL_CAPTURE_OUTPUT), "w+");
+ if (!out_vcap_fp ) {
+ printf("%s - FAILED to open output file !!! %s\n", __func__, (cap_mode == VTS_TRIGGERED_CAPTURE ?
+ VTS_TRIGGER_CAPTURE_OUTPUT : VTS_NORMAL_CAPTURE_OUTPUT));
+ /* Release VTS capture node */
+ pcm_close(vcap_pcm);
+ vcap_pcm = NULL;
+ goto out;
+ } else {
+ /* leave enough room for header */
+ fseek(out_vcap_fp, sizeof(struct wav_header), SEEK_SET);
+
+ for (i=0; i < NUM_OF_SAMPLES_TO_CAPTURE; i++) {
+ ret = pcm_read(vcap_pcm, (void*)streaming_buf, (unsigned int)VTS_STREAMING_BUFFER_SIZE);
+ if (ret == 0) {
+ printf("%s - Captured %d samples\n", __func__, VTS_STREAMING_BUFFER_SIZE);
+ /* write capture pcm data to output file */
+ fwrite((void*)streaming_buf, (size_t)VTS_STREAMING_BUFFER_SIZE, 1, out_vcap_fp);
+ } else {
+ printf("%s - Failed to capture requested samples %s \n", __func__, pcm_get_error(vcap_pcm));
+ sleep(10);
+ }
+
+ if ((cap_mode == VTS_TRIGGERED_CAPTURE && vts_dev->thread_exit == true) ||
+ (cap_mode == VTS_NORMAL_CAPTURE && vts_dev->rec_thread_exit == true))
+ break;
+ }
+ }
+
+ /* write header now all information is known */
+ frames = pcm_bytes_to_frames(vcap_pcm, (i * VTS_STREAMING_BUFFER_SIZE));
+ header.data_sz = frames * header.block_align;
+ header.riff_sz = (uint32_t)(header.data_sz + sizeof(header) - 8);
+ fseek(out_vcap_fp, 0, SEEK_SET);
+ fwrite(&header, sizeof(struct wav_header), 1, out_vcap_fp);
+
+ /* close output file */
+ fclose(out_vcap_fp);
+ out_vcap_fp = NULL;
+
+ /* Release VTS capture node */
+ pcm_close(vcap_pcm);
+ vcap_pcm = NULL;
+ sysfs_write("/sys/power/wake_unlock", "vtshw-test", sizeof("vtshw-test"));
+ printf("%s - Wake Lock Released\n", __func__);
+ }
+
+out:
+ if (streaming_buf)
+ free(streaming_buf);
+ return ret;
+}
+
+/******** VTS Trigger Mode support function ***************/
+static void *callback_thread_loop(void * context)
+{
+ char msg[UEVENT_MSG_LEN];
+ struct pollfd fds[2];
+ int exit_sockets[2];
+ int err = 0;
+ int i, n;
+ struct vts_hw_device *vts_dev = (struct vts_hw_device *)context;
+
+ prctl(PR_SET_NAME, (unsigned long)"VTS callback", 0, 0, 0);
+
+#if 0
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, exit_sockets) == -1)
+ goto func_exit;
+
+ vts_close_term_sock(vts_dev);
+ vts_dev->send_sock = exit_sockets[0];
+ vts_dev->term_sock = exit_sockets[1];
+
+ printf("%s - Terminate Socket %d, %d\n", __func__, exit_sockets[0], exit_sockets[1]);
+#else
+ vts_dev->send_sock = -1;
+ vts_dev->term_sock = -1;
+#endif
+
+ memset(fds, 0, 2 * sizeof(struct pollfd));
+ fds[0].events = POLLIN;
+ fds[0].fd = uevent_open_socket(64*1024, true);
+ //fds[0].fd = vts_dev->term_sock;
+ if (fds[0].fd == -1) {
+ printf("Error opening socket for hotplug uevent");
+ goto func_exit;
+ }
+#if 0
+ fds[1].events = POLLIN;
+ fds[1].fd = vts_dev->term_sock;
+#endif
+
+ while (1) {
+ printf("%s: Before poll \n", __func__);
+ /* wait for VTS trigger Uevent */
+ err = poll(fds, 1, -1);
+ printf("%s: After poll \n", __func__);
+
+ if (fds[0].revents & POLLIN) {
+ n = uevent_kernel_multicast_recv(fds[0].fd, msg, UEVENT_MSG_LEN);
+ if (n <= 0) {
+ printf("%s: Cannot Read Socket message %d \n", __func__, __LINE__);
+ continue;
+ }
+ for (i=0; i < n;) {
+ //if (strstr(msg + i, "DEVPATH=/devices/platform/14020000.vts")) {
+ if (strstr(msg + i, "VOICE_WAKEUP_WORD_ID=1")) {
+ printf("%s VTS Trigger received for model %s\n", __func__, (msg+i));
+ /* Start reading data from the VTS IP */
+ fetch_streaming_buffer(vts_dev, VTS_TRIGGERED_CAPTURE);
+ printf("\n%s Want to Continue...then Start Recognitoin again\n", __func__);
+ goto found;
+ }
+ i += strlen(msg + i) + 1;
+ }
+#if EN_DEBUG
+ if (i >= n) {
+ i = 0;
+ printf("%s UEVENT MSG INFO: ---\n", __func__);
+ while (i < n) {
+ printf("%s \n", (msg+i));
+ i += strlen(msg+i)+1;
+ }
+ }
+#else
+ if (i >= n)
+ printf("%s UEVENT is NOT VTS event !!\n", __func__);
+#endif
+ } else {
+ printf("%s: Poll returned %d\n", __func__, err);
+ }
+ if (vts_dev->thread_exit == true)
+ break;
+ }
+
+found:
+ close(fds[0].fd);
+func_exit:
+ vts_close_term_sock(vts_dev);
+ if (vts_dev->thread_exit == false) {
+ /* reset the state to Loaded by stopping the recognition,
+ * so that we can restart the recognition again
+ * if we have received the Hotword notification */
+ /* once the callback thread is closed set vts_stop using sysfs */
+ /* Set active keyphrase for voice recognition */
+ set_mixer_ctrls(vtsMixerHandle, model_recognize_stop_ctlname,
+ odmvoice_triggerrecognize_stop_ctlvalue, MODEL_CONTROL_COUNT, false);
+ vts_dev->vts_state = VTS_RECOGNITION_STOPPED_STATE;
+ /* reset DMIC controls */
+ set_dmic_ctrls(false);
+ }
+ vts_dev->thread_exit = false;
+ printf("%s: Exit \n", __func__);
+ return (void *)(long)err;
+}
+
+void vts_recognition_start(struct vts_hw_device *vts_dev)
+{
+ int ret = 0;
+ pthread_mutex_lock(&vts_dev->lock);
+ if (vts_dev->vts_state == VTS_RECOGNITION_STOPPED_STATE) {
+ if (!vts_dev->model_loaded ) {
+ FILE *pfile = NULL;
+ int rd_sz, bin_sz;
+ char * data = NULL;
+ /* Read model net binary file */
+ pfile = fopen(SOUND_MODEL_ODMVOICE_BINARY, "rb+");
+ if (!pfile) {
+ printf("Model binary should be copied to [%s]\n", SOUND_MODEL_ODMVOICE_BINARY);
+ printf("Failed to open Model Binary from [%s]\n", SOUND_MODEL_ODMVOICE_BINARY);
+ goto error;
+ } else {
+ printf("Successfully opened [%s]!!\n", SOUND_MODEL_ODMVOICE_BINARY);
+ }
+
+ fseek(pfile, 0L, SEEK_END);
+ bin_sz = ftell(pfile);
+ fseek(pfile, 0L, SEEK_SET);
+ printf(" Model %s File size %d \n", SOUND_MODEL_ODMVOICE_BINARY, bin_sz);
+
+ data = (char *)calloc(1, bin_sz);
+ if (!data) {
+ printf("Failed to allocated buffer of Size: %d\n", bin_sz);
+ fclose(pfile);
+ goto error;
+ }
+ /* read file data to allocated buffer */
+ rd_sz = fread(data, 1, bin_sz, pfile);
+
+ if (rd_sz != bin_sz) {
+ printf("%s - Failed to read data from %s file\n", __func__, SOUND_MODEL_ODMVOICE_BINARY);
+ fclose(pfile);
+ goto error;
+ }
+
+ /* Load net binary to VTS driver */
+#ifdef MMAP_INTERFACE_ENABLED
+ load_modelbinary(data, rd_sz);
+#else
+ sysfs_write(VTS_SVOICE_MODEL, data, rd_sz);
+#endif
+
+ fclose(pfile);
+ free(data);
+ vts_dev->model_loaded = true;
+ }
+
+ /* configure DMIC controls */
+ set_dmic_ctrls(true);
+
+ /* Create Callback thread to catch VTS trigger notification through uevent*/
+ vts_dev->thread_exit = false;
+ pthread_create(&vts_dev->callback_thread, (const pthread_attr_t *) NULL,
+ callback_thread_loop, vts_dev);
+
+ set_mixer_ctrls(vtsMixerHandle, model_recognize_start_ctlname,
+ odmvoice_triggerrecognize_start_ctlvalue, MODEL_CONTROL_COUNT, false);
+
+ vts_dev->vts_state = VTS_RECOGNITION_STARTED_STATE;
+ } else {
+ printf("VTS Voice Recognition already running \n");
+ }
+
+error:
+ pthread_mutex_unlock(&vts_dev->lock);
+ printf("%s: Exit Tar-size 1000ms\n", __func__);
+ return;
+}
+
+void vts_recognition_stop(struct vts_hw_device *vts_dev)
+{
+ pthread_mutex_lock(&vts_dev->lock);
+ if (vts_dev->vts_state == VTS_RECOGNITION_STARTED_STATE) {
+ vts_dev->thread_exit = true;
+ /* Stop Callback thread first */
+ if (vts_dev->send_sock >= 0)
+ write(vts_dev->send_sock, "T", 1);
+
+ pthread_mutex_unlock(&vts_dev->lock);
+
+ pthread_join(vts_dev->callback_thread, (void**)NULL);
+
+ pthread_mutex_lock(&vts_dev->lock);
+
+ set_mixer_ctrls(vtsMixerHandle, model_recognize_stop_ctlname,
+ odmvoice_triggerrecognize_stop_ctlvalue, MODEL_CONTROL_COUNT, false);
+
+ /* configure DMIC controls */
+ set_dmic_ctrls(false);
+
+ vts_dev->vts_state = VTS_RECOGNITION_STOPPED_STATE;
+ } else {
+ printf("Sound Model Recognition is NOT Started\n");
+ }
+ pthread_mutex_unlock(&vts_dev->lock);
+ printf("%s: Exit \n", __func__);
+ return;
+}
+
+/******** VTS Recording support function ***************/
+static void *record_thread_loop(void * context)
+{
+ int err = 0;
+ int i, n;
+ struct vts_hw_device *vts_dev = (struct vts_hw_device *)context;
+
+ prctl(PR_SET_NAME, (unsigned long)"VTSRecord", 0, 0, 0);
+
+ printf("%s Started!!\n", __func__);
+ fetch_streaming_buffer(vts_dev, VTS_NORMAL_CAPTURE);
+
+ vts_dev->vts_rec_state = VTS_RECORD_STOPPED_STATE;
+
+ if (vts_dev->rec_thread_exit == false) {
+ /* reset DMIC controls */
+ set_dmic_ctrls(false);
+ }
+ printf("%s: Exit \n", __func__);
+ return (void *)(long)err;
+}
+
+void vts_record_start(struct vts_hw_device *vts_dev)
+{
+ pthread_mutex_lock(&vts_dev->lock);
+ if (vts_dev->vts_rec_state == VTS_RECORD_STOPPED_STATE) {
+ /* configure DMIC controls */
+ set_dmic_ctrls(true);
+
+ /* Create record thread to capture data from VTS IP */
+ vts_dev->rec_thread_exit = false;
+ pthread_create(&vts_dev->rec_thread, (const pthread_attr_t *) NULL,
+ record_thread_loop, vts_dev);
+
+ vts_dev->vts_rec_state = VTS_RECORD_STARTED_STATE;
+ } else {
+ printf("VTS Recording Already started \n");
+ }
+ pthread_mutex_unlock(&vts_dev->lock);
+ printf("%s: Exit \n", __func__);
+ return;
+}
+
+void vts_record_stop(struct vts_hw_device *vts_dev)
+{
+ pthread_mutex_lock(&vts_dev->lock);
+ if (vts_dev->vts_rec_state == VTS_RECORD_STARTED_STATE) {
+ /* Stop record thread first */
+ vts_dev->rec_thread_exit = true;
+ pthread_mutex_unlock(&vts_dev->lock);
+
+ pthread_join(vts_dev->rec_thread, (void**)NULL);
+
+ pthread_mutex_lock(&vts_dev->lock);
+
+ vts_dev->vts_rec_state = VTS_RECORD_STOPPED_STATE;
+
+ /* reset DMIC controls */
+ set_dmic_ctrls(false);
+
+ } else {
+ printf("VTS Recording NOT started yet \n");
+ }
+ pthread_mutex_unlock(&vts_dev->lock);
+ printf("%s: Exit \n", __func__);
+ return;
+}
+
+/******** VTS ODM Reserved2 Mode support function ***************/
+static void *odmrsvd2_thread_loop(void * context)
+{
+ char msg[UEVENT_MSG_LEN];
+ struct pollfd fds[2];
+ int exit_sockets[2];
+ int err = 0;
+ int i, n;
+ struct vts_hw_device *vts_dev = (struct vts_hw_device *)context;
+
+ prctl(PR_SET_NAME, (unsigned long)"VTSODMReserved2Mode", 0, 0, 0);
+
+ printf("%s Started!!\n", __func__);
+
+ memset(fds, 0, 2 * sizeof(struct pollfd));
+ fds[0].events = POLLIN;
+ fds[0].fd = uevent_open_socket(64*1024, true);
+ if (fds[0].fd == -1) {
+ printf("Error opening socket for hotplug uevent");
+ goto func_exit;
+ }
+
+ while (1) {
+ printf("%s: ODMReserved2Mode Before poll \n", __func__);
+ /* wait for VTS ODMReserved2Mode Uevent */
+ err = poll(fds, 1, 10000);
+ printf("%s: ODMReserved2Mode After poll \n", __func__);
+
+ if (fds[0].revents & POLLIN) {
+ n = uevent_kernel_multicast_recv(fds[0].fd, msg, UEVENT_MSG_LEN);
+ if (n <= 0) {
+ printf("%s: Cannot Read Socket message %d \n", __func__, __LINE__);
+ continue;
+ }
+
+ for (i=0; i < n;) {
+ if (strstr(msg + i, "VOICE_WAKEUP_WORD_ID=3")) {
+ printf("%s VTS ODMReserved2Mode UEVENT received %s\n", __func__, (msg+i));
+ goto found;
+ }
+ i += strlen(msg + i) + 1;
+ }
+#if EN_DEBUG
+ if (i >= n) {
+ i = 0;
+ printf("%s ODMReserved2Mode UEVENT MSG INFO: \n", __func__);
+ while (i < n) {
+ printf("%s \n", (msg+i));
+ i += strlen(msg+i)+1;
+ }
+ }
+#else
+ if (i >= n)
+ printf("%s Received UEVENT is NOT ODMReserved2Mode event !!\n", __func__);
+#endif
+ } else {
+ printf("%s: Poll returned %d\n", __func__, err);
+ }
+ if (vts_dev->odmrsvd2_thread_exit == true)
+ break;
+ }
+
+found:
+ close(fds[0].fd);
+func_exit:
+ if (vts_dev->odmrsvd2_thread_exit == false) {
+ /* once callback thread is closed set vts_stop using sysfs */
+ set_mixer_ctrls(vtsMixerHandle, model_recognize_stop_ctlname,
+ odmvoice_reserved2recognize_stop_ctlvalue, MODEL_CONTROL_COUNT, false);
+
+ vts_dev->vts_odmrsvd2_state = VTS_RESERVED2_STOPPED_STATE;
+
+ /* reset DMIC controls */
+ set_dmic_ctrls(false);
+ }
+
+ vts_dev->odmrsvd2_thread_exit = false;
+ printf("%s: Exit \n", __func__);
+ return (void *)(long)err;
+}
+
+void vts_odmrsvd2_start(struct vts_hw_device *vts_dev)
+{
+ pthread_mutex_lock(&vts_dev->lock);
+ if (vts_dev->vts_odmrsvd2_state == VTS_RESERVED2_STOPPED_STATE) {
+ /* Create odmrsvd2 thread to wait on uevent from VTS IP*/
+ vts_dev->odmrsvd2_thread_exit = false;
+ pthread_create(&vts_dev->odmrsvd2_thread, (const pthread_attr_t *) NULL,
+ odmrsvd2_thread_loop, vts_dev);
+
+ /* configure DMIC controls */
+ set_dmic_ctrls(true);
+
+ set_mixer_ctrls(vtsMixerHandle, model_recognize_start_ctlname,
+ odmvoice_reserved2recognize_start_ctlvalue, MODEL_CONTROL_COUNT, false);
+
+ vts_dev->vts_odmrsvd2_state = VTS_RESERVED2_STARTED_STATE;
+ } else {
+ printf("VTS ODMRsvd2 Already started \n");
+ }
+ pthread_mutex_unlock(&vts_dev->lock);
+ printf("%s: Exit \n", __func__);
+ return;
+}
+
+void vts_odmrsvd2_stop(struct vts_hw_device *vts_dev)
+{
+ pthread_mutex_lock(&vts_dev->lock);
+ if (vts_dev->vts_odmrsvd2_state == VTS_RESERVED2_STARTED_STATE) {
+ /* Stop ODMRsvd2 thread first */
+ vts_dev->odmrsvd2_thread_exit = true;
+ /* once callback thread is closed disable voice recognition */
+
+ pthread_mutex_unlock(&vts_dev->lock);
+
+ pthread_join(vts_dev->odmrsvd2_thread, (void**)NULL);
+
+ pthread_mutex_lock(&vts_dev->lock);
+
+ set_mixer_ctrls(vtsMixerHandle, model_recognize_stop_ctlname,
+ odmvoice_reserved2recognize_stop_ctlvalue, MODEL_CONTROL_COUNT, false);
+
+ /* configure DMIC controls */
+ set_dmic_ctrls(false);
+
+ vts_dev->vts_odmrsvd2_state = VTS_RESERVED2_STOPPED_STATE;
+ } else {
+ printf("VTS ODMRsvd2 NOT started yet \n");
+ }
+ pthread_mutex_unlock(&vts_dev->lock);
+ printf("%s: Exit \n", __func__);
+ return;
+}
+
+/****************** Unit test main function *************************/
+
+void print_options(struct vts_hw_device *vts_dev __unused)
+{
+ printf("********************** Generic Dual VA VTS HW Test Options ***********************\n");
+#ifdef MMAP_INTERFACE_ENABLED
+ printf("********************** MMAP interface for Model Binary loading***********************\n");
+#else
+ printf("********************** SYSFS interface for Model Binary loading***********************\n");
+#endif
+ printf("1. Voice Recoginition Start\n");
+ printf("2. Voice Recoginition Stop\n");
+ printf("3. VTS Record Start\n");
+ printf("4. VTS Record Stop\n");
+ printf("5. ODMRsvd2 Mode Start\n");
+ printf("6. ODMRsvd2 Mode Stop\n");
+ printf("7. Exit - VTS Test Application\n");
+ printf("****************************************************************!\n");
+ printf("Enter an Option: \n");
+ return;
+}
+
+int main(void)
+{
+ struct vts_hw_device *vts_dev = NULL;
+ int option, i;
+ int ret = EXIT_SUCCESS;
+ char pstr[50];
+ struct pollfd fds[1];
+
+ vts_dev = &g_vtsdev;
+ dmic_usagecnt = 0;
+
+#ifdef MMAP_INTERFACE_ENABLED
+ /* Open VTS Misc device for loading Model binary through MMAP interface */
+ vtsdev_fd = open("/dev/vts_fio_dev", O_RDWR);
+ if (vtsdev_fd < 0) {
+ printf("%s: Failed to open VTS-Misc device %d\n", __func__,errno);
+ return -EINVAL;
+ }
+
+ /* memory map VTS misc driver */
+ mapped_addr = mmap(NULL, VTSDRV_MISC_MODEL_BIN_MAXSZ, PROT_READ | PROT_WRITE, MAP_SHARED, vtsdev_fd, 0);
+ if (mapped_addr == MAP_FAILED) {
+ printf("%s: Unable to MMAP VTS Model downloadable memory \n", __func__);
+ close(vtsdev_fd);
+ return -EINVAL;
+ }
+#endif
+
+ /* open mixer control */
+ vtsMixerHandle = mixer_open(VTS_MIXER_CARD);
+ if (!vtsMixerHandle) {
+ printf("%s: Failed to open mixer \n", __func__);
+#ifdef MMAP_INTERFACE_ENABLED
+ close(vtsdev_fd);
+#endif
+ return -EINVAL;
+ }
+
+ pthread_mutex_lock(&vts_dev->lock);
+
+ vts_dev->vts_state = VTS_RECOGNITION_STOPPED_STATE;
+ vts_dev->vts_rec_state = VTS_RECORD_STOPPED_STATE;
+ vts_dev->thread_exit = false;
+ vts_dev->model_loaded = false;
+
+ memset(fds, 0, sizeof(struct pollfd));
+ fds[0].events = POLLIN;
+ fds[0].fd = fileno(stdin);
+
+ pthread_mutex_unlock(&vts_dev->lock);
+
+ while (1) {
+ print_options(vts_dev);
+ ret = poll(fds,1,-1);
+ if (fds[0].revents & POLLIN) {
+ if (fgets(pstr, 50, stdin) == NULL) {
+ printf("Failed to get data from stdin \n");
+ continue;
+ }
+ } else {
+ printf("%s - Poll ret value %d\n", __func__, ret);
+ continue;
+ }
+ option = atoi(pstr);
+ printf("%s - Selected option %d\n", __func__, option);
+ /* Called corresponding function based on Option selected */
+ switch (option) {
+ case 1: /* Start loaded sound Model Recognition */
+ vts_recognition_start(vts_dev);
+ break;
+ case 2: /* Stop loaded sound Model Recognition */
+ vts_recognition_stop(vts_dev);
+ break;
+ case 3: /* Start VTS recording */
+ vts_record_start(vts_dev);
+ break;
+ case 4: /* Stop VTS recording */
+ vts_record_stop(vts_dev);
+ break;
+ case 5:
+ vts_odmrsvd2_start(vts_dev);
+ break;
+ case 6:
+ vts_odmrsvd2_stop(vts_dev);
+ break;
+ case 7:
+ check_vts_state(vts_dev);
+ printf("VTS HW Testing completed\n");
+ break;
+
+ default:
+ printf("UNSUPPORTED Option - Try again !\n");
+ break;
+ }
+
+ if (option == 7) break;
+ option = 0;
+ }
+
+ if (vtsMixerHandle) {
+ mixer_close(vtsMixerHandle);
+ vtsMixerHandle = NULL;
+ }
+
+#ifdef MMAP_INTERFACE_ENABLED
+ close(vtsdev_fd);
+#endif
+ return EXIT_SUCCESS;
+}
diff --git a/libaudio/sthal/vts.h b/libaudio/sthal/vts.h
new file mode 100644
index 0000000..1ee7e63
--- /dev/null
+++ b/libaudio/sthal/vts.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _UAPI__SOUND_VTS_H
+#define _UAPI__SOUND_VTS_H
+
+#if defined(__KERNEL__) || defined(__linux__)
+#include <linux/types.h>
+#else
+#include <sys/ioctl.h>
+#endif
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#endif
+
+#define VTSDRV_MISC_IOCTL_WRITE_ODMVOICE _IOW('V', 0x00, int)
+#define VTSDRV_MISC_IOCTL_WRITE_GOOGLE _IOW('V', 0x01, int)
+/* Google model binary size is used, as this is greater than the SVoice model size */
+#define VTSDRV_MISC_MODEL_BIN_MAXSZ 0xB500
+
+#endif /* _UAPI__SOUND_VTS_H */