Merge "msm: ipa3: Adding changes to compile IPA driver for monaco target"
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_eth.c b/drivers/platform/msm/ipa/ipa_clients/ipa_eth.c
index 5eab5c9..6668645 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_eth.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_eth.c
@@ -429,6 +429,7 @@
return ipa_client_type;
}
+#if IPA_ETH_API_VER < 2
static struct ipa_eth_client_pipe_info
*ipa_eth_get_pipe_from_hdl(ipa_eth_hdl_t hdl)
{
@@ -440,6 +441,7 @@
return pipe;
}
+#endif
static int ipa_eth_client_connect_pipe(
@@ -859,19 +861,19 @@
IPA_ETH_DBG("register interface for netdev %s\n", intf->net_dev->name);
/* multiple attach support */
if (strnstr(intf->net_dev->name, STR_ETH0_IFACE, strlen(intf->net_dev->name))) {
- ret = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH0, &vlan_mode);
+ ret = ipa_is_vlan_mode(IPA_VLAN_IF_ETH0, &vlan_mode);
if (ret) {
IPA_ETH_ERR("Could not determine IPA VLAN mode\n");
return ret;
}
} else if (strnstr(intf->net_dev->name, STR_ETH1_IFACE, strlen(intf->net_dev->name))) {
- ret = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH1, &vlan_mode);
+ ret = ipa_is_vlan_mode(IPA_VLAN_IF_ETH1, &vlan_mode);
if (ret) {
IPA_ETH_ERR("Could not determine IPA VLAN mode\n");
return ret;
}
} else {
- ret = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
+ ret = ipa_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
if (ret) {
IPA_ETH_ERR("Could not determine IPA VLAN mode\n");
return ret;
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
index f911cc8..d12bbd0 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ipa_wdi3.h>
@@ -39,6 +40,13 @@
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
+#define IPA_CLIENT_IS_WLAN0_INSTANCE(inst_id) \
+	((inst_id) == 0 || (inst_id) == -1)
+#define IPA_CLIENT_IS_WLAN1_INSTANCE(inst_id) \
+	((inst_id) == 1)
+#define DEFAULT_INSTANCE_ID (-1)
+#define INVALID_INSTANCE_ID (-2)
+
struct ipa_wdi_intf_info {
char netdev_name[IPA_RESOURCE_NAME_MAX];
u8 hdr_len;
@@ -58,100 +66,68 @@
bool is_tx1_used;
u32 sys_pipe_hdl[IPA_WDI_MAX_SUPPORTED_SYS_PIPE];
u32 ipa_pm_hdl;
+ int inst_id;
#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
ipa_wdi_meter_notifier_cb wdi_notify;
#endif
};
-static struct ipa_wdi_context *ipa_wdi_ctx;
+static struct ipa_wdi_context *ipa_wdi_ctx_list[IPA_WDI_INST_MAX];
-static int ipa_wdi_init_internal(struct ipa_wdi_init_in_params *in,
- struct ipa_wdi_init_out_params *out)
+/**
+ * function to assign a handle for the given instance id
+ *
+ * Note: when called through the legacy (non per-instance) API,
+ * at most one handle is allowed.
+ *
+ * @Return handle on success, negative on failure
+ */
+static int assign_hdl_for_inst(int inst_id)
{
- struct ipa_wdi_uc_ready_params uc_ready_params;
- struct ipa_smmu_in_params smmu_in;
- struct ipa_smmu_out_params smmu_out;
+ int hdl;
- if (ipa_wdi_ctx) {
- IPA_WDI_ERR("ipa_wdi_ctx was initialized before\n");
- return -EFAULT;
+ IPA_WDI_DBG("Assigning handle for instance id %d\n", inst_id);
+ if (inst_id <= INVALID_INSTANCE_ID) {
+ IPA_WDI_ERR("Invalid instance id %d\n", inst_id);
+ return -EINVAL;
+	} else if (ipa_wdi_ctx_list[0] && (inst_id == DEFAULT_INSTANCE_ID ||
+		ipa_wdi_ctx_list[0]->inst_id == DEFAULT_INSTANCE_ID)) {
+		IPA_WDI_ERR("Default instance already in use, invalid instance id %d\n",
+			inst_id);
+		return -EINVAL;
+	} else {
+ for (hdl = 0; hdl < IPA_WDI_INST_MAX; hdl++) {
+ if (!ipa_wdi_ctx_list[hdl])
+ break;
+ }
+ }
+ if (hdl == IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Maximum number of instances already registered\n");
+ return -EINVAL;
}
- if (in->wdi_version > IPA_WDI_3 || in->wdi_version < IPA_WDI_1) {
- IPA_WDI_ERR("wrong wdi version: %d\n", in->wdi_version);
- return -EFAULT;
- }
-
- ipa_wdi_ctx = kzalloc(sizeof(*ipa_wdi_ctx), GFP_KERNEL);
- if (ipa_wdi_ctx == NULL) {
- IPA_WDI_ERR("fail to alloc wdi ctx\n");
- return -ENOMEM;
- }
- mutex_init(&ipa_wdi_ctx->lock);
- init_completion(&ipa_wdi_ctx->wdi_completion);
- INIT_LIST_HEAD(&ipa_wdi_ctx->head_intf_list);
-
- ipa_wdi_ctx->wdi_version = in->wdi_version;
- uc_ready_params.notify = in->notify;
- uc_ready_params.priv = in->priv;
-#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
- ipa_wdi_ctx->wdi_notify = in->wdi_notify;
-#endif
-
- if (ipa3_uc_reg_rdyCB(&uc_ready_params) != 0) {
- mutex_destroy(&ipa_wdi_ctx->lock);
- kfree(ipa_wdi_ctx);
- ipa_wdi_ctx = NULL;
- return -EFAULT;
- }
-
- out->is_uC_ready = uc_ready_params.is_uC_ready;
-
- smmu_in.smmu_client = IPA_SMMU_WLAN_CLIENT;
- if (ipa3_get_smmu_params(&smmu_in, &smmu_out))
- out->is_smmu_enabled = false;
- else
- out->is_smmu_enabled = smmu_out.smmu_enable;
-
- ipa_wdi_ctx->is_smmu_enabled = out->is_smmu_enabled;
-
- if (IPA_WDI2_OVER_GSI() || (in->wdi_version == IPA_WDI_3))
- out->is_over_gsi = true;
- else
- out->is_over_gsi = false;
- return 0;
+ return hdl;
}
static int ipa_get_wdi_version_internal(void)
{
- if (ipa_wdi_ctx)
- return ipa_wdi_ctx->wdi_version;
+ if (ipa_wdi_ctx_list[0])
+ return ipa_wdi_ctx_list[0]->wdi_version;
/* default version is IPA_WDI_3 */
return IPA_WDI_3;
}
static bool ipa_wdi_is_tx1_used_internal(void)
{
- if (ipa_wdi_ctx)
- return ipa_wdi_ctx->is_tx1_used;
+ if (ipa_wdi_ctx_list[0])
+ return ipa_wdi_ctx_list[0]->is_tx1_used;
return 0;
}
-static int ipa_wdi_cleanup_internal(void)
+static void ipa_wdi_pm_cb(void *p, enum ipa_pm_cb_event event)
{
- struct ipa_wdi_intf_info *entry;
- struct ipa_wdi_intf_info *next;
-
- /* clear interface list */
- list_for_each_entry_safe(entry, next,
- &ipa_wdi_ctx->head_intf_list, link) {
- list_del(&entry->link);
- kfree(entry);
- }
- mutex_destroy(&ipa_wdi_ctx->lock);
- kfree(ipa_wdi_ctx);
- ipa_wdi_ctx = NULL;
- return 0;
+ IPA_WDI_DBG("received pm event %d\n", event);
}
static int ipa_wdi_commit_partial_hdr(
@@ -190,7 +166,114 @@
return 0;
}
-static int ipa_wdi_reg_intf_internal(struct ipa_wdi_reg_intf_in_params *in)
+/**
+ * function to query the WDI capabilities
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+static int ipa_wdi_get_capabilities_internal(
+ struct ipa_wdi_capabilities_out_params *out)
+{
+ if (out == NULL) {
+ IPA_WDI_ERR("invalid params out=%pK\n", out);
+ return -EINVAL;
+ }
+
+ out->num_of_instances = IPA_WDI_INST_MAX;
+ IPA_WDI_DBG("Wdi Capability: %d\n", out->num_of_instances);
+ return 0;
+}
+
+/**
+ * function to init WDI IPA offload data path
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+static int ipa_wdi_init_per_inst_internal(struct ipa_wdi_init_in_params *in,
+ struct ipa_wdi_init_out_params *out)
+{
+ struct ipa_wdi_uc_ready_params uc_ready_params;
+ struct ipa_smmu_in_params smmu_in;
+ struct ipa_smmu_out_params smmu_out;
+ int hdl;
+
+ if (!(in && out)) {
+ IPA_WDI_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+ return -EINVAL;
+ }
+
+ if (in->wdi_version > IPA_WDI_3 || in->wdi_version < IPA_WDI_1) {
+ IPA_WDI_ERR("wrong wdi version: %d\n", in->wdi_version);
+ return -EFAULT;
+ }
+
+ hdl = assign_hdl_for_inst(in->inst_id);
+ if (hdl < 0) {
+ IPA_WDI_ERR("Error assigning hdl\n");
+ return hdl;
+ }
+
+	IPA_WDI_DBG("Assigned Handle %d\n", hdl);
+ ipa_wdi_ctx_list[hdl] = kzalloc(sizeof(struct ipa_wdi_context), GFP_KERNEL);
+ if (ipa_wdi_ctx_list[hdl] == NULL) {
+ IPA_WDI_ERR("fail to alloc wdi ctx\n");
+ return -ENOMEM;
+ }
+ mutex_init(&ipa_wdi_ctx_list[hdl]->lock);
+ init_completion(&ipa_wdi_ctx_list[hdl]->wdi_completion);
+ INIT_LIST_HEAD(&ipa_wdi_ctx_list[hdl]->head_intf_list);
+
+ ipa_wdi_ctx_list[hdl]->inst_id = in->inst_id;
+ ipa_wdi_ctx_list[hdl]->wdi_version = in->wdi_version;
+ uc_ready_params.notify = in->notify;
+ uc_ready_params.priv = in->priv;
+
+ if (ipa3_uc_reg_rdyCB(&uc_ready_params) != 0) {
+ mutex_destroy(&ipa_wdi_ctx_list[hdl]->lock);
+ kfree(ipa_wdi_ctx_list[hdl]);
+ ipa_wdi_ctx_list[hdl] = NULL;
+ return -EFAULT;
+ }
+
+ out->is_uC_ready = uc_ready_params.is_uC_ready;
+
+ if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[hdl]->inst_id))
+ smmu_in.smmu_client = IPA_SMMU_WLAN_CLIENT;
+ else
+ smmu_in.smmu_client = IPA_SMMU_WLAN1_CLIENT;
+
+ if (ipa3_get_smmu_params(&smmu_in, &smmu_out))
+ out->is_smmu_enabled = false;
+ else
+ out->is_smmu_enabled = smmu_out.smmu_enable;
+
+ ipa_wdi_ctx_list[hdl]->is_smmu_enabled = out->is_smmu_enabled;
+
+ if (IPA_WDI2_OVER_GSI() || (in->wdi_version == IPA_WDI_3))
+ out->is_over_gsi = true;
+ else
+ out->is_over_gsi = false;
+
+ out->hdl = hdl;
+
+ return 0;
+}
+
+/**
+ * function to register interface
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+static int ipa_wdi_reg_intf_per_inst_internal(
+ struct ipa_wdi_reg_intf_in_params *in)
{
struct ipa_ioc_add_hdr *hdr;
struct ipa_wdi_intf_info *new_intf;
@@ -207,19 +290,32 @@
return -EINVAL;
}
- if (!ipa_wdi_ctx) {
+	if (in->hdl < 0 || in->hdl >= IPA_WDI_INST_MAX) {
+ IPA_WDI_ERR("Invalid handle =%d\n", in->hdl);
+ return -EINVAL;
+ }
+
+ if (!ipa_wdi_ctx_list[in->hdl]) {
IPA_WDI_ERR("wdi ctx is not initialized\n");
return -EPERM;
}
+ if (ipa_wdi_ctx_list[in->hdl]->wdi_version >= IPA_WDI_1 &&
+ ipa_wdi_ctx_list[in->hdl]->wdi_version < IPA_WDI_3 &&
+ in->hdl > 0) {
+ IPA_WDI_ERR("More than one instance not supported for WDI ver = %d\n",
+ ipa_wdi_ctx_list[in->hdl]->wdi_version);
+ return -EPERM;
+ }
+
IPA_WDI_DBG("register interface for netdev %s\n",
in->netdev_name);
- mutex_lock(&ipa_wdi_ctx->lock);
- list_for_each_entry(entry, &ipa_wdi_ctx->head_intf_list, link)
+ mutex_lock(&ipa_wdi_ctx_list[in->hdl]->lock);
+ list_for_each_entry(entry, &ipa_wdi_ctx_list[in->hdl]->head_intf_list, link)
if (strcmp(entry->netdev_name, in->netdev_name) == 0) {
IPA_WDI_DBG("intf was added before.\n");
- mutex_unlock(&ipa_wdi_ctx->lock);
+ mutex_unlock(&ipa_wdi_ctx_list[in->hdl]->lock);
return 0;
}
@@ -233,7 +329,7 @@
new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL);
if (new_intf == NULL) {
IPA_WDI_ERR("fail to alloc new intf\n");
- mutex_unlock(&ipa_wdi_ctx->lock);
+ mutex_unlock(&ipa_wdi_ctx_list[in->hdl]->lock);
return -ENOMEM;
}
@@ -265,14 +361,16 @@
/* populate tx prop */
tx.num_props = 2;
tx.prop = tx_prop;
-
+ IPA_WDI_DBG("Setting tx/rx props\n");
memset(tx_prop, 0, sizeof(tx_prop));
tx_prop[0].ip = IPA_IP_v4;
if (ipa3_get_ctx()->ipa_wdi3_over_gsi) {
if (in->is_tx1_used && ipa3_ctx->is_wdi3_tx1_needed)
- tx_prop[0].dst_pipe = IPA_CLIENT_WLAN2_CONS1;
- else
+ tx_prop[0].dst_pipe = IPA_CLIENT_WLAN2_CONS1;
+ else if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[in->hdl]->inst_id))
tx_prop[0].dst_pipe = IPA_CLIENT_WLAN2_CONS;
+ else
+ tx_prop[0].dst_pipe = IPA_CLIENT_WLAN4_CONS;
}
else
tx_prop[0].dst_pipe = IPA_CLIENT_WLAN1_CONS;
@@ -285,8 +383,10 @@
if (ipa3_get_ctx()->ipa_wdi3_over_gsi) {
if (in->is_tx1_used && ipa3_ctx->is_wdi3_tx1_needed)
tx_prop[1].dst_pipe = IPA_CLIENT_WLAN2_CONS1;
- else
+ else if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[in->hdl]->inst_id))
tx_prop[1].dst_pipe = IPA_CLIENT_WLAN2_CONS;
+ else
+ tx_prop[1].dst_pipe = IPA_CLIENT_WLAN4_CONS;
}
else
tx_prop[1].dst_pipe = IPA_CLIENT_WLAN1_CONS;
@@ -300,10 +400,14 @@
rx.prop = rx_prop;
memset(rx_prop, 0, sizeof(rx_prop));
rx_prop[0].ip = IPA_IP_v4;
- if (ipa_wdi_ctx->wdi_version == IPA_WDI_3)
- rx_prop[0].src_pipe = IPA_CLIENT_WLAN2_PROD;
- else
+ if (ipa_wdi_ctx_list[in->hdl]->wdi_version == IPA_WDI_3) {
+ if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[in->hdl]->inst_id))
+ rx_prop[0].src_pipe = IPA_CLIENT_WLAN2_PROD;
+ else
+ rx_prop[0].src_pipe = IPA_CLIENT_WLAN3_PROD;
+ } else {
rx_prop[0].src_pipe = IPA_CLIENT_WLAN1_PROD;
+ }
rx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
if (in->is_meta_data_valid) {
rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
@@ -312,107 +416,52 @@
}
rx_prop[1].ip = IPA_IP_v6;
- if (ipa_wdi_ctx->wdi_version == IPA_WDI_3)
- rx_prop[1].src_pipe = IPA_CLIENT_WLAN2_PROD;
- else
+ if (ipa_wdi_ctx_list[in->hdl]->wdi_version == IPA_WDI_3) {
+ if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[in->hdl]->inst_id))
+ rx_prop[1].src_pipe = IPA_CLIENT_WLAN2_PROD;
+ else
+ rx_prop[1].src_pipe = IPA_CLIENT_WLAN3_PROD;
+ } else {
rx_prop[1].src_pipe = IPA_CLIENT_WLAN1_PROD;
+ }
rx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
if (in->is_meta_data_valid) {
rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
rx_prop[1].attrib.meta_data = in->meta_data;
rx_prop[1].attrib.meta_data_mask = in->meta_data_mask;
}
-
if (ipa3_register_intf(in->netdev_name, &tx, &rx)) {
IPA_WDI_ERR("fail to add interface prop\n");
ret = -EFAULT;
- goto fail_commit_hdr;
}
-
- list_add(&new_intf->link, &ipa_wdi_ctx->head_intf_list);
- init_completion(&ipa_wdi_ctx->wdi_completion);
+ IPA_WDI_DBG("Done Register Interface\n");
+ list_add(&new_intf->link, &ipa_wdi_ctx_list[in->hdl]->head_intf_list);
+ init_completion(&ipa_wdi_ctx_list[in->hdl]->wdi_completion);
kfree(hdr);
- mutex_unlock(&ipa_wdi_ctx->lock);
+ mutex_unlock(&ipa_wdi_ctx_list[in->hdl]->lock);
return 0;
fail_commit_hdr:
kfree(hdr);
fail_alloc_hdr:
kfree(new_intf);
- mutex_unlock(&ipa_wdi_ctx->lock);
+ mutex_unlock(&ipa_wdi_ctx_list[in->hdl]->lock);
return ret;
}
-static int ipa_wdi_dereg_intf_internal(const char *netdev_name)
-{
- int len, ret = 0;
- struct ipa_ioc_del_hdr *hdr = NULL;
- struct ipa_wdi_intf_info *entry;
- struct ipa_wdi_intf_info *next;
-
- if (!netdev_name) {
- IPA_WDI_ERR("no netdev name.\n");
- return -EINVAL;
- }
-
- if (!ipa_wdi_ctx) {
- IPA_WDI_ERR("wdi ctx is not initialized.\n");
- return -EPERM;
- }
-
- mutex_lock(&ipa_wdi_ctx->lock);
- list_for_each_entry_safe(entry, next, &ipa_wdi_ctx->head_intf_list,
- link)
- if (strcmp(entry->netdev_name, netdev_name) == 0) {
- len = sizeof(struct ipa_ioc_del_hdr) +
- 2 * sizeof(struct ipa_hdr_del);
- hdr = kzalloc(len, GFP_KERNEL);
- if (hdr == NULL) {
- IPA_WDI_ERR("fail to alloc %d bytes\n", len);
- mutex_unlock(&ipa_wdi_ctx->lock);
- return -ENOMEM;
- }
-
- hdr->commit = 1;
- hdr->num_hdls = 2;
- hdr->hdl[0].hdl = entry->partial_hdr_hdl[0];
- hdr->hdl[1].hdl = entry->partial_hdr_hdl[1];
- IPA_WDI_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
- hdr->hdl[0].hdl, hdr->hdl[1].hdl);
-
- if (ipa3_del_hdr(hdr)) {
- IPA_WDI_ERR("fail to delete partial header\n");
- ret = -EFAULT;
- goto fail;
- }
-
- if (ipa3_deregister_intf(entry->netdev_name)) {
- IPA_WDI_ERR("fail to del interface props\n");
- ret = -EFAULT;
- goto fail;
- }
-
- list_del(&entry->link);
- kfree(entry);
-
- break;
- }
-
-fail:
- kfree(hdr);
- mutex_unlock(&ipa_wdi_ctx->lock);
- return ret;
-}
-
-
-static void ipa_wdi_pm_cb(void *p, enum ipa_pm_cb_event event)
-{
- IPA_WDI_DBG("received pm event %d\n", event);
-}
-
-static int ipa_wdi_conn_pipes_internal(struct ipa_wdi_conn_in_params *in,
- struct ipa_wdi_conn_out_params *out)
+/**
+ * function to connect pipes
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+static int ipa_wdi_conn_pipes_per_inst_internal(struct ipa_wdi_conn_in_params *in,
+ struct ipa_wdi_conn_out_params *out)
{
int i, j, ret = 0;
struct ipa_pm_register_params pm_params;
@@ -427,33 +476,46 @@
return -EINVAL;
}
- if (!ipa_wdi_ctx) {
+ if (in->hdl < 0 || in->hdl >= IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Invalid handle %d\n", in->hdl);
+ return -EINVAL;
+ }
+
+ if (!ipa_wdi_ctx_list[in->hdl]) {
IPA_WDI_ERR("wdi ctx is not initialized\n");
return -EPERM;
}
+ if (ipa_wdi_ctx_list[in->hdl]->wdi_version >= IPA_WDI_1 &&
+ ipa_wdi_ctx_list[in->hdl]->wdi_version < IPA_WDI_3 &&
+ in->hdl > 0) {
+ IPA_WDI_ERR("More than one instance not supported for WDI ver = %d\n",
+ ipa_wdi_ctx_list[in->hdl]->wdi_version);
+ return -EPERM;
+ }
+
if (in->num_sys_pipe_needed > IPA_WDI_MAX_SUPPORTED_SYS_PIPE) {
IPA_WDI_ERR("ipa can only support up to %d sys pipe\n",
IPA_WDI_MAX_SUPPORTED_SYS_PIPE);
return -EINVAL;
}
- ipa_wdi_ctx->num_sys_pipe_needed = in->num_sys_pipe_needed;
+ ipa_wdi_ctx_list[in->hdl]->num_sys_pipe_needed = in->num_sys_pipe_needed;
IPA_WDI_DBG("number of sys pipe %d\n", in->num_sys_pipe_needed);
ipa_ep_idx_tx1 = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS1);
if ((ipa_ep_idx_tx1 != IPA_EP_NOT_ALLOCATED) &&
(ipa_ep_idx_tx1 < IPA3_MAX_NUM_PIPES) &&
ipa3_ctx->is_wdi3_tx1_needed) {
- ipa_wdi_ctx->is_tx1_used = in->is_tx1_used;
+ ipa_wdi_ctx_list[in->hdl]->is_tx1_used = in->is_tx1_used;
} else
- ipa_wdi_ctx->is_tx1_used = false;
+ ipa_wdi_ctx_list[in->hdl]->is_tx1_used = false;
IPA_WDI_DBG("number of sys pipe %d,Tx1 asked=%d,Tx1 supported=%d\n",
in->num_sys_pipe_needed, in->is_tx1_used,
ipa3_ctx->is_wdi3_tx1_needed);
/* setup sys pipe when needed */
- for (i = 0; i < ipa_wdi_ctx->num_sys_pipe_needed; i++) {
+ for (i = 0; i < in->num_sys_pipe_needed; i++) {
ret = ipa_setup_sys_pipe(&in->sys_in[i],
- &ipa_wdi_ctx->sys_pipe_hdl[i]);
+ &ipa_wdi_ctx_list[in->hdl]->sys_pipe_hdl[i]);
if (ret) {
IPA_WDI_ERR("fail to setup sys pipe %d\n", i);
ret = -EFAULT;
@@ -462,18 +524,21 @@
}
memset(&pm_params, 0, sizeof(pm_params));
- pm_params.name = "wdi";
+ if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[in->hdl]->inst_id))
+ pm_params.name = "wdi";
+ else
+ pm_params.name = "wdi1";
pm_params.callback = ipa_wdi_pm_cb;
pm_params.user_data = NULL;
pm_params.group = IPA_PM_GROUP_DEFAULT;
- if (ipa_pm_register(&pm_params, &ipa_wdi_ctx->ipa_pm_hdl)) {
+ if (ipa_pm_register(&pm_params, &ipa_wdi_ctx_list[in->hdl]->ipa_pm_hdl)) {
IPA_WDI_ERR("fail to register ipa pm\n");
ret = -EFAULT;
goto fail_setup_sys_pipe;
}
-
- if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
- if (ipa3_conn_wdi3_pipes(in, out, ipa_wdi_ctx->wdi_notify)) {
+ IPA_WDI_DBG("PM handle Registered\n");
+ if (ipa_wdi_ctx_list[in->hdl]->wdi_version == IPA_WDI_3) {
+ if (ipa3_conn_wdi3_pipes(in, out, ipa_wdi_ctx_list[in->hdl]->wdi_notify)) {
IPA_WDI_ERR("fail to setup wdi pipes\n");
ret = -EFAULT;
goto fail_connect_pipe;
@@ -484,7 +549,7 @@
memset(&out_tx, 0, sizeof(out_tx));
memset(&out_rx, 0, sizeof(out_rx));
#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
- in_rx.wdi_notify = ipa_wdi_ctx->wdi_notify;
+ in_rx.wdi_notify = ipa_wdi_ctx_list[in->hdl]->wdi_notify;
#endif
if (in->is_smmu_enabled == false) {
/* firsr setup rx pipe */
@@ -514,7 +579,7 @@
ret = -EFAULT;
goto fail_connect_pipe;
}
- ipa_wdi_ctx->rx_pipe_hdl = out_rx.clnt_hdl;
+ ipa_wdi_ctx_list[in->hdl]->rx_pipe_hdl = out_rx.clnt_hdl;
out->rx_uc_db_pa = out_rx.uc_door_bell_pa;
IPA_WDI_DBG("rx uc db pa: 0x%pad\n", &out->rx_uc_db_pa);
@@ -543,7 +608,7 @@
ret = -EFAULT;
goto fail;
}
- ipa_wdi_ctx->tx_pipe_hdl = out_tx.clnt_hdl;
+ ipa_wdi_ctx_list[in->hdl]->tx_pipe_hdl = out_tx.clnt_hdl;
out->tx_uc_db_pa = out_tx.uc_door_bell_pa;
IPA_WDI_DBG("tx uc db pa: 0x%pad\n", &out->tx_uc_db_pa);
} else { /* smmu is enabled */
@@ -574,7 +639,7 @@
ret = -EFAULT;
goto fail_connect_pipe;
}
- ipa_wdi_ctx->rx_pipe_hdl = out_rx.clnt_hdl;
+ ipa_wdi_ctx_list[in->hdl]->rx_pipe_hdl = out_rx.clnt_hdl;
out->rx_uc_db_pa = out_rx.uc_door_bell_pa;
IPA_WDI_DBG("rx uc db pa: 0x%pad\n", &out->rx_uc_db_pa);
@@ -603,100 +668,72 @@
ret = -EFAULT;
goto fail;
}
- ipa_wdi_ctx->tx_pipe_hdl = out_tx.clnt_hdl;
+ ipa_wdi_ctx_list[in->hdl]->tx_pipe_hdl = out_tx.clnt_hdl;
out->tx_uc_db_pa = out_tx.uc_door_bell_pa;
- ret = ipa_pm_associate_ipa_cons_to_client(ipa_wdi_ctx->ipa_pm_hdl,
- in_tx.sys.client);
+ ret = ipa_pm_associate_ipa_cons_to_client(ipa_wdi_ctx_list[in->hdl]->ipa_pm_hdl,
+ in_tx.sys.client);
if (ret) {
IPA_WDI_ERR("fail to associate cons with PM %d\n", ret);
goto fail;
}
IPA_WDI_DBG("tx uc db pa: 0x%pad\n", &out->tx_uc_db_pa);
}
+ IPA_WDI_DBG("conn pipes done\n");
}
return 0;
fail:
- ipa3_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl);
+ ipa3_disconnect_wdi_pipe(ipa_wdi_ctx_list[in->hdl]->rx_pipe_hdl);
fail_connect_pipe:
- ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl);
+ ipa_pm_deregister(ipa_wdi_ctx_list[in->hdl]->ipa_pm_hdl);
fail_setup_sys_pipe:
for (j = 0; j < i; j++)
- ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[j]);
+ ipa_teardown_sys_pipe(ipa_wdi_ctx_list[in->hdl]->sys_pipe_hdl[j]);
return ret;
}
-static int ipa_wdi_disconn_pipes_internal(void)
-{
- int i, ipa_ep_idx_rx, ipa_ep_idx_tx;
- int ipa_ep_idx_tx1 = IPA_EP_NOT_ALLOCATED;
-
- if (!ipa_wdi_ctx) {
- IPA_WDI_ERR("wdi ctx is not initialized\n");
- return -EPERM;
- }
-
- /* tear down sys pipe if needed */
- for (i = 0; i < ipa_wdi_ctx->num_sys_pipe_needed; i++) {
- if (ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[i])) {
- IPA_WDI_ERR("fail to tear down sys pipe %d\n", i);
- return -EFAULT;
- }
- }
-
- if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
- ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
- ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
- if (ipa_wdi_ctx->is_tx1_used)
- ipa_ep_idx_tx1 =
- ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS1);
- } else {
- ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
- ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
- }
-
- if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
- if (ipa3_disconn_wdi3_pipes(
- ipa_ep_idx_tx, ipa_ep_idx_rx, ipa_ep_idx_tx1)) {
- IPA_WDI_ERR("fail to tear down wdi pipes\n");
- return -EFAULT;
- }
- } else {
- if (ipa3_disconnect_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
- IPA_WDI_ERR("fail to tear down wdi tx pipes\n");
- return -EFAULT;
- }
- if (ipa3_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
- IPA_WDI_ERR("fail to tear down wdi rx pipes\n");
- return -EFAULT;
- }
- }
-
- if (ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl)) {
- IPA_WDI_ERR("fail to deregister ipa pm\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int ipa_wdi_enable_pipes_internal(void)
+/**
+ * function to enable IPA offload data path
+ *
+ * @hdl: hdl to wdi client
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa_wdi_enable_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
{
int ret;
int ipa_ep_idx_tx, ipa_ep_idx_rx;
int ipa_ep_idx_tx1 = IPA_EP_NOT_ALLOCATED;
- if (!ipa_wdi_ctx) {
+	if (hdl < 0 || hdl >= IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Invalid handle %d\n", hdl);
+		return -EFAULT;
+	}
+
+ if (!ipa_wdi_ctx_list[hdl]) {
IPA_WDI_ERR("wdi ctx is not initialized.\n");
return -EPERM;
}
- if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
- ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
- ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
- if (ipa_wdi_ctx->is_tx1_used)
+ if (ipa_wdi_ctx_list[hdl]->wdi_version >= IPA_WDI_1 &&
+ ipa_wdi_ctx_list[hdl]->wdi_version < IPA_WDI_3 &&
+ hdl > 0) {
+ IPA_WDI_ERR("More than one instance not supported for WDI ver = %d\n",
+ ipa_wdi_ctx_list[hdl]->wdi_version);
+ return -EPERM;
+ }
+
+ if (ipa_wdi_ctx_list[hdl]->wdi_version == IPA_WDI_3) {
+ if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[hdl]->inst_id)) {
+ ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+ ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+ } else {
+ ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN3_PROD);
+ ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN4_CONS);
+ }
+ if (ipa_wdi_ctx_list[hdl]->is_tx1_used)
ipa_ep_idx_tx1 =
ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS1);
} else {
@@ -706,40 +743,39 @@
if (ipa_ep_idx_tx <= 0 || ipa_ep_idx_rx <= 0)
return -EFAULT;
-
- ret = ipa_pm_activate_sync(ipa_wdi_ctx->ipa_pm_hdl);
+ ret = ipa_pm_activate_sync(ipa_wdi_ctx_list[hdl]->ipa_pm_hdl);
if (ret) {
IPA_WDI_ERR("fail to activate ipa pm\n");
return -EFAULT;
}
-
- if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
+ IPA_WDI_DBG("Enable WDI pipes\n");
+ if (ipa_wdi_ctx_list[hdl]->wdi_version == IPA_WDI_3) {
if (ipa3_enable_wdi3_pipes(
ipa_ep_idx_tx, ipa_ep_idx_rx, ipa_ep_idx_tx1)) {
IPA_WDI_ERR("fail to enable wdi pipes\n");
return -EFAULT;
}
} else {
- if ((ipa_wdi_ctx->tx_pipe_hdl >= IPA3_MAX_NUM_PIPES) ||
- (ipa_wdi_ctx->tx_pipe_hdl < 0) ||
- (ipa_wdi_ctx->rx_pipe_hdl >= IPA3_MAX_NUM_PIPES) ||
- (ipa_wdi_ctx->rx_pipe_hdl < 0)) {
+ if ((ipa_wdi_ctx_list[hdl]->tx_pipe_hdl >= IPA3_MAX_NUM_PIPES) ||
+ (ipa_wdi_ctx_list[hdl]->tx_pipe_hdl < 0) ||
+ (ipa_wdi_ctx_list[hdl]->rx_pipe_hdl >= IPA3_MAX_NUM_PIPES) ||
+ (ipa_wdi_ctx_list[hdl]->rx_pipe_hdl < 0)) {
IPA_WDI_ERR("pipe handle not valid\n");
return -EFAULT;
}
- if (ipa3_enable_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
+ if (ipa3_enable_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
IPA_WDI_ERR("fail to enable wdi tx pipe\n");
return -EFAULT;
}
- if (ipa3_resume_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
+ if (ipa3_resume_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
IPA_WDI_ERR("fail to resume wdi tx pipe\n");
return -EFAULT;
}
- if (ipa3_enable_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
+ if (ipa3_enable_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
IPA_WDI_ERR("fail to enable wdi rx pipe\n");
return -EFAULT;
}
- if (ipa3_resume_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
+ if (ipa3_resume_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
IPA_WDI_ERR("fail to resume wdi rx pipe\n");
return -EFAULT;
}
@@ -748,75 +784,40 @@
return 0;
}
-static int ipa_wdi_disable_pipes_internal(void)
-{
- int ret;
- int ipa_ep_idx_tx, ipa_ep_idx_rx;
- int ipa_ep_idx_tx1 = IPA_EP_NOT_ALLOCATED;
-
- if (!ipa_wdi_ctx) {
- IPA_WDI_ERR("wdi ctx is not initialized.\n");
- return -EPERM;
- }
-
- if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
- ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
- ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
- if (ipa_wdi_ctx->is_tx1_used)
- ipa_ep_idx_tx1 =
- ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS1);
- } else {
- ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
- ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
- }
-
- if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
- if (ipa3_disable_wdi3_pipes(
- ipa_ep_idx_tx, ipa_ep_idx_rx, ipa_ep_idx_tx1)) {
- IPA_WDI_ERR("fail to disable wdi pipes\n");
- return -EFAULT;
- }
- } else {
- if (ipa3_suspend_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
- IPA_WDI_ERR("fail to suspend wdi tx pipe\n");
- return -EFAULT;
- }
- if (ipa3_disable_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
- IPA_WDI_ERR("fail to disable wdi tx pipe\n");
- return -EFAULT;
- }
- if (ipa3_suspend_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
- IPA_WDI_ERR("fail to suspend wdi rx pipe\n");
- return -EFAULT;
- }
- if (ipa3_disable_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
- IPA_WDI_ERR("fail to disable wdi rx pipe\n");
- return -EFAULT;
- }
- }
-
- ret = ipa_pm_deactivate_sync(ipa_wdi_ctx->ipa_pm_hdl);
- if (ret) {
- IPA_WDI_ERR("fail to deactivate ipa pm\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int ipa_wdi_set_perf_profile_internal(struct ipa_wdi_perf_profile *profile)
+/**
+ * set IPA clock bandwidth based on data rates
+ *
+ * @hdl: hdl to wdi client
+ * @profile: [in] BandWidth profile to use
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa_wdi_set_perf_profile_per_inst_internal(ipa_wdi_hdl_t hdl,
+ struct ipa_wdi_perf_profile *profile)
{
int res = 0;
-
if (profile == NULL) {
IPA_WDI_ERR("Invalid input\n");
return -EINVAL;
}
+	if (hdl < 0 || hdl >= IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Invalid Handle %d\n", hdl);
+		return -EFAULT;
+	}
+
+	if (!ipa_wdi_ctx_list[hdl]) {
+		IPA_WDI_ERR("wdi ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (ipa_wdi_ctx_list[hdl]->wdi_version >= IPA_WDI_1 &&
+		ipa_wdi_ctx_list[hdl]->wdi_version < IPA_WDI_3 &&
+		hdl > 0) {
+		IPA_WDI_ERR("More than one instance not supported for WDI ver = %d\n",
+				ipa_wdi_ctx_list[hdl]->wdi_version);
+		return -EPERM;
+	}
+
if (ipa3_ctx->use_pm_wrapper) {
res = ipa_pm_wrapper_wdi_set_perf_profile_internal(profile);
} else {
- res = ipa_pm_set_throughput(ipa_wdi_ctx->ipa_pm_hdl,
+ res = ipa_pm_set_throughput(ipa_wdi_ctx_list[hdl]->ipa_pm_hdl,
profile->max_supported_bw_mbps);
}
@@ -828,6 +829,464 @@
return res;
}
+/**
+ * function to create smmu mapping
+ *
+ * @hdl: hdl to wdi client
+ * @num_buffers: number of buffers
+ * @info: wdi buffer info
+ */
+static int ipa_wdi_create_smmu_mapping_per_inst_internal(ipa_wdi_hdl_t hdl,
+ u32 num_buffers,
+ struct ipa_wdi_buffer_info *info)
+{
+ struct ipa_smmu_cb_ctx *cb;
+ int i;
+ int ret = 0;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ if (!info) {
+ IPAERR_RL("info = %pK\n", info);
+ return -EINVAL;
+ }
+
+ if (hdl < 0 || hdl >= IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Invalid Handle %d\n", hdl);
+ return -EFAULT;
+ }
+
+ if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[hdl]->inst_id))
+ cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+ else
+ cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN1);
+
+ if (!cb->valid) {
+ IPA_WDI_ERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+
+ if ((IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[hdl]->inst_id) &&
+ ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]) ||
+ (IPA_CLIENT_IS_WLAN1_INSTANCE(ipa_wdi_ctx_list[hdl]->inst_id) &&
+ ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN1])) {
+ IPA_WDI_ERR("IPA SMMU not enabled\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_buffers; i++) {
+ IPA_WDI_DBG_LOW("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+ &info[i].pa, info[i].iova, info[i].size);
+ info[i].result = ipa3_iommu_map(cb->iommu_domain,
+ rounddown(info[i].iova, PAGE_SIZE),
+ rounddown(info[i].pa, PAGE_SIZE),
+ roundup(info[i].size + info[i].pa -
+ rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE),
+ prot);
+ }
+
+ return ret;
+}
+
+
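As an aside, the page rounding used in the mapping loop above (and the matching unmap below) handles buffers whose physical address is not page aligned: the IOMMU mapping starts at the page containing pa, and its length is grown to cover the in-page offset plus the buffer. A standalone sketch of that arithmetic, assuming a generic helper that is not part of the driver:

/* Sketch only: mirrors the rounddown()/roundup() math used in the smmu map/unmap loops. */
static void wdi_aligned_region(phys_addr_t pa, size_t size,
			       phys_addr_t *base, size_t *len)
{
	*base = rounddown(pa, PAGE_SIZE);			/* page containing pa */
	*len = roundup(size + (pa - *base), PAGE_SIZE);	/* cover in-page offset plus buffer */
}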
+/**
+ * function to release smmu mapping
+ *
+ * @hdl: hdl to wdi client
+ * @num_buffers: number of buffers
+ *
+ * @info: wdi buffer info
+ */
+static int ipa_wdi_release_smmu_mapping_per_inst_internal(ipa_wdi_hdl_t hdl,
+ u32 num_buffers,
+ struct ipa_wdi_buffer_info *info)
+{
+ struct ipa_smmu_cb_ctx *cb;
+ int i;
+ int ret = 0;
+
+ if (!info) {
+ IPAERR_RL("info = %pK\n", info);
+ return -EINVAL;
+ }
+
+ if (hdl < 0 || hdl >= IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Invalid Handle %d\n", hdl);
+ return -EFAULT;
+ }
+
+ if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[hdl]->inst_id))
+ cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+ else
+ cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN1);
+
+ if (!cb->valid) {
+ IPA_WDI_ERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_buffers; i++) {
+ IPA_WDI_DBG_LOW("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+ &info[i].pa, info[i].iova, info[i].size);
+ info[i].result = iommu_unmap(cb->iommu_domain,
+ rounddown(info[i].iova, PAGE_SIZE),
+ roundup(info[i].size + info[i].pa -
+ rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE));
+ }
+
+ return ret;
+}
+
+/**
+ * clean up WDI IPA offload data path
+ *
+ * @hdl: hdl to wdi client
+ *
+ * @Return 0 on success, negative on failure
+ */
+static int ipa_wdi_cleanup_per_inst_internal(ipa_wdi_hdl_t hdl)
+{
+ struct ipa_wdi_intf_info *entry;
+ struct ipa_wdi_intf_info *next;
+
+	if (hdl < 0 || hdl >= IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Invalid Handle %d\n", hdl);
+		return -EFAULT;
+	}
+
+	if (!ipa_wdi_ctx_list[hdl]) {
+		IPA_WDI_ERR("wdi ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	IPA_WDI_DBG("client hdl = %d, Instance = %d\n", hdl,
+		ipa_wdi_ctx_list[hdl]->inst_id);
+
+	if (ipa_wdi_ctx_list[hdl]->wdi_version >= IPA_WDI_1 &&
+		ipa_wdi_ctx_list[hdl]->wdi_version < IPA_WDI_3 &&
+		hdl > 0) {
+		IPA_WDI_ERR("More than one instance not supported for WDI ver = %d\n",
+				ipa_wdi_ctx_list[hdl]->wdi_version);
+		return -EPERM;
+	}
+
+ /* clear interface list */
+ list_for_each_entry_safe(entry, next,
+ &ipa_wdi_ctx_list[hdl]->head_intf_list, link) {
+ list_del(&entry->link);
+ kfree(entry);
+ }
+ mutex_destroy(&ipa_wdi_ctx_list[hdl]->lock);
+ kfree(ipa_wdi_ctx_list[hdl]);
+ ipa_wdi_ctx_list[hdl] = NULL;
+ return 0;
+}
+
+/**
+ * function to deregister the interface; called before unload and after disconnect
+ *
+ * @Return 0 on success, negative on failure
+ */
+static int ipa_wdi_dereg_intf_per_inst_internal(const char *netdev_name, ipa_wdi_hdl_t hdl)
+{
+ int len, ret = 0;
+ struct ipa_ioc_del_hdr *hdr = NULL;
+ struct ipa_wdi_intf_info *entry;
+ struct ipa_wdi_intf_info *next;
+
+ if (!netdev_name) {
+ IPA_WDI_ERR("no netdev name.\n");
+ return -EINVAL;
+ }
+
+	if (hdl < 0 || hdl >= IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Invalid Handle %d\n", hdl);
+		return -EFAULT;
+	}
+
+	if (!ipa_wdi_ctx_list[hdl]) {
+		IPA_WDI_ERR("wdi ctx is not initialized.\n");
+		return -EPERM;
+	}
+
+	if (ipa_wdi_ctx_list[hdl]->wdi_version >= IPA_WDI_1 &&
+		ipa_wdi_ctx_list[hdl]->wdi_version < IPA_WDI_3 &&
+		hdl > 0) {
+		IPA_WDI_ERR("More than one instance not supported for WDI ver = %d\n",
+				ipa_wdi_ctx_list[hdl]->wdi_version);
+		return -EPERM;
+	}
+
+	IPA_WDI_DBG("Deregister Instance hdl %d\n", hdl);
+ mutex_lock(&ipa_wdi_ctx_list[hdl]->lock);
+ list_for_each_entry_safe(entry, next, &ipa_wdi_ctx_list[hdl]->head_intf_list,
+ link)
+ if (strcmp(entry->netdev_name, netdev_name) == 0) {
+ len = sizeof(struct ipa_ioc_del_hdr) +
+ 2 * sizeof(struct ipa_hdr_del);
+ hdr = kzalloc(len, GFP_KERNEL);
+ if (hdr == NULL) {
+ IPA_WDI_ERR("fail to alloc %d bytes\n", len);
+ mutex_unlock(&ipa_wdi_ctx_list[hdl]->lock);
+ return -ENOMEM;
+ }
+
+ hdr->commit = 1;
+ hdr->num_hdls = 2;
+ hdr->hdl[0].hdl = entry->partial_hdr_hdl[0];
+ hdr->hdl[1].hdl = entry->partial_hdr_hdl[1];
+ IPA_WDI_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
+ hdr->hdl[0].hdl, hdr->hdl[1].hdl);
+
+ if (ipa3_del_hdr(hdr)) {
+ IPA_WDI_ERR("fail to delete partial header\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa3_deregister_intf(entry->netdev_name)) {
+ IPA_WDI_ERR("fail to del interface props\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ list_del(&entry->link);
+ kfree(entry);
+
+ break;
+ }
+
+fail:
+ kfree(hdr);
+ mutex_unlock(&ipa_wdi_ctx_list[hdl]->lock);
+ return ret;
+}
+
+/**
+ * function to disconnect pipes
+ *
+ * @hdl: hdl to wdi client
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa_wdi_disconn_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
+{
+ int i, ipa_ep_idx_rx, ipa_ep_idx_tx;
+ int ipa_ep_idx_tx1 = IPA_EP_NOT_ALLOCATED;
+
+	if (hdl < 0 || hdl >= IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Invalid Handle %d\n", hdl);
+		return -EFAULT;
+	}
+
+	if (!ipa_wdi_ctx_list[hdl]) {
+		IPA_WDI_ERR("wdi ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (ipa_wdi_ctx_list[hdl]->wdi_version >= IPA_WDI_1 &&
+		ipa_wdi_ctx_list[hdl]->wdi_version < IPA_WDI_3 &&
+		hdl > 0) {
+		IPA_WDI_ERR("More than one instance not supported for WDI ver = %d\n",
+				ipa_wdi_ctx_list[hdl]->wdi_version);
+		return -EPERM;
+	}
+
+	IPA_WDI_DBG("Disconnect pipes for hdl %d\n", hdl);
+ /* tear down sys pipe if needed */
+ for (i = 0; i < ipa_wdi_ctx_list[hdl]->num_sys_pipe_needed; i++) {
+ if (ipa_teardown_sys_pipe(ipa_wdi_ctx_list[hdl]->sys_pipe_hdl[i])) {
+ IPA_WDI_ERR("fail to tear down sys pipe %d\n", i);
+ return -EFAULT;
+ }
+ }
+
+ if (ipa_wdi_ctx_list[hdl]->wdi_version == IPA_WDI_3) {
+ if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[hdl]->inst_id)) {
+ ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+ ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+ } else {
+ ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN3_PROD);
+ ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN4_CONS);
+ }
+
+ if (ipa_wdi_ctx_list[hdl]->is_tx1_used)
+ ipa_ep_idx_tx1 =
+ ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS1);
+ } else {
+ ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+ ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+ }
+
+ if (ipa_wdi_ctx_list[hdl]->wdi_version == IPA_WDI_3) {
+ if (ipa3_disconn_wdi3_pipes(
+ ipa_ep_idx_tx, ipa_ep_idx_rx, ipa_ep_idx_tx1)) {
+ IPA_WDI_ERR("fail to tear down wdi pipes\n");
+ return -EFAULT;
+ }
+ } else {
+ if (ipa3_disconnect_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
+ IPA_WDI_ERR("fail to tear down wdi tx pipes\n");
+ return -EFAULT;
+ }
+ if (ipa3_disconnect_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
+ IPA_WDI_ERR("fail to tear down wdi rx pipes\n");
+ return -EFAULT;
+ }
+ }
+
+ if (ipa_pm_deregister(ipa_wdi_ctx_list[hdl]->ipa_pm_hdl)) {
+ IPA_WDI_ERR("fail to deregister ipa pm\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * function to disable IPA offload data path
+ *
+ * @hdl: hdl to wdi client
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa_wdi_disable_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
+{
+ int ret;
+ int ipa_ep_idx_tx, ipa_ep_idx_rx;
+ int ipa_ep_idx_tx1 = IPA_EP_NOT_ALLOCATED;
+
+	if (hdl < 0 || hdl >= IPA_WDI_INST_MAX) {
+		IPA_WDI_ERR("Invalid Handle %d\n", hdl);
+		return -EFAULT;
+	}
+
+	if (!ipa_wdi_ctx_list[hdl]) {
+		IPA_WDI_ERR("wdi ctx is not initialized.\n");
+		return -EPERM;
+	}
+
+	if (ipa_wdi_ctx_list[hdl]->wdi_version >= IPA_WDI_1 &&
+		ipa_wdi_ctx_list[hdl]->wdi_version < IPA_WDI_3 &&
+		hdl > 0) {
+		IPA_WDI_ERR("More than one instance not supported for WDI ver = %d\n",
+				ipa_wdi_ctx_list[hdl]->wdi_version);
+		return -EPERM;
+	}
+
+ if (ipa_wdi_ctx_list[hdl]->wdi_version == IPA_WDI_3) {
+ if (IPA_CLIENT_IS_WLAN0_INSTANCE(ipa_wdi_ctx_list[hdl]->inst_id)) {
+ ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+ ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+ } else {
+ ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN3_PROD);
+ ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN4_CONS);
+ }
+
+ if (ipa_wdi_ctx_list[hdl]->is_tx1_used)
+ ipa_ep_idx_tx1 =
+ ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS1);
+ } else {
+ ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+ ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+ }
+
+ if (ipa_wdi_ctx_list[hdl]->wdi_version == IPA_WDI_3) {
+ if (ipa3_disable_wdi3_pipes(
+ ipa_ep_idx_tx, ipa_ep_idx_rx, ipa_ep_idx_tx1)) {
+ IPA_WDI_ERR("fail to disable wdi pipes\n");
+ return -EFAULT;
+ }
+ } else {
+ if (ipa3_suspend_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
+ IPA_WDI_ERR("fail to suspend wdi tx pipe\n");
+ return -EFAULT;
+ }
+ if (ipa3_disable_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
+ IPA_WDI_ERR("fail to disable wdi tx pipe\n");
+ return -EFAULT;
+ }
+ if (ipa3_suspend_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
+ IPA_WDI_ERR("fail to suspend wdi rx pipe\n");
+ return -EFAULT;
+ }
+ if (ipa3_disable_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
+ IPA_WDI_ERR("fail to disable wdi rx pipe\n");
+ return -EFAULT;
+ }
+ }
+
+ ret = ipa_pm_deactivate_sync(ipa_wdi_ctx_list[hdl]->ipa_pm_hdl);
+ if (ret) {
+ IPA_WDI_ERR("fail to deactivate ipa pm\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int ipa_wdi_init_internal(struct ipa_wdi_init_in_params *in,
+ struct ipa_wdi_init_out_params *out)
+{
+ if (in == NULL) {
+ IPA_WDI_ERR("invalid params in=%pK\n", in);
+ return -EINVAL;
+ }
+
+ in->inst_id = DEFAULT_INSTANCE_ID;
+ return ipa_wdi_init_per_inst_internal(in, out);
+}
+
+static int ipa_wdi_cleanup_internal(void)
+{
+ return ipa_wdi_cleanup_per_inst_internal(0);
+}
+
+static int ipa_wdi_reg_intf_internal(struct ipa_wdi_reg_intf_in_params *in)
+{
+ if (in == NULL) {
+ IPA_WDI_ERR("invalid params in=%pK\n", in);
+ return -EINVAL;
+ }
+ in->hdl = 0;
+ return ipa_wdi_reg_intf_per_inst_internal(in);
+}
+
+static int ipa_wdi_dereg_intf_internal(const char *netdev_name)
+{
+ return ipa_wdi_dereg_intf_per_inst_internal(netdev_name, 0);
+}
+
+static int ipa_wdi_conn_pipes_internal(struct ipa_wdi_conn_in_params *in,
+ struct ipa_wdi_conn_out_params *out)
+{
+ if (!(in && out)) {
+ IPA_WDI_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+ return -EINVAL;
+ }
+
+ in->hdl = 0;
+ return ipa_wdi_conn_pipes_per_inst_internal(in, out);
+}
+
+static int ipa_wdi_disconn_pipes_internal(void)
+{
+ return ipa_wdi_disconn_pipes_per_inst_internal(0);
+}
+
+static int ipa_wdi_enable_pipes_internal(void)
+{
+ return ipa_wdi_enable_pipes_per_inst_internal(0);
+}
+
+static int ipa_wdi_disable_pipes_internal(void)
+{
+ return ipa_wdi_disable_pipes_per_inst_internal(0);
+}
+
+static int ipa_wdi_set_perf_profile_internal(struct ipa_wdi_perf_profile *profile)
+{
+ if (profile == NULL) {
+ IPA_WDI_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ return ipa_wdi_set_perf_profile_per_inst_internal(0, profile);
+}
+
void ipa_wdi3_register(void)
{
struct ipa_wdi3_data funcs;
@@ -848,6 +1307,18 @@
funcs.ipa_wdi_sw_stats = ipa3_set_wlan_tx_info;
funcs.ipa_get_wdi_version = ipa_get_wdi_version_internal;
funcs.ipa_wdi_is_tx1_used = ipa_wdi_is_tx1_used_internal;
+ funcs.ipa_wdi_get_capabilities = ipa_wdi_get_capabilities_internal;
+ funcs.ipa_wdi_init_per_inst = ipa_wdi_init_per_inst_internal;
+ funcs.ipa_wdi_cleanup_per_inst = ipa_wdi_cleanup_per_inst_internal;
+ funcs.ipa_wdi_reg_intf_per_inst = ipa_wdi_reg_intf_per_inst_internal;
+ funcs.ipa_wdi_dereg_intf_per_inst = ipa_wdi_dereg_intf_per_inst_internal;
+ funcs.ipa_wdi_conn_pipes_per_inst = ipa_wdi_conn_pipes_per_inst_internal;
+ funcs.ipa_wdi_disconn_pipes_per_inst = ipa_wdi_disconn_pipes_per_inst_internal;
+ funcs.ipa_wdi_enable_pipes_per_inst = ipa_wdi_enable_pipes_per_inst_internal;
+ funcs.ipa_wdi_disable_pipes_per_inst = ipa_wdi_disable_pipes_per_inst_internal;
+ funcs.ipa_wdi_set_perf_profile_per_inst = ipa_wdi_set_perf_profile_per_inst_internal;
+ funcs.ipa_wdi_create_smmu_mapping_per_inst = ipa_wdi_create_smmu_mapping_per_inst_internal;
+ funcs.ipa_wdi_release_smmu_mapping_per_inst = ipa_wdi_release_smmu_mapping_per_inst_internal;
if (ipa_fmwk_register_ipa_wdi3(&funcs))
pr_err("failed to register ipa_wdi3 APIs\n");
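For orientation, here is a minimal sketch of how a WLAN client could drive the per-instance WDI path added above. It assumes the public wrappers exposed through the funcs table (ipa_wdi_get_capabilities(), ipa_wdi_init_per_inst(), and so on) mirror the internal functions registered in ipa_wdi3_register(); treat the call names as illustrative rather than authoritative.

/* Illustrative only: wrapper names are assumed to match the funcs table above. */
static int wlan_wdi_bringup_example(int inst_id)
{
	struct ipa_wdi_capabilities_out_params cap;
	struct ipa_wdi_init_in_params init_in;
	struct ipa_wdi_init_out_params init_out;
	int ret;

	memset(&cap, 0, sizeof(cap));
	memset(&init_in, 0, sizeof(init_in));
	memset(&init_out, 0, sizeof(init_out));

	/* 1. Query how many WDI instances this IPA driver exposes */
	ret = ipa_wdi_get_capabilities(&cap);
	if (ret)
		return ret;

	/* 2. Init one instance: inst_id 0 (or -1) selects WLAN0, 1 selects WLAN1 */
	init_in.wdi_version = IPA_WDI_3;
	init_in.inst_id = inst_id;
	ret = ipa_wdi_init_per_inst(&init_in, &init_out);
	if (ret)
		return ret;

	/*
	 * init_out.hdl identifies this instance in every later call:
	 * ipa_wdi_reg_intf_per_inst(), ipa_wdi_conn_pipes_per_inst(),
	 * ipa_wdi_enable_pipes_per_inst(init_out.hdl), and so on.
	 */
	return 0;
}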
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 7295f6e..0d96a11 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -507,7 +507,7 @@
switch (event) {
case PM_POST_SUSPEND:
#ifdef CONFIG_DEEPSLEEP
- if (mem_sleep_current == PM_SUSPEND_MEM && ipa3_ctx->deepsleep) {
+ if (pm_suspend_via_firmware() && ipa3_ctx->deepsleep) {
IPADBG("Enter deepsleep resume\n");
ipa3_deepsleep_resume();
IPADBG("Exit deepsleep resume\n");
@@ -979,6 +979,7 @@
{
return &smmu_cb[cb_type];
}
+EXPORT_SYMBOL(ipa3_get_smmu_ctx);
static int ipa3_open(struct inode *inode, struct file *filp)
{
@@ -2815,7 +2816,7 @@
{
int retval = 0;
u32 pyld_sz;
- u8 header[256] = { 0 };
+ u8 header[512] = { 0 };
u8 *param = NULL;
bool is_vlan_mode;
struct ipa_ioc_coal_evict_policy evict_pol;
@@ -9520,6 +9521,11 @@
result = -ENOMEM;
goto fail_gsi_map;
}
+ mutex_init(&ipa3_ctx->recycle_stats_collection_lock);
+ memset(&ipa3_ctx->recycle_stats, 0, sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+ memset(&ipa3_ctx->prev_coal_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
+ memset(&ipa3_ctx->prev_default_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
+ memset(&ipa3_ctx->prev_low_lat_data_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
ipa3_ctx->transport_power_mgmt_wq =
create_singlethread_workqueue("transport_power_mgmt");
@@ -9529,6 +9535,17 @@
goto fail_create_transport_wq;
}
+ /* Create workqueue for recycle stats collection */
+ ipa3_ctx->collect_recycle_stats_wq =
+ create_singlethread_workqueue("page_recycle_stats_collection");
+ if (!ipa3_ctx->collect_recycle_stats_wq) {
+ IPAERR("failed to create page recycling stats collection wq\n");
+ result = -ENOMEM;
+ goto fail_create_recycle_stats_wq;
+ }
+ memset(&ipa3_ctx->recycle_stats, 0,
+ sizeof(ipa3_ctx->recycle_stats));
+
mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
/* init the lookaside cache */
@@ -9863,6 +9880,8 @@
fail_rt_rule_cache:
kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
fail_flt_rule_cache:
+ destroy_workqueue(ipa3_ctx->collect_recycle_stats_wq);
+fail_create_recycle_stats_wq:
destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
fail_create_transport_wq:
destroy_workqueue(ipa3_ctx->power_mgmt_wq);
@@ -11738,7 +11757,7 @@
}
#ifdef CONFIG_DEEPSLEEP
- if (mem_sleep_current == PM_SUSPEND_MEM) {
+ if (pm_suspend_via_firmware()) {
IPADBG("Enter deepsleep suspend\n");
ipa3_deepsleep_suspend();
IPADBG("Exit deepsleep suspend\n");
@@ -11932,6 +11951,7 @@
return iommu_map(domain, iova, paddr, size, prot);
}
+EXPORT_SYMBOL(ipa3_iommu_map);
/**
* ipa3_get_smmu_params()- Return the ipa3 smmu related params.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index baf3c7b..8de3694 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -26,6 +26,7 @@
#include "ipa_trace.h"
#include "ipahal.h"
#include "ipahal_fltrt.h"
+#include "ipa_stats.h"
#define IPA_GSI_EVENT_RP_SIZE 8
#define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
@@ -155,6 +156,156 @@
struct gsi_chan_xfer_notify g_lan_rx_notify[IPA_LAN_NAPI_MAX_FRAMES];
+static void ipa3_collect_default_coal_recycle_stats_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_collect_default_coal_recycle_stats_wq_work,
+ ipa3_collect_default_coal_recycle_stats_wq);
+
+static void ipa3_collect_low_lat_data_recycle_stats_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_collect_low_lat_data_recycle_stats_wq_work,
+ ipa3_collect_low_lat_data_recycle_stats_wq);
+
+static void ipa3_collect_default_coal_recycle_stats_wq(struct work_struct *work)
+{
+ struct ipa3_sys_context *sys;
+ int stat_interval_index;
+ int ep_idx = -1;
+
+ /* For targets which don't require coalescing pipe */
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+ if (ep_idx == -1)
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+
+ if (ep_idx == -1)
+ sys = NULL;
+ else
+ sys = ipa3_ctx->ep[ep_idx].sys;
+
+ mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+ stat_interval_index = ipa3_ctx->recycle_stats.default_coal_stats_index;
+ ipa3_ctx->recycle_stats.interval_time_in_ms = IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_TIME;
+
+ /* Coalescing pipe page recycling stats */
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[0].total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[0].page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc;
+
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative
+ - ipa3_ctx->prev_coal_recycle_stats.total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative
+ - ipa3_ctx->prev_coal_recycle_stats.page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative
+ - ipa3_ctx->prev_coal_recycle_stats.tmp_alloc;
+
+ ipa3_ctx->prev_coal_recycle_stats.total_replenished
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative;
+ ipa3_ctx->prev_coal_recycle_stats.page_recycled
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative;
+ ipa3_ctx->prev_coal_recycle_stats.tmp_alloc
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative;
+
+ /* Default pipe page recycling stats */
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[1].total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[1].page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc;
+
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative
+ - ipa3_ctx->prev_default_recycle_stats.total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative
+ - ipa3_ctx->prev_default_recycle_stats.page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative
+ - ipa3_ctx->prev_default_recycle_stats.tmp_alloc;
+
+ ipa3_ctx->prev_default_recycle_stats.total_replenished
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative;
+ ipa3_ctx->prev_default_recycle_stats.page_recycled
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative;
+ ipa3_ctx->prev_default_recycle_stats.tmp_alloc
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative;
+
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].valid = 1;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].valid = 1;
+
+ /* Single Indexing for coalescing and default pipe */
+ ipa3_ctx->recycle_stats.default_coal_stats_index =
+ (ipa3_ctx->recycle_stats.default_coal_stats_index + 1) % IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
+ if (sys && atomic_read(&sys->curr_polling_state))
+ queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+ &ipa3_collect_default_coal_recycle_stats_wq_work, msecs_to_jiffies(10));
+
+ mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+ return;
+
+}
+
+static void ipa3_collect_low_lat_data_recycle_stats_wq(struct work_struct *work)
+{
+ struct ipa3_sys_context *sys;
+ int stat_interval_index;
+ int ep_idx;
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+ if (ep_idx == -1)
+ sys = NULL;
+ else
+ sys = ipa3_ctx->ep[ep_idx].sys;
+
+ mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+ stat_interval_index = ipa3_ctx->recycle_stats.low_lat_stats_index;
+
+ /* Low latency data pipe page recycling stats */
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[2].total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[2].page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[2].tmp_alloc;
+
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative
+ - ipa3_ctx->prev_low_lat_data_recycle_stats.total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative
+ - ipa3_ctx->prev_low_lat_data_recycle_stats.page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative
+ - ipa3_ctx->prev_low_lat_data_recycle_stats.tmp_alloc;
+
+ ipa3_ctx->prev_low_lat_data_recycle_stats.total_replenished
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative;
+ ipa3_ctx->prev_low_lat_data_recycle_stats.page_recycled
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative;
+ ipa3_ctx->prev_low_lat_data_recycle_stats.tmp_alloc
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative;
+
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].valid = 1;
+
+ /* Indexing for low lat data stats pipe */
+ ipa3_ctx->recycle_stats.low_lat_stats_index =
+ (ipa3_ctx->recycle_stats.low_lat_stats_index + 1) % IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
+ if (sys && atomic_read(&sys->curr_polling_state))
+ queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+ &ipa3_collect_low_lat_data_recycle_stats_wq_work, msecs_to_jiffies(10));
+
+ mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+ return;
+}
+
/**
* ipa3_write_done_common() - this function is responsible on freeing
* all tx_pkt_wrappers related to a skb
@@ -5099,7 +5250,8 @@
/* Check added for handling LAN consumer packet without EOT flag */
if (notify->evt_id == GSI_CHAN_EVT_EOT ||
- sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
+ sys->ep->client == IPA_CLIENT_APPS_LAN_CONS ||
+ sys->ep->client == IPA_CLIENT_APPS_LAN_COAL_CONS) {
/* go over the list backward to save computations on updating length */
list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
rx_skb = rx_pkt->data.skb;
@@ -7003,6 +7155,9 @@
/* call repl_hdlr before napi_reschedule / napi_complete */
ep->sys->repl_hdlr(ep->sys);
wan_def_sys->repl_hdlr(wan_def_sys);
+	/* Schedule the WAN and COAL stats collection work queue */
+ queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+ &ipa3_collect_default_coal_recycle_stats_wq_work, msecs_to_jiffies(10));
/* When not able to replenish enough descriptors, keep in polling
* mode, wait for napi-poll and replenish again.
*/
@@ -7191,7 +7346,6 @@
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI_LL");
-
remain_aggr_weight = budget / ipa3_ctx->ipa_wan_aggr_pkt_cnt;
if (remain_aggr_weight > IPA_WAN_NAPI_MAX_FRAMES) {
IPAERR("NAPI weight is higher than expected\n");
@@ -7231,6 +7385,9 @@
cnt += budget - remain_aggr_weight * ipa3_ctx->ipa_wan_aggr_pkt_cnt;
/* call repl_hdlr before napi_reschedule / napi_complete */
sys->repl_hdlr(sys);
+	/* Schedule the RMNET low latency data stats collection work queue */
+ queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+ &ipa3_collect_low_lat_data_recycle_stats_wq_work, msecs_to_jiffies(10));
/* When not able to replenish enough descriptors, keep in polling
* mode, wait for napi-poll and replenish again.
*/
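The collection routines above keep only cumulative counters and derive per-interval deltas against the previously sampled values, storing each sample in a small ring of interval slots indexed modulo IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT. A simplified sketch of that bookkeeping, with field and variable names that are illustrative rather than the driver's actual structures:

#define INTERVAL_COUNT 10	/* stand-in for IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT */

struct recycle_sample {
	u64 total_cumulative;	/* counter value at sample time */
	u64 total_diff;		/* growth since the previous sample */
};

static struct recycle_sample ring[INTERVAL_COUNT];
static u64 prev_total;
static int slot;

static void collect_sample(u64 total_replenished_now)
{
	ring[slot].total_cumulative = total_replenished_now;
	ring[slot].total_diff = total_replenished_now - prev_total;
	prev_total = total_replenished_now;

	/* advance to the next interval slot, wrapping around */
	slot = (slot + 1) % INTERVAL_COUNT;
}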
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 6afc5a0..1922d0f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -2585,6 +2585,13 @@
phys_addr_t per_stats_smem_pa;
void *per_stats_smem_va;
u32 ipa_smem_size;
+ bool is_dual_pine_config;
+ struct workqueue_struct *collect_recycle_stats_wq;
+ struct ipa_lnx_pipe_page_recycling_stats recycle_stats;
+ struct ipa3_page_recycle_stats prev_coal_recycle_stats;
+ struct ipa3_page_recycle_stats prev_default_recycle_stats;
+ struct ipa3_page_recycle_stats prev_low_lat_data_recycle_stats;
+ struct mutex recycle_stats_collection_lock;
};
struct ipa3_plat_drv_res {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
index c5c7b2e..43f48a9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
@@ -926,7 +926,7 @@
IPA_CLIENT_AQC_ETHERNET_CONS;
#if IPA_ETH_API_VER >= 2
/* Get the client pipe info[0] from the allocation info context only if it is NTN3 */
- if ((instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3)) {
+ if (instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3) {
tx_instance_ptr_local->tx_client =
ipa_lnx_agent_ctx.alloc_info.eth_inst_info[
i].pipes_client_type[0];
@@ -1025,7 +1025,7 @@
IPA_CLIENT_AQC_ETHERNET_PROD;
#if IPA_ETH_API_VER >= 2
/* Get the client pipe info[1] from the allocation info context only if it is NTN3 */
- if ((instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3)) {
+ if (instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3) {
rx_instance_ptr_local->rx_client =
ipa_lnx_agent_ctx.alloc_info.eth_inst_info[
i].pipes_client_type[1];
@@ -1472,6 +1472,42 @@
}
#endif
+static int ipa_get_page_recycle_stats(unsigned long arg)
+{
+ struct ipa_lnx_pipe_page_recycling_stats *page_recycle_stats;
+ int alloc_size;
+
+ alloc_size = sizeof(struct ipa_lnx_pipe_page_recycling_stats);
+
+ page_recycle_stats = (struct ipa_lnx_pipe_page_recycling_stats *) memdup_user((
+ const void __user *)arg, alloc_size);
+	if (IS_ERR(page_recycle_stats)) {
+		IPA_STATS_ERR("copy from user failed");
+		return PTR_ERR(page_recycle_stats);
+	}
+
+ mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+ memcpy(page_recycle_stats, &ipa3_ctx->recycle_stats,
+ sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+
+ /* Clear all the data and valid bits */
+ memset(&ipa3_ctx->recycle_stats, 0,
+ sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+
+ mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+	if (copy_to_user((void __user *)arg,
+ (u8 *)page_recycle_stats,
+ alloc_size)) {
+ IPA_STATS_ERR("copy to user failed");
+ kfree(page_recycle_stats);
+ return -EFAULT;
+ }
+
+ kfree(page_recycle_stats);
+ return 0;
+}
+
static int ipa_stats_get_alloc_info(unsigned long arg)
{
int i = 0;
@@ -1665,41 +1701,44 @@
#if IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER)
if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 0;
- goto success;
+ } else {
+ if (ipa_usb_is_teth_prot_connected(IPA_USB_RNDIS))
+ ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RNDIS;
+ else if (ipa_usb_is_teth_prot_connected(IPA_USB_RMNET))
+ ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RMNET;
+ else
+ ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_MAX_TETH_PROT_SIZE;
+ ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 1;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_pipes = 4;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_tx_instances = 2;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_rx_instances = 2;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[0] =
+ IPA_CLIENT_MHI_PRIME_TETH_CONS;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[1] =
+ IPA_CLIENT_MHI_PRIME_TETH_PROD;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[2] =
+ IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[3] =
+ IPA_CLIENT_MHI_PRIME_RMNET_PROD;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[0]
+ = IPA_CLIENT_MHI_PRIME_TETH_CONS;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[1]
+ = IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[0]
+ = IPA_CLIENT_MHI_PRIME_TETH_PROD;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[1]
+ = IPA_CLIENT_MHI_PRIME_RMNET_PROD;
}
- if (ipa_usb_is_teth_prot_connected(IPA_USB_RNDIS))
- ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RNDIS;
- else if(ipa_usb_is_teth_prot_connected(IPA_USB_RMNET))
- ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RMNET;
- else ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_MAX_TETH_PROT_SIZE;
- ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 1;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_pipes = 4;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_tx_instances = 2;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_rx_instances = 2;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[0] =
- IPA_CLIENT_MHI_PRIME_TETH_CONS;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[1] =
- IPA_CLIENT_MHI_PRIME_TETH_PROD;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[2] =
- IPA_CLIENT_MHI_PRIME_RMNET_CONS;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[3] =
- IPA_CLIENT_MHI_PRIME_RMNET_PROD;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[0]
- = IPA_CLIENT_MHI_PRIME_TETH_CONS;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[1]
- = IPA_CLIENT_MHI_PRIME_RMNET_CONS;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[0]
- = IPA_CLIENT_MHI_PRIME_TETH_PROD;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[1]
- = IPA_CLIENT_MHI_PRIME_RMNET_PROD;
-
-success:
#else
/* MHI Prime is not enabled */
ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 0;
#endif
}
+ /* Page recycling stats for the default, coalescing and low-latency pipes */
+ if (ipa_lnx_agent_ctx.log_type_mask & SPRHD_IPA_LOG_TYPE_RECYCLE_STATS)
+ ipa_lnx_agent_ctx.alloc_info.num_page_rec_interval =
+ IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
if(copy_to_user((u8 *)arg,
&ipa_lnx_agent_ctx,
sizeof(struct ipa_lnx_stats_spearhead_ctx))) {
@@ -1818,6 +1857,13 @@
}
#endif
}
+ if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_RECYCLE_STATS) {
+ retval = ipa_get_page_recycle_stats((unsigned long) consolidated_stats->recycle_stats);
+ if (retval) {
+ IPA_STATS_ERR("ipa get page recycle stats fail\n");
+ break;
+ }
+ }
break;
default:
retval = -ENOTTY;
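With the SPRHD_IPA_LOG_TYPE_RECYCLE_STATS branch added above, a user-space consumer that already issues the consolidated-stats ioctl only has to set the new mask bit and point recycle_stats at its own buffer; the kernel fills that buffer and clears its internal copy. A hedged sketch of such a caller, where the device node, the ioctl request macro and the shared struct definitions (mirrored from ipa_stats.h) are assumptions rather than something this patch defines:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Placeholder names; substitute whatever the spearhead agent actually uses. */
#define IPA_STATS_DEV			"/dev/ipa_lnx_stats"
#define IPA_IOC_CONSOLIDATED_STATS	_IOWR(0xCF, 1, struct ipa_lnx_consolidated_stats)

int read_recycle_stats(void)
{
	struct ipa_lnx_pipe_page_recycling_stats recycle = { 0 };
	struct ipa_lnx_consolidated_stats stats = { 0 };
	int fd, ret;

	fd = open(IPA_STATS_DEV, O_RDONLY);
	if (fd < 0)
		return -1;

	stats.log_type_mask = SPRHD_IPA_LOG_TYPE_RECYCLE_STATS;
	stats.recycle_stats = &recycle;		/* user buffer the kernel copies into */

	ret = ioctl(fd, IPA_IOC_CONSOLIDATED_STATS, &stats);
	close(fd);
	if (ret)
		return ret;

	printf("recycle stats sequence id: %u\n", recycle.sequence_id);
	return 0;
}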
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
index 45ee926..8e0ddfd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
@@ -56,6 +56,9 @@
#define SPEARHEAD_NUM_MAX_INSTANCES 2
+#define IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT 5
+#define IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_TIME 10 /* In milliseconds */
+
/**
* This is used to indicate which set of logs is enabled from IPA
* These bitmapped macros are copied from
@@ -67,6 +70,7 @@
#define SPRHD_IPA_LOG_TYPE_ETH_STATS 0x00008
#define SPRHD_IPA_LOG_TYPE_USB_STATS 0x00010
#define SPRHD_IPA_LOG_TYPE_MHIP_STATS 0x00020
+#define SPRHD_IPA_LOG_TYPE_RECYCLE_STATS 0x00040
/**
@@ -340,7 +344,6 @@
};
#define IPA_LNX_MHIP_INST_STATS_STRUCT_LEN_INT (8 + 248)
-
struct ipa_lnx_consolidated_stats {
uint64_t log_type_mask;
struct ipa_lnx_generic_stats *generic_stats;
@@ -349,9 +352,43 @@
struct ipa_lnx_eth_inst_stats *eth_stats;
struct ipa_lnx_usb_inst_stats *usb_stats;
struct ipa_lnx_mhip_inst_stats *mhip_stats;
+ struct ipa_lnx_pipe_page_recycling_stats *recycle_stats;
};
#define IPA_LNX_CONSOLIDATED_STATS_STRUCT_LEN_INT (8 + 48)
+enum rx_channel_type {
+ RX_WAN_COALESCING,
+ RX_WAN_DEFAULT,
+ RX_WAN_LOW_LAT_DATA,
+ RX_CHANNEL_MAX,
+};
+
+struct ipa_lnx_recycling_stats {
+ uint64_t total_cumulative;
+ uint64_t recycle_cumulative;
+ uint64_t temp_cumulative;
+ uint64_t total_diff;
+ uint64_t recycle_diff;
+ uint64_t temp_diff;
+ uint64_t valid;
+};
+
+/**
+ * The consolidated stats are stored at index 0.
+ * The per-interval differences are stored at indices
+ * 1 to (IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT - 1).
+ * @interval_time_in_ms: Interval time in milliseconds
+ */
+struct ipa_lnx_pipe_page_recycling_stats {
+ uint32_t interval_time_in_ms;
+ uint32_t default_coal_stats_index;
+ uint32_t low_lat_stats_index;
+ uint32_t sequence_id;
+ uint64_t reserved;
+ struct ipa_lnx_recycling_stats rx_channel[RX_CHANNEL_MAX][IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT];
+};
+
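Per the layout described above (consolidated entry at index 0, per-interval deltas at indices 1 to IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT - 1), a consumer could walk one RX channel as in the sketch below; treating a non-zero valid field as "slot populated" is an assumption of this pretty-printer, the rest follows the structure definitions:

#include <stdio.h>

static void dump_recycle_channel(const struct ipa_lnx_pipe_page_recycling_stats *rs,
				 enum rx_channel_type ch, const char *name)
{
	int i;

	/* Index 0 carries the consolidated counters for the channel. */
	printf("%s: total=%llu recycled=%llu temp=%llu\n", name,
	       (unsigned long long)rs->rx_channel[ch][0].total_cumulative,
	       (unsigned long long)rs->rx_channel[ch][0].recycle_cumulative,
	       (unsigned long long)rs->rx_channel[ch][0].temp_cumulative);

	/* Indices 1..COUNT-1 carry per-interval deltas. */
	for (i = 1; i < IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT; i++) {
		if (!rs->rx_channel[ch][i].valid)
			continue;
		printf("  interval %d: +%llu replenished, +%llu recycled, +%llu temp\n",
		       i,
		       (unsigned long long)rs->rx_channel[ch][i].total_diff,
		       (unsigned long long)rs->rx_channel[ch][i].recycle_diff,
		       (unsigned long long)rs->rx_channel[ch][i].temp_diff);
	}
}

Called, for example, as dump_recycle_channel(&recycle, RX_WAN_LOW_LAT_DATA, "low-lat data") after the ioctl sketched earlier.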
/* Explain below structures */
struct ipa_lnx_each_inst_alloc_info {
uint32_t pipes_client_type[SPEARHEAD_NUM_MAX_PIPES];
@@ -372,7 +409,7 @@
uint32_t num_eth_instances;
uint32_t num_usb_instances;
uint32_t num_mhip_instances;
- uint32_t reserved;
+ uint32_t num_page_rec_interval;
struct ipa_lnx_each_inst_alloc_info wlan_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
struct ipa_lnx_each_inst_alloc_info eth_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
struct ipa_lnx_each_inst_alloc_info usb_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5a93158..fd44782 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -4186,7 +4186,7 @@
IPA_TX_INSTANCE_NA },
[IPA_5_0][IPA_CLIENT_WLAN3_PROD] = {
true, IPA_v5_0_GROUP_UL,
- false,
+ true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 1 , 0, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2},
@@ -9362,11 +9362,13 @@
param_in->client == IPA_CLIENT_RTK_ETHERNET_PROD) {
result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
} else if (param_in->client == IPA_CLIENT_WLAN1_PROD ||
- param_in->client == IPA_CLIENT_WLAN2_PROD) {
+ param_in->client == IPA_CLIENT_WLAN2_PROD ||
+ param_in->client == IPA_CLIENT_WLAN3_PROD) {
ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
- if (param_in->client == IPA_CLIENT_WLAN2_PROD)
- result = ipa3_write_qmapid_wdi3_gsi_pipe(
- ipa_ep_idx, meta.qmap_id);
+ if (param_in->client == IPA_CLIENT_WLAN2_PROD ||
+ param_in->client == IPA_CLIENT_WLAN3_PROD)
+ result = ipa3_write_qmapid_wdi3_gsi_pipe(
+ ipa_ep_idx, meta.qmap_id);
else
result = ipa3_write_qmapid_wdi_pipe(
ipa_ep_idx, meta.qmap_id);
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index a37ec31..af242f9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -2223,6 +2223,7 @@
if (rc == -EFAULT) {
IPAWANERR("Failed to setup wan/coal cons pipes\n");
+ mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
return rc;
}
@@ -2300,6 +2301,7 @@
sizeof(struct rmnet_ingress_param) *
ingress_ioctl_v2_data.number_of_eps)) {
IPAWANERR("Ingress copy to user failed\n");
+ mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
return -EFAULT;
}
@@ -2523,6 +2525,7 @@
if (rc == -EFAULT) {
IPAWANERR("Failed to setup wan prod pipes\n");
+ mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
return rc;
}
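The three hunks above patch individual early returns that previously left rmnet_ipa3_ctx->pipe_handle_guard held. An alternative shape that makes such omissions harder is to funnel every exit of the guarded section through a single unlock label; the sketch below is only an illustration of that pattern with hypothetical helper names, not how this patch restructures the code:

static int rmnet_ipa3_set_ingress_v2(unsigned long arg)
{
	struct rmnet_ingress_param param = { 0 };
	int rc;

	mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard);

	rc = rmnet_ipa3_setup_cons_pipes(arg);	/* hypothetical helper */
	if (rc == -EFAULT) {
		IPAWANERR("Failed to setup wan/coal cons pipes\n");
		goto unlock;
	}

	if (copy_to_user((void __user *)arg, &param, sizeof(param))) {
		IPAWANERR("Ingress copy to user failed\n");
		rc = -EFAULT;
	}

unlock:
	mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
	return rc;
}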
diff --git a/drivers/platform/msm/ipa/test/ipa_test_ntn.c b/drivers/platform/msm/ipa/test/ipa_test_ntn.c
index 741bd3b..7777e96 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_ntn.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_ntn.c
@@ -105,11 +105,12 @@
}__packed;
static inline void ipa_test_ntn_set_client_params(enum ipa_client_type cons_type,
- enum ipa_client_type prod_type, int inst_id)
+ enum ipa_client_type prod_type, int inst_id, enum ipa_eth_client_type eth_client_type)
{
test_ntn_ctx->cons_client_type = cons_type;
test_ntn_ctx->prod_client_type = prod_type;
test_ntn_ctx->eth_client_inst_id = inst_id;
+ test_ntn_ctx->client.client_type = eth_client_type;
}
static void ipa_test_ntn_free_dma_buff(struct ipa_mem_buffer *mem)
@@ -408,7 +409,8 @@
return -ENOMEM;
}
- ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET_CONS, IPA_CLIENT_ETHERNET_PROD, 0);
+ ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET_CONS, IPA_CLIENT_ETHERNET_PROD, 0,
+ IPA_ETH_CLIENT_NTN3);
init_completion(&test_ntn_ctx->init_completion_obj);
@@ -546,13 +548,20 @@
struct ipa_eth_client *client;
int ret, i;
#if IPA_ETH_API_VER >= 2
- struct net_device dummy_net_dev;
+ struct net_device *dummy_net_dev;
unsigned char dummy_dev_addr = 1;
- memset(dummy_net_dev.name, 0, sizeof(dummy_net_dev.name));
- dummy_net_dev.dev_addr = &dummy_dev_addr;
+ dummy_net_dev = kzalloc(sizeof(*dummy_net_dev), GFP_KERNEL);
+ if (!dummy_net_dev) {
+ IPA_UT_ERR("kzalloc failed\n");
+ return -ENOMEM;
+ }
- test_ntn_ctx->client.client_type = IPA_ETH_CLIENT_NTN3;
+ memset(dummy_net_dev->name, 0, sizeof(dummy_net_dev->name));
+ dummy_net_dev->dev_addr = &dummy_dev_addr;
+
+
+ /* client_type is set in ipa_test_ntn_set_client_params */
test_ntn_ctx->client.inst_id = test_ntn_ctx->eth_client_inst_id;
#else
test_ntn_ctx->client.client_type = IPA_ETH_CLIENT_NTN;
@@ -560,7 +569,7 @@
#endif
test_ntn_ctx->client.traffic_type = IPA_ETH_PIPE_BEST_EFFORT;
#if IPA_ETH_API_VER >= 2
- test_ntn_ctx->client.net_dev = &dummy_net_dev;
+ test_ntn_ctx->client.net_dev = dummy_net_dev;
#endif
/* RX pipe */
@@ -688,21 +697,22 @@
ret = ipa_eth_client_conn_pipes(client);
if(ret) {
IPA_UT_ERR("ipa_eth_client_conn_pipes failed ret %d\n", ret);
- goto conn_failed;
+ ipa_ntn_test_del_client_list();
}
- return 0;
+#if IPA_ETH_API_VER >= 2
+ kfree(dummy_net_dev);
+#endif
-conn_failed:
- ipa_ntn_test_del_client_list();
return ret;
+
}
static int ipa_ntn_test_reg_intf(void)
{
struct ipa_eth_intf_info intf;
#if IPA_ETH_API_VER >= 2
- struct net_device dummy_net_dev;
+ struct net_device *dummy_net_dev;
unsigned char dummy_dev_addr[ETH_ALEN] = { 0 };
#else
char netdev_name[IPA_RESOURCE_NAME_MAX] = { 0 };
@@ -712,11 +722,17 @@
memset(&intf, 0, sizeof(intf));
#if IPA_ETH_API_VER >= 2
- memset(dummy_net_dev.name, 0, sizeof(dummy_net_dev.name));
+ dummy_net_dev = kzalloc(sizeof(*dummy_net_dev), GFP_KERNEL);
+ if (!dummy_net_dev) {
+ IPA_UT_ERR("kzalloc failed\n");
+ return -ENOMEM;
+ }
- intf.net_dev = &dummy_net_dev;
+ memset(dummy_net_dev->name, 0, sizeof(dummy_net_dev->name));
+ intf.net_dev = dummy_net_dev;
intf.net_dev->dev_addr = (unsigned char *)dummy_dev_addr;
intf.is_conn_evt = true;
+ intf.client = &test_ntn_ctx->client;
snprintf(intf.net_dev->name, sizeof(intf.net_dev->name), "ntn_test");
IPA_UT_INFO("netdev name: %s strlen: %lu\n", intf.net_dev->name, strlen(intf.net_dev->name));
@@ -756,6 +772,7 @@
}
#if IPA_ETH_API_VER >= 2
+ kfree(dummy_net_dev);
#else
kfree(intf.pipe_hdl_list);
#endif
@@ -766,17 +783,24 @@
static int ipa_ntn_test_unreg_intf(void)
{
struct ipa_eth_intf_info intf;
+ int ret = 0;
#if IPA_ETH_API_VER >= 2
- struct net_device dummy_net_dev;
+ struct net_device *dummy_net_dev;
#else
char netdev_name[IPA_RESOURCE_NAME_MAX] = { 0 };
#endif
memset(&intf, 0, sizeof(intf));
#if IPA_ETH_API_VER >= 2
- memset(dummy_net_dev.name, 0, sizeof(dummy_net_dev.name));
+ dummy_net_dev = kzalloc(sizeof(*dummy_net_dev), GFP_KERNEL);
+ if (!dummy_net_dev) {
+ IPA_UT_ERR("kzalloc failed\n");
+ return -ENOMEM;
+ }
- intf.net_dev = &dummy_net_dev;
+ memset(dummy_net_dev->name, 0, sizeof(dummy_net_dev->name));
+ intf.net_dev = dummy_net_dev;
+ intf.client = &test_ntn_ctx->client;
snprintf(intf.net_dev->name, sizeof(intf.net_dev->name), "ntn_test");
IPA_UT_INFO("netdev name: %s strlen: %lu\n", intf.net_dev->name, strlen(intf.net_dev->name));
@@ -787,7 +811,14 @@
strlen(intf.netdev_name));
#endif
- return (ipa_eth_client_unreg_intf(&intf));
+ ret = ipa_eth_client_unreg_intf(&intf);
+
+#if IPA_ETH_API_VER >= 2
+ kfree(dummy_net_dev);
+#endif
+
+ return ret;
+
}
static void ipa_ntn_test_advance_db(u32 *db, int steps,
@@ -970,7 +1001,7 @@
}
static int ipa_ntn_test_prepare_test(void)
{
- struct ipa_ep_cfg ep_cfg = { { 0 } };
+ struct ipa_ep_cfg *ep_cfg = NULL;
int offset = 0;
int ret = 0;
@@ -1013,21 +1044,29 @@
}
/* configure NTN RX EP in DMA mode */
- ep_cfg.mode.mode = IPA_DMA;
- ep_cfg.mode.dst = test_ntn_ctx->cons_client_type;
+ ep_cfg = kzalloc(sizeof(*ep_cfg), GFP_KERNEL);
+ if (!ep_cfg) {
+ IPA_UT_ERR("kzalloc failed\n");
+ ret = -ENOMEM;
+ goto unreg;
+ }
- ep_cfg.seq.set_dynamic = true;
+ ep_cfg->mode.mode = IPA_DMA;
+ ep_cfg->mode.dst = test_ntn_ctx->cons_client_type;
+ ep_cfg->seq.set_dynamic = true;
- if (ipa3_cfg_ep(ipa_get_ep_mapping(test_ntn_ctx->prod_client_type),
- &ep_cfg)) {
+
+ if (ipa3_cfg_ep(ipa_get_ep_mapping(test_ntn_ctx->prod_client_type), ep_cfg)) {
IPA_UT_ERR("fail to configure DMA mode.\n");
ret = -EFAULT;
goto unreg;
}
+ kfree(ep_cfg);
+
return 0;
unreg:
+ kfree(ep_cfg);
if (ipa_ntn_test_unreg_intf()) {
IPA_UT_ERR("fail to unregister interface.\n");
ret = -EFAULT;
@@ -1418,6 +1457,7 @@
}
IPA_UT_INFO("sent the last packet succesfully!\n");
+
ipa_ntn_test_print_stats();
fail:
@@ -1431,16 +1471,20 @@
return ret;
}
-static int ipa_ntn_test_clients2_multi_transfer_burst(void *priv)
+#if IPA_ETH_API_VER >= 2
+static int ipa_ntn_test_eth1_multi_transfer_burst(void *priv)
{
int ret;
- ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET2_CONS, IPA_CLIENT_ETHERNET2_PROD, 1);
+ ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET2_CONS, IPA_CLIENT_ETHERNET2_PROD, 1,
+ IPA_ETH_CLIENT_NTN3);
ret = ipa_ntn_test_multi_transfer_burst(priv);
- ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET_CONS, IPA_CLIENT_ETHERNET_PROD, 0);
+ ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET_CONS, IPA_CLIENT_ETHERNET_PROD, 0,
+ IPA_ETH_CLIENT_NTN3);
return ret;
}
+#endif
/* Suite definition block */
IPA_UT_DEFINE_SUITE_START(ntn, "NTN3 tests",
@@ -1471,10 +1515,12 @@
ipa_ntn_test_multi_transfer_burst,
true, IPA_HW_v5_0, IPA_HW_MAX),
- IPA_UT_ADD_TEST(clients2_multi_transfer_burst,
- "Clients pair 2 send entire ring in one shot",
- ipa_ntn_test_clients2_multi_transfer_burst,
+#if IPA_ETH_API_VER >= 2
+ IPA_UT_ADD_TEST(eth1_multi_transfer_burst,
+ "eth1: send entire ring in one shot",
+ ipa_ntn_test_eth1_multi_transfer_burst,
true, IPA_HW_v5_0, IPA_HW_MAX),
+#endif
} IPA_UT_DEFINE_SUITE_END(ntn);