Merge "msm: ipa3: LAN Coalescing Feature Addition"
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index ef5face..33ea10e 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -170,6 +170,19 @@
(x < IPA_CLIENT_MAX && (x & 0x1) == 0)
#define IPA_CLIENT_IS_CONS(x) \
(x < IPA_CLIENT_MAX && (x & 0x1) == 1)
+/*
+ * The following macro does two things:
+ * 1) It checks to see if client x is allocated, and
+ * 2) It assigns a value to index idx
+ */
+#define IPA_CLIENT_IS_MAPPED(x, idx) \
+ ((idx = ipa3_get_ep_mapping(x)) != IPA_EP_NOT_ALLOCATED)
+/*
+ * Same behavior as the macro above; but in addition, determines if
+ * the client is valid as well.
+ */
+#define IPA_CLIENT_IS_MAPPED_VALID(x, idx) \
+ (IPA_CLIENT_IS_MAPPED(x, idx) && ipa3_ctx->ep[idx].valid == 1)
#define IPA_CLIENT_IS_ETH_PROD(x) \
((x == ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD)) || \
(x == ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET2_PROD)) || \
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 90db6a0..9e9e4a9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4345,62 +4345,65 @@
static int ipa3_setup_exception_path(void)
{
- struct ipa_ioc_add_hdr *hdr;
- struct ipa_hdr_add *hdr_entry;
- struct ipahal_reg_route route = { 0 };
- struct ipa3_hdr_entry *hdr_entry_internal;
- int ret;
+ struct ipa_ioc_add_hdr *hdr = NULL;
+ int ret = 0;
- /* install the basic exception header */
- hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
- sizeof(struct ipa_hdr_add), GFP_KERNEL);
- if (!hdr)
- return -ENOMEM;
+ if (!lan_coal_enabled()) {
- hdr->num_hdrs = 1;
- hdr->commit = 1;
- hdr_entry = &hdr->hdr[0];
+ struct ipa_hdr_add *hdr_entry;
+ struct ipahal_reg_route route = { 0 };
+ struct ipa3_hdr_entry *hdr_entry_internal;
- strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
- hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+ /* install the basic exception header */
+ hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+ sizeof(struct ipa_hdr_add), GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
- if (ipa3_add_hdr(hdr)) {
- IPAERR("fail to add exception hdr\n");
- ret = -EPERM;
- goto bail;
+ hdr->num_hdrs = 1;
+ hdr->commit = 1;
+ hdr_entry = &hdr->hdr[0];
+
+ strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+ hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+
+ if (ipa3_add_hdr(hdr)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ if (hdr_entry->status) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ hdr_entry_internal = ipa3_id_find(hdr_entry->hdr_hdl);
+ if (unlikely(!hdr_entry_internal)) {
+ IPAERR("fail to find internal hdr structure\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+ /* set the route register to pass exception packets to Apps */
+ route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ route.route_frag_def_pipe = ipa3_get_ep_mapping(
+ IPA_CLIENT_APPS_LAN_CONS);
+ route.route_def_hdr_table = !hdr_entry_internal->is_lcl;
+ route.route_def_retain_hdr = 1;
+
+ if (ipa3_cfg_route(&route)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
}
- if (hdr_entry->status) {
- IPAERR("fail to add exception hdr\n");
- ret = -EPERM;
- goto bail;
- }
-
- hdr_entry_internal = ipa3_id_find(hdr_entry->hdr_hdl);
- if (unlikely(!hdr_entry_internal)) {
- IPAERR("fail to find internal hdr structure\n");
- ret = -EPERM;
- goto bail;
- }
-
- ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
-
- /* set the route register to pass exception packets to Apps */
- route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
- route.route_frag_def_pipe = ipa3_get_ep_mapping(
- IPA_CLIENT_APPS_LAN_CONS);
- route.route_def_hdr_table = !hdr_entry_internal->is_lcl;
- route.route_def_retain_hdr = 1;
-
- if (ipa3_cfg_route(&route)) {
- IPAERR("fail to add exception hdr\n");
- ret = -EPERM;
- goto bail;
- }
-
- ret = 0;
bail:
- kfree(hdr);
+ kfree(hdr);
return ret;
}
@@ -6115,35 +6118,75 @@
}
IPADBG("default routing was set\n");
- /* LAN IN (IPA->AP) */
- memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
- sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
- sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
- sys_in.notify = ipa3_lan_rx_cb;
- sys_in.priv = NULL;
- if (ipa3_ctx->lan_rx_napi_enable)
- sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
- sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
- sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
- sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
- sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
- sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
- sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
- sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
- sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
+ ipa3_ctx->clnt_hdl_data_in = 0;
- /**
- * ipa_lan_rx_cb() intended to notify the source EP about packet
- * being received on the LAN_CONS via calling the source EP call-back.
- * There could be a race condition with calling this call-back. Other
- * thread may nullify it - e.g. on EP disconnect.
- * This lock intended to protect the access to the source EP call-back
- */
- spin_lock_init(&ipa3_ctx->disconnect_lock);
- if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
- IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
- result = -EPERM;
- goto fail_flt_hash_tuple;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v5_5) {
+ /*
+ * LAN_COAL IN (IPA->AP)
+ */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_APPS_LAN_COAL_CONS;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.notify = ipa3_lan_coal_rx_cb;
+ sys_in.priv = NULL;
+ if (ipa3_ctx->lan_rx_napi_enable)
+ sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+ sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
+
+ /**
+ * ipa3_lan_coal_rx_cb() intended to notify the source EP about
+ * packet being received on the LAN_COAL_CONS via calling the
+ * source EP call-back. There could be a race condition with
+ * calling this call-back. Other thread may nullify it - e.g. on
+ * EP disconnect. This lock intended to protect the access to the
+ * source EP call-back
+ */
+ spin_lock_init(&ipa3_ctx->disconnect_lock);
+ if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+ IPAERR(":setup sys pipe (LAN_COAL_CONS) failed.\n");
+ result = -EPERM;
+ goto fail_flt_hash_tuple;
+ }
+
+ } else { /* ipa3_ctx->ipa_hw_type < IPA_HW_v5_5 */
+ /*
+ * LAN IN (IPA->AP)
+ */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.notify = ipa3_lan_rx_cb;
+ sys_in.priv = NULL;
+ if (ipa3_ctx->lan_rx_napi_enable)
+ sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
+ sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+ sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+ sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
+
+ /**
+ * ipa_lan_rx_cb() intended to notify the source EP about packet
+ * being received on the LAN_CONS via calling the source EP call-back.
+ * There could be a race condition with calling this call-back. Other
+ * thread may nullify it - e.g. on EP disconnect.
+ * This lock intended to protect the access to the source EP call-back
+ */
+ spin_lock_init(&ipa3_ctx->disconnect_lock);
+ if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+ IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
+ result = -EPERM;
+ goto fail_flt_hash_tuple;
+ }
}
/* LAN OUT (AP->IPA) */
@@ -6172,7 +6215,8 @@
return 0;
fail_lan_data_out:
- ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+ if (ipa3_ctx->clnt_hdl_data_in)
+ ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
fail_flt_hash_tuple:
if (ipa3_ctx->dflt_v6_rt_rule_hdl)
__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
@@ -6189,7 +6233,8 @@
{
if (!ipa3_ctx->ipa_config_is_mhi)
ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
- ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+ if (ipa3_ctx->clnt_hdl_data_in)
+ ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
@@ -6798,7 +6843,7 @@
*/
if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
!ipa3_ctx->tag_process_before_gating) {
- ipa3_force_close_coal();
+ ipa3_force_close_coal(true, true);
/* While sending force close command setting
* tag process as true to make configure to
* original state
@@ -8789,8 +8834,11 @@
if (ipa3_ctx->lan_rx_napi_enable || ipa3_ctx->tx_napi_enable) {
init_dummy_netdev(&ipa3_ctx->generic_ndev);
if(ipa3_ctx->lan_rx_napi_enable) {
- netif_napi_add(&ipa3_ctx->generic_ndev, &ipa3_ctx->napi_lan_rx,
- ipa3_lan_poll, NAPI_WEIGHT);
+ netif_napi_add(
+ &ipa3_ctx->generic_ndev,
+ &ipa3_ctx->napi_lan_rx,
+ ipa3_lan_poll,
+ NAPI_WEIGHT);
}
}
}
@@ -8909,10 +8957,18 @@
ipa3_ctx->uc_ctx.holb_monitor.max_cnt_11ad =
resource_p->ipa_holb_monitor_max_cnt_11ad;
ipa3_ctx->ipa_wan_aggr_pkt_cnt = resource_p->ipa_wan_aggr_pkt_cnt;
- ipa3_ctx->stats.page_recycle_stats[0].total_replenished = 0;
- ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc = 0;
- ipa3_ctx->stats.page_recycle_stats[1].total_replenished = 0;
- ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc = 0;
+ memset(
+ ipa3_ctx->stats.page_recycle_stats,
+ 0,
+ sizeof(ipa3_ctx->stats.page_recycle_stats));
+ memset(
+ ipa3_ctx->stats.cache_recycle_stats,
+ 0,
+ sizeof(ipa3_ctx->stats.cache_recycle_stats));
+ memset(
+ &ipa3_ctx->stats.coal,
+ 0,
+ sizeof(ipa3_ctx->stats.coal));
memset(ipa3_ctx->stats.page_recycle_cnt, 0,
sizeof(ipa3_ctx->stats.page_recycle_cnt));
ipa3_ctx->stats.num_sort_tasklet_sched[0] = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 393132d..d0941b7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1621,33 +1621,40 @@
int nbytes;
int cnt = 0, i = 0, k = 0;
- nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
- "COAL : Total number of packets replenished =%llu\n"
- "COAL : Number of page recycled packets =%llu\n"
- "COAL : Number of tmp alloc packets =%llu\n"
- "COAL : Number of times tasklet scheduled =%llu\n"
- "DEF : Total number of packets replenished =%llu\n"
- "DEF : Number of page recycled packets =%llu\n"
- "DEF : Number of tmp alloc packets =%llu\n"
- "DEF : Number of times tasklet scheduled =%llu\n"
- "COMMON : Number of page recycled in tasklet =%llu\n"
- "COMMON : Number of times free pages not found in tasklet =%llu\n",
- ipa3_ctx->stats.page_recycle_stats[0].total_replenished,
- ipa3_ctx->stats.page_recycle_stats[0].page_recycled,
- ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc,
- ipa3_ctx->stats.num_sort_tasklet_sched[0],
- ipa3_ctx->stats.page_recycle_stats[1].total_replenished,
- ipa3_ctx->stats.page_recycle_stats[1].page_recycled,
- ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc,
- ipa3_ctx->stats.num_sort_tasklet_sched[1],
- ipa3_ctx->stats.page_recycle_cnt_in_tasklet,
- ipa3_ctx->stats.num_of_times_wq_reschd);
+ nbytes = scnprintf(
+ dbg_buff, IPA_MAX_MSG_LEN,
+ "COAL : Total number of packets replenished =%llu\n"
+ "COAL : Number of page recycled packets =%llu\n"
+ "COAL : Number of tmp alloc packets =%llu\n"
+ "COAL : Number of times tasklet scheduled =%llu\n"
+
+ "DEF : Total number of packets replenished =%llu\n"
+ "DEF : Number of page recycled packets =%llu\n"
+ "DEF : Number of tmp alloc packets =%llu\n"
+ "DEF : Number of times tasklet scheduled =%llu\n"
+
+ "COMMON : Number of page recycled in tasklet =%llu\n"
+ "COMMON : Number of times free pages not found in tasklet =%llu\n",
+
+ ipa3_ctx->stats.page_recycle_stats[0].total_replenished,
+ ipa3_ctx->stats.page_recycle_stats[0].page_recycled,
+ ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc,
+ ipa3_ctx->stats.num_sort_tasklet_sched[0],
+
+ ipa3_ctx->stats.page_recycle_stats[1].total_replenished,
+ ipa3_ctx->stats.page_recycle_stats[1].page_recycled,
+ ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc,
+ ipa3_ctx->stats.num_sort_tasklet_sched[1],
+
+ ipa3_ctx->stats.page_recycle_cnt_in_tasklet,
+ ipa3_ctx->stats.num_of_times_wq_reschd);
cnt += nbytes;
for (k = 0; k < 2; k++) {
for (i = 0; i < ipa3_ctx->page_poll_threshold; i++) {
- nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN,
+ nbytes = scnprintf(
+ dbg_buff + cnt, IPA_MAX_MSG_LEN,
"COMMON : Page replenish efficiency[%d][%d] =%llu\n",
k, i, ipa3_ctx->stats.page_recycle_cnt[k][i]);
cnt += nbytes;
@@ -1656,6 +1663,111 @@
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
+
+static ssize_t ipa3_read_lan_coal_stats(
+ struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes=0, cnt=0;
+ u32 i;
+ char buf[4096];
+
+ *buf = '\0';
+
+ for ( i = 0;
+ i < sizeof(ipa3_ctx->stats.coal.coal_veid) /
+ sizeof(ipa3_ctx->stats.coal.coal_veid[0]);
+ i++ ) {
+
+ nbytes += scnprintf(
+ buf + nbytes,
+ sizeof(buf) - nbytes,
+ "(%u/%llu) ",
+ i,
+ ipa3_ctx->stats.coal.coal_veid[i]);
+ }
+
+ nbytes = scnprintf(
+ dbg_buff, IPA_MAX_MSG_LEN,
+ "LAN COAL rx = %llu\n"
+ "LAN COAL pkts = %llu\n"
+ "LAN COAL left as is = %llu\n"
+ "LAN COAL reconstructed = %llu\n"
+ "LAN COAL hdr qmap err = %llu\n"
+ "LAN COAL hdr nlo err = %llu\n"
+ "LAN COAL hdr pkt err = %llu\n"
+ "LAN COAL csum err = %llu\n"
+
+ "LAN COAL ip invalid = %llu\n"
+ "LAN COAL trans invalid = %llu\n"
+ "LAN COAL tcp = %llu\n"
+ "LAN COAL tcp bytes = %llu\n"
+ "LAN COAL udp = %llu\n"
+ "LAN COAL udp bytes = %llu\n"
+ "LAN COAL (veid/cnt)...(veid/cnt) = %s\n",
+
+ ipa3_ctx->stats.coal.coal_rx,
+ ipa3_ctx->stats.coal.coal_pkts,
+ ipa3_ctx->stats.coal.coal_left_as_is,
+ ipa3_ctx->stats.coal.coal_reconstructed,
+ ipa3_ctx->stats.coal.coal_hdr_qmap_err,
+ ipa3_ctx->stats.coal.coal_hdr_nlo_err,
+ ipa3_ctx->stats.coal.coal_hdr_pkt_err,
+ ipa3_ctx->stats.coal.coal_csum_err,
+ ipa3_ctx->stats.coal.coal_ip_invalid,
+ ipa3_ctx->stats.coal.coal_trans_invalid,
+ ipa3_ctx->stats.coal.coal_tcp,
+ ipa3_ctx->stats.coal.coal_tcp_bytes,
+ ipa3_ctx->stats.coal.coal_udp,
+ ipa3_ctx->stats.coal.coal_udp_bytes,
+ buf);
+
+ cnt += nbytes;
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_cache_recycle_stats(
+ struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes;
+ int cnt = 0;
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "COAL (cache) : Total number of pkts replenished =%llu\n"
+ "COAL (cache) : Number of pkts alloced =%llu\n"
+ "COAL (cache) : Number of pkts not alloced =%llu\n"
+
+ "DEF (cache) : Total number of pkts replenished =%llu\n"
+ "DEF (cache) : Number of pkts alloced =%llu\n"
+ "DEF (cache) : Number of pkts not alloced =%llu\n"
+
+ "OTHER (cache) : Total number of packets replenished =%llu\n"
+ "OTHER (cache) : Number of pkts alloced =%llu\n"
+ "OTHER (cache) : Number of pkts not alloced =%llu\n",
+
+ ipa3_ctx->stats.cache_recycle_stats[0].tot_pkt_replenished,
+ ipa3_ctx->stats.cache_recycle_stats[0].pkt_allocd,
+ ipa3_ctx->stats.cache_recycle_stats[0].pkt_found,
+
+ ipa3_ctx->stats.cache_recycle_stats[1].tot_pkt_replenished,
+ ipa3_ctx->stats.cache_recycle_stats[1].pkt_allocd,
+ ipa3_ctx->stats.cache_recycle_stats[1].pkt_found,
+
+ ipa3_ctx->stats.cache_recycle_stats[2].tot_pkt_replenished,
+ ipa3_ctx->stats.cache_recycle_stats[2].pkt_allocd,
+ ipa3_ctx->stats.cache_recycle_stats[2].pkt_found);
+
+ cnt += nbytes;
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -3321,6 +3433,14 @@
.read = ipa3_read_page_recycle_stats,
}
}, {
+ "lan_coal_stats", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_lan_coal_stats,
+ }
+ }, {
+ "cache_recycle_stats", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_cache_recycle_stats,
+ }
+ }, {
"wdi", IPA_READ_ONLY_MODE, NULL, {
.read = ipa3_read_wdi,
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 8014416..1900e0a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -5,6 +5,11 @@
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inet.h>
+#include <linux/if_ether.h>
+#include <net/ip6_checksum.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -12,7 +17,6 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/msm_gsi.h>
-#include <uapi/linux/ip.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <asm/page.h>
@@ -133,7 +137,7 @@
u32 ring_size, gfp_t mem_flag);
static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
u32 ring_size, struct ipa3_sys_context *user_data, gfp_t mem_flag);
-static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl);
+static int ipa3_teardown_pipe(u32 clnt_hdl);
static int ipa_populate_tag_field(struct ipa3_desc *desc,
struct ipa3_tx_pkt_wrapper *tx_pkt,
struct ipahal_imm_cmd_pyld **tag_pyld_ret);
@@ -961,6 +965,12 @@
case IPA_CLIENT_APPS_WAN_CONS:
ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
break;
+ case IPA_CLIENT_APPS_LAN_COAL_CONS:
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ break;
+ case IPA_CLIENT_APPS_LAN_CONS:
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_COAL_CONS);
+ break;
default:
break;
}
@@ -1064,6 +1074,8 @@
if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
client_type = IPA_CLIENT_APPS_WAN_COAL_CONS;
+ else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
+ client_type = IPA_CLIENT_APPS_LAN_COAL_CONS;
else
client_type = sys->ep->client;
@@ -1131,6 +1143,11 @@
usleep_range(SUSPEND_MIN_SLEEP_RX,
SUSPEND_MAX_SLEEP_RX);
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_COAL");
+ } else if (sys->ep->client == IPA_CLIENT_APPS_LAN_COAL_CONS) {
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LAN_COAL");
+ usleep_range(SUSPEND_MIN_SLEEP_RX,
+ SUSPEND_MAX_SLEEP_RX);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LAN_COAL");
} else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) {
IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LOW_LAT");
usleep_range(SUSPEND_MIN_SLEEP_RX,
@@ -1269,7 +1286,9 @@
int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
{
struct ipa3_ep_context *ep;
- int i, ipa_ep_idx, wan_handle, coal_ep_id;
+ int i, ipa_ep_idx;
+ int wan_handle, lan_handle;
+ int wan_coal_ep_id, lan_coal_ep_id;
int result = -EINVAL;
struct ipahal_reg_coal_qmap_cfg qmap_cfg;
char buff[IPA_RESOURCE_NAME_MAX];
@@ -1277,18 +1296,21 @@
int (*tx_completion_func)(struct napi_struct *, int);
if (sys_in == NULL || clnt_hdl == NULL) {
- IPAERR("NULL args\n");
+ IPAERR(
+ "NULL args: sys_in(%p) and/or clnt_hdl(%p)\n",
+ sys_in, clnt_hdl);
goto fail_gen;
}
+ *clnt_hdl = 0;
+
if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
IPAERR("bad parm client:%d fifo_sz:%d\n",
sys_in->client, sys_in->desc_fifo_sz);
goto fail_gen;
}
- ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
- if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+ if ( ! IPA_CLIENT_IS_MAPPED(sys_in->client, ipa_ep_idx) ) {
IPAERR("Invalid client.\n");
goto fail_gen;
}
@@ -1299,9 +1321,11 @@
goto fail_gen;
}
- coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+ wan_coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+ lan_coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_COAL_CONS);
+
/* save the input config parameters */
- if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+ if (IPA_CLIENT_IS_APPS_COAL_CONS(sys_in->client))
ep_cfg_copy = sys_in->ipa_ep_cfg;
IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
@@ -1358,10 +1382,15 @@
/* create IPA PM resources for handling polling mode */
if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS &&
- coal_ep_id != IPA_EP_NOT_ALLOCATED &&
- ipa3_ctx->ep[coal_ep_id].valid == 1) {
+ wan_coal_ep_id != IPA_EP_NOT_ALLOCATED &&
+ ipa3_ctx->ep[wan_coal_ep_id].valid == 1) {
/* Use coalescing pipe PM handle for default pipe also*/
- ep->sys->pm_hdl = ipa3_ctx->ep[coal_ep_id].sys->pm_hdl;
+ ep->sys->pm_hdl = ipa3_ctx->ep[wan_coal_ep_id].sys->pm_hdl;
+ } else if (sys_in->client == IPA_CLIENT_APPS_LAN_CONS &&
+ lan_coal_ep_id != IPA_EP_NOT_ALLOCATED &&
+ ipa3_ctx->ep[lan_coal_ep_id].valid == 1) {
+ /* Use coalescing pipe PM handle for default pipe also*/
+ ep->sys->pm_hdl = ipa3_ctx->ep[lan_coal_ep_id].sys->pm_hdl;
} else if (IPA_CLIENT_IS_CONS(sys_in->client)) {
ep->sys->freepage_wq = alloc_workqueue(buff,
WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS |
@@ -1531,8 +1560,8 @@
if (ep->sys->repl_hdlr == ipa3_replenish_rx_page_recycle) {
if (!(ipa3_ctx->wan_common_page_pool &&
sys_in->client == IPA_CLIENT_APPS_WAN_CONS &&
- coal_ep_id != IPA_EP_NOT_ALLOCATED &&
- ipa3_ctx->ep[coal_ep_id].valid == 1)) {
+ wan_coal_ep_id != IPA_EP_NOT_ALLOCATED &&
+ ipa3_ctx->ep[wan_coal_ep_id].valid == 1)) {
/* Allocate page recycling pool only once. */
if (!ep->sys->page_recycle_repl) {
ep->sys->page_recycle_repl = kzalloc(
@@ -1593,10 +1622,10 @@
} else {
/* Use pool same as coal pipe when common page pool is used. */
ep->sys->common_buff_pool = true;
- ep->sys->common_sys = ipa3_ctx->ep[coal_ep_id].sys;
- ep->sys->repl = ipa3_ctx->ep[coal_ep_id].sys->repl;
+ ep->sys->common_sys = ipa3_ctx->ep[wan_coal_ep_id].sys;
+ ep->sys->repl = ipa3_ctx->ep[wan_coal_ep_id].sys->repl;
ep->sys->page_recycle_repl =
- ipa3_ctx->ep[coal_ep_id].sys->page_recycle_repl;
+ ipa3_ctx->ep[wan_coal_ep_id].sys->page_recycle_repl;
}
}
@@ -1648,17 +1677,40 @@
/*
* Configure the registers and setup the default pipe
*/
- if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
- qmap_cfg.mux_id_byte_sel = IPA_QMAP_ID_BYTE;
- ipahal_write_reg_fields(IPA_COAL_QMAP_CFG, &qmap_cfg);
+ if (IPA_CLIENT_IS_APPS_COAL_CONS(sys_in->client)) {
- if (!sys_in->ext_ioctl_v2) {
- sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
- sys_in->ipa_ep_cfg = ep_cfg_copy;
- result = ipa3_setup_sys_pipe(sys_in, &wan_handle);
+ const char* str = "";
+
+ if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+
+ str = "wan";
+
+ qmap_cfg.mux_id_byte_sel = IPA_QMAP_ID_BYTE;
+
+ ipahal_write_reg_fields(IPA_COAL_QMAP_CFG, &qmap_cfg);
+
+ if (!sys_in->ext_ioctl_v2) {
+ sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
+ sys_in->ipa_ep_cfg = ep_cfg_copy;
+ result = ipa3_setup_sys_pipe(sys_in, &wan_handle);
+ }
+
+ } else { /* (sys_in->client == IPA_CLIENT_APPS_LAN_COAL_CONS) */
+
+ str = "lan";
+
+ if (!sys_in->ext_ioctl_v2) {
+ sys_in->client = IPA_CLIENT_APPS_LAN_CONS;
+ sys_in->ipa_ep_cfg = ep_cfg_copy;
+ sys_in->notify = ipa3_lan_rx_cb;
+ result = ipa3_setup_sys_pipe(sys_in, &lan_handle);
+ }
}
+
if (result) {
- IPAERR("failed to setup default coalescing pipe\n");
+ IPAERR(
+ "Failed to setup default %s coalescing pipe\n",
+ str);
goto fail_repl;
}
@@ -1782,6 +1834,11 @@
netif_napi_del(&ep->sys->napi_rx);
}
+ if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+ stop_coalescing();
+ ipa3_force_close_coal(false, true);
+ }
+
/* channel stop might fail on timeout if IPA is busy */
for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
result = ipa3_stop_gsi_channel(clnt_hdl);
@@ -1793,6 +1850,10 @@
break;
}
+ if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+ start_coalescing();
+ }
+
if (result != GSI_STATUS_SUCCESS) {
IPAERR("GSI stop chan err: %d.\n", result);
ipa_assert();
@@ -1810,12 +1871,13 @@
if (IPA_CLIENT_IS_PROD(ep->client))
atomic_set(&ep->sys->workqueue_flushed, 1);
- /* tear down the default pipe before we reset the channel*/
+ /*
+ * Tear down the default pipe before we reset the channel
+ */
if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
- i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
- if (i == IPA_EP_NOT_ALLOCATED) {
- IPAERR("failed to get idx");
+ if (!IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_CONS, i)) {
+ IPAERR("Failed to get idx for IPA_CLIENT_APPS_WAN_CONS");
return i;
}
@@ -1823,7 +1885,29 @@
* resetting only coalescing channel.
*/
if (ipa3_ctx->ep[i].valid) {
- result = ipa3_teardown_coal_def_pipe(i);
+ result = ipa3_teardown_pipe(i);
+ if (result) {
+ IPAERR("failed to teardown default coal pipe\n");
+ return result;
+ }
+ }
+ }
+
+ /*
+ * Tear down the default pipe before we reset the channel
+ */
+ if (ep->client == IPA_CLIENT_APPS_LAN_COAL_CONS) {
+
+ if (!IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_CONS, i)) {
+ IPAERR("Failed to get idx for IPA_CLIENT_APPS_LAN_CONS");
+ return i;
+ }
+
+ /* If the default channel is already torn down,
+ * resetting only coalescing channel.
+ */
+ if (ipa3_ctx->ep[i].valid) {
+ result = ipa3_teardown_pipe(i);
if (result) {
IPAERR("failed to teardown default lan pipe\n");
return result;
@@ -1913,14 +1997,18 @@
}
/**
- * ipa3_teardown_coal_def_pipe() - Teardown the APPS_WAN_COAL_CONS
- * default GPI pipe and cleanup IPA EP
- * called after the coalesced pipe is destroyed.
- * @clnt_hdl: [in] the handle obtained from ipa3_setup_sys_pipe
+ * ipa3_teardown_pipe()
+ *
+ * Teardown and cleanup of the physical connection (i.e. data
+ * structures, buffers, GSI channel, work queues, etc) associated
+ * with the passed client handle and the endpoint context that the
+ * handle represents.
+ *
+ * @clnt_hdl: [in] A handle obtained from ipa3_setup_sys_pipe
*
* Returns: 0 on success, negative on failure
*/
-static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl)
+static int ipa3_teardown_pipe(u32 clnt_hdl)
{
struct ipa3_ep_context *ep;
int result;
@@ -2298,6 +2386,8 @@
*/
if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
client_type = IPA_CLIENT_APPS_WAN_COAL_CONS;
+ else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
+ client_type = IPA_CLIENT_APPS_LAN_COAL_CONS;
else
client_type = sys->ep->client;
@@ -2371,10 +2461,9 @@
fail_kmem_cache_alloc:
if (atomic_read(&sys->repl->tail_idx) ==
atomic_read(&sys->repl->head_idx)) {
- if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
- sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+ if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
- else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+ else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS)
IPA_STATS_INC_CNT(ipa3_ctx->stats.low_lat_repl_rx_empty);
@@ -2753,6 +2842,8 @@
}
else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
+ else if (sys->ep->client == IPA_CLIENT_APPS_LAN_COAL_CONS)
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty_coal);
else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)
IPA_STATS_INC_CNT(ipa3_ctx->stats.rmnet_ll_rx_empty);
else
@@ -3158,6 +3249,9 @@
int rx_len_cached = 0;
struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+ u32 stats_i =
+ (sys->ep->client == IPA_CLIENT_APPS_LAN_COAL_CONS) ? 0 :
+ (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) ? 1 : 2;
/* start replenish only when buffers go lower than the threshold */
if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
@@ -3182,30 +3276,26 @@
rx_pkt);
goto fail_kmem_cache_alloc;
}
- ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
- rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
- ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
- if (dma_mapping_error(ipa3_ctx->pdev,
- rx_pkt->data.dma_addr)) {
- IPAERR("dma_map_single failure %pK for %pK\n",
- (void *)rx_pkt->data.dma_addr, ptr);
- goto fail_dma_mapping;
- }
+ ipa3_ctx->stats.cache_recycle_stats[stats_i].pkt_allocd++;
} else {
spin_lock_bh(&sys->spinlock);
- rx_pkt = list_first_entry(&sys->rcycl_list,
+ rx_pkt = list_first_entry(
+ &sys->rcycl_list,
struct ipa3_rx_pkt_wrapper, link);
list_del_init(&rx_pkt->link);
spin_unlock_bh(&sys->spinlock);
- ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
- rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
- ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
- if (dma_mapping_error(ipa3_ctx->pdev,
- rx_pkt->data.dma_addr)) {
- IPAERR("dma_map_single failure %pK for %pK\n",
- (void *)rx_pkt->data.dma_addr, ptr);
- goto fail_dma_mapping;
- }
+ ipa3_ctx->stats.cache_recycle_stats[stats_i].pkt_found++;
+ }
+
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+
+ rx_pkt->data.dma_addr = dma_map_single(
+ ipa3_ctx->pdev, ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
+ IPAERR("dma_map_single failure %pK for %pK\n",
+ (void *)rx_pkt->data.dma_addr, ptr);
+ goto fail_dma_mapping;
}
gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
@@ -3217,6 +3307,7 @@
gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
idx++;
rx_len_cached++;
+ ipa3_ctx->stats.cache_recycle_stats[stats_i].tot_pkt_replenished++;
/*
* gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_MAX.
* If this size is reached we need to queue the xfers.
@@ -3325,10 +3416,9 @@
__trigger_repl_work(sys);
if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
- if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
- sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+ if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
- else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+ else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS)
IPA_STATS_INC_CNT(ipa3_ctx->stats.low_lat_rx_empty);
@@ -3489,7 +3579,7 @@
struct ipahal_pkt_status status;
u32 pkt_status_sz;
struct sk_buff *skb2;
- int pad_len_byte;
+ int pad_len_byte = 0;
int len;
unsigned char *buf;
int src_pipe;
@@ -3696,7 +3786,12 @@
goto out;
}
- pad_len_byte = ((status.pkt_len + 3) & ~3) -
+ /*
+ * Padding not needed for LAN coalescing pipe, hence we
+ * only pad when not LAN coalescing pipe.
+ */
+ if (sys->ep->client != IPA_CLIENT_APPS_LAN_COAL_CONS)
+ pad_len_byte = ((status.pkt_len + 3) & ~3) -
status.pkt_len;
len = status.pkt_len + pad_len_byte;
IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
@@ -4060,9 +4155,10 @@
dev_kfree_skb_any(rx_skb);
return;
}
- if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
- skb_pull(rx_skb, ipahal_pkt_status_get_size() +
- IPA_LAN_RX_HEADER_LENGTH);
+ if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
+ u32 extra = ( lan_coal_enabled() ) ? 0 : IPA_LAN_RX_HEADER_LENGTH;
+ skb_pull(rx_skb, ipahal_pkt_status_get_size() + extra);
+ }
else
skb_pull(rx_skb, ipahal_pkt_status_get_size());
@@ -4093,6 +4189,783 @@
}
+/*
+ * The following will help us deduce the real size of an ipv6 header
+ * that may or may not have extensions...
+ *
+ * hdr_ptr:  pointer to the start of the IPv6 header
+ * start:    offset just past the fixed header (ie. sizeof(struct ipv6hdr))
+ * nexthdrp: in/out - on entry the first nexthdr value; on exit the
+ *           upper-layer protocol (eg. IPPROTO_TCP/UDP)
+ * fragp:    out - the frag_off field if a fragment header is seen,
+ *           otherwise 0
+ *
+ * Returns the offset of the upper-layer header (ie. the effective IP
+ * header size) on success, or -EINVAL when NEXTHDR_NONE is hit.
+ *
+ * NOTE(review): modeled on the kernel's ipv6_skip_exthdr(), but walks
+ * the raw buffer with no length bound - assumes the caller's buffer
+ * fully contains all extension headers; confirm against the HW
+ * contract for coalesced frames.
+ */
+static int _skip_ipv6_exthdr(
+ u8 *hdr_ptr,
+ int start,
+ u8 *nexthdrp,
+ __be16 *fragp )
+{
+ u8 nexthdr = *nexthdrp;
+
+ *fragp = 0;
+
+ while ( ipv6_ext_hdr(nexthdr) ) {
+
+ struct ipv6_opt_hdr *hp;
+
+ int hdrlen;
+
+ if (nexthdr == NEXTHDR_NONE)
+ return -EINVAL;
+
+ hp = (struct ipv6_opt_hdr*) (hdr_ptr + (u32) start);
+
+ if (nexthdr == NEXTHDR_FRAGMENT) {
+
+ u32 off = offsetof(struct frag_hdr, frag_off);
+
+ __be16 *fp = (__be16*) (hdr_ptr + (u32)start + off);
+
+ *fragp = *fp;
+
+ /*
+ * Non-zero fragment offset: not the first fragment, so
+ * the upper-layer header is absent; stop walking here.
+ */
+ if (ntohs(*fragp) & ~0x7)
+ break;
+
+ hdrlen = 8;
+
+ } else if (nexthdr == NEXTHDR_AUTH) {
+
+ hdrlen = ipv6_authlen(hp);
+
+ } else {
+
+ hdrlen = ipv6_optlen(hp);
+ }
+
+ nexthdr = hp->nexthdr;
+
+ start += hdrlen;
+ }
+
+ *nexthdrp = nexthdr;
+
+ return start;
+}
+
+/*
+ * The following defines and structure used for calculating Ethernet
+ * frame type and size...
+ */
+#define IPA_ETH_VLAN_2TAG 0x88A8 /* 802.1ad double-tagged ethertype */
+#define IPA_ETH_VLAN_TAG 0x8100 /* 802.1Q single-tagged ethertype */
+#define IPA_ETH_TAG_SZ sizeof(u32) /* size of one VLAN tag (TPID+TCI) */
+
+/*
+ * The following structure used for containing packet payload
+ * information.
+ */
+typedef struct ipa_pkt_data_s {
+ void* pkt; /* start of one packet's L4 payload */
+ u32 pkt_len; /* payload length in bytes */
+} ipa_pkt_data_t;
+
+/*
+ * The following structure used for consolidating all header
+ * information.
+ */
+typedef struct ipa_header_data_s {
+ struct ethhdr* eth_hdr; /* L2 header from the coalesced frame */
+ u32 eth_hdr_size; /* L2 header size incl. any VLAN tags */
+ u8 ip_vers; /* 4 or 6 */
+ void* ip_hdr; /* L3 header (iphdr or ipv6hdr) */
+ u32 ip_hdr_size; /* L3 size incl. v4 options/v6 ext hdrs */
+ u8 ip_proto; /* IPPROTO_TCP or IPPROTO_UDP */
+ void* proto_hdr; /* L4 header (tcphdr or udphdr) */
+ u32 proto_hdr_size; /* L4 header size */
+ u32 aggr_hdr_len; /* eth + ip + proto header sizes */
+ u32 curr_seq; /* running TCP sequence number */
+} ipa_header_data_t;
+
+/*
+ * Prepare a rebuilt aggregate skb for checksum offload: recompute the
+ * IPv4 header checksum (when v4), seed the TCP/UDP checksum field
+ * with the ones-complement pseudo-header sum, and mark the skb
+ * CHECKSUM_PARTIAL so the stack/HW completes the L4 checksum over
+ * the transport header plus payload.
+ *
+ * Returns 0 on success, -1 when skb or hdr_data is NULL.
+ */
+static int
+_calc_partial_csum(
+ struct sk_buff* skb,
+ ipa_header_data_t* hdr_data,
+ u32 aggr_payload_size )
+{
+ u32 ip_hdr_size;
+ u32 proto_hdr_size;
+ u8 ip_vers;
+ u8 ip_proto;
+ u8* new_ip_hdr;
+ u8* new_proto_hdr;
+ u32 len_for_calc;
+ __sum16 pseudo;
+
+ if ( !skb || !hdr_data ) {
+
+ IPAERR(
+ "NULL args: skb(%p) and/or hdr_data(%p)\n",
+ skb, hdr_data);
+
+ return -1;
+
+ } else {
+
+ ip_hdr_size = hdr_data->ip_hdr_size;
+ proto_hdr_size = hdr_data->proto_hdr_size;
+ ip_vers = hdr_data->ip_vers;
+ ip_proto = hdr_data->ip_proto;
+
+ /* headers sit at fixed offsets from skb->data in the new skb */
+ new_ip_hdr = (u8*) skb->data + hdr_data->eth_hdr_size;
+
+ new_proto_hdr = new_ip_hdr + ip_hdr_size;
+
+ /* pseudo-header length covers L4 header plus payload */
+ len_for_calc = proto_hdr_size + aggr_payload_size;
+
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ if ( ip_vers == 4 ) {
+
+ struct iphdr* iph = (struct iphdr*) new_ip_hdr;
+
+ iph->check = 0;
+ iph->check = ip_fast_csum(iph, iph->ihl);
+
+ pseudo = ~csum_tcpudp_magic(
+ iph->saddr,
+ iph->daddr,
+ len_for_calc,
+ ip_proto,
+ 0);
+
+ } else { /* ( ip_vers == 6 ) */
+
+ struct ipv6hdr* iph = (struct ipv6hdr*) new_ip_hdr;
+
+ pseudo = ~csum_ipv6_magic(
+ &iph->saddr,
+ &iph->daddr,
+ len_for_calc,
+ ip_proto,
+ 0);
+ }
+
+ /* stash the pseudo sum where csum offload expects it */
+ if ( ip_proto == IPPROTO_TCP ) {
+
+ struct tcphdr* hdr = (struct tcphdr*) new_proto_hdr;
+
+ hdr->check = pseudo;
+
+ skb->csum_offset = offsetof(struct tcphdr, check);
+
+ } else {
+
+ struct udphdr* hdr = (struct udphdr*) new_proto_hdr;
+
+ hdr->check = pseudo;
+
+ skb->csum_offset = offsetof(struct udphdr, check);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The following function takes the constituent parts of an Ethernet
+ * and IP packet and creates an skb from them...
+ *
+ * rx_skb: when non-NULL, the original coalesced buffer is forwarded
+ * as-is (headers already in place); when NULL, a new skb is built
+ * from hdr_data plus the pkts[] payload segments. In both cases the
+ * resulting skb is handed to the client's notify callback, which
+ * takes ownership. A caller-supplied rx_skb is freed here if no
+ * client is registered, so the caller never needs to free it on this
+ * path.
+ *
+ * Returns 0 on success (including the no-client case), -1 on skb
+ * allocation failure.
+ */
+static int
+_prep_and_send_skb(
+ struct sk_buff* rx_skb,
+ struct ipa3_ep_context* ep,
+ u32 metadata,
+ u8 ucp,
+ ipa_header_data_t* hdr_data,
+ ipa_pkt_data_t* pkts,
+ u32 num_pkts,
+ u32 aggr_payload_size,
+ u8 pkt_id,
+ bool recalc_cksum )
+{
+ struct ethhdr* eth_hdr;
+ u32 eth_hdr_size;
+ u8 ip_vers;
+ void* ip_hdr;
+ u32 ip_hdr_size;
+ u8 ip_proto;
+ void* proto_hdr;
+ u32 proto_hdr_size;
+ u32 aggr_hdr_len;
+ u32 i;
+
+ void *new_proto_hdr, *new_ip_hdr, *new_eth_hdr;
+
+ struct skb_shared_info *shinfo;
+
+ struct sk_buff *head_skb;
+
+ void *client_priv;
+ void (*client_notify)(
+ void *client_priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+
+ client_notify = 0;
+
+ /* snapshot the callback under the lock; don't call with it held */
+ spin_lock(&ipa3_ctx->disconnect_lock);
+ if (ep->valid && ep->client_notify &&
+ likely((!atomic_read(&ep->disconnect_in_progress)))) {
+
+ client_notify = ep->client_notify;
+ client_priv = ep->priv;
+ }
+ spin_unlock(&ipa3_ctx->disconnect_lock);
+
+ if ( client_notify ) {
+
+ eth_hdr = hdr_data->eth_hdr;
+ eth_hdr_size = hdr_data->eth_hdr_size;
+ ip_vers = hdr_data->ip_vers;
+ ip_hdr = hdr_data->ip_hdr;
+ ip_hdr_size = hdr_data->ip_hdr_size;
+ ip_proto = hdr_data->ip_proto;
+ proto_hdr = hdr_data->proto_hdr;
+ proto_hdr_size = hdr_data->proto_hdr_size;
+ aggr_hdr_len = hdr_data->aggr_hdr_len;
+
+ if ( rx_skb ) {
+
+ head_skb = rx_skb;
+
+ ipa3_ctx->stats.coal.coal_left_as_is++;
+
+ } else {
+
+ head_skb = alloc_skb(aggr_hdr_len + aggr_payload_size, GFP_ATOMIC);
+
+ if ( unlikely(!head_skb) ) {
+ IPAERR("skb alloc failure\n");
+ return -1;
+ }
+
+ ipa3_ctx->stats.coal.coal_reconstructed++;
+
+ /*
+ * NOTE(review): ip_proto here is the L4 protocol number
+ * (IPPROTO_TCP/UDP), while skb->protocol conventionally
+ * holds the L2 ethertype (eg. htons(ETH_P_IP)) - confirm
+ * the client expects this value.
+ */
+ head_skb->protocol = ip_proto;
+
+ /*
+ * Copy MAC header into the skb...
+ */
+ new_eth_hdr = skb_put_data(head_skb, eth_hdr, eth_hdr_size);
+
+ skb_reset_mac_header(head_skb);
+
+ /*
+ * Copy, and update, IP[4|6] header into the skb...
+ */
+ new_ip_hdr = skb_put_data(head_skb, ip_hdr, ip_hdr_size);
+
+ if ( ip_vers == 4 ) {
+
+ struct iphdr* ip4h = new_ip_hdr;
+
+ ip4h->id = htons(ntohs(ip4h->id) + pkt_id);
+
+ ip4h->tot_len =
+ htons(ip_hdr_size + proto_hdr_size + aggr_payload_size);
+
+ } else {
+
+ struct ipv6hdr* ip6h = new_ip_hdr;
+
+ ip6h->payload_len =
+ htons(proto_hdr_size + aggr_payload_size);
+ }
+
+ skb_reset_network_header(head_skb);
+
+ /*
+ * Copy, and update, [TCP|UDP] header into the skb...
+ */
+ new_proto_hdr = skb_put_data(head_skb, proto_hdr, proto_hdr_size);
+
+ if ( ip_proto == IPPROTO_TCP ) {
+
+ struct tcphdr* hdr = new_proto_hdr;
+
+ hdr_data->curr_seq += (aggr_payload_size) ? aggr_payload_size : 1;
+
+ hdr->seq = htonl(hdr_data->curr_seq);
+
+ } else {
+
+ struct udphdr* hdr = new_proto_hdr;
+
+ u16 len = sizeof(struct udphdr) + aggr_payload_size;
+
+ hdr->len = htons(len);
+ }
+
+ skb_reset_transport_header(head_skb);
+
+ /*
+ * Now aggregate all the individual physical payloads into
+ * the skb.
+ */
+ for ( i = 0; i < num_pkts; i++ ) {
+ skb_put_data(head_skb, pkts[i].pkt, pkts[i].pkt_len);
+ }
+ }
+
+ /*
+ * Is a recalc of the various checksums in order?
+ */
+ if ( recalc_cksum ) {
+ _calc_partial_csum(head_skb, hdr_data, aggr_payload_size);
+ }
+
+ /*
+ * Let's add some resegmentation info into the head skb. The
+ * data will allow the stack to resegment the data...should it
+ * need to relative to MTU...
+ */
+ shinfo = skb_shinfo(head_skb);
+
+ shinfo->gso_segs = num_pkts;
+ shinfo->gso_size = pkts[0].pkt_len;
+
+ if (ip_proto == IPPROTO_TCP) {
+ shinfo->gso_type = (ip_vers == 4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ ipa3_ctx->stats.coal.coal_tcp++;
+ ipa3_ctx->stats.coal.coal_tcp_bytes += aggr_payload_size;
+ } else {
+ shinfo->gso_type = SKB_GSO_UDP_L4;
+ ipa3_ctx->stats.coal.coal_udp++;
+ ipa3_ctx->stats.coal.coal_udp_bytes += aggr_payload_size;
+ }
+
+ /*
+ * Send this new skb to the client...
+ */
+ *(u16 *)head_skb->cb = ((metadata >> 16) & 0xFFFF);
+ *(u8 *)(head_skb->cb + 4) = ucp;
+
+ IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
+ metadata, *(u32 *)head_skb->cb);
+ IPADBG_LOW("ucp: %d\n", *(u8 *)(head_skb->cb + 4));
+
+ client_notify(client_priv, IPA_RECEIVE, (unsigned long)(head_skb));
+
+ } else if ( rx_skb ) {
+
+ /*
+ * No client to hand the buffer to: free a caller-supplied
+ * skb here, otherwise it would leak (the caller does not
+ * free it on the rx_skb-consuming path).
+ */
+ dev_kfree_skb_any(rx_skb);
+ }
+
+ return 0;
+}
+
+/*
+ * The following will process a coalesced LAN packet from the IPA...
+ *
+ * Parses the status + qmap + per-NLO status headers at the front of
+ * the coalesced buffer, validates them, then either forwards the
+ * buffer as-is (single NLO, all checksums valid, GRO-able) or splits
+ * it into per-flow skbs via _prep_and_send_skb(). The incoming
+ * rx_skb is consumed on every path.
+ */
+void ipa3_lan_coal_rx_cb(
+ void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *rx_skb = (struct sk_buff *) data;
+
+ unsigned int src_pipe;
+ u8 ucp;
+ u32 metadata;
+
+ struct ipahal_pkt_status_thin status;
+ struct ipa3_ep_context *ep;
+
+ u8* qmap_hdr_data_ptr;
+ struct qmap_hdr_data qmap_hdr;
+
+ struct coal_packet_status_info *cpsi, *cpsi_orig;
+ u8* stat_info_ptr;
+
+ u32 pkt_status_sz = ipahal_pkt_status_get_size();
+
+ u32 eth_hdr_size;
+ u32 ip_hdr_size;
+ u8 ip_vers, ip_proto;
+ u32 proto_hdr_size;
+ u32 cpsi_hdrs_size;
+ u32 aggr_payload_size;
+
+ u32 pkt_len;
+
+ struct ethhdr* eth_hdr;
+ void* ip_hdr;
+ struct iphdr* ip4h;
+ struct ipv6hdr* ip6h;
+ void* proto_hdr;
+ u8* pkt_data;
+ bool gro = true;
+ bool cksum_is_zero;
+ ipa_header_data_t hdr_data;
+
+ ipa_pkt_data_t in_pkts[MAX_COAL_PACKETS];
+ u32 in_pkts_sub;
+
+ u8 tot_pkts;
+
+ u32 i, j;
+
+ u64 cksum_mask = 0;
+
+ int ret;
+
+ IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
+
+ ipa3_ctx->stats.coal.coal_rx++;
+
+ ipahal_pkt_status_parse_thin(rx_skb->data, &status);
+ src_pipe = status.endp_src_idx;
+ metadata = status.metadata;
+ ucp = status.ucp;
+ ep = &ipa3_ctx->ep[src_pipe];
+ if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes) ||
+ unlikely(atomic_read(&ep->disconnect_in_progress))) {
+ IPAERR("drop pipe=%d\n", src_pipe);
+ goto process_done;
+ }
+
+ memset(&hdr_data, 0, sizeof(hdr_data));
+ memset(&qmap_hdr, 0, sizeof(qmap_hdr));
+
+ /*
+ * Let's get to, then parse, the qmap header...
+ */
+ qmap_hdr_data_ptr = rx_skb->data + pkt_status_sz;
+
+ ret = ipahal_qmap_parse(qmap_hdr_data_ptr, &qmap_hdr);
+
+ if ( unlikely(ret) ) {
+ IPAERR("ipahal_qmap_parse fail\n");
+ ipa3_ctx->stats.coal.coal_hdr_qmap_err++;
+ goto process_done;
+ }
+
+ if ( ! VALID_NLS(qmap_hdr.num_nlos) ) {
+ IPAERR("Bad num_nlos(%u) value\n", qmap_hdr.num_nlos);
+ ipa3_ctx->stats.coal.coal_hdr_nlo_err++;
+ goto process_done;
+ }
+
+ stat_info_ptr = qmap_hdr_data_ptr + sizeof(union qmap_hdr_u);
+
+ cpsi = cpsi_orig = (struct coal_packet_status_info*) stat_info_ptr;
+
+ /*
+ * Reconstruct the 48 bits of checksum info. And count total
+ * packets as well...
+ */
+ for (i = tot_pkts = 0;
+ i < MAX_COAL_PACKET_STATUS_INFO;
+ ++i, ++cpsi) {
+
+ cpsi->pkt_len = ntohs(cpsi->pkt_len);
+
+ cksum_mask |= ((u64) cpsi->pkt_cksum_errs) << (8 * i);
+
+ if ( i < qmap_hdr.num_nlos ) {
+ tot_pkts += cpsi->num_pkts;
+ }
+ }
+
+ /*
+ * A bounds check.
+ *
+ * Technically, the hardware shouldn't give us a bad count, but
+ * just to be safe...
+ */
+ if ( tot_pkts > MAX_COAL_PACKETS ) {
+ IPAERR("tot_pkts(%u) > MAX_COAL_PACKETS(%u)\n",
+ tot_pkts, MAX_COAL_PACKETS);
+ ipa3_ctx->stats.coal.coal_hdr_pkt_err++;
+ goto process_done;
+ }
+
+ ipa3_ctx->stats.coal.coal_pkts += tot_pkts;
+
+ /*
+ * Move along past the coal headers...
+ */
+ cpsi_hdrs_size = MAX_COAL_PACKET_STATUS_INFO * sizeof(u32);
+
+ pkt_data = stat_info_ptr + cpsi_hdrs_size;
+
+ /*
+ * Let's processes the Ethernet header...
+ */
+ eth_hdr = (struct ethhdr*) pkt_data;
+
+ switch ( ntohs(eth_hdr->h_proto) )
+ {
+ case IPA_ETH_VLAN_2TAG:
+ eth_hdr_size = sizeof(struct ethhdr) + (IPA_ETH_TAG_SZ * 2);
+ break;
+ case IPA_ETH_VLAN_TAG:
+ eth_hdr_size = sizeof(struct ethhdr) + IPA_ETH_TAG_SZ;
+ break;
+ default:
+ eth_hdr_size = sizeof(struct ethhdr);
+ break;
+ }
+
+ /*
+ * Get to and process the ip header...
+ */
+ ip_hdr = (u8*) eth_hdr + eth_hdr_size;
+
+ /*
+ * Is it a IPv[4|6] header?
+ */
+ if (((struct iphdr*) ip_hdr)->version == 4) {
+ /*
+ * Eth frame is carrying ip v4 payload.
+ */
+ ip_vers = 4;
+ ip4h = (struct iphdr*) ip_hdr;
+ ip_hdr_size = ip4h->ihl * sizeof(u32);
+ ip_proto = ip4h->protocol;
+
+ /*
+ * Don't allow grouping of any packets with IP options
+ * (i.e. don't allow when ihl != 5)...
+ */
+ gro = (ip4h->ihl == 5);
+
+ } else if (((struct ipv6hdr*) ip_hdr)->version == 6) {
+ /*
+ * Eth frame is carrying ip v6 payload.
+ */
+ int hdr_size;
+ __be16 frag_off;
+
+ ip_vers = 6;
+ ip6h = (struct ipv6hdr*) ip_hdr;
+ ip_proto = ip6h->nexthdr;
+
+ /*
+ * If extension headers exist, we need to analyze/skip them,
+ * hence...
+ */
+ hdr_size = _skip_ipv6_exthdr(
+ (u8*) ip_hdr,
+ sizeof(*ip6h),
+ &ip_proto,
+ &frag_off);
+
+ /*
+ * If we run into a problem, or this has a fragmented header
+ * (which technically should not be possible if the HW works
+ * as intended), bail.
+ */
+ if (hdr_size < 0 || frag_off) {
+ IPAERR(
+ "_skip_ipv6_exthdr() failed. Errored with hdr_size(%d) "
+ "and/or frag_off(%d)\n",
+ hdr_size,
+ ntohs(frag_off));
+ ipa3_ctx->stats.coal.coal_ip_invalid++;
+ goto process_done;
+ }
+
+ ip_hdr_size = hdr_size;
+
+ /*
+ * Don't allow grouping of any packets with IPv6 extension
+ * headers (i.e. don't allow when ip_hdr_size != basic v6
+ * header size).
+ */
+ gro = (ip_hdr_size == sizeof(*ip6h));
+
+ } else {
+
+ IPAERR("Not a v4 or v6 header...can't process\n");
+ ipa3_ctx->stats.coal.coal_ip_invalid++;
+ goto process_done;
+ }
+
+ /*
+ * Get to and process the protocol header...
+ */
+ proto_hdr = (u8*) ip_hdr + ip_hdr_size;
+
+ if (ip_proto == IPPROTO_TCP) {
+
+ struct tcphdr* hdr = (struct tcphdr*) proto_hdr;
+
+ hdr_data.curr_seq = ntohl(hdr->seq);
+
+ proto_hdr_size = hdr->doff * sizeof(u32);
+
+ cksum_is_zero = false;
+
+ } else if (ip_proto == IPPROTO_UDP) {
+
+ proto_hdr_size = sizeof(struct udphdr);
+
+ /* a zero UDP/IPv4 checksum means "no checksum computed" */
+ cksum_is_zero = (ip_vers == 4 && ((struct udphdr*) proto_hdr)->check == 0);
+
+ } else {
+
+ IPAERR("Not a TCP or UDP header...can't process\n");
+ ipa3_ctx->stats.coal.coal_trans_invalid++;
+ goto process_done;
+
+ }
+
+ /*
+ * The following will adjust the skb internals (ie. skb->data and
+ * skb->len), such that they're positioned, and reflect, the data
+ * starting at the ETH header...
+ */
+ skb_pull(
+ rx_skb,
+ pkt_status_sz +
+ sizeof(union qmap_hdr_u) +
+ cpsi_hdrs_size);
+
+ /*
+ * Consolidate all header, header type, and header size info...
+ */
+ hdr_data.eth_hdr = eth_hdr;
+ hdr_data.eth_hdr_size = eth_hdr_size;
+ hdr_data.ip_vers = ip_vers;
+ hdr_data.ip_hdr = ip_hdr;
+ hdr_data.ip_hdr_size = ip_hdr_size;
+ hdr_data.ip_proto = ip_proto;
+ hdr_data.proto_hdr = proto_hdr;
+ hdr_data.proto_hdr_size = proto_hdr_size;
+ hdr_data.aggr_hdr_len = eth_hdr_size + ip_hdr_size + proto_hdr_size;
+
+ if ( qmap_hdr.vcid < GSI_VEID_MAX ) {
+ ipa3_ctx->stats.coal.coal_veid[qmap_hdr.vcid] += 1;
+ }
+
+ /*
+ * Quick check to see if we really need to go any further...
+ */
+ if ( gro && qmap_hdr.num_nlos == 1 && qmap_hdr.chksum_valid ) {
+
+ cpsi = cpsi_orig;
+
+ in_pkts[0].pkt = rx_skb->data + hdr_data.aggr_hdr_len;
+ in_pkts[0].pkt_len = cpsi->pkt_len - (ip_hdr_size + proto_hdr_size);
+
+ in_pkts_sub = 1;
+
+ aggr_payload_size = rx_skb->len - hdr_data.aggr_hdr_len;
+
+ _prep_and_send_skb(
+ rx_skb,
+ ep, metadata, ucp,
+ &hdr_data,
+ in_pkts,
+ in_pkts_sub,
+ aggr_payload_size,
+ tot_pkts,
+ false);
+
+ /*
+ * rx_skb ownership was handed off above, so don't fall
+ * through to the free below. NOTE(review): verify
+ * _prep_and_send_skb() consumes or frees the skb when no
+ * client callback is registered.
+ */
+ return;
+ }
+
+ /*
+ * Time to process packet payloads...
+ */
+ pkt_data = (u8*) proto_hdr + proto_hdr_size;
+
+ for ( i = tot_pkts = 0, cpsi = cpsi_orig;
+ i < qmap_hdr.num_nlos;
+ ++i, ++cpsi ) {
+
+ aggr_payload_size = in_pkts_sub = 0;
+
+ for ( j = 0;
+ j < cpsi->num_pkts;
+ j++, tot_pkts++, cksum_mask >>= 1 ) {
+
+ bool csum_err = cksum_mask & 1;
+
+ pkt_len = cpsi->pkt_len - (ip_hdr_size + proto_hdr_size);
+
+ if ( csum_err || ! gro ) {
+
+ if ( csum_err ) {
+ ipa3_ctx->stats.coal.coal_csum_err++;
+ }
+
+ /*
+ * If there are previously queued packets, send them
+ * now...
+ */
+ if ( in_pkts_sub ) {
+
+ _prep_and_send_skb(
+ NULL,
+ ep, metadata, ucp,
+ &hdr_data,
+ in_pkts,
+ in_pkts_sub,
+ aggr_payload_size,
+ tot_pkts,
+ !cksum_is_zero);
+
+ in_pkts_sub = aggr_payload_size = 0;
+ }
+
+ /*
+ * Now send the singleton...
+ */
+ in_pkts[in_pkts_sub].pkt = pkt_data;
+ in_pkts[in_pkts_sub].pkt_len = pkt_len;
+
+ aggr_payload_size += in_pkts[in_pkts_sub].pkt_len;
+ pkt_data += in_pkts[in_pkts_sub].pkt_len;
+
+ in_pkts_sub++;
+
+ _prep_and_send_skb(
+ NULL,
+ ep, metadata, ucp,
+ &hdr_data,
+ in_pkts,
+ in_pkts_sub,
+ aggr_payload_size,
+ tot_pkts,
+ (csum_err) ? false : !cksum_is_zero);
+
+ in_pkts_sub = aggr_payload_size = 0;
+
+ continue;
+ }
+
+ in_pkts[in_pkts_sub].pkt = pkt_data;
+ in_pkts[in_pkts_sub].pkt_len = pkt_len;
+
+ aggr_payload_size += in_pkts[in_pkts_sub].pkt_len;
+ pkt_data += in_pkts[in_pkts_sub].pkt_len;
+
+ in_pkts_sub++;
+ }
+
+ if ( in_pkts_sub ) {
+
+ _prep_and_send_skb(
+ NULL,
+ ep, metadata, ucp,
+ &hdr_data,
+ in_pkts,
+ in_pkts_sub,
+ aggr_payload_size,
+ tot_pkts,
+ !cksum_is_zero);
+ }
+ }
+
+process_done:
+ /*
+ * One way or the other, we no longer need the skb, hence...
+ */
+ dev_kfree_skb_any(rx_skb);
+}
+
static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
{
rx_pkt->data.dma_addr = 0;
@@ -4124,8 +4997,10 @@
* corresponding rx pkt. Once finished return the head_skb to be sent up the
* network stack.
*/
-static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
- *notify, bool update_truesize)
+static struct sk_buff *handle_skb_completion(
+ struct gsi_chan_xfer_notify *notify,
+ bool update_truesize,
+ struct ipa3_rx_pkt_wrapper **rx_pkt_ptr )
{
struct ipa3_rx_pkt_wrapper *rx_pkt, *tmp;
struct sk_buff *rx_skb, *next_skb = NULL;
@@ -4135,6 +5010,10 @@
sys = (struct ipa3_sys_context *) notify->chan_user_data;
rx_pkt = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
+ if ( rx_pkt_ptr ) {
+ *rx_pkt_ptr = rx_pkt;
+ }
+
spin_lock_bh(&rx_pkt->sys->spinlock);
rx_pkt->sys->len--;
spin_unlock_bh(&rx_pkt->sys->spinlock);
@@ -4183,7 +5062,7 @@
/* Check added for handling LAN consumer packet without EOT flag */
if (notify->evt_id == GSI_CHAN_EVT_EOT ||
sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
- /* go over the list backward to save computations on updating length */
+ /* go over the list backward to save computations on updating length */
list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
rx_skb = rx_pkt->data.skb;
@@ -4328,37 +5207,23 @@
return rx_skb;
}
-static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
+static void ipa3_wq_rx_common(
+ struct ipa3_sys_context *sys,
struct gsi_chan_xfer_notify *notify)
{
- struct sk_buff *rx_skb;
- struct ipa3_sys_context *coal_sys;
- int ipa_ep_idx;
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ struct sk_buff *rx_skb;
if (!notify) {
IPAERR_RL("gsi_chan_xfer_notify is null\n");
return;
}
- rx_skb = handle_skb_completion(notify, true);
+
+ rx_skb = handle_skb_completion(notify, true, &rx_pkt);
if (rx_skb) {
- sys->pyld_hdlr(rx_skb, sys);
-
- /* For coalescing, we have 2 transfer rings to replenish */
- if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
- ipa_ep_idx = ipa3_get_ep_mapping(
- IPA_CLIENT_APPS_WAN_CONS);
-
- if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
- IPAERR("Invalid client.\n");
- return;
- }
-
- coal_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
- coal_sys->repl_hdlr(coal_sys);
- }
-
- sys->repl_hdlr(sys);
+ rx_pkt->sys->pyld_hdlr(rx_skb, rx_pkt->sys);
+ rx_pkt->sys->repl_hdlr(rx_pkt->sys);
}
}
@@ -4374,7 +5239,7 @@
for (i = 0; i < num; i++) {
if (!ipa3_ctx->ipa_wan_skb_page)
rx_skb = handle_skb_completion(
- ¬ify[i], false);
+ ¬ify[i], false, NULL);
else
rx_skb = handle_page_completion(
¬ify[i], false);
@@ -4404,7 +5269,7 @@
/* TODO: add chaining for coal case */
for (i = 0; i < num; i++) {
rx_skb = handle_skb_completion(
- ¬ify[i], false);
+ ¬ify[i], false, NULL);
if (rx_skb) {
sys->pyld_hdlr(rx_skb, sys);
/*
@@ -4628,9 +5493,8 @@
atomic_set(&sys->workqueue_flushed, 0);
}
} else {
- if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
- in->client == IPA_CLIENT_APPS_WAN_CONS ||
- in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
+ if (IPA_CLIENT_IS_LAN_CONS(in->client) ||
+ IPA_CLIENT_IS_WAN_CONS(in->client) ||
in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS ||
in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
sys->ep->status.status_en = true;
@@ -4645,11 +5509,11 @@
IPA_GENERIC_RX_BUFF_BASE_SZ);
sys->get_skb = ipa3_get_skb_ipa_rx;
sys->free_skb = ipa3_free_skb_rx;
- if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+ if (IPA_CLIENT_IS_APPS_COAL_CONS(in->client))
in->ipa_ep_cfg.aggr.aggr = IPA_COALESCE;
else
in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
- if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+ if (IPA_CLIENT_IS_LAN_CONS(in->client)) {
INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
sys->repl_hdlr =
@@ -4665,8 +5529,11 @@
IPA_GENERIC_AGGR_PKT_LIMIT;
in->ipa_ep_cfg.aggr.aggr_time_limit =
IPA_GENERIC_AGGR_TIME_LIMIT;
- } else if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
- in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
+ if (in->client == IPA_CLIENT_APPS_LAN_COAL_CONS) {
+ in->ipa_ep_cfg.aggr.aggr_coal_l2 = true;
+ in->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
+ }
+ } else if (IPA_CLIENT_IS_WAN_CONS(in->client) ||
in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
if (!in->ext_ioctl_v2)
@@ -5297,6 +6164,8 @@
*/
if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
client_type = IPA_CLIENT_APPS_WAN_COAL_CONS;
+ else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
+ client_type = IPA_CLIENT_APPS_LAN_COAL_CONS;
else
client_type = sys->ep->client;
/*
@@ -5475,10 +6344,10 @@
u32 ring_size;
int result;
gfp_t mem_flag = GFP_KERNEL;
- u32 coale_ep_idx;
+ u32 wan_coal_ep_id, lan_coal_ep_id;
- if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
- in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
+ if (IPA_CLIENT_IS_WAN_CONS(in->client) ||
+ IPA_CLIENT_IS_LAN_CONS(in->client) ||
in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS ||
in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_PROD ||
in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS ||
@@ -5490,7 +6359,7 @@
IPAERR("EP context is empty\n");
return -EINVAL;
}
- coale_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+
/*
* GSI ring length is calculated based on the desc_fifo_sz
* which was meant to define the BAM desc fifo. GSI descriptors
@@ -5516,11 +6385,25 @@
goto fail_setup_event_ring;
} else if (in->client == IPA_CLIENT_APPS_WAN_CONS &&
- coale_ep_idx != IPA_EP_NOT_ALLOCATED &&
- ipa3_ctx->ep[coale_ep_idx].valid == 1) {
+ IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_WAN_COAL_CONS, wan_coal_ep_id)) {
IPADBG("Wan consumer pipe configured\n");
result = ipa_gsi_setup_coal_def_channel(in, ep,
- &ipa3_ctx->ep[coale_ep_idx]);
+ &ipa3_ctx->ep[wan_coal_ep_id]);
+ if (result) {
+ IPAERR("Failed to setup default coal GSI channel\n");
+ goto fail_setup_event_ring;
+ }
+ return result;
+ } else if (in->client == IPA_CLIENT_APPS_LAN_COAL_CONS) {
+ result = ipa_gsi_setup_event_ring(ep,
+ IPA_COMMON_EVENT_RING_SIZE, mem_flag);
+ if (result)
+ goto fail_setup_event_ring;
+ } else if (in->client == IPA_CLIENT_APPS_LAN_CONS &&
+ IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_LAN_COAL_CONS, lan_coal_ep_id)) {
+ IPADBG("Lan consumer pipe configured\n");
+ result = ipa_gsi_setup_coal_def_channel(in, ep,
+ &ipa3_ctx->ep[lan_coal_ep_id]);
if (result) {
IPAERR("Failed to setup default coal GSI channel\n");
goto fail_setup_event_ring;
@@ -5672,7 +6555,7 @@
int result;
memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
- if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+ if (IPA_CLIENT_IS_APPS_COAL_CONS(ep->client))
gsi_channel_props.prot = GSI_CHAN_PROT_GCI;
else
gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
@@ -6241,7 +7124,7 @@
ret = ipa_poll_gsi_pkt(sys, ¬ify);
if (ret)
break;
- rx_skb = handle_skb_completion(¬ify, true);
+ rx_skb = handle_skb_completion(¬ify, true, NULL);
if (rx_skb) {
sys->pyld_hdlr(rx_skb, sys);
sys->repl_hdlr(sys);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 676e0aa..2ca24be 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1546,6 +1546,30 @@
u64 tmp_alloc;
};
+struct ipa3_cache_recycle_stats {
+ u64 pkt_allocd; /* wrapper not found in cache; freshly allocated - TODO confirm */
+ u64 pkt_found; /* wrapper reused from the cache - TODO confirm */
+ u64 tot_pkt_replenished; /* total rx pkts queued back to the ring */
+};
+
+/*
+ * Counters for the LAN coalescing rx path (ipa3_lan_coal_rx_cb).
+ */
+struct lan_coal_stats {
+ u64 coal_rx; /* coalesced buffers received */
+ u64 coal_left_as_is; /* buffers forwarded without rebuild */
+ u64 coal_reconstructed; /* buffers split/rebuilt into new skbs */
+ u64 coal_pkts; /* total constituent packets seen */
+ u64 coal_hdr_qmap_err; /* qmap header parse failures */
+ u64 coal_hdr_nlo_err; /* bad num_nlos in qmap header */
+ u64 coal_hdr_pkt_err; /* packet count exceeded MAX_COAL_PACKETS */
+ u64 coal_csum_err; /* packets flagged with checksum errors */
+ u64 coal_ip_invalid; /* not IPv4/IPv6, or bad v6 ext headers */
+ u64 coal_trans_invalid; /* L4 protocol not TCP/UDP */
+ u64 coal_veid[GSI_VEID_MAX]; /* per-virtual-endpoint-id counts */
+ u64 coal_tcp; /* TCP aggregates sent up */
+ u64 coal_tcp_bytes; /* TCP payload bytes sent up */
+ u64 coal_udp; /* UDP aggregates sent up */
+ u64 coal_udp_bytes; /* UDP payload bytes sent up */
+};
+
struct ipa3_stats {
u32 tx_sw_pkts;
u32 tx_hw_pkts;
@@ -1565,6 +1589,7 @@
u32 rmnet_ll_rx_empty;
u32 rmnet_ll_repl_rx_empty;
u32 lan_rx_empty;
+ u32 lan_rx_empty_coal;
u32 lan_repl_rx_empty;
u32 low_lat_rx_empty;
u32 low_lat_repl_rx_empty;
@@ -1575,11 +1600,13 @@
u64 lower_order;
u32 pipe_setup_fail_cnt;
struct ipa3_page_recycle_stats page_recycle_stats[3];
+ struct ipa3_cache_recycle_stats cache_recycle_stats[3];
u64 page_recycle_cnt[3][IPA_PAGE_POLL_THRESHOLD_MAX];
atomic_t num_buff_above_thresh_for_def_pipe_notified;
atomic_t num_buff_above_thresh_for_coal_pipe_notified;
atomic_t num_buff_below_thresh_for_def_pipe_notified;
atomic_t num_buff_below_thresh_for_coal_pipe_notified;
+ struct lan_coal_stats coal;
u64 num_sort_tasklet_sched[3];
u64 num_of_times_wq_reschd;
u64 page_recycle_cnt_in_tasklet;
@@ -2174,6 +2201,7 @@
* mhi_ctrl_state: state of mhi ctrl pipes
*/
struct ipa3_context {
+ bool coal_stopped;
struct ipa3_char_device_context cdev;
struct ipa3_ep_context ep[IPA5_MAX_NUM_PIPES];
bool skip_ep_cfg_shadow[IPA5_MAX_NUM_PIPES];
@@ -2364,7 +2392,11 @@
u32 icc_num_cases;
u32 icc_num_paths;
u32 icc_clk[IPA_ICC_LVL_MAX][IPA_ICC_PATH_MAX][IPA_ICC_TYPE_MAX];
- struct ipahal_imm_cmd_pyld *coal_cmd_pyld[2];
+#define WAN_COAL_SUB 0
+#define LAN_COAL_SUB 1
+#define ULSO_COAL_SUB 2
+#define MAX_CCP_SUB (ULSO_COAL_SUB + 1)
+ struct ipahal_imm_cmd_pyld *coal_cmd_pyld[MAX_CCP_SUB];
struct ipa_mem_buffer ulso_wa_cmd;
u32 tx_wrapper_cache_max_size;
struct ipa3_app_clock_vote app_clock_vote;
@@ -2745,6 +2777,36 @@
struct icc_path *icc_path[IPA_ICC_PATH_MAX];
};
+/*
+ * When data arrives on IPA_CLIENT_APPS_LAN_COAL_CONS, said data will
+ * contain a qmap header followed by an array of the following. The
+ * number of them in the array is always MAX_COAL_PACKET_STATUS_INFO
+ * (see below); however, only "num_nlos" (a field in the qmap header)
+ * will be valid. The rest are to be ignored.
+ */
+struct coal_packet_status_info {
+ u16 pkt_len;
+ u8 pkt_cksum_errs;
+ u8 num_pkts;
+} __aligned(1);
+/*
+ * This is the number of the struct coal_packet_status_info that
+ * follow the qmap header. As above, only "num_nlos" are valid. The
+ * rest are to be ignored.
+ */
+#define MAX_COAL_PACKET_STATUS_INFO (6)
+#define VALID_NLS(nls) \
+ ((nls) > 0 && (nls) <= MAX_COAL_PACKET_STATUS_INFO)
+/*
+ * The following is the total number of bits in all the pkt_cksum_errs
+ * in each of the struct coal_packet_status_info(s) that follow the
+ * qmap header. Each bit is meant to tell us if a packet is good or
+ * bad, relative to a checksum. Given this, the max number of bits
+ * dictates the max number of packets that can be in a buffer from the
+ * IPA.
+ */
+#define MAX_COAL_PACKETS (48)
+
extern struct ipa3_context *ipa3_ctx;
extern bool ipa_net_initialized;
@@ -2937,6 +2999,9 @@
void ipa3_default_evict_register( void );
int ipa3_set_evict_policy(
struct ipa_ioc_coal_evict_policy *evict_pol);
+void start_coalescing( void );
+void stop_coalescing( void );
+bool lan_coal_enabled( void );
/*
* Messaging
@@ -3238,6 +3303,10 @@
int ipa3_teth_bridge_driver_init(void);
void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+void ipa3_lan_coal_rx_cb(
+ void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
int _ipa_init_sram_v3(void);
int _ipa_init_hdr_v3_0(void);
@@ -3410,7 +3479,9 @@
void ipa3_set_resorce_groups_min_max_limits(void);
void ipa3_set_resorce_groups_config(void);
int ipa3_suspend_apps_pipes(bool suspend);
-void ipa3_force_close_coal(void);
+void ipa3_force_close_coal(
+ bool close_wan,
+ bool close_lan );
int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
enum ipa_ip_type ip_type,
bool hashable,
@@ -3619,7 +3690,7 @@
*/
#define IPA_COAL_VP_LRU_THRSHLD 0
#define IPA_COAL_EVICTION_EN true
-#define IPA_COAL_VP_LRU_GRAN_SEL IPA_EVICT_TIME_GRAN_10_USEC
+#define IPA_COAL_VP_LRU_GRAN_SEL 0
#define IPA_COAL_VP_LRU_UDP_THRSHLD 0
#define IPA_COAL_VP_LRU_TCP_THRSHLD 0
#define IPA_COAL_VP_LRU_UDP_THRSHLD_EN 1
@@ -3631,15 +3702,10 @@
* eviction timers.
*/
enum ipa_evict_time_gran_type {
- IPA_EVICT_TIME_GRAN_10_USEC,
- IPA_EVICT_TIME_GRAN_20_USEC,
- IPA_EVICT_TIME_GRAN_50_USEC,
- IPA_EVICT_TIME_GRAN_100_USEC,
- IPA_EVICT_TIME_GRAN_1_MSEC,
- IPA_EVICT_TIME_GRAN_10_MSEC,
- IPA_EVICT_TIME_GRAN_100_MSEC,
- IPA_EVICT_TIME_GRAN_NEAR_HALF_SEC, /* 0.65536s */
- IPA_EVICT_TIME_GRAN_MAX,
+ IPA_EVICT_TIME_GRAN_0,
+ IPA_EVICT_TIME_GRAN_1,
+ IPA_EVICT_TIME_GRAN_2,
+ IPA_EVICT_TIME_GRAN_3,
};
/* query ipa APQ mode*/
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 64c8f8f..f214423 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1077,9 +1077,18 @@
(*(entry))->ipacm_installed = user;
if ((*(entry))->rule.coalesce &&
- (*(entry))->rule.dst == IPA_CLIENT_APPS_WAN_CONS &&
- ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
- (*(entry))->rule.dst = IPA_CLIENT_APPS_WAN_COAL_CONS;
+ IPA_CLIENT_IS_LAN_or_WAN_CONS((*(entry))->rule.dst)) {
+ int unused;
+ if ((*(entry))->rule.dst == IPA_CLIENT_APPS_LAN_CONS) {
+ if (IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_COAL_CONS, unused)) {
+ (*(entry))->rule.dst = IPA_CLIENT_APPS_LAN_COAL_CONS;
+ }
+ } else { /* == IPA_CLIENT_APPS_WAN_CONS */
+ if (IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_COAL_CONS, unused)) {
+ (*(entry))->rule.dst = IPA_CLIENT_APPS_WAN_COAL_CONS;
+ }
+ }
+ }
if (rule->enable_stats)
(*entry)->cnt_idx = rule->cnt_idx;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index b672e1a..de60a93 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -6531,9 +6531,57 @@
__stringify(RESERVERD_CONS_123),
__stringify(RESERVERD_PROD_124),
__stringify(IPA_CLIENT_TPUT_CONS),
+ __stringify(RESERVERD_PROD_126),
+ __stringify(IPA_CLIENT_APPS_LAN_COAL_CONS),
};
EXPORT_SYMBOL(ipa_clients_strings);
+/*
+ * Toggle the coal_force_to_default field in the coalescing master
+ * config register. Only takes effect once IPA initialization has
+ * completed and the HW is v5.5 or newer; otherwise a no-op.
+ *
+ * force_to_default: true to force coalesced traffic to the default
+ * disposition, false to restore normal coalescing.
+ */
+static void _set_coalescing_disposition(
+ bool force_to_default )
+{
+ if ( ipa3_ctx->ipa_initialization_complete
+ &&
+ ipa3_ctx->ipa_hw_type >= IPA_HW_v5_5 ) {
+
+ struct ipahal_reg_coal_master_cfg master_cfg;
+
+ memset(&master_cfg, 0, sizeof(master_cfg));
+
+ /* read-modify-write to preserve the other register fields */
+ ipahal_read_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
+
+ master_cfg.coal_force_to_default = force_to_default;
+
+ ipahal_write_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
+ }
+}
+
+/*
+ * Re-enable HW coalescing if it was previously stopped via
+ * stop_coalescing(). Idempotent: does nothing when coalescing is
+ * already running.
+ */
+void start_coalescing()
+{
+ if ( ipa3_ctx->coal_stopped ) {
+ _set_coalescing_disposition(false);
+ ipa3_ctx->coal_stopped = false;
+ }
+}
+
+/*
+ * Stop HW coalescing by forcing traffic to the default disposition.
+ * Idempotent: does nothing when coalescing is already stopped.
+ * Undone by start_coalescing().
+ */
+void stop_coalescing()
+{
+ if ( ! ipa3_ctx->coal_stopped ) {
+ _set_coalescing_disposition(true);
+ ipa3_ctx->coal_stopped = true;
+ }
+}
+
+/*
+ * Returns true when the LAN coalescing consumer pipe
+ * (IPA_CLIENT_APPS_LAN_COAL_CONS) is mapped and its endpoint is
+ * valid, i.e. LAN coalescing is in use. Always false before IPA
+ * initialization completes.
+ */
+bool lan_coal_enabled()
+{
+ if ( ipa3_ctx->ipa_initialization_complete ) {
+ int ep_idx;
+ if ( IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_LAN_COAL_CONS, ep_idx) ) {
+ return true;
+ }
+ }
+ return false;
+}
+
int ipa3_set_evict_policy(
struct ipa_ioc_coal_evict_policy *evict_pol)
{
@@ -6638,6 +6686,10 @@
 break;
 case IPA_HW_v5_1:
 str = "5.1";
+ break; /* was missing: "5.1" fell through to default */
+ case IPA_HW_v5_5:
+ str = "5.5";
+ break; /* prevent fall-through into "Invalid version" */
 default:
 str = "Invalid version";
 break;
@@ -7486,12 +7536,13 @@
master_cfg.coal_ipv4_id_ignore = ipa3_ctx->coal_ipv4_id_ignore;
ipahal_write_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
- IPADBG(": coal-ipv4-id-ignore = %s\n",
- master_cfg.coal_ipv4_id_ignore
- ? "True" : "False");
-
+ IPADBG(
+ ": coal-ipv4-id-ignore = %s\n",
+ master_cfg.coal_ipv4_id_ignore ?
+ "True" : "False");
ipa_comp_cfg();
+
/*
* In IPA 4.2 filter and routing hashing not supported
* disabling hash enable register.
@@ -11801,7 +11852,7 @@
static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
{
struct ipa_ep_cfg_ctrl cfg;
- int ipa_ep_idx, coal_ep_idx;
+ int ipa_ep_idx, wan_coal_ep_idx, lan_coal_ep_idx;
struct ipa3_ep_context *ep;
int res;
@@ -11832,8 +11883,6 @@
return 0;
}
- coal_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
-
/*
* Configure the callback mode only one time after starting the channel
* otherwise observing IEOB interrupt received before configure callmode
@@ -11858,7 +11907,7 @@
/* Apps prod pipes use common event ring so cannot configure mode*/
/*
- * Skipping to configure mode for default wan pipe,
+ * Skipping to configure mode for default [w|l]an pipe,
* as both pipes using commong event ring. if both pipes
* configure same event ring observing race condition in
* updating current polling state.
@@ -11866,7 +11915,9 @@
if (IPA_CLIENT_IS_APPS_PROD(client) ||
(client == IPA_CLIENT_APPS_WAN_CONS &&
- coal_ep_idx != IPA_EP_NOT_ALLOCATED))
+ IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_COAL_CONS, wan_coal_ep_idx)) ||
+ (client == IPA_CLIENT_APPS_LAN_CONS &&
+ IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_COAL_CONS, lan_coal_ep_idx)))
return 0;
if (suspend) {
@@ -11883,24 +11934,57 @@
return 0;
}
-void ipa3_force_close_coal(void)
+void ipa3_force_close_coal(
+ bool close_wan,
+ bool close_lan )
{
- struct ipa3_desc desc[2];
+ struct ipa3_desc desc[ MAX_CCP_SUB ];
+
int ep_idx, num_desc = 0;
- ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
- if (ep_idx == IPA_EP_NOT_ALLOCATED || (!ipa3_ctx->ep[ep_idx].valid))
- return;
+ if ( close_wan
+ &&
+ IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_WAN_COAL_CONS, ep_idx)
+ &&
+ ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB] ) {
- ipa3_init_imm_cmd_desc(&desc[0], ipa3_ctx->coal_cmd_pyld[0]);
- num_desc++;
- if (ipa3_ctx->ulso_wa) {
- ipa3_init_imm_cmd_desc(&desc[1], ipa3_ctx->coal_cmd_pyld[1]);
+ ipa3_init_imm_cmd_desc(
+ &desc[num_desc],
+ ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]);
+
num_desc++;
}
- IPADBG("Sending %d descriptor for coal force close\n", num_desc);
- if (ipa3_send_cmd(num_desc, desc))
- IPADBG("ipa3_send_cmd timedout\n");
+
+ if ( close_lan
+ &&
+ IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_LAN_COAL_CONS, ep_idx)
+ &&
+ ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB] ) {
+
+ ipa3_init_imm_cmd_desc(
+ &desc[num_desc],
+ ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]);
+
+ num_desc++;
+ }
+
+ if (ipa3_ctx->ulso_wa && ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB] ) {
+ ipa3_init_imm_cmd_desc(
+ &desc[num_desc],
+ ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]);
+
+ num_desc++;
+ }
+
+ if ( num_desc ) {
+ IPADBG("Sending %d descriptor(s) for coal force close\n", num_desc);
+ if ( ipa3_send_cmd_timeout(
+ num_desc,
+ desc,
+ IPA_COAL_CLOSE_FRAME_CMD_TIMEOUT_MSEC) ) {
+ IPADBG("ipa3_send_cmd_timeout timedout\n");
+ }
+ }
}
int ipa3_suspend_apps_pipes(bool suspend)
@@ -11909,25 +11993,45 @@
struct ipa_ep_cfg_holb holb_cfg;
int odl_ep_idx;
+ if (suspend) {
+ stop_coalescing();
+ ipa3_force_close_coal(true, true);
+ }
+
/* As per HPG first need start/stop coalescing channel
* then default one. Coalescing client number was greater then
* default one so starting the last client.
*/
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, suspend);
- if (res == -EAGAIN)
+ if (res == -EAGAIN) {
+ if (suspend) start_coalescing();
goto undo_coal_cons;
+ }
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, suspend);
- if (res == -EAGAIN)
+ if (res == -EAGAIN) {
+ if (suspend) start_coalescing();
goto undo_wan_cons;
+ }
+
+ res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_COAL_CONS, suspend);
+ if (res == -EAGAIN) {
+ if (suspend) start_coalescing();
+ goto undo_lan_coal_cons;
+ }
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, suspend);
- if (res == -EAGAIN)
+ if (res == -EAGAIN) {
+ if (suspend) start_coalescing();
goto undo_lan_cons;
+ }
+
+ if (suspend) start_coalescing();
res = _ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, suspend);
- if (res == -EAGAIN)
+ if (res == -EAGAIN) {
goto undo_odl_cons;
+ }
odl_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_ODL_DPL_CONS);
if (odl_ep_idx != IPA_EP_NOT_ALLOCATED && ipa3_ctx->ep[odl_ep_idx].valid) {
@@ -11949,13 +12053,15 @@
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS,
suspend);
- if (res == -EAGAIN)
+ if (res == -EAGAIN) {
goto undo_qmap_cons;
+ }
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS,
suspend);
- if (res == -EAGAIN)
+ if (res == -EAGAIN) {
goto undo_low_lat_data_cons;
+ }
if (suspend) {
struct ipahal_reg_tx_wrapper tx;
@@ -12033,6 +12139,8 @@
_ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, !suspend);
undo_lan_cons:
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, !suspend);
+undo_lan_coal_cons:
+ _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_COAL_CONS, !suspend);
undo_wan_cons:
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, !suspend);
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, !suspend);
@@ -12094,57 +12202,98 @@
struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
struct ipahal_imm_cmd_register_read dummy_reg_read = { 0 };
struct ipahal_reg_valmask valmask;
- int ep_idx;
u32 offset = 0;
+ int ep_idx, num_desc = 0;
- ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
- if (ep_idx == IPA_EP_NOT_ALLOCATED)
- return 0;
- IPADBG("Allocate coal close frame cmd\n");
- reg_write_cmd.skip_pipeline_clear = false;
- if (ipa3_ctx->ulso_wa) {
- reg_write_cmd.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
- } else {
- reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- }
- if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
- offset = ipahal_get_reg_ofst(
- IPA_AGGR_FORCE_CLOSE);
- else
- offset = ipahal_get_ep_reg_offset(
- IPA_AGGR_FORCE_CLOSE_n, ep_idx);
- reg_write_cmd.offset = offset;
- ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
- reg_write_cmd.value = valmask.val;
- reg_write_cmd.value_mask = valmask.mask;
- ipa3_ctx->coal_cmd_pyld[0] =
- ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
- ®_write_cmd, false);
- if (!ipa3_ctx->coal_cmd_pyld[0]) {
- IPAERR("fail construct register_write imm cmd\n");
- ipa_assert();
- return 0;
+ if ( IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_COAL_CONS, ep_idx) ) {
+
+ IPADBG("Allocate wan coal close frame cmd\n");
+
+ reg_write_cmd.skip_pipeline_clear = false;
+ if (ipa3_ctx->ulso_wa) {
+ reg_write_cmd.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
+ } else {
+ reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ }
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
+ offset = ipahal_get_reg_ofst(
+ IPA_AGGR_FORCE_CLOSE);
+ else
+ offset = ipahal_get_ep_reg_offset(
+ IPA_AGGR_FORCE_CLOSE_n, ep_idx);
+ reg_write_cmd.offset = offset;
+ ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+ reg_write_cmd.value = valmask.val;
+ reg_write_cmd.value_mask = valmask.mask;
+ ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB] =
+ ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE,
+ ®_write_cmd, false);
+ if (!ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]) {
+ IPAERR("fail construct register_write imm cmd\n");
+ ipa_assert();
+ return 0;
+ }
+ num_desc++;
}
- if (ipa3_ctx->ulso_wa) {
- /* dummary regsiter read IC with HPS clear*/
+ if ( IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_COAL_CONS, ep_idx) ) {
+
+ IPADBG("Allocate lan coal close frame cmd\n");
+
+ reg_write_cmd.skip_pipeline_clear = false;
+ if (ipa3_ctx->ulso_wa) {
+ reg_write_cmd.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
+ } else {
+ reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ }
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
+ offset = ipahal_get_reg_ofst(
+ IPA_AGGR_FORCE_CLOSE);
+ else
+ offset = ipahal_get_ep_reg_offset(
+ IPA_AGGR_FORCE_CLOSE_n, ep_idx);
+ reg_write_cmd.offset = offset;
+ ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+ reg_write_cmd.value = valmask.val;
+ reg_write_cmd.value_mask = valmask.mask;
+ ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB] =
+ ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE,
+ ®_write_cmd, false);
+ if (!ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]) {
+ IPAERR("fail construct register_write imm cmd\n");
+ ipa_assert();
+ return 0;
+ }
+ num_desc++;
+ }
+
+ if ( ipa3_ctx->ulso_wa ) {
+ /*
+	 * Dummy register read IC with HPS clear
+ */
ipa3_ctx->ulso_wa_cmd.size = 4;
- ipa3_ctx->ulso_wa_cmd.base = dma_alloc_coherent(ipa3_ctx->pdev,
- ipa3_ctx->ulso_wa_cmd.size,
- &ipa3_ctx->ulso_wa_cmd.phys_base, GFP_KERNEL);
+ ipa3_ctx->ulso_wa_cmd.base =
+ dma_alloc_coherent(
+ ipa3_ctx->pdev,
+ ipa3_ctx->ulso_wa_cmd.size,
+ &ipa3_ctx->ulso_wa_cmd.phys_base, GFP_KERNEL);
if (ipa3_ctx->ulso_wa_cmd.base == NULL) {
ipa_assert();
}
- offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n,
+ offset = ipahal_get_reg_n_ofst(
+ IPA_STAT_QUOTA_BASE_n,
ipa3_ctx->ee);
dummy_reg_read.skip_pipeline_clear = false;
dummy_reg_read.pipeline_clear_options = IPAHAL_HPS_CLEAR;
dummy_reg_read.offset = offset;
dummy_reg_read.sys_addr = ipa3_ctx->ulso_wa_cmd.phys_base;
- ipa3_ctx->coal_cmd_pyld[1] = ipahal_construct_imm_cmd(
- IPA_IMM_CMD_REGISTER_READ,
- &dummy_reg_read, false);
- if (!ipa3_ctx->coal_cmd_pyld[1]) {
+ ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB] =
+ ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_READ,
+ &dummy_reg_read, false);
+ if (!ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]) {
IPAERR("failed to construct DUMMY READ IC\n");
ipa_assert();
}
@@ -12155,15 +12304,27 @@
void ipa3_free_coal_close_frame(void)
{
- if (ipa3_ctx->coal_cmd_pyld[0])
- ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[0]);
+ if (ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]) {
+ ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]);
+ }
- if (ipa3_ctx->coal_cmd_pyld[1]) {
- ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[1]);
- dma_free_coherent(ipa3_ctx->pdev, ipa3_ctx->ulso_wa_cmd.size,
- ipa3_ctx->ulso_wa_cmd.base, ipa3_ctx->ulso_wa_cmd.phys_base);
+ if (ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]) {
+ ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]);
+ }
+
+ if (ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]) {
+ ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]);
+ }
+
+ if ( ipa3_ctx->ulso_wa_cmd.base ) {
+ dma_free_coherent(
+ ipa3_ctx->pdev,
+ ipa3_ctx->ulso_wa_cmd.size,
+ ipa3_ctx->ulso_wa_cmd.base,
+ ipa3_ctx->ulso_wa_cmd.phys_base);
}
}
+
/**
* ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
*
@@ -12715,6 +12876,7 @@
case IPA_HW_v4_9:
case IPA_HW_v4_11:
case IPA_HW_v5_1:
+ case IPA_HW_v5_5:
return true;
default:
IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 42c5918..c76a7e2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -57,8 +57,11 @@
__stringify(IPAHAL_PKT_STATUS_EXCEPTION_CSUM),
};
+/*
+ * Forward declarations.
+ */
static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
-
+static int ipahal_qmap_init(enum ipa_hw_type ipa_hw_type);
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
@@ -2576,6 +2579,12 @@
goto bail_free_ctx;
}
+ if (ipahal_qmap_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal qmap\n");
+ result = -EFAULT;
+ goto bail_free_ctx;
+ }
+
ipahal_hdr_init(ipa_hw_type);
if (ipahal_fltrt_init(ipa_hw_type)) {
@@ -2636,3 +2645,184 @@
mem->phys_base = 0;
}
}
+
+/*
+ * ***************************************************************
+ *
+ * To follow, a generalized qmap header manipulation API.
+ *
+ * The functions immediately following this comment are version
+ * specific qmap parsing functions. The referred to in the
+ * ipahal_qmap_parse_tbl below.
+ *
+ * ***************************************************************
+ */
+/*
+ * ipa_qmap_hdr_parse_v4_5() - IPA v4.5 specific qmap/coal header
+ * parse into the generic qmap_hdr_data result.
+ *
+ * NOTE: must read through the qmap4_5 overlay, not qmap5_0: in the
+ * v5.0 layout zero_checksum sits one bit later (after ip_id_cfg), so
+ * accessing v4.5 data via qmap5_0 returns the wrong bit.
+ */
+void ipa_qmap_hdr_parse_v4_5(
+	union qmap_hdr_u* qmap_hdr,
+	struct qmap_hdr_data* qmap_data_rslt )
+{
+	qmap_data_rslt->cd = qmap_hdr->qmap4_5.cd;
+	qmap_data_rslt->qmap_next_hdr = qmap_hdr->qmap4_5.qmap_next_hdr;
+	qmap_data_rslt->pad = qmap_hdr->qmap4_5.pad;
+	qmap_data_rslt->mux_id = qmap_hdr->qmap4_5.mux_id;
+	qmap_data_rslt->packet_len_with_pad = qmap_hdr->qmap4_5.packet_len_with_pad;
+
+	qmap_data_rslt->hdr_type = qmap_hdr->qmap4_5.hdr_type;
+	qmap_data_rslt->coal_next_hdr = qmap_hdr->qmap4_5.coal_next_hdr;
+	qmap_data_rslt->zero_checksum = qmap_hdr->qmap4_5.zero_checksum;
+}
+
+/*
+ * ipa_qmap_hdr_parse_v5_0() - IPA v5.0 specific qmap/coal header
+ * parse into the generic qmap_hdr_data result.
+ */
+void ipa_qmap_hdr_parse_v5_0(
+	union qmap_hdr_u* qmap_hdr,
+	struct qmap_hdr_data* qmap_data_rslt )
+{
+	qmap_data_rslt->cd = qmap_hdr->qmap5_0.cd;
+	qmap_data_rslt->qmap_next_hdr = qmap_hdr->qmap5_0.qmap_next_hdr;
+	qmap_data_rslt->pad = qmap_hdr->qmap5_0.pad;
+	qmap_data_rslt->mux_id = qmap_hdr->qmap5_0.mux_id;
+	qmap_data_rslt->packet_len_with_pad = qmap_hdr->qmap5_0.packet_len_with_pad;
+
+	qmap_data_rslt->hdr_type = qmap_hdr->qmap5_0.hdr_type;
+	qmap_data_rslt->coal_next_hdr = qmap_hdr->qmap5_0.coal_next_hdr;
+	qmap_data_rslt->ip_id_cfg = qmap_hdr->qmap5_0.ip_id_cfg;
+	qmap_data_rslt->zero_checksum = qmap_hdr->qmap5_0.zero_checksum;
+	qmap_data_rslt->additional_hdr_size = qmap_hdr->qmap5_0.additional_hdr_size;
+	qmap_data_rslt->segment_size = qmap_hdr->qmap5_0.segment_size;
+}
+
+/*
+ * ipa_qmap_hdr_parse_v5_5() - IPA v5.5 specific qmap/coal header
+ * parse into the generic qmap_hdr_data result.
+ *
+ * The qmap5_5 overlay is laid out to be read while the data are
+ * still in network order; packet_len_with_pad is the only multi-byte
+ * field accessed here, hence the lone ntohs().
+ */
+void ipa_qmap_hdr_parse_v5_5(
+	union qmap_hdr_u* qmap_hdr,
+	struct qmap_hdr_data* qmap_data_rslt )
+{
+	qmap_data_rslt->cd = qmap_hdr->qmap5_5.cd;
+	qmap_data_rslt->qmap_next_hdr = qmap_hdr->qmap5_5.qmap_next_hdr;
+	qmap_data_rslt->pad = qmap_hdr->qmap5_5.pad;
+	qmap_data_rslt->mux_id = qmap_hdr->qmap5_5.mux_id;
+	qmap_data_rslt->packet_len_with_pad = ntohs(qmap_hdr->qmap5_5.packet_len_with_pad);
+
+	qmap_data_rslt->hdr_type = qmap_hdr->qmap5_5.hdr_type;
+	qmap_data_rslt->coal_next_hdr = qmap_hdr->qmap5_5.coal_next_hdr;
+	qmap_data_rslt->chksum_valid = qmap_hdr->qmap5_5.chksum_valid;
+	qmap_data_rslt->num_nlos = qmap_hdr->qmap5_5.num_nlos;
+	qmap_data_rslt->inc_ip_id = qmap_hdr->qmap5_5.inc_ip_id;
+	qmap_data_rslt->rnd_ip_id = qmap_hdr->qmap5_5.rnd_ip_id;
+	qmap_data_rslt->close_value = qmap_hdr->qmap5_5.close_value;
+	qmap_data_rslt->close_type = qmap_hdr->qmap5_5.close_type;
+	qmap_data_rslt->vcid = qmap_hdr->qmap5_5.vcid;
+}
+
+/*
+ * Structure used to describe a version specific qmap parsing table.
+ */
+struct ipahal_qmap_parse_s {
+ /*
+ * Function prototype for a version specific qmap parsing
+ * function.
+ */
+ void (*parse)(
+ union qmap_hdr_u* qmap_hdr,
+ struct qmap_hdr_data* qmap_data_rslt );
+};
+
+/*
+ * Table used to contain and drive version specific qmap parsing
+ * functions.
+ */
+static struct ipahal_qmap_parse_s ipahal_qmap_parse_tbl[IPA_HW_MAX] = {
+ /* IPAv4.5 */
+ [IPA_HW_v4_5] = {
+ ipa_qmap_hdr_parse_v4_5
+ },
+ /* IPAv5.0 */
+ [IPA_HW_v5_0] = {
+ ipa_qmap_hdr_parse_v5_0
+ },
+ /* IPAv5.5 */
+ [IPA_HW_v5_5] = {
+ ipa_qmap_hdr_parse_v5_5
+ },
+};
+
+/*
+ * ipahal_qmap_init() - Validate and forward-fill the version-indexed
+ * qmap parse table up to (and including) the running HW version.
+ *
+ * Any table slot left zeroed (no version-specific parser registered)
+ * inherits the parser of the nearest earlier version, so
+ * ipahal_qmap_parse() can index the table directly by HW type.
+ *
+ * Returns 0 on success, -EINVAL on an out-of-range HW type.
+ */
+static int ipahal_qmap_init(
+	enum ipa_hw_type ipa_hw_type)
+{
+	struct ipahal_qmap_parse_s zero_obj;
+	int i;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if (ipa_hw_type < 0 || ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+
+	for (i = IPA_HW_v4_5; i < ipa_hw_type; i++) {
+
+		/* Empty slot: inherit the previous version's parser. */
+		if (memcmp(&ipahal_qmap_parse_tbl[i+1],
+			&zero_obj,
+			sizeof(struct ipahal_qmap_parse_s)) == 0 ) {
+			memcpy(
+				&ipahal_qmap_parse_tbl[i+1],
+				&ipahal_qmap_parse_tbl[i],
+				sizeof(struct ipahal_qmap_parse_s));
+		} else {
+			/* Non-empty slot must carry a parse function. */
+			if (ipahal_qmap_parse_tbl[i+1].parse == 0) {
+				IPAHAL_ERR(
+					"QMAP parse table missing parse function ipa_ver=%d\n",
+					i+1);
+				WARN_ON(1);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * FUNCTION: ipahal_qmap_parse()
+ *
+ * The following Function to be called when version specific qmap parsing is
+ * required.
+ *
+ * ARGUMENTS:
+ *
+ * unparsed_qmap
+ *
+ * The QMAP header off of a freshly received data packet. As per
+ * the architecture documentation, the data contained herein will
+ * be in network order.
+ *
+ * qmap_data_rslt
+ *
+ * A location to store the parsed data from unparsed_qmap above.
+ */
+int ipahal_qmap_parse(
+ const void* unparsed_qmap,
+ struct qmap_hdr_data* qmap_data_rslt )
+{
+ union qmap_hdr_u qmap_hdr;
+
+ IPAHAL_DBG_LOW("Parse qmap/coal header\n");
+
+ if (!unparsed_qmap || !qmap_data_rslt) {
+ IPAHAL_ERR(
+ "Input Error: unparsed_qmap=%pK qmap_data_rslt=%pK\n",
+ unparsed_qmap, qmap_data_rslt);
+ return -EINVAL;
+ }
+
+ if (ipahal_ctx->hw_type < IPA_HW_v4_5) {
+ IPAHAL_ERR(
+ "Unsupported qmap parse for IPA HW type (%d)\n",
+ ipahal_ctx->hw_type);
+ return -EINVAL;
+ }
+
+ ipahal_qmap_ntoh(unparsed_qmap, &qmap_hdr);
+
+ ipahal_qmap_parse_tbl[ipahal_ctx->hw_type].parse(&qmap_hdr, qmap_data_rslt);
+
+ return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 0aafc74..4e4fa02 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -843,4 +843,295 @@
*/
u32 ipahal_get_ep_reg_idx(u32 ep_num);
+/*
+ * ***************************************************************
+ *
+ * To follow, a generalized qmap header manipulation API.
+ *
+ * ***************************************************************
+ */
+/**
+ * qmap_hdr_v4_5 -
+ *
+ * @cd -
+ * @qmap_next_hdr -
+ * @pad -
+ * @mux_id -
+ * @packet_len_with_pad -
+ * @hdr_type -
+ * @coal_next_hdr -
+ * @zero_checksum -
+ *
+ * The following bit layout is when the data are in host order.
+ *
+ * FIXME FINDME Need to be reordered properly to reflect network
+ * ordering as seen by little endian host (qmap_hdr_v5_5
+ * below is properly done).
+ */
+struct qmap_hdr_v4_5 {
+ /*
+ * 32 bits of qmap header to follow
+ */
+ u64 cd: 1;
+ u64 qmap_next_hdr: 1;
+ u64 pad: 6;
+ u64 mux_id: 8;
+ u64 packet_len_with_pad: 16;
+ /*
+ * 32 bits of coalescing frame header to follow
+ */
+ u64 hdr_type: 7;
+ u64 coal_next_hdr: 1;
+ u64 zero_checksum: 1;
+ u64 rsrvd1: 7;
+ u64 rsrvd2: 16;
+} __packed;
+
+/**
+ * qmap_hdr_v5_0 -
+ *
+ * @cd -
+ * @qmap_next_hdr -
+ * @pad -
+ * @mux_id -
+ * @packet_len_with_pad -
+ * @hdr_type -
+ * @coal_next_hdr -
+ * @ip_id_cfg -
+ * @zero_checksum -
+ * @additional_hdr_size -
+ * @segment_size -
+ *
+ * The following bit layout is when the data are in host order.
+ *
+ * FIXME FINDME Need to be reordered properly to reflect network
+ * ordering as seen by little endian host (qmap_hdr_v5_5
+ * below is properly done).
+ */
+struct qmap_hdr_v5_0 {
+ /*
+ * 32 bits of qmap header to follow
+ */
+ u64 cd: 1;
+ u64 qmap_next_hdr: 1;
+ u64 pad: 6;
+ u64 mux_id: 8;
+ u64 packet_len_with_pad: 16;
+ /*
+ * 32 bits of coalescing frame header to follow
+ */
+ u64 hdr_type: 7;
+ u64 coal_next_hdr: 1;
+ u64 ip_id_cfg: 1;
+ u64 zero_checksum: 1;
+ u64 rsrvd: 1;
+ u64 additional_hdr_size: 5;
+ u64 segment_size: 16;
+} __packed;
+
+/**
+ * qmap_hdr_v5_5 -
+ *
+ * @cd -
+ * @qmap_next_hdr -
+ * @pad -
+ * @mux_id -
+ * @packet_len_with_pad -
+ * @hdr_type -
+ * @coal_next_hdr -
+ * @chksum_valid -
+ * @num_nlos -
+ * @inc_ip_id -
+ * @rnd_ip_id -
+ * @close_value -
+ * @close_type -
+ * @vcid -
+ *
+ * NOTE:
+ *
+ * The layout below is different when compared against
+ * documentation, which shows the fields as they are in network byte
+ * order - and network byte order is how we receive the data from
+ * the IPA. To avoid using cycles converting from network to host
+ * order, we've defined the structure below such that we can access
+ * the correct fields while the data are still in network order.
+ */
+struct qmap_hdr_v5_5 {
+ /*
+ * 32 bits of qmap header to follow
+ */
+ u8 pad: 6;
+ u8 qmap_next_hdr: 1;
+ u8 cd: 1;
+ u8 mux_id;
+ u16 packet_len_with_pad;
+ /*
+ * 32 bits of coalescing frame header to follow
+ */
+ u8 coal_next_hdr: 1;
+ u8 hdr_type: 7;
+ u8 rsrvd1: 2;
+ u8 rnd_ip_id: 1;
+ u8 inc_ip_id: 1;
+ u8 num_nlos: 3;
+ u8 chksum_valid: 1;
+
+ u8 close_type: 4;
+ u8 close_value: 4;
+ u8 rsrvd2: 4;
+ u8 vcid: 4;
+} __packed;
+
+/**
+ * qmap_hdr_u -
+ *
+ * The following is a union of all of the qmap versions above.
+ *
+ * NOTE WELL: REMEMBER to keep it in sync with the bit structure
+ * definitions above.
+ */
+union qmap_hdr_u {
+ struct qmap_hdr_v4_5 qmap4_5;
+ struct qmap_hdr_v5_0 qmap5_0;
+ struct qmap_hdr_v5_5 qmap5_5;
+ u32 words[2]; /* these used to flip from ntoh and hton */
+} __packed;
+
+/**
+ * qmap_hdr_data -
+ *
+ * The following is an aggregation of the qmap header bit structures
+ * above.
+ *
+ * NOTE WELL: REMEMBER to keep it in sync with the bit structure
+ * definitions above.
+ */
+struct qmap_hdr_data {
+ /*
+ * Data from qmap header to follow
+ */
+ u8 cd;
+ u8 qmap_next_hdr;
+ u8 pad;
+ u8 mux_id;
+ u16 packet_len_with_pad;
+ /*
+ * Data from coalescing frame header to follow
+ */
+ u8 hdr_type;
+ u8 coal_next_hdr;
+ u8 ip_id_cfg;
+ u8 zero_checksum;
+ u8 additional_hdr_size;
+ u16 segment_size;
+ u8 chksum_valid;
+ u8 num_nlos;
+ u8 inc_ip_id;
+ u8 rnd_ip_id;
+ u8 close_value;
+ u8 close_type;
+ u8 vcid;
+};
+
+/**
+ * FUNCTION: ipahal_qmap_parse()
+ *
+ * The following function to be called when version specific qmap parsing is
+ * required.
+ *
+ * ARGUMENTS:
+ *
+ * unparsed_qmap
+ *
+ * The QMAP header off of a freshly received data packet. As per
+ * the architecture documentation, the data contained herein will
+ * be in network order.
+ *
+ * qmap_data_rslt
+ *
+ * A location to store the parsed data from unparsed_qmap above.
+ */
+int ipahal_qmap_parse(
+ const void* unparsed_qmap,
+ struct qmap_hdr_data* qmap_data_rslt);
+
+
+/**
+ * FUNCTION: ipahal_qmap_ntoh()
+ *
+ * The following function will take a QMAP header, which you know is
+ * in network order, and convert it to host order.
+ *
+ * NOTE WELL: Once in host order, the data will align with the bit
+ * descriptions in the headers above.
+ *
+ * ARGUMENTS:
+ *
+ * src_data_from_packet
+ *
+ * The QMAP header off of a freshly received data packet. As per
+ * the architecture documentation, the data contained herein will
+ * be in network order.
+ *
+ * dst_result
+ *
+ * A location to where the original data will be copied, then
+ * converted to host order.
+ */
+static inline void ipahal_qmap_ntoh(
+ const void* src_data_from_packet,
+ union qmap_hdr_u* dst_result)
+{
+ /*
+ * Nothing to do, since we define the bit fields in the
+ * structure, such that we can access them correctly while
+ * keeping the data in network order...
+ */
+ if (src_data_from_packet && dst_result) {
+ memcpy(
+ dst_result,
+ src_data_from_packet,
+ sizeof(union qmap_hdr_u));
+ }
+}
+
+/**
+ * FUNCTION: ipahal_qmap_hton()
+ *
+ * The following function will take QMAP data, that you've assembled
+ * in host order (ie. via using the bit structure definitions above),
+ * and convert it to network order.
+ *
+ * This function is to be used for QMAP data destined for network
+ * transmission.
+ *
+ * ARGUMENTS:
+ *
+ * src_data_from_host
+ *
+ * QMAP data in host order.
+ *
+ * dst_result
+ *
+ * A location to where the host ordered data above will be copied,
+ * then converted to network order.
+ */
+static inline void ipahal_qmap_hton(
+ union qmap_hdr_u* src_data_from_host,
+ void* dst_result)
+{
+ if (src_data_from_host && dst_result) {
+ memcpy(
+ dst_result,
+ src_data_from_host,
+ sizeof(union qmap_hdr_u));
+ /*
+ * Reusing variable below to do the host to network swap...
+ */
+ src_data_from_host = (union qmap_hdr_u*) dst_result;
+ src_data_from_host->words[0] = htonl(src_data_from_host->words[0]);
+ src_data_from_host->words[1] = htonl(src_data_from_host->words[1]);
+ }
+}
+
#endif /* _IPAHAL_H_ */