Merge "msm: ipa3: debug change"
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index d4b19da..e7452ba 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -26,6 +26,9 @@
 #include <linux/delay.h>
 #include <linux/version.h>
 
+#define CREATE_TRACE_POINTS
+#include "gsi_trace.h"
+
 #define GSI_CMD_TIMEOUT (5*HZ)
 #define GSI_FC_CMD_TIMEOUT (2*GSI_CMD_TIMEOUT)
 #define GSI_START_CMD_TIMEOUT_MS 1000
@@ -832,6 +835,15 @@
 			msk = gsihal_read_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_k, ee, k);
 			gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee, k, ch & msk);
 
+			if (trace_gsi_qtimer_enabled()) {
+				uint64_t qtimer = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+				qtimer = arch_timer_read_cntpct_el0();
+#endif
+				trace_gsi_qtimer(qtimer, false, 0, ch, msk);
+			}
+
 			for (i = 0; i < GSI_STTS_REG_BITS; i++) {
 				if ((1 << i) & ch & msk) {
 					evt_hdl = i + (GSI_STTS_REG_BITS * k);
@@ -1133,6 +1145,14 @@
 	evt = gsi_ctx->msi.evt[msi];
 	evt_ctxt = &gsi_ctx->evtr[evt];
 
+	if (trace_gsi_qtimer_enabled()) {
+		uint64_t qtimer = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+		qtimer = arch_timer_read_cntpct_el0();
+#endif
+		trace_gsi_qtimer(qtimer, true, evt, 0, 0);
+	}
+
 	if (evt_ctxt->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
 		GSIERR("Unexpected irq intf %d\n",
 			evt_ctxt->props.intf);
@@ -1839,8 +1859,7 @@
 	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
 	gsihal_destroy();
 	gsi_unmap_base();
-	memset(gsi_ctx, 0, sizeof(*gsi_ctx));
-
+	gsi_ctx->per_registered = false;
 	return GSI_STATUS_SUCCESS;
 }
 EXPORT_SYMBOL(gsi_deregister_device);
@@ -2101,7 +2120,12 @@
 static inline uint64_t gsi_read_event_ring_rp_reg(struct gsi_evt_ring_props* props,
 	uint8_t id, int ee)
 {
-	return gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4, ee, id);
+	uint64_t rp;
+
+	rp = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4, ee, id);
+	rp |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_5, ee, id)) << 32;
+
+	return rp;
 }
 
 static int __gsi_pair_msi(struct gsi_evt_ctx *ctx,
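For reference, the hunk above widens the event-ring read pointer from 32 to
64 bits by combining CNTXT_4 (low word) with CNTXT_5 (high word). A minimal
sketch of the composition, with illustrative values only:

    /* Illustrative only: compose a 64-bit RP from two 32-bit reads. */
    uint32_t lo = 0x89abcdef;  /* e.g. read from GSI_EE_n_EV_CH_k_CNTXT_4 */
    uint32_t hi = 0x00000001;  /* e.g. read from GSI_EE_n_EV_CH_k_CNTXT_5 */
    uint64_t rp = (uint64_t)lo | ((uint64_t)hi << 32);
    /* rp == 0x0000000189abcdef; reading only CNTXT_4, as the old code
     * did, would silently leave the upper 32 bits zero. */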
@@ -4382,7 +4406,7 @@
 		/* update rp to see if we have anything new to process */
 		rp = ctx->evtr->props.gsi_read_event_ring_rp(
 			&ctx->evtr->props, ctx->evtr->id, ee);
-		rp |= ctx->ring.rp & GSI_MSB_MASK;
+		rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
 
 		ctx->evtr->ring.rp = rp;
 		/* read gsi event ring rp again if last read is empty */
@@ -4401,7 +4425,7 @@
 			__iowmb();
 			rp = ctx->evtr->props.gsi_read_event_ring_rp(
 				&ctx->evtr->props, ctx->evtr->id, ee);
-			rp |= ctx->ring.rp & GSI_MSB_MASK;
+			rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
 			ctx->evtr->ring.rp = rp;
 			if (rp == ctx->evtr->ring.rp_local) {
 				spin_unlock_irqrestore(
diff --git a/drivers/platform/msm/gsi/gsi_trace.h b/drivers/platform/msm/gsi/gsi_trace.h
new file mode 100644
index 0000000..e73689e
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gsi
+#define TRACE_INCLUDE_FILE gsi_trace
+
+#if !defined(_GSI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GSI_TRACE_H
+
+#include <linux/tracepoint.h>
+
+
+TRACE_EVENT(
+	gsi_qtimer,
+
+	TP_PROTO(u64 qtimer, bool is_ll, uint8_t evt, uint32_t ch, uint32_t msk),
+
+	TP_ARGS(qtimer, is_ll, evt, ch, msk),
+
+	TP_STRUCT__entry(
+		__field(u64,		qtimer)
+		__field(bool,		is_ll)
+		__field(uint8_t,	evt)
+		__field(uint32_t,	ch)
+		__field(uint32_t,	msk)
+	),
+
+	TP_fast_assign(
+		__entry->qtimer = qtimer;
+		__entry->is_ll = is_ll;
+		__entry->evt = evt;
+		__entry->ch = ch;
+		__entry->msk = msk;
+	),
+
+	TP_printk("qtimer=%llu is_ll=%s, evt=%u, ch=0x%x, msk=0x%x",
+		__entry->qtimer,
+		__entry->is_ll ? "true" : "false",
+		__entry->evt,
+		__entry->ch,
+		__entry->msk)
+);
+
+#endif /* _GSI_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#ifdef CONFIG_IPA_VENDOR_DLKM
+#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/dataipa/drivers/platform/msm/gsi
+#else
+#define TRACE_INCLUDE_PATH ../../techpack/dataipa/drivers/platform/msm/gsi
+#endif
+#include <trace/define_trace.h>
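Once built in, the event is expected to appear under the standard tracefs
hierarchy (assuming the usual mount point) as events/gsi/gsi_qtimer. A
minimal sketch of a call site, mirroring the guarded pattern used in the
gsi.c hunks above:

    /* Sketch of a call site; the enabled() check keeps the qtimer read
     * off the fast path when the tracepoint is not active. */
    if (trace_gsi_qtimer_enabled()) {
        uint64_t qtimer = 0;
    #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
        qtimer = arch_timer_read_cntpct_el0();
    #endif
        /* is_ll=true marks the MSI (low-latency) path; ch and msk are
         * only meaningful on the IEOB interrupt path. */
        trace_gsi_qtimer(qtimer, true, evt, 0, 0);
    }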
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index ef5face..33ea10e 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -170,6 +170,19 @@
 	(x < IPA_CLIENT_MAX && (x & 0x1) == 0)
 #define IPA_CLIENT_IS_CONS(x) \
 	(x < IPA_CLIENT_MAX && (x & 0x1) == 1)
+/*
+ * The following macro does two things:
+ *   1) It checks to see if client x is allocated, and
+ *   2) It assigns a value to index idx
+ */
+#define IPA_CLIENT_IS_MAPPED(x, idx) \
+	((idx = ipa3_get_ep_mapping(x)) != IPA_EP_NOT_ALLOCATED)
+/*
+ * Same behavior as the macro above; but in addition, determines if
+ * the client is valid as well.
+ */
+#define IPA_CLIENT_IS_MAPPED_VALID(x, idx) \
+	(IPA_CLIENT_IS_MAPPED(x, idx) && ipa3_ctx->ep[idx].valid == 1)
 #define IPA_CLIENT_IS_ETH_PROD(x) \
 	((x == ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD)) || \
 	 (x == ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET2_PROD)) || \
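Note that both IPA_CLIENT_IS_MAPPED macros assign to idx as a side effect,
so the caller must pass an lvalue. A short, hypothetical usage sketch:

    int ep_idx;

    /* Check-and-assign in one expression. */
    if (!IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_CONS, ep_idx)) {
        IPAERR("LAN_CONS not allocated\n");
        return -EINVAL;
    }
    /* ep_idx now holds the value returned by ipa3_get_ep_mapping().
     * IPA_CLIENT_IS_MAPPED_VALID() additionally requires that
     * ipa3_ctx->ep[ep_idx].valid == 1. */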
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 81101fb..ca6afff 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -46,6 +46,7 @@
 #endif
 #include "gsi.h"
 #include "ipa_stats.h"
+#include <linux/suspend.h>
 
 #ifdef CONFIG_ARM64
 
@@ -139,6 +140,11 @@
 static void ipa3_free_pkt_init(void);
 static void ipa3_free_pkt_init_ex(void);
 
+#ifdef CONFIG_DEEPSLEEP
+static void ipa3_deepsleep_resume(void);
+static void ipa3_deepsleep_suspend(void);
+#endif
+
 static void ipa3_load_ipa_fw(struct work_struct *work);
 static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);
 static DECLARE_DELAYED_WORK(ipa3_fw_load_failure_handle, ipa3_load_ipa_fw);
@@ -483,6 +489,38 @@
 }
 EXPORT_SYMBOL(ipa_smmu_free_sgt);
 
+/**
+ * ipa_pm_notify() - PM notifier callback for suspend events
+ *
+ * This callback is invoked by the PM framework when a system
+ * suspend/resume transition occurs.
+ *
+ * Returns NOTIFY_DONE to the PM framework once handling is complete.
+ */
+static int ipa_pm_notify(struct notifier_block *b, unsigned long event, void *p)
+{
+	IPADBG("Entry\n");
+	switch (event) {
+	case PM_POST_SUSPEND:
+#ifdef CONFIG_DEEPSLEEP
+		if (mem_sleep_current == PM_SUSPEND_MEM && ipa3_ctx->deepsleep) {
+			IPADBG("Enter deepsleep resume\n");
+			ipa3_deepsleep_resume();
+			IPADBG("Exit deepsleep resume\n");
+		}
+#endif
+		break;
+	default:
+		break;
+	}
+	IPADBG("Exit\n");
+	return NOTIFY_DONE;
+}
+
+
+static struct notifier_block ipa_pm_notifier = {
+	.notifier_call = ipa_pm_notify,
+};
+
 static const struct dev_pm_ops ipa_pm_ops = {
 	.suspend_late = ipa3_ap_suspend,
 	.resume_early = ipa3_ap_resume,
@@ -507,6 +545,155 @@
 
 static char *active_clients_table_buf;
 
+void ipa3_get_default_evict_values(
+	struct ipahal_reg_coal_evict_lru *evict_lru )
+{
+	if (evict_lru) {
+
+		struct device *dev = &ipa3_ctx->master_pdev->dev;
+
+		u32 val;
+		int result;
+
+		memset(evict_lru, 0, sizeof(*evict_lru));
+
+		/*
+		 * Get coal_vp_lru_thrshld
+		 */
+		result =
+			of_property_read_u32(
+				dev->of_node,
+				"qcom,coal-vp-lru-thrshld",
+				&val);
+		if ( result == 0 ) {
+			evict_lru->coal_vp_lru_thrshld = val;
+		} else {
+			IPADBG(
+				"Error reading qcom,coal-vp-lru-thrshld...will use default\n");
+			evict_lru->coal_vp_lru_thrshld = IPA_COAL_VP_LRU_THRSHLD;
+		}
+		IPADBG(": coal_vp_lru_thrshld = %u", evict_lru->coal_vp_lru_thrshld);
+
+		/*
+		 * Get coal_eviction_en
+		 */
+		evict_lru->coal_eviction_en =
+			of_property_read_bool(
+				dev->of_node,
+				"qcom,coal-eviction-en");
+		if ( evict_lru->coal_eviction_en == false ) {
+			evict_lru->coal_eviction_en = IPA_COAL_EVICTION_EN;
+		}
+		IPADBG(": coal_eviction_en = %s",
+			   (evict_lru->coal_eviction_en) ? "true" : "false");
+
+		/*
+		 * Get coal_vp_lru_gran_sel
+		 */
+		result =
+			of_property_read_u32(
+				dev->of_node,
+				"qcom,coal_vp_lru_gran_sel",
+				&val);
+		if ( result == 0 ) {
+			evict_lru->coal_vp_lru_gran_sel = val;
+		} else {
+			IPADBG(
+				"Error reading qcom,coal_vp_lru_gran_sel...will use default\n");
+			evict_lru->coal_vp_lru_gran_sel = IPA_COAL_VP_LRU_GRAN_SEL;
+		}
+		IPADBG(": coal_vp_lru_gran_sel = %u\n",
+			   evict_lru->coal_vp_lru_gran_sel);
+
+		/*
+		 * Get coal_vp_lru_udp_thrshld
+		 */
+		result =
+			of_property_read_u32(
+				dev->of_node,
+				"qcom,coal-vp-lru-udp-thrshld",
+				&val);
+		if ( result == 0 ) {
+			evict_lru->coal_vp_lru_udp_thrshld = val;
+		} else {
+			IPADBG(
+				"Error reading qcom,coal-vp-lru-udp-thrshld...will use default\n");
+			evict_lru->coal_vp_lru_udp_thrshld = IPA_COAL_VP_LRU_UDP_THRSHLD;
+		}
+		IPADBG(": coal_vp_lru_udp_thrshld = %u", evict_lru->coal_vp_lru_udp_thrshld);
+
+		/*
+		 * Get coal_vp_lru_tcp_thrshld
+		 */
+		result =
+			of_property_read_u32(
+				dev->of_node,
+				"qcom,coal-vp-lru-tcp-thrshld",
+				&val);
+		if ( result == 0 ) {
+			evict_lru->coal_vp_lru_tcp_thrshld = val;
+		} else {
+			IPADBG(
+				"Error reading qcom,coal-vp-lru-tcp-thrshld...will use default\n");
+			evict_lru->coal_vp_lru_tcp_thrshld = IPA_COAL_VP_LRU_TCP_THRSHLD;
+		}
+		IPADBG(": coal_vp_lru_tcp_thrshld = %u", evict_lru->coal_vp_lru_tcp_thrshld);
+
+		/*
+		 * Get coal_vp_lru_udp_thrshld_en
+		 */
+		result =
+			of_property_read_u32(
+				dev->of_node,
+				"qcom,coal-vp-lru-udp-thrshld-en",
+				&val);
+		if ( result == 0 ) {
+			evict_lru->coal_vp_lru_udp_thrshld_en = val;
+		} else {
+			IPADBG(
+				"Error reading qcom,coal-vp-lru-udp-thrshld-en...will use default\n");
+			evict_lru->coal_vp_lru_udp_thrshld_en = IPA_COAL_VP_LRU_UDP_THRSHLD_EN;
+		}
+		IPADBG(": coal_vp_lru_udp_thrshld_en = %u",
+			   evict_lru->coal_vp_lru_udp_thrshld_en);
+
+		/*
+		 * Get coal_vp_lru_tcp_thrshld_en
+		 */
+		result =
+			of_property_read_u32(
+				dev->of_node,
+				"qcom,coal-vp-lru-tcp-thrshld-en",
+				&val);
+		if ( result == 0 ) {
+			evict_lru->coal_vp_lru_tcp_thrshld_en = val;
+		} else {
+			IPADBG(
+				"Error reading qcom,coal-vp-lru-tcp-thrshld-en...will use default\n");
+			evict_lru->coal_vp_lru_tcp_thrshld_en = IPA_COAL_VP_LRU_TCP_THRSHLD_EN;
+		}
+		IPADBG(": coal_vp_lru_tcp_thrshld_en = %u",
+			   evict_lru->coal_vp_lru_tcp_thrshld_en);
+
+		/*
+		 * Get coal_vp_lru_tcp_num
+		 */
+		result =
+			of_property_read_u32(
+				dev->of_node,
+				"qcom,coal-vp-lru-tcp-num",
+				&val);
+		if ( result == 0 ) {
+			evict_lru->coal_vp_lru_tcp_num = val;
+		} else {
+			IPADBG(
+				"Error reading qcom,coal-vp-lru-tcp-num...will use default\n");
+			evict_lru->coal_vp_lru_tcp_num = IPA_COAL_VP_LRU_TCP_NUM;
+		}
+		IPADBG(": coal_vp_lru_tcp_num = %u", evict_lru->coal_vp_lru_tcp_num);
+	}
+}
+
 int ipa3_active_clients_log_print_buffer(char *buf, int size)
 {
 	int i;
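All seven properties read by ipa3_get_default_evict_values() above follow
the same read-with-default shape. A hedged sketch of a helper that would
collapse the repetition (the helper name is illustrative, not part of this
patch):

    /* Illustrative: read a u32 DT property, falling back to a default. */
    static u32 ipa3_dt_u32_or(struct device_node *np, const char *prop,
        u32 dflt)
    {
        u32 val;

        if (of_property_read_u32(np, prop, &val) == 0)
            return val;
        IPADBG("Error reading %s...will use default\n", prop);
        return dflt;
    }

Usage would then reduce each block to one assignment, e.g.
evict_lru->coal_vp_lru_thrshld = ipa3_dt_u32_or(dev->of_node,
"qcom,coal-vp-lru-thrshld", IPA_COAL_VP_LRU_THRSHLD).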
@@ -2628,6 +2815,7 @@
 	u8 header[256] = { 0 };
 	u8 *param = NULL;
 	bool is_vlan_mode;
+	struct ipa_ioc_coal_evict_policy evict_pol;
 	struct ipa_ioc_nat_alloc_mem nat_mem;
 	struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
 	struct ipa_ioc_v4_nat_init nat_init;
@@ -2661,6 +2849,17 @@
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
 	switch (cmd) {
+	case IPA_IOC_COAL_EVICT_POLICY:
+		if (copy_from_user(
+				&evict_pol,
+				(const void __user *) arg,
+				sizeof(struct ipa_ioc_coal_evict_policy))) {
+			IPAERR_RL("copy_from_user fails\n");
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa3_set_evict_policy(&evict_pol);
+		break;
 	case IPA_IOC_ALLOC_NAT_MEM:
 		if (copy_from_user(&nat_mem, (const void __user *)arg,
 			sizeof(struct ipa_ioc_nat_alloc_mem))) {
@@ -4146,62 +4345,65 @@
 
 static int ipa3_setup_exception_path(void)
 {
-	struct ipa_ioc_add_hdr *hdr;
-	struct ipa_hdr_add *hdr_entry;
-	struct ipahal_reg_route route = { 0 };
-	struct ipa3_hdr_entry *hdr_entry_internal;
-	int ret;
+	struct ipa_ioc_add_hdr *hdr = NULL;
+	int ret = 0;
 
-	/* install the basic exception header */
-	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
-		sizeof(struct ipa_hdr_add), GFP_KERNEL);
-	if (!hdr)
-		return -ENOMEM;
+	if ( ! lan_coal_enabled() ) {
 
-	hdr->num_hdrs = 1;
-	hdr->commit = 1;
-	hdr_entry = &hdr->hdr[0];
+		struct ipa_hdr_add *hdr_entry;
+		struct ipahal_reg_route route = { 0 };
+		struct ipa3_hdr_entry *hdr_entry_internal;
 
-	strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
-	hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+		/* install the basic exception header */
+		hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+					  sizeof(struct ipa_hdr_add), GFP_KERNEL);
+		if (!hdr)
+			return -ENOMEM;
 
-	if (ipa3_add_hdr(hdr)) {
-		IPAERR("fail to add exception hdr\n");
-		ret = -EPERM;
-		goto bail;
+		hdr->num_hdrs = 1;
+		hdr->commit = 1;
+		hdr_entry = &hdr->hdr[0];
+
+		strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+		hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+
+		if (ipa3_add_hdr(hdr)) {
+			IPAERR("fail to add exception hdr\n");
+			ret = -EPERM;
+			goto bail;
+		}
+
+		if (hdr_entry->status) {
+			IPAERR("fail to add exception hdr\n");
+			ret = -EPERM;
+			goto bail;
+		}
+
+		hdr_entry_internal = ipa3_id_find(hdr_entry->hdr_hdl);
+		if (unlikely(!hdr_entry_internal)) {
+			IPAERR("fail to find internal hdr structure\n");
+			ret = -EPERM;
+			goto bail;
+		}
+
+		ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+		/* set the route register to pass exception packets to Apps */
+		route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+		route.route_frag_def_pipe = ipa3_get_ep_mapping(
+			IPA_CLIENT_APPS_LAN_CONS);
+		route.route_def_hdr_table = !hdr_entry_internal->is_lcl;
+		route.route_def_retain_hdr = 1;
+
+		if (ipa3_cfg_route(&route)) {
+			IPAERR("fail to add exception hdr\n");
+			ret = -EPERM;
+			goto bail;
+		}
 	}
 
-	if (hdr_entry->status) {
-		IPAERR("fail to add exception hdr\n");
-		ret = -EPERM;
-		goto bail;
-	}
-
-	hdr_entry_internal = ipa3_id_find(hdr_entry->hdr_hdl);
-	if (unlikely(!hdr_entry_internal)) {
-		IPAERR("fail to find internal hdr structure\n");
-		ret = -EPERM;
-		goto bail;
-	}
-
-	ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
-
-	/* set the route register to pass exception packets to Apps */
-	route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
-	route.route_frag_def_pipe = ipa3_get_ep_mapping(
-		IPA_CLIENT_APPS_LAN_CONS);
-	route.route_def_hdr_table = !hdr_entry_internal->is_lcl;
-	route.route_def_retain_hdr = 1;
-
-	if (ipa3_cfg_route(&route)) {
-		IPAERR("fail to add exception hdr\n");
-		ret = -EPERM;
-		goto bail;
-	}
-
-	ret = 0;
 bail:
-	kfree(hdr);
+	kfree(hdr);
 	return ret;
 }
 
@@ -5916,35 +6118,75 @@
 	}
 	IPADBG("default routing was set\n");
 
-	/* LAN IN (IPA->AP) */
-	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
-	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
-	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
-	sys_in.notify = ipa3_lan_rx_cb;
-	sys_in.priv = NULL;
-	if (ipa3_ctx->lan_rx_napi_enable)
-		sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
-	sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
-	sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
-	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
-	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
-	sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
-	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
-	sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
-	sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
+	ipa3_ctx->clnt_hdl_data_in = 0;
 
-	/**
-	 * ipa_lan_rx_cb() intended to notify the source EP about packet
-	 * being received on the LAN_CONS via calling the source EP call-back.
-	 * There could be a race condition with calling this call-back. Other
-	 * thread may nullify it - e.g. on EP disconnect.
-	 * This lock intended to protect the access to the source EP call-back
-	 */
-	spin_lock_init(&ipa3_ctx->disconnect_lock);
-	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
-		IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
-		result = -EPERM;
-		goto fail_flt_hash_tuple;
+	if ( ipa3_ctx->ipa_hw_type >= IPA_HW_v5_5 ) {
+		/*
+		 * LAN_COAL IN (IPA->AP)
+		 */
+		memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+		sys_in.client = IPA_CLIENT_APPS_LAN_COAL_CONS;
+		sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+		sys_in.notify = ipa3_lan_coal_rx_cb;
+		sys_in.priv = NULL;
+		if (ipa3_ctx->lan_rx_napi_enable)
+			sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+		sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
+
+		/**
+		 * ipa3_lan_coal_rx_cb() is intended to notify the source EP
+		 * about a packet being received on the LAN_COAL_CONS via a call
+		 * to the source EP call-back.  There could be a race condition
+		 * when calling this call-back: another thread may nullify it,
+		 * e.g. on EP disconnect.  This lock protects the access to the
+		 * source EP call-back.
+		 */
+		spin_lock_init(&ipa3_ctx->disconnect_lock);
+		if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+			IPAERR(":setup sys pipe (LAN_COAL_CONS) failed.\n");
+			result = -EPERM;
+			goto fail_flt_hash_tuple;
+		}
+
+	} else { /* ipa3_ctx->ipa_hw_type < IPA_HW_v5_5 */
+		/*
+		 * LAN IN (IPA->AP)
+		 */
+		memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+		sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+		sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+		sys_in.notify = ipa3_lan_rx_cb;
+		sys_in.priv = NULL;
+		if (ipa3_ctx->lan_rx_napi_enable)
+			sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
+		sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+		sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
+
+		/**
+		 * ipa_lan_rx_cb() is intended to notify the source EP about a packet
+		 * being received on the LAN_CONS via a call to the source EP call-back.
+		 * There could be a race condition when calling this call-back: another
+		 * thread may nullify it, e.g. on EP disconnect.
+		 * This lock protects the access to the source EP call-back.
+		 */
+		spin_lock_init(&ipa3_ctx->disconnect_lock);
+		if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+			IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
+			result = -EPERM;
+			goto fail_flt_hash_tuple;
+		}
 	}
 
 	/* LAN OUT (AP->IPA) */
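As the comments in both branches above note, the client notify callback can
be nulled concurrently on EP disconnect. A minimal sketch of the dispatch
pattern that disconnect_lock enables (illustrative, not the exact call site):

    /* Take disconnect_lock so a concurrent EP disconnect cannot null
     * the callback between the check and the call. */
    spin_lock(&ipa3_ctx->disconnect_lock);
    if (ep->client_notify)
        ep->client_notify(ep->priv, IPA_RECEIVE,
                          (unsigned long)rx_skb);
    spin_unlock(&ipa3_ctx->disconnect_lock);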
@@ -5973,7 +6215,8 @@
 	return 0;
 
 fail_lan_data_out:
-	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+	if ( ipa3_ctx->clnt_hdl_data_in )
+		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
 fail_flt_hash_tuple:
 	if (ipa3_ctx->dflt_v6_rt_rule_hdl)
 		__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
@@ -5990,11 +6233,13 @@
 {
 	if (!ipa3_ctx->ipa_config_is_mhi)
 		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
-	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+	if ( ipa3_ctx->clnt_hdl_data_in )
+		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
 	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
 	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
 	__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
 	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+	ipa3_dealloc_common_event_ring();
 }
 
 #ifdef CONFIG_COMPAT
@@ -6598,7 +6843,7 @@
 	 */
 	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
 		!ipa3_ctx->tag_process_before_gating) {
-		ipa3_force_close_coal();
+		ipa3_force_close_coal(true, true);
 		/* While sending force close command setting
 		 * tag process as true to make configure to
 		 * original state
@@ -7152,12 +7397,21 @@
 	/* Make sure IPA clock voted when collecting the reg dump */
 	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PANIC_VOTE");
 	res = ipa3_inc_client_enable_clks_no_block(&log_info);
-	if (res) {
+	if (!ipa3_active_clks_status()) {
 		IPAERR("IPA clk off not saving the IPA registers\n");
 	} else {
+		/* make sure the clock can't be disabled in the middle of the register save */
+		if (res) {
+			IPADBG("IPA resume in progress, incrementing client cnt\n");
+			atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
+		}
 		ipa_save_registers();
 		ipahal_print_all_regs(false);
 		ipa_wigig_save_regs();
+		if (res) {
+			IPADBG("IPA resume in progress decrement clinet cnt\n");
+			atomic_dec(&ipa3_ctx->ipa3_active_clients.cnt);
+		}
 	}
 
 	ipa3_active_clients_log_print_table(active_clients_table_buf,
@@ -7292,6 +7546,12 @@
 		napi_enable(&ipa3_ctx->napi_lan_rx);
 }
 
+static inline void ipa3_disable_napi_lan_rx(void)
+{
+	if (ipa3_ctx->lan_rx_napi_enable)
+		napi_disable(&ipa3_ctx->napi_lan_rx);
+}
+
 static inline void ipa3_register_to_fmwk(void)
 {
 	struct ipa_core_data data;
@@ -7789,6 +8049,10 @@
 	/* init uc-activation tbl*/
 	ipa3_setup_uc_act_tbl();
 
+#ifdef CONFIG_DEEPSLEEP
+	if (!ipa3_is_ready())
+		ipa_fmwk_deepsleep_exit_ipa();
+#endif
 	complete_all(&ipa3_ctx->init_completion_obj);
 
 	ipa_ut_module_init();
@@ -7986,13 +8250,12 @@
 
 static int ipa3_pil_load_ipa_fws(const char *sub_sys)
 {
-	void *subsystem_get_retval = NULL;
 
 	IPADBG("PIL FW loading process initiated sub_sys=%s\n",
 		sub_sys);
 
-	subsystem_get_retval = subsystem_get(sub_sys);
-	if (IS_ERR_OR_NULL(subsystem_get_retval)) {
+	ipa3_ctx->subsystem_get_retval = subsystem_get(sub_sys);
+	if (IS_ERR_OR_NULL(ipa3_ctx->subsystem_get_retval)) {
 		IPAERR("Unable to PIL load FW for sub_sys=%s\n", sub_sys);
 		return -EINVAL;
 	}
@@ -8002,6 +8265,20 @@
 }
 #endif /* IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
 
+#ifdef CONFIG_DEEPSLEEP
+static int ipa3_pil_unload_ipa_fws(void)
+{
+	IPADBG("PIL FW unloading process initiated\n");
+
+	if (ipa3_ctx->subsystem_get_retval)
+		subsystem_put(ipa3_ctx->subsystem_get_retval);
+
+	IPADBG("PIL FW unloading process is complete\n");
+	return 0;
+}
+#endif
+
 static void ipa3_load_ipa_fw(struct work_struct *work)
 {
 	int result;
@@ -8009,7 +8286,7 @@
 	IPADBG("Entry\n");
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	
+
 	result = ipa3_attach_to_smmu();
 	if (result) {
 		IPAERR("IPA attach to smmu failed %d\n", result);
@@ -8557,8 +8834,11 @@
 	if (ipa3_ctx->lan_rx_napi_enable || ipa3_ctx->tx_napi_enable) {
 		init_dummy_netdev(&ipa3_ctx->generic_ndev);
 		if(ipa3_ctx->lan_rx_napi_enable) {
-			netif_napi_add(&ipa3_ctx->generic_ndev, &ipa3_ctx->napi_lan_rx,
-					ipa3_lan_poll, NAPI_WEIGHT);
+			netif_napi_add(
+				&ipa3_ctx->generic_ndev,
+				&ipa3_ctx->napi_lan_rx,
+				ipa3_lan_poll,
+				NAPI_WEIGHT);
 		}
 	}
 }
@@ -8677,10 +8957,18 @@
 	ipa3_ctx->uc_ctx.holb_monitor.max_cnt_11ad =
 		resource_p->ipa_holb_monitor_max_cnt_11ad;
 	ipa3_ctx->ipa_wan_aggr_pkt_cnt = resource_p->ipa_wan_aggr_pkt_cnt;
-	ipa3_ctx->stats.page_recycle_stats[0].total_replenished = 0;
-	ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc = 0;
-	ipa3_ctx->stats.page_recycle_stats[1].total_replenished = 0;
-	ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc = 0;
+	memset(
+		ipa3_ctx->stats.page_recycle_stats,
+		0,
+		sizeof(ipa3_ctx->stats.page_recycle_stats));
+	memset(
+		ipa3_ctx->stats.cache_recycle_stats,
+		0,
+		sizeof(ipa3_ctx->stats.cache_recycle_stats));
+	memset(
+		&ipa3_ctx->stats.coal,
+		0,
+		sizeof(ipa3_ctx->stats.coal));
 	memset(ipa3_ctx->stats.page_recycle_cnt, 0,
 		sizeof(ipa3_ctx->stats.page_recycle_cnt));
 	ipa3_ctx->stats.num_sort_tasklet_sched[0] = 0;
@@ -11162,7 +11450,8 @@
 	int i;
 
 	IPADBG("Enter...\n");
-
+	if (!of_device_is_compatible(dev->of_node, "qcom,ipa"))
+		return 0;
 	/* In case there is a tx/rx handler in polling mode fail to suspend */
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
 		if (ipa3_ctx->ep[i].sys &&
@@ -11173,6 +11462,13 @@
 		}
 	}
 
+#ifdef CONFIG_DEEPSLEEP
+	if (mem_sleep_current == PM_SUSPEND_MEM) {
+		IPADBG("Enter deepsleep suspend\n");
+		ipa3_deepsleep_suspend();
+		IPADBG("Exit deepsleep suspend\n");
+	}
+#endif
 	ipa_pm_deactivate_all_deferred();
 
 	IPADBG("Exit\n");
@@ -11205,6 +11501,59 @@
 	return ipa3_ctx->lan_rx_napi_enable;
 }
 
+
+#ifdef CONFIG_DEEPSLEEP
+static void ipa3_deepsleep_suspend(void)
+{
+	IPADBG("Entry\n");
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* This flag allows deletion of the default routing table */
+	ipa3_ctx->deepsleep = true;
+	/*Disabling the LAN NAPI*/
+	ipa3_disable_napi_lan_rx();
+	/*Do not allow uC related operations until the uC is loaded again*/
+	ipa3_ctx->uc_ctx.uc_loaded = false;
+	/*Disconnecting LAN PROD/LAN CONS/CMD PROD apps pipes*/
+	ipa3_teardown_apps_pipes();
+	/*Deregistering the GSI driver*/
+	gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
+	/*Destroying filter table ids*/
+	ipa3_destroy_flt_tbl_idrs();
+	/*Disabling IPA interrupt*/
+	ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+	ipa3_interrupts_destroy(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev);
+	ipa3_uc_interface_destroy();
+	/*Destroy the NAT device*/
+	ipa3_nat_ipv6ct_destroy_devices();
+	/*Freeing memory allocated for coalescing and dma task*/
+	ipa3_free_coal_close_frame();
+	ipa3_free_dma_task_for_gsi();
+	/*Destroying ipa hal module*/
+	ipahal_destroy();
+	ipa3_ctx->ipa_initialization_complete = false;
+	ipa3_debugfs_remove();
+	/*Unloading IPA FW to allow FW load in resume*/
+	ipa3_pil_unload_ipa_fws();
+	/*Calling framework API to reset IPA ready flag to false*/
+	ipa_fmwk_deepsleep_entry_ipa();
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("Exit\n");
+}
+
+static void ipa3_deepsleep_resume(void)
+{
+	IPADBG("Entry\n");
+	/*After deepsleep exit we shouldn't allow deletion of the default routing table*/
+	ipa3_ctx->deepsleep = false;
+	/*Scheduling WQ to load IPA FW*/
+	queue_work(ipa3_ctx->transport_power_mgmt_wq,
+		&ipa3_fw_loading_work);
+	IPADBG("Exit\n");
+}
+#endif
+
 static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
 {
 	/*
@@ -11656,6 +12005,8 @@
 		/* Register as a PCI device driver */
 		return pci_register_driver(&ipa_pci_driver);
 	}
+
+	register_pm_notifier(&ipa_pm_notifier);
 	/* Register as a platform device driver */
 	return platform_driver_register(&ipa_plat_drv);
 }
@@ -11670,6 +12021,7 @@
 		kfree(ipa3_ctx->hw_stats);
 		ipa3_ctx->hw_stats = NULL;
 	}
+	unregister_pm_notifier(&ipa_pm_notifier);
 	kfree(ipa3_ctx);
 	ipa3_ctx = NULL;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 68e0bbc..d0941b7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -2,39 +2,7 @@
 /*
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  *
- * Changes from Qualcomm Innovation Center are provided under the following license:
- *
  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted (subject to the limitations in the
- * disclaimer below) provided that the following conditions are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *
- *     * Redistributions in binary form must reproduce the above
- *       copyright notice, this list of conditions and the following
- *       disclaimer in the documentation and/or other materials provided
- *       with the distribution.
- *
- *     * Neither the name of Qualcomm Innovation Center, Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
- * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
- * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
  */
 
 #ifdef CONFIG_DEBUG_FS
@@ -1653,33 +1621,40 @@
 	int nbytes;
 	int cnt = 0, i = 0, k = 0;
 
-	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
-			"COAL : Total number of packets replenished =%llu\n"
-			"COAL : Number of page recycled packets  =%llu\n"
-			"COAL : Number of tmp alloc packets  =%llu\n"
-			"COAL  : Number of times tasklet scheduled  =%llu\n"
-			"DEF  : Total number of packets replenished =%llu\n"
-			"DEF  : Number of page recycled packets =%llu\n"
-			"DEF  : Number of tmp alloc packets  =%llu\n"
-			"DEF  : Number of times tasklet scheduled  =%llu\n"
-			"COMMON  : Number of page recycled in tasklet  =%llu\n"
-			"COMMON  : Number of times free pages not found in tasklet =%llu\n",
-			ipa3_ctx->stats.page_recycle_stats[0].total_replenished,
-			ipa3_ctx->stats.page_recycle_stats[0].page_recycled,
-			ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc,
-			ipa3_ctx->stats.num_sort_tasklet_sched[0],
-			ipa3_ctx->stats.page_recycle_stats[1].total_replenished,
-			ipa3_ctx->stats.page_recycle_stats[1].page_recycled,
-			ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc,
-			ipa3_ctx->stats.num_sort_tasklet_sched[1],
-			ipa3_ctx->stats.page_recycle_cnt_in_tasklet,
-			ipa3_ctx->stats.num_of_times_wq_reschd);
+	nbytes = scnprintf(
+		dbg_buff, IPA_MAX_MSG_LEN,
+		"COAL   : Total number of packets replenished =%llu\n"
+		"COAL   : Number of page recycled packets  =%llu\n"
+		"COAL   : Number of tmp alloc packets  =%llu\n"
+		"COAL   : Number of times tasklet scheduled  =%llu\n"
+
+		"DEF    : Total number of packets replenished =%llu\n"
+		"DEF    : Number of page recycled packets =%llu\n"
+		"DEF    : Number of tmp alloc packets  =%llu\n"
+		"DEF    : Number of times tasklet scheduled  =%llu\n"
+
+		"COMMON : Number of page recycled in tasklet  =%llu\n"
+		"COMMON : Number of times free pages not found in tasklet =%llu\n",
+
+		ipa3_ctx->stats.page_recycle_stats[0].total_replenished,
+		ipa3_ctx->stats.page_recycle_stats[0].page_recycled,
+		ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc,
+		ipa3_ctx->stats.num_sort_tasklet_sched[0],
+
+		ipa3_ctx->stats.page_recycle_stats[1].total_replenished,
+		ipa3_ctx->stats.page_recycle_stats[1].page_recycled,
+		ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc,
+		ipa3_ctx->stats.num_sort_tasklet_sched[1],
+
+		ipa3_ctx->stats.page_recycle_cnt_in_tasklet,
+		ipa3_ctx->stats.num_of_times_wq_reschd);
 
 	cnt += nbytes;
 
 	for (k = 0; k < 2; k++) {
 		for (i = 0; i < ipa3_ctx->page_poll_threshold; i++) {
-			nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN,
+			nbytes = scnprintf(
+				dbg_buff + cnt, IPA_MAX_MSG_LEN,
 				"COMMON  : Page replenish efficiency[%d][%d]  =%llu\n",
 				k, i, ipa3_ctx->stats.page_recycle_cnt[k][i]);
 			cnt += nbytes;
@@ -1688,6 +1663,111 @@
 
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
 }
+
+static ssize_t ipa3_read_lan_coal_stats(
+	struct file *file,
+	char __user *ubuf,
+	size_t       count,
+	loff_t      *ppos)
+{
+	int nbytes = 0, cnt = 0;
+	u32 i;
+	char buf[4096];
+
+	*buf = '\0';
+
+	for (i = 0; i < ARRAY_SIZE(ipa3_ctx->stats.coal.coal_veid); i++) {
+
+		nbytes += scnprintf(
+			buf         + nbytes,
+			sizeof(buf) - nbytes,
+			"(%u/%llu) ",
+			i,
+			ipa3_ctx->stats.coal.coal_veid[i]);
+	}
+
+	nbytes = scnprintf(
+		dbg_buff, IPA_MAX_MSG_LEN,
+		"LAN COAL rx            = %llu\n"
+		"LAN COAL pkts          = %llu\n"
+		"LAN COAL left as is    = %llu\n"
+		"LAN COAL reconstructed = %llu\n"
+		"LAN COAL hdr qmap err  = %llu\n"
+		"LAN COAL hdr nlo err   = %llu\n"
+		"LAN COAL hdr pkt err   = %llu\n"
+		"LAN COAL csum err      = %llu\n"
+
+		"LAN COAL ip invalid    = %llu\n"
+		"LAN COAL trans invalid = %llu\n"
+		"LAN COAL tcp           = %llu\n"
+		"LAN COAL tcp bytes     = %llu\n"
+		"LAN COAL udp           = %llu\n"
+		"LAN COAL udp bytes     = %llu\n"
+		"LAN COAL (veid/cnt)...(veid/cnt) = %s\n",
+
+		ipa3_ctx->stats.coal.coal_rx,
+		ipa3_ctx->stats.coal.coal_pkts,
+		ipa3_ctx->stats.coal.coal_left_as_is,
+		ipa3_ctx->stats.coal.coal_reconstructed,
+		ipa3_ctx->stats.coal.coal_hdr_qmap_err,
+		ipa3_ctx->stats.coal.coal_hdr_nlo_err,
+		ipa3_ctx->stats.coal.coal_hdr_pkt_err,
+		ipa3_ctx->stats.coal.coal_csum_err,
+		ipa3_ctx->stats.coal.coal_ip_invalid,
+		ipa3_ctx->stats.coal.coal_trans_invalid,
+		ipa3_ctx->stats.coal.coal_tcp,
+		ipa3_ctx->stats.coal.coal_tcp_bytes,
+		ipa3_ctx->stats.coal.coal_udp,
+		ipa3_ctx->stats.coal.coal_udp_bytes,
+		buf);
+
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_cache_recycle_stats(
+	struct file *file,
+	char __user *ubuf,
+	size_t       count,
+	loff_t      *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"COAL  (cache) : Total number of pkts replenished =%llu\n"
+			"COAL  (cache) : Number of pkts alloced  =%llu\n"
+			"COAL  (cache) : Number of pkts not alloced  =%llu\n"
+
+			"DEF   (cache) : Total number of pkts replenished =%llu\n"
+			"DEF   (cache) : Number of pkts alloced  =%llu\n"
+			"DEF   (cache) : Number of pkts not alloced  =%llu\n"
+
+			"OTHER (cache) : Total number of packets replenished =%llu\n"
+			"OTHER (cache) : Number of pkts alloced  =%llu\n"
+			"OTHER (cache) : Number of pkts not alloced  =%llu\n",
+
+			ipa3_ctx->stats.cache_recycle_stats[0].tot_pkt_replenished,
+			ipa3_ctx->stats.cache_recycle_stats[0].pkt_allocd,
+			ipa3_ctx->stats.cache_recycle_stats[0].pkt_found,
+
+			ipa3_ctx->stats.cache_recycle_stats[1].tot_pkt_replenished,
+			ipa3_ctx->stats.cache_recycle_stats[1].pkt_allocd,
+			ipa3_ctx->stats.cache_recycle_stats[1].pkt_found,
+
+			ipa3_ctx->stats.cache_recycle_stats[2].tot_pkt_replenished,
+			ipa3_ctx->stats.cache_recycle_stats[2].pkt_allocd,
+			ipa3_ctx->stats.cache_recycle_stats[2].pkt_found);
+
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
 static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
 		size_t count, loff_t *ppos)
 {
@@ -3353,6 +3433,14 @@
 			.read = ipa3_read_page_recycle_stats,
 		}
 	}, {
+		"lan_coal_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_lan_coal_stats,
+		}
+	}, {
+		"cache_recycle_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_cache_recycle_stats,
+		}
+	}, {
 		"wdi", IPA_READ_ONLY_MODE, NULL, {
 			.read = ipa3_read_wdi,
 		}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 4c91d58..1900e0a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3,40 +3,13 @@
 /*
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  *
- * Changes from Qualcomm Innovation Center are provided under the following license:
- *
  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted (subject to the limitations in the
- * disclaimer below) provided that the following conditions are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *
- *     * Redistributions in binary form must reproduce the above
- *       copyright notice, this list of conditions and the following
- *       disclaimer in the documentation and/or other materials provided
- *       with the distribution.
- *
- *     * Neither the name of Qualcomm Innovation Center, Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
- * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
- * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
  */
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inet.h>
+#include <linux/if_ether.h>
+#include <net/ip6_checksum.h>
 
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -44,7 +17,6 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/msm_gsi.h>
-#include <uapi/linux/ip.h>
 #include <net/sock.h>
 #include <net/ipv6.h>
 #include <asm/page.h>
@@ -165,7 +137,7 @@
 	u32 ring_size, gfp_t mem_flag);
 static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
 	u32 ring_size, struct ipa3_sys_context *user_data, gfp_t mem_flag);
-static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl);
+static int ipa3_teardown_pipe(u32 clnt_hdl);
 static int ipa_populate_tag_field(struct ipa3_desc *desc,
 		struct ipa3_tx_pkt_wrapper *tx_pkt,
 		struct ipahal_imm_cmd_pyld **tag_pyld_ret);
@@ -993,6 +965,12 @@
 		case IPA_CLIENT_APPS_WAN_CONS:
 			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 			break;
+		case IPA_CLIENT_APPS_LAN_COAL_CONS:
+			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+			break;
+		case IPA_CLIENT_APPS_LAN_CONS:
+			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_COAL_CONS);
+			break;
 		default:
 			break;
 	}
@@ -1096,6 +1074,8 @@
 
 	if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
 		client_type = IPA_CLIENT_APPS_WAN_COAL_CONS;
+	else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
+		client_type = IPA_CLIENT_APPS_LAN_COAL_CONS;
 	else
 		client_type = sys->ep->client;
 
@@ -1163,6 +1143,11 @@
 			usleep_range(SUSPEND_MIN_SLEEP_RX,
 				SUSPEND_MAX_SLEEP_RX);
 			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_COAL");
+		} else if (sys->ep->client == IPA_CLIENT_APPS_LAN_COAL_CONS) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LAN_COAL");
+			usleep_range(SUSPEND_MIN_SLEEP_RX,
+				SUSPEND_MAX_SLEEP_RX);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LAN_COAL");
 		} else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) {
 			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LOW_LAT");
 			usleep_range(SUSPEND_MIN_SLEEP_RX,
@@ -1301,27 +1286,31 @@
 int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 {
 	struct ipa3_ep_context *ep;
-	int i, ipa_ep_idx, wan_handle, coal_ep_id;
+	int i, ipa_ep_idx;
+	int wan_handle, lan_handle;
+	int wan_coal_ep_id, lan_coal_ep_id;
 	int result = -EINVAL;
 	struct ipahal_reg_coal_qmap_cfg qmap_cfg;
-	struct ipahal_reg_coal_evict_lru evict_lru;
 	char buff[IPA_RESOURCE_NAME_MAX];
 	struct ipa_ep_cfg ep_cfg_copy;
 	int (*tx_completion_func)(struct napi_struct *, int);
 
 	if (sys_in == NULL || clnt_hdl == NULL) {
-		IPAERR("NULL args\n");
+		IPAERR(
+			"NULL args: sys_in(%p) and/or clnt_hdl(%u)\n",
+			sys_in, clnt_hdl);
 		goto fail_gen;
 	}
 
+	*clnt_hdl = 0;
+
 	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
 		IPAERR("bad parm client:%d fifo_sz:%d\n",
 			sys_in->client, sys_in->desc_fifo_sz);
 		goto fail_gen;
 	}
 
-	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
-	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+	if ( ! IPA_CLIENT_IS_MAPPED(sys_in->client, ipa_ep_idx) ) {
 		IPAERR("Invalid client.\n");
 		goto fail_gen;
 	}
@@ -1332,9 +1321,11 @@
 		goto fail_gen;
 	}
 
-	coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	wan_coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	lan_coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_COAL_CONS);
+
 	/* save the input config parameters */
-	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+	if (IPA_CLIENT_IS_APPS_COAL_CONS(sys_in->client))
 		ep_cfg_copy = sys_in->ipa_ep_cfg;
 
 	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
@@ -1391,10 +1382,15 @@
 
 		/* create IPA PM resources for handling polling mode */
 		if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS &&
-			coal_ep_id != IPA_EP_NOT_ALLOCATED &&
-			ipa3_ctx->ep[coal_ep_id].valid == 1) {
+			wan_coal_ep_id != IPA_EP_NOT_ALLOCATED &&
+			ipa3_ctx->ep[wan_coal_ep_id].valid == 1) {
 			/* Use coalescing pipe PM handle for default pipe also*/
-			ep->sys->pm_hdl = ipa3_ctx->ep[coal_ep_id].sys->pm_hdl;
+			ep->sys->pm_hdl = ipa3_ctx->ep[wan_coal_ep_id].sys->pm_hdl;
+		} else if (sys_in->client == IPA_CLIENT_APPS_LAN_CONS &&
+			lan_coal_ep_id != IPA_EP_NOT_ALLOCATED &&
+			ipa3_ctx->ep[lan_coal_ep_id].valid == 1) {
+			/* Use coalescing pipe PM handle for default pipe also*/
+			ep->sys->pm_hdl = ipa3_ctx->ep[lan_coal_ep_id].sys->pm_hdl;
 		} else if (IPA_CLIENT_IS_CONS(sys_in->client)) {
 			ep->sys->freepage_wq = alloc_workqueue(buff,
 					WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS |
@@ -1564,30 +1560,42 @@
 	if (ep->sys->repl_hdlr == ipa3_replenish_rx_page_recycle) {
 		if (!(ipa3_ctx->wan_common_page_pool &&
 			sys_in->client == IPA_CLIENT_APPS_WAN_CONS &&
-			coal_ep_id != IPA_EP_NOT_ALLOCATED &&
-			ipa3_ctx->ep[coal_ep_id].valid == 1)) {
-			ep->sys->page_recycle_repl = kzalloc(
-				sizeof(*ep->sys->page_recycle_repl), GFP_KERNEL);
+			wan_coal_ep_id != IPA_EP_NOT_ALLOCATED &&
+			ipa3_ctx->ep[wan_coal_ep_id].valid == 1)) {
+			/* Allocate page recycling pool only once. */
 			if (!ep->sys->page_recycle_repl) {
-				IPAERR("failed to alloc repl for client %d\n",
-						sys_in->client);
-				result = -ENOMEM;
-				goto fail_napi;
+				ep->sys->page_recycle_repl = kzalloc(
+					sizeof(*ep->sys->page_recycle_repl), GFP_KERNEL);
+				if (!ep->sys->page_recycle_repl) {
+					IPAERR("failed to alloc repl for client %d\n",
+							sys_in->client);
+					result = -ENOMEM;
+					goto fail_napi;
+				}
+				atomic_set(&ep->sys->page_recycle_repl->pending, 0);
+				/* For common page pool double the pool size. */
+				if (ipa3_ctx->wan_common_page_pool &&
+					sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+					ep->sys->page_recycle_repl->capacity =
+							(ep->sys->rx_pool_sz + 1) *
+							IPA_GENERIC_RX_CMN_PAGE_POOL_SZ_FACTOR;
+				else
+					ep->sys->page_recycle_repl->capacity =
+							(ep->sys->rx_pool_sz + 1) *
+							IPA_GENERIC_RX_PAGE_POOL_SZ_FACTOR;
+				IPADBG("Page repl capacity for client:%d, value:%d\n",
+						   sys_in->client, ep->sys->page_recycle_repl->capacity);
+				INIT_LIST_HEAD(&ep->sys->page_recycle_repl->page_repl_head);
+				INIT_DELAYED_WORK(&ep->sys->freepage_work, ipa3_schd_freepage_work);
+				tasklet_init(&ep->sys->tasklet_find_freepage,
+					ipa3_tasklet_find_freepage, (unsigned long) ep->sys);
+				ipa3_replenish_rx_page_cache(ep->sys);
+			} else {
+				ep->sys->napi_sort_page_thrshld_cnt = 0;
+				/* Sort the pages once. */
+				ipa3_tasklet_find_freepage((unsigned long) ep->sys);
 			}
-			atomic_set(&ep->sys->page_recycle_repl->pending, 0);
-			/* For common page pool double the pool size. */
-			if (ipa3_ctx->wan_common_page_pool &&
-				sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
-				ep->sys->page_recycle_repl->capacity =
-						(ep->sys->rx_pool_sz + 1) *
-						IPA_GENERIC_RX_CMN_PAGE_POOL_SZ_FACTOR;
-			else
-				ep->sys->page_recycle_repl->capacity =
-						(ep->sys->rx_pool_sz + 1) *
-						IPA_GENERIC_RX_PAGE_POOL_SZ_FACTOR;
-			IPADBG("Page repl capacity for client:%d, value:%d\n",
-					   sys_in->client, ep->sys->page_recycle_repl->capacity);
-			INIT_LIST_HEAD(&ep->sys->page_recycle_repl->page_repl_head);
+
 			ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
 			if (!ep->sys->repl) {
 				IPAERR("failed to alloc repl for client %d\n",
@@ -1610,19 +1618,14 @@
 			atomic_set(&ep->sys->repl->head_idx, 0);
 			atomic_set(&ep->sys->repl->tail_idx, 0);
 
-			tasklet_init(&ep->sys->tasklet_find_freepage,
-					ipa3_tasklet_find_freepage, (unsigned long) ep->sys);
-			INIT_DELAYED_WORK(&ep->sys->freepage_work, ipa3_schd_freepage_work);
-			ep->sys->napi_sort_page_thrshld_cnt = 0;
-			ipa3_replenish_rx_page_cache(ep->sys);
 			ipa3_wq_page_repl(&ep->sys->repl_work);
 		} else {
 			/* Use pool same as coal pipe when common page pool is used. */
 			ep->sys->common_buff_pool = true;
-			ep->sys->common_sys = ipa3_ctx->ep[coal_ep_id].sys;
-			ep->sys->repl = ipa3_ctx->ep[coal_ep_id].sys->repl;
+			ep->sys->common_sys = ipa3_ctx->ep[wan_coal_ep_id].sys;
+			ep->sys->repl = ipa3_ctx->ep[wan_coal_ep_id].sys->repl;
 			ep->sys->page_recycle_repl =
-				ipa3_ctx->ep[coal_ep_id].sys->page_recycle_repl;
+				ipa3_ctx->ep[wan_coal_ep_id].sys->page_recycle_repl;
 		}
 	}
 
@@ -1671,24 +1674,47 @@
 	IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client,
 			ipa_ep_idx, ep->sys);
 
-	/* configure the registers and setup the default pipe */
-	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
-		evict_lru.coal_vp_lru_thrshld = 0;
-		evict_lru.coal_eviction_en = true;
-		ipahal_write_reg_fields(IPA_COAL_EVICT_LRU, &evict_lru);
+	/*
+	 * Configure the registers and setup the default pipe
+	 */
+	if (IPA_CLIENT_IS_APPS_COAL_CONS(sys_in->client)) {
 
-		qmap_cfg.mux_id_byte_sel = IPA_QMAP_ID_BYTE;
-		ipahal_write_reg_fields(IPA_COAL_QMAP_CFG, &qmap_cfg);
+		const char* str = "";
 
-		if (!sys_in->ext_ioctl_v2) {
-			sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
-			sys_in->ipa_ep_cfg = ep_cfg_copy;
-			result = ipa3_setup_sys_pipe(sys_in, &wan_handle);
+		if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+
+			str = "wan";
+
+			qmap_cfg.mux_id_byte_sel = IPA_QMAP_ID_BYTE;
+
+			ipahal_write_reg_fields(IPA_COAL_QMAP_CFG, &qmap_cfg);
+
+			if (!sys_in->ext_ioctl_v2) {
+				sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
+				sys_in->ipa_ep_cfg = ep_cfg_copy;
+				result = ipa3_setup_sys_pipe(sys_in, &wan_handle);
+			}
+
+		} else { /* (sys_in->client == IPA_CLIENT_APPS_LAN_COAL_CONS) */
+
+			str = "lan";
+
+			if (!sys_in->ext_ioctl_v2) {
+				sys_in->client = IPA_CLIENT_APPS_LAN_CONS;
+				sys_in->ipa_ep_cfg = ep_cfg_copy;
+				sys_in->notify = ipa3_lan_rx_cb;
+				result = ipa3_setup_sys_pipe(sys_in, &lan_handle);
+			}
 		}
+
 		if (result) {
-			IPAERR("failed to setup default coalescing pipe\n");
+			IPAERR(
+				"Failed to setup default %s coalescing pipe\n",
+				str);
 			goto fail_repl;
 		}
+
+		ipa3_default_evict_register();
 	}
 
 	if (!ep->keep_ipa_awake)
@@ -1724,7 +1750,8 @@
 fail_gen2:
 	ipa_pm_deregister(ep->sys->pm_hdl);
 fail_pm:
-	destroy_workqueue(ep->sys->freepage_wq);
+	if (ep->sys->freepage_wq)
+		destroy_workqueue(ep->sys->freepage_wq);
 fail_wq3:
 	destroy_workqueue(ep->sys->repl_wq);
 fail_wq2:
@@ -1807,6 +1834,11 @@
 		netif_napi_del(&ep->sys->napi_rx);
 	}
 
+	if ( ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS ) {
+		stop_coalescing();
+		ipa3_force_close_coal(false, true);
+	}
+
 	/* channel stop might fail on timeout if IPA is busy */
 	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
 		result = ipa3_stop_gsi_channel(clnt_hdl);
@@ -1818,6 +1850,10 @@
 			break;
 	}
 
+	if ( ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS ) {
+		start_coalescing();
+	}
+
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("GSI stop chan err: %d.\n", result);
 		ipa_assert();
@@ -1835,12 +1871,13 @@
 	if (IPA_CLIENT_IS_PROD(ep->client))
 		atomic_set(&ep->sys->workqueue_flushed, 1);
 
-	/* tear down the default pipe before we reset the channel*/
+	/*
+	 * Tear down the default pipe before we reset the channel
+	 */
 	if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
 
-		if (i == IPA_EP_NOT_ALLOCATED) {
-			IPAERR("failed to get idx");
+		if ( ! IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_CONS, i) ) {
+			IPAERR("Failed to get idx for IPA_CLIENT_APPS_WAN_CONS");
 			return i;
 		}
 
@@ -1848,7 +1885,29 @@
 		 * resetting only coalescing channel.
 		 */
 		if (ipa3_ctx->ep[i].valid) {
-			result = ipa3_teardown_coal_def_pipe(i);
+			result = ipa3_teardown_pipe(i);
+			if (result) {
+				IPAERR("failed to teardown default coal pipe\n");
+				return result;
+			}
+		}
+	}
+
+	/*
+	 * Tear down the default pipe before we reset the channel
+	 */
+	if (ep->client == IPA_CLIENT_APPS_LAN_COAL_CONS) {
+
+		if ( ! IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_CONS, i) ) {
+			IPAERR("Failed to get idx for IPA_CLIENT_APPS_LAN_CONS,");
+			return i;
+		}
+
+		/* If the default channel is already torn down,
+		 * reset only the coalescing channel.
+		 */
+		if (ipa3_ctx->ep[i].valid) {
+			result = ipa3_teardown_pipe(i);
 			if (result) {
 				IPAERR("failed to teardown default coal pipe\n");
 				return result;
@@ -1938,14 +1997,18 @@
 }
 
 /**
- * ipa3_teardown_coal_def_pipe() - Teardown the APPS_WAN_COAL_CONS
- *				   default GPI pipe and cleanup IPA EP
- *				   called after the coalesced pipe is destroyed.
- * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
+ * ipa3_teardown_pipe()
+ *
+ *   Teardown and cleanup of the physical connection (i.e. data
+ *   structures, buffers, GSI channel, work queues, etc) associated
+ *   with the passed client handle and the endpoint context that the
+ *   handle represents.
+ *
+ * @clnt_hdl:  [in] A handle obtained from ipa3_setup_sys_pipe
  *
  * Returns:	0 on success, negative on failure
  */
-static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl)
+static int ipa3_teardown_pipe(u32 clnt_hdl)
 {
 	struct ipa3_ep_context *ep;
 	int result;
@@ -2323,6 +2386,8 @@
 	 */
 	if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
 		client_type = IPA_CLIENT_APPS_WAN_COAL_CONS;
+	else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
+		client_type = IPA_CLIENT_APPS_LAN_COAL_CONS;
 	else
 		client_type = sys->ep->client;
 
@@ -2396,10 +2461,9 @@
 fail_kmem_cache_alloc:
 	if (atomic_read(&sys->repl->tail_idx) ==
 			atomic_read(&sys->repl->head_idx)) {
-		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
-			sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+		if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
-		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+		else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
 		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.low_lat_repl_rx_empty);
@@ -2696,6 +2760,11 @@
 		}
 		rx_pkt->sys = sys;
 
+		trace_ipa3_replenish_rx_page_recycle(
+			stats_i,
+			rx_pkt->page_data.page,
+			rx_pkt->page_data.is_tmp_alloc);
+
 		dma_sync_single_for_device(ipa3_ctx->pdev,
 			rx_pkt->page_data.dma_addr,
 			rx_pkt->len, DMA_FROM_DEVICE);
@@ -2773,6 +2842,8 @@
 		}
 		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_COAL_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty_coal);
 		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.rmnet_ll_rx_empty);
 		else
@@ -3178,6 +3249,9 @@
 	int rx_len_cached = 0;
 	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
 	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+	u32 stats_i =
+		(sys->ep->client == IPA_CLIENT_APPS_LAN_COAL_CONS) ? 0 :
+		(sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)      ? 1 : 2;
 
 	/* start replenish only when buffers go lower than the threshold */
 	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
@@ -3202,30 +3276,26 @@
 					rx_pkt);
 				goto fail_kmem_cache_alloc;
 			}
-			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
-			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
-				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
-			if (dma_mapping_error(ipa3_ctx->pdev,
-				rx_pkt->data.dma_addr)) {
-				IPAERR("dma_map_single failure %pK for %pK\n",
-					(void *)rx_pkt->data.dma_addr, ptr);
-				goto fail_dma_mapping;
-			}
+			ipa3_ctx->stats.cache_recycle_stats[stats_i].pkt_allocd++;
 		} else {
 			spin_lock_bh(&sys->spinlock);
-			rx_pkt = list_first_entry(&sys->rcycl_list,
+			rx_pkt = list_first_entry(
+				&sys->rcycl_list,
 				struct ipa3_rx_pkt_wrapper, link);
 			list_del_init(&rx_pkt->link);
 			spin_unlock_bh(&sys->spinlock);
-			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
-			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
-				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
-			if (dma_mapping_error(ipa3_ctx->pdev,
-				rx_pkt->data.dma_addr)) {
-				IPAERR("dma_map_single failure %pK for %pK\n",
-					(void *)rx_pkt->data.dma_addr, ptr);
-				goto fail_dma_mapping;
-			}
+			ipa3_ctx->stats.cache_recycle_stats[stats_i].pkt_found++;
+		}
+
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+
+		rx_pkt->data.dma_addr = dma_map_single(
+			ipa3_ctx->pdev, ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+
+		if (dma_mapping_error( ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
+			IPAERR("dma_map_single failure %pK for %pK\n",
+				   (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
 		}
 
 		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
@@ -3237,6 +3307,7 @@
 		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
 		idx++;
 		rx_len_cached++;
+		ipa3_ctx->stats.cache_recycle_stats[stats_i].tot_pkt_replenished++;
 		/*
 		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_MAX.
 		 * If this size is reached we need to queue the xfers.
@@ -3345,10 +3416,9 @@
 	__trigger_repl_work(sys);
 
 	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
-		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
-			sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+		if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
-		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+		else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
 		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.low_lat_rx_empty);
@@ -3409,11 +3479,17 @@
 	if (!rx_pkt->page_data.is_tmp_alloc) {
 		list_del_init(&rx_pkt->link);
 		page_ref_dec(rx_pkt->page_data.page);
+		spin_lock_bh(&rx_pkt->sys->common_sys->spinlock);
+		/* Add the element to head. */
+		list_add(&rx_pkt->link,
+			&rx_pkt->sys->page_recycle_repl->page_repl_head);
+		spin_unlock_bh(&rx_pkt->sys->common_sys->spinlock);
+	} else {
+		dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
+			rx_pkt->len, DMA_FROM_DEVICE);
+		__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	}
-	dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
-		rx_pkt->len, DMA_FROM_DEVICE);
-	__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
-	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 }
 
 /**
@@ -3473,23 +3549,6 @@
 		kfree(sys->repl);
 		sys->repl = NULL;
 	}
-	if (sys->page_recycle_repl) {
-		list_for_each_entry_safe(rx_pkt, r,
-		&sys->page_recycle_repl->page_repl_head, link) {
-			list_del(&rx_pkt->link);
-			dma_unmap_page(ipa3_ctx->pdev,
-				rx_pkt->page_data.dma_addr,
-				rx_pkt->len,
-				DMA_FROM_DEVICE);
-			__free_pages(rx_pkt->page_data.page,
-				rx_pkt->page_data.page_order);
-			kmem_cache_free(
-				ipa3_ctx->rx_pkt_wrapper_cache,
-				rx_pkt);
-		}
-		kfree(sys->page_recycle_repl);
-		sys->page_recycle_repl = NULL;
-	}
 }
 
 static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
@@ -3520,7 +3579,7 @@
 	struct ipahal_pkt_status status;
 	u32 pkt_status_sz;
 	struct sk_buff *skb2;
-	int pad_len_byte;
+	int pad_len_byte = 0;
 	int len;
 	unsigned char *buf;
 	int src_pipe;
@@ -3727,7 +3786,12 @@
 				goto out;
 			}
 
-			pad_len_byte = ((status.pkt_len + 3) & ~3) -
+			/*
+			 * Padding is not needed for the LAN coalescing pipe,
+			 * so we only pad for the other pipes.
+			 */
+			if (sys->ep->client != IPA_CLIENT_APPS_LAN_COAL_CONS)
+				pad_len_byte = ((status.pkt_len + 3) & ~3) -
 					status.pkt_len;
 			len = status.pkt_len + pad_len_byte;
 			IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
@@ -4091,9 +4155,10 @@
 		dev_kfree_skb_any(rx_skb);
 		return;
 	}
-	if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
-		skb_pull(rx_skb, ipahal_pkt_status_get_size() +
-				IPA_LAN_RX_HEADER_LENGTH);
+	if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
+		u32 extra = ( lan_coal_enabled() ) ? 0 : IPA_LAN_RX_HEADER_LENGTH;
+		skb_pull(rx_skb, ipahal_pkt_status_get_size() + extra);
+	}
 	else
 		skb_pull(rx_skb, ipahal_pkt_status_get_size());
 
@@ -4124,6 +4189,783 @@
 
 }
 
+/*
+ * The following will help us deduce the real size of an ipv6 header
+ * that may or may not have extensions...
+ */
+static int _skip_ipv6_exthdr(
+	u8     *hdr_ptr,
+	int     start,
+	u8     *nexthdrp,
+	__be16 *fragp )
+{
+	u8 nexthdr = *nexthdrp;
+
+	*fragp = 0;
+
+	while ( ipv6_ext_hdr(nexthdr) ) {
+
+		struct ipv6_opt_hdr *hp;
+
+		int hdrlen;
+
+		if (nexthdr == NEXTHDR_NONE)
+			return -EINVAL;
+
+		hp = (struct ipv6_opt_hdr*) (hdr_ptr + (u32) start);
+
+		if (nexthdr == NEXTHDR_FRAGMENT) {
+
+			u32 off = offsetof(struct frag_hdr, frag_off);
+
+			__be16 *fp = (__be16*) (hdr_ptr + (u32)start + off);
+
+			*fragp = *fp;
+
+			if (ntohs(*fragp) & ~0x7)
+				break;
+
+			hdrlen = 8;
+
+		} else if (nexthdr == NEXTHDR_AUTH) {
+
+			hdrlen = ipv6_authlen(hp);
+
+		} else {
+
+			hdrlen = ipv6_optlen(hp);
+		}
+
+		nexthdr = hp->nexthdr;
+
+		start += hdrlen;
+	}
+
+	*nexthdrp = nexthdr;
+
+	return start;
+}
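As a usage sketch for the helper above, assuming an ipv6hdr pointer into a linear
buffer, the intended calling pattern mirrors the one used later in
ipa3_lan_coal_rx_cb; the wrapper name here is hypothetical:

/*
 * Illustrative sketch only: locate the transport header within an
 * IPv6 packet by skipping any extension headers.
 */
static int example_ipv6_l4_offset(struct ipv6hdr *ip6h)
{
	u8 nexthdr = ip6h->nexthdr;   /* updated in place to the L4 proto */
	__be16 frag_off;
	int l4_off;

	l4_off = _skip_ipv6_exthdr((u8 *)ip6h, sizeof(*ip6h),
				   &nexthdr, &frag_off);

	/* A negative return or a fragment header means bail out. */
	if (l4_off < 0 || frag_off)
		return -EINVAL;

	return l4_off;   /* byte offset of the TCP/UDP header */
}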
+
+/*
+ * The following defines and structure used for calculating Ethernet
+ * frame type and size...
+ */
+#define IPA_ETH_VLAN_2TAG 0x88A8
+#define IPA_ETH_VLAN_TAG  0x8100
+#define IPA_ETH_TAG_SZ    sizeof(u32)
+
+/*
+ * The following structure used for containing packet payload
+ * information.
+ */
+typedef struct ipa_pkt_data_s {
+	void* pkt;
+	u32   pkt_len;
+} ipa_pkt_data_t;
+
+/*
+ * The following structure used for consolidating all header
+ * information.
+ */
+typedef struct ipa_header_data_s {
+	struct ethhdr* eth_hdr;
+	u32            eth_hdr_size;
+	u8             ip_vers;
+	void*          ip_hdr;
+	u32            ip_hdr_size;
+	u8             ip_proto;
+	void*          proto_hdr;
+	u32            proto_hdr_size;
+	u32            aggr_hdr_len;
+	u32            curr_seq;
+} ipa_header_data_t;
+
+static int
+_calc_partial_csum(
+	struct sk_buff*    skb,
+	ipa_header_data_t* hdr_data,
+	u32                aggr_payload_size )
+{
+	u32 ip_hdr_size;
+	u32 proto_hdr_size;
+	u8  ip_vers;
+	u8  ip_proto;
+	u8* new_ip_hdr;
+	u8* new_proto_hdr;
+	u32 len_for_calc;
+	__sum16 pseudo;
+
+	if ( !skb || !hdr_data ) {
+
+		IPAERR(
+			"NULL args: skb(%p) and/or hdr_data(%p)\n",
+			skb, hdr_data);
+
+		return -1;
+
+	} else {
+
+		ip_hdr_size    = hdr_data->ip_hdr_size;
+		proto_hdr_size = hdr_data->proto_hdr_size;
+		ip_vers        = hdr_data->ip_vers;
+		ip_proto       = hdr_data->ip_proto;
+
+		new_ip_hdr    = (u8*) skb->data + hdr_data->eth_hdr_size;
+
+		new_proto_hdr = new_ip_hdr + ip_hdr_size;
+
+		len_for_calc  = proto_hdr_size + aggr_payload_size;
+
+		skb->ip_summed = CHECKSUM_PARTIAL;
+
+		if ( ip_vers == 4 ) {
+
+			struct iphdr* iph = (struct iphdr*) new_ip_hdr;
+
+			iph->check = 0;
+			iph->check = ip_fast_csum(iph, iph->ihl);
+
+			pseudo = ~csum_tcpudp_magic(
+				iph->saddr,
+				iph->daddr,
+				len_for_calc,
+				ip_proto,
+				0);
+
+		} else { /* ( ip_vers == 6 ) */
+
+			struct ipv6hdr* iph = (struct ipv6hdr*) new_ip_hdr;
+
+			pseudo = ~csum_ipv6_magic(
+				&iph->saddr,
+				&iph->daddr,
+				len_for_calc,
+				ip_proto,
+				0);
+		}
+
+		if ( ip_proto == IPPROTO_TCP ) {
+
+			struct tcphdr* hdr = (struct tcphdr*) new_proto_hdr;
+
+			hdr->check = pseudo;
+
+			skb->csum_offset = offsetof(struct tcphdr, check);
+
+		} else {
+
+			struct udphdr* hdr = (struct udphdr*) new_proto_hdr;
+
+			hdr->check = pseudo;
+
+			skb->csum_offset = offsetof(struct udphdr, check);
+		}
+	}
+
+	return 0;
+}
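For context on the CHECKSUM_PARTIAL convention used above: the transport checksum
field is seeded with the inverted pseudo-header sum, and the remainder is computed
later over the actual bytes. A rough sketch of that completion step, assuming
csum_start has been set up and using the standard kernel helpers:

/* Hypothetical completion step, as the stack or NIC would perform it. */
static void example_complete_partial_csum(struct sk_buff *skb)
{
	int start = skb_checksum_start_offset(skb);
	__wsum csum;

	/* Sum everything from the transport header onward; the pseudo-
	 * header seed already written to the check field is included. */
	csum = csum_partial(skb->data + start, skb->len - start, 0);

	/* Fold to 16 bits and store back at csum_start + csum_offset. */
	*(__sum16 *)(skb->data + start + skb->csum_offset) = csum_fold(csum);
}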
+
+/*
+ * The following function takes the constituent parts of an Ethernet
+ * and IP packet and creates an skb from them...
+ */
+static int
+_prep_and_send_skb(
+	struct sk_buff*         rx_skb,
+	struct ipa3_ep_context* ep,
+	u32                     metadata,
+	u8                      ucp,
+	ipa_header_data_t*      hdr_data,
+	ipa_pkt_data_t*         pkts,
+	u32                     num_pkts,
+	u32                     aggr_payload_size,
+	u8                      pkt_id,
+	bool                    recalc_cksum )
+{
+	struct ethhdr* eth_hdr;
+	u32            eth_hdr_size;
+	u8             ip_vers;
+	void*          ip_hdr;
+	u32            ip_hdr_size;
+	u8             ip_proto;
+	void*          proto_hdr;
+	u32            proto_hdr_size;
+	u32            aggr_hdr_len;
+	u32            i;
+
+	void          *new_proto_hdr, *new_ip_hdr, *new_eth_hdr;
+
+	struct skb_shared_info *shinfo;
+
+	struct sk_buff *head_skb;
+
+	void *client_priv;
+	void (*client_notify)(
+		void *client_priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data);
+
+	client_notify = 0;
+
+	spin_lock(&ipa3_ctx->disconnect_lock);
+	if (ep->valid && ep->client_notify &&
+		likely((!atomic_read(&ep->disconnect_in_progress)))) {
+
+		client_notify = ep->client_notify;
+		client_priv   = ep->priv;
+	}
+	spin_unlock(&ipa3_ctx->disconnect_lock);
+
+	if ( client_notify ) {
+
+		eth_hdr        = hdr_data->eth_hdr;
+		eth_hdr_size   = hdr_data->eth_hdr_size;
+		ip_vers        = hdr_data->ip_vers;
+		ip_hdr         = hdr_data->ip_hdr;
+		ip_hdr_size    = hdr_data->ip_hdr_size;
+		ip_proto       = hdr_data->ip_proto;
+		proto_hdr      = hdr_data->proto_hdr;
+		proto_hdr_size = hdr_data->proto_hdr_size;
+		aggr_hdr_len   = hdr_data->aggr_hdr_len;
+
+		if ( rx_skb ) {
+
+			head_skb = rx_skb;
+
+			ipa3_ctx->stats.coal.coal_left_as_is++;
+
+		} else {
+
+			head_skb = alloc_skb(aggr_hdr_len + aggr_payload_size, GFP_ATOMIC);
+
+			if ( unlikely(!head_skb) ) {
+				IPAERR("skb alloc failure\n");
+				return -1;
+			}
+
+			ipa3_ctx->stats.coal.coal_reconstructed++;
+
+			head_skb->protocol = ip_proto;
+
+			/*
+			 * Copy MAC header into the skb...
+			 */
+			new_eth_hdr = skb_put_data(head_skb, eth_hdr, eth_hdr_size);
+
+			skb_reset_mac_header(head_skb);
+
+			/*
+			 * Copy, and update, IP[4|6] header into the skb...
+			 */
+			new_ip_hdr = skb_put_data(head_skb, ip_hdr, ip_hdr_size);
+
+			if ( ip_vers == 4 ) {
+
+				struct iphdr* ip4h = new_ip_hdr;
+
+				ip4h->id = htons(ntohs(ip4h->id) + pkt_id);
+
+				ip4h->tot_len =
+					htons(ip_hdr_size + proto_hdr_size + aggr_payload_size);
+
+			} else {
+
+				struct ipv6hdr* ip6h = new_ip_hdr;
+
+				ip6h->payload_len =
+					htons(proto_hdr_size + aggr_payload_size);
+			}
+
+			skb_reset_network_header(head_skb);
+
+			/*
+			 * Copy, and update, [TCP|UDP] header into the skb...
+			 */
+			new_proto_hdr = skb_put_data(head_skb, proto_hdr, proto_hdr_size);
+
+			if ( ip_proto == IPPROTO_TCP ) {
+
+				struct tcphdr* hdr = new_proto_hdr;
+
+				hdr_data->curr_seq += (aggr_payload_size) ? aggr_payload_size : 1;
+
+				hdr->seq = htonl(hdr_data->curr_seq);
+
+			} else {
+
+				struct udphdr* hdr = new_proto_hdr;
+
+				u16 len = sizeof(struct udphdr) + aggr_payload_size;
+
+				hdr->len = htons(len);
+			}
+
+			skb_reset_transport_header(head_skb);
+
+			/*
+			 * Now aggregate all the individual physical payloads into
+			 * the skb.
+			 */
+			for ( i = 0; i < num_pkts; i++ ) {
+				skb_put_data(head_skb, pkts[i].pkt, pkts[i].pkt_len);
+			}
+		}
+
+		/*
+		 * Is a recalc of the various checksums in order?
+		 */
+		if ( recalc_cksum ) {
+			_calc_partial_csum(head_skb, hdr_data, aggr_payload_size);
+		}
+
+		/*
+		 * Let's add some resegmentation info into the head skb. This
+		 * data will allow the stack to resegment the payload, should
+		 * it need to (e.g. relative to the MTU)...
+		 */
+		shinfo = skb_shinfo(head_skb);
+
+		shinfo->gso_segs = num_pkts;
+		shinfo->gso_size = pkts[0].pkt_len;
+
+		if (ip_proto == IPPROTO_TCP) {
+			shinfo->gso_type = (ip_vers == 4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+			ipa3_ctx->stats.coal.coal_tcp++;
+			ipa3_ctx->stats.coal.coal_tcp_bytes += aggr_payload_size;
+		} else {
+			shinfo->gso_type = SKB_GSO_UDP_L4;
+			ipa3_ctx->stats.coal.coal_udp++;
+			ipa3_ctx->stats.coal.coal_udp_bytes += aggr_payload_size;
+		}
+
+		/*
+		 * Send this new skb to the client...
+		 */
+		*(u16 *)head_skb->cb = ((metadata >> 16) & 0xFFFF);
+		*(u8 *)(head_skb->cb + 4) = ucp;
+
+		IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
+				   metadata, *(u32 *)head_skb->cb);
+		IPADBG_LOW("ucp: %d\n", *(u8 *)(head_skb->cb + 4));
+
+		client_notify(client_priv, IPA_RECEIVE, (unsigned long)(head_skb));
+	}
+
+	return 0;
+}
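The gso_segs/gso_size hints set above are what allow the stack to split the
aggregate back into individual packets. A minimal sketch of reading those hints
back; the function name is hypothetical:

/* Illustrative only: the shared-info hints describe the aggregate. */
static void example_dump_gso_hints(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* gso_size is the per-segment payload length and gso_segs the
	 * segment count; gso_type selects TCPv4/TCPv6/UDP handling. */
	pr_debug("aggregate: %u segs of %u bytes, type 0x%x\n",
		 shinfo->gso_segs, shinfo->gso_size, shinfo->gso_type);
}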
+
+/*
+ * The following will process a coalesced LAN packet from the IPA...
+ */
+void ipa3_lan_coal_rx_cb(
+	void                *priv,
+	enum ipa_dp_evt_type evt,
+	unsigned long        data)
+{
+	struct sk_buff *rx_skb = (struct sk_buff *) data;
+
+	unsigned int                    src_pipe;
+	u8                              ucp;
+	u32                             metadata;
+
+	struct ipahal_pkt_status_thin   status;
+	struct ipa3_ep_context         *ep;
+
+	u8*                             qmap_hdr_data_ptr;
+	struct qmap_hdr_data            qmap_hdr;
+
+	struct coal_packet_status_info *cpsi, *cpsi_orig;
+	u8*                             stat_info_ptr;
+
+	u32               pkt_status_sz = ipahal_pkt_status_get_size();
+
+	u32               eth_hdr_size;
+	u32               ip_hdr_size;
+	u8                ip_vers, ip_proto;
+	u32               proto_hdr_size;
+	u32               cpsi_hdrs_size;
+	u32               aggr_payload_size;
+
+	u32               pkt_len;
+
+	struct ethhdr*    eth_hdr;
+	void*             ip_hdr;
+	struct iphdr*     ip4h;
+	struct ipv6hdr*   ip6h;
+	void*             proto_hdr;
+	u8*               pkt_data;
+	bool              gro = true;
+	bool              cksum_is_zero;
+	ipa_header_data_t hdr_data;
+
+	ipa_pkt_data_t    in_pkts[MAX_COAL_PACKETS];
+	u32               in_pkts_sub;
+
+	u8                tot_pkts;
+
+	u32               i, j;
+
+	u64               cksum_mask = 0;
+
+	int               ret;
+
+	IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
+
+	ipa3_ctx->stats.coal.coal_rx++;
+
+	ipahal_pkt_status_parse_thin(rx_skb->data, &status);
+	src_pipe = status.endp_src_idx;
+	metadata = status.metadata;
+	ucp = status.ucp;
+	ep = &ipa3_ctx->ep[src_pipe];
+	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes) ||
+		unlikely(atomic_read(&ep->disconnect_in_progress))) {
+		IPAERR("drop pipe=%d\n", src_pipe);
+		goto process_done;
+	}
+
+	memset(&hdr_data, 0, sizeof(hdr_data));
+	memset(&qmap_hdr, 0, sizeof(qmap_hdr));
+
+	/*
+	 * Let's get to, then parse, the qmap header...
+	 */
+	qmap_hdr_data_ptr = rx_skb->data + pkt_status_sz;
+
+	ret = ipahal_qmap_parse(qmap_hdr_data_ptr, &qmap_hdr);
+
+	if ( unlikely(ret) ) {
+		IPAERR("ipahal_qmap_parse fail\n");
+		ipa3_ctx->stats.coal.coal_hdr_qmap_err++;
+		goto process_done;
+	}
+
+	if ( ! VALID_NLS(qmap_hdr.num_nlos) ) {
+		IPAERR("Bad num_nlos(%u) value\n", qmap_hdr.num_nlos);
+		ipa3_ctx->stats.coal.coal_hdr_nlo_err++;
+		goto process_done;
+	}
+
+	stat_info_ptr = qmap_hdr_data_ptr + sizeof(union qmap_hdr_u);
+
+	cpsi = cpsi_orig = (struct coal_packet_status_info*) stat_info_ptr;
+
+	/*
+	 * Reconstruct the 48 bits of checksum info. And count total
+	 * packets as well...
+	 */
+	for (i = tot_pkts = 0;
+		 i < MAX_COAL_PACKET_STATUS_INFO;
+		 ++i, ++cpsi) {
+
+		cpsi->pkt_len = ntohs(cpsi->pkt_len);
+
+		cksum_mask |= ((u64) cpsi->pkt_cksum_errs) << (8 * i);
+
+		if ( i < qmap_hdr.num_nlos ) {
+			tot_pkts += cpsi->num_pkts;
+		}
+	}
+
+	/*
+	 * A bounds check.
+	 *
+	 * Technically, the hardware shouldn't give us a bad count, but
+	 * just to be safe...
+	 */
+	if ( tot_pkts > MAX_COAL_PACKETS ) {
+		IPAERR("tot_pkts(%u) > MAX_COAL_PACKETS(%u)\n",
+			   tot_pkts, MAX_COAL_PACKETS);
+		ipa3_ctx->stats.coal.coal_hdr_pkt_err++;
+		goto process_done;
+	}
+
+	ipa3_ctx->stats.coal.coal_pkts += tot_pkts;
+
+	/*
+	 * Move along past the coal headers...
+	 */
+	cpsi_hdrs_size = MAX_COAL_PACKET_STATUS_INFO * sizeof(u32);
+
+	pkt_data = stat_info_ptr + cpsi_hdrs_size;
+
+	/*
+	 * Let's process the Ethernet header...
+	 */
+	eth_hdr = (struct ethhdr*) pkt_data;
+
+	switch ( ntohs(eth_hdr->h_proto) )
+	{
+	case IPA_ETH_VLAN_2TAG:
+		eth_hdr_size = sizeof(struct ethhdr) + (IPA_ETH_TAG_SZ * 2);
+		break;
+	case IPA_ETH_VLAN_TAG:
+		eth_hdr_size = sizeof(struct ethhdr) + IPA_ETH_TAG_SZ;
+		break;
+	default:
+		eth_hdr_size = sizeof(struct ethhdr);
+		break;
+	}
+
+	/*
+	 * Get to and process the ip header...
+	 */
+	ip_hdr = (u8*) eth_hdr + eth_hdr_size;
+
+	/*
+	 * Is it an IPv[4|6] header?
+	 */
+	if (((struct iphdr*) ip_hdr)->version == 4) {
+		/*
+		 * Eth frame is carrying ip v4 payload.
+		 */
+		ip_vers     = 4;
+		ip4h        = (struct iphdr*) ip_hdr;
+		ip_hdr_size = ip4h->ihl * sizeof(u32);
+		ip_proto    = ip4h->protocol;
+
+		/*
+		 * Don't allow grouping of any packets with IP options
+		 * (i.e. don't allow when ihl != 5)...
+		 */
+		gro = (ip4h->ihl == 5);
+
+	} else if (((struct ipv6hdr*) ip_hdr)->version == 6) {
+		/*
+		 * Eth frame is carrying ip v6 payload.
+		 */
+		int hdr_size;
+		__be16 frag_off;
+
+		ip_vers     = 6;
+		ip6h        = (struct ipv6hdr*) ip_hdr;
+		ip_proto    = ip6h->nexthdr;
+
+		/*
+		 * If extension headers exist, we need to analyze/skip them,
+		 * hence...
+		 */
+		hdr_size = _skip_ipv6_exthdr(
+			(u8*) ip_hdr,
+			sizeof(*ip6h),
+			&ip_proto,
+			&frag_off);
+
+		/*
+		 * If we run into a problem, or this has a fragmented header
+		 * (which technically should not be possible if the HW works
+		 * as intended), bail.
+		 */
+		if (hdr_size < 0 || frag_off) {
+			IPAERR(
+				"_skip_ipv6_exthdr() failed. Errored with hdr_size(%d) "
+				"and/or frag_off(%d)\n",
+				hdr_size,
+				ntohs(frag_off));
+			ipa3_ctx->stats.coal.coal_ip_invalid++;
+			goto process_done;
+		}
+
+		ip_hdr_size = hdr_size;
+
+		/*
+		 * Don't allow grouping of any packets with IPv6 extension
+		 * headers (i.e. don't allow when ip_hdr_size != basic v6
+		 * header size).
+		 */
+		gro = (ip_hdr_size == sizeof(*ip6h));
+
+	} else {
+
+		IPAERR("Not a v4 or v6 header...can't process\n");
+		ipa3_ctx->stats.coal.coal_ip_invalid++;
+		goto process_done;
+	}
+
+	/*
+	 * Get to and process the protocol header...
+	 */
+	proto_hdr = (u8*) ip_hdr + ip_hdr_size;
+
+	if (ip_proto == IPPROTO_TCP) {
+
+		struct tcphdr* hdr = (struct tcphdr*) proto_hdr;
+
+		hdr_data.curr_seq = ntohl(hdr->seq);
+
+		proto_hdr_size = hdr->doff * sizeof(u32);
+
+		cksum_is_zero = false;
+
+	} else if (ip_proto == IPPROTO_UDP) {
+
+		proto_hdr_size = sizeof(struct udphdr);
+
+		cksum_is_zero = (ip_vers == 4 && ((struct udphdr*) proto_hdr)->check == 0);
+
+	} else {
+
+		IPAERR("Not a TCP or UDP heqder...can't process\n");
+		ipa3_ctx->stats.coal.coal_trans_invalid++;
+		goto process_done;
+
+	}
+
+	/*
+	 * The following will adjust the skb internals (ie. skb->data and
+	 * skb->len), such that they're positioned, and reflect, the data
+	 * starting at the ETH header...
+	 */
+	skb_pull(
+		rx_skb,
+		pkt_status_sz +
+		sizeof(union qmap_hdr_u) +
+		cpsi_hdrs_size);
+
+	/*
+	 * Consolidate all header, header type, and header size info...
+	 */
+	hdr_data.eth_hdr        = eth_hdr;
+	hdr_data.eth_hdr_size   = eth_hdr_size;
+	hdr_data.ip_vers        = ip_vers;
+	hdr_data.ip_hdr         = ip_hdr;
+	hdr_data.ip_hdr_size    = ip_hdr_size;
+	hdr_data.ip_proto       = ip_proto;
+	hdr_data.proto_hdr      = proto_hdr;
+	hdr_data.proto_hdr_size = proto_hdr_size;
+	hdr_data.aggr_hdr_len   = eth_hdr_size + ip_hdr_size + proto_hdr_size;
+
+	if ( qmap_hdr.vcid < GSI_VEID_MAX ) {
+		ipa3_ctx->stats.coal.coal_veid[qmap_hdr.vcid] += 1;
+	}
+
+	/*
+	 * Quick check to see if we really need to go any further...
+	 */
+	if ( gro && qmap_hdr.num_nlos == 1 && qmap_hdr.chksum_valid ) {
+
+		cpsi = cpsi_orig;
+
+		in_pkts[0].pkt     = rx_skb->data  + hdr_data.aggr_hdr_len;
+		in_pkts[0].pkt_len = cpsi->pkt_len - (ip_hdr_size + proto_hdr_size);
+
+		in_pkts_sub = 1;
+
+		aggr_payload_size = rx_skb->len - hdr_data.aggr_hdr_len;
+
+		_prep_and_send_skb(
+			rx_skb,
+			ep, metadata, ucp,
+			&hdr_data,
+			in_pkts,
+			in_pkts_sub,
+			aggr_payload_size,
+			tot_pkts,
+			false);
+
+		return;
+	}
+
+	/*
+	 * Time to process packet payloads...
+	 */
+	pkt_data = (u8*) proto_hdr + proto_hdr_size;
+
+	for ( i = tot_pkts = 0, cpsi = cpsi_orig;
+		  i < qmap_hdr.num_nlos;
+		  ++i, ++cpsi ) {
+
+		aggr_payload_size = in_pkts_sub = 0;
+
+		for ( j = 0;
+			  j < cpsi->num_pkts;
+			  j++, tot_pkts++, cksum_mask >>= 1 ) {
+
+			bool csum_err = cksum_mask & 1;
+
+			pkt_len = cpsi->pkt_len - (ip_hdr_size + proto_hdr_size);
+
+			if ( csum_err || ! gro ) {
+
+				if ( csum_err ) {
+					ipa3_ctx->stats.coal.coal_csum_err++;
+				}
+
+				/*
+				 * If there are previously queued packets, send them
+				 * now...
+				 */
+				if ( in_pkts_sub ) {
+
+					_prep_and_send_skb(
+						NULL,
+						ep, metadata, ucp,
+						&hdr_data,
+						in_pkts,
+						in_pkts_sub,
+						aggr_payload_size,
+						tot_pkts,
+						!cksum_is_zero);
+
+					in_pkts_sub = aggr_payload_size = 0;
+				}
+
+				/*
+				 * Now send the singleton...
+				 */
+				in_pkts[in_pkts_sub].pkt     = pkt_data;
+				in_pkts[in_pkts_sub].pkt_len = pkt_len;
+
+				aggr_payload_size += in_pkts[in_pkts_sub].pkt_len;
+				pkt_data          += in_pkts[in_pkts_sub].pkt_len;
+
+				in_pkts_sub++;
+
+				_prep_and_send_skb(
+					NULL,
+					ep, metadata, ucp,
+					&hdr_data,
+					in_pkts,
+					in_pkts_sub,
+					aggr_payload_size,
+					tot_pkts,
+					(csum_err) ? false : !cksum_is_zero);
+
+				in_pkts_sub = aggr_payload_size = 0;
+
+				continue;
+			}
+
+			in_pkts[in_pkts_sub].pkt     = pkt_data;
+			in_pkts[in_pkts_sub].pkt_len = pkt_len;
+
+			aggr_payload_size += in_pkts[in_pkts_sub].pkt_len;
+			pkt_data          += in_pkts[in_pkts_sub].pkt_len;
+
+			in_pkts_sub++;
+		}
+
+		if ( in_pkts_sub ) {
+
+			_prep_and_send_skb(
+				NULL,
+				ep, metadata, ucp,
+				&hdr_data,
+				in_pkts,
+				in_pkts_sub,
+				aggr_payload_size,
+				tot_pkts,
+				!cksum_is_zero);
+		}
+	}
+
+process_done:
+	/*
+	 * One way or the other, we no longer need the skb, hence...
+	 */
+	dev_kfree_skb_any(rx_skb);
+}
+
 static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
 {
 	rx_pkt->data.dma_addr = 0;
@@ -4155,8 +4997,10 @@
  * corresponding rx pkt. Once finished return the head_skb to be sent up the
  * network stack.
  */
-static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
-		*notify, bool update_truesize)
+static struct sk_buff *handle_skb_completion(
+	struct gsi_chan_xfer_notify *notify,
+	bool                         update_truesize,
+	struct ipa3_rx_pkt_wrapper **rx_pkt_ptr )
 {
 	struct ipa3_rx_pkt_wrapper *rx_pkt, *tmp;
 	struct sk_buff *rx_skb, *next_skb = NULL;
@@ -4166,6 +5010,10 @@
 	sys = (struct ipa3_sys_context *) notify->chan_user_data;
 	rx_pkt = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
 
+	if ( rx_pkt_ptr ) {
+		*rx_pkt_ptr = rx_pkt;
+	}
+
 	spin_lock_bh(&rx_pkt->sys->spinlock);
 	rx_pkt->sys->len--;
 	spin_unlock_bh(&rx_pkt->sys->spinlock);
@@ -4214,7 +5062,7 @@
 	/* Check added for handling LAN consumer packet without EOT flag */
 	if (notify->evt_id == GSI_CHAN_EVT_EOT ||
 		sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
-	/* go over the list backward to save computations on updating length */
+		/* go over the list backward to save computations on updating length */
 		list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
 			rx_skb = rx_pkt->data.skb;
 
@@ -4348,6 +5196,10 @@
 				rx_page.page, 0,
 				size,
 				PAGE_SIZE << rx_page.page_order);
+
+			trace_handle_page_completion(rx_page.page,
+				rx_skb, notify->bytes_xfered,
+				rx_page.is_tmp_alloc, sys->ep->client);
 		}
 	} else {
 		return NULL;
@@ -4355,37 +5207,23 @@
 	return rx_skb;
 }
 
-static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
+static void ipa3_wq_rx_common(
+	struct ipa3_sys_context     *sys,
 	struct gsi_chan_xfer_notify *notify)
 {
-	struct sk_buff *rx_skb;
-	struct ipa3_sys_context *coal_sys;
-	int ipa_ep_idx;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct sk_buff             *rx_skb;
 
 	if (!notify) {
 		IPAERR_RL("gsi_chan_xfer_notify is null\n");
 		return;
 	}
-	rx_skb = handle_skb_completion(notify, true);
+
+	rx_skb = handle_skb_completion(notify, true, &rx_pkt);
 
 	if (rx_skb) {
-		sys->pyld_hdlr(rx_skb, sys);
-
-		/* For coalescing, we have 2 transfer rings to replenish */
-		if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
-			ipa_ep_idx = ipa3_get_ep_mapping(
-					IPA_CLIENT_APPS_WAN_CONS);
-
-			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
-				IPAERR("Invalid client.\n");
-				return;
-			}
-
-			coal_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
-			coal_sys->repl_hdlr(coal_sys);
-		}
-
-		sys->repl_hdlr(sys);
+		rx_pkt->sys->pyld_hdlr(rx_skb, rx_pkt->sys);
+		rx_pkt->sys->repl_hdlr(rx_pkt->sys);
 	}
 }
 
@@ -4401,7 +5239,7 @@
 		for (i = 0; i < num; i++) {
 			if (!ipa3_ctx->ipa_wan_skb_page)
 				rx_skb = handle_skb_completion(
-					&notify[i], false);
+					&notify[i], false, NULL);
 			else
 				rx_skb = handle_page_completion(
 					&notify[i], false);
@@ -4415,6 +5253,10 @@
 					skb_shinfo(prev_skb)->frag_list =
 						rx_skb;
 
+				trace_ipa3_rx_napi_chain(first_skb,
+							 prev_skb,
+							 rx_skb);
+
 				prev_skb = rx_skb;
 			}
 		}
@@ -4427,7 +5269,7 @@
 			/* TODO: add chaining for coal case */
 			for (i = 0; i < num; i++) {
 				rx_skb = handle_skb_completion(
-					&notify[i], false);
+					&notify[i], false, NULL);
 				if (rx_skb) {
 					sys->pyld_hdlr(rx_skb, sys);
 					/*
@@ -4462,6 +5304,11 @@
 							= rx_skb;
 
 					prev_skb = rx_skb;
+
+					trace_ipa3_rx_napi_chain(first_skb,
+								 prev_skb,
+								 rx_skb);
+
 				}
 			}
 			if (prev_skb) {
@@ -4646,9 +5493,8 @@
 			atomic_set(&sys->workqueue_flushed, 0);
 		}
 	} else {
-		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
-		    in->client == IPA_CLIENT_APPS_WAN_CONS ||
-		    in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
+		if (IPA_CLIENT_IS_LAN_CONS(in->client) ||
+		    IPA_CLIENT_IS_WAN_CONS(in->client) ||
 		    in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS ||
 		    in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
 			sys->ep->status.status_en = true;
@@ -4663,11 +5509,11 @@
 				IPA_GENERIC_RX_BUFF_BASE_SZ);
 			sys->get_skb = ipa3_get_skb_ipa_rx;
 			sys->free_skb = ipa3_free_skb_rx;
-			if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+			if (IPA_CLIENT_IS_APPS_COAL_CONS(in->client))
 				in->ipa_ep_cfg.aggr.aggr = IPA_COALESCE;
 			else
 				in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
-			if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+			if (IPA_CLIENT_IS_LAN_CONS(in->client)) {
 				INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
 				sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
 				sys->repl_hdlr =
@@ -4683,8 +5529,11 @@
 				IPA_GENERIC_AGGR_PKT_LIMIT;
 				in->ipa_ep_cfg.aggr.aggr_time_limit =
 					IPA_GENERIC_AGGR_TIME_LIMIT;
-			} else if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
-				in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
+				if (in->client == IPA_CLIENT_APPS_LAN_COAL_CONS) {
+					in->ipa_ep_cfg.aggr.aggr_coal_l2 = true;
+					in->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
+				}
+			} else if (IPA_CLIENT_IS_WAN_CONS(in->client) ||
 				in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
 				in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
 				if (!in->ext_ioctl_v2)
@@ -5315,6 +6164,8 @@
 	 */
 	if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
 		client_type = IPA_CLIENT_APPS_WAN_COAL_CONS;
+	else if (IPA_CLIENT_IS_LAN_CONS(sys->ep->client))
+		client_type = IPA_CLIENT_APPS_LAN_COAL_CONS;
 	else
 		client_type = sys->ep->client;
 	/*
@@ -5412,6 +6263,13 @@
 	}
 }
 
+void ipa3_dealloc_common_event_ring(void)
+{
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	gsi_dealloc_evt_ring(ipa3_ctx->gsi_evt_comm_hdl);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
 int ipa3_alloc_common_event_ring(void)
 {
 	struct gsi_evt_ring_props gsi_evt_ring_props;
@@ -5486,10 +6344,10 @@
 	u32 ring_size;
 	int result;
 	gfp_t mem_flag = GFP_KERNEL;
-	u32 coale_ep_idx;
+	u32 wan_coal_ep_id, lan_coal_ep_id;
 
-	if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
-		in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
+	if (IPA_CLIENT_IS_WAN_CONS(in->client) ||
+		IPA_CLIENT_IS_LAN_CONS(in->client) ||
 		in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS ||
 		in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_PROD ||
 		in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS ||
@@ -5501,7 +6359,7 @@
 		IPAERR("EP context is empty\n");
 		return -EINVAL;
 	}
-	coale_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+
 	/*
 	 * GSI ring length is calculated based on the desc_fifo_sz
 	 * which was meant to define the BAM desc fifo. GSI descriptors
@@ -5527,11 +6385,25 @@
 			goto fail_setup_event_ring;
 
 	} else if (in->client == IPA_CLIENT_APPS_WAN_CONS &&
-			coale_ep_idx != IPA_EP_NOT_ALLOCATED &&
-			ipa3_ctx->ep[coale_ep_idx].valid == 1) {
+		IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_WAN_COAL_CONS, wan_coal_ep_id)) {
 		IPADBG("Wan consumer pipe configured\n");
 		result = ipa_gsi_setup_coal_def_channel(in, ep,
-					&ipa3_ctx->ep[coale_ep_idx]);
+					&ipa3_ctx->ep[wan_coal_ep_id]);
+		if (result) {
+			IPAERR("Failed to setup default coal GSI channel\n");
+			goto fail_setup_event_ring;
+		}
+		return result;
+	} else if (in->client == IPA_CLIENT_APPS_LAN_COAL_CONS) {
+		result = ipa_gsi_setup_event_ring(ep,
+				IPA_COMMON_EVENT_RING_SIZE, mem_flag);
+		if (result)
+			goto fail_setup_event_ring;
+	} else if (in->client == IPA_CLIENT_APPS_LAN_CONS &&
+		IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_LAN_COAL_CONS, lan_coal_ep_id)) {
+		IPADBG("Lan consumer pipe configured\n");
+		result = ipa_gsi_setup_coal_def_channel(in, ep,
+					&ipa3_ctx->ep[lan_coal_ep_id]);
 		if (result) {
 			IPAERR("Failed to setup default coal GSI channel\n");
 			goto fail_setup_event_ring;
@@ -5683,7 +6555,7 @@
 	int result;
 
 	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
-	if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+	if (IPA_CLIENT_IS_APPS_COAL_CONS(ep->client))
 		gsi_channel_props.prot = GSI_CHAN_PROT_GCI;
 	else
 		gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
@@ -6252,7 +7124,7 @@
 		ret = ipa_poll_gsi_pkt(sys, &notify);
 		if (ret)
 			break;
-		rx_skb = handle_skb_completion(&notify, true);
+		rx_skb = handle_skb_completion(&notify, true, NULL);
 		if (rx_skb) {
 			sys->pyld_hdlr(rx_skb, sys);
 			sys->repl_hdlr(sys);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 3c0cf8c..2ca24be 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -2,39 +2,7 @@
 /*
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  *
- * Changes from Qualcomm Innovation Center are provided under the following license:
- *
  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted (subject to the limitations in the
- * disclaimer below) provided that the following conditions are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *
- *     * Redistributions in binary form must reproduce the above
- *       copyright notice, this list of conditions and the following
- *       disclaimer in the documentation and/or other materials provided
- *       with the distribution.
- *
- *     * Neither the name of Qualcomm Innovation Center, Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
- * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
- * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
  */
 
 #ifndef _IPA3_I_H_
@@ -434,6 +402,9 @@
 #define IPA_MEM_INIT_VAL 0xFFFFFFFF
 
 #ifdef CONFIG_COMPAT
+#define IPA_IOC_COAL_EVICT_POLICY32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COAL_EVICT_POLICY, \
+					compat_uptr_t)
 #define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_HDR, \
 					compat_uptr_t)
@@ -1221,7 +1192,6 @@
 	struct work_struct repl_work;
 	void (*repl_hdlr)(struct ipa3_sys_context *sys);
 	struct ipa3_repl_ctx *repl;
-	struct ipa3_page_repl_ctx *page_recycle_repl;
 	u32 pkt_sent;
 	struct napi_struct *napi_obj;
 	struct list_head pending_pkts[GSI_VEID_MAX];
@@ -1241,7 +1211,6 @@
 	bool ext_ioctl_v2;
 	bool common_buff_pool;
 	struct ipa3_sys_context *common_sys;
-	struct tasklet_struct tasklet_find_freepage;
 	atomic_t page_avilable;
 	u32 napi_sort_page_thrshld_cnt;
 
@@ -1257,8 +1226,10 @@
 	struct workqueue_struct *repl_wq;
 	struct ipa3_status_stats *status_stat;
 	u32 pm_hdl;
+	struct ipa3_page_repl_ctx *page_recycle_repl;
 	struct workqueue_struct *freepage_wq;
 	struct delayed_work freepage_work;
+	struct tasklet_struct tasklet_find_freepage;
 	/* ordering is important - other immutable fields go below */
 };
 
@@ -1575,6 +1546,30 @@
 	u64 tmp_alloc;
 };
 
+struct ipa3_cache_recycle_stats {
+	u64 pkt_allocd;
+	u64 pkt_found;
+	u64 tot_pkt_replenished;
+};
+
+struct lan_coal_stats {
+	u64 coal_rx;
+	u64 coal_left_as_is;
+	u64 coal_reconstructed;
+	u64 coal_pkts;
+	u64 coal_hdr_qmap_err;
+	u64 coal_hdr_nlo_err;
+	u64 coal_hdr_pkt_err;
+	u64 coal_csum_err;
+	u64 coal_ip_invalid;
+	u64 coal_trans_invalid;
+	u64 coal_veid[GSI_VEID_MAX];
+	u64 coal_tcp;
+	u64 coal_tcp_bytes;
+	u64 coal_udp;
+	u64 coal_udp_bytes;
+};
+
 struct ipa3_stats {
 	u32 tx_sw_pkts;
 	u32 tx_hw_pkts;
@@ -1594,6 +1589,7 @@
 	u32 rmnet_ll_rx_empty;
 	u32 rmnet_ll_repl_rx_empty;
 	u32 lan_rx_empty;
+	u32 lan_rx_empty_coal;
 	u32 lan_repl_rx_empty;
 	u32 low_lat_rx_empty;
 	u32 low_lat_repl_rx_empty;
@@ -1604,11 +1600,13 @@
 	u64 lower_order;
 	u32 pipe_setup_fail_cnt;
 	struct ipa3_page_recycle_stats page_recycle_stats[3];
+	struct ipa3_cache_recycle_stats cache_recycle_stats[3];
 	u64 page_recycle_cnt[3][IPA_PAGE_POLL_THRESHOLD_MAX];
 	atomic_t num_buff_above_thresh_for_def_pipe_notified;
 	atomic_t num_buff_above_thresh_for_coal_pipe_notified;
 	atomic_t num_buff_below_thresh_for_def_pipe_notified;
 	atomic_t num_buff_below_thresh_for_coal_pipe_notified;
+	struct lan_coal_stats coal;
 	u64 num_sort_tasklet_sched[3];
 	u64 num_of_times_wq_reschd;
 	u64 page_recycle_cnt_in_tasklet;
@@ -2203,6 +2201,7 @@
  * mhi_ctrl_state: state of mhi ctrl pipes
  */
 struct ipa3_context {
+	bool coal_stopped;
 	struct ipa3_char_device_context cdev;
 	struct ipa3_ep_context ep[IPA5_MAX_NUM_PIPES];
 	bool skip_ep_cfg_shadow[IPA5_MAX_NUM_PIPES];
@@ -2215,6 +2214,7 @@
 	u32 ipa_wrapper_base;
 	u32 ipa_wrapper_size;
 	u32 ipa_cfg_offset;
+	bool set_evict_reg;
 	struct ipa3_hdr_tbl hdr_tbl[HDR_TBLS_TOTAL];
 	struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
 	struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
@@ -2377,6 +2377,8 @@
 	bool (*get_teth_port_state[IPA_MAX_CLNT])(void);
 
 	atomic_t is_ssr;
+	bool deepsleep;
+	void *subsystem_get_retval;
 	struct IpaHwOffloadStatsAllocCmdData_t
 		gsi_info[IPA_HW_PROTOCOL_MAX];
 	bool ipa_wan_skb_page;
@@ -2390,7 +2392,11 @@
 	u32 icc_num_cases;
 	u32 icc_num_paths;
 	u32 icc_clk[IPA_ICC_LVL_MAX][IPA_ICC_PATH_MAX][IPA_ICC_TYPE_MAX];
-	struct ipahal_imm_cmd_pyld *coal_cmd_pyld[2];
+#define WAN_COAL_SUB  0
+#define LAN_COAL_SUB  1
+#define ULSO_COAL_SUB 2
+#define MAX_CCP_SUB (ULSO_COAL_SUB + 1)
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld[MAX_CCP_SUB];
 	struct ipa_mem_buffer ulso_wa_cmd;
 	u32 tx_wrapper_cache_max_size;
 	struct ipa3_app_clock_vote app_clock_vote;
@@ -2771,6 +2777,36 @@
 	struct icc_path *icc_path[IPA_ICC_PATH_MAX];
 };
 
+/*
+ * When data arrives on IPA_CLIENT_APPS_LAN_COAL_CONS, said data will
+ * contain a qmap header followed by an array of the following.  The
+ * number of them in the array is always MAX_COAL_PACKET_STATUS_INFO
+ * (see below); however, only "num_nlos" (a field in the qmap header)
+ * will be valid.  The rest are to be ignored.
+ */
+struct coal_packet_status_info {
+	u16 pkt_len;
+	u8  pkt_cksum_errs;
+	u8  num_pkts;
+} __aligned(1);
+/*
+ * This is the number of the struct coal_packet_status_info that
+ * follow the qmap header.  As above, only "num_nlos" are valid.  The
+ * rest are to be ignored.
+ */
+#define MAX_COAL_PACKET_STATUS_INFO (6)
+#define VALID_NLS(nls) \
+	((nls) > 0 && (nls) <= MAX_COAL_PACKET_STATUS_INFO)
+/*
+ * The following is the total number of bits in all the pkt_cksum_errs
+ * in each of the struct coal_packet_status_info(s) that follow the
+ * qmap header.  Each bit is meant to tell us if a packet is good or
+ * bad, relative to a checksum. Given this, the max number of bits
+ * dictates the max number of packets that can be in a buffer from the
+ * IPA.
+ */
+#define MAX_COAL_PACKETS            (48)
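To make the bit layout concrete, a small sketch (mirroring the loop in
ipa3_lan_coal_rx_cb) of how the 48 checksum bits are assembled; the function
name is illustrative:

/*
 * Entry i contributes its 8 pkt_cksum_errs bits at bit position 8*i,
 * so bit j of the result covers the j-th packet in the buffer.
 */
static inline u64 example_build_cksum_mask(
	const struct coal_packet_status_info *cpsi)
{
	u64 mask = 0;
	u32 i;

	for (i = 0; i < MAX_COAL_PACKET_STATUS_INFO; i++)
		mask |= ((u64) cpsi[i].pkt_cksum_errs) << (8 * i);

	return mask; /* bit j set => packet j failed its checksum */
}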
+
 extern struct ipa3_context *ipa3_ctx;
 extern bool ipa_net_initialized;
 
@@ -2804,7 +2840,6 @@
 		bool start_chnl);
 void ipa3_client_prod_post_shutdown_cleanup(void);
 
-
 int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset,
 		enum ipa_client_type client);
 
@@ -2959,6 +2994,14 @@
 	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
 int ipa3_nat_get_sram_info(struct ipa_nat_in_sram_info *info_ptr);
 int ipa3_app_clk_vote(enum ipa_app_clock_vote_type vote_type);
+void ipa3_get_default_evict_values(
+	struct ipahal_reg_coal_evict_lru *evict_lru);
+void ipa3_default_evict_register( void );
+int ipa3_set_evict_policy(
+	struct ipa_ioc_coal_evict_policy *evict_pol);
+void start_coalescing( void );
+void stop_coalescing( void );
+bool lan_coal_enabled( void );
 
 /*
  * Messaging
@@ -3260,6 +3303,10 @@
 
 int ipa3_teth_bridge_driver_init(void);
 void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+void ipa3_lan_coal_rx_cb(
+	void                *priv,
+	enum ipa_dp_evt_type evt,
+	unsigned long        data);
 
 int _ipa_init_sram_v3(void);
 int _ipa_init_hdr_v3_0(void);
@@ -3328,6 +3375,7 @@
 void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
 			      struct ipa3_uc_hdlrs *hdlrs);
 int ipa3_uc_notify_clk_state(bool enabled);
+void ipa3_uc_interface_destroy(void);
 int ipa3_dma_setup(void);
 void ipa3_dma_shutdown(void);
 void ipa3_dma_async_memcpy_notify_cb(void *priv,
@@ -3431,7 +3479,9 @@
 void ipa3_set_resorce_groups_min_max_limits(void);
 void ipa3_set_resorce_groups_config(void);
 int ipa3_suspend_apps_pipes(bool suspend);
-void ipa3_force_close_coal(void);
+void ipa3_force_close_coal(
+	bool close_wan,
+	bool close_lan );
 int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
 	enum ipa_ip_type ip_type,
 	bool hashable,
@@ -3505,6 +3555,7 @@
 bool ipa3_is_msm_device(void);
 void ipa3_enable_dcd(void);
 void ipa3_disable_prefetch(enum ipa_client_type client);
+void ipa3_dealloc_common_event_ring(void);
 int ipa3_alloc_common_event_ring(void);
 int ipa3_allocate_dma_task_for_gsi(void);
 void ipa3_free_dma_task_for_gsi(void);
@@ -3634,6 +3685,29 @@
 	return ptr;
 }
 
+/**
+ * The following used as defaults for struct ipa_ioc_coal_evict_policy.
+ */
+#define IPA_COAL_VP_LRU_THRSHLD        0
+#define IPA_COAL_EVICTION_EN           true
+#define IPA_COAL_VP_LRU_GRAN_SEL       0
+#define IPA_COAL_VP_LRU_UDP_THRSHLD    0
+#define IPA_COAL_VP_LRU_TCP_THRSHLD    0
+#define IPA_COAL_VP_LRU_UDP_THRSHLD_EN 1
+#define IPA_COAL_VP_LRU_TCP_THRSHLD_EN 1
+#define IPA_COAL_VP_LRU_TCP_NUM        0
+
+/**
+ * enum ipa_evict_time_gran_type - Time granularity to be used with
+ * eviction timers.
+ */
+enum ipa_evict_time_gran_type {
+	IPA_EVICT_TIME_GRAN_0,
+	IPA_EVICT_TIME_GRAN_1,
+	IPA_EVICT_TIME_GRAN_2,
+	IPA_EVICT_TIME_GRAN_3,
+};
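As a usage sketch, the defaults above map onto struct ipa_ioc_coal_evict_policy
as consumed by ipa3_set_evict_policy() in ipa_utils.c; the wrapper name is
hypothetical:

/* Hypothetical caller: program the eviction policy with the defaults. */
static inline int example_set_default_evict_policy(void)
{
	struct ipa_ioc_coal_evict_policy pol = {
		.coal_vp_thrshld        = IPA_COAL_VP_LRU_THRSHLD,
		.coal_eviction_en       = IPA_COAL_EVICTION_EN,
		.coal_vp_gran_sel       = IPA_COAL_VP_LRU_GRAN_SEL,
		.coal_vp_udp_thrshld    = IPA_COAL_VP_LRU_UDP_THRSHLD,
		.coal_vp_tcp_thrshld    = IPA_COAL_VP_LRU_TCP_THRSHLD,
		.coal_vp_udp_thrshld_en = IPA_COAL_VP_LRU_UDP_THRSHLD_EN,
		.coal_vp_tcp_thrshld_en = IPA_COAL_VP_LRU_TCP_THRSHLD_EN,
		.coal_vp_tcp_num        = IPA_COAL_VP_LRU_TCP_NUM,
	};

	return ipa3_set_evict_policy(&pol);
}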
+
 /* query ipa APQ mode*/
 bool ipa3_is_apq(void);
 /* check if odl is connected */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
index 2050858..7bdb57e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -546,7 +546,9 @@
 		return -EFAULT;
 	}
 
-	kfree(ipa_interrupt_to_cb[irq_num].private_data);
+	/* Freeing the ipa3_ctx pointer here crashes the device during
+	 * interrupt removal, so only free private data that isn't it.
+	 */
+	if (ipa_interrupt_to_cb[irq_num].private_data != ipa3_ctx)
+		kfree(ipa_interrupt_to_cb[irq_num].private_data);
 	ipa_interrupt_to_cb[irq_num].deferred_flag = false;
 	ipa_interrupt_to_cb[irq_num].handler = NULL;
 	ipa_interrupt_to_cb[irq_num].private_data = NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index a69a412..79bd5de 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -26,7 +26,7 @@
 #define IPA_Q6_SERVICE_INS_ID 2
 
 #define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
-#define QMI_SEND_REQ_TIMEOUT_MS 60000
+#define QMI_SEND_REQ_TIMEOUT_MS 10000
 #define QMI_MHI_SEND_REQ_TIMEOUT_MS 1000
 
 #define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000
@@ -547,16 +547,22 @@
 	struct qmi_txn txn;
 	int ret;
 
-	if (!client_handle)
+	mutex_lock(&ipa3_qmi_lock);
+
+	if (!client_handle) {
+
+		mutex_unlock(&ipa3_qmi_lock);
 		return -EINVAL;
+	}
+
 	ret = qmi_txn_init(client_handle, &txn, resp_desc->ei_array, resp);
 
 	if (ret < 0) {
 		IPAWANERR("QMI txn init failed, ret= %d\n", ret);
+		mutex_unlock(&ipa3_qmi_lock);
 		return ret;
 	}
 
-	mutex_lock(&ipa3_qmi_lock);
 	ret = qmi_send_request(client_handle,
 		&ipa3_qmi_ctx->server_sq,
 		&txn,
@@ -565,19 +571,16 @@
 		req_desc->ei_array,
 		req);
 
-	if (unlikely(!ipa_q6_clnt)) {
-		mutex_unlock(&ipa3_qmi_lock);
-		return -EINVAL;
-	}
 
-	mutex_unlock(&ipa3_qmi_lock);
 
 	if (ret < 0) {
 		qmi_txn_cancel(&txn);
+		mutex_unlock(&ipa3_qmi_lock);
 		return ret;
 	}
-	ret = qmi_txn_wait(&txn, msecs_to_jiffies(timeout_ms));
 
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(timeout_ms));
+	mutex_unlock(&ipa3_qmi_lock);
 	return ret;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index cd917ed..f214423 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1077,9 +1077,18 @@
 	(*(entry))->ipacm_installed = user;
 
 	if ((*(entry))->rule.coalesce &&
-		(*(entry))->rule.dst == IPA_CLIENT_APPS_WAN_CONS &&
-		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
-		(*(entry))->rule.dst = IPA_CLIENT_APPS_WAN_COAL_CONS;
+		IPA_CLIENT_IS_LAN_or_WAN_CONS((*(entry))->rule.dst)) {
+		int unused;
+		if ((*(entry))->rule.dst == IPA_CLIENT_APPS_LAN_CONS) {
+			if (IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_COAL_CONS, unused)) {
+				(*(entry))->rule.dst = IPA_CLIENT_APPS_LAN_COAL_CONS;
+			}
+		} else { /* == IPA_CLIENT_APPS_WAN_CONS */
+			if (IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_COAL_CONS, unused)) {
+				(*(entry))->rule.dst = IPA_CLIENT_APPS_WAN_COAL_CONS;
+			}
+		}
+	}
 
 	if (rule->enable_stats)
 		(*entry)->cnt_idx = rule->cnt_idx;
@@ -1773,7 +1782,7 @@
 		!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) {
 		IPADBG("Deleting rule from default rt table idx=%u\n",
 			entry->tbl->idx);
-		if (entry->tbl->rule_cnt == 1) {
+		if (entry->tbl->rule_cnt == 1 && !ipa3_ctx->deepsleep) {
 			IPAERR_RL("Default tbl last rule cannot be deleted\n");
 			return -EINVAL;
 		}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
index 7a797f2..aa58b83 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #undef TRACE_SYSTEM
@@ -297,6 +298,89 @@
 	TP_printk("client=%lu", __entry->client)
 );
 
+TRACE_EVENT(
+	ipa3_replenish_rx_page_recycle,
+
+	TP_PROTO(u32 i, struct page *p, bool is_tmp_alloc),
+
+	TP_ARGS(i, p, is_tmp_alloc),
+
+	TP_STRUCT__entry(
+		__field(u32, i)
+		__field(struct page *,	p)
+		__field(bool,		is_tmp_alloc)
+		__field(unsigned long,	pfn)
+	),
+
+	TP_fast_assign(
+		__entry->i = i;
+		__entry->p = p;
+		__entry->is_tmp_alloc = is_tmp_alloc;
+		__entry->pfn = page_to_pfn(p);
+	),
+
+	TP_printk("wan_cons type=%u: page=0x%pK pfn=0x%lx tmp=%s",
+		__entry->i, __entry->p, __entry->pfn,
+		__entry->is_tmp_alloc ? "true" : "false")
+);
+
+TRACE_EVENT(
+	handle_page_completion,
+
+	TP_PROTO(struct page *p, struct sk_buff *skb, u16 len,
+		 bool is_tmp_alloc, enum ipa_client_type client),
+
+	TP_ARGS(p, skb, len, is_tmp_alloc, client),
+
+	TP_STRUCT__entry(
+		__field(struct page *,		p)
+		__field(struct sk_buff *,	skb)
+		__field(u16,			len)
+		__field(bool,			is_tmp_alloc)
+		__field(unsigned long,		pfn)
+		__field(enum ipa_client_type,	client)
+	),
+
+	TP_fast_assign(
+		__entry->p = p;
+		__entry->skb = skb;
+		__entry->len = len;
+		__entry->is_tmp_alloc = is_tmp_alloc;
+		__entry->pfn = page_to_pfn(p);
+		__entry->client = client;
+	),
+
+	TP_printk("%s: page=0x%pK pfn=0x%lx skb=0x%pK len=%u tmp=%s",
+		(__entry->client == IPA_CLIENT_APPS_WAN_CONS) ? "WAN_CONS"
+							      : "WAN_COAL_CONS",
+		__entry->p, __entry->pfn, __entry->skb, __entry->len,
+		__entry->is_tmp_alloc ? "true" : "false")
+);
+
+TRACE_EVENT(
+	ipa3_rx_napi_chain,
+
+	TP_PROTO(struct sk_buff *first_skb, struct sk_buff *prev_skb,
+		 struct sk_buff *rx_skb),
+
+	TP_ARGS(first_skb, prev_skb, rx_skb),
+
+	TP_STRUCT__entry(
+		__field(struct sk_buff *,	first_skb)
+		__field(struct sk_buff *,	prev_skb)
+		__field(struct sk_buff *,	rx_skb)
+	),
+
+	TP_fast_assign(
+		__entry->first_skb = first_skb;
+		__entry->prev_skb = prev_skb;
+		__entry->rx_skb = rx_skb;
+	),
+
+	TP_printk("first_skb=0x%pK prev_skb=0x%pK rx_skb=0x%pK",
+		__entry->first_skb, __entry->prev_skb, __entry->rx_skb)
+);
+
 #endif /* _IPA_TRACE_H */
 
 /* This part must be outside protection */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 34d98e6..2f6decb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -1224,6 +1224,16 @@
 }
 EXPORT_SYMBOL(ipa3_uc_load_notify);
 
+void ipa3_uc_interface_destroy(void)
+{
+	if (ipa3_ctx->uc_ctx.uc_inited) {
+		ipa3_remove_interrupt_handler(IPA_UC_IRQ_2);
+		ipa3_remove_interrupt_handler(IPA_UC_IRQ_1);
+		ipa3_remove_interrupt_handler(IPA_UC_IRQ_0);
+		iounmap(ipa3_ctx->uc_ctx.uc_sram_mmio);
+		ipa3_ctx->uc_ctx.uc_inited = false;
+	}
+}
 
 /**
  * ipa3_uc_send_cmd() - Send a command to the uC
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index ac998d2..de60a93 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -6531,9 +6531,94 @@
 	__stringify(RESERVERD_CONS_123),
 	__stringify(RESERVERD_PROD_124),
 	__stringify(IPA_CLIENT_TPUT_CONS),
+	__stringify(RESERVERD_PROD_126),
+	__stringify(IPA_CLIENT_APPS_LAN_COAL_CONS),
 };
 EXPORT_SYMBOL(ipa_clients_strings);
 
+static void _set_coalescing_disposition(
+	bool force_to_default )
+{
+	if ( ipa3_ctx->ipa_initialization_complete
+		 &&
+		 ipa3_ctx->ipa_hw_type >= IPA_HW_v5_5 ) {
+
+		struct ipahal_reg_coal_master_cfg master_cfg;
+
+		memset(&master_cfg, 0, sizeof(master_cfg));
+
+		ipahal_read_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
+
+		master_cfg.coal_force_to_default = force_to_default;
+
+		ipahal_write_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
+	}
+}
+
+void start_coalescing()
+{
+	if ( ipa3_ctx->coal_stopped ) {
+		_set_coalescing_disposition(false);
+		ipa3_ctx->coal_stopped = false;
+	}
+}
+
+void stop_coalescing()
+{
+	if ( ! ipa3_ctx->coal_stopped ) {
+		_set_coalescing_disposition(true);
+		ipa3_ctx->coal_stopped = true;
+	}
+}
+
+bool lan_coal_enabled()
+{
+	if ( ipa3_ctx->ipa_initialization_complete ) {
+		int ep_idx;
+		if ( IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_LAN_COAL_CONS, ep_idx) ) {
+			return true;
+		}
+	}
+	return false;
+}
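The start/stop pair above is meant to bracket work that must not race with
in-flight coalescing, as the suspend path below does. A condensed sketch of
the pattern:

/* Illustrative only: quiesce coalescing around a force-close. */
static void example_quiesce_coalescing(void)
{
	stop_coalescing();                  /* force frames to default */
	ipa3_force_close_coal(true, true);  /* flush WAN and LAN frames */

	/* ... work that must observe closed coal frames ... */

	start_coalescing();                 /* re-enable coalescing */
}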
+
+int ipa3_set_evict_policy(
+	struct ipa_ioc_coal_evict_policy *evict_pol)
+{
+	if (!evict_pol) {
+		IPAERR_RL("Bad arg evict_pol(%p)\n", evict_pol);
+		return -1;
+	}
+
+	if ( ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 )
+	{
+		struct ipahal_reg_coal_evict_lru evict_lru_reg;
+
+		memset(&evict_lru_reg, 0, sizeof(evict_lru_reg));
+
+		evict_lru_reg.coal_vp_lru_thrshld =
+			evict_pol->coal_vp_thrshld;
+		evict_lru_reg.coal_eviction_en =
+			evict_pol->coal_eviction_en;
+		evict_lru_reg.coal_vp_lru_gran_sel =
+			evict_pol->coal_vp_gran_sel;
+		evict_lru_reg.coal_vp_lru_udp_thrshld =
+			evict_pol->coal_vp_udp_thrshld;
+		evict_lru_reg.coal_vp_lru_tcp_thrshld =
+			evict_pol->coal_vp_tcp_thrshld;
+		evict_lru_reg.coal_vp_lru_udp_thrshld_en =
+			evict_pol->coal_vp_udp_thrshld_en;
+		evict_lru_reg.coal_vp_lru_tcp_thrshld_en =
+			evict_pol->coal_vp_tcp_thrshld_en;
+		evict_lru_reg.coal_vp_lru_tcp_num =
+			evict_pol->coal_vp_tcp_num;
+
+		ipahal_write_reg_fields(IPA_COAL_EVICT_LRU, &evict_lru_reg);
+	}
+
+	return 0;
+}
+
 /**
  * ipa_get_version_string() - Get string representation of IPA version
  * @ver: IPA version
@@ -6601,6 +6686,8 @@
 		break;
 	case IPA_HW_v5_1:
 		str = "5.1";
+		break;
+	case IPA_HW_v5_5:
+		str = "5.5";
+		break;
 	default:
 		str = "Invalid version";
 		break;
@@ -7449,12 +7536,13 @@
 	master_cfg.coal_ipv4_id_ignore = ipa3_ctx->coal_ipv4_id_ignore;
 	ipahal_write_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
 
-	IPADBG(": coal-ipv4-id-ignore = %s\n",
-			master_cfg.coal_ipv4_id_ignore
-			? "True" : "False");
-
+	IPADBG(
+		": coal-ipv4-id-ignore = %s\n",
+		master_cfg.coal_ipv4_id_ignore ?
+		"True" : "False");
 
 	ipa_comp_cfg();
+
 	/*
 	 * In IPA 4.2 filter and routing hashing not supported
 	 * disabling hash enable register.
@@ -11764,7 +11852,7 @@
 static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 {
 	struct ipa_ep_cfg_ctrl cfg;
-	int ipa_ep_idx, coal_ep_idx;
+	int ipa_ep_idx, wan_coal_ep_idx, lan_coal_ep_idx;
 	struct ipa3_ep_context *ep;
 	int res;
 
@@ -11795,8 +11883,6 @@
 		return 0;
 	}
 
-	coal_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
-
 	/*
 	 * Configure the callback mode only one time after starting the channel
 	 * otherwise observing IEOB interrupt received before configure callmode
@@ -11821,7 +11907,7 @@
 	/* Apps prod pipes use common event ring so cannot configure mode*/
 
 	/*
-	 * Skipping to configure mode for default wan pipe,
+	 * Skipping to configure mode for default [w|l]an pipe,
 	 * as both pipes using commong event ring. if both pipes
 	 * configure same event ring observing race condition in
 	 * updating current polling state.
@@ -11829,7 +11915,9 @@
 
 	if (IPA_CLIENT_IS_APPS_PROD(client) ||
 		(client == IPA_CLIENT_APPS_WAN_CONS &&
-			coal_ep_idx != IPA_EP_NOT_ALLOCATED))
+		 IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_COAL_CONS, wan_coal_ep_idx)) ||
+		(client == IPA_CLIENT_APPS_LAN_CONS &&
+		 IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_COAL_CONS, lan_coal_ep_idx)))
 		return 0;
 
 	if (suspend) {
@@ -11846,24 +11934,57 @@
 	return 0;
 }
 
-void ipa3_force_close_coal(void)
+void ipa3_force_close_coal(
+	bool close_wan,
+	bool close_lan )
 {
-	struct ipa3_desc desc[2];
+	struct ipa3_desc desc[ MAX_CCP_SUB ];
+
 	int ep_idx, num_desc = 0;
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
-	if (ep_idx == IPA_EP_NOT_ALLOCATED || (!ipa3_ctx->ep[ep_idx].valid))
-		return;
+	if ( close_wan
+		 &&
+		 IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_WAN_COAL_CONS, ep_idx)
+		 &&
+		 ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB] ) {
 
-	ipa3_init_imm_cmd_desc(&desc[0], ipa3_ctx->coal_cmd_pyld[0]);
-	num_desc++;
-	if (ipa3_ctx->ulso_wa) {
-		ipa3_init_imm_cmd_desc(&desc[1], ipa3_ctx->coal_cmd_pyld[1]);
+		ipa3_init_imm_cmd_desc(
+			&desc[num_desc],
+			ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]);
+
 		num_desc++;
 	}
-	IPADBG("Sending %d descriptor for coal force close\n", num_desc);
-	if (ipa3_send_cmd(num_desc, desc))
-		IPADBG("ipa3_send_cmd timedout\n");
+
+	if ( close_lan
+		 &&
+		 IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_LAN_COAL_CONS, ep_idx)
+		 &&
+		 ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB] ) {
+
+		ipa3_init_imm_cmd_desc(
+			&desc[num_desc],
+			ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]);
+
+		num_desc++;
+	}
+
+	if (ipa3_ctx->ulso_wa && ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB] ) {
+		ipa3_init_imm_cmd_desc(
+			&desc[num_desc],
+			ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]);
+
+		num_desc++;
+	}
+
+	if ( num_desc ) {
+		IPADBG("Sending %d descriptor(s) for coal force close\n", num_desc);
+		if ( ipa3_send_cmd_timeout(
+				 num_desc,
+				 desc,
+				 IPA_COAL_CLOSE_FRAME_CMD_TIMEOUT_MSEC) ) {
+			IPADBG("ipa3_send_cmd_timeout timedout\n");
+		}
+	}
 }
 
 int ipa3_suspend_apps_pipes(bool suspend)
@@ -11872,25 +11993,45 @@
 	struct ipa_ep_cfg_holb holb_cfg;
 	int odl_ep_idx;
 
+	if (suspend) {
+		stop_coalescing();
+		ipa3_force_close_coal(true, true);
+	}
+
 	/* As per HPG first need start/stop coalescing channel
 	 * then default one. Coalescing client number was greater then
 	 * default one so starting the last client.
 	 */
 	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, suspend);
-	if (res == -EAGAIN)
+	if (res == -EAGAIN) {
+		if (suspend) start_coalescing();
 		goto undo_coal_cons;
+	}
 
 	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, suspend);
-	if (res == -EAGAIN)
+	if (res == -EAGAIN) {
+		if (suspend) start_coalescing();
 		goto undo_wan_cons;
+	}
+
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_COAL_CONS, suspend);
+	if (res == -EAGAIN) {
+		if (suspend) start_coalescing();
+		goto undo_lan_coal_cons;
+	}
 
 	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, suspend);
-	if (res == -EAGAIN)
+	if (res == -EAGAIN) {
+		if (suspend) start_coalescing();
 		goto undo_lan_cons;
+	}
+
+	if (suspend) start_coalescing();
 
 	res = _ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, suspend);
-	if (res == -EAGAIN)
+	if (res == -EAGAIN) {
 		goto undo_odl_cons;
+	}
 
 	odl_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_ODL_DPL_CONS);
 	if (odl_ep_idx != IPA_EP_NOT_ALLOCATED && ipa3_ctx->ep[odl_ep_idx].valid) {
@@ -11912,13 +12053,15 @@
 
 	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS,
 		suspend);
-	if (res == -EAGAIN)
+	if (res == -EAGAIN) {
 		goto undo_qmap_cons;
+	}
 
 	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS,
 		suspend);
-	if (res == -EAGAIN)
+	if (res == -EAGAIN) {
 		goto undo_low_lat_data_cons;
+	}
 
 	if (suspend) {
 		struct ipahal_reg_tx_wrapper tx;
@@ -11996,6 +12139,8 @@
 	_ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, !suspend);
 undo_lan_cons:
 	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, !suspend);
+undo_lan_coal_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_COAL_CONS, !suspend);
 undo_wan_cons:
 	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, !suspend);
 	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, !suspend);
@@ -12057,57 +12202,98 @@
 	struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
 	struct ipahal_imm_cmd_register_read dummy_reg_read = { 0 };
 	struct ipahal_reg_valmask valmask;
-	int ep_idx;
 	u32 offset = 0;
+	int ep_idx, num_desc = 0;
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
-	if (ep_idx == IPA_EP_NOT_ALLOCATED)
-		return 0;
-	IPADBG("Allocate coal close frame cmd\n");
-	reg_write_cmd.skip_pipeline_clear = false;
-	if (ipa3_ctx->ulso_wa) {
-		reg_write_cmd.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
-	} else {
-		reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
-	}
-	if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
-		offset = ipahal_get_reg_ofst(
-			IPA_AGGR_FORCE_CLOSE);
-	else
-		offset = ipahal_get_ep_reg_offset(
-			IPA_AGGR_FORCE_CLOSE_n, ep_idx);
-	reg_write_cmd.offset = offset;
-	ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
-	reg_write_cmd.value = valmask.val;
-	reg_write_cmd.value_mask = valmask.mask;
-	ipa3_ctx->coal_cmd_pyld[0] =
-		ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
-			&reg_write_cmd, false);
-	if (!ipa3_ctx->coal_cmd_pyld[0]) {
-		IPAERR("fail construct register_write imm cmd\n");
-		ipa_assert();
-		return 0;
+	if ( IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_COAL_CONS, ep_idx) ) {
+
+		IPADBG("Allocate wan coal close frame cmd\n");
+
+		reg_write_cmd.skip_pipeline_clear = false;
+		if (ipa3_ctx->ulso_wa) {
+			reg_write_cmd.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
+		} else {
+			reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		}
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
+			offset = ipahal_get_reg_ofst(
+				IPA_AGGR_FORCE_CLOSE);
+		else
+			offset = ipahal_get_ep_reg_offset(
+				IPA_AGGR_FORCE_CLOSE_n, ep_idx);
+		reg_write_cmd.offset = offset;
+		ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+		reg_write_cmd.value = valmask.val;
+		reg_write_cmd.value_mask = valmask.mask;
+		ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB] =
+			ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE,
+				&reg_write_cmd, false);
+		if (!ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]) {
+			IPAERR("fail construct register_write imm cmd\n");
+			ipa_assert();
+			return 0;
+		}
+		num_desc++;
 	}
 
-	if (ipa3_ctx->ulso_wa) {
-		/* dummary regsiter read IC with HPS clear*/
+	if ( IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_COAL_CONS, ep_idx) ) {
+
+		IPADBG("Allocate lan coal close frame cmd\n");
+
+		reg_write_cmd.skip_pipeline_clear = false;
+		if (ipa3_ctx->ulso_wa) {
+			reg_write_cmd.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
+		} else {
+			reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		}
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
+			offset = ipahal_get_reg_ofst(
+				IPA_AGGR_FORCE_CLOSE);
+		else
+			offset = ipahal_get_ep_reg_offset(
+				IPA_AGGR_FORCE_CLOSE_n, ep_idx);
+		reg_write_cmd.offset = offset;
+		ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+		reg_write_cmd.value = valmask.val;
+		reg_write_cmd.value_mask = valmask.mask;
+		ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB] =
+			ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE,
+				&reg_write_cmd, false);
+		if (!ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]) {
+			IPAERR("fail construct register_write imm cmd\n");
+			ipa_assert();
+			return 0;
+		}
+		num_desc++;
+	}
+
+	if ( ipa3_ctx->ulso_wa ) {
+		/*
+		 * Dummy register read IC with HPS clear
+		 */
 		ipa3_ctx->ulso_wa_cmd.size = 4;
-		ipa3_ctx->ulso_wa_cmd.base = dma_alloc_coherent(ipa3_ctx->pdev,
-			ipa3_ctx->ulso_wa_cmd.size,
-			&ipa3_ctx->ulso_wa_cmd.phys_base, GFP_KERNEL);
+		ipa3_ctx->ulso_wa_cmd.base =
+			dma_alloc_coherent(
+				ipa3_ctx->pdev,
+				ipa3_ctx->ulso_wa_cmd.size,
+				&ipa3_ctx->ulso_wa_cmd.phys_base, GFP_KERNEL);
 		if (ipa3_ctx->ulso_wa_cmd.base == NULL) {
 			ipa_assert();
 		}
-		offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n,
+		offset = ipahal_get_reg_n_ofst(
+			IPA_STAT_QUOTA_BASE_n,
 			ipa3_ctx->ee);
 		dummy_reg_read.skip_pipeline_clear = false;
 		dummy_reg_read.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		dummy_reg_read.offset = offset;
 		dummy_reg_read.sys_addr = ipa3_ctx->ulso_wa_cmd.phys_base;
-		ipa3_ctx->coal_cmd_pyld[1] = ipahal_construct_imm_cmd(
-			IPA_IMM_CMD_REGISTER_READ,
-			&dummy_reg_read, false);
-		if (!ipa3_ctx->coal_cmd_pyld[1]) {
+		ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB] =
+			ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_READ,
+				&dummy_reg_read, false);
+		if (!ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]) {
 			IPAERR("failed to construct DUMMY READ IC\n");
 			ipa_assert();
 		}
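
Once built, whichever coal_cmd_pyld[] slots were populated can later
be chained into descriptors and pushed to the IPA. A minimal usage
sketch, assuming the driver's usual ipa3_init_imm_cmd_desc() and
ipa3_send_cmd() helpers (their exact signatures are assumptions
here), with the three slots named in this patch:

/*
 * Sketch only: send every close-frame command constructed above as
 * one descriptor chain.
 */
static int ipa3_send_coal_close_frame_sketch(void)
{
	int subs[] = { WAN_COAL_SUB, LAN_COAL_SUB, ULSO_COAL_SUB };
	struct ipa3_desc desc[ARRAY_SIZE(subs)];
	int i, num = 0;

	for (i = 0; i < ARRAY_SIZE(subs); i++)
		if (ipa3_ctx->coal_cmd_pyld[subs[i]])
			ipa3_init_imm_cmd_desc(&desc[num++],
				ipa3_ctx->coal_cmd_pyld[subs[i]]);

	return num ? ipa3_send_cmd(num, desc) : 0;
}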
@@ -12118,15 +12304,27 @@
 
 void ipa3_free_coal_close_frame(void)
 {
-	if (ipa3_ctx->coal_cmd_pyld[0])
-		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[0]);
+	if (ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]) {
+		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]);
+	}
 
-	if (ipa3_ctx->coal_cmd_pyld[1]) {
-		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[1]);
-		dma_free_coherent(ipa3_ctx->pdev, ipa3_ctx->ulso_wa_cmd.size,
-			ipa3_ctx->ulso_wa_cmd.base, ipa3_ctx->ulso_wa_cmd.phys_base);
+	if (ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]) {
+		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]);
+	}
+
+	if (ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]) {
+		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]);
+	}
+
+	if (ipa3_ctx->ulso_wa_cmd.base) {
+		dma_free_coherent(
+			ipa3_ctx->pdev,
+			ipa3_ctx->ulso_wa_cmd.size,
+			ipa3_ctx->ulso_wa_cmd.base,
+			ipa3_ctx->ulso_wa_cmd.phys_base);
 	}
 }
+
 /**
  * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
  *
@@ -12678,6 +12876,7 @@
 	case IPA_HW_v4_9:
 	case IPA_HW_v4_11:
 	case IPA_HW_v5_1:
+	case IPA_HW_v5_5:
 		return true;
 	default:
 		IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type);
@@ -13463,6 +13662,7 @@
 	ipa_send_mhi_endp_ind_to_modem();
 }
 EXPORT_SYMBOL(ipa3_update_mhi_ctrl_state);
+
 /**
  * ipa3_setup_uc_act_tbl() - IPA setup uc_act_tbl
  *
@@ -13685,6 +13885,24 @@
 	return res;
 }
 
+void ipa3_default_evict_register(void)
+{
+	struct ipahal_reg_coal_evict_lru evict_lru;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+	    !ipa3_ctx->set_evict_reg) {
+		ipa3_ctx->set_evict_reg = true;
+
+		IPADBG("Setting COAL eviction register with default values\n");
+
+		ipa3_get_default_evict_values(&evict_lru);
+
+		ipahal_write_reg_fields(IPA_COAL_EVICT_LRU, &evict_lru);
+	}
+}
+
 /**
  * ipa3_del_socksv5_conn() - IPA add socksv5_conn
  *
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 42c5918..c76a7e2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -57,8 +57,11 @@
 	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_CSUM),
 };
 
+/*
+ * Forward declarations.
+ */
 static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
-
+static int ipahal_qmap_init(enum ipa_hw_type ipa_hw_type);
 
 static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
 	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
@@ -2576,6 +2579,12 @@
 		goto bail_free_ctx;
 	}
 
+	if (ipahal_qmap_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal qmap\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
 	ipahal_hdr_init(ipa_hw_type);
 
 	if (ipahal_fltrt_init(ipa_hw_type)) {
@@ -2636,3 +2645,184 @@
 		mem->phys_base = 0;
 	}
 }
+
+/*
+ * ***************************************************************
+ *
+ * To follow, a generalized qmap header manipulation API.
+ *
+ * The functions immediately following this comment are
+ * version-specific qmap parsing functions.  They are referenced in
+ * the ipahal_qmap_parse_tbl below.
+ *
+ * ***************************************************************
+ */
+void ipa_qmap_hdr_parse_v4_5(
+	union qmap_hdr_u*     qmap_hdr,
+	struct qmap_hdr_data* qmap_data_rslt )
+{
+	qmap_data_rslt->cd = qmap_hdr->qmap4_5.cd;
+	qmap_data_rslt->qmap_next_hdr = qmap_hdr->qmap4_5.qmap_next_hdr;
+	qmap_data_rslt->pad = qmap_hdr->qmap4_5.pad;
+	qmap_data_rslt->mux_id = qmap_hdr->qmap4_5.mux_id;
+	qmap_data_rslt->packet_len_with_pad = qmap_hdr->qmap4_5.packet_len_with_pad;
+
+	qmap_data_rslt->hdr_type = qmap_hdr->qmap4_5.hdr_type;
+	qmap_data_rslt->coal_next_hdr = qmap_hdr->qmap4_5.coal_next_hdr;
+	qmap_data_rslt->zero_checksum = qmap_hdr->qmap4_5.zero_checksum;
+}
+
+void ipa_qmap_hdr_parse_v5_0(
+	union qmap_hdr_u*     qmap_hdr,
+	struct qmap_hdr_data* qmap_data_rslt )
+{
+	qmap_data_rslt->cd = qmap_hdr->qmap5_0.cd;
+	qmap_data_rslt->qmap_next_hdr = qmap_hdr->qmap5_0.qmap_next_hdr;
+	qmap_data_rslt->pad = qmap_hdr->qmap5_0.pad;
+	qmap_data_rslt->mux_id = qmap_hdr->qmap5_0.mux_id;
+	qmap_data_rslt->packet_len_with_pad = qmap_hdr->qmap5_0.packet_len_with_pad;
+
+	qmap_data_rslt->hdr_type = qmap_hdr->qmap5_0.hdr_type;
+	qmap_data_rslt->coal_next_hdr = qmap_hdr->qmap5_0.coal_next_hdr;
+	qmap_data_rslt->ip_id_cfg = qmap_hdr->qmap5_0.ip_id_cfg;
+	qmap_data_rslt->zero_checksum = qmap_hdr->qmap5_0.zero_checksum;
+	qmap_data_rslt->additional_hdr_size = qmap_hdr->qmap5_0.additional_hdr_size;
+	qmap_data_rslt->segment_size = qmap_hdr->qmap5_0.segment_size;
+}
+
+void ipa_qmap_hdr_parse_v5_5(
+	union qmap_hdr_u*     qmap_hdr,
+	struct qmap_hdr_data* qmap_data_rslt )
+{
+	qmap_data_rslt->cd = qmap_hdr->qmap5_5.cd;
+	qmap_data_rslt->qmap_next_hdr = qmap_hdr->qmap5_5.qmap_next_hdr;
+	qmap_data_rslt->pad = qmap_hdr->qmap5_5.pad;
+	qmap_data_rslt->mux_id = qmap_hdr->qmap5_5.mux_id;
+	qmap_data_rslt->packet_len_with_pad = ntohs(qmap_hdr->qmap5_5.packet_len_with_pad);
+
+	qmap_data_rslt->hdr_type = qmap_hdr->qmap5_5.hdr_type;
+	qmap_data_rslt->coal_next_hdr = qmap_hdr->qmap5_5.coal_next_hdr;
+	qmap_data_rslt->chksum_valid = qmap_hdr->qmap5_5.chksum_valid;
+	qmap_data_rslt->num_nlos = qmap_hdr->qmap5_5.num_nlos;
+	qmap_data_rslt->inc_ip_id = qmap_hdr->qmap5_5.inc_ip_id;
+	qmap_data_rslt->rnd_ip_id = qmap_hdr->qmap5_5.rnd_ip_id;
+	qmap_data_rslt->close_value = qmap_hdr->qmap5_5.close_value;
+	qmap_data_rslt->close_type = qmap_hdr->qmap5_5.close_type;
+	qmap_data_rslt->vcid = qmap_hdr->qmap5_5.vcid;
+}
+
+/*
+ * Structure used to describe a version specific qmap parsing table.
+ */
+struct ipahal_qmap_parse_s {
+	/*
+	 * Function prototype for a version specific qmap parsing
+	 * function.
+	 */
+	void (*parse)(
+		union qmap_hdr_u*     qmap_hdr,
+		struct qmap_hdr_data* qmap_data_rslt );
+};
+
+/*
+ * Table used to contain and drive version specific qmap parsing
+ * functions.
+ */
+static struct ipahal_qmap_parse_s ipahal_qmap_parse_tbl[IPA_HW_MAX] = {
+	/* IPAv4.5 */
+	[IPA_HW_v4_5] = {
+		ipa_qmap_hdr_parse_v4_5
+	},
+	/* IPAv5.0 */
+	[IPA_HW_v5_0] = {
+		ipa_qmap_hdr_parse_v5_0
+	},
+	/* IPAv5.5 */
+	[IPA_HW_v5_5] = {
+		ipa_qmap_hdr_parse_v5_5
+	},
+};
+
+static int ipahal_qmap_init(
+	enum ipa_hw_type ipa_hw_type)
+{
+	struct ipahal_qmap_parse_s zero_obj;
+	int i;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if (ipa_hw_type < 0 || ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+
+	for (i = IPA_HW_v4_5; i < ipa_hw_type; i++) {
+
+		if (memcmp(&ipahal_qmap_parse_tbl[i+1],
+				   &zero_obj,
+				   sizeof(struct ipahal_qmap_parse_s)) == 0) {
+			memcpy(
+				&ipahal_qmap_parse_tbl[i+1],
+				&ipahal_qmap_parse_tbl[i],
+				sizeof(struct ipahal_qmap_parse_s));
+		} else {
+			if (ipahal_qmap_parse_tbl[i+1].parse == 0) {
+				IPAHAL_ERR(
+					"QMAP parse table missing parse function ipa_ver=%d\n",
+					i+1);
+				WARN_ON(1);
+			}
+		}
+	}
+
+	return 0;
+}
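
The loop above copy-forwards parser callbacks so that hardware
versions without a dedicated table entry inherit the nearest earlier
one (e.g. a hypothetical v4.7 target would fall back to the v4_5
parser). A standalone illustration of the same back-fill pattern,
with made-up slot values:

/* Standalone demo of the copy-forward back-fill (hypothetical data). */
#include <stdio.h>

#define SLOTS 8

int main(void)
{
	const char *tbl[SLOTS] = { [2] = "v4_5", [5] = "v5_0", [7] = "v5_5" };
	int i, hw = 6; /* pretend the running hw type maps to slot 6 */

	for (i = 2; i < hw; i++)
		if (!tbl[i + 1])
			tbl[i + 1] = tbl[i]; /* inherit previous parser */

	for (i = 2; i <= hw; i++)
		printf("slot %d -> %s\n", i, tbl[i]);
	return 0;
}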
+
+/*
+ * FUNCTION: ipahal_qmap_parse()
+ *
+ * The following function is to be called when version-specific qmap
+ * parsing is required.
+ *
+ * ARGUMENTS:
+ *
+ *   unparsed_qmap
+ *
+ *     The QMAP header off of a freshly received data packet.  As per
+ *     the architecture documentation, the data contained herein will
+ *     be in network order.
+ *
+ *   qmap_data_rslt
+ *
+ *     A location to store the parsed data from unparsed_qmap above.
+ */
+int ipahal_qmap_parse(
+	const void*           unparsed_qmap,
+	struct qmap_hdr_data* qmap_data_rslt )
+{
+	union qmap_hdr_u qmap_hdr;
+
+	IPAHAL_DBG_LOW("Parse qmap/coal header\n");
+
+	if (!unparsed_qmap || !qmap_data_rslt) {
+		IPAHAL_ERR(
+			"Input Error: unparsed_qmap=%pK qmap_data_rslt=%pK\n",
+			unparsed_qmap, qmap_data_rslt);
+		return -EINVAL;
+	}
+
+	if (ipahal_ctx->hw_type < IPA_HW_v4_5) {
+		IPAHAL_ERR(
+			"Unsupported qmap parse for IPA HW type (%d)\n",
+			ipahal_ctx->hw_type);
+		return -EINVAL;
+	}
+
+	ipahal_qmap_ntoh(unparsed_qmap, &qmap_hdr);
+
+	ipahal_qmap_parse_tbl[ipahal_ctx->hw_type].parse(&qmap_hdr, qmap_data_rslt);
+
+	return 0;
+}
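
A minimal caller sketch (not part of this patch), assuming the
receive path hands us a buffer whose first bytes are the QMAP/coal
header:

/*
 * Usage sketch: parse the QMAP header at the start of a received
 * skb and log a few of the decoded fields.
 */
static void qmap_parse_example(const struct sk_buff *skb)
{
	struct qmap_hdr_data hdr;

	if (ipahal_qmap_parse(skb->data, &hdr) == 0)
		IPAHAL_DBG_LOW("mux_id=%u len=%u num_nlos=%u\n",
			hdr.mux_id, hdr.packet_len_with_pad, hdr.num_nlos);
}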
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 0aafc74..4e4fa02 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -843,4 +843,295 @@
 */
 u32 ipahal_get_ep_reg_idx(u32 ep_num);
 
+/*
+ * ***************************************************************
+ *
+ * To follow, a generalized qmap header manipulation API.
+ *
+ * ***************************************************************
+ */
+/**
+ * qmap_hdr_v4_5 -
+ *
+ * @cd -
+ * @qmap_next_hdr -
+ * @pad -
+ * @mux_id -
+ * @packet_len_with_pad -
+ * @hdr_type -
+ * @coal_next_hdr -
+ * @zero_checksum -
+ *
+ * The following bit layout is when the data are in host order.
+ *
+ * FIXME FINDME Need to be reordered properly to reflect network
+ *              ordering as seen by a little endian host (qmap_hdr_v5_5
+ *              below is properly done).
+ */
+struct qmap_hdr_v4_5 {
+	/*
+	 * 32 bits of qmap header to follow
+	 */
+	u64 cd: 1;
+	u64 qmap_next_hdr: 1;
+	u64 pad: 6;
+	u64 mux_id: 8;
+	u64 packet_len_with_pad: 16;
+	/*
+	 * 32 bits of coalescing frame header to follow
+	 */
+	u64 hdr_type: 7;
+	u64 coal_next_hdr: 1;
+	u64 zero_checksum: 1;
+	u64 rsrvd1: 7;
+	u64 rsrvd2: 16;
+} __packed;
+
+/**
+ * qmap_hdr_v5_0 -
+ *
+ * @cd -
+ * @qmap_next_hdr -
+ * @pad -
+ * @mux_id -
+ * @packet_len_with_pad -
+ * @hdr_type -
+ * @coal_next_hdr -
+ * @ip_id_cfg -
+ * @zero_checksum -
+ * @additional_hdr_size -
+ * @segment_size -
+ *
+ * The following bit layout is when the data are in host order.
+ *
+ * FIXME FINDME Need to be reordered properly to reflect network
+ *              ordering as seen by a little endian host (qmap_hdr_v5_5
+ *              below is properly done).
+ */
+struct qmap_hdr_v5_0 {
+	/*
+	 * 32 bits of qmap header to follow
+	 */
+	u64 cd: 1;
+	u64 qmap_next_hdr: 1;
+	u64 pad: 6;
+	u64 mux_id: 8;
+	u64 packet_len_with_pad: 16;
+	/*
+	 * 32 bits of coalescing frame header to follow
+	 */
+	u64 hdr_type: 7;
+	u64 coal_next_hdr: 1;
+	u64 ip_id_cfg: 1;
+	u64 zero_checksum: 1;
+	u64 rsrvd: 1;
+	u64 additional_hdr_size: 5;
+	u64 segment_size: 16;
+} __packed;
+
+/**
+ * qmap_hdr_v5_5 -
+ *
+ * @cd -
+ * @qmap_next_hdr -
+ * @pad -
+ * @mux_id -
+ * @packet_len_with_pad -
+ * @hdr_type -
+ * @coal_next_hdr -
+ * @chksum_valid -
+ * @num_nlos -
+ * @inc_ip_id -
+ * @rnd_ip_id -
+ * @close_value -
+ * @close_type -
+ * @vcid -
+ *
+ * NOTE:
+ *
+ *   The layout below is different when compared against
+ *   documentation, which shows the fields as they are in network byte
+ *   order - and network byte order is how we receive the data from
+ *   the IPA.  To avoid using cycles converting from network to host
+ *   order, we've defined the structure below such that we can access
+ *   the correct fields while the data are still in network order.
+ */
+struct qmap_hdr_v5_5 {
+	/*
+	 * 32 bits of qmap header to follow
+	 */
+	u8 pad: 6;
+	u8 qmap_next_hdr: 1;
+	u8 cd: 1;
+	u8 mux_id;
+	u16 packet_len_with_pad;
+	/*
+	 * 32 bits of coalescing frame header to follow
+	 */
+	u8 coal_next_hdr: 1;
+	u8 hdr_type: 7;
+	u8 rsrvd1: 2;
+	u8 rnd_ip_id: 1;
+	u8 inc_ip_id: 1;
+	u8 num_nlos: 3;
+	u8 chksum_valid: 1;
+
+	u8 close_type: 4;
+	u8 close_value: 4;
+	u8 rsrvd2: 4;
+	u8 vcid: 4;
+} __packed;
+
+/**
+ * qmap_hdr_u -
+ *
+ * The following is a union of all of the qmap versions above.
+ *
+ * NOTE WELL: REMEMBER to keep it in sync with the bit structure
+ *            definitions above.
+ */
+union qmap_hdr_u {
+	struct qmap_hdr_v4_5 qmap4_5;
+	struct qmap_hdr_v5_0 qmap5_0;
+	struct qmap_hdr_v5_5 qmap5_5;
+	u32                  words[2]; /* used to flip between network and host order */
+} __packed;
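
Because the words[] member is what the ntoh/hton helpers below swap,
the union must remain exactly two 32-bit words. A compile-time guard
such as the following sketch (not part of this patch) would catch a
drifting bit layout:

/* Sketch: fail the build if a layout change breaks the words[] alias. */
static inline void qmap_hdr_layout_check(void)
{
	BUILD_BUG_ON(sizeof(union qmap_hdr_u) != 2 * sizeof(u32));
	BUILD_BUG_ON(sizeof(struct qmap_hdr_v5_5) != sizeof(union qmap_hdr_u));
}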
+
+/**
+ * qmap_hdr_data -
+ *
+ * The following is an aggregation of the qmap header bit structures
+ * above.
+ *
+ * NOTE WELL: REMEMBER to keep it in sync with the bit structure
+ *            definitions above.
+ */
+struct qmap_hdr_data {
+	/*
+	 * Data from qmap header to follow
+	 */
+	u8 cd;
+	u8 qmap_next_hdr;
+	u8 pad;
+	u8 mux_id;
+	u16 packet_len_with_pad;
+	/*
+	 * Data from coalescing frame header to follow
+	 */
+	u8 hdr_type;
+	u8 coal_next_hdr;
+	u8 ip_id_cfg;
+	u8 zero_checksum;
+	u8 additional_hdr_size;
+	u16 segment_size;
+	u8 chksum_valid;
+	u8 num_nlos;
+	u8 inc_ip_id;
+	u8 rnd_ip_id;
+	u8 close_value;
+	u8 close_type;
+	u8 vcid;
+};
+
+/**
+ * FUNCTION: ipahal_qmap_parse()
+ *
+ * The following function is to be called when version-specific qmap
+ * parsing is required.
+ *
+ * ARGUMENTS:
+ *
+ *   unparsed_qmap
+ *
+ *     The QMAP header off of a freshly received data packet.  As per
+ *     the architecture documentation, the data contained herein will
+ *     be in network order.
+ *
+ *   qmap_data_rslt
+ *
+ *     A location to store the parsed data from unparsed_qmap above.
+ */
+int ipahal_qmap_parse(
+	const void*           unparsed_qmap,
+	struct qmap_hdr_data* qmap_data_rslt);
+
+
+/**
+ * FUNCTION: ipahal_qmap_ntoh()
+ *
+ * The following function will take a QMAP header, which you know is
+ * in network order, and convert it to host order.
+ *
+ * NOTE WELL: Once in host order, the data will align with the bit
+ *            descriptions in the headers above.
+ *
+ * ARGUMENTS:
+ *
+ *   src_data_from_packet
+ *
+ *     The QMAP header off of a freshly received data packet.  As per
+ *     the architecture documentation, the data contained herein will
+ *     be in network order.
+ *
+ *  dst_result
+ *
+ *    A location to where the original data will be copied, then
+ *    converted to host order.
+ */
+static inline void ipahal_qmap_ntoh(
+	const void*       src_data_from_packet,
+	union qmap_hdr_u* dst_result)
+{
+	/*
+	 * Nothing to do, since we define the bit fields in the
+	 * structure, such that we can access them correctly while
+	 * keeping the data in network order...
+	 */
+	if (src_data_from_packet && dst_result) {
+		memcpy(
+			dst_result,
+			src_data_from_packet,
+			sizeof(union qmap_hdr_u));
+	}
+}
+
+/**
+ * FUNCTION: ipahal_qmap_hton()
+ *
+ * The following function will take QMAP data that you've assembled
+ * in host order (i.e. using the bit structure definitions above),
+ * and convert it to network order.
+ *
+ * This function is to be used for QMAP data destined for network
+ * transmission.
+ *
+ * ARGUMENTS:
+ *
+ *   src_data_from_host
+ *
+ *     QMAP data in host order.
+ *
+ *  dst_result
+ *
+ *    A location to where the host ordered data above will be copied,
+ *    then converted to network order.
+ */
+static inline void ipahal_qmap_hton(
+	union qmap_hdr_u* src_data_from_host,
+	void*             dst_result)
+{
+	if (src_data_from_host && dst_result) {
+		memcpy(
+			dst_result,
+			src_data_from_host,
+			sizeof(union qmap_hdr_u));
+		/*
+		 * Reusing variable below to do the host to network swap...
+		 */
+		src_data_from_host = (union qmap_hdr_u*) dst_result;
+		src_data_from_host->words[0] = htonl(src_data_from_host->words[0]);
+		src_data_from_host->words[1] = htonl(src_data_from_host->words[1]);
+	}
+}
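
A short usage sketch with illustrative values: assemble a header via
the host-order v5.0 bit structure, then serialize it for the wire:

/* Sketch: build a host-order QMAP header and emit it in network order. */
static void qmap_hton_example(void *wire_buf)
{
	union qmap_hdr_u hdr = { 0 };

	hdr.qmap5_0.mux_id = 3;                 /* example mux id */
	hdr.qmap5_0.packet_len_with_pad = 1500; /* example length */
	ipahal_qmap_hton(&hdr, wire_buf);
}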
+
 #endif /* _IPAHAL_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index f42ebdf..a37ec31 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -3946,6 +3946,9 @@
 	}
 
 	switch (code) {
+#if IS_ENABLED(CONFIG_DEEPSLEEP)
+	case SUBSYS_BEFORE_DS_ENTRY:
+#endif
 #if IS_ENABLED(CONFIG_QCOM_Q6V5_PAS)
 	case QCOM_SSR_BEFORE_SHUTDOWN:
 #else
@@ -3972,6 +3975,17 @@
 		ipa3_odl_pipe_cleanup(true);
 		IPAWANINFO("IPA BEFORE_SHUTDOWN handling is complete\n");
 		break;
+#if IS_ENABLED(CONFIG_DEEPSLEEP)
+	case SUBSYS_AFTER_DS_ENTRY:
+		IPAWANINFO("IPA Received AFTER DEEPSLEEP ENTRY\n");
+		if (atomic_read(&rmnet_ipa3_ctx->is_ssr) &&
+				ipa3_ctx_get_type(IPA_HW_TYPE) < IPA_HW_v4_0)
+			ipa3_q6_post_shutdown_cleanup();
+
+		IPAWANINFO("AFTER DEEPSLEEP ENTRY handling is complete\n");
+		break;
+#endif
+
 #if IS_ENABLED(CONFIG_QCOM_Q6V5_PAS)
 	case QCOM_SSR_AFTER_SHUTDOWN:
 #else
@@ -3997,6 +4011,21 @@
 			ipa3_client_prod_post_shutdown_cleanup();
 		IPAWANINFO("IPA AFTER_SHUTDOWN handling is complete\n");
 		break;
+#if IS_ENABLED(CONFIG_DEEPSLEEP)
+	case SUBSYS_BEFORE_DS_EXIT:
+		IPAWANINFO("IPA received BEFORE DEEPSLEEP EXIT\n");
+		if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+			/* clean up cached QMI msg/handlers */
+			ipa3_qmi_service_exit();
+			ipa3_q6_pre_powerup_cleanup();
+		}
+		/* hold a proxy vote for the modem. */
+		ipa3_proxy_clk_vote(atomic_read(&rmnet_ipa3_ctx->is_ssr));
+		ipa3_reset_freeze_vote();
+		IPAWANINFO("BEFORE DEEPSLEEP EXIT handling is complete\n");
+		break;
+#endif
+
 #if IS_ENABLED(CONFIG_QCOM_Q6V5_PAS)
 	case QCOM_SSR_BEFORE_POWERUP:
 #else
@@ -4013,6 +4042,9 @@
 		ipa3_reset_freeze_vote();
 		IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n");
 		break;
+#if IS_ENABLED(CONFIG_DEEPSLEEP)
+	case SUBSYS_AFTER_DS_EXIT:
+#endif
 #if IS_ENABLED(CONFIG_QCOM_Q6V5_PAS)
 	case QCOM_SSR_AFTER_POWERUP:
 #else
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
index ceb6051..9f6b486 100644
--- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -236,6 +236,11 @@
 {
 	int res, i;
 
+	if (ipa3_teth_ctx) {
+		TETH_DBG("Tethering bridge already initialized\n");
+		return 0;
+	}
+
 	TETH_DBG("Tethering bridge driver init\n");
 	ipa3_teth_ctx = kzalloc(sizeof(*ipa3_teth_ctx), GFP_KERNEL);
 	if (!ipa3_teth_ctx)