Merge "msm: ipa3: Changes to support 2X PINE."
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index e16399b..06bf6fb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -49,7 +49,7 @@
 #endif
 
 #define DRV_NAME "ipa"
-
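+/* delay (in msec) before retrying IPA FW loading after a failed attempt */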
+#define DELAY_BEFORE_FW_LOAD 500
 #define IPA_SUBSYSTEM_NAME "ipa_fws"
 #define IPA_UC_SUBSYSTEM_NAME "ipa_uc"
 
@@ -134,6 +134,7 @@
 
 static void ipa3_load_ipa_fw(struct work_struct *work);
 static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);
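+/* delayed work used to retry FW loading when a previous attempt failed */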
+static DECLARE_DELAYED_WORK(ipa3_fw_load_failure_handle, ipa3_load_ipa_fw);
 
 static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
 static DECLARE_DELAYED_WORK(ipa_dec_clients_disable_clks_on_wq_work,
@@ -7983,11 +7984,14 @@
 	IPADBG("Entry\n");
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
 	result = ipa3_attach_to_smmu();
 	if (result) {
 		IPAERR("IPA attach to smmu failed %d\n", result);
 		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
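+		/* reschedule FW loading after DELAY_BEFORE_FW_LOAD msec */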
+		queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+			&ipa3_fw_load_failure_handle,
+			msecs_to_jiffies(DELAY_BEFORE_FW_LOAD));
 		return;
 	}
 
@@ -8015,13 +8019,18 @@
 		result = ipa3_manual_load_ipa_fws();
 	}
 
-	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-
 	if (result) {
-		IPAERR("IPA FW loading process has failed result=%d\n",
-			result);
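+		/* count the failed PIL attempt and retry from the delayed work */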
+		ipa3_ctx->ipa_pil_load++;
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		IPADBG("IPA firmware loading deferred to a work queue\n");
+		queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+			&ipa3_fw_load_failure_handle,
+			msecs_to_jiffies(DELAY_BEFORE_FW_LOAD));
 		return;
 	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	mutex_lock(&ipa3_ctx->fw_load_data.lock);
 	ipa3_ctx->fw_load_data.state = IPA_FW_LOAD_STATE_LOADED;
 	mutex_unlock(&ipa3_ctx->fw_load_data.lock);
@@ -8090,7 +8099,7 @@
 		if (ipa3_ctx->fw_load_data.state == IPA_FW_LOAD_STATE_INIT) {
 			ipa3_ctx->fw_load_data.state =
 				IPA_FW_LOAD_STATE_SMMU_DONE;
-			goto out;
+			goto sched_fw_load;
 		}
 		if (ipa3_ctx->fw_load_data.state ==
 			IPA_FW_LOAD_STATE_FWFILE_READY) {
@@ -10320,6 +10329,7 @@
 		}
 	}
 
+	cb->done = true;
 	return 0;
 }
 
@@ -10399,10 +10409,35 @@
 	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = (bypass != 0);
 
 	ipa3_ctx->uc_pdev = dev;
-
+	cb->done = true;
 	return 0;
 }
 
+static void ipa3_ap_iommu_unmap(struct ipa_smmu_cb_ctx *cb, const u32 *add_map,
+	u32 add_map_size)
+{
+	int i, res;
+
+	/* iterate over each entry of the additional mapping array */
+	for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+		u32 iova = be32_to_cpu(add_map[i]);
+		u32 pa = be32_to_cpu(add_map[i + 1]);
+		u32 size = be32_to_cpu(add_map[i + 2]);
+		unsigned long iova_p;
+		phys_addr_t pa_p;
+		u32 size_p;
+
+		IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+			iova_p, pa_p, size_p);
+		IPADBG_LOW("unmapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+
+		res = iommu_unmap(cb->iommu_domain, iova_p, size_p);
+		if (res != size_p) {
+			pr_err("iommu unmap failed for AP cb\n");
+			ipa_assert();
+		}
+	}
+}
+
 static int ipa_smmu_ap_cb_probe(struct device *dev)
 {
 	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
@@ -10537,6 +10572,8 @@
 		if (ret < 0 && ret != -EEXIST) {
 			IPAERR("unable to allocate smem MODEM entry\n");
 			cb->valid = false;
+			if (add_map)
+				ipa3_ap_iommu_unmap(cb, add_map, add_map_size);
 			return -EFAULT;
 		}
 		smem_addr = qcom_smem_get(SMEM_MODEM,
@@ -10545,6 +10582,8 @@
 		if (IS_ERR(smem_addr)) {
 			IPAERR("unable to acquire smem MODEM entry\n");
 			cb->valid = false;
+			if (add_map)
+				ipa3_ap_iommu_unmap(cb, add_map, add_map_size);
 			return -EFAULT;
 		}
 		if (smem_size != ipa_smem_size)
@@ -10565,6 +10604,7 @@
 
 	smmu_info.present[IPA_SMMU_CB_AP] = true;
 
+	cb->done = true;
 	ipa3_ctx->pdev = dev;
 	cb->next_addr = cb->va_end;
 
@@ -10617,14 +10657,21 @@
 		IPADBG("11AD using shared CB\n");
 		cb->shared = true;
 	}
-
+	cb->done = true;
 	return 0;
 }
 
 static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type)
 {
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(cb_type);
+
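+	/* skip context banks that were already probed successfully */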
+	if (cb != NULL && cb->done) {
+		IPADBG("SMMU CB type %d already initialized\n", cb_type);
+		return 0;
+	}
+
 	switch (cb_type) {
 	case IPA_SMMU_CB_AP:
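+		/* default to the master device; the AP CB probe overrides it */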
+		ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev;
 		return ipa_smmu_ap_cb_probe(dev);
 	case IPA_SMMU_CB_WLAN:
 	case IPA_SMMU_CB_WLAN1:
@@ -10632,6 +10679,7 @@
 	case IPA_SMMU_CB_ETH1:
 		return ipa_smmu_perph_cb_probe(dev, cb_type);
 	case IPA_SMMU_CB_UC:
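+		/* default to the master device; the UC CB probe overrides it */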
+		ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;
 		return ipa_smmu_uc_cb_probe(dev);
 	case IPA_SMMU_CB_11AD:
 		return ipa_smmu_11ad_cb_probe(dev);
@@ -10646,16 +10694,15 @@
 	struct ipa_smmu_cb_ctx *cb;
 	int i, result;
 
-	ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev;
-	ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;
-
 	if (smmu_info.arm_smmu) {
 		IPADBG("smmu is enabled\n");
 		for (i = 0; i < IPA_SMMU_CB_MAX; i++) {
 			cb = ipa3_get_smmu_ctx(i);
 			result = ipa_smmu_cb_probe(cb->dev, i);
-			if (result)
+			if (result) {
 				IPAERR("probe failed for cb %d\n", i);
+				return result;
+			}
 		}
 	} else {
 		IPADBG("smmu is disabled\n");
@@ -10738,7 +10785,6 @@
 			ipa3_ctx->num_smmu_cb_probed ==
 			ipa3_ctx->max_num_smmu_cb) {
 			IPADBG("All %d CBs probed\n", IPA_SMMU_CB_MAX);
-			ipa_fw_load_sm_handle_event(IPA_FW_LOAD_EVNT_SMMU_DONE);
 
 			if (ipa3_ctx->use_xbl_boot) {
 				IPAERR("Using XBL boot load for IPA FW\n");
@@ -10758,6 +10804,9 @@
 					IPAERR("IPA post init failed %d\n", result);
 					return result;
 				}
+			} else {
+				ipa_fw_load_sm_handle_event(IPA_FW_LOAD_EVNT_SMMU_DONE);
 			}
 		}
 	} else {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 6734987..07ad67e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -776,26 +776,14 @@
 			nbytes = scnprintf(
 				dbg_buff,
 				IPA_MAX_MSG_LEN,
-				"name:%s len=%d ref=%d partial=%d type=%s ",
+				"name:%s len=%d ref=%d partial=%d type=%s ofst=%u ",
 				entry->name,
 				entry->hdr_len,
 				entry->ref_cnt,
 				entry->is_partial,
-				ipa3_hdr_l2_type_name[entry->type]);
+				ipa3_hdr_l2_type_name[entry->type],
+				entry->offset_entry->offset >> 2);
 
-			if (entry->is_hdr_proc_ctx) {
-				nbytes += scnprintf(
-					dbg_buff + nbytes,
-					IPA_MAX_MSG_LEN - nbytes,
-					"phys_base=0x%pa ",
-					&entry->phys_base);
-			} else {
-				nbytes += scnprintf(
-					dbg_buff + nbytes,
-					IPA_MAX_MSG_LEN - nbytes,
-					"ofst=%u ",
-					entry->offset_entry->offset >> 2);
-			}
 			for (i = 0; i < entry->hdr_len; i++) {
 				scnprintf(dbg_buff + nbytes + i * 2,
 					  IPA_MAX_MSG_LEN - nbytes - i * 2,
@@ -1288,29 +1276,16 @@
 		ofst_words = (entry->offset_entry->offset +
 			ipa3_ctx->hdr_proc_ctx_tbl.start_offset)
 			>> 5;
-		if (entry->hdr->is_hdr_proc_ctx) {
-			nbytes += scnprintf(dbg_buff + nbytes,
-				IPA_MAX_MSG_LEN - nbytes,
-				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
-				entry->id,
-				ipa3_hdr_proc_type_name[entry->type],
-				ofst_words);
-			nbytes += scnprintf(dbg_buff + nbytes,
-				IPA_MAX_MSG_LEN - nbytes,
-				"hdr_phys_base:0x%pa\n",
-				&entry->hdr->phys_base);
-		} else {
-			nbytes += scnprintf(dbg_buff + nbytes,
-				IPA_MAX_MSG_LEN - nbytes,
-				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
-				entry->id,
-				ipa3_hdr_proc_type_name[entry->type],
-				ofst_words);
-			nbytes += scnprintf(dbg_buff + nbytes,
-				IPA_MAX_MSG_LEN - nbytes,
-				"hdr[words]:%u\n",
-				entry->hdr->offset_entry->offset >> 2);
-		}
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+			entry->id,
+			ipa3_hdr_proc_type_name[entry->type],
+			ofst_words);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"hdr[words]:%u\n",
+			entry->hdr->offset_entry->offset >> 2);
 	}
 	mutex_unlock(&ipa3_ctx->lock);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 00ae2c5..7825c0c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -100,6 +100,7 @@
 static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_first_replenish_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_work_func(struct work_struct *work);
 static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys);
@@ -1523,7 +1524,7 @@
 			ipa3_ctx->ipa_wan_skb_page) {
 			ipa3_replenish_rx_page_recycle(ep->sys);
 		} else
-			ipa3_replenish_rx_cache(ep->sys);
+			ipa3_first_replenish_rx_cache(ep->sys);
 		for (i = 0; i < GSI_VEID_MAX; i++)
 			INIT_LIST_HEAD(&ep->sys->pending_pkts[i]);
 	}
@@ -2822,6 +2823,111 @@
 	return;
 }
 
+/**
+ * ipa3_first_replenish_rx_cache() - Replenish the Rx packets cache for the first time.
+ *
+ * The function allocates buffers from the rx_pkt_wrapper_cache until the
+ * system pipe's Rx pool (rx_pool_sz) is full, and rings the GSI doorbell
+ * only once, after all the transfers have been queued.
+ *   - Allocate a buffer in the cache
+ *   - Initialize the packet's link
+ *   - Initialize the packet's work struct
+ *   - Allocate the packet's socket buffer (skb)
+ *   - Fill the packet's skb with data
+ *   - Make the packet DMAable
+ *   - Add the packet to the system pipe linked list
+ */
+static void ipa3_first_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int idx = 0;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = sys->len;
+
+	/* start replenish only when buffers go lower than the threshold */
+	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
+		return;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc cache\n");
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
+			IPAERR("dma_map_single failure %pK for %pK\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
+		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
+		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
+		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
+		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
+		idx++;
+		rx_len_cached++;
+		/*
+		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
+		 * If this size is reached we need to queue the xfers.
+		 */
+		if (idx == IPA_REPL_XFER_MAX) {
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+				gsi_xfer_elem_array, false);
+			if (ret != GSI_STATUS_SUCCESS) {
+				/* we don't expect this will happen */
+				IPAERR("failed to provide buffer: %d\n", ret);
+				WARN_ON(1);
+				break;
+			}
+			idx = 0;
+		}
+	}
+	goto done;
+
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	/* Ensuring minimum buffers are submitted to HW */
+	if (rx_len_cached < IPA_REPL_XFER_THRESH) {
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+		return;
+	}
+done:
+	/* only ring doorbell once here */
+	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+		gsi_xfer_elem_array, true);
+	if (ret == GSI_STATUS_SUCCESS) {
+		sys->len = rx_len_cached;
+	} else {
+		/* we don't expect this will happen */
+		IPAERR("failed to provide buffer: %d\n", ret);
+		WARN_ON(1);
+	}
+}
 
 /**
  * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 56750d9..0f95e4d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -47,10 +47,7 @@
 		return -ENOMEM;
 	}
 
-	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl[loc].head_hdr_entry_list,
-			link) {
-		if (entry->is_hdr_proc_ctx)
-			continue;
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl[loc].head_hdr_entry_list, link) {
 		IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
 				entry->offset_entry->offset);
 		ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
@@ -101,14 +98,12 @@
 
 		/* Check the pointer and header length to avoid dangerous overflow in HW */
 		if (unlikely(!entry->hdr || !entry->hdr->offset_entry ||
-					 entry->hdr->hdr_len > ipa_hdr_bin_sz[IPA_HDR_BIN_MAX - 1]))
+			entry->hdr->hdr_len > ipa_hdr_bin_sz[IPA_HDR_BIN_MAX - 1]))
 			return -EINVAL;
 
 		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
 				entry->offset_entry->offset,
 				entry->hdr->hdr_len,
-				entry->hdr->is_hdr_proc_ctx,
-				entry->hdr->phys_base,
 				(entry->hdr->is_lcl) ? hdr_lcl_addr : hdr_sys_addr,
 				entry->hdr->offset_entry,
 				&entry->l2tp_params,
@@ -441,8 +436,7 @@
 		WARN_ON_RATELIMIT_IPA(1);
 		return -EINVAL;
 	}
-	IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
-		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);
+	IPADBG("Associated header is name=%s\n", hdr_entry->name);
 
 	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
 	if (!entry) {
@@ -581,7 +575,6 @@
 	entry->eth2_ofst = hdr->eth2_ofst;
 	entry->cookie = IPA_HDR_COOKIE;
 	entry->ipacm_installed = user;
-	entry->is_hdr_proc_ctx = false;
 	entry->is_lcl = ((IPA_MEM_PART(apps_hdr_size_ddr) &&
 			 (entry->is_partial || (hdr->status == IPA_HDR_TO_DDR_PATTERN))) ||
 			 !IPA_MEM_PART(apps_hdr_size)) ? false : true;
@@ -609,13 +602,22 @@
 	mem_size = entry->is_lcl ? IPA_MEM_PART(apps_hdr_size) : IPA_MEM_PART(apps_hdr_size_ddr);
 
 	if (list_empty(&htbl->head_free_offset_list[bin])) {
-		/* if header does not fit to table, place it in DDR */
-		if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
+		/*
+		 * For a local header entry, the first iteration checks against
+		 * the SRAM partition space and the second iteration checks
+		 * against the DDR partition space.
+		 * For a system header entry, the loop iterates only once and
+		 * checks against the DDR partition space.
+		 */
+		while (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
 			if (entry->is_lcl) {
+				/* if header does not fit to SRAM table, place it in DDR */
 				htbl = &ipa3_ctx->hdr_tbl[HDR_TBL_SYS];
 				mem_size = IPA_MEM_PART(apps_hdr_size_ddr);
 				entry->is_lcl = false;
 			} else {
+				/*
+				 * The entry was intended for DDR and there is
+				 * no space left -> error.
+				 */
 				IPAERR("No space in DDR header buffer! Requested: %d Left: %d\n",
 				       ipa_hdr_bin_sz[bin], mem_size - htbl->end);
 				goto bad_hdr_len;
@@ -778,13 +780,8 @@
 
 	htbl = entry->is_lcl ? &ipa3_ctx->hdr_tbl[HDR_TBL_LCL] : &ipa3_ctx->hdr_tbl[HDR_TBL_SYS];
 
-	if (entry->is_hdr_proc_ctx)
-		IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
-			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
-	else
-		IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
-			entry->hdr_len, htbl->hdr_cnt,
-			entry->offset_entry->offset);
+	IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len, htbl->hdr_cnt,
+		entry->offset_entry->offset);
 
 	if (by_user && entry->user_deleted) {
 		IPAERR_RL("proc_ctx already deleted by user\n");
@@ -809,17 +806,12 @@
 		return 0;
 	}
 
-	if (entry->is_hdr_proc_ctx || entry->proc_ctx) {
-		dma_unmap_single(ipa3_ctx->pdev,
-			entry->phys_base,
-			entry->hdr_len,
-			DMA_TO_DEVICE);
+	if (entry->proc_ctx)
 		__ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
-	} else {
+	else
 		/* move the offset entry to appropriate free list */
 		list_move(&entry->offset_entry->link,
 			&htbl->head_free_offset_list[entry->offset_entry->bin]);
-	}
 	list_del(&entry->link);
 	htbl->hdr_cnt--;
 	entry->cookie = 0;
@@ -1257,12 +1249,6 @@
 				IPADBG("Trying to remove hdr %s offset=%u\n",
 					entry->name, entry->offset_entry->offset);
 				if (!entry->offset_entry->offset) {
-					if (entry->is_hdr_proc_ctx) {
-						IPAERR("default header is proc ctx\n");
-						mutex_unlock(&ipa3_ctx->lock);
-						WARN_ON_RATELIMIT_IPA(1);
-						return -EFAULT;
-					}
 					IPADBG("skip default header\n");
 					continue;
 				}
@@ -1276,20 +1262,17 @@
 			}
 
 			if (!user_only || entry->ipacm_installed) {
-				if (entry->is_hdr_proc_ctx) {
-					dma_unmap_single(ipa3_ctx->pdev,
-						entry->phys_base,
-						entry->hdr_len,
-						DMA_TO_DEVICE);
+				if (entry->proc_ctx) {
 					entry->proc_ctx->hdr = NULL;
 					entry->proc_ctx = NULL;
-				} else {
-					/* move the offset entry to free list */
-					entry->offset_entry->ipacm_installed = false;
-					list_move(&entry->offset_entry->link,
-					&ipa3_ctx->hdr_tbl[hdr_tbl_loc].head_free_offset_list[
-						entry->offset_entry->bin]);
 				}
+				/* move the offset entry to free list */
+				entry->offset_entry->ipacm_installed = false;
+				list_move(&entry->offset_entry->link,
+				&ipa3_ctx->hdr_tbl[hdr_tbl_loc].head_free_offset_list[
+					entry->offset_entry->bin]);
+
+				/* delete the hdr entry from headers list */
 				list_del(&entry->link);
 				ipa3_ctx->hdr_tbl[hdr_tbl_loc].hdr_cnt--;
 				entry->ref_cnt = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index a065a33..595c901 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -618,6 +618,7 @@
 	u32 va_end;
 	bool shared;
 	bool is_cache_coherent;
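+	/* set once this context bank has been probed successfully */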
+	bool done;
 };
 
 /**
@@ -780,10 +781,6 @@
  * @name: name of header table entry
  * @type: l2 header type
  * @is_partial: flag indicating if header table entry is partial
- * @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
- * true - hdr entry resides in DDR and pointed to by proc ctx
- * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
- * else 0
  * @proc_ctx: processing context header
  * @offset_entry: entry's offset
  * @cookie: cookie used for validity check
@@ -803,8 +800,6 @@
 	char name[IPA_RESOURCE_NAME_MAX];
 	enum ipa_hdr_l2_type type;
 	u8 is_partial;
-	bool is_hdr_proc_ctx;
-	dma_addr_t phys_base;
 	struct ipa3_hdr_proc_ctx_entry *proc_ctx;
 	struct ipa_hdr_offset_entry *offset_entry;
 	u32 ref_cnt;
@@ -2393,6 +2388,8 @@
 	struct mutex act_tbl_lock;
 	int uc_act_tbl_total;
 	int uc_act_tbl_next_index;
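+	/* number of failed PIL FW load attempts that were retried */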
+	int ipa_pil_load;
 };
 
 struct ipa3_plat_drv_res {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
index 8e5af46..3726355 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -284,7 +285,7 @@
 	ipa_odl_ep_cfg = &ipa3_odl_ctx->odl_sys_param;
 
 	IPADBG("Setting up the odl endpoint\n");
-	ipa_odl_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
+	ipa_odl_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
 
 	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
 	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 2219c1b..2d82c29 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -94,7 +94,7 @@
 		}
 	}
 
-	if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
+	if (entry->proc_ctx) {
 		struct ipa3_hdr_proc_ctx_entry *proc_ctx;
 
 		proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 37118f0..c16ea49 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1739,8 +1739,6 @@
  * @base: dma base address
  * @offset: offset from base address where the data will be copied
  * @hdr_len: the length of the header
- * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
- * @phys_base: memory location in DDR
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
@@ -1750,8 +1748,7 @@
  */
 static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset,
-		u32 hdr_len, bool is_hdr_proc_ctx,
-		dma_addr_t phys_base, u64 hdr_base_addr,
+		u32 hdr_len, u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
 		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
 		struct ipa_eogre_hdr_proc_ctx_params *eogre_params,
@@ -1768,8 +1765,7 @@
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
 		ctx->hdr_add.tlv.length = 2;
 		ctx->hdr_add.tlv.value = hdr_len;
-		hdr_addr = is_hdr_proc_ctx ? phys_base :
-			hdr_base_addr + offset_entry->offset;
+		hdr_addr = hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
 			hdr_addr);
 		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
@@ -1788,8 +1784,7 @@
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
 		ctx->hdr_add.tlv.length = 2;
 		ctx->hdr_add.tlv.value = hdr_len;
-		hdr_addr = is_hdr_proc_ctx ? phys_base :
-			hdr_base_addr + offset_entry->offset;
+		hdr_addr = hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
 			hdr_addr);
 		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
@@ -1825,8 +1820,7 @@
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
 		ctx->hdr_add.tlv.length = 2;
 		ctx->hdr_add.tlv.value = hdr_len;
-		hdr_addr = is_hdr_proc_ctx ? phys_base :
-			hdr_base_addr + offset_entry->offset;
+		hdr_addr = hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx length %d\n",
 			hdr_addr, ctx->hdr_add.tlv.value);
 		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
@@ -1866,8 +1860,7 @@
 		ctx->hdr_add.tlv.length = 2;
 		if (l2tp_params->hdr_remove_param.eth_hdr_retained) {
 			ctx->hdr_add.tlv.value = hdr_len;
-			hdr_addr = is_hdr_proc_ctx ? phys_base :
-				hdr_base_addr + offset_entry->offset;
+			hdr_addr = hdr_base_addr + offset_entry->offset;
 			IPAHAL_DBG("header address 0x%llx length %d\n",
 				hdr_addr, ctx->hdr_add.tlv.value);
 			IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
@@ -1910,8 +1903,7 @@
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
 		ctx->hdr_add.tlv.length = 2;
 		ctx->hdr_add.tlv.value = hdr_len;
-		hdr_addr = is_hdr_proc_ctx ? phys_base :
-			hdr_base_addr + offset_entry->offset;
+		hdr_addr = hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%x\n",
 			ctx->hdr_add.hdr_addr);
 		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
@@ -1940,8 +1932,7 @@
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
 		ctx->hdr_add.tlv.length = 2;
 		ctx->hdr_add.tlv.value = hdr_len;
-		hdr_addr = is_hdr_proc_ctx ? phys_base :
-			hdr_base_addr + offset_entry->offset;
+		hdr_addr = hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
 			hdr_addr);
 		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
@@ -1976,8 +1967,7 @@
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
 		ctx->hdr_add.tlv.length = 2;
 		ctx->hdr_add.tlv.value = hdr_len;
-		hdr_addr = is_hdr_proc_ctx ? phys_base :
-			hdr_base_addr + offset_entry->offset;
+		hdr_addr = hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx length %d\n",
 				   hdr_addr, ctx->hdr_add.tlv.value);
 		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(
@@ -2001,8 +1991,7 @@
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
 		ctx->hdr_add.tlv.length = 2;
 		ctx->hdr_add.tlv.value = hdr_len;
-		hdr_addr = is_hdr_proc_ctx ? phys_base :
-			hdr_base_addr + offset_entry->offset;
+		hdr_addr = hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
 			hdr_addr);
 		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
@@ -2106,7 +2095,6 @@
 
 	int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
 			void *const base, u32 offset, u32 hdr_len,
-			bool is_hdr_proc_ctx, dma_addr_t phys_base,
 			u64 hdr_base_addr,
 			struct ipa_hdr_offset_entry *offset_entry,
 			struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
@@ -2174,8 +2162,6 @@
  * @base: dma base address
  * @offset: offset from base address where the data will be copied
  * @hdr_len: the length of the header
- * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
- * @phys_base: memory location in DDR
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
@@ -2185,7 +2171,6 @@
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset, u32 hdr_len,
-		bool is_hdr_proc_ctx, dma_addr_t phys_base,
 		u64 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
 		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
 		struct ipa_eogre_hdr_proc_ctx_params *eogre_params,
@@ -2193,24 +2178,18 @@
 		bool is_64)
 {
 	IPAHAL_DBG(
-		"type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK, bool %d\n"
-			, type, base, offset, hdr_len, is_hdr_proc_ctx,
-			hdr_base_addr, offset_entry, is_64);
+		"type %d, base %pK, offset %d, hdr_len %d, hdr_base_addr %llu, offset_entry %pK, bool %d\n"
+			, type, base, offset, hdr_len, hdr_base_addr, offset_entry, is_64);
 
-	if (!base ||
-		(is_hdr_proc_ctx && !phys_base) ||
-		(!is_hdr_proc_ctx && !offset_entry) ||
-		(!is_hdr_proc_ctx && !hdr_base_addr)) {
+	if (!base || !offset_entry || !hdr_base_addr) {
 		IPAHAL_ERR(
-			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%llu is_hdr_proc_ctx:%d offset_entry:%pK\n"
-			, hdr_len, &phys_base, hdr_base_addr
-			, is_hdr_proc_ctx, offset_entry);
+			"invalid input: hdr_len:%u hdr_base_addr:%llu offset_entry:%pK\n",
+			hdr_len, hdr_base_addr, offset_entry);
 		return -EINVAL;
 	}
 
 	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
-			hdr_len, is_hdr_proc_ctx, phys_base,
-			hdr_base_addr, offset_entry, l2tp_params,
+			hdr_len, hdr_base_addr, offset_entry, l2tp_params,
 			eogre_params, generic_params, is_64);
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 92747a6..db5de06 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -753,8 +753,6 @@
  * @base: dma base address
  * @offset: offset from base address where the data will be copied
  * @hdr_len: the length of the header
- * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
- * @phys_base: memory location in DDR
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
@@ -764,7 +762,6 @@
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *base, u32 offset, u32 hdr_len,
-		bool is_hdr_proc_ctx, dma_addr_t phys_base,
 		u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
 		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
index b4c2b7a..ceb6051 100644
--- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -30,6 +30,8 @@
 	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
 #define TETH_ERR(fmt, args...) \
 	pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
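+/* rate-limited variant of TETH_ERR for per-packet error paths */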
+#define TETH_ERR_RL(fmt, args...) \
+	pr_err_ratelimited_ipa(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
 
 enum ipa_num_teth_iface {
 	IPA_TETH_IFACE_1 = 0,
@@ -76,7 +78,7 @@
 		return;
 	}
 
-	TETH_ERR("Unexpected exception packet from USB, dropping packet\n");
+	TETH_ERR_RL("Unexpected exception packet from USB, dropping packet\n");
 	dev_kfree_skb_any(skb);
 	TETH_DBG_FUNC_EXIT();
 }