msm: ipa: allocate page recycling buffers only once

Order-3 page allocations are costly when the system is under heavy
memory use. Allocate the page recycling buffers only once, on boot-up,
and reuse them across pipe teardown and setup: recycled pages are
returned to the pool's free list instead of being unmapped and freed,
and the pool itself is no longer released during Rx cleanup.
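
For context, a minimal userspace sketch of the pattern applied here
(illustrative names only, not driver code): the pool is allocated on
first use and kept alive afterwards, and released buffers are pushed
back onto its free list instead of being freed.

	#include <stdio.h>
	#include <stdlib.h>

	struct buf { struct buf *next; };
	struct pool { struct buf *free_head; };

	static struct pool *recycle_pool;	/* survives "teardown" */

	static struct pool *pool_get(void)
	{
		if (!recycle_pool) {		/* allocate only once */
			recycle_pool = calloc(1, sizeof(*recycle_pool));
			if (!recycle_pool)
				return NULL;
		}
		return recycle_pool;
	}

	static void buf_recycle(struct pool *p, struct buf *b)
	{
		b->next = p->free_head;		/* push onto free list */
		p->free_head = b;		/* instead of free(b) */
	}

	int main(void)
	{
		struct pool *p = pool_get();
		struct buf *b = calloc(1, sizeof(*b));

		if (!p || !b)
			return 1;
		buf_recycle(p, b);		/* reused on next setup */
		printf("pool %p kept, buf %p recycled\n",
		       (void *)p, (void *)p->free_head);
		return 0;
	}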

Change-Id: I445e27ecc03aaa00483e7a46a468ef0a6f4bfcfb
Signed-off-by: Chaitanya Pratapa <quic_cpratapa@quicinc.com>
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 008ac24..97920ad 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1566,28 +1566,40 @@
 			sys_in->client == IPA_CLIENT_APPS_WAN_CONS &&
 			coal_ep_id != IPA_EP_NOT_ALLOCATED &&
 			ipa3_ctx->ep[coal_ep_id].valid == 1)) {
-			ep->sys->page_recycle_repl = kzalloc(
-				sizeof(*ep->sys->page_recycle_repl), GFP_KERNEL);
+			/* Allocate page recycling pool only once. */
 			if (!ep->sys->page_recycle_repl) {
-				IPAERR("failed to alloc repl for client %d\n",
-						sys_in->client);
-				result = -ENOMEM;
-				goto fail_napi;
+				ep->sys->page_recycle_repl = kzalloc(
+					sizeof(*ep->sys->page_recycle_repl), GFP_KERNEL);
+				if (!ep->sys->page_recycle_repl) {
+					IPAERR("failed to alloc repl for client %d\n",
+							sys_in->client);
+					result = -ENOMEM;
+					goto fail_napi;
+				}
+				atomic_set(&ep->sys->page_recycle_repl->pending, 0);
+				/* For common page pool double the pool size. */
+				if (ipa3_ctx->wan_common_page_pool &&
+					sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+					ep->sys->page_recycle_repl->capacity =
+							(ep->sys->rx_pool_sz + 1) *
+							IPA_GENERIC_RX_CMN_PAGE_POOL_SZ_FACTOR;
+				else
+					ep->sys->page_recycle_repl->capacity =
+							(ep->sys->rx_pool_sz + 1) *
+							IPA_GENERIC_RX_PAGE_POOL_SZ_FACTOR;
+				IPADBG("Page repl capacity for client:%d, value:%d\n",
+						   sys_in->client, ep->sys->page_recycle_repl->capacity);
+				INIT_LIST_HEAD(&ep->sys->page_recycle_repl->page_repl_head);
+				INIT_DELAYED_WORK(&ep->sys->freepage_work, ipa3_schd_freepage_work);
+				tasklet_init(&ep->sys->tasklet_find_freepage,
+					ipa3_tasklet_find_freepage, (unsigned long) ep->sys);
+				ipa3_replenish_rx_page_cache(ep->sys);
+			} else {
+				ep->sys->napi_sort_page_thrshld_cnt = 0;
+				/* Sort the pages once. */
+				ipa3_tasklet_find_freepage((unsigned long) ep->sys);
 			}
-			atomic_set(&ep->sys->page_recycle_repl->pending, 0);
-			/* For common page pool double the pool size. */
-			if (ipa3_ctx->wan_common_page_pool &&
-				sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
-				ep->sys->page_recycle_repl->capacity =
-						(ep->sys->rx_pool_sz + 1) *
-						IPA_GENERIC_RX_CMN_PAGE_POOL_SZ_FACTOR;
-			else
-				ep->sys->page_recycle_repl->capacity =
-						(ep->sys->rx_pool_sz + 1) *
-						IPA_GENERIC_RX_PAGE_POOL_SZ_FACTOR;
-			IPADBG("Page repl capacity for client:%d, value:%d\n",
-					   sys_in->client, ep->sys->page_recycle_repl->capacity);
-			INIT_LIST_HEAD(&ep->sys->page_recycle_repl->page_repl_head);
+
 			ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
 			if (!ep->sys->repl) {
 				IPAERR("failed to alloc repl for client %d\n",
@@ -1610,11 +1622,6 @@
 			atomic_set(&ep->sys->repl->head_idx, 0);
 			atomic_set(&ep->sys->repl->tail_idx, 0);
 
-			tasklet_init(&ep->sys->tasklet_find_freepage,
-					ipa3_tasklet_find_freepage, (unsigned long) ep->sys);
-			INIT_DELAYED_WORK(&ep->sys->freepage_work, ipa3_schd_freepage_work);
-			ep->sys->napi_sort_page_thrshld_cnt = 0;
-			ipa3_replenish_rx_page_cache(ep->sys);
 			ipa3_wq_page_repl(&ep->sys->repl_work);
 		} else {
 			/* Use pool same as coal pipe when common page pool is used. */
@@ -3415,11 +3422,17 @@
 	if (!rx_pkt->page_data.is_tmp_alloc) {
 		list_del_init(&rx_pkt->link);
 		page_ref_dec(rx_pkt->page_data.page);
+		spin_lock_bh(&rx_pkt->sys->common_sys->spinlock);
+		/* Return the element to the head of the recycle list. */
+		list_add(&rx_pkt->link,
+			&rx_pkt->sys->page_recycle_repl->page_repl_head);
+		spin_unlock_bh(&rx_pkt->sys->common_sys->spinlock);
+	} else {
+		dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
+			rx_pkt->len, DMA_FROM_DEVICE);
+		__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	}
-	dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
-		rx_pkt->len, DMA_FROM_DEVICE);
-	__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
-	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 }
 
 /**
@@ -3479,23 +3492,6 @@
 		kfree(sys->repl);
 		sys->repl = NULL;
 	}
-	if (sys->page_recycle_repl) {
-		list_for_each_entry_safe(rx_pkt, r,
-		&sys->page_recycle_repl->page_repl_head, link) {
-			list_del(&rx_pkt->link);
-			dma_unmap_page(ipa3_ctx->pdev,
-				rx_pkt->page_data.dma_addr,
-				rx_pkt->len,
-				DMA_FROM_DEVICE);
-			__free_pages(rx_pkt->page_data.page,
-				rx_pkt->page_data.page_order);
-			kmem_cache_free(
-				ipa3_ctx->rx_pkt_wrapper_cache,
-				rx_pkt);
-		}
-		kfree(sys->page_recycle_repl);
-		sys->page_recycle_repl = NULL;
-	}
 }
 
 static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 208c669..13f02ce 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1221,7 +1221,6 @@
 	struct work_struct repl_work;
 	void (*repl_hdlr)(struct ipa3_sys_context *sys);
 	struct ipa3_repl_ctx *repl;
-	struct ipa3_page_repl_ctx *page_recycle_repl;
 	u32 pkt_sent;
 	struct napi_struct *napi_obj;
 	struct list_head pending_pkts[GSI_VEID_MAX];
@@ -1241,7 +1240,6 @@
 	bool ext_ioctl_v2;
 	bool common_buff_pool;
 	struct ipa3_sys_context *common_sys;
-	struct tasklet_struct tasklet_find_freepage;
 	atomic_t page_avilable;
 	u32 napi_sort_page_thrshld_cnt;
 
@@ -1257,8 +1255,10 @@
 	struct workqueue_struct *repl_wq;
 	struct ipa3_status_stats *status_stat;
 	u32 pm_hdl;
+	struct ipa3_page_repl_ctx *page_recycle_repl;
 	struct workqueue_struct *freepage_wq;
 	struct delayed_work freepage_work;
+	struct tasklet_struct tasklet_find_freepage;
 	/* ordering is important - other immutable fields go below */
 };