Merge "dataipa: changes to support page recycling stats"
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 59d24b6..0d96a11 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -9521,6 +9521,11 @@
result = -ENOMEM;
goto fail_gsi_map;
}
+ mutex_init(&ipa3_ctx->recycle_stats_collection_lock);
+ memset(&ipa3_ctx->recycle_stats, 0, sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+ memset(&ipa3_ctx->prev_coal_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
+ memset(&ipa3_ctx->prev_default_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
+ memset(&ipa3_ctx->prev_low_lat_data_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
ipa3_ctx->transport_power_mgmt_wq =
create_singlethread_workqueue("transport_power_mgmt");
@@ -9530,6 +9535,15 @@
goto fail_create_transport_wq;
}
+ /* Create workqueue for periodic page-recycle stats collection */
+ ipa3_ctx->collect_recycle_stats_wq =
+ create_singlethread_workqueue("page_recycle_stats_collection");
+ if (!ipa3_ctx->collect_recycle_stats_wq) {
+ IPAERR("failed to create page recycling stats collection wq\n");
+ result = -ENOMEM;
+ goto fail_create_recycle_stats_wq;
+ }
+
mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
/* init the lookaside cache */
@@ -9864,6 +9880,8 @@
fail_rt_rule_cache:
kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
fail_flt_rule_cache:
+ destroy_workqueue(ipa3_ctx->collect_recycle_stats_wq);
+fail_create_recycle_stats_wq:
destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
fail_create_transport_wq:
destroy_workqueue(ipa3_ctx->power_mgmt_wq);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index baf3c7b..df30080 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -26,6 +26,7 @@
#include "ipa_trace.h"
#include "ipahal.h"
#include "ipahal_fltrt.h"
+#include "ipa_stats.h"
#define IPA_GSI_EVENT_RP_SIZE 8
#define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
@@ -155,6 +156,156 @@
struct gsi_chan_xfer_notify g_lan_rx_notify[IPA_LAN_NAPI_MAX_FRAMES];
+static void ipa3_collect_default_coal_recycle_stats_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_collect_default_coal_recycle_stats_wq_work,
+ ipa3_collect_default_coal_recycle_stats_wq);
+
+static void ipa3_collect_low_lat_data_recycle_stats_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_collect_low_lat_data_recycle_stats_wq_work,
+ ipa3_collect_low_lat_data_recycle_stats_wq);
+
+static void ipa3_collect_default_coal_recycle_stats_wq(struct work_struct *work)
+{ /* Delayed-work handler: records one interval of coal + default WAN pipe page-recycle stats */
+ struct ipa3_sys_context *sys;
+ int stat_interval_index; /* ring slot to write this interval into */
+ int ep_idx = -1;
+
+ /* For targets which don't require coalescing pipe */
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+ if (ep_idx == -1)
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); /* fall back to default WAN pipe */
+
+ if (ep_idx == -1)
+ sys = NULL; /* no WAN pipe mapped; stats still recorded, work just not re-armed */
+ else
+ sys = ipa3_ctx->ep[ep_idx].sys;
+
+ mutex_lock(&ipa3_ctx->recycle_stats_collection_lock); /* also serializes against read-and-clear in ipa_get_page_recycle_stats() */
+ stat_interval_index = ipa3_ctx->recycle_stats.default_coal_stats_index;
+ ipa3_ctx->recycle_stats.interval_time_in_ms = IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_TIME;
+
+ /* Coalescing pipe page recycling stats */
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[0].total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[0].page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc;
+
+ /* Per-interval deltas = current cumulative - snapshot from previous interval */
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative
+ - ipa3_ctx->prev_coal_recycle_stats.total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative
+ - ipa3_ctx->prev_coal_recycle_stats.page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative
+ - ipa3_ctx->prev_coal_recycle_stats.tmp_alloc;
+
+ /* Save current cumulatives as the baseline for the next interval */
+ ipa3_ctx->prev_coal_recycle_stats.total_replenished
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative;
+ ipa3_ctx->prev_coal_recycle_stats.page_recycled
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative;
+ ipa3_ctx->prev_coal_recycle_stats.tmp_alloc
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative;
+
+ /* Default pipe page recycling stats */
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[1].total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[1].page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc;
+
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative
+ - ipa3_ctx->prev_default_recycle_stats.total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative
+ - ipa3_ctx->prev_default_recycle_stats.page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative
+ - ipa3_ctx->prev_default_recycle_stats.tmp_alloc;
+
+ ipa3_ctx->prev_default_recycle_stats.total_replenished
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative;
+ ipa3_ctx->prev_default_recycle_stats.page_recycled
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative;
+ ipa3_ctx->prev_default_recycle_stats.tmp_alloc
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative;
+
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].valid = 1; /* mark slot as populated for the reader */
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].valid = 1;
+
+ /* Single Indexing for coalescing and default pipe */
+ ipa3_ctx->recycle_stats.default_coal_stats_index =
+ (ipa3_ctx->recycle_stats.default_coal_stats_index + 1) % IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
+ if (sys && atomic_read(&sys->curr_polling_state)) /* re-arm only while the pipe is actively polling */
+ queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+ &ipa3_collect_default_coal_recycle_stats_wq_work, msecs_to_jiffies(10));
+
+ mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+ return;
+
+}
+
+static void ipa3_collect_low_lat_data_recycle_stats_wq(struct work_struct *work)
+{ /* Delayed-work handler: records one interval of low-latency data pipe page-recycle stats */
+ struct ipa3_sys_context *sys;
+ int stat_interval_index; /* ring slot to write this interval into */
+ int ep_idx;
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+ if (ep_idx == -1)
+ sys = NULL; /* pipe not mapped; stats still recorded, work just not re-armed */
+ else
+ sys = ipa3_ctx->ep[ep_idx].sys;
+
+ mutex_lock(&ipa3_ctx->recycle_stats_collection_lock); /* also serializes against read-and-clear in ipa_get_page_recycle_stats() */
+ stat_interval_index = ipa3_ctx->recycle_stats.low_lat_stats_index;
+
+ /* Low latency data pipe page recycling stats */
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[2].total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[2].page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative
+ = ipa3_ctx->stats.page_recycle_stats[2].tmp_alloc;
+
+ /* Per-interval deltas = current cumulative - snapshot from previous interval */
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative
+ - ipa3_ctx->prev_low_lat_data_recycle_stats.total_replenished;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative
+ - ipa3_ctx->prev_low_lat_data_recycle_stats.page_recycled;
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_diff
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative
+ - ipa3_ctx->prev_low_lat_data_recycle_stats.tmp_alloc;
+
+ /* Save current cumulatives as the baseline for the next interval */
+ ipa3_ctx->prev_low_lat_data_recycle_stats.total_replenished
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative;
+ ipa3_ctx->prev_low_lat_data_recycle_stats.page_recycled
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative;
+ ipa3_ctx->prev_low_lat_data_recycle_stats.tmp_alloc
+ = ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative;
+
+ ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].valid = 1; /* mark slot as populated for the reader */
+
+ /* Indexing for low lat data stats pipe */
+ ipa3_ctx->recycle_stats.low_lat_stats_index =
+ (ipa3_ctx->recycle_stats.low_lat_stats_index + 1) % IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
+ if (sys && atomic_read(&sys->curr_polling_state)) /* re-arm only while the pipe is actively polling */
+ queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+ &ipa3_collect_low_lat_data_recycle_stats_wq_work, msecs_to_jiffies(10));
+
+ mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+ return;
+}
+
/**
* ipa3_write_done_common() - this function is responsible on freeing
* all tx_pkt_wrappers related to a skb
@@ -7003,6 +7154,9 @@
/* call repl_hdlr before napi_reschedule / napi_complete */
ep->sys->repl_hdlr(ep->sys);
wan_def_sys->repl_hdlr(wan_def_sys);
+ /* Scheduling WAN and COAL collect stats work queue */
+ queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+ &ipa3_collect_default_coal_recycle_stats_wq_work, msecs_to_jiffies(10));
/* When not able to replenish enough descriptors, keep in polling
* mode, wait for napi-poll and replenish again.
*/
@@ -7191,7 +7345,6 @@
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI_LL");
-
remain_aggr_weight = budget / ipa3_ctx->ipa_wan_aggr_pkt_cnt;
if (remain_aggr_weight > IPA_WAN_NAPI_MAX_FRAMES) {
IPAERR("NAPI weight is higher than expected\n");
@@ -7231,6 +7384,9 @@
cnt += budget - remain_aggr_weight * ipa3_ctx->ipa_wan_aggr_pkt_cnt;
/* call repl_hdlr before napi_reschedule / napi_complete */
sys->repl_hdlr(sys);
+ /* Scheduling RMNET LOW LAT DATA collect stats work queue */
+ queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+ &ipa3_collect_low_lat_data_recycle_stats_wq_work, msecs_to_jiffies(10));
/* When not able to replenish enough descriptors, keep in polling
* mode, wait for napi-poll and replenish again.
*/
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 6afc5a0..1922d0f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -2585,6 +2585,13 @@
phys_addr_t per_stats_smem_pa;
void *per_stats_smem_va;
u32 ipa_smem_size;
+ bool is_dual_pine_config;
+ struct workqueue_struct *collect_recycle_stats_wq;
+ struct ipa_lnx_pipe_page_recycling_stats recycle_stats;
+ struct ipa3_page_recycle_stats prev_coal_recycle_stats;
+ struct ipa3_page_recycle_stats prev_default_recycle_stats;
+ struct ipa3_page_recycle_stats prev_low_lat_data_recycle_stats;
+ struct mutex recycle_stats_collection_lock;
};
struct ipa3_plat_drv_res {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
index a771ecd..43f48a9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
@@ -1472,6 +1472,42 @@
}
#endif
+/* Copy the accumulated page-recycling stats to user space and clear them.
+ * @arg: user-space pointer to a struct ipa_lnx_pipe_page_recycling_stats.
+ * Returns 0 on success or a negative errno.
+ */
+static int ipa_get_page_recycle_stats(unsigned long arg)
+{
+ struct ipa_lnx_pipe_page_recycling_stats *page_recycle_stats;
+ int alloc_size;
+
+ alloc_size = sizeof(struct ipa_lnx_pipe_page_recycling_stats);
+
+ /* memdup_user() validates the user buffer; its contents are overwritten below */
+ page_recycle_stats = (struct ipa_lnx_pipe_page_recycling_stats *) memdup_user((
+ const void __user *)arg, alloc_size);
+ if (IS_ERR(page_recycle_stats)) {
+ IPA_STATS_ERR("copy from user failed\n");
+ return PTR_ERR(page_recycle_stats); /* propagate real error (e.g. -EFAULT), not -ENOMEM */
+ }
+
+ mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+ memcpy(page_recycle_stats, &ipa3_ctx->recycle_stats,
+ sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+
+ /* Clear all the data and valid bits */
+ memset(&ipa3_ctx->recycle_stats, 0,
+ sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+
+ mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+ if (copy_to_user((void __user *)arg,
+ (u8 *)page_recycle_stats,
+ alloc_size)) {
+ IPA_STATS_ERR("copy to user failed\n");
+ kfree(page_recycle_stats);
+ return -EFAULT;
+ }
+
+ kfree(page_recycle_stats);
+ return 0;
+}
+
static int ipa_stats_get_alloc_info(unsigned long arg)
{
int i = 0;
@@ -1665,41 +1701,44 @@
#if IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER)
if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 0;
- goto success;
+ } else {
+ if (ipa_usb_is_teth_prot_connected(IPA_USB_RNDIS))
+ ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RNDIS;
+ else if(ipa_usb_is_teth_prot_connected(IPA_USB_RMNET))
+ ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RMNET;
+ else ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_MAX_TETH_PROT_SIZE;
+ ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 1;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_pipes = 4;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_tx_instances = 2;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_rx_instances = 2;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[0] =
+ IPA_CLIENT_MHI_PRIME_TETH_CONS;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[1] =
+ IPA_CLIENT_MHI_PRIME_TETH_PROD;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[2] =
+ IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[3] =
+ IPA_CLIENT_MHI_PRIME_RMNET_PROD;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[0]
+ = IPA_CLIENT_MHI_PRIME_TETH_CONS;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[1]
+ = IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[0]
+ = IPA_CLIENT_MHI_PRIME_TETH_PROD;
+ ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[1]
+ = IPA_CLIENT_MHI_PRIME_RMNET_PROD;
}
- if (ipa_usb_is_teth_prot_connected(IPA_USB_RNDIS))
- ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RNDIS;
- else if(ipa_usb_is_teth_prot_connected(IPA_USB_RMNET))
- ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RMNET;
- else ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_MAX_TETH_PROT_SIZE;
- ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 1;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_pipes = 4;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_tx_instances = 2;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_rx_instances = 2;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[0] =
- IPA_CLIENT_MHI_PRIME_TETH_CONS;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[1] =
- IPA_CLIENT_MHI_PRIME_TETH_PROD;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[2] =
- IPA_CLIENT_MHI_PRIME_RMNET_CONS;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[3] =
- IPA_CLIENT_MHI_PRIME_RMNET_PROD;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[0]
- = IPA_CLIENT_MHI_PRIME_TETH_CONS;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[1]
- = IPA_CLIENT_MHI_PRIME_RMNET_CONS;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[0]
- = IPA_CLIENT_MHI_PRIME_TETH_PROD;
- ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[1]
- = IPA_CLIENT_MHI_PRIME_RMNET_PROD;
-
-success:
#else
/* MHI Prime is not enabled */
ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 0;
#endif
}
+ /* For Page recycling stats for default, coal and Low lat pipes */
+ if (ipa_lnx_agent_ctx.log_type_mask & SPRHD_IPA_LOG_TYPE_RECYCLE_STATS)
+ ipa_lnx_agent_ctx.alloc_info.num_page_rec_interval =
+ IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
if(copy_to_user((u8 *)arg,
&ipa_lnx_agent_ctx,
sizeof(struct ipa_lnx_stats_spearhead_ctx))) {
@@ -1818,6 +1857,13 @@
}
#endif
}
+ if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_RECYCLE_STATS) {
+ retval = ipa_get_page_recycle_stats((unsigned long) consolidated_stats->recycle_stats);
+ if (retval) {
+ IPA_STATS_ERR("ipa get page recycle stats fail\n");
+ break;
+ }
+ }
break;
default:
retval = -ENOTTY;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
index 45ee926..8e0ddfd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
@@ -56,6 +56,9 @@
#define SPEARHEAD_NUM_MAX_INSTANCES 2
+#define IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT 5
+#define IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_TIME 10 /* In milli second */
+
/**
* This is used to indicate which set of logs is enabled from IPA
* These bitmapped macros are copied from
@@ -67,6 +70,7 @@
#define SPRHD_IPA_LOG_TYPE_ETH_STATS 0x00008
#define SPRHD_IPA_LOG_TYPE_USB_STATS 0x00010
#define SPRHD_IPA_LOG_TYPE_MHIP_STATS 0x00020
+#define SPRHD_IPA_LOG_TYPE_RECYCLE_STATS 0x00040
/**
@@ -340,7 +344,6 @@
};
#define IPA_LNX_MHIP_INST_STATS_STRUCT_LEN_INT (8 + 248)
-
struct ipa_lnx_consolidated_stats {
uint64_t log_type_mask;
struct ipa_lnx_generic_stats *generic_stats;
@@ -349,9 +352,43 @@
struct ipa_lnx_eth_inst_stats *eth_stats;
struct ipa_lnx_usb_inst_stats *usb_stats;
struct ipa_lnx_mhip_inst_stats *mhip_stats;
+ struct ipa_lnx_pipe_page_recycling_stats *recycle_stats;
};
#define IPA_LNX_CONSOLIDATED_STATS_STRUCT_LEN_INT (8 + 48)
+enum rx_channel_type {
+ RX_WAN_COALESCING,
+ RX_WAN_DEFAULT,
+ RX_WAN_LOW_LAT_DATA,
+ RX_CHANNEL_MAX,
+};
+
+struct ipa_lnx_recycling_stats {
+ uint64_t total_cumulative;
+ uint64_t recycle_cumulative;
+ uint64_t temp_cumulative;
+ uint64_t total_diff;
+ uint64_t recycle_diff;
+ uint64_t temp_diff;
+ uint64_t valid;
+};
+
+/**
+ * Ring buffer of per-interval page-recycling stats; write indices wrap
+ * modulo IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT.
+ * @interval_time_in_ms: Collection interval in milliseconds
+ * @default_coal_stats_index: Next slot to write for the coal/default pipes
+ * @low_lat_stats_index: Next slot to write for the low-latency data pipe
+ */
+struct ipa_lnx_pipe_page_recycling_stats {
+ uint32_t interval_time_in_ms;
+ uint32_t default_coal_stats_index;
+ uint32_t low_lat_stats_index;
+ uint32_t sequence_id;
+ uint64_t reserved;
+ struct ipa_lnx_recycling_stats rx_channel[RX_CHANNEL_MAX][IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT];
+};
+
/* Explain below structures */
struct ipa_lnx_each_inst_alloc_info {
uint32_t pipes_client_type[SPEARHEAD_NUM_MAX_PIPES];
@@ -372,7 +409,7 @@
uint32_t num_eth_instances;
uint32_t num_usb_instances;
uint32_t num_mhip_instances;
- uint32_t reserved;
+ uint32_t num_page_rec_interval;
struct ipa_lnx_each_inst_alloc_info wlan_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
struct ipa_lnx_each_inst_alloc_info eth_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
struct ipa_lnx_each_inst_alloc_info usb_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];