[PATCH] zoned vm counters: convert nr_mapped to per zone counter
nr_mapped is important because it lets us determine how many pages of a
zone are not mapped, which in turn gives us a more efficient way to decide
when memory in a zone needs to be reclaimed.
We take the nr_mapped field out of the page state structure and define a new
per zone counter named NR_FILE_MAPPED (the anonymous pages will be split off
from NR_MAPPED in the next patch).
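For orientation, the declaration side of this lives in include/linux/mmzone.h
and is not quoted in the hunks below; a minimal sketch of its assumed shape
(not the verbatim hunk) is:

	enum zone_stat_item {
		NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables */
		NR_VM_ZONE_STAT_ITEMS
	};

	struct zone {
		/* ... other fields ... */
		atomic_long_t	vm_stat[NR_VM_ZONE_STAT_ITEMS];
		/* ... other fields ... */
	};

Each zone carries one counter per item; mm/vmstat.c keeps the folded
counters approximately in sync via per-cpu differentials.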
We replace the uses of nr_mapped in various kernel locations. This avoids
looping over all processors in try_to_free_pages(), writeback and reclaim
(swap + zone reclaim).
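To see why: read_page_state() has to walk every online processor and sum a
field of the per-cpu struct page_state on every call, whereas
global_page_state() is a single read of a counter that is kept folded as
updates happen. In sketch form (assumed shapes, close to but not
necessarily the exact code at this point in the tree):

	/* Before: O(number of online cpus) on every read */
	static unsigned long __read_page_state(unsigned long offset)
	{
		unsigned long ret = 0;
		int cpu;

		for_each_online_cpu(cpu)
			ret += *(unsigned long *)
				((void *)&per_cpu(page_states, cpu) + offset);
		return ret;
	}

	/* After: one atomic read; clamp because unfolded per-cpu deltas
	   can leave the global value transiently negative */
	static inline unsigned long global_page_state(enum zone_stat_item item)
	{
		long x = atomic_long_read(&vm_stat[item]);

		if (x < 0)
			x = 0;
		return x;
	}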
[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4ec7026..60c7244 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -111,7 +111,7 @@
{
wbs->nr_dirty = read_page_state(nr_dirty);
wbs->nr_unstable = read_page_state(nr_unstable);
- wbs->nr_mapped = read_page_state(nr_mapped);
+ wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
wbs->nr_writeback = read_page_state(nr_writeback);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3a877fe..04dd2b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1319,7 +1319,7 @@
ps.nr_unstable,
nr_free_pages(),
ps.nr_slab,
- ps.nr_mapped,
+ global_page_state(NR_FILE_MAPPED),
ps.nr_page_table_pages);
for_each_zone(zone) {
diff --git a/mm/rmap.c b/mm/rmap.c
index e76909e..af5e980 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@
* nr_mapped state can be updated without turning off
* interrupts because it is not modified via interrupt.
*/
- __inc_page_state(nr_mapped);
+ __inc_zone_page_state(page, NR_FILE_MAPPED);
}
/**
@@ -499,7 +499,7 @@
void page_add_file_rmap(struct page *page)
{
if (atomic_inc_and_test(&page->_mapcount))
- __inc_page_state(nr_mapped);
+ __inc_zone_page_state(page, NR_FILE_MAPPED);
}
/**
@@ -531,7 +531,7 @@
*/
if (page_test_and_clear_dirty(page))
set_page_dirty(page);
- __dec_page_state(nr_mapped);
+ __dec_zone_page_state(page, NR_FILE_MAPPED);
}
}
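(An aside on the rmap.c hunks above: __inc_zone_page_state() and
__dec_zone_page_state() are the non-irq-safe fast path. The update goes to
a small per-cpu differential and is folded into the shared per-zone and
global atomics only when a threshold is crossed, which is why the retained
"without turning off interrupts" comment still holds. A sketch of the
assumed shape, with the real code in mm/vmstat.c:

	static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
	{
		s8 *p = zone_pcp(zone, smp_processor_id())->vm_stat_diff + item;

		(*p)++;
		if (unlikely(*p > STAT_THRESHOLD)) {
			zone_page_state_add(*p, zone, item);
			*p = 0;
		}
	}

__inc_zone_page_state(page, item) is then just this applied to
page_zone(page).)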
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eeacb0d..d2caf74 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -990,7 +990,7 @@
}
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
- sc.nr_mapped = read_page_state(nr_mapped);
+ sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
sc.nr_scanned = 0;
if (!priority)
disable_swap_token();
@@ -1075,7 +1075,7 @@
total_scanned = 0;
nr_reclaimed = 0;
sc.may_writepage = !laptop_mode;
- sc.nr_mapped = read_page_state(nr_mapped);
+ sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
inc_page_state(pageoutrun);
@@ -1407,7 +1407,7 @@
for (prio = DEF_PRIORITY; prio >= 0; prio--) {
unsigned long nr_to_scan = nr_pages - ret;
- sc.nr_mapped = read_page_state(nr_mapped);
+ sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
sc.nr_scanned = 0;
ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
@@ -1548,7 +1548,7 @@
struct scan_control sc = {
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
- .nr_mapped = read_page_state(nr_mapped),
+ .nr_mapped = global_page_state(NR_FILE_MAPPED),
.swap_cluster_max = max_t(unsigned long, nr_pages,
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 210f9bb..4800091 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -401,13 +401,16 @@
static char *vmstat_text[] = {
+ /* Zoned VM counters */
+ "nr_mapped",
+
+ /* Page state */
"nr_dirty",
"nr_writeback",
"nr_unstable",
"nr_page_table_pages",
- "nr_mapped",
"nr_slab",
"pgpgin",