[PATCH] zoned vm counters: convert nr_mapped to per zone counter

nr_mapped is important because it allows us to determine how many pages of a
zone are not mapped, which in turn allows a more efficient means of
determining when we need to reclaim memory in a zone.

We take the nr_mapped field out of the page state structure and define a new
per zone counter named NR_FILE_MAPPED (the anonymous pages will be split off
from NR_MAPPED in the next patch).

We replace the use of nr_mapped in various kernel locations.  This avoids
looping over all processors in try_to_free_pages(), writeback and reclaim
(swap + zone reclaim).
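
To make the cost difference concrete, here is a rough sketch of the two
read paths.  The bodies are simplified reconstructions of the interface this
series replaces and the one it introduces; treat the details (the per_cpu
page_states layout, the vm_stat array) as illustrative rather than quoted:

	/* Old interface: reading a page_state field walks every online
	 * CPU's per-cpu page_states copy -- O(nr_cpus) per read. */
	unsigned long __read_page_state(unsigned long offset)
	{
		unsigned long ret = 0;
		int cpu;

		for_each_online_cpu(cpu) {
			unsigned long in;

			in = (unsigned long)&per_cpu(page_states, cpu) + offset;
			ret += *((unsigned long *)in);
		}
		return ret;
	}

	/* New interface: a zoned VM counter is kept folded into a single
	 * global atomic, so a read is one atomic_long_read(). */
	static inline unsigned long global_page_state(enum zone_stat_item item)
	{
		long x = atomic_long_read(&vm_stat[item]);

		if (x < 0)
			x = 0;
		return x;
	}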

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 65ba55f500
parent 2244b95a7b
Author:    Christoph Lameter <clameter@sgi.com>
Date:      2006-06-30 01:55:34 -07:00
Committer: Linus Torvalds <torvalds@osdl.org>

10 changed files with 16 additions and 17 deletions

--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -61,7 +61,7 @@ void show_mem(void)
 	get_page_state(&ps);
 	printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
 	printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
-	printk(KERN_INFO "%lu pages mapped\n", ps.nr_mapped);
+	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
 	printk(KERN_INFO "%lu pages slab\n", ps.nr_slab);
 	printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
 }

--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -54,8 +54,6 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		ps.nr_dirty = 0;
 	if ((long)ps.nr_writeback < 0)
 		ps.nr_writeback = 0;
-	if ((long)ps.nr_mapped < 0)
-		ps.nr_mapped = 0;
 	if ((long)ps.nr_slab < 0)
 		ps.nr_slab = 0;
@@ -84,7 +82,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       nid, K(i.freeram - i.freehigh),
 		       nid, K(ps.nr_dirty),
 		       nid, K(ps.nr_writeback),
-		       nid, K(ps.nr_mapped),
+		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		       nid, K(ps.nr_slab));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
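
node_page_state() gives the same cheap read at node granularity.  A minimal
sketch, assuming the per-node value is simply the sum of the node's zone
counters (the real helper special-cases which zones are populated;
node_page_state_sketch() is a hypothetical name for illustration):

	/* Illustrative only: sum one zoned VM counter over a node. */
	static unsigned long node_page_state_sketch(int node,
						enum zone_stat_item item)
	{
		struct zone *zones = NODE_DATA(node)->node_zones;
		unsigned long v = 0;
		int i;

		for (i = 0; i < MAX_NR_ZONES; i++)
			v += zone_page_state(zones + i, item);
		return v;
	}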

--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -190,7 +190,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		K(i.freeswap),
 		K(ps.nr_dirty),
 		K(ps.nr_writeback),
-		K(ps.nr_mapped),
+		K(global_page_state(NR_FILE_MAPPED)),
 		K(ps.nr_slab),
 		K(allowed),
 		K(committed),

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,6 +47,9 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+	NR_FILE_MAPPED,	/* mapped into pagetables.
+			   only modified from process context */
+
 	NR_VM_ZONE_STAT_ITEMS };
 
 struct per_cpu_pages {
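
For reference, the write side that makes a per zone counter affordable comes
from the parent ZVC patch: updates accumulate in a small per-cpu delta and
are folded into the zone and global atomics only once a threshold is
crossed.  A simplified sketch (the threshold value and the per-cpu plumbing
are illustrative, not quoted):

	#define STAT_THRESHOLD 32	/* illustrative batch size */

	/* Fold a batched delta into the zone and global counters. */
	static inline void zone_page_state_add(long x, struct zone *zone,
						enum zone_stat_item item)
	{
		atomic_long_add(x, &zone->vm_stat[item]);
		atomic_long_add(x, &vm_stat[item]);
	}

	void __mod_zone_page_state(struct zone *zone,
				enum zone_stat_item item, int delta)
	{
		s8 *p = &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
		long x = delta + *p;

		if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
			zone_page_state_add(x, zone, item);
			x = 0;
		}
		*p = x;
	}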

--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -26,8 +26,6 @@ struct page_state {
 	unsigned long nr_writeback;	/* Pages under writeback */
 	unsigned long nr_unstable;	/* NFS unstable pages */
 	unsigned long nr_page_table_pages;/* Pages used for pagetables */
-	unsigned long nr_mapped;	/* mapped into pagetables.
-					 * only modified from process context */
 	unsigned long nr_slab;		/* In slab */
 #define GET_PAGE_STATE_LAST nr_slab

--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -111,7 +111,7 @@ static void get_writeback_state(struct writeback_state *wbs)
 {
 	wbs->nr_dirty = read_page_state(nr_dirty);
 	wbs->nr_unstable = read_page_state(nr_unstable);
-	wbs->nr_mapped = read_page_state(nr_mapped);
+	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
 	wbs->nr_writeback = read_page_state(nr_writeback);
 }

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1319,7 +1319,7 @@ void show_free_areas(void)
 		ps.nr_unstable,
 		nr_free_pages(),
 		ps.nr_slab,
-		ps.nr_mapped,
+		global_page_state(NR_FILE_MAPPED),
 		ps.nr_page_table_pages);
 
 	for_each_zone(zone) {

--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_page_state(nr_mapped);
+	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -499,7 +499,7 @@ void page_add_new_anon_rmap(struct page *page,
 void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount))
-		__inc_page_state(nr_mapped);
+		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -531,7 +531,7 @@ void page_remove_rmap(struct page *page)
 		 */
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
-		__dec_page_state(nr_mapped);
+		__dec_zone_page_state(page, NR_FILE_MAPPED);
 	}
 }
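
The __-prefixed helpers above skip interrupt protection, which is safe here
because, as the comment notes, NR_FILE_MAPPED is only modified from process
context.  A sketch of how the two variants relate, assuming the irq-safe
wrapper simply brackets the cheap one:

	/* Sketch: the non-__ variant is the __ variant under irq
	 * protection; rmap can use the cheaper form directly. */
	static inline void inc_zone_page_state(struct page *page,
						enum zone_stat_item item)
	{
		unsigned long flags;

		local_irq_save(flags);
		__inc_zone_page_state(page, item);
		local_irq_restore(flags);
	}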

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -990,7 +990,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-		sc.nr_mapped = read_page_state(nr_mapped);
+		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
@@ -1075,7 +1075,7 @@ loop_again:
 	total_scanned = 0;
 	nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode;
-	sc.nr_mapped = read_page_state(nr_mapped);
+	sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 
 	inc_page_state(pageoutrun);
@@ -1407,7 +1407,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (prio = DEF_PRIORITY; prio >= 0; prio--) {
 		unsigned long nr_to_scan = nr_pages - ret;
 
-		sc.nr_mapped = read_page_state(nr_mapped);
+		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
 		ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
@@ -1548,7 +1548,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
-		.nr_mapped = read_page_state(nr_mapped),
+		.nr_mapped = global_page_state(NR_FILE_MAPPED),
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,

--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -401,13 +401,13 @@ struct seq_operations fragmentation_op = {
 static char *vmstat_text[] = {
+	/* Zoned VM counters */
+	"nr_mapped",
+
+	/* Page state */
 	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
 	"nr_page_table_pages",
-	"nr_mapped",
 	"nr_slab",
 
 	"pgpgin",