Commit 347ce434 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] zoned vm counters: conversion of nr_pagecache to per zone counter

Currently a single atomic variable is used to track the size of the page
cache for the whole machine.  The zoned VM counters use the same
implementation method as the nr_pagecache code, but additionally allow the
page cache size to be determined per zone.

Remove the special implementation for nr_pagecache and make it a zoned counter
named NR_FILE_PAGES.

Updates of the page cache counters are always performed with interrupts off,
so the cheaper __ variants of the counter update functions can be used here
(see the sketch after the sign-off block).
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 65ba55f5
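
For readers of the diff below: the "__ variant" in the message refers to the
non-interrupt-safe ZVC update helpers.  A minimal illustrative sketch, not
part of the patch (names follow the ZVC API introduced earlier in this
series):

	/*
	 * Illustrative sketch, not part of the patch.
	 *
	 * inc_zone_page_state() is the interrupt-safe form: it disables
	 * interrupts around the per-cpu counter update.
	 * __inc_zone_page_state() assumes the caller already runs with
	 * interrupts off and skips the local_irq_save()/restore() pair.
	 *
	 * add_to_page_cache() below updates the counter under
	 * write_lock_irq(&mapping->tree_lock), i.e. with interrupts
	 * disabled, so the cheaper __ variant suffices:
	 */
	write_lock_irq(&mapping->tree_lock);
	__inc_zone_page_state(page, NR_FILE_PAGES);	/* irqs already off here */
	write_unlock_irq(&mapping->tree_lock);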
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -130,7 +130,8 @@ static void appldata_get_mem_data(void *data)
 	mem_data->totalhigh = P2K(val.totalhigh);
 	mem_data->freehigh = P2K(val.freehigh);
 	mem_data->bufferram = P2K(val.bufferram);
-	mem_data->cached = P2K(atomic_read(&nr_pagecache) - val.bufferram);
+	mem_data->cached = P2K(global_page_state(NR_FILE_PAGES)
+				- val.bufferram);
 
 	si_swapinfo(&val);
 	mem_data->totalswap = P2K(val.totalswap);
--- a/arch/sparc/kernel/sys_sunos.c
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -196,7 +196,7 @@ asmlinkage int sunos_brk(unsigned long brk)
 	 * simple, it hopefully works in most obvious cases.. Easy to
 	 * fool it, but this should catch most mistakes.
 	 */
-	freepages = get_page_cache_size();
+	freepages = global_page_state(NR_FILE_PAGES);
 	freepages >>= 1;
 	freepages += nr_free_pages();
 	freepages += nr_swap_pages;
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -155,7 +155,7 @@ asmlinkage int sunos_brk(u32 baddr)
 	 * simple, it hopefully works in most obvious cases.. Easy to
 	 * fool it, but this should catch most mistakes.
 	 */
-	freepages = get_page_cache_size();
+	freepages = global_page_state(NR_FILE_PAGES);
 	freepages >>= 1;
 	freepages += nr_free_pages();
 	freepages += nr_swap_pages;
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -69,6 +69,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       "Node %d LowFree: %8lu kB\n"
 		       "Node %d Dirty: %8lu kB\n"
 		       "Node %d Writeback: %8lu kB\n"
+		       "Node %d FilePages: %8lu kB\n"
 		       "Node %d Mapped: %8lu kB\n"
 		       "Node %d Slab: %8lu kB\n",
 		       nid, K(i.totalram),
@@ -82,6 +83,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       nid, K(i.freeram - i.freehigh),
 		       nid, K(ps.nr_dirty),
 		       nid, K(ps.nr_writeback),
+		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		       nid, K(ps.nr_slab));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -142,7 +142,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 	allowed = ((totalram_pages - hugetlb_total_pages())
 		* sysctl_overcommit_ratio / 100) + total_swap_pages;
 
-	cached = get_page_cache_size() - total_swapcache_pages - i.bufferram;
+	cached = global_page_state(NR_FILE_PAGES) -
+			total_swapcache_pages - i.bufferram;
 	if (cached < 0)
 		cached = 0;
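
Note on this hunk: after this patch NR_FILE_PAGES also counts swap cache
pages (see the mm/swap_state.c hunks below), which is why the swap cache and
buffer pages must be subtracted.  An illustrative restatement, not part of
the patch:

	long cached;

	cached = global_page_state(NR_FILE_PAGES)
			- total_swapcache_pages	/* swap cache pages are in NR_FILE_PAGES too */
			- i.bufferram;		/* block-device pages, reported as "Buffers" */
	if (cached < 0)				/* unflushed per-cpu deltas can make the sum lag */
		cached = 0;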
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -49,7 +49,7 @@ struct zone_padding {
 enum zone_stat_item {
 	NR_FILE_MAPPED,	/* mapped into pagetables.
 			   only modified from process context */
-
+	NR_FILE_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
 struct per_cpu_pages {
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
-	long *local;
-
-	local = &__get_cpu_var(nr_pagecache_local);
-	*local += count;
-	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-		atomic_add(*local, &nr_pagecache);
-		*local = 0;
-	}
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-	atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-	int ret = atomic_read(&nr_pagecache);
-	if (unlikely(ret < 0))
-		ret = 0;
-	return ret;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
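
For reference: the removed get_page_cache_size() clamps a possibly negative
approximate sum to zero; its ZVC replacement does the same on the read side.
A rough sketch of global_page_state() as introduced earlier in this series
(cf. include/linux/vmstat.h; details may differ):

	static inline unsigned long global_page_state(enum zone_stat_item item)
	{
		long x = atomic_long_read(&vm_stat[item]);
	#ifdef CONFIG_SMP
		/*
		 * Like the removed get_page_cache_size(), clamp transiently
		 * negative sums caused by unflushed per-cpu deltas.
		 */
		if (x < 0)
			x = 0;
	#endif
		return x;
	}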
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -120,7 +120,7 @@ void __remove_from_page_cache(struct page *page)
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
-	pagecache_acct(-1);
+	__dec_zone_page_state(page, NR_FILE_PAGES);
 }
 
 void remove_from_page_cache(struct page *page)
@@ -449,7 +449,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 		page->mapping = mapping;
 		page->index = offset;
 		mapping->nrpages++;
-		pagecache_acct(1);
+		__inc_zone_page_state(page, NR_FILE_PAGES);
 	}
 	write_unlock_irq(&mapping->tree_lock);
 	radix_tree_preload_end();
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -96,7 +96,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 		unsigned long n;
 
-		free = get_page_cache_size();
+		free = global_page_state(NR_FILE_PAGES);
 		free += nr_swap_pages;
 
 		/*
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1122,7 +1122,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 		unsigned long n;
 
-		free = get_page_cache_size();
+		free = global_page_state(NR_FILE_PAGES);
 		free += nr_swap_pages;
 
 		/*
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2124,16 +2124,11 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
 	int cpu = (unsigned long)hcpu;
-	long *count;
 	unsigned long *src, *dest;
 
 	if (action == CPU_DEAD) {
 		int i;
 
-		/* Drain local pagecache count. */
-		count = &per_cpu(nr_pagecache_local, cpu);
-		atomic_add(*count, &nr_pagecache);
-		*count = 0;
 		local_irq_disable();
 		__drain_pages(cpu);
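
The ad-hoc drain above can go because the generic vmstat code already folds
each CPU's counter deltas back into the zone and global counters.  A rough
sketch after mm/vmstat.c of this series (locking and hotplug details omitted;
not verbatim):

	void refresh_cpu_vm_stats(void)
	{
		struct zone *zone;
		int i;

		for_each_zone(zone) {
			struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());

			for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
				if (pcp->vm_stat_diff[i]) {
					/* fold this CPU's delta into zone and global counters */
					zone_page_state_add(pcp->vm_stat_diff[i], zone, i);
					pcp->vm_stat_diff[i] = 0;
				}
		}
	}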
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -87,7 +87,7 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
 		SetPageSwapCache(page);
 		set_page_private(page, entry.val);
 		total_swapcache_pages++;
-		pagecache_acct(1);
+		__inc_zone_page_state(page, NR_FILE_PAGES);
 	}
 	write_unlock_irq(&swapper_space.tree_lock);
 	radix_tree_preload_end();
@@ -132,7 +132,7 @@ void __delete_from_swap_cache(struct page *page)
 	set_page_private(page, 0);
 	ClearPageSwapCache(page);
 	total_swapcache_pages--;
-	pagecache_acct(-1);
+	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
 }
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -20,12 +20,6 @@
  */
 DEFINE_PER_CPU(struct page_state, page_states) = {0};
 
-atomic_t nr_pagecache = ATOMIC_INIT(0);
-EXPORT_SYMBOL(nr_pagecache);
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
-#endif
-
 static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
 	unsigned cpu;
@@ -402,6 +396,7 @@ struct seq_operations fragmentation_op = {
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_mapped",
+	"nr_file_pages",
 
 	/* Page state */
 	"nr_dirty",