author     Steve French <sfrench@us.ibm.com>    2011-01-31 04:17:03 +0000
committer  Steve French <sfrench@us.ibm.com>    2011-01-31 04:17:03 +0000
commit     58b8a5b45a097b477c037bc376e65dc5f214bf3d (patch)
tree       9fa2538a113fbd3aaa322d9c972aca3ca16b7e30 /mm
parent     ffeb414a59291d5891f09727beb793c109f19f08 (diff)
parent     70d1f365568e0cdbc9f4ab92428e1830fdb09ab0 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig              2
-rw-r--r--  mm/kmemleak-test.c      6
-rw-r--r--  mm/kmemleak.c          13
-rw-r--r--  mm/memcontrol.c        31
-rw-r--r--  mm/migrate.c            2
-rw-r--r--  mm/page_alloc.c        18
-rw-r--r--  mm/pgtable-generic.c    1
-rw-r--r--  mm/vmscan.c             3
8 files changed, 51 insertions, 25 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 3ad483bdf505..e9c0c61f2ddd 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -179,7 +179,7 @@ config SPLIT_PTLOCK_CPUS
config COMPACTION
bool "Allow for memory compaction"
select MIGRATION
- depends on EXPERIMENTAL && HUGETLB_PAGE && MMU
+ depends on MMU
help
Allows the compaction of memory for the allocation of huge pages.
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
index 177a5169bbde..ff0d9779cec8 100644
--- a/mm/kmemleak-test.c
+++ b/mm/kmemleak-test.c
@@ -75,13 +75,11 @@ static int __init kmemleak_test_init(void)
* after the module is removed.
*/
for (i = 0; i < 10; i++) {
- elem = kmalloc(sizeof(*elem), GFP_KERNEL);
- pr_info("kmemleak: kmalloc(sizeof(*elem)) = %p\n", elem);
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ pr_info("kmemleak: kzalloc(sizeof(*elem)) = %p\n", elem);
if (!elem)
return -ENOMEM;
- memset(elem, 0, sizeof(*elem));
INIT_LIST_HEAD(&elem->list);
-
list_add_tail(&elem->list, &test_list);
}
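
The kmemleak-test.c hunk above is a routine cleanup: kzalloc(size, flags) behaves like kmalloc(size, flags) followed by memset(ptr, 0, size), so the explicit memset can be dropped. A minimal sketch of the two patterns, using a hypothetical demo_elem structure (not from the patch) and assuming a kernel-module context:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct demo_elem {                      /* hypothetical example structure */
            struct list_head list;
            int value;
    };

    static struct demo_elem *demo_alloc_old(void)
    {
            /* old pattern: allocate, then zero by hand */
            struct demo_elem *e = kmalloc(sizeof(*e), GFP_KERNEL);

            if (!e)
                    return NULL;
            memset(e, 0, sizeof(*e));
            return e;
    }

    static struct demo_elem *demo_alloc_new(void)
    {
            /* new pattern: kzalloc() hands back already-zeroed memory */
            return kzalloc(sizeof(struct demo_elem), GFP_KERNEL);
    }

Both helpers return memory with every field cleared; the second is shorter and avoids the window in which the structure holds uninitialized data.
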
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index bd9bc214091b..84225f3b7190 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -113,7 +113,9 @@
#define BYTES_PER_POINTER sizeof(void *)
/* GFP bitmask for kmemleak internal allocations */
-#define GFP_KMEMLEAK_MASK (GFP_KERNEL | GFP_ATOMIC)
+#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+ __GFP_NORETRY | __GFP_NOMEMALLOC | \
+ __GFP_NOWARN)
/* scanning area inside a memory block */
struct kmemleak_scan_area {
@@ -511,9 +513,10 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
struct kmemleak_object *object;
struct prio_tree_node *node;
- object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
+ object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
if (!object) {
- kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
+ pr_warning("Cannot allocate a kmemleak_object structure\n");
+ kmemleak_disable();
return NULL;
}
@@ -734,9 +737,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
return;
}
- area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
+ area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
if (!area) {
- kmemleak_warn("Cannot allocate a scan area\n");
+ pr_warning("Cannot allocate a scan area\n");
goto out;
}
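
The kmemleak.c change above replaces a plain AND mask with a macro that filters the caller's flags down to the GFP_KERNEL/GFP_ATOMIC bits and also sets __GFP_NORETRY, __GFP_NOMEMALLOC and __GFP_NOWARN, so kmemleak's own metadata allocations give up quickly and quietly under memory pressure instead of dipping into reserves or spamming the log. A hedged sketch of how such a helper is typically used (the demo_* names are hypothetical, kernel-module context assumed):

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /*
     * Keep only the bits that are part of GFP_KERNEL or GFP_ATOMIC, so the
     * internal allocation never allows more than the caller's context did,
     * and force "fail fast, fail silently" behaviour on top of that.
     * Mirrors the idea behind gfp_kmemleak_mask().
     */
    #define demo_meta_gfp(gfp)      (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                     __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                     __GFP_NOWARN)

    static struct kmem_cache *demo_cache;   /* assumed to be created at init time */

    static void *demo_track(gfp_t caller_gfp)
    {
            /* metadata allocation inherits the caller's context but may
             * fail without retrying and without a warning */
            return kmem_cache_alloc(demo_cache, demo_meta_gfp(caller_gfp));
    }
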
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index db76ef726293..3878cfe399dc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1832,6 +1832,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
if (likely(!ret))
return CHARGE_OK;
+ res_counter_uncharge(&mem->res, csize);
mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
flags |= MEM_CGROUP_RECLAIM_NOSWAP;
} else
@@ -2144,6 +2145,8 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
unsigned long flags;
+ if (mem_cgroup_disabled())
+ return;
/*
* We have no races with charge/uncharge but will have races with
* page state accounting.
@@ -2233,7 +2236,12 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
{
int ret = -EINVAL;
unsigned long flags;
-
+ /*
+ * The page is isolated from LRU. So, collapse function
+ * will not handle this page. But page splitting can happen.
+ * Do this check under compound_page_lock(). The caller should
+ * hold it.
+ */
if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page))
return -EBUSY;
@@ -2265,7 +2273,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
struct cgroup *cg = child->css.cgroup;
struct cgroup *pcg = cg->parent;
struct mem_cgroup *parent;
- int charge = PAGE_SIZE;
+ int page_size = PAGE_SIZE;
unsigned long flags;
int ret;
@@ -2278,23 +2286,26 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
goto out;
if (isolate_lru_page(page))
goto put;
- /* The page is isolated from LRU and we have no race with splitting */
- charge = PAGE_SIZE << compound_order(page);
+
+ if (PageTransHuge(page))
+ page_size = HPAGE_SIZE;
parent = mem_cgroup_from_cont(pcg);
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, charge);
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask,
+ &parent, false, page_size);
if (ret || !parent)
goto put_back;
- if (charge > PAGE_SIZE)
+ if (page_size > PAGE_SIZE)
flags = compound_lock_irqsave(page);
- ret = mem_cgroup_move_account(pc, child, parent, true, charge);
+ ret = mem_cgroup_move_account(pc, child, parent, true, page_size);
if (ret)
- mem_cgroup_cancel_charge(parent, charge);
-put_back:
- if (charge > PAGE_SIZE)
+ mem_cgroup_cancel_charge(parent, page_size);
+
+ if (page_size > PAGE_SIZE)
compound_unlock_irqrestore(page, flags);
+put_back:
putback_lru_page(page);
put:
put_page(page);
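
The relocated put_back label in the memcontrol.c hunk follows the usual kernel goto-unwind discipline: each label must undo only what was actually acquired before the jump, so the compound-page unlock now sits above put_back and is no longer reached from the path that never took the lock. A generic sketch of that error-path pattern (hypothetical resources, not the memcontrol code):

    #include <stdlib.h>

    struct res { int used; };       /* stand-in for a lock, page, refcount, ... */

    static int demo_unwind(void)
    {
            struct res *a, *b;
            int ret = -1;

            a = malloc(sizeof(*a));
            if (!a)
                    goto out;       /* nothing acquired yet: plain return */

            b = malloc(sizeof(*b));
            if (!b)
                    goto put_a;     /* undo only the first acquisition */

            a->used = b->used = 1;  /* real work would happen here */
            ret = 0;

            free(b);                /* success path releases b, then shares
                                     * the same unwind as the error path */
    put_a:
            free(a);
    out:
            return ret;
    }

    int main(void)
    {
            return demo_unwind();
    }
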
diff --git a/mm/migrate.c b/mm/migrate.c
index 46fe8cc13d67..9f29a3b7aac2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -888,7 +888,7 @@ out:
* are movable anymore because to has become empty
* or no retryable pages exist anymore.
* Caller should call putback_lru_pages to return pages to the LRU
- * or free list.
+ * or free list only if ret != 0.
*
* Return: Number of pages not migrated or error code.
*/
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 90c1439549fd..a873e61e312e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1088,8 +1088,10 @@ static void drain_pages(unsigned int cpu)
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
- free_pcppages_bulk(zone, pcp->count, pcp);
- pcp->count = 0;
+ if (pcp->count) {
+ free_pcppages_bulk(zone, pcp->count, pcp);
+ pcp->count = 0;
+ }
local_irq_restore(flags);
}
}
@@ -2034,6 +2036,14 @@ restart:
*/
alloc_flags = gfp_to_alloc_flags(gfp_mask);
+ /*
+ * Find the true preferred zone if the allocation is unconstrained by
+ * cpusets.
+ */
+ if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
+ first_zones_zonelist(zonelist, high_zoneidx, NULL,
+ &preferred_zone);
+
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2192,7 +2202,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
get_mems_allowed();
/* The preferred zone is used for statistics later */
- first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
+ first_zones_zonelist(zonelist, high_zoneidx,
+ nodemask ? : &cpuset_current_mems_allowed,
+ &preferred_zone);
if (!preferred_zone) {
put_mems_allowed();
return NULL;
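
The "nodemask ? : &cpuset_current_mems_allowed" expression in the __alloc_pages_nodemask() hunk uses the GCC conditional-operator extension with the middle operand omitted: "a ?: b" means "a ? a : b", except that a is evaluated only once. Here it passes the caller's nodemask when one was supplied and falls back to the current cpuset's allowed nodes otherwise. A small, self-contained illustration of the operator (hypothetical names, userspace C, GCC assumed):

    #include <stdio.h>

    static const char *pick_mask(const char *explicit_mask, const char *fallback)
    {
            /* GCC extension: "x ?: y" yields x when x is non-NULL/non-zero,
             * otherwise y, evaluating x only once. */
            return explicit_mask ?: fallback;
    }

    int main(void)
    {
            printf("%s\n", pick_mask(NULL, "cpuset fallback"));              /* cpuset fallback */
            printf("%s\n", pick_mask("caller nodemask", "cpuset fallback")); /* caller nodemask */
            return 0;
    }
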
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 0369f5b3ba1b..eb663fb533e0 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -6,6 +6,7 @@
* Copyright (C) 2010 Linus Torvalds
*/
+#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f5d90dedebba..148c6e630df2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2083,7 +2083,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct zone *preferred_zone;
first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
- NULL, &preferred_zone);
+ &cpuset_current_mems_allowed,
+ &preferred_zone);
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
}
}