Diffstat (limited to 'mm/ksm.c')
-rw-r--r--  mm/ksm.c  112
1 file changed, 83 insertions, 29 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
index 5b0894b45ee5..fc64874dc6f4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -25,7 +25,7 @@
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
-#include <linux/jhash.h>
+#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
@@ -296,6 +296,7 @@ static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);
static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
+static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);
@@ -597,7 +598,7 @@ static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
chain->chain_prune_time = jiffies;
chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
- chain->nid = -1; /* debug */
+ chain->nid = NUMA_NO_NODE; /* debug */
#endif
ksm_stable_node_chains++;
@@ -666,6 +667,12 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
free_stable_node(stable_node);
}
+enum get_ksm_page_flags {
+ GET_KSM_PAGE_NOLOCK,
+ GET_KSM_PAGE_LOCK,
+ GET_KSM_PAGE_TRYLOCK
+};
+
/*
* get_ksm_page: checks if the page indicated by the stable node
* is still its ksm page, despite having held no reference to it.
@@ -685,7 +692,8 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
* a page to put something that might look like our key in page->mapping.
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
+static struct page *get_ksm_page(struct stable_node *stable_node,
+ enum get_ksm_page_flags flags)
{
struct page *page;
void *expected_mapping;
@@ -705,8 +713,9 @@ again:
* case this node is no longer referenced, and should be freed;
* however, it might mean that the page is under page_ref_freeze().
* The __remove_mapping() case is easy, again the node is now stale;
- * but if page is swapcache in migrate_page_move_mapping(), it might
- * still be our page, in which case it's essential to keep the node.
+ * the same is in reuse_ksm_page() case; but if page is swapcache
+ * in migrate_page_move_mapping(), it might still be our page,
+ * in which case it's essential to keep the node.
*/
while (!get_page_unless_zero(page)) {
/*
@@ -727,8 +736,15 @@ again:
goto stale;
}
- if (lock_it) {
+ if (flags == GET_KSM_PAGE_TRYLOCK) {
+ if (!trylock_page(page)) {
+ put_page(page);
+ return ERR_PTR(-EBUSY);
+ }
+ } else if (flags == GET_KSM_PAGE_LOCK)
lock_page(page);
+
+ if (flags != GET_KSM_PAGE_NOLOCK) {
if (READ_ONCE(page->mapping) != expected_mapping) {
unlock_page(page);
put_page(page);
@@ -762,7 +778,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
struct page *page;
stable_node = rmap_item->head;
- page = get_ksm_page(stable_node, true);
+ page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
if (!page)
goto out;
@@ -862,7 +878,7 @@ static int remove_stable_node(struct stable_node *stable_node)
struct page *page;
int err;
- page = get_ksm_page(stable_node, true);
+ page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
if (!page) {
/*
* get_ksm_page did remove_node_from_stable_tree itself.
@@ -1009,7 +1025,7 @@ static u32 calc_checksum(struct page *page)
{
u32 checksum;
void *addr = kmap_atomic(page);
- checksum = jhash2(addr, PAGE_SIZE / 4, 17);
+ checksum = xxhash(addr, PAGE_SIZE, 0);
kunmap_atomic(addr);
return checksum;
}
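
The checksum swap is not a drop-in rename: jhash2() took the length in 32-bit words (PAGE_SIZE / 4) plus an initval of 17, while xxhash() takes the length in bytes and a seed of 0. On 64-bit kernels xxhash() is a thin wrapper around the 64-bit variant, so the u32 assignment in calc_checksum() truncates its result. A rough sketch of that wrapper, paraphrased from include/linux/xxhash.h of this era (treat the exact signature as an approximation):

	static inline unsigned long xxhash(const void *input, size_t length,
					   uint64_t seed)
	{
	#if BITS_PER_LONG == 64
		/* 64-bit hash; callers like calc_checksum() keep the low 32 bits */
		return xxh64(input, length, seed);
	#else
		return xxh32(input, length, seed);
	#endif
	}
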
@@ -1042,8 +1058,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
};
int swapped;
int err = -EFAULT;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
+ struct mmu_notifier_range range;
pvmw.address = page_address_in_vma(page, vma);
if (pvmw.address == -EFAULT)
@@ -1051,9 +1066,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
BUG_ON(PageTransCompound(page));
- mmun_start = pvmw.address;
- mmun_end = pvmw.address + PAGE_SIZE;
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+ mmu_notifier_range_init(&range, mm, pvmw.address,
+ pvmw.address + PAGE_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
if (!page_vma_mapped_walk(&pvmw))
goto out_mn;
@@ -1105,7 +1120,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
out_unlock:
page_vma_mapped_walk_done(&pvmw);
out_mn:
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ mmu_notifier_invalidate_range_end(&range);
out:
return err;
}
@@ -1129,8 +1144,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
spinlock_t *ptl;
unsigned long addr;
int err = -EFAULT;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
+ struct mmu_notifier_range range;
addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
@@ -1140,9 +1154,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
if (!pmd)
goto out;
- mmun_start = addr;
- mmun_end = addr + PAGE_SIZE;
- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+ mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
if (!pte_same(*ptep, orig_pte)) {
@@ -1188,7 +1201,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
pte_unmap_unlock(ptep, ptl);
err = 0;
out_mn:
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ mmu_notifier_invalidate_range_end(&range);
out:
return err;
}
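
Both write_protect_page() and replace_page() drop their local mmun_start/mmun_end pair in favour of a struct mmu_notifier_range that is initialized once and then handed to the start/end notifier calls. A minimal sketch of the pattern as this patch uses it (note that mmu_notifier_range_init() grew extra parameters in later kernels, so the 4-argument form below is specific to this series):

	struct mmu_notifier_range range;

	/* describe the span [addr, addr + PAGE_SIZE) once, up front */
	mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	/* ... take the PTE lock and rewrite the mapping ... */

	mmu_notifier_invalidate_range_end(&range);
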
@@ -1387,7 +1400,7 @@ static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
* stable_node parameter itself will be freed from
* under us if it returns NULL.
*/
- _tree_page = get_ksm_page(dup, false);
+ _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
if (!_tree_page)
continue;
nr += 1;
@@ -1510,7 +1523,7 @@ static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
if (!is_stable_node_chain(stable_node)) {
if (is_page_sharing_candidate(stable_node)) {
*_stable_node_dup = stable_node;
- return get_ksm_page(stable_node, false);
+ return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
}
/*
* _stable_node_dup set to NULL means the stable_node
@@ -1615,7 +1628,8 @@ again:
* wrprotected at all times. Any will work
* fine to continue the walk.
*/
- tree_page = get_ksm_page(stable_node_any, false);
+ tree_page = get_ksm_page(stable_node_any,
+ GET_KSM_PAGE_NOLOCK);
}
VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
if (!tree_page) {
@@ -1675,7 +1689,12 @@ again:
* It would be more elegant to return stable_node
* than kpage, but that involves more changes.
*/
- tree_page = get_ksm_page(stable_node_dup, true);
+ tree_page = get_ksm_page(stable_node_dup,
+ GET_KSM_PAGE_TRYLOCK);
+
+ if (PTR_ERR(tree_page) == -EBUSY)
+ return ERR_PTR(-EBUSY);
+
if (unlikely(!tree_page))
/*
* The tree may have been rebalanced,
@@ -1844,7 +1863,8 @@ again:
* wrprotected at all times. Any will work
* fine to continue the walk.
*/
- tree_page = get_ksm_page(stable_node_any, false);
+ tree_page = get_ksm_page(stable_node_any,
+ GET_KSM_PAGE_NOLOCK);
}
VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
if (!tree_page) {
@@ -2070,6 +2090,9 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
remove_rmap_item_from_tree(rmap_item);
if (kpage) {
+ if (PTR_ERR(kpage) == -EBUSY)
+ return;
+
err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
if (!err) {
/*
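
Because the stable-tree lookup can now hand back get_ksm_page()'s ERR_PTR(-EBUSY) as well as NULL or a pinned page, cmp_and_merge_page() has to tell "stable page is busy" apart from "no stable page" before using kpage; note the new check sits inside if (kpage), since an ERR_PTR value is non-NULL. A condensed sketch of the resulting three-way handling, using the identifiers from the patch:

	kpage = stable_tree_search(page);
	remove_rmap_item_from_tree(rmap_item);

	if (kpage) {
		if (PTR_ERR(kpage) == -EBUSY)
			return;	/* page locked elsewhere; retry on a later scan pass */

		/* kpage is a real, referenced stable page from here on */
		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		/* ... merge bookkeeping continues as in the unchanged code ... */
	}
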
@@ -2244,7 +2267,8 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
list_for_each_entry_safe(stable_node, next,
&migrate_nodes, list) {
- page = get_ksm_page(stable_node, false);
+ page = get_ksm_page(stable_node,
+ GET_KSM_PAGE_NOLOCK);
if (page)
put_page(page);
cond_resched();
@@ -2391,6 +2415,8 @@ static int ksmd_should_run(void)
static int ksm_scan_thread(void *nothing)
{
+ unsigned int sleep_ms;
+
set_freezable();
set_user_nice(current, 5);
@@ -2404,8 +2430,10 @@ static int ksm_scan_thread(void *nothing)
try_to_freeze();
if (ksmd_should_run()) {
- schedule_timeout_interruptible(
- msecs_to_jiffies(ksm_thread_sleep_millisecs));
+ sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
+ wait_event_interruptible_timeout(ksm_iter_wait,
+ sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
+ msecs_to_jiffies(sleep_ms));
} else {
wait_event_freezable(ksm_thread_wait,
ksmd_should_run() || kthread_should_stop());
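
The ksmd loop previously slept for a fixed schedule_timeout_interruptible() and so kept honouring a stale, possibly very long interval after sleep_millisecs was lowered. Now it samples the tunable once and parks on ksm_iter_wait with "the value changed" as the wake condition, and the sysfs store path (last hunk below) wakes that queue after writing the new value. The two cooperating halves, condensed from this patch:

	/* ksmd: sleep at most sleep_ms, but wake early if the knob changes */
	sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
	wait_event_interruptible_timeout(ksm_iter_wait,
		sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
		msecs_to_jiffies(sleep_ms));

	/* sysfs sleep_millisecs_store(): publish the value, then kick ksmd */
	ksm_thread_sleep_millisecs = msecs;
	wake_up_interruptible(&ksm_iter_wait);
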
@@ -2640,6 +2668,31 @@ again:
goto again;
}
+bool reuse_ksm_page(struct page *page,
+ struct vm_area_struct *vma,
+ unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+ if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
+ WARN_ON(!page_mapped(page)) ||
+ WARN_ON(!PageLocked(page))) {
+ dump_page(page, "reuse_ksm_page");
+ return false;
+ }
+#endif
+
+ if (PageSwapCache(page) || !page_stable_node(page))
+ return false;
+ /* Prohibit parallel get_ksm_page() */
+ if (!page_ref_freeze(page, 1))
+ return false;
+
+ page_move_anon_rmap(page, vma);
+ page->index = linear_page_index(vma, address);
+ page_ref_unfreeze(page, 1);
+
+ return true;
+}
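
reuse_ksm_page() is meant to let the write-fault (CoW) path keep an only-pte-mapped KSM page instead of copying it, and the page_ref_freeze(page, 1) / page_ref_unfreeze(page, 1) pair is what makes that safe against the lockless lookup in get_ksm_page(): freezing only succeeds when the caller holds the sole reference, and it drops the refcount to zero, so get_page_unless_zero() fails while the rmap and index are rewritten (which is also why the get_ksm_page() comment earlier in this diff now mentions reuse_ksm_page()). A compressed view of that exclusion, using only calls from the function above:

	if (!page_ref_freeze(page, 1))	/* refcount 1 -> 0; fails if anyone else holds a ref */
		return false;

	/* get_page_unless_zero() in get_ksm_page() now fails: safe to retarget */
	page_move_anon_rmap(page, vma);
	page->index = linear_page_index(vma, address);

	page_ref_unfreeze(page, 1);	/* refcount 0 -> 1; lookups may proceed again */
	return true;
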
#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
@@ -2824,6 +2877,7 @@ static ssize_t sleep_millisecs_store(struct kobject *kobj,
return -EINVAL;
ksm_thread_sleep_millisecs = msecs;
+ wake_up_interruptible(&ksm_iter_wait);
return count;
}