author | Vitaly Wool <[email protected]> | 2019-09-23 15:39:43 -0700
committer | Linus Torvalds <[email protected]> | 2019-09-24 15:54:12 -0700
commit | 068619e32ff6229a09407d267e36ea7710b96ea1 (patch)
tree | ea940e56669826675ee347d6b2e11c298044e5e7
parent | d2fcd82bb83aab47c6d63aa8c960cd5edb578065 (diff)
zswap: do not map same object twice
zswap_writeback_entry() maps a handle to read the swpentry first, and then,
in the most common case, it maps the same handle again. This is fine when
zbud is the backend, since its mapping callback is plain and simple, but it
slows things down for z3fold.

Since there is hardly a point in unmapping a handle as quickly as
zswap_writeback_entry() does after reading the swpentry, keep the handle
mapped until the end instead.
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Vitaly Wool <[email protected]>
Reviewed-by: Dan Streetman <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Seth Jennings <[email protected]>
Cc: Vitaly Wool <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
-rw-r--r-- | mm/zswap.c | 7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 08b6cefae5d8..46a322316e52 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -856,7 +856,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	/* extract swpentry from data */
 	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 	swpentry = zhdr->swpentry; /* here */
-	zpool_unmap_handle(pool, handle);
 	tree = zswap_trees[swp_type(swpentry)];
 	offset = swp_offset(swpentry);
 
@@ -866,6 +865,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	if (!entry) {
 		/* entry was invalidated */
 		spin_unlock(&tree->lock);
+		zpool_unmap_handle(pool, handle);
 		return 0;
 	}
 	spin_unlock(&tree->lock);
@@ -886,15 +886,13 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
 		/* decompress */
 		dlen = PAGE_SIZE;
-		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
-				ZPOOL_MM_RO) + sizeof(struct zswap_header);
+		src = (u8 *)zhdr + sizeof(struct zswap_header);
 		dst = kmap_atomic(page);
 		tfm = *get_cpu_ptr(entry->pool->tfm);
 		ret = crypto_comp_decompress(tfm, src, entry->length,
 				dst, &dlen);
 		put_cpu_ptr(entry->pool->tfm);
 		kunmap_atomic(dst);
-		zpool_unmap_handle(entry->pool->zpool, entry->handle);
 		BUG_ON(ret);
 		BUG_ON(dlen != PAGE_SIZE);
 
@@ -940,6 +938,7 @@ fail:
 	spin_unlock(&tree->lock);
 
 end:
+	zpool_unmap_handle(pool, handle);
 	return ret;
 }
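To see the resulting discipline in isolation, here is a minimal, self-contained userspace sketch of the pattern the patch leaves behind: map the handle once up front, reuse that mapping for both the swpentry read and the decompress source, and unmap exactly once on every exit path. pool_map(), pool_unmap(), struct header and writeback_entry() are hypothetical stand-ins for illustration only, not the kernel's zpool API or the real zswap_writeback_entry().

/*
 * Minimal userspace model of the post-patch mapping discipline in
 * zswap_writeback_entry(): map the handle once, reuse the mapped header
 * for both the swpentry read and the decompress source, and unmap
 * exactly once on every exit path.
 *
 * pool_map()/pool_unmap() are hypothetical stand-ins for
 * zpool_map_handle()/zpool_unmap_handle(), not the kernel API.
 */
#include <stdio.h>

struct header {
	unsigned long swpentry;
	unsigned char payload[32];
};

static struct header *pool_map(unsigned long handle)
{
	/* stand-in: treat the handle as a plain pointer */
	return (struct header *)handle;
}

static void pool_unmap(unsigned long handle)
{
	(void)handle;	/* stand-in: nothing to undo in this model */
}

static int writeback_entry(unsigned long handle, int entry_still_valid)
{
	struct header *zhdr = pool_map(handle);	/* map once, up front */

	if (!entry_still_valid) {
		/* early return still pairs with exactly one unmap */
		pool_unmap(handle);
		return 0;
	}

	/* "decompress": reuse the existing mapping instead of remapping */
	printf("swpentry=%lu, payload at %p\n",
	       zhdr->swpentry, (void *)(zhdr + 1));

	pool_unmap(handle);	/* single unmap at the end */
	return 0;
}

int main(void)
{
	struct header h = { .swpentry = 42 };

	writeback_entry((unsigned long)&h, 1);
	writeback_entry((unsigned long)&h, 0);
	return 0;
}

The real function additionally takes and drops tree->lock and handles the various ZSWAP_SWAPCACHE_* cases between the map and the unmap; the sketch keeps only the map/unmap pairing that this patch changes.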