Diffstat (limited to 'mm/z3fold.c')
-rw-r--r--	mm/z3fold.c	10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 486550df32be..b2ba2ba585f3 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 
 	WARN_ON(!list_empty(&zhdr->buddy));
 	set_bit(PAGE_STALE, &page->private);
+	clear_bit(NEEDS_COMPACTING, &page->private);
 	spin_lock(&pool->lock);
 	if (!list_empty(&page->lru))
 		list_del(&page->lru);
@@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w)
 		list_del(&zhdr->buddy);
 		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
 			continue;
-		clear_bit(NEEDS_COMPACTING, &page->private);
 		spin_unlock(&pool->stale_lock);
 		cancel_work_sync(&zhdr->work);
 		free_z3fold_page(page);
@@ -624,10 +624,8 @@ lookup:
 	 * stale pages list. cancel_work_sync() can sleep so we must make
 	 * sure it won't be called in case we're in atomic context.
 	 */
-	if (zhdr && (can_sleep || !work_pending(&zhdr->work) ||
-	    !unlikely(work_busy(&zhdr->work)))) {
+	if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
 		list_del(&zhdr->buddy);
-		clear_bit(NEEDS_COMPACTING, &page->private);
 		spin_unlock(&pool->stale_lock);
 		if (can_sleep)
 			cancel_work_sync(&zhdr->work);
@@ -875,16 +873,18 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 				goto next;
 		}
 next:
+		spin_lock(&pool->lock);
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
+				spin_unlock(&pool->lock);
 				free_z3fold_page(page);
 				return 0;
 			}
 		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
 			atomic64_dec(&pool->pages_nr);
+			spin_unlock(&pool->lock);
 			return 0;
 		}
-		spin_lock(&pool->lock);
 		/*
 		 * Add to the beginning of LRU.
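The last hunk changes when pool->lock is held around the final page checks in z3fold_reclaim_page(): the lock is now taken before the checks and dropped on each early-return path, instead of being taken only afterwards. The userspace sketch below is a rough illustration of that lock discipline only; struct fake_pool, struct fake_page and reclaim_tail() are made-up stand-ins for the kernel types, with a pthread mutex playing the role of the pool spinlock.

/*
 * Illustrative userspace sketch (not kernel code): mirrors the locking
 * discipline of the reclaim-path hunk above -- acquire the pool lock
 * before the final checks, release it on each early-return path, and
 * keep holding it on the fallthrough that re-adds the page to the LRU.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct fake_pool {                 /* stand-in for struct z3fold_pool */
	pthread_mutex_t lock;      /* plays the role of pool->lock */
	long pages_nr;
};

struct fake_page {                 /* stand-in for the page/header pair */
	bool headless;
	int refcount;
};

/* Returns 0 if the page was freed, 1 if it stays on the LRU. */
static int reclaim_tail(struct fake_pool *pool, struct fake_page *page, int ret)
{
	pthread_mutex_lock(&pool->lock);          /* was: spin_lock(&pool->lock) */

	if (page->headless) {
		if (ret == 0) {
			pthread_mutex_unlock(&pool->lock);
			free(page);               /* was: free_z3fold_page(page) */
			return 0;
		}
	} else if (--page->refcount == 0) {       /* was: kref_put(..., release_z3fold_page) */
		pool->pages_nr--;                 /* was: atomic64_dec(&pool->pages_nr) */
		pthread_mutex_unlock(&pool->lock);
		free(page);                       /* the real code frees via the kref release callback */
		return 0;
	}

	/* Still holding the lock here, as the real code needs it to
	 * put the page back at the beginning of the LRU list. */
	pthread_mutex_unlock(&pool->lock);
	return 1;
}

int main(void)
{
	struct fake_pool pool = { .pages_nr = 1 };
	struct fake_page *page = calloc(1, sizeof(*page));

	pthread_mutex_init(&pool.lock, NULL);
	page->headless = true;
	return reclaim_tail(&pool, page, 0);      /* headless and evicted: page is freed */
}

The point of the sketch is only the ordering: every path that returns early unlocks first, and the fallthrough keeps the lock for the LRU re-add, which is what the added spin_lock()/spin_unlock() calls in the hunk arrange.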