Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	31
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9f9234..09874d695188 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
  *
  * @pool: to free the pages from
  * @free_all: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+			      gfp_t gfp)
 {
 	unsigned long irq_flags;
 	struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	if (NUM_PAGES_TO_ALLOC < nr_free)
 		npages_to_free = NUM_PAGES_TO_ALLOC;
 
-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-			GFP_KERNEL);
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
 	if (!pages_to_free) {
 		pr_err("Failed to allocate memory for pool free operation\n");
 		return 0;
@@ -382,32 +383,35 @@ out:
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  that means
- * this can deadlock when called a sc->gfp_mask that is not equal to
- * GFP_KERNEL.
+ * We need to pass sc->gfp_mask to ttm_page_pool_free().
  *
  * This code is crying out for a shrinker per pool....
  */
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	static atomic_t start_pool = ATOMIC_INIT(0);
+	static DEFINE_MUTEX(lock);
+	static unsigned start_pool;
 	unsigned i;
-	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned pool_offset;
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
 	unsigned long freed = 0;
 
-	pool_offset = pool_offset % NUM_POOLS;
+	if (!mutex_trylock(&lock))
+		return SHRINK_STOP;
+	pool_offset = ++start_pool % NUM_POOLS;
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
 		if (shrink_pages == 0)
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-		shrink_pages = ttm_page_pool_free(pool, nr_free);
+		shrink_pages = ttm_page_pool_free(pool, nr_free,
+						  sc->gfp_mask);
 		freed += nr_free - shrink_pages;
 	}
+	mutex_unlock(&lock);
 	return freed;
 }
 
@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 	if (npages)
-		ttm_page_pool_free(pool, npages);
+		ttm_page_pool_free(pool, npages, GFP_KERNEL);
 }
 
 /*
@@ -790,7 +794,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 	return 0;
 }
 
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
 		char *name)
 {
 	spin_lock_init(&pool->lock);
@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
 	ttm_pool_mm_shrink_fini(_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
-		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+				   GFP_KERNEL);
 
 	kobject_put(&_manager->kobj);
 	_manager = NULL;
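For context, the deadlock the removed comment warned about comes from ttm_page_pool_free() allocating its scratch page array with GFP_KERNEL while running from the shrinker: a GFP_KERNEL allocation may itself enter direct reclaim and re-invoke the shrinker. The patch therefore threads sc->gfp_mask down into the free path and swaps the atomic round-robin counter for a mutex_trylock(), so re-entrant or concurrent shrinker calls bail out with SHRINK_STOP instead of recursing or blocking. Below is a minimal sketch of that general pattern; my_shrink_scan() and my_pool_free() are hypothetical names for illustration only, not the actual TTM symbols, and the body of the free routine is elided.

/*
 * Illustrative sketch only: my_pool_free()/my_shrink_scan() are
 * hypothetical, condensed stand-ins for the pattern used in this patch.
 */
#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>
#include <linux/slab.h>

static unsigned long my_pool_free(unsigned long nr_free, gfp_t gfp)
{
	/* Scratch array allocated with the caller-supplied flags, so a
	 * reclaim-context caller never re-enters reclaim via GFP_KERNEL. */
	struct page **pages = kmalloc(nr_free * sizeof(*pages), gfp);

	if (!pages)
		return 0;
	/* ... unlink up to nr_free pages from the pool and free them ... */
	kfree(pages);
	return nr_free;
}

static unsigned long
my_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	unsigned long freed;

	/* Refuse re-entrant or concurrent calls rather than blocking inside
	 * reclaim; SHRINK_STOP tells the VM to move on to other shrinkers. */
	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	freed = my_pool_free(sc->nr_to_scan, sc->gfp_mask);
	mutex_unlock(&lock);
	return freed;
}

Non-reclaim callers (as in ttm_put_pages() and ttm_page_alloc_fini() above) can keep passing GFP_KERNEL explicitly, since they do not run inside the shrinker.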