Diffstat (limited to 'mm/memremap.c')
-rw-r--r--	mm/memremap.c	105
1 file changed, 63 insertions, 42 deletions
diff --git a/mm/memremap.c b/mm/memremap.c
index ed70c4e8e52a..32c79b51af86 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -21,13 +21,13 @@ DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 EXPORT_SYMBOL(devmap_managed_key);
 static atomic_t devmap_managed_enable;
 
-static void devmap_managed_enable_put(void *data)
+static void devmap_managed_enable_put(void)
 {
 	if (atomic_dec_and_test(&devmap_managed_enable))
 		static_branch_disable(&devmap_managed_key);
 }
 
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
 	if (!pgmap->ops || !pgmap->ops->page_free) {
 		WARN(1, "Missing page_free method\n");
@@ -36,13 +36,16 @@ static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgm
 	if (atomic_inc_return(&devmap_managed_enable) == 1)
 		static_branch_enable(&devmap_managed_key);
-	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
+	return 0;
 }
 #else
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
 	return -EINVAL;
 }
+static void devmap_managed_enable_put(void)
+{
+}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static void pgmap_array_delete(struct resource *res)
@@ -99,10 +102,8 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
 		pgmap->ref = NULL;
 }
 
-static void devm_memremap_pages_release(void *data)
+void memunmap_pages(struct dev_pagemap *pgmap)
 {
-	struct dev_pagemap *pgmap = data;
-	struct device *dev = pgmap->dev;
 	struct resource *res = &pgmap->res;
 	unsigned long pfn;
 	int nid;
@@ -129,8 +130,14 @@ static void devm_memremap_pages_release(void *data)
 	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 	pgmap_array_delete(res);
-	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
-		      "%s: failed to free all reserved pages\n", __func__);
+	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
+	devmap_managed_enable_put();
+}
+EXPORT_SYMBOL_GPL(memunmap_pages);
+
+static void devm_memremap_pages_release(void *data)
+{
+	memunmap_pages(data);
 }
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
@@ -141,27 +148,12 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 	complete(&pgmap->done);
 }
 
-/**
- * devm_memremap_pages - remap and provide memmap backing for the given resource
- * @dev: hosting device for @res
- * @pgmap: pointer to a struct dev_pagemap
- *
- * Notes:
- * 1/ At a minimum the res and type members of @pgmap must be initialized
- *    by the caller before passing it to this function
- *
- * 2/ The altmap field may optionally be initialized, in which case
- *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
- *
- * 3/ The ref field may optionally be provided, in which pgmap->ref must be
- *    'live' on entry and will be killed and reaped at
- *    devm_memremap_pages_release() time, or if this routine fails.
- *
- * 4/ res is expected to be a host memory range that could feasibly be
- *    treated as a "System RAM" range, i.e. not a device mmio range, but
- *    this is not enforced.
+/*
+ * Not device managed version of devm_memremap_pages, undone by
+ * memunmap_pages().  Please use devm_memremap_pages if you have a struct
+ * device available.
  */
-void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 {
 	struct resource *res = &pgmap->res;
 	struct dev_pagemap *conflict_pgmap;
@@ -172,7 +164,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		.altmap = pgmap_altmap(pgmap),
 	};
 	pgprot_t pgprot = PAGE_KERNEL;
-	int error, nid, is_ram;
+	int error, is_ram;
 	bool need_devmap_managed = true;
 
 	switch (pgmap->type) {
@@ -220,14 +212,14 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	}
 
 	if (need_devmap_managed) {
-		error = devmap_managed_enable_get(dev, pgmap);
+		error = devmap_managed_enable_get(pgmap);
 		if (error)
 			return ERR_PTR(error);
 	}
 
 	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
 	if (conflict_pgmap) {
-		dev_WARN(dev, "Conflicting mapping in same section\n");
+		WARN(1, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
 		error = -ENOMEM;
 		goto err_array;
@@ -235,7 +227,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 
 	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
 	if (conflict_pgmap) {
-		dev_WARN(dev, "Conflicting mapping in same section\n");
+		WARN(1, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
 		error = -ENOMEM;
 		goto err_array;
@@ -251,14 +243,11 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		goto err_array;
 	}
 
-	pgmap->dev = dev;
-
 	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
 				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
 	if (error)
 		goto err_array;
 
-	nid = dev_to_node(dev);
 	if (nid < 0)
 		nid = numa_mem_id();
 
@@ -314,12 +303,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 				PHYS_PFN(res->start),
 				PHYS_PFN(resource_size(res)), pgmap);
 	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
-
-	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
-			pgmap);
-	if (error)
-		return ERR_PTR(error);
-
 	return __va(res->start);
 
 err_add_memory:
@@ -331,8 +314,46 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 err_array:
 	dev_pagemap_kill(pgmap);
 	dev_pagemap_cleanup(pgmap);
+	devmap_managed_enable_put();
 	return ERR_PTR(error);
 }
+EXPORT_SYMBOL_GPL(memremap_pages);
+
+/**
+ * devm_memremap_pages - remap and provide memmap backing for the given resource
+ * @dev: hosting device for @res
+ * @pgmap: pointer to a struct dev_pagemap
+ *
+ * Notes:
+ * 1/ At a minimum the res and type members of @pgmap must be initialized
+ *    by the caller before passing it to this function
+ *
+ * 2/ The altmap field may optionally be initialized, in which case
+ *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
+ *
+ * 3/ The ref field may optionally be provided, in which case pgmap->ref
+ *    must be 'live' on entry and will be killed and reaped at
+ *    devm_memremap_pages_release() time, or if this routine fails.
+ *
+ * 4/ res is expected to be a host memory range that could feasibly be
+ *    treated as a "System RAM" range, i.e. not a device mmio range, but
+ *    this is not enforced.
+ */
+void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+{
+	int error;
+	void *ret;
+
+	ret = memremap_pages(pgmap, dev_to_node(dev));
+	if (IS_ERR(ret))
+		return ret;
+
+	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
+			pgmap);
+	if (error)
+		return ERR_PTR(error);
+	return ret;
+}
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
 
 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
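The point of the split is that memremap_pages()/memunmap_pages() can be called by code that has no struct device to hang a devres action on. Below is a minimal sketch of such a caller; everything prefixed my_ (the pgmap, the ops table, the range parameters) is invented for illustration. It assumes a MEMORY_DEVICE_PRIVATE mapping, which per the check in devmap_managed_enable_get() must supply a page_free method, and assumes the optional internal refcount support is present so pgmap->ref can be left NULL; in real code the physical range would typically come from a request_free_mem_region() variant rather than being passed in raw.

#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/numa.h>

/*
 * Hypothetical ops table: MEMORY_DEVICE_PRIVATE needs page_free (enforced
 * by devmap_managed_enable_get() above) and a migrate_to_ram handler.
 */
static void my_page_free(struct page *page)
{
	/* hand the page back to the driver's own allocator (elided) */
}

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	/* migrate the faulting device page back to system RAM (elided) */
	return VM_FAULT_SIGBUS;
}

static const struct dev_pagemap_ops my_pgmap_ops = {
	.page_free	= my_page_free,
	.migrate_to_ram	= my_migrate_to_ram,
};

static struct dev_pagemap my_pgmap;

static void *my_map(phys_addr_t my_base, resource_size_t my_size)
{
	my_pgmap.res.start = my_base;
	my_pgmap.res.end = my_base + my_size - 1;
	my_pgmap.type = MEMORY_DEVICE_PRIVATE;
	my_pgmap.ops = &my_pgmap_ops;

	/*
	 * No device to derive a node from, so it is passed explicitly;
	 * NUMA_NO_NODE falls back to numa_mem_id() inside memremap_pages().
	 * Returns a kernel address for the mapped range or an ERR_PTR().
	 */
	return memremap_pages(&my_pgmap, NUMA_NO_NODE);
}

static void my_unmap(void)
{
	/* undoes memremap_pages() and drops the devmap_managed count */
	memunmap_pages(&my_pgmap);
}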
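For device-bound callers nothing changes at the API level: devm_memremap_pages() now simply forwards dev_to_node(dev) and registers devm_memremap_pages_release() as a devres action. One detail worth noting is that devm_add_action_or_reset() runs the release callback itself when it cannot register the action, so the ERR_PTR return in that path hands back an already torn-down mapping rather than leaking it.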