Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r--  sound/core/memalloc.c | 539
1 file changed, 390 insertions(+), 149 deletions(-)
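
The patch below replaces the per-type switch statements in snd_dma_alloc_pages() and snd_dma_free_pages() with a table of snd_malloc_ops looked up by buffer type. As a minimal standalone sketch of that dispatch pattern (illustrative userspace code with made-up names, not the kernel implementation):

/* Standalone illustration of an ops-table dispatch (hypothetical names). */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	int type;                       /* index into the ops table */
	void *area;
	size_t bytes;
};

struct buf_ops {
	void *(*alloc)(struct buf *b, size_t size);
	void (*free)(struct buf *b);
};

static void *plain_alloc(struct buf *b, size_t size) { return malloc(size); }
static void plain_free(struct buf *b) { free(b->area); }

static const struct buf_ops plain_ops = {
	.alloc = plain_alloc,
	.free = plain_free,
};

/* sparse table indexed by type; unset entries stay NULL, like dma_ops[] below */
static const struct buf_ops *ops_table[] = {
	[0] = &plain_ops,
};

static const struct buf_ops *get_ops(const struct buf *b)
{
	if (b->type < 0 ||
	    (size_t)b->type >= sizeof(ops_table) / sizeof(ops_table[0]))
		return NULL;
	return ops_table[b->type];
}

static int buf_alloc(struct buf *b, size_t size)
{
	const struct buf_ops *ops = get_ops(b);

	if (!ops || !ops->alloc)
		return -1;
	b->area = ops->alloc(b, size);
	if (!b->area)
		return -1;
	b->bytes = size;
	return 0;
}

int main(void)
{
	struct buf b = { .type = 0 };

	if (buf_alloc(&b, 64) == 0) {
		printf("allocated %zu bytes via ops table\n", b.bytes);
		get_ops(&b)->free(&b);
	}
	return 0;
}

The kernel version adds mmap, get_addr, get_page, and get_chunk_size hooks on top of the same idea, with each allocator backend providing its own const ops struct.
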
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 966bef5acc75..c7c943c661e6 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -15,99 +15,27 @@
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
+#include "memalloc_local.h"
-/*
- *
- * Bus-specific memory allocators
- *
- */
-
-#ifdef CONFIG_HAS_DMA
-/* allocate the coherent DMA pages */
-static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
-{
- gfp_t gfp_flags;
-
- gfp_flags = GFP_KERNEL
- | __GFP_COMP /* compound page lets parts be mapped */
- | __GFP_NORETRY /* don't trigger OOM-killer */
- | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
- dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
- gfp_flags);
-#ifdef CONFIG_X86
- if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
- set_memory_wc((unsigned long)dmab->area,
- PAGE_ALIGN(size) >> PAGE_SHIFT);
-#endif
-}
-
-/* free the coherent DMA pages */
-static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
-{
-#ifdef CONFIG_X86
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
- set_memory_wb((unsigned long)dmab->area,
- PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
-#endif
- dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
-}
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
-#ifdef CONFIG_GENERIC_ALLOCATOR
-/**
- * snd_malloc_dev_iram - allocate memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- * @size: number of bytes to allocate from the iram
- *
- * This function requires iram phandle provided via of_node
- */
-static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
+/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
+static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
+ gfp_t default_gfp)
{
- struct device *dev = dmab->dev.dev;
- struct gen_pool *pool = NULL;
-
- dmab->area = NULL;
- dmab->addr = 0;
-
- if (dev->of_node)
- pool = of_gen_pool_get(dev->of_node, "iram", 0);
-
- if (!pool)
- return;
-
- /* Assign the pool into private_data field */
- dmab->private_data = pool;
-
- dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
- PAGE_SIZE);
+ if (!dmab->dev.dev)
+ return default_gfp;
+ else
+ return (__force gfp_t)(unsigned long)dmab->dev.dev;
}
-/**
- * snd_free_dev_iram - free allocated specific memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- */
-static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
+static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
- struct gen_pool *pool = dmab->private_data;
-
- if (pool && dmab->area)
- gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
-}
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-#endif /* CONFIG_HAS_DMA */
-
-/*
- *
- * ALSA generic memory management
- *
- */
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
-static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
- gfp_t default_gfp)
-{
- if (!dev)
- return default_gfp;
- else
- return (__force gfp_t)(unsigned long)dev;
+ if (WARN_ON_ONCE(!ops || !ops->alloc))
+ return NULL;
+ return ops->alloc(dmab, size);
}
/**
@@ -126,8 +54,6 @@ static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
- gfp_t gfp;
-
if (WARN_ON(!size))
return -ENXIO;
if (WARN_ON(!dmab))
@@ -137,46 +63,10 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
dmab->dev.type = type;
dmab->dev.dev = device;
dmab->bytes = 0;
- dmab->area = NULL;
dmab->addr = 0;
dmab->private_data = NULL;
- switch (type) {
- case SNDRV_DMA_TYPE_CONTINUOUS:
- gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
- dmab->area = alloc_pages_exact(size, gfp);
- break;
- case SNDRV_DMA_TYPE_VMALLOC:
- gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
- dmab->area = __vmalloc(size, gfp);
- break;
-#ifdef CONFIG_HAS_DMA
-#ifdef CONFIG_GENERIC_ALLOCATOR
- case SNDRV_DMA_TYPE_DEV_IRAM:
- snd_malloc_dev_iram(dmab, size);
- if (dmab->area)
- break;
- /* Internal memory might have limited size and no enough space,
- * so if we fail to malloc, try to fetch memory traditionally.
- */
- dmab->dev.type = SNDRV_DMA_TYPE_DEV;
- fallthrough;
-#endif /* CONFIG_GENERIC_ALLOCATOR */
- case SNDRV_DMA_TYPE_DEV:
- case SNDRV_DMA_TYPE_DEV_UC:
- snd_malloc_dev_pages(dmab, size);
- break;
-#endif
-#ifdef CONFIG_SND_DMA_SGBUF
- case SNDRV_DMA_TYPE_DEV_SG:
- case SNDRV_DMA_TYPE_DEV_UC_SG:
- snd_malloc_sgbuf_pages(device, size, dmab, NULL);
- break;
-#endif
- default:
- pr_err("snd-malloc: invalid device type %d\n", type);
- return -ENXIO;
- }
- if (! dmab->area)
+ dmab->area = __snd_dma_alloc_pages(dmab, size);
+ if (!dmab->area)
return -ENOMEM;
dmab->bytes = size;
return 0;
@@ -217,7 +107,6 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
-
/**
* snd_dma_free_pages - release the allocated buffer
* @dmab: the buffer allocation record to release
@@ -226,32 +115,384 @@ EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
*/
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
- switch (dmab->dev.type) {
- case SNDRV_DMA_TYPE_CONTINUOUS:
- free_pages_exact(dmab->area, dmab->bytes);
- break;
- case SNDRV_DMA_TYPE_VMALLOC:
- vfree(dmab->area);
- break;
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->free)
+ ops->free(dmab);
+}
+EXPORT_SYMBOL(snd_dma_free_pages);
+
+/* called by devres */
+static void __snd_release_pages(struct device *dev, void *res)
+{
+ snd_dma_free_pages(res);
+}
+
+/**
+ * snd_devm_alloc_pages - allocate the buffer and manage with devres
+ * @dev: the device pointer
+ * @type: the DMA buffer type
+ * @size: the buffer size to allocate
+ *
+ * Allocate buffer pages depending on the given type and manage using devres.
+ * The pages will be released automatically at the device removal.
+ *
+ * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
+ * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
+ * SNDRV_DMA_TYPE_VMALLOC type.
+ *
+ * The function returns the snd_dma_buffer object at success, or NULL if failed.
+ */
+struct snd_dma_buffer *
+snd_devm_alloc_pages(struct device *dev, int type, size_t size)
+{
+ struct snd_dma_buffer *dmab;
+ int err;
+
+ if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
+ type == SNDRV_DMA_TYPE_VMALLOC))
+ return NULL;
+
+ dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
+ if (!dmab)
+ return NULL;
+
+ err = snd_dma_alloc_pages(type, dev, size, dmab);
+ if (err < 0) {
+ devres_free(dmab);
+ return NULL;
+ }
+
+ devres_add(dev, dmab);
+ return dmab;
+}
+EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);
+
+/**
+ * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
+ * @dmab: buffer allocation information
+ * @area: VM area information
+ */
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->mmap)
+ return ops->mmap(dmab, area);
+ else
+ return -ENOENT;
+}
+EXPORT_SYMBOL(snd_dma_buffer_mmap);
+
+/**
+ * snd_sgbuf_get_addr - return the physical address at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_addr)
+ return ops->get_addr(dmab, offset);
+ else
+ return dmab->addr + offset;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_addr);
+
+/**
+ * snd_sgbuf_get_page - return the physical page at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_page)
+ return ops->get_page(dmab, offset);
+ else
+ return virt_to_page(dmab->area + offset);
+}
+EXPORT_SYMBOL(snd_sgbuf_get_page);
+
+/**
+ * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
+ * on sg-buffer
+ * @dmab: buffer allocation information
+ * @ofs: offset in the ring buffer
+ * @size: the requested size
+ */
+unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_chunk_size)
+ return ops->get_chunk_size(dmab, ofs, size);
+ else
+ return size;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
+
+/*
+ * Continuous pages allocator
+ */
+static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
+ void *p = alloc_pages_exact(size, gfp);
+
+ if (p)
+ dmab->addr = page_to_phys(virt_to_page(p));
+ return p;
+}
+
+static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
+{
+ free_pages_exact(dmab->area, dmab->bytes);
+}
+
+static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return remap_pfn_range(area, area->vm_start,
+ dmab->addr >> PAGE_SHIFT,
+ area->vm_end - area->vm_start,
+ area->vm_page_prot);
+}
+
+static const struct snd_malloc_ops snd_dma_continuous_ops = {
+ .alloc = snd_dma_continuous_alloc,
+ .free = snd_dma_continuous_free,
+ .mmap = snd_dma_continuous_mmap,
+};
+
+/*
+ * VMALLOC allocator
+ */
+static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
+
+ return __vmalloc(size, gfp);
+}
+
+static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
+{
+ vfree(dmab->area);
+}
+
+static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return remap_vmalloc_range(area, dmab->area, 0);
+}
+
+#define get_vmalloc_page_addr(dmab, offset) \
+ page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
+
+static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ return vmalloc_to_page(dmab->area + offset);
+}
+
+static unsigned int
+snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ unsigned int start, end;
+ unsigned long addr;
+
+ start = ALIGN_DOWN(ofs, PAGE_SIZE);
+ end = ofs + size - 1; /* the last byte address */
+ /* check page continuity */
+ addr = get_vmalloc_page_addr(dmab, start);
+ for (;;) {
+ start += PAGE_SIZE;
+ if (start > end)
+ break;
+ addr += PAGE_SIZE;
+ if (get_vmalloc_page_addr(dmab, start) != addr)
+ return start - ofs;
+ }
+ /* ok, all on continuous pages */
+ return size;
+}
+
+static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
+ .alloc = snd_dma_vmalloc_alloc,
+ .free = snd_dma_vmalloc_free,
+ .mmap = snd_dma_vmalloc_mmap,
+ .get_addr = snd_dma_vmalloc_get_addr,
+ .get_page = snd_dma_vmalloc_get_page,
+ .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+
#ifdef CONFIG_HAS_DMA
+/*
+ * IRAM allocator
+ */
#ifdef CONFIG_GENERIC_ALLOCATOR
- case SNDRV_DMA_TYPE_DEV_IRAM:
- snd_free_dev_iram(dmab);
- break;
+static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct device *dev = dmab->dev.dev;
+ struct gen_pool *pool;
+ void *p;
+
+ if (dev->of_node) {
+ pool = of_gen_pool_get(dev->of_node, "iram", 0);
+ /* Assign the pool into private_data field */
+ dmab->private_data = pool;
+
+ p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
+ if (p)
+ return p;
+ }
+
+ /* Internal memory might have limited size and no enough space,
+ * so if we fail to malloc, try to fetch memory traditionally.
+ */
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+ return __snd_dma_alloc_pages(dmab, size);
+}
+
+static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
+{
+ struct gen_pool *pool = dmab->private_data;
+
+ if (pool && dmab->area)
+ gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
+}
+
+static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ return remap_pfn_range(area, area->vm_start,
+ dmab->addr >> PAGE_SHIFT,
+ area->vm_end - area->vm_start,
+ area->vm_page_prot);
+}
+
+static const struct snd_malloc_ops snd_dma_iram_ops = {
+ .alloc = snd_dma_iram_alloc,
+ .free = snd_dma_iram_free,
+ .mmap = snd_dma_iram_mmap,
+};
#endif /* CONFIG_GENERIC_ALLOCATOR */
- case SNDRV_DMA_TYPE_DEV:
- case SNDRV_DMA_TYPE_DEV_UC:
- snd_free_dev_pages(dmab);
- break;
+
+#define DEFAULT_GFP \
+ (GFP_KERNEL | \
+ __GFP_COMP | /* compound page lets parts be mapped */ \
+ __GFP_NORETRY | /* don't trigger OOM-killer */ \
+ __GFP_NOWARN) /* no stack trace print - this call is non-critical */
+
+/*
+ * Coherent device pages allocator
+ */
+static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ void *p;
+
+ p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
+#ifdef CONFIG_X86
+ if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
+ set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
+#endif
+ return p;
+}
+
+static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
+{
+#ifdef CONFIG_X86
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
+ set_memory_wb((unsigned long)dmab->area,
+ PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
+#endif
+ dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+#ifdef CONFIG_X86
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
#endif
+ return dma_mmap_coherent(dmab->dev.dev, area,
+ dmab->area, dmab->addr, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_dev_ops = {
+ .alloc = snd_dma_dev_alloc,
+ .free = snd_dma_dev_free,
+ .mmap = snd_dma_dev_mmap,
+};
+
+/*
+ * Write-combined pages
+ */
+#ifdef CONFIG_X86
+/* On x86, share the same ops as the standard dev ops */
+#define snd_dma_wc_ops snd_dma_dev_ops
+#else /* CONFIG_X86 */
+static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
+}
+
+static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
+{
+ dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return dma_mmap_wc(dmab->dev.dev, area,
+ dmab->area, dmab->addr, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_wc_ops = {
+ .alloc = snd_dma_wc_alloc,
+ .free = snd_dma_wc_free,
+ .mmap = snd_dma_wc_mmap,
+};
+#endif /* CONFIG_X86 */
+#endif /* CONFIG_HAS_DMA */
+
+/*
+ * Entry points
+ */
+static const struct snd_malloc_ops *dma_ops[] = {
+ [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
+ [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
+#ifdef CONFIG_HAS_DMA
+ [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
+ [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
+#ifdef CONFIG_GENERIC_ALLOCATOR
+ [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
+#endif /* CONFIG_GENERIC_ALLOCATOR */
+#endif /* CONFIG_HAS_DMA */
#ifdef CONFIG_SND_DMA_SGBUF
- case SNDRV_DMA_TYPE_DEV_SG:
- case SNDRV_DMA_TYPE_DEV_UC_SG:
- snd_free_sgbuf_pages(dmab);
- break;
+ [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+ [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif
- default:
- pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
- }
+};
+
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
+{
+ if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
+ dmab->dev.type >= ARRAY_SIZE(dma_ops)))
+ return NULL;
+ return dma_ops[dmab->dev.type];
}
-EXPORT_SYMBOL(snd_dma_free_pages);
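
For context, a sketch of how a driver might call the devres-managed allocator added by this patch; the probe function and buffer size are hypothetical, and it assumes a normal kernel build environment:

/* Hypothetical driver-side usage of the new entry points (sketch only). */
#include <linux/device.h>
#include <linux/errno.h>
#include <sound/memalloc.h>

static int example_probe(struct device *dev)
{
	struct snd_dma_buffer *buf;
	dma_addr_t addr;

	/* buffer is released automatically by devres at device removal */
	buf = snd_devm_alloc_pages(dev, SNDRV_DMA_TYPE_DEV, 4096);
	if (!buf)
		return -ENOMEM;

	/* address/page lookups now go through the per-type snd_malloc_ops */
	addr = snd_sgbuf_get_addr(buf, 0);
	dev_info(dev, "DMA buffer at %pad, %zu bytes\n", &addr, buf->bytes);

	return 0;
}

Note that snd_devm_alloc_pages() returns NULL on failure and, per its kerneldoc above, rejects SNDRV_DMA_TYPE_CONTINUOUS and SNDRV_DMA_TYPE_VMALLOC because it needs a real device pointer for the devres binding.
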