Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r--  sound/core/memalloc.c  64
1 file changed, 37 insertions, 27 deletions
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 03cffe771366..3cf5a87d69ea 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/genalloc.h>
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
@@ -20,7 +21,6 @@
 
 #define DEFAULT_GFP \
 	(GFP_KERNEL | \
-	 __GFP_COMP | /* compound page lets parts be mapped */ \
 	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
 	 __GFP_NOWARN) /* no stack trace print - this call is non-critical */
 
@@ -542,18 +542,18 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
 	void *p;
 
 	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
-				      DEFAULT_GFP, 0);
-	if (!sgt) {
+				      DEFAULT_GFP | __GFP_COMP, 0);
 #ifdef CONFIG_SND_DMA_SGBUF
+	if (!sgt && !get_dma_ops(dmab->dev.dev)) {
 		if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
 			dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
 		else
 			dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
 		return snd_dma_sg_fallback_alloc(dmab, size);
-#else
-		return NULL;
-#endif
 	}
+#endif
+	if (!sgt)
+		return NULL;
 
 	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
 					    sg_dma_address(sgt->sgl));
@@ -719,7 +719,6 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
 struct snd_dma_sg_fallback {
 	size_t count;
 	struct page **pages;
-	dma_addr_t *addrs;
 };
 
 static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
@@ -731,38 +730,49 @@ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
 	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
 		do_free_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
 	kvfree(sgbuf->pages);
-	kvfree(sgbuf->addrs);
 	kfree(sgbuf);
 }
 
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	struct snd_dma_sg_fallback *sgbuf;
-	struct page **pages;
-	size_t i, count;
+	struct page **pagep, *curp;
+	size_t chunk, npages;
+	dma_addr_t addr;
 	void *p;
 	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
 
 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
 	if (!sgbuf)
 		return NULL;
-	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
-		goto error;
-	sgbuf->pages = pages;
-	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
-	if (!sgbuf->addrs)
+	size = PAGE_ALIGN(size);
+	sgbuf->count = size >> PAGE_SHIFT;
+	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
+	if (!sgbuf->pages)
 		goto error;
 
-	for (i = 0; i < count; sgbuf->count++, i++) {
-		p = do_alloc_pages(dmab->dev.dev, PAGE_SIZE, &sgbuf->addrs[i], wc);
-		if (!p)
-			goto error;
-		sgbuf->pages[i] = virt_to_page(p);
+	pagep = sgbuf->pages;
+	chunk = size;
+	while (size > 0) {
+		chunk = min(size, chunk);
+		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
+		if (!p) {
+			if (chunk <= PAGE_SIZE)
+				goto error;
+			chunk >>= 1;
+			chunk = PAGE_SIZE << get_order(chunk);
+			continue;
+		}
+
+		size -= chunk;
+		/* fill pages */
+		npages = chunk >> PAGE_SHIFT;
+		curp = virt_to_page(p);
+		while (npages--)
+			*pagep++ = curp++;
 	}
 
-	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
+	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
 	if (!p)
 		goto error;
 	dmab->private_data = sgbuf;
@@ -810,7 +820,7 @@ static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
 	void *p;
 
 	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
-				  dmab->dev.dir, DEFAULT_GFP);
+				  dmab->dev.dir, DEFAULT_GFP | __GFP_COMP);
 	if (p)
 		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
 	return p;
@@ -857,7 +867,7 @@ static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
 /*
  * Entry points
  */
-static const struct snd_malloc_ops *dma_ops[] = {
+static const struct snd_malloc_ops *snd_dma_ops[] = {
 	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
 	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
 #ifdef CONFIG_HAS_DMA
@@ -883,7 +893,7 @@ static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
 	if (WARN_ON_ONCE(!dmab))
 		return NULL;
 	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
-			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
+			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
 		return NULL;
-	return dma_ops[dmab->dev.type];
+	return snd_dma_ops[dmab->dev.type];
 }
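
The reworked snd_dma_sg_fallback_alloc() above builds the buffer from progressively smaller contiguous chunks instead of allocating strictly page by page. The following is a minimal userspace sketch of that halving-on-failure strategy; the names (struct chunk, alloc_chunked, FAKE_PAGE_SIZE) are hypothetical and plain malloc() stands in for do_alloc_pages(), so it only illustrates the retry logic, not the kernel code.

/*
 * Illustrative sketch only: carve `size` bytes into separate chunks,
 * halving the requested chunk size whenever an allocation fails, and
 * giving up once even a single "page" cannot be allocated.
 */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096UL

struct chunk {
	void *buf;
	size_t len;
	struct chunk *next;
};

static void free_chunks(struct chunk *c)
{
	while (c) {
		struct chunk *next = c->next;
		free(c->buf);
		free(c);
		c = next;
	}
}

static struct chunk *alloc_chunked(size_t size)
{
	struct chunk *head = NULL;
	size_t chunk = size;

	while (size > 0) {
		if (chunk > size)
			chunk = size;		/* like min(size, chunk) */
		void *p = malloc(chunk);	/* stand-in for do_alloc_pages() */
		if (!p) {
			if (chunk <= FAKE_PAGE_SIZE)
				goto error;	/* even one page failed: give up */
			chunk >>= 1;		/* retry with a smaller chunk */
			continue;
		}
		struct chunk *c = malloc(sizeof(*c));
		if (!c) {
			free(p);
			goto error;
		}
		c->buf = p;
		c->len = chunk;
		c->next = head;
		head = c;
		size -= chunk;
	}
	return head;

error:
	free_chunks(head);
	return NULL;
}

int main(void)
{
	struct chunk *list = alloc_chunked(10 * FAKE_PAGE_SIZE);

	for (struct chunk *c = list; c; c = c->next)
		printf("chunk: %zu bytes\n", c->len);
	free_chunks(list);
	return 0;
}

The kernel version additionally rounds the halved size back to a power-of-two page order (PAGE_SIZE << get_order(chunk)) and records every allocated page in sgbuf->pages so the chunks can later be stitched into one virtual mapping with vmap().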