Diffstat (limited to 'drivers/dma/xilinx')
 drivers/dma/xilinx/xilinx_dma.c | 44
 drivers/dma/xilinx/zynqmp_dma.c | 39
 2 files changed, 47 insertions(+), 36 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index c12442312595..02880963092f 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -190,6 +190,8 @@
/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE BIT(3)
+#define xilinx_prep_dma_addr_t(addr) \
+ ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
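Note on the new helper: the "##" pasting means the argument must be a descriptor field name that has a matching <field>_msb companion. For the fields used later in this patch the expansion is (worked out by hand, for illustration):

    xilinx_prep_dma_addr_t(hw->src_addr)
        -> ((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr)))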
/**
* struct xilinx_vdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
@@ -887,6 +889,24 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
chan->id);
return -ENOMEM;
}
+ /*
+ * For cyclic DMA mode we need to program the tail Descriptor
+ * register with a value which is not a part of the BD chain
+ * so allocating a desc segment during channel allocation for
+ * programming tail descriptor.
+ */
+ chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
+ sizeof(*chan->cyclic_seg_v),
+ &chan->cyclic_seg_p, GFP_KERNEL);
+ if (!chan->cyclic_seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate desc segment for cyclic DMA\n");
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS, chan->seg_v,
+ chan->seg_p);
+ return -ENOMEM;
+ }
+ chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
chan->seg_v[i].hw.next_desc =
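The allocation block is moved ahead of the BD-ring setup (its old location is removed in the next hunk), and its error path now has to release the seg_v pool allocated just before it. A minimal sketch of that allocate-then-unwind ordering, with hypothetical names rather than the driver's actual fields:

    #include <linux/dma-mapping.h>

    /* Hypothetical two-stage coherent allocation: if the second allocation
     * fails, the first one must be freed before returning, or it leaks.
     */
    static int example_alloc(struct device *dev, size_t sz_a, size_t sz_b)
    {
    	dma_addr_t pa, pb;
    	void *a, *b;

    	a = dma_alloc_coherent(dev, sz_a, &pa, GFP_KERNEL);
    	if (!a)
    		return -ENOMEM;

    	b = dma_alloc_coherent(dev, sz_b, &pb, GFP_KERNEL);
    	if (!b) {
    		dma_free_coherent(dev, sz_a, a, pa); /* undo the first allocation */
    		return -ENOMEM;
    	}

    	/* ... use a and b ... */
    	return 0;
    }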
@@ -922,24 +942,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
return -ENOMEM;
}
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- /*
- * For cyclic DMA mode we need to program the tail Descriptor
- * register with a value which is not a part of the BD chain
- * so allocating a desc segment during channel allocation for
- * programming tail descriptor.
- */
- chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
- sizeof(*chan->cyclic_seg_v),
- &chan->cyclic_seg_p, GFP_KERNEL);
- if (!chan->cyclic_seg_v) {
- dev_err(chan->dev,
- "unable to allocate desc segment for cyclic DMA\n");
- return -ENOMEM;
- }
- chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
- }
-
dma_cookie_init(dchan);
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
@@ -1245,8 +1247,10 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
hw = &segment->hw;
- xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
- xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
+ xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
+ xilinx_prep_dma_addr_t(hw->src_addr));
+ xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
+ xilinx_prep_dma_addr_t(hw->dest_addr));
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
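The point of the change above: hw->src_addr and hw->dest_addr are 32-bit descriptor fields, with the upper address bits held in the companion *_msb fields, so writing hw->src_addr on its own would truncate addresses above 4 GiB. Rebuilding the full dma_addr_t first lets xilinx_write() (which picks between a 64-bit and a 32-bit register write based on the channel's extended-addressing setting) program the complete address. A worked example with a hypothetical address:

    /* src = 0x1_2345_6000 is stored split across the descriptor:  */
    /*   hw->src_addr     = 0x23456000;                            */
    /*   hw->src_addr_msb = 0x00000001;                            */
    /* xilinx_prep_dma_addr_t(hw->src_addr) rebuilds 0x123456000.  */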
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index c74a88b65039..8db51750ce93 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -163,7 +163,7 @@ struct zynqmp_dma_desc_ll {
u32 ctrl;
u64 nxtdscraddr;
u64 rsvd;
-}; __aligned(64)
+};
/**
* struct zynqmp_dma_desc_sw - Per Transaction structure
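The dropped __aligned(64) was doing nothing useful: placed after the closing semicolon it no longer annotates struct zynqmp_dma_desc_ll, it just dangles ahead of the next declaration, which compilers typically ignore or warn about. If 64-byte alignment of the hardware descriptor were actually wanted, the attribute would have to come before the semicolon, for example (illustrative only, not part of this patch):

    struct zynqmp_dma_desc_ll {
    	/* ... descriptor fields as above ... */
    	u32 ctrl;
    	u64 nxtdscraddr;
    	u64 rsvd;
    } __aligned(64);	/* the attribute binds to the struct type here */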
@@ -375,9 +375,10 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct zynqmp_dma_chan *chan = to_chan(tx->chan);
struct zynqmp_dma_desc_sw *desc, *new;
dma_cookie_t cookie;
+ unsigned long irqflags;
new = tx_to_desc(tx);
- spin_lock_bh(&chan->lock);
+ spin_lock_irqsave(&chan->lock, irqflags);
cookie = dma_cookie_assign(tx);
if (!list_empty(&chan->pending_list)) {
@@ -393,7 +394,7 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
}
list_add_tail(&new->node, &chan->pending_list);
- spin_unlock_bh(&chan->lock);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
return cookie;
}
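This and the remaining zynqmp_dma.c hunks apply one and the same conversion: the _bh lock variants only disable softirqs, whereas the irqsave/irqrestore pair disables local interrupts and restores the previous state, making the channel lock safe to take from any context. The generic shape of the change (a sketch, not driver code):

    unsigned long flags;

    spin_lock_irqsave(&chan->lock, flags);      /* was: spin_lock_bh()   */
    /* ... critical section on the channel's descriptor lists ... */
    spin_unlock_irqrestore(&chan->lock, flags); /* was: spin_unlock_bh() */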
@@ -408,12 +409,13 @@ static struct zynqmp_dma_desc_sw *
zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
{
struct zynqmp_dma_desc_sw *desc;
+ unsigned long irqflags;
- spin_lock_bh(&chan->lock);
+ spin_lock_irqsave(&chan->lock, irqflags);
desc = list_first_entry(&chan->free_list,
struct zynqmp_dma_desc_sw, node);
list_del(&desc->node);
- spin_unlock_bh(&chan->lock);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
INIT_LIST_HEAD(&desc->tx_list);
/* Clear the src and dst descriptor memory */
@@ -643,10 +645,11 @@ static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
+ unsigned long irqflags;
- spin_lock_bh(&chan->lock);
+ spin_lock_irqsave(&chan->lock, irqflags);
zynqmp_dma_start_transfer(chan);
- spin_unlock_bh(&chan->lock);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
}
/**
@@ -667,10 +670,11 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
+ unsigned long irqflags;
- spin_lock_bh(&chan->lock);
+ spin_lock_irqsave(&chan->lock, irqflags);
zynqmp_dma_free_descriptors(chan);
- spin_unlock_bh(&chan->lock);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
dma_free_coherent(chan->dev,
(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
chan->desc_pool_v, chan->desc_pool_p);
@@ -743,8 +747,9 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
{
struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
u32 count;
+ unsigned long irqflags;
- spin_lock(&chan->lock);
+ spin_lock_irqsave(&chan->lock, irqflags);
if (chan->err) {
zynqmp_dma_reset(chan);
@@ -764,7 +769,7 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
zynqmp_dma_start_transfer(chan);
unlock:
- spin_unlock(&chan->lock);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
}
/**
@@ -776,11 +781,12 @@ unlock:
static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
+ unsigned long irqflags;
- spin_lock_bh(&chan->lock);
+ spin_lock_irqsave(&chan->lock, irqflags);
writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
zynqmp_dma_free_descriptors(chan);
- spin_unlock_bh(&chan->lock);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
return 0;
}
@@ -804,19 +810,20 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
void *desc = NULL, *prev = NULL;
size_t copy;
u32 desc_cnt;
+ unsigned long irqflags;
chan = to_chan(dchan);
desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
- spin_lock_bh(&chan->lock);
+ spin_lock_irqsave(&chan->lock, irqflags);
if (desc_cnt > chan->desc_free_cnt) {
- spin_unlock_bh(&chan->lock);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
return NULL;
}
chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
- spin_unlock_bh(&chan->lock);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
do {
/* Allocate and populate the descriptor */
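A closing note on the descriptor accounting in the hunk above: DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN) is the kernel's ceiling division, (len + max - 1) / max, so a copy longer than one descriptor's limit simply consumes more descriptors, and the prep routine gives up when fewer than that many are free. For instance, with a hypothetical 64 KiB per-descriptor limit:

    desc_cnt = DIV_ROUND_UP(102400, 65536);   /* 100 KiB copy -> 2 descriptors */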