Diffstat (limited to 'drivers/dma')

 drivers/dma/at_hdmac.c          | 34 +++++++++++++++-------------------
 drivers/dma/at_hdmac_regs.h     | 10 +++++-----
 drivers/dma/ioat/dma.c          |  6 +++---
 drivers/dma/mv_xor_v2.c         |  1 +
 drivers/dma/xilinx/xilinx_dma.c |  8 ++++++--
 5 files changed, 30 insertions(+), 29 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c52718b37..adcae1077 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -252,6 +252,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
ATC_SPIP_BOUNDARY(first->boundary));
channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
ATC_DPIP_BOUNDARY(first->boundary));
+ /* Don't allow CPU to reorder channel enable. */
+ wmb();
dma_writel(atdma, CHER, atchan->mask);
vdbg_dump_regs(atchan);
@@ -312,7 +314,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
- u32 ctrla, dscr, trials;
+ u32 ctrla, dscr;
+ unsigned int i;
/*
* If the cookie doesn't match to the currently running transfer then
@@ -382,7 +385,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
dscr = channel_readl(atchan, DSCR);
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
- for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+ for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
u32 new_dscr;
rmb(); /* ensure DSCR is read after CTRLA */
@@ -408,7 +411,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
}
- if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+ if (unlikely(i == ATC_MAX_DSCR_TRIALS))
return -ETIMEDOUT;
/* for the first descriptor we can be more accurate */
@@ -556,10 +559,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
bad_desc = atc_first_active(atchan);
list_del_init(&bad_desc->desc_node);
- /* As we are stopped, take advantage to push queued descriptors
- * in active_list */
- list_splice_init(&atchan->queue, atchan->active_list.prev);
-
/* Try to restart the controller */
if (!list_empty(&atchan->active_list))
atc_dostart(atchan, atc_first_active(atchan));
@@ -680,19 +679,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&atchan->lock, flags);
cookie = dma_cookie_assign(tx);
- if (list_empty(&atchan->active_list)) {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
- desc->txd.cookie);
- atc_dostart(atchan, desc);
- list_add_tail(&desc->desc_node, &atchan->active_list);
- } else {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &atchan->queue);
- }
-
+ list_add_tail(&desc->desc_node, &atchan->queue);
spin_unlock_irqrestore(&atchan->lock, flags);
+ dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+ desc->txd.cookie);
return cookie;
}
@@ -1967,7 +1958,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
plat_dat->nr_channels);
- dma_async_device_register(&atdma->dma_common);
+ err = dma_async_device_register(&atdma->dma_common);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+ goto err_dma_async_device_register;
+ }
/*
* Do not return an error if the dmac node is not present in order to
@@ -1987,6 +1982,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
dma_pool_destroy(atdma->dma_desc_pool);
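
Note: the wmb() added to atc_dostart() is the standard ordering idiom for
descriptor-based DMA engines: all parameter writes must be visible to the
device before the MMIO write that enables the channel. A minimal sketch of
the pattern, assuming hypothetical REG_* offsets rather than the real
at_hdmac register map:

	/* Program the transfer parameters with relaxed accessors... */
	writel_relaxed(lower_32_bits(desc_phys), base + REG_DESC_ADDR);
	writel_relaxed(xfer_len, base + REG_XFER_LEN);

	/* ...then order them before the channel-enable doorbell. */
	wmb();
	writel_relaxed(BIT(chan_id), base + REG_CHAN_ENABLE);
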
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index ef3f227ce..00517cbd3 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -168,13 +168,13 @@
/* LLI == Linked List Item; aka DMA buffer descriptor */
struct at_lli {
/* values that are not changed by hardware */
- dma_addr_t saddr;
- dma_addr_t daddr;
+ u32 saddr;
+ u32 daddr;
/* value that may get written back: */
- u32 ctrla;
+ u32 ctrla;
/* more values that are not changed by hardware */
- u32 ctrlb;
- dma_addr_t dscr; /* chain to next lli */
+ u32 ctrlb;
+ u32 dscr; /* chain to next lli */
};
/**
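
Note: struct at_lli is fetched by the DMA controller itself, so each field
must stay exactly 32 bits wide regardless of the kernel's dma_addr_t, which
is 64-bit on LPAE and 64-bit configurations. A build-time check in this
spirit (an illustration, not part of the patch) would pin the layout:

	/* The hardware LLI is five consecutive 32-bit words. */
	static_assert(sizeof(struct at_lli) == 5 * sizeof(u32));
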
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 890cadf3e..e86a3d19b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -653,7 +653,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
if (active - i == 0) {
dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
__func__);
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
/* microsecond delay by sysfs variable per pending descriptor */
@@ -679,7 +679,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
if (chanerr &
(IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan);
}
}
@@ -876,7 +876,7 @@ static void check_active(struct ioatdma_chan *ioat_chan)
}
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
void ioat_timer_event(struct timer_list *t)
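
Note: mod_timer_pending() only pushes out a timer that is still pending;
unlike mod_timer(), it never re-activates an inactive one. That protects
the cleanup paths above against racing with teardown, roughly (a sketch,
not driver code):

	/* teardown side: stop the watchdog and wait for handlers */
	del_timer_sync(&ioat_chan->timer);

	/* cleanup side, possibly concurrent: a no-op once the timer
	 * has been deactivated, where mod_timer() would re-arm it */
	mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
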
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 462adf7e4..62864b3e1 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -906,6 +906,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
tasklet_kill(&xor_dev->irq_tasklet);
clk_disable_unprepare(xor_dev->clk);
+ clk_disable_unprepare(xor_dev->reg_clk);
return 0;
}
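
Note: the remove path must undo every clock enabled in probe; before this
change the register clock was left running. A simplified sketch of the
balanced pair, assuming the "reg" clock name and probe ordering rather than
quoting the driver:

	/* probe: register clock first, then the functional clock */
	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (!IS_ERR(xor_dev->reg_clk))
		clk_prepare_enable(xor_dev->reg_clk);

	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xor_dev->clk))
		clk_prepare_enable(xor_dev->clk);

	/* remove: disable in reverse order of acquisition */
	clk_disable_unprepare(xor_dev->clk);
	clk_disable_unprepare(xor_dev->reg_clk);
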
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 3f38df6b5..0ba70be4e 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2654,7 +2654,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(xdev->dev,
"missing xlnx,num-fstores property\n");
- return err;
+ goto disable_clks;
}
err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -2674,7 +2674,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ if (err < 0) {
+ dev_err(xdev->dev, "DMA mask error %d\n", err);
+ goto disable_clks;
+ }
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
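
Note: both xilinx_dma_probe() fixes apply the same probe() rule: once the
clocks are running, every failure must unwind through the shared label
rather than returning directly, and dma_set_mask_and_coherent() can
legitimately fail when the requested mask is wider than the platform
supports, so its return value needs checking. A condensed sketch of the
idiom with a hypothetical single clock, not the driver's actual clock set:

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *axi_clk;
		int err;

		axi_clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(axi_clk))
			return PTR_ERR(axi_clk);	/* nothing to unwind yet */

		err = clk_prepare_enable(axi_clk);
		if (err)
			return err;

		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
		if (err)
			goto disable_clks;		/* never a plain return */

		return 0;

	disable_clks:
		clk_disable_unprepare(axi_clk);
		return err;
	}
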