Diffstat (limited to 'drivers/dma')
 drivers/dma/Kconfig                            |  2
 drivers/dma/at_xdmac.c                         |  5
 drivers/dma/dmaengine.c                        |  7
 drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c |  6
 drivers/dma/imx-sdma.c                         |  4
 drivers/dma/pl330.c                            | 26
 drivers/dma/pxa_dma.c                          |  1
 drivers/dma/sh/rcar-dmac.c                     |  5
 drivers/dma/ste_dma40.c                        |  5
 drivers/dma/stm32-mdma.c                       |  8
 drivers/dma/ti/edma.c                          |  4
 drivers/dma/xilinx/xilinx_dma.c                | 78
 12 files changed, 116 insertions(+), 35 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e5f31af65aab..00e1ffa4fcf1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -212,6 +212,7 @@ config FSL_DMA
config FSL_EDMA
tristate "Freescale eDMA engine support"
depends on OF
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -258,6 +259,7 @@ config IMX_SDMA
config INTEL_IDMA64
tristate "Intel integrated DMA 64-bit support"
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a451ecae1669..33ea7abd8cc9 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -223,6 +223,7 @@ struct at_xdmac {
int irq;
struct clk *clk;
u32 save_gim;
+ u32 save_gs;
struct dma_pool *at_xdmac_desc_pool;
struct at_xdmac_chan chan[0];
};
@@ -1878,6 +1879,7 @@ static int atmel_xdmac_suspend(struct device *dev)
}
}
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+ atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
at_xdmac_off(atxdmac);
clk_disable_unprepare(atxdmac->clk);
@@ -1914,7 +1916,8 @@ static int atmel_xdmac_resume(struct device *dev)
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
wmb();
- at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+ if (atxdmac->save_gs & atchan->mask)
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
}
}
return 0;
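
The at_xdmac fix snapshots the global channel-status register (AT_XDMAC_GS) on suspend so that resume re-enables only the channels that were actually running, rather than unconditionally enabling every channel. A minimal userspace C sketch of that save-and-conditionally-restore pattern (struct and function names are hypothetical, not the driver's API):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical controller model: one status word, one bit per channel. */
    struct ctrl {
            uint32_t gs;            /* live status: bit set = channel running */
            uint32_t save_gs;       /* snapshot taken at suspend */
    };

    static void ctrl_suspend(struct ctrl *c)
    {
            c->save_gs = c->gs;     /* remember which channels were active */
            c->gs = 0;              /* controller powers down, everything stops */
    }

    static void ctrl_resume(struct ctrl *c, int nchan)
    {
            for (int i = 0; i < nchan; i++) {
                    uint32_t mask = 1u << i;

                    /* Re-enable only channels running before suspend. */
                    if (c->save_gs & mask)
                            c->gs |= mask;
            }
    }

    int main(void)
    {
            struct ctrl c = { .gs = 0x5 };  /* channels 0 and 2 running */

            ctrl_suspend(&c);
            ctrl_resume(&c, 4);
            printf("gs after resume: 0x%x\n", c.gs);        /* 0x5, not 0xf */
            return 0;
    }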
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8a52a5efee4f..e1cf7c250c33 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -223,7 +223,8 @@ static int dma_chan_get(struct dma_chan *chan)
/* The channel is already in use, update client count */
if (chan->client_count) {
__module_get(owner);
- goto out;
+ chan->client_count++;
+ return 0;
}
if (!try_module_get(owner))
@@ -236,11 +237,11 @@ static int dma_chan_get(struct dma_chan *chan)
goto err_out;
}
+ chan->client_count++;
+
if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
balance_ref_count(chan);
-out:
- chan->client_count++;
return 0;
err_out:
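
The dma_chan_get() reordering matters because balance_ref_count() acts according to client_count: the old code only incremented the count at the shared out label, after balancing, so the balance ran one client short. A standalone sketch of the corrected ordering (all names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    struct chan {
            int client_count;
            bool private_chan;
    };

    /*
     * Stand-in for balance_ref_count(): its work depends on client_count,
     * so the count must already include the new client when it runs.
     */
    static void balance_refs(struct chan *ch)
    {
            printf("balancing with client_count=%d\n", ch->client_count);
    }

    static int chan_get(struct chan *ch)
    {
            if (ch->client_count) {
                    ch->client_count++;     /* existing user: count and return */
                    return 0;
            }

            /* ... first-user setup (module ref, resources) goes here ... */

            ch->client_count++;             /* count the new client first ... */
            if (!ch->private_chan)
                    balance_refs(ch);       /* ... then do count-dependent work */
            return 0;
    }

    int main(void)
    {
            struct chan ch = { 0 };

            chan_get(&ch);  /* prints client_count=1, not 0 as before the fix */
            chan_get(&ch);
            return 0;
    }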
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c05ef7f1d7b6..99a40385267c 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -551,6 +551,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* The bad descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
/* Remove the completed descriptor from issued list */
list_del(&vd->node);
@@ -565,6 +570,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* Try to restart the controller */
axi_chan_start_first_queued(chan);
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
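
axi_chan_handle_err() previously dereferenced the return of vchan_next_desc() unconditionally, so a spurious error interrupt with an empty issued list would oops in IRQ context. The guard-then-goto shape, sketched here with a plain singly linked list (names hypothetical):

    #include <stdio.h>

    struct desc { struct desc *next; int id; };
    struct chan { struct desc *issued; };

    /* Stand-in for vchan_next_desc(): may legitimately return NULL. */
    static struct desc *next_desc(struct chan *ch)
    {
            return ch->issued;
    }

    static void handle_err_irq(struct chan *ch)
    {
            struct desc *d = next_desc(ch);

            if (!d) {
                    /* Spurious IRQ: nothing issued, nothing to complete. */
                    fprintf(stderr, "BUG: error IRQ with no descriptors\n");
                    goto out;
            }
            ch->issued = d->next;   /* remove the bad descriptor */
            printf("completed bad desc %d\n", d->id);
            /* ... report status, restart the controller ... */
    out:
            ;                       /* the driver drops its spinlock here */
    }

    int main(void)
    {
            struct chan empty = { .issued = NULL };
            struct desc d = { .next = NULL, .id = 7 };
            struct chan busy = { .issued = &d };

            handle_err_irq(&empty); /* logs instead of crashing */
            handle_err_irq(&busy);
            return 0;
    }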
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 709ead443fc5..5794d3120bb8 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1347,10 +1347,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
sdma_config_ownership(sdmac, false, true, false);
if (sdma_load_context(sdmac))
- goto err_desc_out;
+ goto err_bd_out;
return desc;
+err_bd_out:
+ sdma_free_bd(desc);
err_desc_out:
kfree(desc);
err_out:
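
The imx-sdma fix reroutes the sdma_load_context() failure through a new err_bd_out label so the buffer descriptors allocated earlier are freed before the descriptor itself; the old jump target leaked them. The staged-unwind idiom in a userspace sketch (hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>

    struct desc { int *bd; };

    static struct desc *transfer_init(int nbd, int fail_late)
    {
            struct desc *d = malloc(sizeof(*d));

            if (!d)
                    goto err_out;
            d->bd = calloc(nbd, sizeof(*d->bd));    /* buffer descriptors */
            if (!d->bd)
                    goto err_desc_out;
            if (fail_late)                  /* models sdma_load_context() failing */
                    goto err_bd_out;        /* must undo the bd allocation too */
            return d;

    err_bd_out:
            free(d->bd);            /* undo step 2 */
    err_desc_out:
            free(d);                /* undo step 1 */
    err_out:
            return NULL;
    }

    int main(void)
    {
            if (!transfer_init(4, 1))
                    puts("init failed, all allocations released");
            return 0;
    }

Each label undoes exactly the steps that succeeded before the jump, which is why the late failure must enter the chain one label higher than the early one.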
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 9cb1c4228205..1bba1fa3a809 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -405,6 +405,12 @@ enum desc_status {
*/
BUSY,
/*
+ * Pause was called while descriptor was BUSY. Due to hardware
+ * limitations, only termination is possible for descriptors
+ * that have been paused.
+ */
+ PAUSED,
+ /*
* Sitting on the channel work_list but xfer done
* by PL330 core
*/
@@ -1042,7 +1048,7 @@ static bool _trigger(struct pl330_thread *thrd)
return true;
}
-static bool _start(struct pl330_thread *thrd)
+static bool pl330_start_thread(struct pl330_thread *thrd)
{
switch (_state(thrd)) {
case PL330_STATE_FAULT_COMPLETING:
@@ -1690,7 +1696,7 @@ static int pl330_update(struct pl330_dmac *pl330)
thrd->req_running = -1;
/* Get going again ASAP */
- _start(thrd);
+ pl330_start_thread(thrd);
/* For now, just make a list of callbacks to be done */
list_add_tail(&descdone->rqd, &pl330->req_done);
@@ -2028,7 +2034,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
list_for_each_entry(desc, &pch->work_list, node) {
/* If already submitted */
- if (desc->status == BUSY)
+ if (desc->status == BUSY || desc->status == PAUSED)
continue;
ret = pl330_submit_req(pch->thread, desc);
@@ -2076,7 +2082,7 @@ static void pl330_tasklet(unsigned long data)
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
+ pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
}
@@ -2094,7 +2100,7 @@ static void pl330_tasklet(unsigned long data)
if (power_down) {
pch->active = true;
spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
+ pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = false;
}
@@ -2305,6 +2311,7 @@ static int pl330_pause(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
struct pl330_dmac *pl330 = pch->dmac;
+ struct dma_pl330_desc *desc;
unsigned long flags;
pm_runtime_get_sync(pl330->ddma.dev);
@@ -2314,6 +2321,10 @@ static int pl330_pause(struct dma_chan *chan)
_stop(pch->thread);
spin_unlock(&pl330->lock);
+ list_for_each_entry(desc, &pch->work_list, node) {
+ if (desc->status == BUSY)
+ desc->status = PAUSED;
+ }
spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
@@ -2404,7 +2415,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
else if (running && desc == running)
transferred =
pl330_get_current_xferred_count(pch, desc);
- else if (desc->status == BUSY)
+ else if (desc->status == BUSY || desc->status == PAUSED)
/*
* Busy but not running means either just enqueued,
* or finished and not yet marked done
@@ -2421,6 +2432,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
case DONE:
ret = DMA_COMPLETE;
break;
+ case PAUSED:
+ ret = DMA_PAUSED;
+ break;
case PREP:
case BUSY:
ret = DMA_IN_PROGRESS;
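
The pl330 series adds a PAUSED descriptor state: pl330_pause() marks BUSY work as PAUSED, fill_queue() refuses to resubmit it, and pl330_tx_status() can then report DMA_PAUSED instead of DMA_IN_PROGRESS. A compact state-machine sketch of that mapping (enum and function names hypothetical):

    #include <stdio.h>

    enum desc_status { PREP, BUSY, PAUSED, DONE };
    enum dma_status  { DMA_IN_PROGRESS, DMA_PAUSED, DMA_COMPLETE };

    static enum dma_status tx_status(enum desc_status s)
    {
            switch (s) {
            case DONE:
                    return DMA_COMPLETE;
            case PAUSED:
                    return DMA_PAUSED;      /* now distinguishable from BUSY */
            case PREP:
            case BUSY:
            default:
                    return DMA_IN_PROGRESS;
            }
    }

    static void pause_all(enum desc_status *list, int n)
    {
            for (int i = 0; i < n; i++)
                    if (list[i] == BUSY)
                            list[i] = PAUSED;  /* hw can only terminate these */
    }

    int main(void)
    {
            enum desc_status work[3] = { DONE, BUSY, PREP };

            pause_all(work, 3);
            for (int i = 0; i < 3; i++)
                    printf("desc %d -> status %d\n", i, tx_status(work[i]));
            return 0;
    }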
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index c54986902b9d..6dfd08dadb12 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -772,7 +772,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd)
dma_addr_t dma;
struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
- BUG_ON(sw_desc->nb_desc == 0);
for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
if (i > 0)
dma = sw_desc->hw_desc[i - 1]->ddadr;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 29c51762336d..e18d4116d9ab 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1816,7 +1816,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
dmac->dev->dma_parms = &dmac->parms;
- dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ if (ret)
+ return ret;
+
ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
if (ret)
return ret;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index e588dc5daaa8..d7d3bf7920ea 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3584,6 +3584,10 @@ static int __init d40_probe(struct platform_device *pdev)
spin_lock_init(&base->lcla_pool.lock);
base->irq = platform_get_irq(pdev, 0);
+ if (base->irq < 0) {
+ ret = base->irq;
+ goto destroy_cache;
+ }
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
if (ret) {
@@ -3681,6 +3685,7 @@ static int __init d40_probe(struct platform_device *pdev)
regulator_disable(base->lcpa_regulator);
regulator_put(base->lcpa_regulator);
}
+ pm_runtime_disable(base->dev);
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 3259c450544c..e18090f83bec 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -520,7 +520,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
src_maxburst = chan->dma_config.src_maxburst;
dst_maxburst = chan->dma_config.dst_maxburst;
- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
@@ -948,7 +948,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
if (!desc)
return NULL;
- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
@@ -1217,6 +1217,10 @@ static int stm32_mdma_resume(struct dma_chan *c)
unsigned long flags;
u32 status, reg;
+ /* Resume only a transfer that exists and whose channel is currently disabled, i.e. actually paused */
+ if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
+ return -EPERM;
+
hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
spin_lock_irqsave(&chan->vchan.lock, flags);
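
The stm32-mdma guard makes resume legal only when a descriptor exists and the channel is currently disabled, i.e. the transfer really is paused; anything else gets -EPERM before the function touches chan->desc. Sketched in plain C (the register is modelled as a plain field; EPERM comes from errno.h):

    #include <errno.h>
    #include <stdio.h>

    #define CCR_EN 0x1u             /* models STM32_MDMA_CCR_EN */

    struct chan {
            void *desc;             /* NULL when no transfer is set up */
            unsigned int ccr;       /* models the channel control register */
    };

    static int chan_resume(struct chan *ch)
    {
            /* No descriptor, or channel still enabled: nothing to resume. */
            if (!ch->desc || (ch->ccr & CCR_EN))
                    return -EPERM;

            ch->ccr |= CCR_EN;      /* re-enable the paused channel */
            return 0;
    }

    int main(void)
    {
            struct chan idle   = { .desc = NULL,      .ccr = 0 };
            struct chan paused = { .desc = (void *)1, .ccr = 0 };

            printf("idle:   %d\n", chan_resume(&idle));     /* -EPERM */
            printf("paused: %d\n", chan_resume(&paused));   /* 0 */
            return 0;
    }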
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 44158fa85973..3a1b37971bef 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2303,7 +2303,7 @@ static int edma_probe(struct platform_device *pdev)
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 0);
- if (irq >= 0) {
+ if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
@@ -2319,7 +2319,7 @@ static int edma_probe(struct platform_device *pdev)
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 2);
- if (irq >= 0) {
+ if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
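
The edma change tightens the IRQ test: platform_get_irq() and irq_of_parse_and_map() return a negative errno or 0 on failure, and IRQ 0 is not a valid Linux interrupt, so "found an IRQ" must be irq > 0, not irq >= 0. A tiny demo of the difference (get_irq() is a pass-through stand-in):

    #include <stdio.h>

    /*
     * Models platform_get_irq()/irq_of_parse_and_map(): < 0 is an error
     * code, 0 means "no mapping", and only > 0 is a usable IRQ number.
     */
    static int get_irq(int hw)
    {
            return hw;
    }

    static void probe(int hw)
    {
            int irq = get_irq(hw);

            if (irq > 0)            /* not: irq >= 0 */
                    printf("requesting irq %d\n", irq);
            else
                    printf("no irq (%d), skipping\n", irq);
    }

    int main(void)
    {
            probe(-19);     /* error code: skipped */
            probe(0);       /* invalid IRQ 0: skipped; the old >= 0 test
                               would have tried to request it */
            probe(25);      /* valid: requested */
            return 0;
    }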
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 0ba70be4ea85..5f9945651e95 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -164,7 +164,9 @@
#define XILINX_DMA_REG_BTT 0x28
/* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
@@ -428,6 +430,7 @@ struct xilinx_dma_config {
* @rxs_clk: DMA s2mm stream clock
* @nr_channels: Number of channels DMA device supports
* @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
*/
struct xilinx_dma_device {
void __iomem *regs;
@@ -447,6 +450,7 @@ struct xilinx_dma_device {
struct clk *rxs_clk;
u32 nr_channels;
u32 chan_id;
+ u32 max_buffer_len;
};
/* Macros */
@@ -970,6 +974,25 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
}
/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+ int size, int done)
+{
+ size_t copy;
+
+ copy = min_t(size_t, size - done,
+ chan->xdev->max_buffer_len);
+
+ return copy;
+}
+
+/**
* xilinx_dma_tx_status - Get DMA transaction status
* @dchan: DMA channel
* @cookie: Transaction identifier
@@ -1002,7 +1025,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
list_for_each_entry(segment, &desc->segments, node) {
hw = &segment->hw;
residue += (hw->control - hw->status) &
- XILINX_DMA_MAX_TRANS_LEN;
+ chan->xdev->max_buffer_len;
}
}
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1262,7 +1285,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & XILINX_DMA_MAX_TRANS_LEN);
+ hw->control & chan->xdev->max_buffer_len);
}
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1365,7 +1388,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & XILINX_DMA_MAX_TRANS_LEN);
+ hw->control & chan->xdev->max_buffer_len);
}
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1729,7 +1752,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct xilinx_cdma_tx_segment *segment;
struct xilinx_cdma_desc_hw *hw;
- if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+ if (!len || len > chan->xdev->max_buffer_len)
return NULL;
desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1819,8 +1842,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
* Calculate the maximum number of bytes to transfer,
* making sure it is less than the hw limit
*/
- copy = min_t(size_t, sg_dma_len(sg) - sg_used,
- XILINX_DMA_MAX_TRANS_LEN);
+ copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+ sg_used);
hw = &segment->hw;
/* Fill in the descriptor */
@@ -1924,8 +1947,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
* Calculate the maximum number of bytes to transfer,
* making sure it is less than the hw limit
*/
- copy = min_t(size_t, period_len - sg_used,
- XILINX_DMA_MAX_TRANS_LEN);
+ copy = xilinx_dma_calc_copysize(chan, period_len,
+ sg_used);
hw = &segment->hw;
xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
period_len * i);
@@ -2613,7 +2636,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
struct resource *io;
- u32 num_frames, addr_width;
+ u32 num_frames, addr_width, len_width;
int i, err;
/* Allocate and initialize the DMA engine structure */
@@ -2640,13 +2663,30 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Request and map I/O memory */
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xdev->regs = devm_ioremap_resource(&pdev->dev, io);
- if (IS_ERR(xdev->regs))
- return PTR_ERR(xdev->regs);
-
+ if (IS_ERR(xdev->regs)) {
+ err = PTR_ERR(xdev->regs);
+ goto disable_clks;
+ }
/* Retrieve the DMA engine properties from the device tree */
xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+ if (!of_property_read_u32(node, "xlnx,sg-length-width",
+ &len_width)) {
+ if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+ len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+ dev_warn(xdev->dev,
+ "invalid xlnx,sg-length-width property value. Using default width\n");
+ } else {
+ if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+ dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
+ xdev->max_buffer_len =
+ GENMASK(len_width - 1, 0);
+ }
+ }
+ }
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
err = of_property_read_u32(node, "xlnx,num-fstores",
@@ -2719,8 +2759,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Initialize the channels */
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
- if (err < 0)
- goto disable_clks;
+ if (err < 0) {
+ of_node_put(child);
+ goto error;
+ }
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2753,12 +2795,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return 0;
-disable_clks:
- xdma_disable_allclks(xdev);
error:
for (i = 0; i < xdev->nr_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+ xdma_disable_allclks(xdev);
return err;
}
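
The Xilinx changes make the AXI DMA transfer-length width configurable: the mask defaults to 23 bits, and an optional xlnx,sg-length-width property between 8 and 26 overrides it, with GENMASK(len_width - 1, 0) turning the bit width into the byte-count mask used everywhere the old XILINX_DMA_MAX_TRANS_LEN constant appeared. A userspace sketch of that width-to-mask computation (GENMASK_U32 is a stand-in for the kernel's GENMASK):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's GENMASK(h, l). */
    #define GENMASK_U32(h, l) \
            ((uint32_t)((~0u << (l)) & (~0u >> (31 - (h)))))

    #define LEN_MIN         8       /* XILINX_DMA_MAX_TRANS_LEN_MIN */
    #define LEN_DEFAULT     23      /* XILINX_DMA_MAX_TRANS_LEN_MAX */
    #define LEN_MAX_V2      26      /* XILINX_DMA_V2_MAX_TRANS_LEN_MAX */

    static uint32_t max_buffer_len(int len_width)
    {
            /* Out-of-range DT value: keep the default, as the driver does. */
            if (len_width < LEN_MIN || len_width > LEN_MAX_V2)
                    len_width = LEN_DEFAULT;
            return GENMASK_U32(len_width - 1, 0);
    }

    int main(void)
    {
            printf("default: 0x%x\n", max_buffer_len(0));   /* 0x7fffff  */
            printf("26-bit:  0x%x\n", max_buffer_len(26));  /* 0x3ffffff */
            printf("14-bit:  0x%x\n", max_buffer_len(14));  /* 0x3fff    */
            return 0;
    }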