author	Paul Cercueil <paul@crapouillou.net>	2023-12-15 14:13:13 +0100
committer	Vinod Koul <vkoul@kernel.org>	2023-12-21 21:39:56 +0530
commit	f60dfe0c561a8f1b8e30d3770997cbaa636f57f9 (patch)
tree	90d92f313157f49707eca4889ca09f4df8e55108 /drivers/dma
parent	238f68a08e19a612b8912c8697901e9982f97811 (diff)
download	linux-f60dfe0c561a8f1b8e30d3770997cbaa636f57f9.tar.gz
dmaengine: axi-dmac: Improve cyclic DMA transfers in SG mode
For cyclic transfers, chain the last descriptor to the first one, and
disable IRQ generation if there is no callback registered with the
cyclic transfer.

Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Link: https://lore.kernel.org/r/20231215131313.23840-6-paul@crapouillou.net
Signed-off-by: Vinod Koul <vkoul@kernel.org>
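As a consumer-side illustration (a minimal sketch, not part of the patch; the "rx" channel name and the surrounding device code are assumptions), this is the dmaengine usage pattern the change optimizes: a cyclic transfer submitted with no completion callback, which after this commit lets axi-dmac drop per-segment interrupts even in scatter-gather mode.

#include <linux/dmaengine.h>
#include <linux/err.h>

static int start_silent_cyclic_rx(struct device *dev, dma_addr_t buf,
				  size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");	/* assumed channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Prepare a cyclic transfer covering buf_len bytes per loop. */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, 0);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	/* No desc->callback is set, so the driver may elide IRQs. */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}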
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/dma-axi-dmac.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index f63acae511fb2..4e339c04fc1ea 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -285,12 +285,14 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 	/*
 	 * If the hardware supports cyclic transfers and there is no callback to
-	 * call and only a single segment, enable hw cyclic mode to avoid
-	 * unnecessary interrupts.
+	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
 	 */
-	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
-	    desc->num_sgs == 1)
-		flags |= AXI_DMAC_FLAG_CYCLIC;
+	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
+		if (chan->hw_sg)
+			desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
+		else if (desc->num_sgs == 1)
+			flags |= AXI_DMAC_FLAG_CYCLIC;
+	}
 
 	if (chan->hw_partial_xfer)
 		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
@@ -411,7 +413,6 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 	if (chan->hw_sg) {
 		if (active->cyclic) {
 			vchan_cyclic_callback(&active->vdesc);
-			start_next = true;
 		} else {
 			list_del(&active->vdesc.node);
 			vchan_cookie_complete(&active->vdesc);
@@ -667,7 +668,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
-	unsigned int num_periods, num_segments;
+	unsigned int num_periods, num_segments, num_sgs;
 
 	if (direction != chan->direction)
 		return NULL;
@@ -681,11 +682,16 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 	num_periods = buf_len / period_len;
 	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+	num_sgs = num_periods * num_segments;
 
-	desc = axi_dmac_alloc_desc(chan, num_periods * num_segments);
+	desc = axi_dmac_alloc_desc(chan, num_sgs);
 	if (!desc)
 		return NULL;
 
+	/* Chain the last descriptor to the first, and remove its "last" flag */
+	desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
+	desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;
+
 	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
 				period_len, desc->sg);
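As a conceptual footnote (a simplified sketch; the field and flag names below are assumptions modeled on the driver, not its real definitions), the effect of the new prep code is to turn the hardware scatter-gather list into a ring: the tail descriptor points back at the head and loses its "last" marker, so the controller loops indefinitely on its own. This is also why the start_next assignment removed from axi_dmac_transfer_done is no longer needed: the ring restarts itself without software help.

#include <linux/bits.h>
#include <linux/types.h>

#define HW_FLAG_LAST	BIT(0)	/* assumed: "end of chain" marker */
#define HW_FLAG_IRQ	BIT(1)	/* assumed: raise an IRQ at this node */

struct hw_desc {
	u64 next_sg_addr;	/* bus address of the next descriptor */
	u32 flags;
};

/* Close the chain into a ring; optionally silence the tail's IRQ. */
static void close_ring(struct hw_desc *descs, dma_addr_t first_phys,
		       unsigned int num_sgs, bool silent)
{
	struct hw_desc *tail = &descs[num_sgs - 1];

	tail->next_sg_addr = first_phys;	/* tail -> head */
	tail->flags &= ~HW_FLAG_LAST;		/* never terminate */
	if (silent)
		tail->flags &= ~HW_FLAG_IRQ;	/* no per-cycle interrupt */
}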