From 5b3168763f507fd46285b7310fc2d18dafe7f1c7 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Mon, 9 Jan 2012 10:32:49 +0100 Subject: dma: imx-dma: start transfer in issue_pending The DMA engine API requires that transfers are started in issue_pending instead of tx_submit. Fix this. Signed-off-by: Sascha Hauer [corrected change log to DMA engine API instead of DMA API] Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index e4383ee2c9ac..3296a7337f25 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -186,8 +186,6 @@ static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) cookie = imxdma_assign_cookie(imxdmac); - imx_dma_enable(imxdmac->imxdma_channel); - spin_unlock_irq(&imxdmac->lock); return cookie; @@ -332,9 +330,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( static void imxdma_issue_pending(struct dma_chan *chan) { - /* - * Nothing to do. We only have a single descriptor - */ + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); + + if (imxdmac->status == DMA_IN_PROGRESS) + imx_dma_enable(imxdmac->imxdma_channel); } static int __init imxdma_probe(struct platform_device *pdev) -- cgit v1.2.3
From 865d9438eb1f7670d2e88849f059db551b320887 Mon Sep 17 00:00:00 2001 From: Danny Kukawka Date: Wed, 15 Feb 2012 20:20:26 +0100 Subject: drivers/dma: linux/module.h included twice drivers/dma/imx-dma.c and drivers/dma/imx-sdma.c included 'linux/module.h' twice; remove the duplicates. Signed-off-by: Danny Kukawka Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 3296a7337f25..b51391adb289 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -24,7 +24,6 @@ #include #include #include -#include <linux/module.h> #include #include -- cgit v1.2.3
From 6c05f09155f40368c51ce00b8291401858e49bcb Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Tue, 28 Feb 2012 17:08:17 +0100 Subject: dmaengine: Add support for MEMCPY for imx-dma. MEMCPY transfers allow DMA copies from memory to memory. This patch has been tested with the dmatest device driver.
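For context, a minimal client-side sketch of driving the new MEMCPY capability through the dmaengine API of this era; it is illustrative only (do_dma_copy() is a hypothetical helper, error handling is trimmed, and dst/src are assumed to be DMA-mapped already):

#include <linux/dmaengine.h>

static int do_dma_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        dma_cookie_t cookie;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        chan = dma_request_channel(mask, NULL, NULL);   /* any MEMCPY channel */
        if (!chan)
                return -ENODEV;

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                  DMA_CTRL_ACK);
        if (!tx) {
                dma_release_channel(chan);
                return -ENOMEM;
        }

        cookie = tx->tx_submit(tx);     /* only queues the descriptor */
        dma_async_issue_pending(chan);  /* actually starts it (see the first patch above) */

        return dma_submit_error(cookie) ? -EIO : 0;
}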
Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index b51391adb289..9a1797873169 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -195,7 +195,8 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct imx_dma_data *data = chan->private; - imxdmac->dma_request = data->dma_request; + if (data != NULL) + imxdmac->dma_request = data->dma_request; dma_async_tx_descriptor_init(&imxdmac->desc, chan); imxdmac->desc.tx_submit = imxdma_tx_submit; @@ -327,6 +328,36 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( return &imxdmac->desc; } +static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); + struct imxdma_engine *imxdma = imxdmac->imxdma; + int ret; + + dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", + __func__, imxdmac->channel, src, dest, len); + + if (imxdmac->status == DMA_IN_PROGRESS) + return NULL; + imxdmac->status = DMA_IN_PROGRESS; + + ret = imx_dma_config_channel(imxdmac->imxdma_channel, + IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, + IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, + 0, 0); + if (ret) + return NULL; + + ret = imx_dma_setup_single(imxdmac->imxdma_channel, src, len, + dest, DMA_MODE_WRITE); + if (ret) + return NULL; + + return &imxdmac->desc; +} + static void imxdma_issue_pending(struct dma_chan *chan) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); @@ -348,6 +379,7 @@ static int __init imxdma_probe(struct platform_device *pdev) dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); + dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask); /* Initialize channel parameters */ for (i = 0; i < MAX_DMA_CHANNELS; i++) { @@ -381,11 +413,13 @@ static int __init imxdma_probe(struct platform_device *pdev) imxdma->dma_device.device_tx_status = imxdma_tx_status; imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; + imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; imxdma->dma_device.device_control = imxdma_control; imxdma->dma_device.device_issue_pending = imxdma_issue_pending; platform_set_drvdata(pdev, imxdma); + imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */ imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); -- cgit v1.2.3 From 9e15db7ce949e9f2d8bb6ce32a74212a4f662370 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Fri, 2 Mar 2012 09:28:47 +0100 Subject: dmaengine: Add support for multiple descriptors for imx-dma. dmaengine specifies the possibility that several descriptors can be queued for transfer. It also indicates that tasklets must be used for DMA callbacks. 
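Building on the sketch above, the pattern this change enables is several queued descriptors followed by a single issue_pending, with completion callbacks now invoked from the channel tasklet. Roughly, assuming chan, dst[], src[], reqs[] and copy_done() exist:

for (i = 0; i < n; i++) {
        struct dma_async_tx_descriptor *tx;

        tx = chan->device->device_prep_dma_memcpy(chan, dst[i], src[i],
                                                  len, DMA_PREP_INTERRUPT);
        if (!tx)
                break;                  /* ld_free exhausted (16 descriptors per channel) */
        tx->callback = copy_done;       /* runs from imxdma_tasklet, i.e. softirq context */
        tx->callback_param = &reqs[i];  /* so the callback must not sleep */
        tx->tx_submit(tx);              /* moves the descriptor onto ld_queue */
}
dma_async_issue_pending(chan);          /* starts the head of ld_queue */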
Acked-by: Sascha Hauer Signed-off-by: Javier Martin Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 332 +++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 258 insertions(+), 74 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 9a1797873169..85b3d3c21d91 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -5,6 +5,7 @@ * found on i.MX1/21/27 * * Copyright 2010 Sascha Hauer, Pengutronix + * Copyright 2012 Javier Martin, Vista Silicon * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License @@ -13,6 +14,7 @@ * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ + #include #include #include @@ -29,19 +31,52 @@ #include #include +#define IMXDMA_MAX_CHAN_DESCRIPTORS 16 + +enum imxdma_prep_type { + IMXDMA_DESC_MEMCPY, + IMXDMA_DESC_INTERLEAVED, + IMXDMA_DESC_SLAVE_SG, + IMXDMA_DESC_CYCLIC, +}; + +struct imxdma_desc { + struct list_head node; + struct dma_async_tx_descriptor desc; + enum dma_status status; + dma_addr_t src; + dma_addr_t dest; + size_t len; + unsigned int dmamode; + enum imxdma_prep_type type; + /* For memcpy and interleaved */ + unsigned int config_port; + unsigned int config_mem; + /* For interleaved transfers */ + unsigned int x; + unsigned int y; + unsigned int w; + /* For slave sg and cyclic */ + struct scatterlist *sg; + unsigned int sgcount; +}; + struct imxdma_channel { struct imxdma_engine *imxdma; unsigned int channel; unsigned int imxdma_channel; + struct tasklet_struct dma_tasklet; + struct list_head ld_free; + struct list_head ld_queue; + struct list_head ld_active; + int descs_allocated; enum dma_slave_buswidth word_size; dma_addr_t per_address; u32 watermark_level; struct dma_chan chan; spinlock_t lock; - struct dma_async_tx_descriptor desc; dma_cookie_t last_completed; - enum dma_status status; int dma_request; struct scatterlist *sg_list; }; @@ -60,27 +95,31 @@ static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) return container_of(chan, struct imxdma_channel, chan); } -static void imxdma_handle(struct imxdma_channel *imxdmac) +static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac) { - if (imxdmac->desc.callback) - imxdmac->desc.callback(imxdmac->desc.callback_param); - imxdmac->last_completed = imxdmac->desc.cookie; + struct imxdma_desc *desc; + + if (!list_empty(&imxdmac->ld_active)) { + desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, + node); + if (desc->type == IMXDMA_DESC_CYCLIC) + return true; + } + return false; } static void imxdma_irq_handler(int channel, void *data) { struct imxdma_channel *imxdmac = data; - imxdmac->status = DMA_SUCCESS; - imxdma_handle(imxdmac); + tasklet_schedule(&imxdmac->dma_tasklet); } static void imxdma_err_handler(int channel, void *data, int error) { struct imxdma_channel *imxdmac = data; - imxdmac->status = DMA_ERROR; - imxdma_handle(imxdmac); + tasklet_schedule(&imxdmac->dma_tasklet); } static void imxdma_progression(int channel, void *data, @@ -88,8 +127,88 @@ static void imxdma_progression(int channel, void *data, { struct imxdma_channel *imxdmac = data; - imxdmac->status = DMA_SUCCESS; - imxdma_handle(imxdmac); + tasklet_schedule(&imxdmac->dma_tasklet); +} + +static int imxdma_xfer_desc(struct imxdma_desc *d) +{ + struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); + int ret; + + /* Configure and enable */ + switch (d->type) { + case 
IMXDMA_DESC_MEMCPY: + ret = imx_dma_config_channel(imxdmac->imxdma_channel, + d->config_port, d->config_mem, 0, 0); + if (ret < 0) + return ret; + ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src, + d->len, d->dest, d->dmamode); + if (ret < 0) + return ret; + break; + case IMXDMA_DESC_CYCLIC: + ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, + imxdma_progression); + if (ret < 0) + return ret; + /* + * We fall through here since cyclic transfer is the same as + * slave_sg adding a progression handler and a specific sg + * configuration which is done in 'imxdma_prep_dma_cyclic'. + */ + case IMXDMA_DESC_SLAVE_SG: + if (d->dmamode == DMA_MODE_READ) + ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg, + d->sgcount, d->len, d->src, d->dmamode); + else + ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg, + d->sgcount, d->len, d->dest, d->dmamode); + if (ret < 0) + return ret; + break; + default: + return -EINVAL; + } + imx_dma_enable(imxdmac->imxdma_channel); + return 0; +} + +static void imxdma_tasklet(unsigned long data) +{ + struct imxdma_channel *imxdmac = (void *)data; + struct imxdma_engine *imxdma = imxdmac->imxdma; + struct imxdma_desc *desc; + + spin_lock(&imxdmac->lock); + + if (list_empty(&imxdmac->ld_active)) { + /* Someone might have called terminate all */ + goto out; + } + desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); + + if (desc->desc.callback) + desc->desc.callback(desc->desc.callback_param); + + imxdmac->last_completed = desc->desc.cookie; + + /* If we are dealing with a cyclic descriptor keep it on ld_active */ + if (imxdma_chan_is_doing_cyclic(imxdmac)) + goto out; + + list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); + + if (!list_empty(&imxdmac->ld_queue)) { + desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, + node); + list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); + if (imxdma_xfer_desc(desc) < 0) + dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", + __func__, imxdmac->channel); + } +out: + spin_unlock(&imxdmac->lock); } static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -98,12 +217,17 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct dma_slave_config *dmaengine_cfg = (void *)arg; int ret; + unsigned long flags; unsigned int mode = 0; switch (cmd) { case DMA_TERMINATE_ALL: - imxdmac->status = DMA_ERROR; imx_dma_disable(imxdmac->imxdma_channel); + + spin_lock_irqsave(&imxdmac->lock, flags); + list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); + list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); + spin_unlock_irqrestore(&imxdmac->lock, flags); return 0; case DMA_SLAVE_CONFIG: if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { @@ -154,11 +278,14 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan, struct imxdma_channel *imxdmac = to_imxdma_chan(chan); dma_cookie_t last_used; enum dma_status ret; + unsigned long flags; + spin_lock_irqsave(&imxdmac->lock, flags); last_used = chan->cookie; ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); + spin_unlock_irqrestore(&imxdmac->lock, flags); return ret; } @@ -171,7 +298,6 @@ static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma) cookie = 1; imxdma->chan.cookie = cookie; - imxdma->desc.cookie = cookie; return cookie; } @@ -180,12 +306,15 @@ static dma_cookie_t 
imxdma_tx_submit(struct dma_async_tx_descriptor *tx) { struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); dma_cookie_t cookie; + unsigned long flags; - spin_lock_irq(&imxdmac->lock); + spin_lock_irqsave(&imxdmac->lock, flags); + list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue); cookie = imxdma_assign_cookie(imxdmac); + tx->cookie = cookie; - spin_unlock_irq(&imxdmac->lock); + spin_unlock_irqrestore(&imxdmac->lock, flags); return cookie; } @@ -198,21 +327,48 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) if (data != NULL) imxdmac->dma_request = data->dma_request; - dma_async_tx_descriptor_init(&imxdmac->desc, chan); - imxdmac->desc.tx_submit = imxdma_tx_submit; - /* txd.flags will be overwritten in prep funcs */ - imxdmac->desc.flags = DMA_CTRL_ACK; + while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) { + struct imxdma_desc *desc; - imxdmac->status = DMA_SUCCESS; + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + break; + __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor)); + dma_async_tx_descriptor_init(&desc->desc, chan); + desc->desc.tx_submit = imxdma_tx_submit; + /* txd.flags will be overwritten in prep funcs */ + desc->desc.flags = DMA_CTRL_ACK; + desc->status = DMA_SUCCESS; + + list_add_tail(&desc->node, &imxdmac->ld_free); + imxdmac->descs_allocated++; + } - return 0; + if (!imxdmac->descs_allocated) + return -ENOMEM; + + return imxdmac->descs_allocated; } static void imxdma_free_chan_resources(struct dma_chan *chan) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); + struct imxdma_desc *desc, *_desc; + unsigned long flags; + + spin_lock_irqsave(&imxdmac->lock, flags); imx_dma_disable(imxdmac->imxdma_channel); + list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); + list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); + + spin_unlock_irqrestore(&imxdmac->lock, flags); + + list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) { + kfree(desc); + imxdmac->descs_allocated--; + } + INIT_LIST_HEAD(&imxdmac->ld_free); if (imxdmac->sg_list) { kfree(imxdmac->sg_list); @@ -227,23 +383,19 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct scatterlist *sg; - int i, ret, dma_length = 0; - unsigned int dmamode; + int i, dma_length = 0; + struct imxdma_desc *desc; - if (imxdmac->status == DMA_IN_PROGRESS) + if (list_empty(&imxdmac->ld_free) || + imxdma_chan_is_doing_cyclic(imxdmac)) return NULL; - imxdmac->status = DMA_IN_PROGRESS; + desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); for_each_sg(sgl, sg, sg_len, i) { dma_length += sg->length; } - if (direction == DMA_DEV_TO_MEM) - dmamode = DMA_MODE_READ; - else - dmamode = DMA_MODE_WRITE; - switch (imxdmac->word_size) { case DMA_SLAVE_BUSWIDTH_4_BYTES: if (sgl->length & 3 || sgl->dma_address & 3) @@ -259,12 +411,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( return NULL; } - ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, - dma_length, imxdmac->per_address, dmamode); - if (ret) - return NULL; + desc->type = IMXDMA_DESC_SLAVE_SG; + desc->sg = sgl; + desc->sgcount = sg_len; + desc->len = dma_length; + if (direction == DMA_DEV_TO_MEM) { + desc->dmamode = DMA_MODE_READ; + desc->src = imxdmac->per_address; + } else { + desc->dmamode = DMA_MODE_WRITE; + desc->dest = imxdmac->per_address; + } + desc->desc.callback = NULL; + desc->desc.callback_param = NULL; - return &imxdmac->desc; + return &desc->desc; } static struct 
dma_async_tx_descriptor *imxdma_prep_dma_cyclic( @@ -273,23 +434,18 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct imxdma_engine *imxdma = imxdmac->imxdma; - int i, ret; + struct imxdma_desc *desc; + int i; unsigned int periods = buf_len / period_len; - unsigned int dmamode; dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", __func__, imxdmac->channel, buf_len, period_len); - if (imxdmac->status == DMA_IN_PROGRESS) + if (list_empty(&imxdmac->ld_free) || + imxdma_chan_is_doing_cyclic(imxdmac)) return NULL; - imxdmac->status = DMA_IN_PROGRESS; - ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, - imxdma_progression); - if (ret) { - dev_err(imxdma->dev, "Failed to setup the DMA handler\n"); - return NULL; - } + desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); if (imxdmac->sg_list) kfree(imxdmac->sg_list); @@ -315,17 +471,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( imxdmac->sg_list[periods].page_link = ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; - if (direction == DMA_DEV_TO_MEM) - dmamode = DMA_MODE_READ; - else - dmamode = DMA_MODE_WRITE; - - ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods, - IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode); - if (ret) - return NULL; + desc->type = IMXDMA_DESC_CYCLIC; + desc->sg = imxdmac->sg_list; + desc->sgcount = periods; + desc->len = IMX_DMA_LENGTH_LOOP; + if (direction == DMA_DEV_TO_MEM) { + desc->dmamode = DMA_MODE_READ; + desc->src = imxdmac->per_address; + } else { + desc->dmamode = DMA_MODE_WRITE; + desc->dest = imxdmac->per_address; + } + desc->desc.callback = NULL; + desc->desc.callback_param = NULL; - return &imxdmac->desc; + return &desc->desc; } static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( @@ -334,36 +494,53 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct imxdma_engine *imxdma = imxdmac->imxdma; - int ret; + struct imxdma_desc *desc; dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", __func__, imxdmac->channel, src, dest, len); - if (imxdmac->status == DMA_IN_PROGRESS) + if (list_empty(&imxdmac->ld_free) || + imxdma_chan_is_doing_cyclic(imxdmac)) return NULL; - imxdmac->status = DMA_IN_PROGRESS; - ret = imx_dma_config_channel(imxdmac->imxdma_channel, - IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, - IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, - 0, 0); - if (ret) - return NULL; + desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); - ret = imx_dma_setup_single(imxdmac->imxdma_channel, src, len, - dest, DMA_MODE_WRITE); - if (ret) - return NULL; + desc->type = IMXDMA_DESC_MEMCPY; + desc->src = src; + desc->dest = dest; + desc->len = len; + desc->dmamode = DMA_MODE_WRITE; + desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; + desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; + desc->desc.callback = NULL; + desc->desc.callback_param = NULL; - return &imxdmac->desc; + return &desc->desc; } static void imxdma_issue_pending(struct dma_chan *chan) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); - - if (imxdmac->status == DMA_IN_PROGRESS) - imx_dma_enable(imxdmac->imxdma_channel); + struct imxdma_engine *imxdma = imxdmac->imxdma; + struct imxdma_desc *desc; + unsigned long flags; + + spin_lock_irqsave(&imxdmac->lock, flags); + if (list_empty(&imxdmac->ld_active) && + !list_empty(&imxdmac->ld_queue)) { + desc 
= list_first_entry(&imxdmac->ld_queue, + struct imxdma_desc, node); + + if (imxdma_xfer_desc(desc) < 0) { + dev_warn(imxdma->dev, + "%s: channel: %d couldn't issue DMA xfer\n", + __func__, imxdmac->channel); + } else { + list_move_tail(imxdmac->ld_queue.next, + &imxdmac->ld_active); + } + } + spin_unlock_irqrestore(&imxdmac->lock, flags); } static int __init imxdma_probe(struct platform_device *pdev) @@ -398,11 +575,18 @@ static int __init imxdma_probe(struct platform_device *pdev) imxdmac->imxdma = imxdma; spin_lock_init(&imxdmac->lock); + INIT_LIST_HEAD(&imxdmac->ld_queue); + INIT_LIST_HEAD(&imxdmac->ld_free); + INIT_LIST_HEAD(&imxdmac->ld_active); + + tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet, + (unsigned long)imxdmac); imxdmac->chan.device = &imxdma->dma_device; imxdmac->channel = i; /* Add the channel to the DMAC list */ - list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels); + list_add_tail(&imxdmac->chan.device_node, + &imxdma->dma_device.channels); } imxdma->dev = &pdev->dev; -- cgit v1.2.3
From 5170c051a56244816d948c43592c1b2805ed4f3a Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 9 Mar 2012 14:55:25 +0530 Subject: Revert "drivers/dma: linux/module.h included twice" This reverts commit 865d9438eb1f7670d2e88849f059db551b320887. The duplicate module.h inclusion has since been fixed tree-wide, hence this change is no longer required. Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 85b3d3c21d91..c32103f04fb3 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -26,6 +26,7 @@ #include #include #include +#include <linux/module.h> #include #include -- cgit v1.2.3
From 4d4e58de32a192fea65ab84509d17d199bd291c8 Mon Sep 17 00:00:00 2001 From: Russell King - ARM Linux Date: Tue, 6 Mar 2012 22:34:06 +0000 Subject: dmaengine: move last completed cookie into generic dma_chan structure Every DMA engine implementation declares a last completed DMA cookie in its private dma channel structure. This is pointless, and forces driver-specific code. Move this out into the common dma_chan structure.
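With the completed cookie in struct dma_chan, a driver's status path no longer needs any private state; a generic sketch (the foo_ prefix is a placeholder, not imx-dma code):

static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        /* both the last-used and last-completed cookies are generic now */
        enum dma_status ret = dma_async_is_complete(cookie,
                                                    chan->completed_cookie,
                                                    chan->cookie);

        dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
        return ret;
}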
Signed-off-by: Russell King Tested-by: Linus Walleij Reviewed-by: Linus Walleij Acked-by: Jassi Brar [imx-sdma.c & mxs-dma.c] Tested-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 3296a7337f25..d3ddcba87f81 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -41,7 +41,6 @@ struct imxdma_channel { struct dma_chan chan; spinlock_t lock; struct dma_async_tx_descriptor desc; - dma_cookie_t last_completed; enum dma_status status; int dma_request; struct scatterlist *sg_list; @@ -65,7 +64,7 @@ static void imxdma_handle(struct imxdma_channel *imxdmac) { if (imxdmac->desc.callback) imxdmac->desc.callback(imxdmac->desc.callback_param); - imxdmac->last_completed = imxdmac->desc.cookie; + imxdmac->chan.completed_cookie = imxdmac->desc.cookie; } static void imxdma_irq_handler(int channel, void *data) @@ -158,8 +157,8 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan, last_used = chan->cookie; - ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); - dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); + ret = dma_async_is_complete(cookie, chan->completed_cookie, last_used); + dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0); return ret; } -- cgit v1.2.3 From d2ebfb335b0426deb1a4fb14e4e926d81ecd8235 Mon Sep 17 00:00:00 2001 From: Russell King - ARM Linux Date: Tue, 6 Mar 2012 22:34:26 +0000 Subject: dmaengine: add private header file Add a local private header file to contain definitions and declarations which should only be used by DMA engine drivers. We also fix linux/dmaengine.h to use LINUX_DMAENGINE_H to guard against multiple inclusion. Signed-off-by: Russell King Tested-by: Linus Walleij Reviewed-by: Linus Walleij Acked-by: Jassi Brar [imx-sdma.c & mxs-dma.c] Tested-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index d3ddcba87f81..cead5e4bd38c 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -30,6 +30,8 @@ #include #include +#include "dmaengine.h" + struct imxdma_channel { struct imxdma_engine *imxdma; unsigned int channel; -- cgit v1.2.3 From 884485e1f12dcd39390f042e772cdbefc9ebb750 Mon Sep 17 00:00:00 2001 From: Russell King - ARM Linux Date: Tue, 6 Mar 2012 22:34:46 +0000 Subject: dmaengine: consolidate assignment of DMA cookies Everyone deals with assigning DMA cookies in the same way (it's part of the API so they should be), so lets consolidate the common code into a helper function to avoid this duplication. 
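The helper added by this patch lives in the private drivers/dma/dmaengine.h introduced above; its body is, approximately:

static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *chan = tx->chan;
        dma_cookie_t cookie;

        /* caller is expected to hold a lock serialising submission */
        cookie = chan->cookie + 1;
        if (cookie < DMA_MIN_COOKIE)    /* wrap back to the first valid cookie */
                cookie = DMA_MIN_COOKIE;
        tx->cookie = chan->cookie = cookie;

        return cookie;
}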
Signed-off-by: Russell King Tested-by: Linus Walleij Reviewed-by: Linus Walleij Acked-by: Jassi Brar [imx-sdma.c & mxs-dma.c] Tested-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index cead5e4bd38c..687fc687aaf6 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -165,19 +165,6 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan, return ret; } -static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma) -{ - dma_cookie_t cookie = imxdma->chan.cookie; - - if (++cookie < 0) - cookie = 1; - - imxdma->chan.cookie = cookie; - imxdma->desc.cookie = cookie; - - return cookie; -} - static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) { struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); @@ -185,7 +172,7 @@ static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irq(&imxdmac->lock); - cookie = imxdma_assign_cookie(imxdmac); + cookie = dma_cookie_assign(tx); spin_unlock_irq(&imxdmac->lock); -- cgit v1.2.3 From f7fbce07c6ce26a25b4e0cb5f241c361fde87901 Mon Sep 17 00:00:00 2001 From: Russell King - ARM Linux Date: Tue, 6 Mar 2012 22:35:07 +0000 Subject: dmaengine: provide a common function for completing a dma descriptor Provide a common function to do the cookie mechanics for completing a DMA descriptor. Signed-off-by: Russell King Tested-by: Linus Walleij Reviewed-by: Linus Walleij Acked-by: Jassi Brar [imx-sdma.c & mxs-dma.c] Tested-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 687fc687aaf6..9a3cbac3d695 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -66,7 +66,7 @@ static void imxdma_handle(struct imxdma_channel *imxdmac) { if (imxdmac->desc.callback) imxdmac->desc.callback(imxdmac->desc.callback_param); - imxdmac->chan.completed_cookie = imxdmac->desc.cookie; + dma_cookie_complete(&imxdmac->desc); } static void imxdma_irq_handler(int channel, void *data) -- cgit v1.2.3 From 96a2af41c78b1fbb1f567a3486bdc63f7b31c5fd Mon Sep 17 00:00:00 2001 From: Russell King - ARM Linux Date: Tue, 6 Mar 2012 22:35:27 +0000 Subject: dmaengine: consolidate tx_status functions Now that we have the completed cookie in the dma_chan structure, we can consolidate the tx_status functions by providing a function to set the txstate structure and returning the DMA status. We also provide a separate helper to set the residue for cookies which are still in progress. 
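The consolidated status helper snapshots both cookies, reports them through the dma_tx_state and derives the status; approximately:

static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        dma_cookie_t used, complete;

        used = chan->cookie;
        complete = chan->completed_cookie;
        barrier();
        if (state) {
                state->last = complete;
                state->used = used;
                state->residue = 0;     /* dma_set_residue() fills this for in-progress cookies */
        }
        return dma_async_is_complete(cookie, complete, used);
}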
Signed-off-by: Russell King Tested-by: Linus Walleij Reviewed-by: Linus Walleij Acked-by: Jassi Brar [imx-sdma.c & mxs-dma.c] Tested-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 9a3cbac3d695..6731f1918c55 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -153,16 +153,7 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { - struct imxdma_channel *imxdmac = to_imxdma_chan(chan); - dma_cookie_t last_used; - enum dma_status ret; - - last_used = chan->cookie; - - ret = dma_async_is_complete(cookie, chan->completed_cookie, last_used); - dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0); - - return ret; + return dma_cookie_status(chan, cookie, txstate); } static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) -- cgit v1.2.3 From 8ac695463f37af902e953d575d3f782e32e170da Mon Sep 17 00:00:00 2001 From: Russell King - ARM Linux Date: Tue, 6 Mar 2012 22:36:27 +0000 Subject: dmaengine: ensure all DMA engine drivers initialize their cookies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensure all DMA engine drivers initialize their cookies in the same way, so that they all behave in a similar fashion. This means their first issued cookie will be 2 rather than 1, and will increment to INT_MAX before returning 1 and starting over. In connection with this, Dan Williams said: > Russell King wrote: > > Secondly, some DMA engine drivers initialize the dma_chan cookie to 0, > > others to 1.  Is there a reason for this, or are these all buggy? > > I know that ioat and iop-adma expect 0 to mean "I have cleaned up this > descriptor and it is idle", and would break if zero was an in-flight > cookie value. The reserved usage of zero is an driver internal > concern, but I have no problem formalizing it as a reserved value. Signed-off-by: Russell King Tested-by: Linus Walleij Reviewed-by: Linus Walleij Acked-by: Jassi Brar [imx-sdma.c & mxs-dma.c] Tested-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 6731f1918c55..f0485c0a685a 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -347,6 +347,7 @@ static int __init imxdma_probe(struct platform_device *pdev) spin_lock_init(&imxdmac->lock); imxdmac->chan.device = &imxdma->dma_device; + dma_cookie_init(&imxdmac->chan); imxdmac->channel = i; /* Add the channel to the DMAC list */ -- cgit v1.2.3 From 185ecb5f4fd43911c35956d4cc7d94a1da30417f Mon Sep 17 00:00:00 2001 From: Alexandre Bounine Date: Thu, 8 Mar 2012 15:35:13 -0500 Subject: dmaengine: add context parameter to prep_slave_sg and prep_dma_cyclic Add context parameter to device_prep_slave_sg() and device_prep_dma_cyclic() interfaces to allow passing client/target specific information associated with the data transfer. Modify all affected DMA engine drivers. 
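After this change the two callback prototypes in linux/dmaengine.h read roughly as follows; the imx-dma hunks below simply adapt the driver to them:

struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction, unsigned long flags,
        void *context);

struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        void *context);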
Signed-off-by: Alexandre Bounine Acked-by: Linus Walleij Acked-by: Felipe Balbi Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 20c1565a7486..304839a99ae5 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -354,7 +354,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan) static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, - unsigned long flags) + unsigned long flags, void *context) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct scatterlist *sg; @@ -405,7 +405,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, - size_t period_len, enum dma_transfer_direction direction) + size_t period_len, enum dma_transfer_direction direction, + void *context) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct imxdma_engine *imxdma = imxdmac->imxdma; -- cgit v1.2.3 From 6bd081277ea03e2b165fc68534b61bc64db93990 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:01 +0100 Subject: dmaengine: imx-dma: merge old dma-v1.c with imx-dma.c It is mainly a simple merge changing the prefix of some functions to fit the imx-dma namings. As there are no users of the old dma-v1.c api we can safely remove this file. Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 604 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 552 insertions(+), 52 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 304839a99ae5..fbb1aaad6128 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -14,7 +14,6 @@ * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ - #include #include #include @@ -25,15 +24,89 @@ #include #include #include +#include #include #include #include -#include +#include #include #include "dmaengine.h" #define IMXDMA_MAX_CHAN_DESCRIPTORS 16 +#define IMX_DMA_CHANNELS 16 + +#define DMA_MODE_READ 0 +#define DMA_MODE_WRITE 1 +#define DMA_MODE_MASK 1 + +#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1) +#define IMX_DMA_MEMSIZE_32 (0 << 4) +#define IMX_DMA_MEMSIZE_8 (1 << 4) +#define IMX_DMA_MEMSIZE_16 (2 << 4) +#define IMX_DMA_TYPE_LINEAR (0 << 10) +#define IMX_DMA_TYPE_2D (1 << 10) +#define IMX_DMA_TYPE_FIFO (2 << 10) + +#define IMX_DMA_ERR_BURST (1 << 0) +#define IMX_DMA_ERR_REQUEST (1 << 1) +#define IMX_DMA_ERR_TRANSFER (1 << 2) +#define IMX_DMA_ERR_BUFFER (1 << 3) +#define IMX_DMA_ERR_TIMEOUT (1 << 4) + +#define DMA_DCR 0x00 /* Control Register */ +#define DMA_DISR 0x04 /* Interrupt status Register */ +#define DMA_DIMR 0x08 /* Interrupt mask Register */ +#define DMA_DBTOSR 0x0c /* Burst timeout status Register */ +#define DMA_DRTOSR 0x10 /* Request timeout Register */ +#define DMA_DSESR 0x14 /* Transfer Error Status Register */ +#define DMA_DBOSR 0x18 /* Buffer overflow status Register */ +#define DMA_DBTOCR 0x1c /* Burst timeout control Register */ +#define DMA_WSRA 0x40 /* W-Size Register A */ +#define DMA_XSRA 0x44 /* X-Size Register A */ +#define DMA_YSRA 0x48 /* Y-Size Register A */ +#define DMA_WSRB 0x4c /* W-Size Register B */ +#define DMA_XSRB 
0x50 /* X-Size Register B */ +#define DMA_YSRB 0x54 /* Y-Size Register B */ +#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */ +#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */ +#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */ +#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */ +#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */ +#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */ +#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */ +#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */ +#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */ + +#define DCR_DRST (1<<1) +#define DCR_DEN (1<<0) +#define DBTOCR_EN (1<<15) +#define DBTOCR_CNT(x) ((x) & 0x7fff) +#define CNTR_CNT(x) ((x) & 0xffffff) +#define CCR_ACRPT (1<<14) +#define CCR_DMOD_LINEAR (0x0 << 12) +#define CCR_DMOD_2D (0x1 << 12) +#define CCR_DMOD_FIFO (0x2 << 12) +#define CCR_DMOD_EOBFIFO (0x3 << 12) +#define CCR_SMOD_LINEAR (0x0 << 10) +#define CCR_SMOD_2D (0x1 << 10) +#define CCR_SMOD_FIFO (0x2 << 10) +#define CCR_SMOD_EOBFIFO (0x3 << 10) +#define CCR_MDIR_DEC (1<<9) +#define CCR_MSEL_B (1<<8) +#define CCR_DSIZ_32 (0x0 << 6) +#define CCR_DSIZ_8 (0x1 << 6) +#define CCR_DSIZ_16 (0x2 << 6) +#define CCR_SSIZ_32 (0x0 << 4) +#define CCR_SSIZ_8 (0x1 << 4) +#define CCR_SSIZ_16 (0x2 << 4) +#define CCR_REN (1<<3) +#define CCR_RPT (1<<2) +#define CCR_FRC (1<<1) +#define CCR_CEN (1<<0) +#define RTOR_EN (1<<15) +#define RTOR_CLK (1<<14) +#define RTOR_PSC (1<<13) enum imxdma_prep_type { IMXDMA_DESC_MEMCPY, @@ -42,6 +115,39 @@ enum imxdma_prep_type { IMXDMA_DESC_CYCLIC, }; +/* + * struct imxdma_channel_internal - i.MX specific DMA extension + * @name: name specified by DMA client + * @irq_handler: client callback for end of transfer + * @err_handler: client callback for error condition + * @data: clients context data for callbacks + * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE + * @sg: pointer to the actual read/written chunk for scatter-gather emulation + * @resbytes: total residual number of bytes to transfer + * (it can be lower or same as sum of SG mapped chunk sizes) + * @sgcount: number of chunks to be read/written + * + * Structure is used for IMX DMA processing. It would be probably good + * @struct dma_struct in the future for external interfacing and use + * @struct imxdma_channel_internal only as extension to it. 
+ */ + +struct imxdma_channel_internal { + void *data; + unsigned int dma_mode; + struct scatterlist *sg; + unsigned int resbytes; + + int in_use; + + u32 ccr_from_device; + u32 ccr_to_device; + + struct timer_list watchdog; + + int hw_chaining; +}; + struct imxdma_desc { struct list_head node; struct dma_async_tx_descriptor desc; @@ -64,9 +170,9 @@ struct imxdma_desc { }; struct imxdma_channel { + struct imxdma_channel_internal internal; struct imxdma_engine *imxdma; unsigned int channel; - unsigned int imxdma_channel; struct tasklet_struct dma_tasklet; struct list_head ld_free; @@ -84,13 +190,11 @@ struct imxdma_channel { struct scatterlist *sg_list; }; -#define MAX_DMA_CHANNELS 8 - struct imxdma_engine { struct device *dev; struct device_dma_parameters dma_parms; struct dma_device dma_device; - struct imxdma_channel channel[MAX_DMA_CHANNELS]; + struct imxdma_channel channel[IMX_DMA_CHANNELS]; }; static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) @@ -111,28 +215,381 @@ static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac) return false; } -static void imxdma_irq_handler(int channel, void *data) +/* TODO: put this inside any struct */ +static void __iomem *imx_dmav1_baseaddr; +static struct clk *dma_clk; + +static void imx_dmav1_writel(unsigned val, unsigned offset) +{ + __raw_writel(val, imx_dmav1_baseaddr + offset); +} + +static unsigned imx_dmav1_readl(unsigned offset) { - struct imxdma_channel *imxdmac = data; + return __raw_readl(imx_dmav1_baseaddr + offset); +} - tasklet_schedule(&imxdmac->dma_tasklet); +static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma) +{ + if (cpu_is_mx27()) + return imxdma->hw_chaining; + else + return 0; +} + +/* + * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation + */ +static inline int imxdma_sg_next(struct imxdma_channel *imxdmac, struct scatterlist *sg) +{ + struct imxdma_channel_internal *imxdma = &imxdmac->internal; + unsigned long now; + + now = min(imxdma->resbytes, sg->length); + if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP) + imxdma->resbytes -= now; + + if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ) + imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel)); + else + imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel)); + + imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel)); + + pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, " + "size 0x%08x\n", imxdmac->channel, + imx_dmav1_readl(DMA_DAR(imxdmac->channel)), + imx_dmav1_readl(DMA_SAR(imxdmac->channel)), + imx_dmav1_readl(DMA_CNTR(imxdmac->channel))); + + return now; +} + +static int +imxdma_setup_single_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address, + unsigned int dma_length, unsigned int dev_addr, + unsigned int dmamode) +{ + int channel = imxdmac->channel; + + imxdmac->internal.sg = NULL; + imxdmac->internal.dma_mode = dmamode; + + if (!dma_address) { + printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n", + channel); + return -EINVAL; + } + + if (!dma_length) { + printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n", + channel); + return -EINVAL; + } + + if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) { + pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " + "dev_addr=0x%08x for read\n", + channel, __func__, (unsigned int)dma_address, + dma_length, dev_addr); + + imx_dmav1_writel(dev_addr, DMA_SAR(channel)); + imx_dmav1_writel(dma_address, DMA_DAR(channel)); + imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel)); + } else if ((dmamode 
& DMA_MODE_MASK) == DMA_MODE_WRITE) { + pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " + "dev_addr=0x%08x for write\n", + channel, __func__, (unsigned int)dma_address, + dma_length, dev_addr); + + imx_dmav1_writel(dma_address, DMA_SAR(channel)); + imx_dmav1_writel(dev_addr, DMA_DAR(channel)); + imx_dmav1_writel(imxdmac->internal.ccr_to_device, + DMA_CCR(channel)); + } else { + printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n", + channel); + return -EINVAL; + } + + imx_dmav1_writel(dma_length, DMA_CNTR(channel)); + + return 0; +} + +static void imxdma_enable_hw(struct imxdma_channel *imxdmac) +{ + int channel = imxdmac->channel; + unsigned long flags; + + pr_debug("imxdma%d: imx_dma_enable\n", channel); + + if (imxdmac->internal.in_use) + return; + + local_irq_save(flags); + + imx_dmav1_writel(1 << channel, DMA_DISR); + imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR); + imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN | + CCR_ACRPT, DMA_CCR(channel)); + + if ((cpu_is_mx21() || cpu_is_mx27()) && + imxdmac->internal.sg && imxdma_hw_chain(&imxdmac->internal)) { + imxdmac->internal.sg = sg_next(imxdmac->internal.sg); + if (imxdmac->internal.sg) { + u32 tmp; + imxdma_sg_next(imxdmac, imxdmac->internal.sg); + tmp = imx_dmav1_readl(DMA_CCR(channel)); + imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT, + DMA_CCR(channel)); + } + } + imxdmac->internal.in_use = 1; + + local_irq_restore(flags); +} + +static void imxdma_disable_hw(struct imxdma_channel *imxdmac) +{ + int channel = imxdmac->channel; + unsigned long flags; + + pr_debug("imxdma%d: imx_dma_disable\n", channel); + + if (imxdma_hw_chain(&imxdmac->internal)) + del_timer(&imxdmac->internal.watchdog); + + local_irq_save(flags); + imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR); + imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN, + DMA_CCR(channel)); + imx_dmav1_writel(1 << channel, DMA_DISR); + imxdmac->internal.in_use = 0; + local_irq_restore(flags); +} + +static int +imxdma_config_channel_hw(struct imxdma_channel *imxdmac, unsigned int config_port, + unsigned int config_mem, unsigned int dmareq, int hw_chaining) +{ + int channel = imxdmac->channel; + u32 dreq = 0; + + imxdmac->internal.hw_chaining = 0; + + if (hw_chaining) { + imxdmac->internal.hw_chaining = 1; + if (!imxdma_hw_chain(&imxdmac->internal)) + return -EINVAL; + } + + if (dmareq) + dreq = CCR_REN; + + imxdmac->internal.ccr_from_device = config_port | (config_mem << 2) | dreq; + imxdmac->internal.ccr_to_device = config_mem | (config_port << 2) | dreq; + + imx_dmav1_writel(dmareq, DMA_RSSR(channel)); + + return 0; +} + +static int +imxdma_setup_sg_hw(struct imxdma_channel *imxdmac, + struct scatterlist *sg, unsigned int sgcount, + unsigned int dma_length, unsigned int dev_addr, + unsigned int dmamode) +{ + int channel = imxdmac->channel; + + if (imxdmac->internal.in_use) + return -EBUSY; + + imxdmac->internal.sg = sg; + imxdmac->internal.dma_mode = dmamode; + imxdmac->internal.resbytes = dma_length; + + if (!sg || !sgcount) { + printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n", + channel); + return -EINVAL; + } + + if (!sg->length) { + printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n", + channel); + return -EINVAL; + } + + if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) { + pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " + "dev_addr=0x%08x for read\n", + channel, __func__, sg, sgcount, dma_length, dev_addr); + + imx_dmav1_writel(dev_addr, DMA_SAR(channel)); + 
imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel)); + } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) { + pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " + "dev_addr=0x%08x for write\n", + channel, __func__, sg, sgcount, dma_length, dev_addr); + + imx_dmav1_writel(dev_addr, DMA_DAR(channel)); + imx_dmav1_writel(imxdmac->internal.ccr_to_device, DMA_CCR(channel)); + } else { + printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n", + channel); + return -EINVAL; + } + + imxdma_sg_next(imxdmac, sg); + + return 0; } -static void imxdma_err_handler(int channel, void *data, int error) +static void imxdma_watchdog(unsigned long data) { - struct imxdma_channel *imxdmac = data; + struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; + int channel = imxdmac->channel; + imx_dmav1_writel(0, DMA_CCR(channel)); + imxdmac->internal.in_use = 0; + imxdmac->internal.sg = NULL; + + /* Tasklet watchdog error handler */ tasklet_schedule(&imxdmac->dma_tasklet); + pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel); +} + +static irqreturn_t imxdma_err_handler(int irq, void *dev_id) +{ + struct imxdma_engine *imxdma = dev_id; + struct imxdma_channel_internal *internal; + unsigned int err_mask; + int i, disr; + int errcode; + + disr = imx_dmav1_readl(DMA_DISR); + + err_mask = imx_dmav1_readl(DMA_DBTOSR) | + imx_dmav1_readl(DMA_DRTOSR) | + imx_dmav1_readl(DMA_DSESR) | + imx_dmav1_readl(DMA_DBOSR); + + if (!err_mask) + return IRQ_HANDLED; + + imx_dmav1_writel(disr & err_mask, DMA_DISR); + + for (i = 0; i < IMX_DMA_CHANNELS; i++) { + if (!(err_mask & (1 << i))) + continue; + internal = &imxdma->channel[i].internal; + errcode = 0; + + if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) { + imx_dmav1_writel(1 << i, DMA_DBTOSR); + errcode |= IMX_DMA_ERR_BURST; + } + if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) { + imx_dmav1_writel(1 << i, DMA_DRTOSR); + errcode |= IMX_DMA_ERR_REQUEST; + } + if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) { + imx_dmav1_writel(1 << i, DMA_DSESR); + errcode |= IMX_DMA_ERR_TRANSFER; + } + if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) { + imx_dmav1_writel(1 << i, DMA_DBOSR); + errcode |= IMX_DMA_ERR_BUFFER; + } + /* Tasklet error handler */ + tasklet_schedule(&imxdma->channel[i].dma_tasklet); + + printk(KERN_WARNING + "DMA timeout on channel %d -%s%s%s%s\n", i, + errcode & IMX_DMA_ERR_BURST ? " burst" : "", + errcode & IMX_DMA_ERR_REQUEST ? " request" : "", + errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "", + errcode & IMX_DMA_ERR_BUFFER ? 
" buffer" : ""); + } + return IRQ_HANDLED; } -static void imxdma_progression(int channel, void *data, - struct scatterlist *sg) +static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) { - struct imxdma_channel *imxdmac = data; + struct imxdma_channel_internal *imxdma = &imxdmac->internal; + int chno = imxdmac->channel; + + if (imxdma->sg) { + u32 tmp; + imxdma->sg = sg_next(imxdma->sg); + + if (imxdma->sg) { + imxdma_sg_next(imxdmac, imxdma->sg); + + tmp = imx_dmav1_readl(DMA_CCR(chno)); + + if (imxdma_hw_chain(imxdma)) { + /* FIXME: The timeout should probably be + * configurable + */ + mod_timer(&imxdma->watchdog, + jiffies + msecs_to_jiffies(500)); + + tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; + imx_dmav1_writel(tmp, DMA_CCR(chno)); + } else { + imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno)); + tmp |= CCR_CEN; + } + + imx_dmav1_writel(tmp, DMA_CCR(chno)); + + if (imxdma_chan_is_doing_cyclic(imxdmac)) + /* Tasklet progression */ + tasklet_schedule(&imxdmac->dma_tasklet); + return; + } + + if (imxdma_hw_chain(imxdma)) { + del_timer(&imxdma->watchdog); + return; + } + } + + imx_dmav1_writel(0, DMA_CCR(chno)); + imxdma->in_use = 0; + /* Tasklet irq */ tasklet_schedule(&imxdmac->dma_tasklet); } +static irqreturn_t dma_irq_handler(int irq, void *dev_id) +{ + struct imxdma_engine *imxdma = dev_id; + struct imxdma_channel_internal *internal; + int i, disr; + + if (cpu_is_mx21() || cpu_is_mx27()) + imxdma_err_handler(irq, dev_id); + + disr = imx_dmav1_readl(DMA_DISR); + + pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", + disr); + + imx_dmav1_writel(disr, DMA_DISR); + for (i = 0; i < IMX_DMA_CHANNELS; i++) { + if (disr & (1 << i)) { + internal = &imxdma->channel[i].internal; + dma_irq_handle_channel(&imxdma->channel[i]); + } + } + + return IRQ_HANDLED; +} + static int imxdma_xfer_desc(struct imxdma_desc *d) { struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); @@ -141,31 +598,24 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) /* Configure and enable */ switch (d->type) { case IMXDMA_DESC_MEMCPY: - ret = imx_dma_config_channel(imxdmac->imxdma_channel, + ret = imxdma_config_channel_hw(imxdmac, d->config_port, d->config_mem, 0, 0); if (ret < 0) return ret; - ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src, + ret = imxdma_setup_single_hw(imxdmac, d->src, d->len, d->dest, d->dmamode); if (ret < 0) return ret; break; + + /* Cyclic transfer is the same as slave_sg with special sg configuration. */ case IMXDMA_DESC_CYCLIC: - ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, - imxdma_progression); - if (ret < 0) - return ret; - /* - * We fall through here since cyclic transfer is the same as - * slave_sg adding a progression handler and a specific sg - * configuration which is done in 'imxdma_prep_dma_cyclic'. 
- */ case IMXDMA_DESC_SLAVE_SG: if (d->dmamode == DMA_MODE_READ) - ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg, + ret = imxdma_setup_sg_hw(imxdmac, d->sg, d->sgcount, d->len, d->src, d->dmamode); else - ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg, + ret = imxdma_setup_sg_hw(imxdmac, d->sg, d->sgcount, d->len, d->dest, d->dmamode); if (ret < 0) return ret; @@ -173,7 +623,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) default: return -EINVAL; } - imx_dma_enable(imxdmac->imxdma_channel); + imxdma_enable_hw(imxdmac); return 0; } @@ -225,7 +675,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, switch (cmd) { case DMA_TERMINATE_ALL: - imx_dma_disable(imxdmac->imxdma_channel); + imxdma_disable_hw(imxdmac); spin_lock_irqsave(&imxdmac->lock, flags); list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); @@ -255,16 +705,16 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, mode = IMX_DMA_MEMSIZE_32; break; } - ret = imx_dma_config_channel(imxdmac->imxdma_channel, + ret = imxdma_config_channel_hw(imxdmac, mode | IMX_DMA_TYPE_FIFO, IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, imxdmac->dma_request, 1); if (ret) return ret; - - imx_dma_config_burstlen(imxdmac->imxdma_channel, - imxdmac->watermark_level * imxdmac->word_size); + /* Set burst length */ + imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size, + DMA_BLR(imxdmac->channel)); return 0; default: @@ -333,7 +783,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&imxdmac->lock, flags); - imx_dma_disable(imxdmac->imxdma_channel); + imxdma_disable_hw(imxdmac); list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); @@ -520,10 +970,51 @@ static void imxdma_issue_pending(struct dma_chan *chan) } static int __init imxdma_probe(struct platform_device *pdev) -{ + { struct imxdma_engine *imxdma; int ret, i; + if (cpu_is_mx1()) + imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR); + else if (cpu_is_mx21()) + imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR); + else if (cpu_is_mx27()) + imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR); + else + return 0; + + dma_clk = clk_get(NULL, "dma"); + if (IS_ERR(dma_clk)) + return PTR_ERR(dma_clk); + clk_enable(dma_clk); + + /* reset DMA module */ + imx_dmav1_writel(DCR_DRST, DMA_DCR); + + if (cpu_is_mx1()) { + ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); + if (ret) { + pr_crit("Can't register IRQ for DMA\n"); + return ret; + } + + ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); + if (ret) { + pr_crit("Can't register ERRIRQ for DMA\n"); + free_irq(MX1_DMA_INT, NULL); + return ret; + } + } + + /* enable DMA module */ + imx_dmav1_writel(DCR_DEN, DMA_DCR); + + /* clear all interrupts */ + imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); + + /* disable interrupts */ + imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); + imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); if (!imxdma) return -ENOMEM; @@ -535,19 +1026,22 @@ static int __init imxdma_probe(struct platform_device *pdev) dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask); /* Initialize channel parameters */ - for (i = 0; i < MAX_DMA_CHANNELS; i++) { + for (i = 0; i < IMX_DMA_CHANNELS; i++) { struct imxdma_channel *imxdmac = &imxdma->channel[i]; - - imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine", - DMA_PRIO_MEDIUM); - if ((int)imxdmac->channel < 0) { - ret = -ENODEV; - goto 
err_init; + memset(&imxdmac->internal, 0, sizeof(imxdmac->internal)); + if (cpu_is_mx21() || cpu_is_mx27()) { + ret = request_irq(MX2x_INT_DMACH0 + i, + dma_irq_handler, 0, "DMA", imxdma); + if (ret) { + pr_crit("Can't register IRQ %d for DMA channel %d\n", + MX2x_INT_DMACH0 + i, i); + goto err_init; + } + init_timer(&imxdmac->internal.watchdog); + imxdmac->internal.watchdog.function = &imxdma_watchdog; + imxdmac->internal.watchdog.data = (unsigned long)imxdmac; } - imx_dma_setup_handlers(imxdmac->imxdma_channel, - imxdma_irq_handler, imxdma_err_handler, imxdmac); - imxdmac->imxdma = imxdma; spin_lock_init(&imxdmac->lock); @@ -593,9 +1087,13 @@ static int __init imxdma_probe(struct platform_device *pdev) return 0; err_init: - while (--i >= 0) { - struct imxdma_channel *imxdmac = &imxdma->channel[i]; - imx_dma_free(imxdmac->imxdma_channel); + + if (cpu_is_mx21() || cpu_is_mx27()) { + while (--i >= 0) + free_irq(MX2x_INT_DMACH0 + i, NULL); + } else if cpu_is_mx1() { + free_irq(MX1_DMA_INT, NULL); + free_irq(MX1_DMA_ERR, NULL); } kfree(imxdma); @@ -609,10 +1107,12 @@ static int __exit imxdma_remove(struct platform_device *pdev) dma_async_device_unregister(&imxdma->dma_device); - for (i = 0; i < MAX_DMA_CHANNELS; i++) { - struct imxdma_channel *imxdmac = &imxdma->channel[i]; - - imx_dma_free(imxdmac->imxdma_channel); + if (cpu_is_mx21() || cpu_is_mx27()) { + for (i = 0; i < IMX_DMA_CHANNELS; i++) + free_irq(MX2x_INT_DMACH0 + i, NULL); + } else if cpu_is_mx1() { + free_irq(MX1_DMA_INT, NULL); + free_irq(MX1_DMA_ERR, NULL); } kfree(imxdma); -- cgit v1.2.3
From 232e3c2c7961fb3312a80df3747f1c29f0ed512e Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:02 +0100 Subject: dmaengine: imx-dma: remove data member from internal structure. The internal structure is just an auxiliary structure used for the initial merge and is meant to go away eventually. As the data member is not used anywhere, we can simply remove it. Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index fbb1aaad6128..8603c75b0e1b 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -133,7 +133,6 @@ enum imxdma_prep_type { */ struct imxdma_channel_internal { - void *data; unsigned int dma_mode; struct scatterlist *sg; unsigned int resbytes; -- cgit v1.2.3
From 2efc3449d7b11f36f532180cb738364fd2c28e03 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:03 +0100 Subject: dmaengine: imx-dma: remove dma_mode member of internal structure. dmaengine now provides 'enum dma_transfer_direction' to properly specify DMA transfer direction. For this reason, DMA_MODE_* defines are replaced by this new type and therefore the dma_mode member becomes redundant.
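The substitution the patch applies by hand amounts to this mapping (a hypothetical helper shown only for illustration; the driver instead stores the direction in the descriptor at prep time):

static enum dma_transfer_direction dmamode_to_direction(unsigned int dmamode)
{
        /* DMA_MODE_READ meant device-to-memory, DMA_MODE_WRITE the reverse */
        return ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) ?
                        DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
}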
Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 103 ++++++++++++++++++++++---------------------------- 1 file changed, 45 insertions(+), 58 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 8603c75b0e1b..04a2c1446dc2 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -36,10 +36,6 @@ #define IMXDMA_MAX_CHAN_DESCRIPTORS 16 #define IMX_DMA_CHANNELS 16 -#define DMA_MODE_READ 0 -#define DMA_MODE_WRITE 1 -#define DMA_MODE_MASK 1 - #define IMX_DMA_LENGTH_LOOP ((unsigned int)-1) #define IMX_DMA_MEMSIZE_32 (0 << 4) #define IMX_DMA_MEMSIZE_8 (1 << 4) @@ -133,7 +129,6 @@ enum imxdma_prep_type { */ struct imxdma_channel_internal { - unsigned int dma_mode; struct scatterlist *sg; unsigned int resbytes; @@ -154,7 +149,7 @@ struct imxdma_desc { dma_addr_t src; dma_addr_t dest; size_t len; - unsigned int dmamode; + enum dma_transfer_direction direction; enum imxdma_prep_type type; /* For memcpy and interleaved */ unsigned int config_port; @@ -239,8 +234,9 @@ static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma) /* * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation */ -static inline int imxdma_sg_next(struct imxdma_channel *imxdmac, struct scatterlist *sg) +static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg) { + struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); struct imxdma_channel_internal *imxdma = &imxdmac->internal; unsigned long now; @@ -248,7 +244,7 @@ static inline int imxdma_sg_next(struct imxdma_channel *imxdmac, struct scatterl if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP) imxdma->resbytes -= now; - if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ) + if (d->direction == DMA_DEV_TO_MEM) imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel)); else imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel)); @@ -265,14 +261,12 @@ static inline int imxdma_sg_next(struct imxdma_channel *imxdmac, struct scatterl } static int -imxdma_setup_single_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address, - unsigned int dma_length, unsigned int dev_addr, - unsigned int dmamode) +imxdma_setup_mem2mem_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address, + unsigned int dma_length, unsigned int dev_addr) { int channel = imxdmac->channel; imxdmac->internal.sg = NULL; - imxdmac->internal.dma_mode = dmamode; if (!dma_address) { printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n", @@ -286,38 +280,24 @@ imxdma_setup_single_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address, return -EINVAL; } - if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) { - pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " - "dev_addr=0x%08x for read\n", - channel, __func__, (unsigned int)dma_address, - dma_length, dev_addr); + pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " + "dev_addr=0x%08x for write\n", + channel, __func__, (unsigned int)dma_address, + dma_length, dev_addr); - imx_dmav1_writel(dev_addr, DMA_SAR(channel)); - imx_dmav1_writel(dma_address, DMA_DAR(channel)); - imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel)); - } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) { - pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " - "dev_addr=0x%08x for write\n", - channel, __func__, (unsigned int)dma_address, - dma_length, dev_addr); - - imx_dmav1_writel(dma_address, DMA_SAR(channel)); - imx_dmav1_writel(dev_addr, DMA_DAR(channel)); - 
imx_dmav1_writel(imxdmac->internal.ccr_to_device, - DMA_CCR(channel)); - } else { - printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n", - channel); - return -EINVAL; - } + imx_dmav1_writel(dma_address, DMA_SAR(channel)); + imx_dmav1_writel(dev_addr, DMA_DAR(channel)); + imx_dmav1_writel(imxdmac->internal.ccr_to_device, + DMA_CCR(channel)); imx_dmav1_writel(dma_length, DMA_CNTR(channel)); return 0; } -static void imxdma_enable_hw(struct imxdma_channel *imxdmac) +static void imxdma_enable_hw(struct imxdma_desc *d) { + struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); int channel = imxdmac->channel; unsigned long flags; @@ -338,7 +318,7 @@ static void imxdma_enable_hw(struct imxdma_channel *imxdmac) imxdmac->internal.sg = sg_next(imxdmac->internal.sg); if (imxdmac->internal.sg) { u32 tmp; - imxdma_sg_next(imxdmac, imxdmac->internal.sg); + imxdma_sg_next(d, imxdmac->internal.sg); tmp = imx_dmav1_readl(DMA_CCR(channel)); imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT, DMA_CCR(channel)); @@ -395,18 +375,18 @@ imxdma_config_channel_hw(struct imxdma_channel *imxdmac, unsigned int config_por } static int -imxdma_setup_sg_hw(struct imxdma_channel *imxdmac, +imxdma_setup_sg_hw(struct imxdma_desc *d, struct scatterlist *sg, unsigned int sgcount, unsigned int dma_length, unsigned int dev_addr, - unsigned int dmamode) + enum dma_transfer_direction direction) { + struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); int channel = imxdmac->channel; if (imxdmac->internal.in_use) return -EBUSY; imxdmac->internal.sg = sg; - imxdmac->internal.dma_mode = dmamode; imxdmac->internal.resbytes = dma_length; if (!sg || !sgcount) { @@ -421,14 +401,14 @@ imxdma_setup_sg_hw(struct imxdma_channel *imxdmac, return -EINVAL; } - if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) { + if (direction == DMA_DEV_TO_MEM) { pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " "dev_addr=0x%08x for read\n", channel, __func__, sg, sgcount, dma_length, dev_addr); imx_dmav1_writel(dev_addr, DMA_SAR(channel)); imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel)); - } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) { + } else if (direction == DMA_MEM_TO_DEV) { pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " "dev_addr=0x%08x for write\n", channel, __func__, sg, sgcount, dma_length, dev_addr); @@ -441,7 +421,7 @@ imxdma_setup_sg_hw(struct imxdma_channel *imxdmac, return -EINVAL; } - imxdma_sg_next(imxdmac, sg); + imxdma_sg_next(d, sg); return 0; } @@ -519,13 +499,26 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) { struct imxdma_channel_internal *imxdma = &imxdmac->internal; int chno = imxdmac->channel; + struct imxdma_desc *desc; if (imxdma->sg) { u32 tmp; imxdma->sg = sg_next(imxdma->sg); if (imxdma->sg) { - imxdma_sg_next(imxdmac, imxdma->sg); + + spin_lock(&imxdmac->lock); + if (list_empty(&imxdmac->ld_active)) { + spin_unlock(&imxdmac->lock); + goto out; + } + + desc = list_first_entry(&imxdmac->ld_active, + struct imxdma_desc, + node); + spin_unlock(&imxdmac->lock); + + imxdma_sg_next(desc, imxdma->sg); tmp = imx_dmav1_readl(DMA_CCR(chno)); @@ -558,6 +551,7 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) } } +out: imx_dmav1_writel(0, DMA_CCR(chno)); imxdma->in_use = 0; /* Tasklet irq */ @@ -601,8 +595,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) d->config_port, d->config_mem, 0, 0); if (ret < 0) return ret; - ret = imxdma_setup_single_hw(imxdmac, d->src, - d->len, d->dest, d->dmamode); + ret = 
imxdma_setup_mem2mem_hw(imxdmac, d->src, d->len, d->dest); if (ret < 0) return ret; break; @@ -610,19 +603,15 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) /* Cyclic transfer is the same as slave_sg with special sg configuration. */ case IMXDMA_DESC_CYCLIC: case IMXDMA_DESC_SLAVE_SG: - if (d->dmamode == DMA_MODE_READ) - ret = imxdma_setup_sg_hw(imxdmac, d->sg, - d->sgcount, d->len, d->src, d->dmamode); - else - ret = imxdma_setup_sg_hw(imxdmac, d->sg, - d->sgcount, d->len, d->dest, d->dmamode); + ret = imxdma_setup_sg_hw(d, d->sg, d->sgcount, d->len, + imxdmac->per_address, d->direction); if (ret < 0) return ret; break; default: return -EINVAL; } - imxdma_enable_hw(imxdmac); + imxdma_enable_hw(d); return 0; } @@ -839,11 +828,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( desc->sg = sgl; desc->sgcount = sg_len; desc->len = dma_length; + desc->direction = direction; if (direction == DMA_DEV_TO_MEM) { - desc->dmamode = DMA_MODE_READ; desc->src = imxdmac->per_address; } else { - desc->dmamode = DMA_MODE_WRITE; desc->dest = imxdmac->per_address; } desc->desc.callback = NULL; @@ -900,11 +888,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( desc->sg = imxdmac->sg_list; desc->sgcount = periods; desc->len = IMX_DMA_LENGTH_LOOP; + desc->direction = direction; if (direction == DMA_DEV_TO_MEM) { - desc->dmamode = DMA_MODE_READ; desc->src = imxdmac->per_address; } else { - desc->dmamode = DMA_MODE_WRITE; desc->dest = imxdmac->per_address; } desc->desc.callback = NULL; @@ -934,7 +921,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( desc->src = src; desc->dest = dest; desc->len = len; - desc->dmamode = DMA_MODE_WRITE; + desc->direction = DMA_MEM_TO_MEM; desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; desc->desc.callback = NULL; -- cgit v1.2.3 From 3b4b6dfc202dc5bedb03f2fae4ccc3f5b95dd563 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:04 +0100 Subject: dmaengine: imx-dma: remove 'imxdma_setup_mem2mem_hw' function. This function is only used once in the driver and has a lot of checks that are not needed anymore. For this reason it's been merged with 'imxdma_enable_hw'. 
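Condensed from the diff below, the MEMCPY case of imxdma_xfer_desc() now programs the controller registers directly instead of calling the helper:

case IMXDMA_DESC_MEMCPY:
	imxdmac->internal.sg = NULL;
	/* Program source, destination, control and count registers */
	imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel));
	imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel));
	imx_dmav1_writel(d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));
	imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel));
	break;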
Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 57 +++++++++++++-------------------------------------- 1 file changed, 14 insertions(+), 43 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 04a2c1446dc2..dcb2c70aec7a 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -260,41 +260,6 @@ static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg) return now; } -static int -imxdma_setup_mem2mem_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address, - unsigned int dma_length, unsigned int dev_addr) -{ - int channel = imxdmac->channel; - - imxdmac->internal.sg = NULL; - - if (!dma_address) { - printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n", - channel); - return -EINVAL; - } - - if (!dma_length) { - printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n", - channel); - return -EINVAL; - } - - pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d " - "dev_addr=0x%08x for write\n", - channel, __func__, (unsigned int)dma_address, - dma_length, dev_addr); - - imx_dmav1_writel(dma_address, DMA_SAR(channel)); - imx_dmav1_writel(dev_addr, DMA_DAR(channel)); - imx_dmav1_writel(imxdmac->internal.ccr_to_device, - DMA_CCR(channel)); - - imx_dmav1_writel(dma_length, DMA_CNTR(channel)); - - return 0; -} - static void imxdma_enable_hw(struct imxdma_desc *d) { struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); @@ -586,20 +551,26 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) static int imxdma_xfer_desc(struct imxdma_desc *d) { struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); + struct imxdma_engine *imxdma = imxdmac->imxdma; int ret; /* Configure and enable */ switch (d->type) { case IMXDMA_DESC_MEMCPY: - ret = imxdma_config_channel_hw(imxdmac, - d->config_port, d->config_mem, 0, 0); - if (ret < 0) - return ret; - ret = imxdma_setup_mem2mem_hw(imxdmac, d->src, d->len, d->dest); - if (ret < 0) - return ret; - break; + imxdmac->internal.sg = NULL; + + imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel)); + imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel)); + imx_dmav1_writel(d->config_mem | (d->config_port << 2), + DMA_CCR(imxdmac->channel)); + imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel)); + + dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " + "dma_length=%d\n", __func__, imxdmac->channel, + d->dest, d->src, d->len); + + break; /* Cyclic transfer is the same as slave_sg with special sg configuration. */ case IMXDMA_DESC_CYCLIC: case IMXDMA_DESC_SLAVE_SG: -- cgit v1.2.3 From bdc0c7534c80c479b2336aed3e4016f4743f4853 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:05 +0100 Subject: dmaengine: imx-dma: remove 'imxdma_config_channel_hw' function. This function is only used once in the driver and uses some intermediary variables that are not needed anymore. For this reason it's been merged with 'imxdma_control'. 
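The key piece that survives the merge is the construction of the two CCR templates. Condensed from the diff below, each template combines the FIFO (device) side with the linear memory side, with what reads as the destination-side configuration shifted left by two bits, plus CCR_REN to enable the request line:

imxdmac->internal.ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
	((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | CCR_REN;
imxdmac->internal.ccr_to_device = (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
	((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;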
Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 46 +++++++++++++--------------------------------- 1 file changed, 13 insertions(+), 33 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index dcb2c70aec7a..25b4108f6224 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -313,32 +313,6 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) local_irq_restore(flags); } -static int -imxdma_config_channel_hw(struct imxdma_channel *imxdmac, unsigned int config_port, - unsigned int config_mem, unsigned int dmareq, int hw_chaining) -{ - int channel = imxdmac->channel; - u32 dreq = 0; - - imxdmac->internal.hw_chaining = 0; - - if (hw_chaining) { - imxdmac->internal.hw_chaining = 1; - if (!imxdma_hw_chain(&imxdmac->internal)) - return -EINVAL; - } - - if (dmareq) - dreq = CCR_REN; - - imxdmac->internal.ccr_from_device = config_port | (config_mem << 2) | dreq; - imxdmac->internal.ccr_to_device = config_mem | (config_port << 2) | dreq; - - imx_dmav1_writel(dmareq, DMA_RSSR(channel)); - - return 0; -} - static int imxdma_setup_sg_hw(struct imxdma_desc *d, struct scatterlist *sg, unsigned int sgcount, @@ -628,7 +602,6 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct dma_slave_config *dmaengine_cfg = (void *)arg; - int ret; unsigned long flags; unsigned int mode = 0; @@ -664,13 +637,20 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, mode = IMX_DMA_MEMSIZE_32; break; } - ret = imxdma_config_channel_hw(imxdmac, - mode | IMX_DMA_TYPE_FIFO, - IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, - imxdmac->dma_request, 1); - if (ret) - return ret; + imxdmac->internal.hw_chaining = 1; + if (!imxdma_hw_chain(&imxdmac->internal)) + return -EINVAL; + imxdmac->internal.ccr_from_device = + (mode | IMX_DMA_TYPE_FIFO) | + ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | + CCR_REN; + imxdmac->internal.ccr_to_device = + (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | + ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; + imx_dmav1_writel(imxdmac->dma_request, + DMA_RSSR(imxdmac->channel)); + /* Set burst length */ imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size, DMA_BLR(imxdmac->channel)); -- cgit v1.2.3 From 359291a1a095a8a402405cd9c4bab46684e7bcfe Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:06 +0100 Subject: dmaengine: imx-dma: remove 'imxdma_setup_sg_hw' function. Removing this function allows moving 'ccr_to_device' and 'ccr_from_device' from the internal struct to the channel struct. This represents a step forward towards removing the auxiliary 'internal' structure.
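A sketch of where the two templates now live (existing members elided; see the hunks below for the full context):

struct imxdma_channel {
	/* ... existing members elided ... */
	u32 ccr_from_device;	/* precomputed CCR for DMA_DEV_TO_MEM */
	u32 ccr_to_device;	/* precomputed CCR for DMA_MEM_TO_DEV */
};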
Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 98 ++++++++++++++++++--------------------------------- 1 file changed, 35 insertions(+), 63 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 25b4108f6224..484f35365902 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -134,9 +134,6 @@ struct imxdma_channel_internal { int in_use; - u32 ccr_from_device; - u32 ccr_to_device; - struct timer_list watchdog; int hw_chaining; @@ -182,6 +179,8 @@ struct imxdma_channel { enum dma_status status; int dma_request; struct scatterlist *sg_list; + u32 ccr_from_device; + u32 ccr_to_device; }; struct imxdma_engine { @@ -313,58 +312,6 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) local_irq_restore(flags); } -static int -imxdma_setup_sg_hw(struct imxdma_desc *d, - struct scatterlist *sg, unsigned int sgcount, - unsigned int dma_length, unsigned int dev_addr, - enum dma_transfer_direction direction) -{ - struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); - int channel = imxdmac->channel; - - if (imxdmac->internal.in_use) - return -EBUSY; - - imxdmac->internal.sg = sg; - imxdmac->internal.resbytes = dma_length; - - if (!sg || !sgcount) { - printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n", - channel); - return -EINVAL; - } - - if (!sg->length) { - printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n", - channel); - return -EINVAL; - } - - if (direction == DMA_DEV_TO_MEM) { - pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " - "dev_addr=0x%08x for read\n", - channel, __func__, sg, sgcount, dma_length, dev_addr); - - imx_dmav1_writel(dev_addr, DMA_SAR(channel)); - imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel)); - } else if (direction == DMA_MEM_TO_DEV) { - pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d " - "dev_addr=0x%08x for write\n", - channel, __func__, sg, sgcount, dma_length, dev_addr); - - imx_dmav1_writel(dev_addr, DMA_DAR(channel)); - imx_dmav1_writel(imxdmac->internal.ccr_to_device, DMA_CCR(channel)); - } else { - printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n", - channel); - return -EINVAL; - } - - imxdma_sg_next(d, sg); - - return 0; -} - static void imxdma_watchdog(unsigned long data) { struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; @@ -526,7 +473,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) { struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); struct imxdma_engine *imxdma = imxdmac->imxdma; - int ret; /* Configure and enable */ switch (d->type) { @@ -548,10 +494,37 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) /* Cyclic transfer is the same as slave_sg with special sg configuration. 
*/ case IMXDMA_DESC_CYCLIC: case IMXDMA_DESC_SLAVE_SG: - ret = imxdma_setup_sg_hw(d, d->sg, d->sgcount, d->len, - imxdmac->per_address, d->direction); - if (ret < 0) - return ret; + imxdmac->internal.sg = d->sg; + imxdmac->internal.resbytes = d->len; + + if (d->direction == DMA_DEV_TO_MEM) { + imx_dmav1_writel(imxdmac->per_address, + DMA_SAR(imxdmac->channel)); + imx_dmav1_writel(imxdmac->ccr_from_device, + DMA_CCR(imxdmac->channel)); + + dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " + "total length=%d dev_addr=0x%08x (dev2mem)\n", + __func__, imxdmac->channel, d->sg, d->sgcount, + d->len, imxdmac->per_address); + } else if (d->direction == DMA_MEM_TO_DEV) { + imx_dmav1_writel(imxdmac->per_address, + DMA_DAR(imxdmac->channel)); + imx_dmav1_writel(imxdmac->ccr_to_device, + DMA_CCR(imxdmac->channel)); + + dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " + "total length=%d dev_addr=0x%08x (mem2dev)\n", + __func__, imxdmac->channel, d->sg, d->sgcount, + d->len, imxdmac->per_address); + } else { + dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", + __func__, imxdmac->channel); + return -EINVAL; + } + + imxdma_sg_next(d, d->sg); + break; default: return -EINVAL; @@ -641,11 +614,10 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, imxdmac->internal.hw_chaining = 1; if (!imxdma_hw_chain(&imxdmac->internal)) return -EINVAL; - imxdmac->internal.ccr_from_device = - (mode | IMX_DMA_TYPE_FIFO) | + imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | CCR_REN; - imxdmac->internal.ccr_to_device = + imxdmac->ccr_to_device = (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; imx_dmav1_writel(imxdmac->dma_request, -- cgit v1.2.3 From 833bc03bf14ef6d3f82d86845c29aa1f7e2037e3 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:07 +0100 Subject: dmaengine: imx-dma: remove sg member from internal structure. This member is redundant because it is already present in the descriptor structure. Removing it will make the eventual removal of the 'internal' structure easier.
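After the change, the interrupt path works on the descriptor's own scatterlist pointer; condensed from the diff below:

desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
if (desc->sg) {
	/* advance to the next chunk of this descriptor's scatterlist */
	desc->sg = sg_next(desc->sg);
	if (desc->sg)
		imxdma_sg_next(desc, desc->sg);
}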
Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 42 ++++++++++++++++++------------------------ 1 file changed, 18 insertions(+), 24 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 484f35365902..82d4099bd8f4 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -129,7 +129,6 @@ enum imxdma_prep_type { */ struct imxdma_channel_internal { - struct scatterlist *sg; unsigned int resbytes; int in_use; @@ -278,11 +277,11 @@ static void imxdma_enable_hw(struct imxdma_desc *d) CCR_ACRPT, DMA_CCR(channel)); if ((cpu_is_mx21() || cpu_is_mx27()) && - imxdmac->internal.sg && imxdma_hw_chain(&imxdmac->internal)) { - imxdmac->internal.sg = sg_next(imxdmac->internal.sg); - if (imxdmac->internal.sg) { + d->sg && imxdma_hw_chain(&imxdmac->internal)) { + d->sg = sg_next(d->sg); + if (d->sg) { u32 tmp; - imxdma_sg_next(d, imxdmac->internal.sg); + imxdma_sg_next(d, d->sg); tmp = imx_dmav1_readl(DMA_CCR(channel)); imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT, DMA_CCR(channel)); @@ -319,7 +318,6 @@ static void imxdma_watchdog(unsigned long data) imx_dmav1_writel(0, DMA_CCR(channel)); imxdmac->internal.in_use = 0; /* Tasklet watchdog error handler */ tasklet_schedule(&imxdmac->dma_tasklet); @@ -387,24 +385,23 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) int chno = imxdmac->channel; struct imxdma_desc *desc; - if (imxdma->sg) { - u32 tmp; - imxdma->sg = sg_next(imxdma->sg); - - if (imxdma->sg) { + spin_lock(&imxdmac->lock); + if (list_empty(&imxdmac->ld_active)) { + spin_unlock(&imxdmac->lock); + goto out; + } - spin_lock(&imxdmac->lock); - if (list_empty(&imxdmac->ld_active)) { - spin_unlock(&imxdmac->lock); - goto out; - } + desc = list_first_entry(&imxdmac->ld_active, + struct imxdma_desc, + node); + spin_unlock(&imxdmac->lock); - desc = list_first_entry(&imxdmac->ld_active, - struct imxdma_desc, - node); - spin_unlock(&imxdmac->lock); + if (desc->sg) { + u32 tmp; + desc->sg = sg_next(desc->sg); - imxdma_sg_next(desc, imxdma->sg); + if (desc->sg) { + imxdma_sg_next(desc, desc->sg); tmp = imx_dmav1_readl(DMA_CCR(chno)); @@ -477,8 -474,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) /* Configure and enable */ switch (d->type) { case IMXDMA_DESC_MEMCPY: - imxdmac->internal.sg = NULL; - imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel)); imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel)); imx_dmav1_writel(d->config_mem | (d->config_port << 2), @@ -494,7 +489,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) /* Cyclic transfer is the same as slave_sg with special sg configuration. */ case IMXDMA_DESC_CYCLIC: case IMXDMA_DESC_SLAVE_SG: - imxdmac->internal.sg = d->sg; imxdmac->internal.resbytes = d->len; if (d->direction == DMA_DEV_TO_MEM) { -- cgit v1.2.3 From e4756b5e068d866239b6880a7030c9d31400b254 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:08 +0100 Subject: dmaengine: imx-dma: remove 'in_use' field of 'internal' structure. It makes no sense to keep an 'in_use' flag when the multiple-descriptor mechanism already prevents a new DMA transfer from being issued while another one is in progress.
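The serialization the flag used to provide now falls out of the per-channel descriptor lists; a simplified sketch of the issue path (locking and declarations omitted), based on imxdma_issue_pending() as it appears later in this series:

/* Only start the hardware when nothing is active on the channel. */
if (list_empty(&imxdmac->ld_active) && !list_empty(&imxdmac->ld_queue)) {
	desc = list_first_entry(&imxdmac->ld_queue,
				struct imxdma_desc, node);
	if (imxdma_xfer_desc(desc) >= 0)
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
}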
Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 82d4099bd8f4..d7309e44c0df 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -131,8 +131,6 @@ enum imxdma_prep_type { struct imxdma_channel_internal { unsigned int resbytes; - int in_use; - struct timer_list watchdog; int hw_chaining; @@ -266,9 +264,6 @@ static void imxdma_enable_hw(struct imxdma_desc *d) pr_debug("imxdma%d: imx_dma_enable\n", channel); - if (imxdmac->internal.in_use) - return; - local_irq_save(flags); imx_dmav1_writel(1 << channel, DMA_DISR); @@ -287,7 +282,6 @@ static void imxdma_enable_hw(struct imxdma_desc *d) DMA_CCR(channel)); } } - imxdmac->internal.in_use = 1; local_irq_restore(flags); } @@ -307,7 +301,6 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN, DMA_CCR(channel)); imx_dmav1_writel(1 << channel, DMA_DISR); - imxdmac->internal.in_use = 0; local_irq_restore(flags); } @@ -317,7 +310,6 @@ static void imxdma_watchdog(unsigned long data) int channel = imxdmac->channel; imx_dmav1_writel(0, DMA_CCR(channel)); - imxdmac->internal.in_use = 0; /* Tasklet watchdog error handler */ tasklet_schedule(&imxdmac->dma_tasklet); @@ -436,7 +428,6 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) out: imx_dmav1_writel(0, DMA_CCR(chno)); - imxdma->in_use = 0; /* Tasklet irq */ tasklet_schedule(&imxdmac->dma_tasklet); } -- cgit v1.2.3 From 6b0e2f55e3ebc7089abf5e4770f03fb264b6d2ea Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:09 +0100 Subject: dmaengine: imx-dma: remove 'resbytes' field of 'internal' structure. Use the per-descriptor 'len' field to keep track of the remaining bytes instead. This goes in the direction of eventually removing the 'internal' structure. Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index d7309e44c0df..6e03f928ca81 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -129,8 +129,6 @@ enum imxdma_prep_type { */ struct imxdma_channel_internal { - unsigned int resbytes; - struct timer_list watchdog; int hw_chaining; @@ -233,12 +231,11 @@ static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma) static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg) { struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); - struct imxdma_channel_internal *imxdma = &imxdmac->internal; unsigned long now; - now = min(imxdma->resbytes, sg->length); - if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP) - imxdma->resbytes -= now; + now = min(d->len, sg->length); + if (d->len != IMX_DMA_LENGTH_LOOP) + d->len -= now; if (d->direction == DMA_DEV_TO_MEM) imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel)); @@ -480,8 +477,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) /* Cyclic transfer is the same as slave_sg with special sg configuration.
*/ case IMXDMA_DESC_CYCLIC: case IMXDMA_DESC_SLAVE_SG: - imxdmac->internal.resbytes = d->len; - if (d->direction == DMA_DEV_TO_MEM) { imx_dmav1_writel(imxdmac->per_address, DMA_SAR(imxdmac->channel)); -- cgit v1.2.3 From 2d9c2fc59a74e625eff795a788cacc65648290d6 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:10 +0100 Subject: dmaengine: imx-dma: remove internal structure. This structure was created to allow a smooth merge but was meant to be removed. Remaining members 'hw_chaining' and 'watchdog' are moved to the channel structure. Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 64 +++++++++++++++------------------------------------ 1 file changed, 18 insertions(+), 46 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 6e03f928ca81..629be353c63a 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -111,29 +111,6 @@ enum imxdma_prep_type { IMXDMA_DESC_CYCLIC, }; -/* - * struct imxdma_channel_internal - i.MX specific DMA extension - * @name: name specified by DMA client - * @irq_handler: client callback for end of transfer - * @err_handler: client callback for error condition - * @data: clients context data for callbacks - * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE - * @sg: pointer to the actual read/written chunk for scatter-gather emulation - * @resbytes: total residual number of bytes to transfer - * (it can be lower or same as sum of SG mapped chunk sizes) - * @sgcount: number of chunks to be read/written - * - * Structure is used for IMX DMA processing. It would be probably good - * @struct dma_struct in the future for external interfacing and use - * @struct imxdma_channel_internal only as extension to it.
- */ - -struct imxdma_channel_internal { - struct timer_list watchdog; - - int hw_chaining; -}; - struct imxdma_desc { struct list_head node; struct dma_async_tx_descriptor desc; @@ -156,7 +133,8 @@ struct imxdma_desc { }; struct imxdma_channel { - struct imxdma_channel_internal internal; + int hw_chaining; + struct timer_list watchdog; struct imxdma_engine *imxdma; unsigned int channel; @@ -217,10 +195,10 @@ static unsigned imx_dmav1_readl(unsigned offset) return __raw_readl(imx_dmav1_baseaddr + offset); } -static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma) +static int imxdma_hw_chain(struct imxdma_channel *imxdmac) { if (cpu_is_mx27()) - return imxdma->hw_chaining; + return imxdmac->hw_chaining; else return 0; } @@ -269,7 +247,7 @@ static void imxdma_enable_hw(struct imxdma_desc *d) CCR_ACRPT, DMA_CCR(channel)); if ((cpu_is_mx21() || cpu_is_mx27()) && - d->sg && imxdma_hw_chain(&imxdmac->internal)) { + d->sg && imxdma_hw_chain(imxdmac)) { d->sg = sg_next(d->sg); if (d->sg) { u32 tmp; @@ -290,8 +268,8 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) pr_debug("imxdma%d: imx_dma_disable\n", channel); - if (imxdma_hw_chain(&imxdmac->internal)) - del_timer(&imxdmac->internal.watchdog); + if (imxdma_hw_chain(imxdmac)) + del_timer(&imxdmac->watchdog); local_irq_save(flags); imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR); @@ -316,7 +294,6 @@ static void imxdma_watchdog(unsigned long data) static irqreturn_t imxdma_err_handler(int irq, void *dev_id) { struct imxdma_engine *imxdma = dev_id; - struct imxdma_channel_internal *internal; unsigned int err_mask; int i, disr; int errcode; @@ -336,7 +313,6 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id) for (i = 0; i < IMX_DMA_CHANNELS; i++) { if (!(err_mask & (1 << i))) continue; - internal = &imxdma->channel[i].internal; errcode = 0; if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) { @@ -370,7 +346,6 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id) static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) { - struct imxdma_channel_internal *imxdma = &imxdmac->internal; int chno = imxdmac->channel; struct imxdma_desc *desc; @@ -394,11 +369,11 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) tmp = imx_dmav1_readl(DMA_CCR(chno)); - if (imxdma_hw_chain(imxdma)) { + if (imxdma_hw_chain(imxdmac)) { /* FIXME: The timeout should probably be * configurable */ - mod_timer(&imxdma->watchdog, + mod_timer(&imxdmac->watchdog, jiffies + msecs_to_jiffies(500)); tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; @@ -417,8 +392,8 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) return; } - if (imxdma_hw_chain(imxdma)) { - del_timer(&imxdma->watchdog); + if (imxdma_hw_chain(imxdmac)) { + del_timer(&imxdmac->watchdog); return; } } @@ -432,7 +407,6 @@ out: static irqreturn_t dma_irq_handler(int irq, void *dev_id) { struct imxdma_engine *imxdma = dev_id; - struct imxdma_channel_internal *internal; int i, disr; if (cpu_is_mx21() || cpu_is_mx27()) @@ -445,10 +419,8 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) imx_dmav1_writel(disr, DMA_DISR); for (i = 0; i < IMX_DMA_CHANNELS; i++) { - if (disr & (1 << i)) { - internal = &imxdma->channel[i].internal; + if (disr & (1 << i)) dma_irq_handle_channel(&imxdma->channel[i]); - } } return IRQ_HANDLED; @@ -591,8 +563,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, break; } - imxdmac->internal.hw_chaining = 1; - if (!imxdma_hw_chain(&imxdmac->internal)) + imxdmac->hw_chaining 
= 1; + if (!imxdma_hw_chain(imxdmac)) return -EINVAL; imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | @@ -917,7 +889,7 @@ static int __init imxdma_probe(struct platform_device *pdev) /* Initialize channel parameters */ for (i = 0; i < IMX_DMA_CHANNELS; i++) { struct imxdma_channel *imxdmac = &imxdma->channel[i]; - memset(&imxdmac->internal, 0, sizeof(imxdmac->internal)); + if (cpu_is_mx21() || cpu_is_mx27()) { ret = request_irq(MX2x_INT_DMACH0 + i, dma_irq_handler, 0, "DMA", imxdma); @@ -926,9 +898,9 @@ static int __init imxdma_probe(struct platform_device *pdev) MX2x_INT_DMACH0 + i, i); goto err_init; } - init_timer(&imxdmac->internal.watchdog); - imxdmac->internal.watchdog.function = &imxdma_watchdog; - imxdmac->internal.watchdog.data = (unsigned long)imxdmac; + init_timer(&imxdmac->watchdog); + imxdmac->watchdog.function = &imxdma_watchdog; + imxdmac->watchdog.data = (unsigned long)imxdmac; } imxdmac->imxdma = imxdma; -- cgit v1.2.3 From a6cbb2d87d20817e555a6ffa3131bfa1cdd9ab73 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:11 +0100 Subject: dmaengine: imx-dma: remove unused arg of imxdma_sg_next. Since this function is always used with 'desc' as first argument and 'desc->sg' as second argument, the latter is clearly redundant and can be removed. Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 629be353c63a..628b0f61ab38 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -206,9 +206,10 @@ static int imxdma_hw_chain(struct imxdma_channel *imxdmac) /* * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation */ -static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg) +static inline int imxdma_sg_next(struct imxdma_desc *d) { struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); + struct scatterlist *sg = d->sg; unsigned long now; now = min(d->len, sg->length); @@ -251,7 +252,7 @@ static void imxdma_enable_hw(struct imxdma_desc *d) d->sg = sg_next(d->sg); if (d->sg) { u32 tmp; - imxdma_sg_next(d, d->sg); + imxdma_sg_next(d); tmp = imx_dmav1_readl(DMA_CCR(channel)); imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT, DMA_CCR(channel)); @@ -365,7 +366,7 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) desc->sg = sg_next(desc->sg); if (desc->sg) { - imxdma_sg_next(desc, desc->sg); + imxdma_sg_next(desc); tmp = imx_dmav1_readl(DMA_CCR(chno)); @@ -475,7 +476,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) return -EINVAL; } - imxdma_sg_next(d, d->sg); + imxdma_sg_next(d); break; default: -- cgit v1.2.3 From cd5cf9da020293118800864641e09b71e23ba41c Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:12 +0100 Subject: dmaengine: imx-dma: remove 'imx_dmav1_baseaddr' and 'dma_clk'. These global variables are integrated into the dmaengine structure. 
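The resulting accessors, as added in the diff below, take the engine as their first argument so the register base comes from the per-device structure rather than a file-scope global:

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}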
Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 169 +++++++++++++++++++++++++++----------------------- 1 file changed, 93 insertions(+), 76 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 628b0f61ab38..cdca95a5666c 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -160,6 +160,8 @@ struct imxdma_engine { struct device *dev; struct device_dma_parameters dma_parms; struct dma_device dma_device; + void __iomem *base; + struct clk *dma_clk; struct imxdma_channel channel[IMX_DMA_CHANNELS]; }; @@ -181,18 +183,17 @@ static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac) return false; } -/* TODO: put this inside any struct */ -static void __iomem *imx_dmav1_baseaddr; -static struct clk *dma_clk; -static void imx_dmav1_writel(unsigned val, unsigned offset) + +static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val, + unsigned offset) { - __raw_writel(val, imx_dmav1_baseaddr + offset); + __raw_writel(val, imxdma->base + offset); } -static unsigned imx_dmav1_readl(unsigned offset) +static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset) { - return __raw_readl(imx_dmav1_baseaddr + offset); + return __raw_readl(imxdma->base + offset); } static int imxdma_hw_chain(struct imxdma_channel *imxdmac) @@ -209,6 +210,7 @@ static int imxdma_hw_chain(struct imxdma_channel *imxdmac) static inline int imxdma_sg_next(struct imxdma_desc *d) { struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); + struct imxdma_engine *imxdma = imxdmac->imxdma; struct scatterlist *sg = d->sg; unsigned long now; @@ -217,17 +219,19 @@ static inline int imxdma_sg_next(struct imxdma_desc *d) d->len -= now; if (d->direction == DMA_DEV_TO_MEM) - imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel)); + imx_dmav1_writel(imxdma, sg->dma_address, + DMA_DAR(imxdmac->channel)); else - imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel)); + imx_dmav1_writel(imxdma, sg->dma_address, + DMA_SAR(imxdmac->channel)); - imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel)); + imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel)); pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, " "size 0x%08x\n", imxdmac->channel, - imx_dmav1_readl(DMA_DAR(imxdmac->channel)), - imx_dmav1_readl(DMA_SAR(imxdmac->channel)), - imx_dmav1_readl(DMA_CNTR(imxdmac->channel))); + imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), + imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), + imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); return now; } @@ -235,6 +239,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d) static void imxdma_enable_hw(struct imxdma_desc *d) { struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); + struct imxdma_engine *imxdma = imxdmac->imxdma; int channel = imxdmac->channel; unsigned long flags; @@ -242,10 +247,11 @@ static void imxdma_enable_hw(struct imxdma_desc *d) local_irq_save(flags); - imx_dmav1_writel(1 << channel, DMA_DISR); - imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR); - imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN | - CCR_ACRPT, DMA_CCR(channel)); + imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); + imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) & + ~(1 << channel), DMA_DIMR); + imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) | + CCR_CEN | CCR_ACRPT, DMA_CCR(channel)); if ((cpu_is_mx21() || cpu_is_mx27()) && d->sg 
&& imxdma_hw_chain(imxdmac)) { @@ -253,9 +259,9 @@ static void imxdma_enable_hw(struct imxdma_desc *d) if (d->sg) { u32 tmp; imxdma_sg_next(d); - tmp = imx_dmav1_readl(DMA_CCR(channel)); - imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT, - DMA_CCR(channel)); + tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel)); + imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT, + DMA_CCR(channel)); } } @@ -264,6 +270,7 @@ static void imxdma_enable_hw(struct imxdma_desc *d) static void imxdma_disable_hw(struct imxdma_channel *imxdmac) { + struct imxdma_engine *imxdma = imxdmac->imxdma; int channel = imxdmac->channel; unsigned long flags; @@ -273,19 +280,21 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) del_timer(&imxdmac->watchdog); local_irq_save(flags); - imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR); - imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN, - DMA_CCR(channel)); - imx_dmav1_writel(1 << channel, DMA_DISR); + imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) | + (1 << channel), DMA_DIMR); + imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) & + ~CCR_CEN, DMA_CCR(channel)); + imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); local_irq_restore(flags); } static void imxdma_watchdog(unsigned long data) { struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; + struct imxdma_engine *imxdma = imxdmac->imxdma; int channel = imxdmac->channel; - imx_dmav1_writel(0, DMA_CCR(channel)); + imx_dmav1_writel(imxdma, 0, DMA_CCR(channel)); /* Tasklet watchdog error handler */ tasklet_schedule(&imxdmac->dma_tasklet); @@ -299,37 +308,37 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id) int i, disr; int errcode; - disr = imx_dmav1_readl(DMA_DISR); + disr = imx_dmav1_readl(imxdma, DMA_DISR); - err_mask = imx_dmav1_readl(DMA_DBTOSR) | - imx_dmav1_readl(DMA_DRTOSR) | - imx_dmav1_readl(DMA_DSESR) | - imx_dmav1_readl(DMA_DBOSR); + err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) | + imx_dmav1_readl(imxdma, DMA_DRTOSR) | + imx_dmav1_readl(imxdma, DMA_DSESR) | + imx_dmav1_readl(imxdma, DMA_DBOSR); if (!err_mask) return IRQ_HANDLED; - imx_dmav1_writel(disr & err_mask, DMA_DISR); + imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR); for (i = 0; i < IMX_DMA_CHANNELS; i++) { if (!(err_mask & (1 << i))) continue; errcode = 0; - if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) { - imx_dmav1_writel(1 << i, DMA_DBTOSR); + if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) { + imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR); errcode |= IMX_DMA_ERR_BURST; } - if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) { - imx_dmav1_writel(1 << i, DMA_DRTOSR); + if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) { + imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR); errcode |= IMX_DMA_ERR_REQUEST; } - if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) { - imx_dmav1_writel(1 << i, DMA_DSESR); + if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) { + imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR); errcode |= IMX_DMA_ERR_TRANSFER; } - if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) { - imx_dmav1_writel(1 << i, DMA_DBOSR); + if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) { + imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR); errcode |= IMX_DMA_ERR_BUFFER; } /* Tasklet error handler */ @@ -347,6 +356,7 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id) static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) { + struct imxdma_engine *imxdma = imxdmac->imxdma; int chno = imxdmac->channel; struct imxdma_desc *desc; @@ -368,7 +378,7 @@ static void 
dma_irq_handle_channel(struct imxdma_channel *imxdmac) if (desc->sg) { imxdma_sg_next(desc); - tmp = imx_dmav1_readl(DMA_CCR(chno)); + tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno)); if (imxdma_hw_chain(imxdmac)) { /* FIXME: The timeout should probably be @@ -378,13 +388,14 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) jiffies + msecs_to_jiffies(500)); tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; - imx_dmav1_writel(tmp, DMA_CCR(chno)); + imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); } else { - imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno)); + imx_dmav1_writel(imxdma, tmp & ~CCR_CEN, + DMA_CCR(chno)); tmp |= CCR_CEN; } - imx_dmav1_writel(tmp, DMA_CCR(chno)); + imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); if (imxdma_chan_is_doing_cyclic(imxdmac)) /* Tasklet progression */ @@ -400,7 +411,7 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) } out: - imx_dmav1_writel(0, DMA_CCR(chno)); + imx_dmav1_writel(imxdma, 0, DMA_CCR(chno)); /* Tasklet irq */ tasklet_schedule(&imxdmac->dma_tasklet); } @@ -413,12 +424,12 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) if (cpu_is_mx21() || cpu_is_mx27()) imxdma_err_handler(irq, dev_id); - disr = imx_dmav1_readl(DMA_DISR); + disr = imx_dmav1_readl(imxdma, DMA_DISR); pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", disr); - imx_dmav1_writel(disr, DMA_DISR); + imx_dmav1_writel(imxdma, disr, DMA_DISR); for (i = 0; i < IMX_DMA_CHANNELS; i++) { if (disr & (1 << i)) dma_irq_handle_channel(&imxdma->channel[i]); @@ -435,12 +446,12 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) /* Configure and enable */ switch (d->type) { case IMXDMA_DESC_MEMCPY: - imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel)); - imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel)); - imx_dmav1_writel(d->config_mem | (d->config_port << 2), + imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel)); + imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel)); + imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2), DMA_CCR(imxdmac->channel)); - imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel)); + imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " "dma_length=%d\n", __func__, imxdmac->channel, @@ -451,9 +462,9 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) case IMXDMA_DESC_CYCLIC: case IMXDMA_DESC_SLAVE_SG: if (d->direction == DMA_DEV_TO_MEM) { - imx_dmav1_writel(imxdmac->per_address, + imx_dmav1_writel(imxdma, imxdmac->per_address, DMA_SAR(imxdmac->channel)); - imx_dmav1_writel(imxdmac->ccr_from_device, + imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, DMA_CCR(imxdmac->channel)); dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " @@ -461,9 +472,9 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) __func__, imxdmac->channel, d->sg, d->sgcount, d->len, imxdmac->per_address); } else if (d->direction == DMA_MEM_TO_DEV) { - imx_dmav1_writel(imxdmac->per_address, + imx_dmav1_writel(imxdma, imxdmac->per_address, DMA_DAR(imxdmac->channel)); - imx_dmav1_writel(imxdmac->ccr_to_device, + imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, DMA_CCR(imxdmac->channel)); dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " @@ -528,6 +539,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct dma_slave_config *dmaengine_cfg = (void *)arg; + struct imxdma_engine *imxdma = imxdmac->imxdma; unsigned long flags; unsigned int mode = 0; @@ -573,12 +585,12 @@ 
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, imxdmac->ccr_to_device = (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; - imx_dmav1_writel(imxdmac->dma_request, + imx_dmav1_writel(imxdma, imxdmac->dma_request, DMA_RSSR(imxdmac->channel)); /* Set burst length */ - imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size, - DMA_BLR(imxdmac->channel)); + imx_dmav1_writel(imxdma, imxdmac->watermark_level * + imxdmac->word_size, DMA_BLR(imxdmac->channel)); return 0; default: @@ -836,27 +848,35 @@ static int __init imxdma_probe(struct platform_device *pdev) struct imxdma_engine *imxdma; int ret, i; - if (cpu_is_mx1()) - imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR); - else if (cpu_is_mx21()) - imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR); - else if (cpu_is_mx27()) - imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR); - else + + imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); + if (!imxdma) + return -ENOMEM; + + if (cpu_is_mx1()) { + imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR); + } else if (cpu_is_mx21()) { + imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR); + } else if (cpu_is_mx27()) { + imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR); + } else { + kfree(imxdma); return 0; + } - dma_clk = clk_get(NULL, "dma"); - if (IS_ERR(dma_clk)) - return PTR_ERR(dma_clk); - clk_enable(dma_clk); + imxdma->dma_clk = clk_get(NULL, "dma"); + if (IS_ERR(imxdma->dma_clk)) + return PTR_ERR(imxdma->dma_clk); + clk_enable(imxdma->dma_clk); /* reset DMA module */ - imx_dmav1_writel(DCR_DRST, DMA_DCR); + imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); if (cpu_is_mx1()) { ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); if (ret) { pr_crit("Can't register IRQ for DMA\n"); + kfree(imxdma); return ret; } @@ -864,22 +884,19 @@ static int __init imxdma_probe(struct platform_device *pdev) if (ret) { pr_crit("Can't register ERRIRQ for DMA\n"); free_irq(MX1_DMA_INT, NULL); + kfree(imxdma); return ret; } } /* enable DMA module */ - imx_dmav1_writel(DCR_DEN, DMA_DCR); + imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR); /* clear all interrupts */ - imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); + imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); /* disable interrupts */ - imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); - - imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); - if (!imxdma) - return -ENOMEM; + imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); INIT_LIST_HEAD(&imxdma->dma_device.channels); -- cgit v1.2.3 From f9b283a6e41be584f4b1f4c6634625f41ff0c728 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:13 +0100 Subject: dmaengine: imx-dma: use 'dev_dbg' and 'dev_warn' for messages. There were some 'pr_crit' and 'pr_debug' messages due to the initial merge. Replace them by 'dev_dbg' and 'dev_warn' to be consistent. 
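A representative before/after pair from the diff below; the dev_* helpers prefix each message with the driver and device name, so the hand-rolled "imxdma%d" prefix can be dropped:

/* before */
pr_debug("imxdma%d: imx_dma_enable\n", channel);

/* after: the device identifies itself, only the channel number is logged */
dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);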
Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index cdca95a5666c..307cd142f06a 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -227,8 +227,8 @@ static inline int imxdma_sg_next(struct imxdma_desc *d) imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel)); - pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, " - "size 0x%08x\n", imxdmac->channel, + dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, " + "size 0x%08x\n", __func__, imxdmac->channel, imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); @@ -243,7 +243,7 @@ static void imxdma_enable_hw(struct imxdma_desc *d) int channel = imxdmac->channel; unsigned long flags; - pr_debug("imxdma%d: imx_dma_enable\n", channel); + dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); local_irq_save(flags); @@ -274,7 +274,7 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) int channel = imxdmac->channel; unsigned long flags; - pr_debug("imxdma%d: imx_dma_disable\n", channel); + dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); if (imxdma_hw_chain(imxdmac)) del_timer(&imxdmac->watchdog); @@ -298,7 +298,8 @@ static void imxdma_watchdog(unsigned long data) /* Tasklet watchdog error handler */ tasklet_schedule(&imxdmac->dma_tasklet); - pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel); + dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n", + imxdmac->channel); } static irqreturn_t imxdma_err_handler(int irq, void *dev_id) @@ -426,8 +427,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) disr = imx_dmav1_readl(imxdma, DMA_DISR); - pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", - disr); + dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr); imx_dmav1_writel(imxdma, disr, DMA_DISR); for (i = 0; i < IMX_DMA_CHANNELS; i++) { @@ -875,14 +875,14 @@ static int __init imxdma_probe(struct platform_device *pdev) if (cpu_is_mx1()) { ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); if (ret) { - pr_crit("Can't register IRQ for DMA\n"); + dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); kfree(imxdma); return ret; } ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); if (ret) { - pr_crit("Can't register ERRIRQ for DMA\n"); + dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); free_irq(MX1_DMA_INT, NULL); kfree(imxdma); return ret; @@ -912,8 +912,9 @@ static int __init imxdma_probe(struct platform_device *pdev) ret = request_irq(MX2x_INT_DMACH0 + i, dma_irq_handler, 0, "DMA", imxdma); if (ret) { - pr_crit("Can't register IRQ %d for DMA channel %d\n", - MX2x_INT_DMACH0 + i, i); + dev_warn(imxdma->dev, "Can't register IRQ %d " + "for DMA channel %d\n", + MX2x_INT_DMACH0 + i, i); goto err_init; } init_timer(&imxdmac->watchdog); -- cgit v1.2.3 From f606ab897b6d7f35b57c7474424676e30457520b Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:14 +0100 Subject: dmaengine: i.MX: Add support for interleaved transfers. i.MX2 and i.MX1 chips have the possibility to do interleaved transfers with two constraints: - Only one chunk can be used (i.e. only 2D transfers are allowed). - Only 2 interleaved configurations can be applied at the same time for all channels. 
Since this patch adds a new resource 'slots_2d' which is shared by all the DMA channels, and to avoid disgusting locking BUGs, the 'lock' member has been moved to the global 'imxdma_engine' structure. Signed-off-by: Javier Martin Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 146 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 131 insertions(+), 15 deletions(-) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 307cd142f06a..0f698f883cca 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -36,6 +36,10 @@ #define IMXDMA_MAX_CHAN_DESCRIPTORS 16 #define IMX_DMA_CHANNELS 16 +#define IMX_DMA_2D_SLOTS 2 +#define IMX_DMA_2D_SLOT_A 0 +#define IMX_DMA_2D_SLOT_B 1 + #define IMX_DMA_LENGTH_LOOP ((unsigned int)-1) #define IMX_DMA_MEMSIZE_32 (0 << 4) #define IMX_DMA_MEMSIZE_8 (1 << 4) @@ -111,6 +115,13 @@ enum imxdma_prep_type { IMXDMA_DESC_CYCLIC, }; +struct imx_dma_2d_config { + u16 xsr; + u16 ysr; + u16 wsr; + int count; +}; + struct imxdma_desc { struct list_head node; struct dma_async_tx_descriptor desc; @@ -147,13 +158,14 @@ struct imxdma_channel { dma_addr_t per_address; u32 watermark_level; struct dma_chan chan; - spinlock_t lock; struct dma_async_tx_descriptor desc; enum dma_status status; int dma_request; struct scatterlist *sg_list; u32 ccr_from_device; u32 ccr_to_device; + bool enabled_2d; + int slot_2d; }; struct imxdma_engine { @@ -162,6 +174,8 @@ struct imxdma_engine { struct dma_device dma_device; void __iomem *base; struct clk *dma_clk; + spinlock_t lock; + struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; struct imxdma_channel channel[IMX_DMA_CHANNELS]; }; @@ -361,16 +375,16 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) int chno = imxdmac->channel; struct imxdma_desc *desc; - spin_lock(&imxdmac->lock); + spin_lock(&imxdma->lock); if (list_empty(&imxdmac->ld_active)) { - spin_unlock(&imxdmac->lock); + spin_unlock(&imxdma->lock); goto out; } desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); - spin_unlock(&imxdmac->lock); + spin_unlock(&imxdma->lock); if (desc->sg) { u32 tmp; @@ -442,9 +456,53 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) { struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); struct imxdma_engine *imxdma = imxdmac->imxdma; + unsigned long flags; + int slot = -1; + int i; /* Configure and enable */ switch (d->type) { + case IMXDMA_DESC_INTERLEAVED: + /* Try to get a free 2D slot */ + spin_lock_irqsave(&imxdma->lock, flags); + for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { + if ((imxdma->slots_2d[i].count > 0) && + ((imxdma->slots_2d[i].xsr != d->x) || + (imxdma->slots_2d[i].ysr != d->y) || + (imxdma->slots_2d[i].wsr != d->w))) + continue; + slot = i; + break; + } + if (slot < 0) + return -EBUSY; + + imxdma->slots_2d[slot].xsr = d->x; + imxdma->slots_2d[slot].ysr = d->y; + imxdma->slots_2d[slot].wsr = d->w; + imxdma->slots_2d[slot].count++; + + imxdmac->slot_2d = slot; + imxdmac->enabled_2d = true; + spin_unlock_irqrestore(&imxdma->lock, flags); + + if (slot == IMX_DMA_2D_SLOT_A) { + d->config_mem &= ~CCR_MSEL_B; + d->config_port &= ~CCR_MSEL_B; + imx_dmav1_writel(imxdma, d->x, DMA_XSRA); + imx_dmav1_writel(imxdma, d->y, DMA_YSRA); + imx_dmav1_writel(imxdma, d->w, DMA_WSRA); + } else { + d->config_mem |= CCR_MSEL_B; + d->config_port |= CCR_MSEL_B; + imx_dmav1_writel(imxdma, d->x, DMA_XSRB); + imx_dmav1_writel(imxdma, d->y, DMA_YSRB); + imx_dmav1_writel(imxdma, d->w, DMA_WSRB); + } + /* + * We
fall-through here intentionally, since a 2D transfer is + * similar to MEMCPY just adding the 2D slot configuration. + */ case IMXDMA_DESC_MEMCPY: imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel)); imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel)); @@ -503,7 +561,7 @@ static void imxdma_tasklet(unsigned long data) struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; - spin_lock(&imxdmac->lock); + spin_lock(&imxdma->lock); if (list_empty(&imxdmac->ld_active)) { /* Someone might have called terminate all */ @@ -520,6 +578,12 @@ static void imxdma_tasklet(unsigned long data) if (imxdma_chan_is_doing_cyclic(imxdmac)) goto out; + /* Free 2D slot if it was an interleaved transfer */ + if (imxdmac->enabled_2d) { + imxdma->slots_2d[imxdmac->slot_2d].count--; + imxdmac->enabled_2d = false; + } + list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); if (!list_empty(&imxdmac->ld_queue)) { @@ -531,7 +595,7 @@ static void imxdma_tasklet(unsigned long data) __func__, imxdmac->channel); } out: - spin_unlock(&imxdmac->lock); + spin_unlock(&imxdma->lock); } static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -547,10 +611,10 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, case DMA_TERMINATE_ALL: imxdma_disable_hw(imxdmac); - spin_lock_irqsave(&imxdmac->lock, flags); + spin_lock_irqsave(&imxdma->lock, flags); list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); - spin_unlock_irqrestore(&imxdmac->lock, flags); + spin_unlock_irqrestore(&imxdma->lock, flags); return 0; case DMA_SLAVE_CONFIG: if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { @@ -610,12 +674,13 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan, static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) { struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); + struct imxdma_engine *imxdma = imxdmac->imxdma; dma_cookie_t cookie; unsigned long flags; - spin_lock_irqsave(&imxdmac->lock, flags); + spin_lock_irqsave(&imxdma->lock, flags); cookie = dma_cookie_assign(tx); - spin_unlock_irqrestore(&imxdmac->lock, flags); + spin_unlock_irqrestore(&imxdma->lock, flags); return cookie; } @@ -654,16 +719,17 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) static void imxdma_free_chan_resources(struct dma_chan *chan) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); + struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc, *_desc; unsigned long flags; - spin_lock_irqsave(&imxdmac->lock, flags); + spin_lock_irqsave(&imxdma->lock, flags); imxdma_disable_hw(imxdmac); list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); - spin_unlock_irqrestore(&imxdmac->lock, flags); + spin_unlock_irqrestore(&imxdma->lock, flags); list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) { kfree(desc); @@ -818,6 +884,49 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( return &desc->desc; } +static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( + struct dma_chan *chan, struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); + struct imxdma_engine *imxdma = imxdmac->imxdma; + struct imxdma_desc *desc; + + dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" + " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, + imxdmac->channel, xt->src_start, 
xt->dst_start, + xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", + xt->numf, xt->frame_size); + + if (list_empty(&imxdmac->ld_free) || + imxdma_chan_is_doing_cyclic(imxdmac)) + return NULL; + + if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM) + return NULL; + + desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); + + desc->type = IMXDMA_DESC_INTERLEAVED; + desc->src = xt->src_start; + desc->dest = xt->dst_start; + desc->x = xt->sgl[0].size; + desc->y = xt->numf; + desc->w = xt->sgl[0].icg + desc->x; + desc->len = desc->x * desc->y; + desc->direction = DMA_MEM_TO_MEM; + desc->config_port = IMX_DMA_MEMSIZE_32; + desc->config_mem = IMX_DMA_MEMSIZE_32; + if (xt->src_sgl) + desc->config_mem |= IMX_DMA_TYPE_2D; + if (xt->dst_sgl) + desc->config_port |= IMX_DMA_TYPE_2D; + desc->desc.callback = NULL; + desc->desc.callback_param = NULL; + + return &desc->desc; +} + static void imxdma_issue_pending(struct dma_chan *chan) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); @@ -825,7 +934,7 @@ static void imxdma_issue_pending(struct dma_chan *chan) struct imxdma_desc *desc; unsigned long flags; - spin_lock_irqsave(&imxdmac->lock, flags); + spin_lock_irqsave(&imxdma->lock, flags); if (list_empty(&imxdmac->ld_active) && !list_empty(&imxdmac->ld_queue)) { desc = list_first_entry(&imxdmac->ld_queue, @@ -840,7 +949,7 @@ static void imxdma_issue_pending(struct dma_chan *chan) &imxdmac->ld_active); } } - spin_unlock_irqrestore(&imxdmac->lock, flags); + spin_unlock_irqrestore(&imxdma->lock, flags); } static int __init imxdma_probe(struct platform_device *pdev) @@ -903,6 +1012,13 @@ static int __init imxdma_probe(struct platform_device *pdev) dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask); + dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask); + + /* Initialize 2D global parameters */ + for (i = 0; i < IMX_DMA_2D_SLOTS; i++) + imxdma->slots_2d[i].count = 0; + + spin_lock_init(&imxdma->lock); /* Initialize channel parameters */ for (i = 0; i < IMX_DMA_CHANNELS; i++) { @@ -923,7 +1039,6 @@ static int __init imxdma_probe(struct platform_device *pdev) } imxdmac->imxdma = imxdma; - spin_lock_init(&imxdmac->lock); INIT_LIST_HEAD(&imxdmac->ld_queue); INIT_LIST_HEAD(&imxdmac->ld_free); @@ -949,6 +1064,7 @@ static int __init imxdma_probe(struct platform_device *pdev) imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; + imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved; imxdma->dma_device.device_control = imxdma_control; imxdma->dma_device.device_issue_pending = imxdma_issue_pending; -- cgit v1.2.3 From 660cd0dd94eba5201c69cd10f2d2fefb52807fa8 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Thu, 22 Mar 2012 14:54:15 +0100 Subject: dmaengine: i.MX: Fix merge of cookie branch. While merging the DMA cookie changes, a small chunk of code was dropped. This broke the imx-dma driver. This patch adds the chunk back and fixes the problem.
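With the chunk restored, imxdma_tx_submit() reads as follows (assembled from this fix plus the locking changes earlier in the series):

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	/* The restored line: queue the prepared descriptor so that
	 * issue_pending() can find and start it */
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}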
Signed-off-by: Javier Martin Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/dma/imx-dma.c') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 0f698f883cca..569b0a29fa8c 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -679,6 +679,7 @@ static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) unsigned long flags; spin_lock_irqsave(&imxdma->lock, flags); + list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue); cookie = dma_cookie_assign(tx); spin_unlock_irqrestore(&imxdma->lock, flags); -- cgit v1.2.3