about summary refs log tree commit diff
path: root/drivers/dma/k3dma.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/dma/k3dma.c')
-rw-r--r--  drivers/dma/k3dma.c  |  69
1 file changed, 62 insertions(+), 7 deletions(-)
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 84ddea449ef4..85a046d9cb1a 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define DEBUG
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
@@ -126,11 +127,22 @@ static void terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
static void set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
+	/* Debug aid: every descriptor handed to hardware must have CCFG_EN set. */
+	if (!(hw->config & CCFG_EN))
+		printk(KERN_WARNING "set_desc CCFG_EN not set\n");
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(hw->config, phy->base + CX_CONFIG);
+	/* Debug aid: read CX_CONFIG back to verify the enable bit latched. */
+	{
+		u32 v = readl_relaxed(phy->base + CX_CONFIG);
+
+		if (!(v & CCFG_EN))
+			printk(KERN_WARNING "v set_desc CCFG_EN not set\n");
+	}
}
static u32 get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
@@ -139,11 +151,13 @@ static u32 get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
cnt &= 0xffff;
+ printk("cnt=0x%x\n", cnt);
return cnt;
}
static u32 get_curr_lli(struct k3_dma_phy *phy)
{
+ printk("phy->base=0x%x, phy->idx=%d\n", phy->base, phy->idx);
return readl_relaxed(phy->base + CX_LLI);
}
@@ -174,23 +188,29 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
struct k3_dma_phy *p;
+ struct k3_dma_chan *c;
u32 stat = readl_relaxed(d->base + INT_STAT);
u32 tc1 = readl_relaxed(d->base + INT_TC1);
u32 err1 = readl_relaxed(d->base + INT_ERR1);
u32 err2 = readl_relaxed(d->base + INT_ERR2);
u32 i, irq_num = 0;
-
+// printk(KERN_DEBUG "stat=0x%x, tc1=0x%x\n", stat, tc1);
while (stat) {
i = __ffs(stat);
stat &= (stat - 1);
if (likely(tc1 & BIT(i))) {
p = &d->phy[i];
+ c = p->vchan;
+ spin_lock(&c->vc.lock);
p->ds_done = p->ds_run;
vchan_cookie_complete(&p->ds_run->vd);
+ spin_unlock(&c->vc.lock);
irq_num++;
}
- if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
+ if (unlikely((err1 & BIT(i)) || (err2 & BIT(i)))) {
+ printk("err1=0x%x, err2=0x%x", err1, err2);
dev_warn(d->slave.dev, "DMA ERR\n");
+ }
}
writel_relaxed(tc1, d->base + INT_TC1_RAW);
@@ -209,8 +229,11 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
- if (BIT(c->phy->idx) & get_chan_stat(d))
- return -EAGAIN;
+ if (BIT(c->phy->idx) & get_chan_stat(d)) {
+ printk("should not error here\n");
+ return 0;
+ //return -EAGAIN;
+ }
if (vd) {
struct k3_dma_desc_sw *ds =
@@ -244,6 +267,7 @@ static void k3_dma_tasklet(unsigned long arg)
list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
spin_lock_irq(&c->vc.lock);
p = c->phy;
+ //if (p && !(BIT(p->idx) & get_chan_stat(d)) {
if (p && p->ds_done) {
if (k3_dma_start_txd(c)) {
/* No current txd associated with this channel */
@@ -324,6 +348,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
if (ret == DMA_SUCCESS)
return ret;
+ printk("c->vc.chan->completed_cookie=%d, cookie=%d, ret=%d\n", c->vc.chan.completed_cookie, cookie, ret);
spin_lock_irqsave(&c->vc.lock, flags);
p = c->phy;
@@ -333,22 +358,49 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
*/
vd = vchan_find_desc(&c->vc, cookie);
if (vd) {
+ printk("not start, c->vd.tx.cookie=%d, cookie=%d\n", vd->tx.cookie, cookie);
+ printk("CX_CONFIG=0x%x\n", readl_relaxed(p->base + CX_CONFIG));
+ printk("CX_SRC=0x%x\n", readl_relaxed(p->base + CX_SRC));
+ printk("CX_DST=0x%x\n", readl_relaxed(p->base + CX_DST));
+ printk("CX_CNT=0x%x\n", readl_relaxed(p->base + CX_CNT));
+ printk("CX_LLI=0x%x\n", readl_relaxed(p->base + CX_LLI));
+ if ((p) && (p->ds_run)) {
+ printk("p->ds_run->vd.tx.cookie=%d, p->idx=%d\n", p->ds_run->vd.tx.cookie, p->idx);
+ }
bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
} else if ((!p) || (!p->ds_run)) {
+ printk("finish , \n");
bytes = 0;
} else {
struct k3_dma_desc_sw *ds = p->ds_run;
u32 clli = 0, index = 0;
-
bytes = get_curr_cnt(d, p);
clli = get_curr_lli(p);
index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
+ printk("clli=0x%x, ds->desc_hw_lli=0x%x,index=0x%x, LLI_MAX_NUM=0x%x\n", clli, ds->desc_hw_lli, index, LLI_MAX_NUM);
for (; index < LLI_MAX_NUM; index++) {
bytes += ds->desc_hw[index].count;
/* end of lli */
if (!ds->desc_hw[index].lli)
break;
}
+ printk("CX_CONFIG=0x%x\n", readl_relaxed(p->base + CX_CONFIG));
+ printk("CX_SRC=0x%x\n", readl_relaxed(p->base + CX_SRC));
+ printk("CX_DST=0x%x\n", readl_relaxed(p->base + CX_DST));
+ printk("CX_CNT=0x%x\n", readl_relaxed(p->base + CX_CNT));
+ printk("CX_LLI=0x%x\n", readl_relaxed(p->base + CX_LLI));
+ printk("int_stat=0x%x\n", readl_relaxed(d->base + INT_STAT));
+ printk("int_tc1=0x%x\n", readl_relaxed(d->base + INT_TC1));
+ printk("int_err1=0x%x\n", readl_relaxed(d->base + INT_ERR1));
+ printk("INT_ERR2=0x%x\n", readl_relaxed(d->base + INT_ERR2));
+ printk("INT_TC1_MASK=0x%x\n", readl_relaxed(d->base +INT_TC1_MASK));
+ printk("INT_ERR1_MASK=0x%x\n", readl_relaxed(d->base + INT_ERR1_MASK));
+ printk("INT_ERR2_MASK=0x%x\n", readl_relaxed(d->base + INT_ERR2_MASK));
+ printk("CH_STAT=0x%x\n", readl_relaxed(d->base + CH_STAT));
+ printk("INT_TC1_RAW=0x%x\n", readl_relaxed(d->base + INT_TC1_RAW));
+ printk("INT_ERR1_RAW=0x%x\n", readl_relaxed(d->base + INT_ERR1_RAW));
+ printk("INT_ERR2_RAW=0x%x\n", readl_relaxed(d->base + INT_ERR2_RAW));
+ printk(" IN RUN , bytes=0x%x, p->idx=%d, ds->vd.tx.cookie=%d, p->ds_run=0x%x, p->ds_done=0x%x\n", bytes, p->idx, ds->vd.tx.cookie, p->ds_run, p->ds_done);
}
spin_unlock_irqrestore(&c->vc.lock, flags);
dma_set_residue(state, bytes);
@@ -384,7 +436,8 @@ static void k3_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
BUG_ON(num >= LLI_MAX_NUM);
-
+ if (!(ccfg & CCFG_EN))
+ printk("CCFG_EN not set\n");
ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
sizeof(struct k3_desc_hw);
ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
@@ -558,6 +611,7 @@ static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
case DMA_TERMINATE_ALL:
dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
/* Clear the tx descriptor lists */
+ printk(" DMA_TERMINATE_ALL \n");
spin_lock_irqsave(&c->vc.lock, flags);
vchan_get_all_descriptors(&c->vc, &head);
if (c)
@@ -623,7 +677,7 @@ static int k3_dma_probe(struct platform_device *op)
d->base = devm_request_and_ioremap(&op->dev, iores);
if (!d->base)
return -EADDRNOTAVAIL;
-
+ printk("d->base=0x%x\n", d->base);
of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
if (of_id)
of_property_read_u32((&op->dev)->of_node,
@@ -647,6 +701,7 @@ static int k3_dma_probe(struct platform_device *op)
p->idx = i;
p->base = d->base + i * 0x40;
+ printk("p[%d]->base=0x%x\n", i, p->base);
}
INIT_LIST_HEAD(&d->slave.channels);