author    Ludovic Barre <ludovic.barre@stericsson.com>    2011-03-25 09:26:57 +0100
committer Sebastian RASMUSSEN <sebastian.rasmussen@stericsson.com>    2011-04-28 15:18:27 +0200
commit    6d8c735241f9f369a11c9f2b15656951bad438a6 (patch)
tree      e647755f600c38a00d3f3f274ecb26e66290039d /drivers
parent    cca519d4e671d063997762827266119b242271f3 (diff)
reorganize send request to split it into:
- PIO/DMA common data setup
- specific data preparation for PIO or DMA mode
- data start (register writing)
So in a write request (irq context) the configuration is already done;
just write the registers.

dma split adaptation: setup and preparation.
centralizes imasks management.

mmci irq context: removes data transfer and finalize, delaying them to
the tasklet. irq(s) take care of:
- checking the sdio interrupt
- checking the request status (errors, success)
- updating the completion
- scheduling the tasklet

dma irq context: removes finalize, delaying it to the tasklet. the dma
irq takes care of:
- updating the completion
- scheduling the tasklet

tasklet context: data transfer and finalize.
tasklet context: dma finalize.

sdio adaptation.
set_clkreg adaptation.
provide a debugfs view of mmci driver status.

ST-Ericsson Linux next: N/A
ST-Ericsson ID: 328471
ST-Ericsson FOSS-OUT ID: Trivial

Signed-off-by: Ludovic Barre <ludovic.barre@stericsson.com>
Change-Id: I8f97286c31c13ba8cc168d99b0cfbb2d61269791
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/21861
Reviewed-by: Sebastian RASMUSSEN <sebastian.rasmussen@stericsson.com>
Tested-by: Sebastian RASMUSSEN <sebastian.rasmussen@stericsson.com>
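In outline, the irq/tasklet split above reduces to the following shape. This is a minimal sketch built on the names this patch introduces (complete_what, pio_active, mmci_tasklet, mmci_pio_xferdata(), mmci_finalize_request()), not a verbatim excerpt of the driver:

#include <linux/interrupt.h>
#include "mmci.h"

static irqreturn_t sketch_mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	/* irq context: check the sdio interrupt and the request status
	 * (errors, success), update the completion state, then defer
	 * the heavy work to the tasklet */
	host->complete_what = COMPLETION_FINALIZE;
	tasklet_schedule(&host->mmci_tasklet);
	return IRQ_HANDLED;
}

static void sketch_mmci_tasklet(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;

	/* tasklet context: move any pending PIO data, then finalize */
	if (host->pio_active != XFER_NONE)
		mmci_pio_xferdata(host);
	if (host->complete_what == COMPLETION_FINALIZE)
		mmci_finalize_request(host);
}

The irq handler only classifies status and flips state; the FIFO draining and mmc_request_done() reporting happen in the tasklet.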
Diffstat (limited to 'drivers')
-rw-r--r--drivers/mmc/host/mmci.c1347
-rw-r--r--drivers/mmc/host/mmci.h71
2 files changed, 825 insertions, 593 deletions
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 5a08b0fcd2a..0209828435c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -59,8 +59,6 @@ static unsigned int dataread_delay_clks = 7500000;
* @txsize_threshold: Sets DMA burst size to minimal if transfer size is
* less or equal to this threshold. This shall be specified in
* number of bytes. Set 0 for no burst compensation
- * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
- * and will not work at all.
* @sdio: variant supports SDIO
* @st_clkdiv: true if using a ST-specific clock divider algorithm
* @pwrreg_powerup: power up value for MMCIPOWER register
@@ -77,7 +75,6 @@ struct variant_data {
unsigned int fifosize;
unsigned int fifohalfsize;
unsigned int txsize_threshold;
- bool broken_blockend;
bool sdio;
bool st_clkdiv;
unsigned int pwrreg_powerup;
@@ -110,13 +107,13 @@ static struct variant_data variant_ux500 = {
.clkreg_enable = 1 << 14, /* HWFCEN */
.dmareg_enable = 1 << 12, /* DMAREQCTRL */
.datalength_bits = 24,
- .broken_blockend = true,
.sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
.non_power_of_2_blksize = true,
};
+
/*
* Debugfs
*/
@@ -195,6 +192,127 @@ static const struct file_operations mmci_fops_regs = {
.release = single_release,
};
+static int mmci_stat_show(struct seq_file *seq, void *v)
+{
+ struct mmci_host *host = seq->private;
+
+ seq_printf(seq, "\033[1;34mMMCI Statistic\033[0m\n");
+
+ seq_printf(seq, "%-20s:%ld\n",
+ "nb core requetes", host->stat.nb_core_req);
+ seq_printf(seq, "%-20s:%ld\n",
+ "nb cmdcrcfail", host->stat.nb_cmdcrcfail);
+ seq_printf(seq, "%-20s:%ld\n",
+ "nb rx overrun", host->stat.nb_rxoverrun);
+ seq_printf(seq, "%-20s:%ld\n",
+ "nb tx underrun", host->stat.nb_txunderrun);
+ seq_printf(seq, "%-20s:%ld\n",
+ "nb datacrcfail", host->stat.nb_datacrcfail);
+ seq_printf(seq, "%-20s:%ld\n",
+ "nb datatimeout", host->stat.nb_datatimeout);
+ seq_printf(seq, "%-20s:%ld\n",
+ "nb startbiterr", host->stat.nb_startbiterr);
+
+ seq_printf(seq, "\n\033[1;34mMMCI Status\033[0m\n");
+ seq_printf(seq, "%-20s:", "completion pending");
+ switch (host->complete_what) {
+ case COMPLETION_NONE:
+ seq_printf(seq, "COMPLETION_NONE");
+ break;
+ case COMPLETION_FINALIZE:
+ seq_printf(seq, "COMPLETION_FINALIZE");
+ break;
+ case COMPLETION_REQ:
+ seq_printf(seq, "COMPLETION_REQ");
+ break;
+ case COMPLETION_CMDSENT:
+ seq_printf(seq, "COMPLETION_CMDSENT");
+ break;
+ case COMPLETION_RSPFIN:
+ seq_printf(seq, "COMPLETION_RSPFIN");
+ break;
+ case COMPLETION_XFERFINISH:
+ seq_printf(seq, "COMPLETION_XFERFINISH");
+ break;
+ case COMPLETION_XFERFINISH_RSPFIN:
+ seq_printf(seq, "COMPLETION_XFERFINISH_RSPFIN");
+ break;
+ default:
+ seq_printf(seq, "warning not define");
+ break;
+ }
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, "%-20s:", "completion dma");
+ switch (host->dma_complete) {
+ case COMPLETION_DMA_NONE:
+ seq_printf(seq, "COMPLETION_DMA_NONE");
+ break;
+ case COMPLETION_DMA_START:
+ seq_printf(seq, "COMPLETION_DMA_START");
+ break;
+ case COMPLETION_DMA_XFERFINISH:
+ seq_printf(seq, "COMPLETION_DMA_XFERFINISH");
+ break;
+ default:
+ seq_printf(seq, "warning not define");
+ break;
+ }
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, "%-20s:", "pio active");
+ switch (host->pio_active) {
+ case XFER_NONE:
+ seq_printf(seq, "XFER_NONE");
+ break;
+ case XFER_READ:
+ seq_printf(seq, "XFER_READ");
+ break;
+ case XFER_WRITE:
+ seq_printf(seq, "XFER_WRITE");
+ break;
+ default:
+ seq_printf(seq, "warning not define");
+ break;
+ }
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, "%-20s:", "imask0");
+ seq_printf(seq, "0x%x\n", host->mmci_mask0);
+ seq_printf(seq, "%-20s:", "imask1");
+ seq_printf(seq, "0x%x\n", host->mmci_mask1);
+
+ seq_printf(seq, "\n\033[1;34mMMCI clock\033[0m\n");
+ seq_printf(seq, "%-20s:%d\n", "mclk", host->mclk);
+ seq_printf(seq, "%-20s:%d\n", "cclk", host->cclk);
+
+ return 0;
+}
+
+static ssize_t mmci_stat_reset(struct file *filp,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct mmci_host *host =
+ ((struct seq_file *)filp->private_data)->private;
+
+ memset(&(host->stat), 0, sizeof(host->stat));
+ return count;
+}
+
+static int mmci_stat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmci_stat_show, inode->i_private);
+}
+
+static const struct file_operations mmci_fops_stat = {
+ .owner = THIS_MODULE,
+ .open = mmci_stat_open,
+ .read = seq_read,
+ .write = mmci_stat_reset,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void mmci_debugfs_create(struct mmci_host *host)
{
host->debug_regs = debugfs_create_file("regs", S_IRUGO,
@@ -204,11 +322,20 @@ static void mmci_debugfs_create(struct mmci_host *host)
if (IS_ERR(host->debug_regs))
dev_err(mmc_dev(host->mmc),
"failed to create debug regs file\n");
+
+ host->debug_stat = debugfs_create_file("stat", S_IRUGO | S_IWUSR,
+ host->mmc->debugfs_root, host,
+ &mmci_fops_stat);
+
+ if (IS_ERR(host->debug_stat))
+ dev_err(mmc_dev(host->mmc),
+ "failed to create debug stat file\n");
}
static void mmci_debugfs_remove(struct mmci_host *host)
{
debugfs_remove(host->debug_regs);
+ debugfs_remove(host->debug_stat);
}
#else
@@ -271,51 +398,43 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
clk |= MCI_ST_8BIT_BUS;
+ host->mmci_clockctrl = clk;
writel(clk, host->base + MMCICLOCK);
}
-static void
-mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
+static void mmci_set_mask1(struct mmci_host *host, u32 mask)
{
- writel(0, host->base + MMCICOMMAND);
-
- BUG_ON(host->data);
-
- host->mrq = NULL;
- host->cmd = NULL;
-
- if (mrq->data)
- mrq->data->bytes_xfered = host->data_xfered;
-
- /*
- * Need to drop the host lock here; mmc_request_done may call
- * back into the driver...
- */
- spin_unlock(&host->lock);
- mmc_request_done(host->mmc, mrq);
- spin_lock(&host->lock);
+ if (host->singleirq) {
+ host->mmci_mask0 &= ~MCI_IRQ1MASK;
+ host->mmci_mask0 |= (mask & MCI_IRQ1MASK);
+ }
+ host->mmci_mask1 = mask;
}
-static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
+static inline void enable_imasks(struct mmci_host *host)
{
- void __iomem *base = host->base;
-
- if (host->singleirq) {
- unsigned int mask0 = readl(base + MMCIMASK0);
-
- mask0 &= ~MCI_IRQ1MASK;
- mask0 |= mask;
+ writel(host->mmci_mask0, host->base + MMCIMASK0);
+ writel(host->mmci_mask1, host->base + MMCIMASK1);
+}
- writel(mask0, base + MMCIMASK0);
- }
+static inline void disable_imasks(struct mmci_host *host)
+{
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIMASK1);
+}
- writel(mask, base + MMCIMASK1);
+static inline void clear_imasks(struct mmci_host *host)
+{
+ /* preserve the SDIO IRQ mask state */
+ host->mmci_mask0 &= MCI_SDIOITMASK;
+ host->mmci_mask1 = 0;
+ writel(host->mmci_mask0, host->base + MMCIMASK0);
+ writel(host->mmci_mask1, host->base + MMCIMASK1);
}
static void mmci_stop_data(struct mmci_host *host)
{
- u32 clk;
- unsigned int datactrl = 0;
+ host->mmci_datactrl = 0;
/*
* The ST Micro variants has a special bit
@@ -326,99 +445,40 @@ static void mmci_stop_data(struct mmci_host *host)
if (host->variant->sdio &&
host->mmc->card &&
mmc_card_sdio(host->mmc->card))
- datactrl |= MCI_ST_DPSM_SDIOEN;
+ host->mmci_datactrl |= MCI_ST_DPSM_SDIOEN;
- writel(datactrl, host->base + MMCIDATACTRL);
- mmci_set_mask1(host, 0);
+ writel(host->mmci_datactrl, host->base + MMCIDATACTRL);
/* Needed for DDR */
if (host->mmc->card && mmc_card_ddr_mode(host->mmc->card)) {
- clk = readl(host->base + MMCICLOCK);
- clk &= ~(MCI_NEG_EDGE);
-
- writel(clk, (host->base + MMCICLOCK));
+ host->mmci_clockctrl &= ~(MCI_NEG_EDGE);
+ writel(host->mmci_clockctrl, (host->base + MMCICLOCK));
}
-
- host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
unsigned int flags = SG_MITER_ATOMIC;
- if (data->flags & MMC_DATA_READ)
- flags |= SG_MITER_TO_SG;
- else
- flags |= SG_MITER_FROM_SG;
-
+ flags |= (data->flags & MMC_DATA_READ) ?
+ SG_MITER_TO_SG : SG_MITER_FROM_SG;
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
-static void
-mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
-{
- void __iomem *base = host->base;
-
- dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
- cmd->opcode, cmd->arg, cmd->flags);
-
- if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
- writel(0, base + MMCICOMMAND);
- udelay(1);
- }
-
- c |= cmd->opcode | MCI_CPSM_ENABLE;
- if (cmd->flags & MMC_RSP_PRESENT) {
- if (cmd->flags & MMC_RSP_136)
- c |= MCI_CPSM_LONGRSP;
- c |= MCI_CPSM_RESPONSE;
- }
- if (/*interrupt*/0)
- c |= MCI_CPSM_INTERRUPT;
-
- host->cmd = cmd;
-
- writel(cmd->arg, base + MMCIARGUMENT);
- writel(c, base + MMCICOMMAND);
-}
-
-static void
-mmci_complete_data_xfer(struct mmci_host *host)
-{
- struct mmc_data *data = host->data;
-
- if ((host->size == 0) || data->error) {
-
- /*
- * Variants with broken blockend flags and as well dma
- * transfers handles the end of the entire transfer here.
- */
- if (host->last_blockend && !data->error)
- host->data_xfered = data->blksz * data->blocks;
-
- mmci_stop_data(host);
-
- if (!data->stop)
- mmci_request_end(host, data->mrq);
- else
- mmci_start_command(host, data->stop, 0);
- }
-}
-
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
* no custom DMA interfaces are supported.
*/
#ifdef CONFIG_DMA_ENGINE
-static void __devinit mmci_setup_dma(struct mmci_host *host)
+static int __devinit mmci_setup_dma(struct mmci_host *host)
{
struct mmci_platform_data *plat = host->plat;
dma_cap_mask_t mask;
if (!plat || !plat->dma_filter) {
dev_err(mmc_dev(host->mmc), "no DMA platform data!\n");
- return;
+ return -EINVAL;
}
/* Try to acquire a generic DMA engine slave channel */
@@ -435,7 +495,7 @@ static void __devinit mmci_setup_dma(struct mmci_host *host)
/* E.g if no DMA hardware is present */
if (!host->dma_rx_channel) {
dev_err(mmc_dev(host->mmc), "no RX DMA channel!\n");
- return;
+ return -EINVAL;
}
if (plat->dma_tx_param) {
host->dma_tx_channel = dma_request_channel(mask,
@@ -444,7 +504,7 @@ static void __devinit mmci_setup_dma(struct mmci_host *host)
if (!host->dma_tx_channel) {
dma_release_channel(host->dma_rx_channel);
host->dma_rx_channel = NULL;
- return;
+ return -EINVAL;
}
} else {
host->dma_tx_channel = host->dma_rx_channel;
@@ -453,46 +513,20 @@ static void __devinit mmci_setup_dma(struct mmci_host *host)
dev_info(mmc_dev(host->mmc), "use DMA channels DMA RX %s, DMA TX %s\n",
dma_chan_name(host->dma_rx_channel),
dma_chan_name(host->dma_tx_channel));
-}
-/*
- * This is used in __devinit or __devexit so inline it
- * so it can be discarded.
- */
-static inline void mmci_disable_dma(struct mmci_host *host)
-{
- if (host->dma_rx_channel)
- dma_release_channel(host->dma_rx_channel);
- if (host->dma_tx_channel)
- dma_release_channel(host->dma_tx_channel);
- host->dma_enable = false;
-}
-
-static void mmci_dma_data_end(struct mmci_host *host)
-{
- struct mmc_data *data = host->data;
-
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- (data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- host->dma_on_current_xfer = false;
+ return 0;
}
-static void mmci_dma_terminate(struct mmci_host *host)
+static void mmci_dma_abort(struct mmci_host *host)
{
- struct mmc_data *data = host->data;
struct dma_chan *chan;
dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
- if (data->flags & MMC_DATA_READ)
- chan = host->dma_rx_channel;
- else
- chan = host->dma_tx_channel;
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- (data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ chan = (host->mrq->data->flags & MMC_DATA_READ) ?
+ host->dma_rx_channel : host->dma_tx_channel;
+
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
- host->dma_on_current_xfer = false;
}
static void mmci_dma_callback(void *arg)
@@ -500,56 +534,47 @@ static void mmci_dma_callback(void *arg)
unsigned long flags;
struct mmci_host *host = arg;
- dev_vdbg(mmc_dev(host->mmc), "DMA transfer done!\n");
+ dev_dbg(mmc_dev(host->mmc), "DMA transfer done!\n");
spin_lock_irqsave(&host->lock, flags);
- mmci_dma_data_end(host);
-
- /* Mark that the entire data is transferred for this dma transfer. */
- host->size = 0;
-
- /*
- * Make sure MMCI has received MCI_DATAEND before
- * completing the data transfer.
- */
- if (host->dataend)
- mmci_complete_data_xfer(host);
+ host->dma_complete = COMPLETION_DMA_XFERFINISH;
+ tasklet_schedule(&host->mmci_tasklet);
spin_unlock_irqrestore(&host->lock, flags);
}
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static inline int mmci_prepare_dma(struct mmci_host *host,
+ struct mmc_data *data)
{
struct variant_data *variant = host->variant;
+ int burst_sz = variant->fifohalfsize >> 2; /* # of words */
struct dma_slave_config rx_conf = {
.src_addr = host->phybase + MMCIFIFO,
.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.direction = DMA_FROM_DEVICE,
- .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .src_maxburst = burst_sz,
};
struct dma_slave_config tx_conf = {
.dst_addr = host->phybase + MMCIFIFO,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.direction = DMA_TO_DEVICE,
- .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .dst_maxburst = burst_sz,
};
- struct mmc_data *data = host->data;
enum dma_data_direction direction;
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;
struct scatterlist *sg;
dma_cookie_t cookie;
int i;
- unsigned int irqmask0;
int sg_len;
/* If less than or equal to the fifo size, don't bother with DMA */
if (host->size <= variant->fifosize)
return -EINVAL;
- datactrl |= MCI_DPSM_DMAENABLE;
- datactrl |= variant->dmareg_enable;
+ host->mmci_datactrl |= MCI_DPSM_DMAENABLE;
+ host->mmci_datactrl |= variant->dmareg_enable;
if (data->flags & MMC_DATA_READ) {
if (host->size <= variant->txsize_threshold)
@@ -572,8 +597,8 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
/* Check for weird stuff in the sg list */
for_each_sg(data->sg, sg, data->sg_len, i) {
dev_vdbg(mmc_dev(host->mmc),
- "MMCI SGlist %d dir %d: length: %08x\n",
- i, direction, sg->length);
+ "[%s] MMCI SGlist %d dir %d: length: %08x\n",
+ __func__, i, direction, sg->length);
if (sg->offset & 3 || sg->length & 3)
return -EINVAL;
}
@@ -592,30 +617,18 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
desc->callback = mmci_dma_callback;
desc->callback_param = host;
host->dma_desc = desc;
- dev_vdbg(mmc_dev(host->mmc), "Submit MMCI DMA job, sglen %d "
- "blksz %04x blks %04x flags %08x\n",
- data->sg_len, data->blksz, data->blocks, data->flags);
cookie = desc->tx_submit(desc);
/* Here overloaded DMA controllers may fail */
if (dma_submit_error(cookie))
goto unmap_exit;
- host->dma_on_current_xfer = true;
chan->device->device_issue_pending(chan);
- /*
- * MMCI monitors both MCI_DATAEND and the DMA callback.
- * Both events must occur before the transfer is considered
- * to be completed. MCI_DATABLOCKEND is not used in DMA mode.
- */
- host->last_blockend = true;
- irqmask0 = readl(host->base + MMCIMASK0);
- irqmask0 &= ~MCI_DATABLOCKENDMASK;
- writel(irqmask0, host->base + MMCIMASK0);
+ host->mmci_mask0 |= MCI_DATAENDMASK;
- /* Trigger the DMA transfer */
- writel(datactrl, host->base + MMCIDATACTRL);
+ dev_dbg(mmc_dev(host->mmc), "[%s] config dma transfert: len:%d\n",
+ __func__, host->mmci_datalenght);
return 0;
unmap_exit:
@@ -626,285 +639,28 @@ map_err:
}
#else
/* Blank functions if the DMA engine is not available */
-static inline void mmci_setup_dma(struct mmci_host *host)
-{
-}
-
-static inline void mmci_disable_dma(struct mmci_host *host)
-{
-}
-
-static inline void mmci_dma_data_end(struct mmci_host *host)
+static inline int mmci_setup_dma(struct mmci_host *host)
{
+ return -1;
}
-static inline void mmci_dma_terminate(struct mmci_host *host)
+static inline int mmci_prepare_dma(struct mmci_host *host,
+ struct mmc_data *data)
{
+ return -1;
}
-
-static inline int mmci_dma_start_data(struct mmci_host *host,
- unsigned int datactrl)
+static inline void mmci_dma_abort(struct mmci_host *host)
{
- return -ENOSYS;
}
#endif
-static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
-{
- struct variant_data *variant = host->variant;
- unsigned int datactrl, timeout, irqmask0, irqmask1;
- unsigned int clkcycle_ns;
- void __iomem *base;
- int blksz_bits;
- u32 clk;
-
- dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
- data->blksz, data->blocks, data->flags);
-
- host->data = data;
- host->size = data->blksz * data->blocks;
- host->data_xfered = 0;
- host->last_blockend = false;
- host->dataend = false;
- host->cache_len = 0;
- host->cache = 0;
-
- clkcycle_ns = 1000000000 / host->cclk;
- timeout = data->timeout_ns / clkcycle_ns;
- timeout += data->timeout_clks;
-
- if (data->flags & MMC_DATA_READ) {
- /*
- * Since the read command is sent after we have setup
- * the data transfer we must increase the data timeout.
- * Unfortunately this is not enough since some cards
- * does not seem to stick to what is stated in their
- * CSD for TAAC and NSAC.
- */
- timeout += dataread_delay_clks;
- }
-
- base = host->base;
- writel(timeout, base + MMCIDATATIMER);
- writel(host->size, base + MMCIDATALENGTH);
-
- blksz_bits = ffs(data->blksz) - 1;
-
-#ifdef CONFIG_ARCH_U8500
- /* Temporary solution for db8500v2. */
- if (cpu_is_u8500v20_or_later())
- datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
- else
-#endif
- datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
-
- if (data->flags & MMC_DATA_READ)
- datactrl |= MCI_DPSM_DIRECTION;
-
- if (host->mmc->card && mmc_card_ddr_mode(host->mmc->card)) {
- datactrl |= MCI_ST_DPSM_DDRMODE;
-
- /* Needed for DDR */
- clk = readl(base + MMCICLOCK);
- clk |= MCI_NEG_EDGE;
-
- writel(clk, (base + MMCICLOCK));
- }
-
- if (variant->sdio &&
- host->mmc->card &&
- mmc_card_sdio(host->mmc->card)) {
- /*
- * The ST Micro variants has a special bit
- * to enable SDIO mode. This bit is set the first time
- * a SDIO data transfer is done and must remain set
- * after the data transfer is completed. The reason is
- * because of otherwise no SDIO interrupts can be
- * received.
- */
- datactrl |= MCI_ST_DPSM_SDIOEN;
-
- /*
- * The ST Micro variant for SDIO transfer sizes
- * less than or equal to 8 bytes needs to have clock
- * H/W flow control disabled. Since flow control is
- * not really needed for anything that fits in the
- * FIFO, we can disable it for any write smaller
- * than the FIFO size.
- */
- if ((host->size <= variant->fifosize) &&
- (data->flags & MMC_DATA_WRITE))
- writel(readl(host->base + MMCICLOCK) &
- ~variant->clkreg_enable,
- host->base + MMCICLOCK);
- else
- writel(readl(host->base + MMCICLOCK) |
- variant->clkreg_enable,
- host->base + MMCICLOCK);
- }
-
- if (host->dma_enable) {
- int ret;
-
- /*
- * Attempt to use DMA operation mode, if this
- * should fail, fall back to PIO mode
- */
- ret = mmci_dma_start_data(host, datactrl);
- if (!ret)
- return;
- }
-
- /* IRQ mode, map the SG list for CPU reading/writing */
- mmci_init_sg(host, data);
-
- if (data->flags & MMC_DATA_READ) {
- irqmask1 = MCI_RXFIFOHALFFULLMASK;
-
- /*
- * If we have less than a FIFOSIZE of bytes to
- * transfer, trigger a PIO interrupt as soon as any
- * data is available.
- */
- if (host->size < variant->fifosize)
- irqmask1 |= MCI_RXDATAAVLBLMASK;
- } else {
- /*
- * We don't actually need to include "FIFO empty" here
- * since its implicit in "FIFO half empty".
- */
- irqmask1 = MCI_TXFIFOHALFEMPTYMASK;
- }
-
- /* Setup IRQ */
- irqmask0 = readl(base + MMCIMASK0);
- if (variant->broken_blockend) {
- host->last_blockend = true;
- irqmask0 &= ~MCI_DATABLOCKENDMASK;
- } else {
- irqmask0 |= MCI_DATABLOCKENDMASK;
- }
- writel(irqmask0, base + MMCIMASK0);
- mmci_set_mask1(host, irqmask1);
-
- /* Start the data transfer */
- writel(datactrl, base + MMCIDATACTRL);
-}
-
-static void
-mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
- unsigned int status)
-{
- struct variant_data *variant = host->variant;
-
- /* First check for errors */
- if (status & MCI_DATA_ERR) {
- dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n",
- status);
- if (status & MCI_DATACRCFAIL)
- data->error = -EILSEQ;
- else if (status & MCI_DATATIMEOUT)
- data->error = -ETIMEDOUT;
- else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
- data->error = -EIO;
-
- /*
- * We hit an error condition. Ensure that any data
- * partially written to a page is properly coherent,
- * unless we're using DMA.
- */
- if (host->dma_on_current_xfer)
- mmci_dma_terminate(host);
- else if (data->flags & MMC_DATA_READ) {
- struct sg_mapping_iter *sg_miter = &host->sg_miter;
- unsigned long flags;
-
- local_irq_save(flags);
- if (sg_miter_next(sg_miter)) {
- flush_dcache_page(sg_miter->page);
- sg_miter_stop(sg_miter);
- }
- local_irq_restore(flags);
- }
- }
-
- /*
- * On ARM variants in PIO mode, MCI_DATABLOCKEND
- * is always sent first, and we increase the
- * transfered number of bytes for that IRQ. Then
- * MCI_DATAEND follows and we conclude the transaction.
- *
- * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
- * doesn't seem to immediately clear from the status,
- * so we can't use it keep count when only one irq is
- * used because the irq will hit for other reasons, and
- * then the flag is still up. So we use the MCI_DATAEND
- * IRQ at the end of the entire transfer because
- * MCI_DATABLOCKEND is broken.
- *
- * In the U300, the IRQs can arrive out-of-order,
- * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
- * so for this case we use the flags "last_blockend" and
- * "dataend" to make sure both IRQs have arrived before
- * concluding the transaction. (This does not apply
- * to the Ux500 which doesn't fire MCI_DATABLOCKEND
- * at all.) In DMA mode it suffers from the same problem
- * as the Ux500.
- */
- if (status & MCI_DATABLOCKEND) {
- /*
- * Just being a little over-cautious, we do not
- * use this progressive update if the hardware blockend
- * flag is unreliable: since it can stay high between
- * IRQs it will corrupt the transfer counter.
- */
- if (!variant->broken_blockend && !host->dma_on_current_xfer) {
- host->data_xfered += data->blksz;
-
- if (host->data_xfered == data->blksz * data->blocks)
- host->last_blockend = true;
- }
- }
-
- if (status & MCI_DATAEND)
- host->dataend = true;
-
- /*
- * On variants with broken blockend we shall only wait for dataend,
- * on others we must sync with the blockend signal since they can
- * appear out-of-order.
- */
- if ((host->dataend && host->last_blockend) || data->error)
- mmci_complete_data_xfer(host);
-}
-
-static void
-mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
- unsigned int status)
+static inline void mmci_disable_dma(struct mmci_host *host)
{
- void __iomem *base = host->base;
-
- host->cmd = NULL;
-
- cmd->resp[0] = readl(base + MMCIRESPONSE0);
- cmd->resp[1] = readl(base + MMCIRESPONSE1);
- cmd->resp[2] = readl(base + MMCIRESPONSE2);
- cmd->resp[3] = readl(base + MMCIRESPONSE3);
-
- if (status & MCI_CMDTIMEOUT)
- cmd->error = -ETIMEDOUT;
- else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC)
- cmd->error = -EILSEQ;
-
- if (!cmd->data || cmd->error) {
- if (host->data) {
- if (host->dma_on_current_xfer)
- mmci_dma_terminate(host);
- mmci_stop_data(host);
- }
- mmci_request_end(host, cmd->mrq);
- } else if (!(cmd->data->flags & MMC_DATA_READ))
- mmci_start_data(host, cmd->data);
+ if (host->dma_rx_channel)
+ dma_release_channel(host->dma_rx_channel);
+ if (host->dma_tx_channel)
+ dma_release_channel(host->dma_tx_channel);
+ host->dma_enable = false;
}
static int mmci_pio_read(struct mmci_host *host, char *buffer,
@@ -1056,153 +812,429 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer,
return ptr - buffer;
}
+static void mmci_start_data(struct mmci_host *host)
+{
+ writel(host->mmci_clockctrl, host->base + MMCICLOCK);
+ writel(host->mmci_datatimer, host->base + MMCIDATATIMER);
+ writel(host->mmci_datalength, host->base + MMCIDATALENGTH);
+ writel(host->mmci_datactrl, host->base + MMCIDATACTRL);
+}
+
/*
* PIO data transfer IRQ handler.
*/
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
- struct sg_mapping_iter *sg_miter = &host->sg_miter;
- struct variant_data *variant = host->variant;
- void __iomem *base = host->base;
- unsigned long flags;
- u32 status;
+ u32 mmci_status;
+
+ mmci_status = readl(host->base + MMCISTATUS);
+
+ if ((host->pio_active == XFER_WRITE) &&
+ (mmci_status & MCI_TXFIFOHALFEMPTYMASK)) {
+ dev_dbg(mmc_dev(host->mmc), "pio tx\n");
+ host->mmci_mask1 &= ~MCI_TXFIFOHALFEMPTYMASK;
+ } else if ((host->pio_active == XFER_READ) &&
+ (mmci_status & MCI_RXDATAAVLBL)) {
+ dev_dbg(mmc_dev(host->mmc), "pio rx\n");
+ host->mmci_mask1 &= ~(MCI_RXDATAAVLBL
+ | MCI_RXFIFOHALFFULLMASK);
+ } else
+ goto irq_out;
- status = readl(base + MMCISTATUS);
+ mmci_set_mask1(host, host->mmci_mask1);
+ enable_imasks(host);
+ tasklet_schedule(&host->mmci_tasklet);
- dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
+irq_out:
+ return IRQ_HANDLED;
+}
- local_irq_save(flags);
+/*
+ * Handle completion of command and data transfers.
+ */
+static irqreturn_t mmci_irq(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+ struct mmc_command *cmd;
+ u32 mmci_status, mmci_mask, mmci_iclear = 0;
+ unsigned long iflags;
+ int sdio_irq = 0;
- do {
- unsigned int remain, len;
- char *buffer;
+ spin_lock_irqsave(&host->lock, iflags);
- /*
- * For write, we only need to test the half-empty flag
- * here - if the FIFO is completely empty, then by
- * definition it is more than half empty.
- *
- * For read, check for data available.
+ mmci_status = readl(host->base + MMCISTATUS);
+ mmci_mask = readl(host->base + MMCIMASK0);
+
+ if (mmci_status & mmci_mask & MCI_SDIOIT) {
+ /* clear status, defer handling until after other interrupts */
+ writel(MCI_SDIOITC, host->base + MMCICLEAR);
+ sdio_irq = 1;
+ }
+
+ if ((host->complete_what == COMPLETION_NONE) ||
+ (host->complete_what == COMPLETION_FINALIZE)) {
+ dev_dbg(mmc_dev(host->mmc), "nothing to complete\n");
+ clear_imasks(host);
+ goto irq_out;
+ }
+
+ if (!host->mrq) {
+ dev_dbg(mmc_dev(host->mmc), "no active mrq\n");
+ clear_imasks(host);
+ goto irq_out;
+ }
+
+ cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd;
+ if (!cmd) {
+ dev_dbg(mmc_dev(host->mmc), "no active cmd\n");
+ clear_imasks(host);
+ goto irq_out;
+ }
+
+ if (host->singleirq)
+ if (mmci_status & (MCI_TXFIFOHALFEMPTYMASK | MCI_RXDATAAVLBL))
+ mmci_pio_irq(irq, dev_id);
+
+ if (mmci_status & MCI_CMDTIMEOUT) {
+ dev_dbg(mmc_dev(host->mmc), "error: CMDTIMEOUT\n");
+ cmd->error = -ETIMEDOUT;
+ goto fail_transfer;
+ }
+
+ if (mmci_status & MCI_CMDCRCFAIL) {
+ mmci_iclear |= MCI_CMDCRCFAILCLR;
+ if (cmd->flags & MMC_RSP_CRC) {
+ dev_dbg(mmc_dev(host->mmc), "error: CMDCRCFAIL\n");
+ host->stat.nb_cmdcrcfail++;
+ cmd->error = -EILSEQ;
+ goto fail_transfer;
+ }
+ /* A command that does not check the CRC (sent without the
+ * MMC_RSP_CRC flag, e.g. CMD41) is not an error.
*/
- if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
- break;
+ goto close_transfer;
+ }
- if (!sg_miter_next(sg_miter))
- break;
+ if (mmci_status & MCI_CMDSENT) {
+ mmci_iclear |= MCI_CMDSENTCLR;
+ if (host->complete_what == COMPLETION_CMDSENT) {
+ dev_dbg(mmc_dev(host->mmc), "ok: command sent\n");
+ goto close_transfer;
+ }
+ }
- buffer = sg_miter->addr;
- remain = sg_miter->length;
+ if (mmci_status & MCI_CMDRESPEND) {
+ mmci_iclear |= MCI_CMDRESPENDCLR;
+ if (host->complete_what == COMPLETION_RSPFIN) {
+ dev_dbg(mmc_dev(host->mmc),
+ "ok: command response received\n");
+ goto close_transfer;
+ }
+ if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN) {
+ dev_dbg(mmc_dev(host->mmc),
+ "command response received, "
+ "wait data end\n");
+ host->complete_what = COMPLETION_XFERFINISH;
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ mmci_start_data(host);
+ }
+ }
- len = 0;
- if (status & MCI_RXACTIVE)
- len = mmci_pio_read(host, buffer, remain);
- if (status & MCI_TXACTIVE)
- len = mmci_pio_write(host, buffer, remain, status);
+ /* errors handled after this point are only relevant
+ * when a data transfer is in progress */
+ if (!cmd->data)
+ goto clear_status_bits;
- sg_miter->consumed = len;
+ if (mmci_status & MCI_DATACRCFAIL) {
+ dev_dbg(mmc_dev(host->mmc), "error: DATACRCFAIL\n");
+ host->stat.nb_datacrcfail++;
+ cmd->data->error = -EILSEQ;
+ goto fail_transfer;
+ }
+ if (mmci_status & MCI_DATATIMEOUT) {
+ dev_dbg(mmc_dev(host->mmc), "error: DATATIMEOUT\n");
+ host->stat.nb_datatimeout++;
+ cmd->data->error = -ETIMEDOUT;
+ goto fail_transfer;
+ }
+ if (mmci_status & MCI_RXOVERRUN) {
+ dev_dbg(mmc_dev(host->mmc), "error: RXOVERRUN\n");
+ host->stat.nb_rxoverrun++;
+ cmd->data->error = -EIO;
+ goto fail_transfer;
+ }
+ if (mmci_status & MCI_TXUNDERRUN) {
+ dev_dbg(mmc_dev(host->mmc), "error: TXUNDERRUN\n");
+ host->stat.nb_txunderrun++;
+ cmd->data->error = -EIO;
+ goto fail_transfer;
+ }
+ if (mmci_status & MCI_STARTBITERR) {
+ dev_dbg(mmc_dev(host->mmc), "error: STARTBITERR\n");
+ host->stat.nb_startbiterr++;
+ cmd->data->error = -EIO;
+ goto fail_transfer;
+ }
- host->size -= len;
- remain -= len;
+ if (mmci_status & MCI_DATAEND) {
+ if (host->complete_what == COMPLETION_XFERFINISH) {
+ dev_dbg(mmc_dev(host->mmc),
+ "ok: data transfer completed\n");
+ mmci_iclear |= MCI_DATAENDCLR;
+ goto close_transfer;
+ }
+ if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN) {
+ dev_dbg(mmc_dev(host->mmc),
+ "data transfer completed,"
+ "wait cmd respend\n");
+ host->complete_what = COMPLETION_RSPFIN;
+ }
+ mmci_iclear |= MCI_DATAENDCLR;
+ }
- if (remain)
- break;
+clear_status_bits:
+ writel(mmci_iclear & 0xFFF, host->base + MMCICLEAR);
+ goto irq_out;
- if (status & MCI_RXACTIVE)
- flush_dcache_page(sg_miter->page);
+fail_transfer:
+ host->pio_active = XFER_NONE;
- status = readl(base + MMCISTATUS);
- } while (1);
+close_transfer:
+ host->complete_what = COMPLETION_FINALIZE;
+ clear_imasks(host);
+ tasklet_schedule(&host->mmci_tasklet);
- sg_miter_stop(sg_miter);
+irq_out:
+ dev_dbg(mmc_dev(host->mmc), "[%s] mmci_status:%04x mmci_mask0:%04x\n",
+ __func__, mmci_status, mmci_mask);
+ spin_unlock_irqrestore(&host->lock, iflags);
- local_irq_restore(flags);
+ if (sdio_irq)
+ mmc_signal_sdio_irq(host->mmc);
- /*
- * If we're nearing the end of the read, switch to
- * "any data available" mode.
+ return IRQ_HANDLED;
+}
+
+static int mmci_setup_data(struct mmci_host *host, struct mmc_data *data)
+{
+ unsigned int clkcycle_ns;
+ int blksz_bits;
+
+ dev_dbg(mmc_dev(host->mmc), "[%s] blksz:%d nb_blk:%d sg_len:%d "
+ "ptr_sg:%p flags %08x\n", __func__, data->blksz,
+ data->blocks, data->sg_len, data->sg, data->flags);
+
+ host->size = data->blksz * data->blocks;
+ host->pio_active = XFER_NONE;
+
+ clkcycle_ns = 1000000000 / host->cclk;
+ host->mmci_datatimer = data->timeout_ns / clkcycle_ns;
+ host->mmci_datatimer += data->timeout_clks;
+
+ blksz_bits = ffs(data->blksz) - 1;
+
+ /* FIXME: the shift must be defined */
+#ifdef CONFIG_ARCH_U8500
+ /* Temporary solution for db8500v2. */
+ if (cpu_is_u8500v20_or_later())
+ host->mmci_datactrl = data->blksz << 16;
+ else
+#endif
+ host->mmci_datactrl = blksz_bits << 4;
+
+ if (data->flags & MMC_DATA_READ) {
+ host->mmci_datactrl |= MCI_DPSM_DIRECTION;
+ host->mmci_datatimer += dataread_delay_clks;
+ }
+
+ host->mmci_datactrl |= MCI_DPSM_ENABLE;
+
+ /* Needed for DDR */
+ if (host->mmc->card && mmc_card_ddr_mode(host->mmc->card)) {
+ host->mmci_datactrl |= MCI_ST_DPSM_DDRMODE;
+ host->mmci_clockctrl |= MCI_NEG_EDGE;
+ }
+
+ host->mmci_datalength = data->blksz * data->blocks;
+
+ /* irq mask */
+ host->mmci_mask0 = MCI_DATACRCFAILMASK | MCI_DATATIMEOUTMASK |
+ MCI_TXUNDERRUNMASK | MCI_RXOVERRUNMASK | MCI_STARTBITERRMASK;
+
+ return 0;
+}
+
+static inline void mmci_prepare_sdio(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct variant_data *variant = host->variant;
+
+ /* The ST Micro variant:
+ * - has a special bit to enable SDIO.
+ * - for SDIO transfer sizes less than 8 bytes,
+ * clock H/W flow control should be disabled.
*/
- if (status & MCI_RXACTIVE && host->size < variant->fifosize)
- mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
+ host->mmci_clockctrl |= variant->clkreg_enable;
+ if (variant->sdio && host->mmc->card) {
+ if (mmc_card_sdio(host->mmc->card)) {
+ host->mmci_datactrl |= MCI_ST_DPSM_SDIOEN;
+ if ((host->size <= variant->fifosize) &&
+ (data->flags & MMC_DATA_WRITE))
+ host->mmci_clockctrl &= ~variant->clkreg_enable;
+ else
+ host->mmci_clockctrl |= variant->clkreg_enable;
+ }
+ }
+}
- /* If we run out of data, disable the data IRQs. */
- if (host->size == 0) {
- mmci_set_mask1(host, 0);
+static inline void mmci_prepare_pio(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct variant_data *variant = host->variant;
+ u32 irqmask1 = 0;
+ int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
+ host->cache_len = 0;
+ host->cache = 0;
+
+ host->pio_active = rw ? XFER_WRITE : XFER_READ;
+ /* IRQ mode, map the SG list for CPU reading/writing */
+ mmci_init_sg(host, data);
+
+ /* FIXME:
+ * in the write case, perhaps we can preload the fifo
+ */
+ if (data->flags & MMC_DATA_READ) {
+ irqmask1 |= MCI_RXFIFOHALFFULLMASK;
/*
- * If we already received MCI_DATAEND and the last
- * MCI_DATABLOCKEND, the entire data transfer shall
- * be completed.
+ * If we have less than a FIFOSIZE of bytes to
+ * transfer, trigger a PIO interrupt as soon as any
+ * data is available.
*/
- if (host->dataend && host->last_blockend)
- mmci_complete_data_xfer(host);
+ if (host->size < variant->fifosize)
+ irqmask1 |= MCI_RXDATAAVLBLMASK;
+ } else {
+ /*
+ * We don't actually need to include "FIFO empty" here
+ * since it's implicit in "FIFO half empty".
+ */
+ irqmask1 |= MCI_TXFIFOHALFEMPTYMASK;
}
- return IRQ_HANDLED;
+ host->mmci_mask0 &= ~MCI_DATAENDMASK;
+ mmci_set_mask1(host, irqmask1);
}
-/*
- * Handle completion of command and data transfers.
- */
-static irqreturn_t mmci_irq(int irq, void *dev_id)
+static void mmci_prepare_data(struct mmci_host *host,
+ struct mmc_data *data)
{
- struct mmci_host *host = dev_id;
- u32 status;
- int sdio_irq = 0;
- int ret = 0;
+ int res = 0;
- spin_lock(&host->lock);
+ mmci_prepare_sdio(host, data);
- do {
- struct mmc_command *cmd;
- struct mmc_data *data;
+ /*
+ * Attempt to use DMA operation mode, if this
+ * should fail, fall back to PIO mode
+ */
+ if (host->dma_enable) {
+ res = mmci_prepare_dma(host, data);
+ if (!res) {
+ host->dma_complete = COMPLETION_DMA_START;
+ return;
+ } else {
+ host->dma_complete = COMPLETION_DMA_NONE;
+ dev_dbg(mmc_dev(host->mmc),
+ "prepare dma error %d.\n", res);
+ host->mmci_datactrl &= ~MCI_DPSM_DMAENABLE;
+ host->mmci_datactrl &= ~host->variant->dmareg_enable;
+ }
+ }
- status = readl(host->base + MMCISTATUS);
+ mmci_prepare_pio(host, data);
+}
- if (host->singleirq) {
- if (status & readl(host->base + MMCIMASK1))
- mmci_pio_irq(irq, dev_id);
+static void mmci_prepare_command(struct mmci_host *host,
+ struct mmc_command *cmd)
+{
+ host->mmci_mask0 |= MCI_CMDTIMEOUTMASK | MCI_CMDCRCFAILMASK;
- status &= ~MCI_IRQ1MASK;
- }
+ if (cmd->data)
+ host->complete_what = COMPLETION_XFERFINISH_RSPFIN;
+ else if (cmd->flags & MMC_RSP_PRESENT)
+ host->complete_what = COMPLETION_RSPFIN;
+ else
+ host->complete_what = COMPLETION_CMDSENT;
- status &= readl(host->base + MMCIMASK0);
- writel(status, host->base + MMCICLEAR);
+ host->mmci_argument = cmd->arg;
+ host->mmci_command = cmd->opcode | MCI_CPSM_ENABLE;
- dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ host->mmci_command |= MCI_CPSM_RESPONSE;
+ host->mmci_mask0 |= MCI_CMDRESPENDMASK;
+ if (cmd->flags & MMC_RSP_136)
+ host->mmci_command |= MCI_CPSM_LONGRSP;
+ } else
+ host->mmci_mask0 |= MCI_CMDSENTMASK;
+}
- if (status & MCI_SDIOIT)
- sdio_irq = 1;
+static void mmci_send_command(struct mmci_host *host)
+{
+ writel(host->mmci_argument, host->base + MMCIARGUMENT);
+ writel(host->mmci_command, host->base + MMCICOMMAND);
+}
- data = host->data;
- if (status & MCI_DATA_IRQ && data)
- mmci_data_irq(host, data, status);
+static void mmci_send_request(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
+ int res;
+ unsigned long flags;
- cmd = host->cmd;
- if (status & MCI_CMD_IRQ && cmd)
- mmci_cmd_irq(host, cmd, status);
+ spin_lock_irqsave(&host->lock, flags);
- ret = 1;
- } while (status);
+ /* Clear mmci status and mask registers */
+ writel(0x7ff, host->base + MMCICLEAR);
+ clear_imasks(host);
- spin_unlock(&host->lock);
+ if (cmd->data) {
+ res = mmci_setup_data(host, cmd->data);
+ if (res) {
+ dev_err(mmc_dev(mmc), "data setup error %d.\n", res);
+ goto err_data;
+ }
- if (sdio_irq)
- mmc_signal_sdio_irq(host->mmc);
+ mmci_prepare_data(host, cmd->data);
- return IRQ_RETVAL(ret);
+ if (cmd->data->flags & MMC_DATA_READ)
+ mmci_start_data(host);
+ }
+
+ mmci_prepare_command(host, cmd);
+ enable_imasks(host);
+ mmci_send_command(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return;
+
+err_data:
+ cmd->error = res;
+ cmd->data->error = res;
+ mmc_request_done(mmc, mrq);
+ return;
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmci_host *host = mmc_priv(mmc);
- struct variant_data *variant = host->variant;
- unsigned long flags;
+ host->cmd_is_stop = 0;
WARN_ON(host->mrq != NULL);
if (mrq->data &&
- (!variant->non_power_of_2_blksize ||
+ (!host->variant->non_power_of_2_blksize ||
#ifdef CONFIG_ARCH_U8500
!cpu_is_u8500v20_or_later() ||
#endif
@@ -1215,16 +1247,9 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
return;
}
- spin_lock_irqsave(&host->lock, flags);
-
host->mrq = mrq;
-
- if (mrq->data && mrq->data->flags & MMC_DATA_READ)
- mmci_start_data(host, mrq->data);
-
- mmci_start_command(host, mrq->cmd, 0);
-
- spin_unlock_irqrestore(&host->lock, flags);
+ host->stat.nb_core_req++;
+ mmci_send_request(mmc);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -1324,6 +1349,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->pwr != pwr) {
host->pwr = pwr;
+ host->mmci_power = pwr;
writel(pwr, host->base + MMCIPOWER);
}
@@ -1385,10 +1411,9 @@ static int mmci_enable(struct mmc_host *mmc)
spin_lock_irqsave(&host->lock, flags);
- /* Restore registers for POWER, CLOCK and IRQMASK0 */
- writel(host->clk_reg, host->base + MMCICLOCK);
- writel(host->pwr_reg, host->base + MMCIPOWER);
- writel(host->irqmask0_reg, host->base + MMCIMASK0);
+ /* Restore registers for POWER and CLOCK. */
+ writel(host->mmci_clockctrl, host->base + MMCICLOCK);
+ writel(host->mmci_power, host->base + MMCIPOWER);
if (host->variant->sdio &&
host->mmc->card &&
@@ -1398,9 +1423,13 @@ static int mmci_enable(struct mmc_host *mmc)
* register to enable SDIO mode. This bit must be set otherwise
* no SDIO interrupts can be received.
*/
- writel(MCI_ST_DPSM_SDIOEN, host->base + MMCIDATACTRL);
+ host->mmci_datactrl = MCI_ST_DPSM_SDIOEN;
+ writel(host->mmci_datactrl, host->base + MMCIDATACTRL);
}
+ /* SDIO IT is re-enabled if there are any subscribers */
+ enable_imasks(host);
+
spin_unlock_irqrestore(&host->lock, flags);
/*
@@ -1427,17 +1456,12 @@ static int mmci_disable(struct mmc_host *mmc, int lazy)
spin_lock_irqsave(&host->lock, flags);
- /* Save registers for POWER, CLOCK and IRQMASK0 */
- host->irqmask0_reg = readl(host->base + MMCIMASK0);
- host->pwr_reg = readl(host->base + MMCIPOWER);
- host->clk_reg = readl(host->base + MMCICLOCK);
-
/*
* Make sure we do not get any interrupts when we disabled the
* clock and the regulator and as well make sure to clear the
* registers for clock and power.
*/
- writel(0, host->base + MMCIMASK0);
+ disable_imasks(host);
writel(0, host->base + MMCIPOWER);
writel(0, host->base + MMCICLOCK);
@@ -1470,21 +1494,191 @@ static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
static void mmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
unsigned long flags;
- unsigned int mask0;
struct mmci_host *host = mmc_priv(mmc);
spin_lock_irqsave(&host->lock, flags);
- mask0 = readl(host->base + MMCIMASK0);
- if (enable)
- mask0 |= MCI_SDIOIT;
- else
- mask0 &= ~MCI_SDIOIT;
- writel(mask0, host->base + MMCIMASK0);
+ if (enable) {
+ /*
+ * Since the host is not claimed when doing enable
+ * we must handle it here.
+ */
+ host->mmci_mask0 |= MCI_SDIOITMASK;
+ } else {
+ /* We assume the host is claimed when doing disable. */
+ host->mmci_mask0 &= ~MCI_SDIOITMASK;
+ }
+ enable_imasks(host);
spin_unlock_irqrestore(&host->lock, flags);
}
+/*
+ * Tasklet
+ */
+static void mmci_pio_xferdata(struct mmci_host *host)
+{
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ u32 mmci_status;
+ u32 remain, len;
+ char *buffer;
+ unsigned long flags;
+
+ dev_dbg(mmc_dev(host->mmc), "[%s]\n", __func__);
+
+ local_irq_save(flags);
+
+ do {
+ mmci_status = readl(host->base + MMCISTATUS);
+ /*
+ * For write, we only need to test the half-empty flag
+ * here - if the FIFO is completely empty, then by
+ * definition it is more than half empty.
+ *
+ * For read, check for data available.
+ */
+ if (!(mmci_status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
+ break;
+
+ if (!sg_miter_next(sg_miter))
+ break;
+
+ buffer = sg_miter->addr;
+ remain = sg_miter->length;
+ len = 0;
+
+ if (mmci_status & MCI_RXACTIVE)
+ len = mmci_pio_read(host, buffer, remain);
+ if (mmci_status & MCI_TXACTIVE)
+ len = mmci_pio_write(host, buffer,
+ remain, mmci_status);
+
+ sg_miter->consumed = len;
+ host->size -= len;
+ remain -= len;
+
+ if (remain)
+ break;
+ } while (1);
+
+ sg_miter_stop(sg_miter);
+
+ local_irq_restore(flags);
+
+ if (mmci_status & MCI_RXACTIVE) {
+ host->mmci_mask1 |= MCI_RXFIFOHALFFULLMASK;
+ /*
+ * If we're nearing the end of the read, switch to
+ * "any data available" mode.
+ */
+ if (host->size < host->variant->fifosize)
+ host->mmci_mask1 |= MCI_RXDATAAVLBLMASK;
+ }
+
+ if (mmci_status & MCI_TXACTIVE)
+ host->mmci_mask1 |= MCI_TXFIFOHALFEMPTYMASK;
+
+ /*
+ * If we run out of data, disable the data IRQs; this
+ * prevents a race where the FIFO becomes empty before
+ * the chip itself has disabled the data path, and
+ * stops us racing with our data end IRQ.
+ */
+ if (host->size == 0) {
+ host->mmci_mask1 = 0;
+ host->mmci_mask0 |= MCI_DATAENDMASK;
+ }
+}
+
+static void mmci_finalize_request(struct mmci_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd;
+
+ if (!mrq) {
+ dev_err(mmc_dev(host->mmc), "request Missing!\n");
+ return;
+ }
+
+ cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
+ if (cmd->data && !cmd->error && !cmd->data->error)
+ if (host->dma_enable &&
+ host->dma_complete == COMPLETION_DMA_START)
+ return; /* wait for dma completion or data timeout */
+
+ /* Read response from controller. */
+ cmd->resp[0] = readl(host->base + MMCIRESPONSE0);
+ cmd->resp[1] = readl(host->base + MMCIRESPONSE1);
+ cmd->resp[2] = readl(host->base + MMCIRESPONSE2);
+ cmd->resp[3] = readl(host->base + MMCIRESPONSE3);
+
+ /* Cleanup controller */
+ writel(0, host->base + MMCICOMMAND);
+ writel(0, host->base + MMCIARGUMENT);
+ writel(0xFFF, host->base + MMCICLEAR);
+ clear_imasks(host);
+
+ if (cmd->data && cmd->error)
+ cmd->data->error = cmd->error;
+
+ /* If we have no data transfer we are finished here */
+ if (!mrq->data)
+ goto request_done;
+
+ mmci_stop_data(host);
+
+ if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) {
+ host->cmd_is_stop = 1;
+ mmci_send_request(host->mmc);
+ return;
+ }
+
+ /* if dma was used and has finished or errored */
+ if (host->dma_enable && host->dma_complete != COMPLETION_DMA_NONE) {
+ if (host->dma_complete == COMPLETION_DMA_START)
+ mmci_dma_abort(host); /* dma error */
+
+ dma_unmap_sg(mmc_dev(host->mmc), mrq->data->sg,
+ mrq->data->sg_len,
+ (mrq->data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ host->dma_complete = COMPLETION_DMA_NONE;
+ }
+
+ /* Calculate the amount of bytes transferred if there was no error */
+ if (mrq->data->error == 0)
+ mrq->data->bytes_xfered = mrq->data->blocks * mrq->data->blksz;
+ else
+ mrq->data->bytes_xfered = 0;
+
+request_done:
+ host->complete_what = COMPLETION_NONE;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void mmci_tasklet(unsigned long data)
+{
+ struct mmci_host *host = (struct mmci_host *) data;
+
+ dev_dbg(mmc_dev(host->mmc), "[%s]\n", __func__);
+
+ if (host->pio_active != XFER_NONE) {
+ mmci_pio_xferdata(host);
+ mmci_set_mask1(host, host->mmci_mask1);
+ }
+
+ if (host->complete_what == COMPLETION_FINALIZE)
+ mmci_finalize_request(host);
+ else
+ enable_imasks(host);
+}
+
+/*
+ * Init, register and unregister host functions with the mmc core
+ */
static const struct mmc_host_ops mmci_ops = {
.request = mmci_request,
.set_ios = mmci_set_ios,
@@ -1528,10 +1722,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
host->gpio_cd = -ENOSYS;
host->gpio_cd_irq = -1;
- host->irqmask0_reg = 0;
- host->pwr_reg = 0;
- host->clk_reg = 0;
-
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
@@ -1695,9 +1885,11 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
&& host->gpio_cd_irq < 0)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- mmci_setup_dma(host);
+ ret = mmci_setup_dma(host);
+ if (ret)
+ mmci_disable_dma(host);
- ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED,
+ ret = request_irq(dev->irq[0], mmci_irq, 0,
DRIVER_NAME " (cmd)", host);
if (ret)
goto unmap;
@@ -1705,17 +1897,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
if (dev->irq[1] == NO_IRQ)
host->singleirq = true;
else {
- ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
+ ret = request_irq(dev->irq[1], mmci_pio_irq, 0,
DRIVER_NAME " (pio)", host);
if (ret)
goto irq0_free;
}
- /* Prepare IRQMASK0 */
- host->irqmask0_reg = MCI_IRQENABLE;
- if (host->variant->broken_blockend)
- host->irqmask0_reg &= ~MCI_DATABLOCKEND;
-
amba_set_drvdata(dev, mmc);
pm_runtime_enable(mmc->parent);
@@ -1724,7 +1911,11 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
if (pm_runtime_put_sync(mmc->parent) < 0)
dev_err(mmc_dev(mmc), "failed pm_runtime_put_sync\n");
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "failed to add mmc host.\n");
+ goto irq0_free;
+ }
dev_info(&dev->dev,
"%s: MMCI/PL180 manf %x rev %x cfg %02x at 0x%016llx\n",
@@ -1735,6 +1926,9 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
/* Ugly hack for u8500_sdio_detect_card, to be removed soon. */
sdio_host_ptr = host;
+ tasklet_init(&host->mmci_tasklet,
+ mmci_tasklet, (unsigned long) host);
+
mmci_debugfs_create(host);
return 0;
@@ -1782,8 +1976,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
mmci_debugfs_remove(host);
mmc_remove_host(mmc);
- writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+ disable_imasks(host);
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
@@ -1850,8 +2043,8 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
mmc_host_enable(mmc);
mmc_power_save_host(mmc);
mmc_host_disable(mmc);
- host->pwr_reg = 0;
- host->clk_reg = 0;
+ host->mmci_power = 0;
+ host->mmci_clockctrl = 0;
}
} else {
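One pattern worth calling out before the header changes: the mmci.c side consistently replaces readl()/modify/writel() cycles on MMCICLOCK, MMCIDATACTRL and the two mask registers with cached shadows (mmci_clockctrl, mmci_datactrl, mmci_mask0, mmci_mask1) that are mutated under the host lock and flushed in a single write by enable_imasks() or mmci_start_data(). A minimal sketch of the idea, illustrative only and not part of the patch:

#include <linux/io.h>
#include "mmci.h"

static inline void sketch_set_imask0_bits(struct mmci_host *host, u32 bits)
{
	/* mutate the cached copy (caller holds host->lock) ... */
	host->mmci_mask0 |= bits;
	/* ... and flush it to hardware in one write, no readback */
	writel(host->mmci_mask0, host->base + MMCIMASK0);
}

Keeping the cache authoritative is what lets clear_imasks() preserve only the SDIO interrupt bit across requests.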
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 87d35cc1fea..58107ed4a19 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -78,6 +78,7 @@
#define MCI_CMDRESPEND (1 << 6)
#define MCI_CMDSENT (1 << 7)
#define MCI_DATAEND (1 << 8)
+#define MCI_STARTBITERR (1 << 9)
#define MCI_DATABLOCKEND (1 << 10)
#define MCI_CMDACTIVE (1 << 11)
#define MCI_TXACTIVE (1 << 12)
@@ -103,6 +104,7 @@
#define MCI_CMDRESPENDCLR (1 << 6)
#define MCI_CMDSENTCLR (1 << 7)
#define MCI_DATAENDCLR (1 << 8)
+#define MCI_STARTBITERRCLR (1 << 9)
#define MCI_DATABLOCKENDCLR (1 << 10)
#define MCI_SDIOITC (1 << 22)
#define MCI_CEATAENDC (1 << 23)
@@ -117,6 +119,7 @@
#define MCI_CMDRESPENDMASK (1 << 6)
#define MCI_CMDSENTMASK (1 << 7)
#define MCI_DATAENDMASK (1 << 8)
+#define MCI_STARTBITERRMASK (1 << 9)
#define MCI_DATABLOCKENDMASK (1 << 10)
#define MCI_CMDACTIVEMASK (1 << 11)
#define MCI_TXACTIVEMASK (1 << 12)
@@ -141,12 +144,6 @@
MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK)
-#define MCI_DATA_ERR \
- (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)
-#define MCI_DATA_IRQ (MCI_DATA_ERR|MCI_DATAEND|MCI_DATABLOCKEND)
-#define MCI_CMD_ERR (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)
-#define MCI_CMD_IRQ (MCI_CMD_ERR|MCI_CMDRESPEND|MCI_CMDSENT)
-
/* These interrupts are directed to IRQ1 when two IRQ lines are available */
#define MCI_IRQ1MASK \
(MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
@@ -159,12 +156,37 @@ struct variant_data;
struct dma_chan;
struct dma_async_tx_descriptor;
+enum mmci_dma_complete {
+ COMPLETION_DMA_NONE,
+ COMPLETION_DMA_START,
+ COMPLETION_DMA_XFERFINISH,
+};
+
+enum mmci_waitfor {
+ COMPLETION_NONE,
+ COMPLETION_FINALIZE,
+ COMPLETION_REQ,
+ COMPLETION_CMDSENT,
+ COMPLETION_RSPFIN,
+ COMPLETION_XFERFINISH,
+ COMPLETION_XFERFINISH_RSPFIN,
+};
+
+struct mmci_stat {
+ unsigned long nb_core_req;
+ unsigned long nb_cmdcrcfail;
+ unsigned long nb_rxoverrun;
+ unsigned long nb_txunderrun;
+ unsigned long nb_datacrcfail;
+ unsigned long nb_datatimeout;
+ unsigned long nb_startbiterr;
+};
+
struct mmci_host {
phys_addr_t phybase;
void __iomem *base;
struct mmc_request *mrq;
struct mmc_command *cmd;
- struct mmc_data *data;
struct mmc_host *mmc;
struct clk *clk;
int gpio_cd;
@@ -188,14 +210,6 @@ struct mmci_host {
struct timer_list timer;
unsigned int oldstat;
- bool last_blockend;
- bool dataend;
-
- /* register cache */
- unsigned int irqmask0_reg;
- unsigned int pwr_reg;
- unsigned int clk_reg;
-
/* pio stuff */
struct sg_mapping_iter sg_miter;
unsigned int size;
@@ -214,8 +228,33 @@ struct mmci_host {
struct dma_async_tx_descriptor *dma_desc;
#endif
+ struct tasklet_struct mmci_tasklet;
+ enum mmci_waitfor complete_what;
+ enum mmci_dma_complete dma_complete;
+
+ int cmd_is_stop;
+
+#define XFER_NONE 0
+#define XFER_READ 1
+#define XFER_WRITE 2
+ u32 pio_active;
+
+ /* mmci registers */
+ u32 mmci_command;
+ u32 mmci_argument;
+
+ u32 mmci_mask0;
+ u32 mmci_mask1;
+
+ u32 mmci_datatimer;
+ u32 mmci_datalength;
+ u32 mmci_datactrl;
+ u32 mmci_clockctrl;
+ u32 mmci_power;
+
+ struct mmci_stat stat;
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_regs;
+ struct dentry *debug_stat;
#endif
};
-
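For completeness, the new debugfs "stat" file can be exercised from userspace roughly like this. The sketch assumes debugfs is mounted at /sys/kernel/debug and the controller registered as mmc0; both paths are assumptions and vary by system. A read dumps mmci_stat_show(), any write runs mmci_stat_reset():

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* path is an assumption: debugfs mount point and host index vary */
	int fd = open("/sys/kernel/debug/mmc0/stat", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);	/* dump counters and state */
	if (n > 0)
		fwrite(buf, 1, n, stdout);
	if (write(fd, "0", 1) < 0)		/* any write resets counters */
		perror("write");
	close(fd);
	return 0;
}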