From c8afc9d59ce1100d3f7704e86fda5a25361c45bf Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 4 Feb 2011 09:19:46 +0000
Subject: ARM: mmci: avoid reporting too many completed bytes on fifo overrun

The data counter counts the number of bytes transferred on the MMC bus.
When a FIFO overrun occurs, we will not have transferred a FIFO's worth
of data to memory, and so the data counter will be a FIFO's worth ahead.
If this occurs on a block boundary, we will report one too many sectors
as successful.  Fix this.

Acked-by: Linus Walleij
Signed-off-by: Russell King
---
 drivers/mmc/host/mmci.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

(limited to 'drivers/mmc')

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2d6de3e03e2..d7b83a8b353 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -283,22 +283,34 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
         if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                 u32 remain, success;
 
-                /* Calculate how far we are into the transfer */
+                /*
+                 * Calculate how far we are into the transfer.  Note that
+                 * the data counter gives the number of bytes transferred
+                 * on the MMC bus, not on the host side.  On reads, this
+                 * can be as much as a FIFO's worth of data ahead.  This
+                 * matters for FIFO overruns only.
+                 */
                 remain = readl(host->base + MMCIDATACNT);
                 success = data->blksz * data->blocks - remain;
 
-                dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+                dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+                        status, success);
                 if (status & MCI_DATACRCFAIL) {
                         /* Last block was not successful */
-                        host->data_xfered = round_down(success - 1, data->blksz);
+                        success -= 1;
                         data->error = -EILSEQ;
                 } else if (status & MCI_DATATIMEOUT) {
-                        host->data_xfered = round_down(success, data->blksz);
                         data->error = -ETIMEDOUT;
-                } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
-                        host->data_xfered = round_down(success, data->blksz);
+                } else if (status & MCI_TXUNDERRUN) {
+                        data->error = -EIO;
+                } else if (status & MCI_RXOVERRUN) {
+                        if (success > host->variant->fifosize)
+                                success -= host->variant->fifosize;
+                        else
+                                success = 0;
                         data->error = -EIO;
                 }
+                host->data_xfered = round_down(success, data->blksz);
 
                 /*
                  * We hit an error condition.  Ensure that any data
--
cgit v1.2.3
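The arithmetic in this patch is easy to check in isolation.  Below is a
standalone sketch (ordinary C, with a local stand-in for the kernel's
round_down() and made-up numbers: 512-byte blocks, a 64-byte FIFO); it
shows the RX overrun correction knocking one block off the reported
count:

    /* Standalone model of the overrun accounting; all numbers are
     * illustrative, not read from real hardware. */
    #include <stdio.h>

    /* Same result as the kernel's round_down() for these inputs. */
    #define round_down(x, y) ((x) - ((x) % (y)))

    int main(void)
    {
            unsigned int blksz = 512, blocks = 8, fifosize = 64;
            unsigned int remain = 1024;     /* MMCIDATACNT at error time */
            unsigned int success = blksz * blocks - remain;

            /* On an RX overrun, up to a FIFO's worth of the bytes
             * counted on the MMC bus never reached memory. */
            if (success > fifosize)
                    success -= fifosize;
            else
                    success = 0;

            printf("data_xfered = %u (%u blocks)\n",
                   round_down(success, blksz),
                   round_down(success, blksz) / blksz);
            return 0;
    }

With these numbers, 3072 bytes were counted on the bus but only 3008
reached memory, so round_down() reports 5 complete blocks where the old
code would have claimed 6.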
From 7d7aa23cf0700f4025cb61bd1ac517ccf79bd460 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 27 Jan 2011 09:46:29 +0000
Subject: ARM: mmci: no need to call flush_dcache_page() with sg_miter API

The sg_miter API provides the required cache maintenance, so we don't
need to do that ourselves.  Remove the unnecessary additional cache
maintenance.

Signed-off-by: Russell King
---
 drivers/mmc/host/mmci.c | 19 -------------------
 1 file changed, 19 deletions(-)

(limited to 'drivers/mmc')

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index d7b83a8b353..2563792c01d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -311,22 +311,6 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
                         data->error = -EIO;
                 }
                 host->data_xfered = round_down(success, data->blksz);
-
-                /*
-                 * We hit an error condition.  Ensure that any data
-                 * partially written to a page is properly coherent.
-                 */
-                if (data->flags & MMC_DATA_READ) {
-                        struct sg_mapping_iter *sg_miter = &host->sg_miter;
-                        unsigned long flags;
-
-                        local_irq_save(flags);
-                        if (sg_miter_next(sg_miter)) {
-                                flush_dcache_page(sg_miter->page);
-                                sg_miter_stop(sg_miter);
-                        }
-                        local_irq_restore(flags);
-                }
         }
 
         if (status & MCI_DATABLOCKEND)
@@ -510,9 +494,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
                 if (remain)
                         break;
 
-                if (status & MCI_RXACTIVE)
-                        flush_dcache_page(sg_miter->page);
-
                 status = readl(base + MMCISTATUS);
         } while (1);
--
cgit v1.2.3
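The reason the extra flush_dcache_page() calls can go: sg_miter_stop()
already performs the dcache maintenance for any page the CPU wrote
through the iterator, provided the iterator was started with
SG_MITER_TO_SG (the CPU-writes direction, as on an MMC read).  A minimal
sketch of that pattern - assuming a scatterlist sgl/nents has already
been set up - looks like this:

    #include <linux/scatterlist.h>

    /* Drain a read into the scatterlist; cache maintenance is left
     * entirely to the sg_miter API. */
    static void drain_to_sg(struct scatterlist *sgl, unsigned int nents)
    {
            struct sg_mapping_iter miter;

            sg_miter_start(&miter, sgl, nents,
                           SG_MITER_ATOMIC | SG_MITER_TO_SG);
            while (sg_miter_next(&miter)) {
                    /* copy up to miter.length bytes into miter.addr */
            }
            sg_miter_stop(&miter);  /* flushes SG_MITER_TO_SG pages */
    }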
From c4d877c1b3df58d89f01d7b211f58b944356eea3 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 27 Jan 2011 09:50:13 +0000
Subject: ARM: mmci: avoid unnecessary switch to data available PIO interrupts

We don't need to switch to data available interrupts if there's at least
half a FIFO depth worth of data remaining, as we'll still get the FIFO
half full interrupt.  Keep this interrupt masked off until we have less
than half the FIFO depth worth of data remaining.

Signed-off-by: Russell King
---
 drivers/mmc/host/mmci.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

(limited to 'drivers/mmc')

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2563792c01d..bde170d8f72 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -224,10 +224,11 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
                 irqmask = MCI_RXFIFOHALFFULLMASK;
 
                 /*
-                 * If we have less than a FIFOSIZE of bytes to transfer,
-                 * trigger a PIO interrupt as soon as any data is available.
+                 * If we have less than the fifo 'half-full' threshold to
+                 * transfer, trigger a PIO interrupt as soon as any data
+                 * is available.
                  */
-                if (host->size < variant->fifosize)
+                if (host->size < variant->fifohalfsize)
                         irqmask |= MCI_RXDATAAVLBLMASK;
         } else {
                 /*
@@ -502,10 +503,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
         local_irq_restore(flags);
 
         /*
-         * If we're nearing the end of the read, switch to
-         * "any data available" mode.
+         * If we have less than the fifo 'half-full' threshold to transfer,
+         * trigger a PIO interrupt as soon as any data is available.
          */
-        if (status & MCI_RXACTIVE && host->size < variant->fifosize)
+        if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
                 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
 
         /*
--
cgit v1.2.3
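The effect of the new threshold is easiest to see with concrete numbers.
Assuming PL180-style FIFO parameters (fifosize = 64 bytes, fifohalfsize
= 32 bytes; both values are illustrative), this small program contrasts
the old and the new test:

    #include <stdio.h>

    int main(void)
    {
            unsigned int fifosize = 64, fifohalfsize = 32;
            unsigned int left[] = { 100, 48, 20 }; /* bytes still to read */

            for (int i = 0; i < 3; i++)
                    printf("%3u bytes left: extra 'data available' IRQ "
                           "old=%s new=%s\n", left[i],
                           left[i] < fifosize ? "yes" : "no",
                           left[i] < fifohalfsize ? "yes" : "no");
            return 0;
    }

The 48-byte case is the interesting one: the FIFO half-full interrupt
still fires there, so the per-word "data available" interrupts the old
test enabled bought nothing but overhead.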
From 51d4375dd72f352594f1a4f1d7598bf9a75b8dfe Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 27 Jan 2011 10:56:52 +0000
Subject: ARM: mmci: no need for separate host->data_xfered

We don't need to store the number of bytes transferred in our host
structure - we can store this directly in data->bytes_xfered.

Signed-off-by: Russell King
---
 drivers/mmc/host/mmci.c | 9 +++------
 drivers/mmc/host/mmci.h | 2 --
 2 files changed, 3 insertions(+), 8 deletions(-)

(limited to 'drivers/mmc')

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index bde170d8f72..db2a358143d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -142,9 +142,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
         host->mrq = NULL;
         host->cmd = NULL;
 
-        if (mrq->data)
-                mrq->data->bytes_xfered = host->data_xfered;
-
         /*
          * Need to drop the host lock here; mmc_request_done may call
          * back into the driver...
@@ -202,7 +199,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 
         host->data = data;
         host->size = data->blksz * data->blocks;
-        host->data_xfered = 0;
+        data->bytes_xfered = 0;
 
         mmci_init_sg(host, data);
 
@@ -311,7 +308,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
                         success = 0;
                         data->error = -EIO;
                 }
-                host->data_xfered = round_down(success, data->blksz);
+                data->bytes_xfered = round_down(success, data->blksz);
         }
 
         if (status & MCI_DATABLOCKEND)
@@ -322,7 +319,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 
                 if (!data->error)
                         /* The error clause is handled above, success! */
-                        host->data_xfered += data->blksz * data->blocks;
+                        data->bytes_xfered = data->blksz * data->blocks;
 
                 if (!data->stop) {
                         mmci_request_end(host, data->mrq);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index c1df7b82d36..164ce060fc1 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -161,8 +161,6 @@ struct mmci_host {
         int                     gpio_cd_irq;
         bool                    singleirq;
 
-        unsigned int            data_xfered;
-
         spinlock_t              lock;
 
         unsigned int            mclk;
--
cgit v1.2.3
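The patch below drives transfers through the dmaengine slave API.
Stripped of the MMCI specifics, the per-transfer sequence it performs
looks roughly like this (a sketch only: error handling is elided and
fifo_phys stands in for the FIFO's physical address):

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static void start_rx_dma(struct dma_chan *chan, struct scatterlist *sg,
                             unsigned int sg_len, dma_addr_t fifo_phys)
    {
            struct dma_slave_config conf = {
                    .direction      = DMA_FROM_DEVICE,
                    .src_addr       = fifo_phys,
                    .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            };
            struct dma_async_tx_descriptor *desc;
            int nents;

            /* Map against the DMA engine's device: it does the transfer. */
            nents = dma_map_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);

            dmaengine_slave_config(chan, &conf);
            desc = chan->device->device_prep_slave_sg(chan, sg, nents,
                                                      DMA_FROM_DEVICE,
                                                      DMA_CTRL_ACK);
            dmaengine_submit(desc);         /* queue the descriptor */
            dma_async_issue_pending(chan);  /* and kick the channel */
    }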
+ */ +static inline void mmci_dma_release(struct mmci_host *host) +{ + struct mmci_platform_data *plat = host->plat; + + if (host->dma_rx_channel) + dma_release_channel(host->dma_rx_channel); + if (host->dma_tx_channel && plat->dma_tx_param) + dma_release_channel(host->dma_tx_channel); + host->dma_rx_channel = host->dma_tx_channel = NULL; +} + +static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) +{ + struct dma_chan *chan = host->dma_current; + enum dma_data_direction dir; + u32 status; + int i; + + /* Wait up to 1ms for the DMA to complete */ + for (i = 0; ; i++) { + status = readl(host->base + MMCISTATUS); + if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) + break; + udelay(10); + } + + /* + * Check to see whether we still have some data left in the FIFO - + * this catches DMA controllers which are unable to monitor the + * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- + * contiguous buffers. On TX, we'll get a FIFO underrun error. + */ + if (status & MCI_RXDATAAVLBLMASK) { + dmaengine_terminate_all(chan); + if (!data->error) + data->error = -EIO; + } + + if (data->flags & MMC_DATA_WRITE) { + dir = DMA_TO_DEVICE; + } else { + dir = DMA_FROM_DEVICE; + } + + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); + + /* + * Use of DMA with scatter-gather is impossible. + * Give up with DMA and switch back to PIO mode. + */ + if (status & MCI_RXDATAAVLBLMASK) { + dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); + mmci_dma_release(host); + } +} + +static void mmci_dma_data_error(struct mmci_host *host) +{ + dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); + dmaengine_terminate_all(host->dma_current); +} + +static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +{ + struct variant_data *variant = host->variant; + struct dma_slave_config conf = { + .src_addr = host->phybase + MMCIFIFO, + .dst_addr = host->phybase + MMCIFIFO, + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ + .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ + }; + struct mmc_data *data = host->data; + struct dma_chan *chan; + struct dma_device *device; + struct dma_async_tx_descriptor *desc; + int nr_sg; + + host->dma_current = NULL; + + if (data->flags & MMC_DATA_READ) { + conf.direction = DMA_FROM_DEVICE; + chan = host->dma_rx_channel; + } else { + conf.direction = DMA_TO_DEVICE; + chan = host->dma_tx_channel; + } + + /* If there's no DMA channel, fall back to PIO */ + if (!chan) + return -EINVAL; + + /* If less than or equal to the fifo size, don't bother with DMA */ + if (host->size <= variant->fifosize) + return -EINVAL; + + device = chan->device; + nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction); + if (nr_sg == 0) + return -EINVAL; + + dmaengine_slave_config(chan, &conf); + desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, + conf.direction, DMA_CTRL_ACK); + if (!desc) + goto unmap_exit; + + /* Okay, go for it. */ + host->dma_current = chan; + + dev_vdbg(mmc_dev(host->mmc), + "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", + data->sg_len, data->blksz, data->blocks, data->flags); + dmaengine_submit(desc); + dma_async_issue_pending(chan); + + datactrl |= MCI_DPSM_DMAENABLE; + + /* Trigger the DMA transfer */ + writel(datactrl, host->base + MMCIDATACTRL); + + /* + * Let the MMCI say when the data is ended and it's time + * to fire next DMA request. 
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+        struct mmci_platform_data *plat = host->plat;
+
+        if (host->dma_rx_channel)
+                dma_release_channel(host->dma_rx_channel);
+        if (host->dma_tx_channel && plat->dma_tx_param)
+                dma_release_channel(host->dma_tx_channel);
+        host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+        struct dma_chan *chan = host->dma_current;
+        enum dma_data_direction dir;
+        u32 status;
+        int i;
+
+        /* Wait up to 1ms for the DMA to complete */
+        for (i = 0; ; i++) {
+                status = readl(host->base + MMCISTATUS);
+                if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+                        break;
+                udelay(10);
+        }
+
+        /*
+         * Check to see whether we still have some data left in the FIFO -
+         * this catches DMA controllers which are unable to monitor the
+         * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+         * contiguous buffers.  On TX, we'll get a FIFO underrun error.
+         */
+        if (status & MCI_RXDATAAVLBLMASK) {
+                dmaengine_terminate_all(chan);
+                if (!data->error)
+                        data->error = -EIO;
+        }
+
+        if (data->flags & MMC_DATA_WRITE) {
+                dir = DMA_TO_DEVICE;
+        } else {
+                dir = DMA_FROM_DEVICE;
+        }
+
+        dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+        /*
+         * Use of DMA with scatter-gather is impossible.
+         * Give up with DMA and switch back to PIO mode.
+         */
+        if (status & MCI_RXDATAAVLBLMASK) {
+                dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+                mmci_dma_release(host);
+        }
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+        dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+        dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+        struct variant_data *variant = host->variant;
+        struct dma_slave_config conf = {
+                .src_addr = host->phybase + MMCIFIFO,
+                .dst_addr = host->phybase + MMCIFIFO,
+                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+                .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+                .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+        };
+        struct mmc_data *data = host->data;
+        struct dma_chan *chan;
+        struct dma_device *device;
+        struct dma_async_tx_descriptor *desc;
+        int nr_sg;
+
+        host->dma_current = NULL;
+
+        if (data->flags & MMC_DATA_READ) {
+                conf.direction = DMA_FROM_DEVICE;
+                chan = host->dma_rx_channel;
+        } else {
+                conf.direction = DMA_TO_DEVICE;
+                chan = host->dma_tx_channel;
+        }
+
+        /* If there's no DMA channel, fall back to PIO */
+        if (!chan)
+                return -EINVAL;
+
+        /* If less than or equal to the fifo size, don't bother with DMA */
+        if (host->size <= variant->fifosize)
+                return -EINVAL;
+
+        device = chan->device;
+        nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+        if (nr_sg == 0)
+                return -EINVAL;
+
+        dmaengine_slave_config(chan, &conf);
+        desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+                                            conf.direction, DMA_CTRL_ACK);
+        if (!desc)
+                goto unmap_exit;
+
+        /* Okay, go for it. */
+        host->dma_current = chan;
+
+        dev_vdbg(mmc_dev(host->mmc),
+                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+                 data->sg_len, data->blksz, data->blocks, data->flags);
+        dmaengine_submit(desc);
+        dma_async_issue_pending(chan);
+
+        datactrl |= MCI_DPSM_DMAENABLE;
+
+        /* Trigger the DMA transfer */
+        writel(datactrl, host->base + MMCIDATACTRL);
+
+        /*
+         * Let the MMCI say when the data is ended and it's time
+         * to fire the next DMA request.  When that happens, MMCI will
+         * call mmci_data_end()
+         */
+        writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+               host->base + MMCIMASK0);
+        return 0;
+
+unmap_exit:
+        dmaengine_terminate_all(chan);
+        dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+        return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+        return -ENOSYS;
+}
+#endif
+
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 {
         struct variant_data *variant = host->variant;
@@ -201,8 +445,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
         host->size = data->blksz * data->blocks;
         data->bytes_xfered = 0;
 
-        mmci_init_sg(host, data);
-
         clks = (unsigned long long)data->timeout_ns * host->cclk;
 
         do_div(clks, 1000000000UL);
@@ -216,8 +458,21 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
         BUG_ON(1 << blksz_bits != data->blksz);
 
         datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
-        if (data->flags & MMC_DATA_READ) {
+
+        if (data->flags & MMC_DATA_READ)
                 datactrl |= MCI_DPSM_DIRECTION;
+
+        /*
+         * Attempt to use DMA operation mode; if this
+         * fails, fall back to PIO mode
+         */
+        if (!mmci_dma_start_data(host, datactrl))
+                return;
+
+        /* IRQ mode, map the SG list for CPU reading/writing */
+        mmci_init_sg(host, data);
+
+        if (data->flags & MMC_DATA_READ) {
                 irqmask = MCI_RXFIFOHALFFULLMASK;
 
                 /*
@@ -281,6 +536,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
         if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                 u32 remain, success;
 
+                /* Terminate the DMA transfer */
+                if (dma_inprogress(host))
+                        mmci_dma_data_error(host);
+
                 /*
                  * Calculate how far we are into the transfer.  Note that
                  * the data counter gives the number of bytes transferred
@@ -315,6 +574,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
                 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
         if (status & MCI_DATAEND || data->error) {
+                if (dma_inprogress(host))
+                        mmci_dma_unmap(host, data);
                 mmci_stop_data(host);
 
                 if (!data->error)
@@ -767,6 +1028,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
                 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", host->mclk);
         }
 
+        host->phybase = dev->res.start;
         host->base = ioremap(dev->res.start, resource_size(&dev->res));
         if (!host->base) {
                 ret = -ENOMEM;
@@ -894,9 +1156,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 
         amba_set_drvdata(dev, mmc);
 
-        dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
-                mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
-                (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+        dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+                 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+                 amba_rev(dev), (unsigned long long)dev->res.start,
+                 dev->irq[0], dev->irq[1]);
+
+        mmci_dma_setup(host);
 
         mmc_add_host(mmc);
 
@@ -943,6 +1208,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
                 writel(0, host->base + MMCICOMMAND);
                 writel(0, host->base + MMCIDATACTRL);
 
+                mmci_dma_release(host);
                 free_irq(dev->irq[0], host);
                 if (!host->singleirq)
                         free_irq(dev->irq[1], host);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 164ce060fc1..ec9a7bc6d0d 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -148,8 +148,10 @@ struct clk;
 struct variant_data;
+struct dma_chan;
 
 struct mmci_host {
+        phys_addr_t             phybase;
         void __iomem            *base;
         struct mmc_request      *mrq;
         struct mmc_command      *cmd;
@@ -179,5 +181,16 @@ struct mmci_host {
         struct sg_mapping_iter  sg_miter;
         unsigned int            size;
         struct regulator        *vcc;
+
+#ifdef CONFIG_DMA_ENGINE
+        /* DMA stuff */
+        struct dma_chan         *dma_current;
+        struct dma_chan         *dma_rx_channel;
+        struct dma_chan         *dma_tx_channel;
+
+#define dma_inprogress(host)    ((host)->dma_current)
+#else
+#define dma_inprogress(host)    (0)
+#endif
 };
--
cgit v1.2.3

From aa25afad2ca60d19457849ea75e9c31236f4e174 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 19 Feb 2011 15:55:00 +0000
Subject: ARM: amba: make probe() functions take const id tables

Make Primecell driver probe functions take a const pointer to their
ID tables.  Drivers should never modify their ID tables in their
probe handler.

Signed-off-by: Russell King
---
 drivers/mmc/host/mmci.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/mmc')

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2d6de3e03e2..67f17d61b97 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -713,7 +713,8 @@ static const struct mmc_host_ops mmci_ops = {
         .get_cd = mmci_get_cd,
 };
 
-static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit mmci_probe(struct amba_device *dev,
+        const struct amba_id *id)
 {
         struct mmci_platform_data *plat = dev->dev.platform_data;
         struct variant_data *variant = id->data;
--
cgit v1.2.3
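With that signature change, a PrimeCell driver takes the shape sketched
below ("foo" is a hypothetical driver; note that both the ID table and
the pointer handed to probe() are const):

    #include <linux/init.h>
    #include <linux/amba/bus.h>

    static int __devinit foo_probe(struct amba_device *adev,
                                   const struct amba_id *id)
    {
            /* id->data may be used, but the table entry is read-only */
            return 0;
    }

    static const struct amba_id foo_ids[] = {
            { .id = 0x00041180, .mask = 0x000fffff }, /* PL180, as in mmci.c */
            { 0, 0 },
    };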