/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson AB.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/init.h>
14 #include <linux/ioport.h>
15 #include <linux/device.h>
16 #include <linux/interrupt.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/highmem.h>
20 #include <linux/log2.h>
21 #include <linux/mmc/host.h>
22 #include <linux/amba/bus.h>
23 #include <linux/clk.h>
24 #include <linux/scatterlist.h>
25 #include <linux/gpio.h>
26 #include <linux/amba/mmci.h>
27 #include <linux/regulator/consumer.h>
29 #include <asm/div64.h>
31 #include <asm/sizes.h>
#define DRIVER_NAME "mmci-pl18x"

/*
 * Default maximum operating frequency (Hz), used when the platform data
 * does not supply f_max.  Overridable at load time via the "fmax" module
 * parameter (see module_param() at the bottom of this file).
 */
static unsigned int fmax = 515633;
40 * struct variant_data - MMCI variant-specific quirks
41 * @clkreg: default value for MCICLOCK register
47 static struct variant_data variant_arm = {
50 static struct variant_data variant_u300 = {
53 static struct variant_data variant_ux500 = {
54 .clkreg = MCI_CLK_ENABLE,
57 * This must be called with host->lock held
59 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
61 struct variant_data *variant = host->variant;
62 u32 clk = variant->clkreg;
65 if (desired >= host->mclk) {
67 host->cclk = host->mclk;
69 clk = host->mclk / (2 * desired) - 1;
72 host->cclk = host->mclk / (2 * (clk + 1));
74 if (host->hw_designer == AMBA_VENDOR_ST)
75 clk |= MCI_ST_FCEN; /* Bug fix in ST IP block */
76 clk |= MCI_CLK_ENABLE;
77 /* This hasn't proven to be worthwhile */
78 /* clk |= MCI_CLK_PWRSAVE; */
81 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
83 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
84 clk |= MCI_ST_8BIT_BUS;
86 writel(clk, host->base + MMCICLOCK);
90 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
92 writel(0, host->base + MMCICOMMAND);
100 mrq->data->bytes_xfered = host->data_xfered;
103 * Need to drop the host lock here; mmc_request_done may call
104 * back into the driver...
106 spin_unlock(&host->lock);
107 mmc_request_done(host->mmc, mrq);
108 spin_lock(&host->lock);
111 static void mmci_stop_data(struct mmci_host *host)
113 writel(0, host->base + MMCIDATACTRL);
114 writel(0, host->base + MMCIMASK1);
118 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
120 unsigned int flags = SG_MITER_ATOMIC;
122 if (data->flags & MMC_DATA_READ)
123 flags |= SG_MITER_TO_SG;
125 flags |= SG_MITER_FROM_SG;
127 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
130 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
132 unsigned int datactrl, timeout, irqmask;
133 unsigned long long clks;
137 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
138 data->blksz, data->blocks, data->flags);
141 host->size = data->blksz * data->blocks;
142 host->data_xfered = 0;
144 mmci_init_sg(host, data);
146 clks = (unsigned long long)data->timeout_ns * host->cclk;
147 do_div(clks, 1000000000UL);
149 timeout = data->timeout_clks + (unsigned int)clks;
152 writel(timeout, base + MMCIDATATIMER);
153 writel(host->size, base + MMCIDATALENGTH);
155 blksz_bits = ffs(data->blksz) - 1;
156 BUG_ON(1 << blksz_bits != data->blksz);
158 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
159 if (data->flags & MMC_DATA_READ) {
160 datactrl |= MCI_DPSM_DIRECTION;
161 irqmask = MCI_RXFIFOHALFFULLMASK;
164 * If we have less than a FIFOSIZE of bytes to transfer,
165 * trigger a PIO interrupt as soon as any data is available.
167 if (host->size < MCI_FIFOSIZE)
168 irqmask |= MCI_RXDATAAVLBLMASK;
171 * We don't actually need to include "FIFO empty" here
172 * since its implicit in "FIFO half empty".
174 irqmask = MCI_TXFIFOHALFEMPTYMASK;
177 writel(datactrl, base + MMCIDATACTRL);
178 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
179 writel(irqmask, base + MMCIMASK1);
183 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
185 void __iomem *base = host->base;
187 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
188 cmd->opcode, cmd->arg, cmd->flags);
190 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
191 writel(0, base + MMCICOMMAND);
195 c |= cmd->opcode | MCI_CPSM_ENABLE;
196 if (cmd->flags & MMC_RSP_PRESENT) {
197 if (cmd->flags & MMC_RSP_136)
198 c |= MCI_CPSM_LONGRSP;
199 c |= MCI_CPSM_RESPONSE;
202 c |= MCI_CPSM_INTERRUPT;
206 writel(cmd->arg, base + MMCIARGUMENT);
207 writel(c, base + MMCICOMMAND);
211 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
214 if (status & MCI_DATABLOCKEND) {
215 host->data_xfered += data->blksz;
216 #ifdef CONFIG_ARCH_U300
218 * On the U300 some signal or other is
219 * badly routed so that a data write does
220 * not properly terminate with a MCI_DATAEND
221 * status flag. This quirk will make writes
224 if (data->flags & MMC_DATA_WRITE)
225 status |= MCI_DATAEND;
228 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
229 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
230 if (status & MCI_DATACRCFAIL)
231 data->error = -EILSEQ;
232 else if (status & MCI_DATATIMEOUT)
233 data->error = -ETIMEDOUT;
234 else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
236 status |= MCI_DATAEND;
239 * We hit an error condition. Ensure that any data
240 * partially written to a page is properly coherent.
242 if (data->flags & MMC_DATA_READ) {
243 struct sg_mapping_iter *sg_miter = &host->sg_miter;
246 local_irq_save(flags);
247 if (sg_miter_next(sg_miter)) {
248 flush_dcache_page(sg_miter->page);
249 sg_miter_stop(sg_miter);
251 local_irq_restore(flags);
254 if (status & MCI_DATAEND) {
255 mmci_stop_data(host);
258 mmci_request_end(host, data->mrq);
260 mmci_start_command(host, data->stop, 0);
266 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
269 void __iomem *base = host->base;
273 cmd->resp[0] = readl(base + MMCIRESPONSE0);
274 cmd->resp[1] = readl(base + MMCIRESPONSE1);
275 cmd->resp[2] = readl(base + MMCIRESPONSE2);
276 cmd->resp[3] = readl(base + MMCIRESPONSE3);
278 if (status & MCI_CMDTIMEOUT) {
279 cmd->error = -ETIMEDOUT;
280 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
281 cmd->error = -EILSEQ;
284 if (!cmd->data || cmd->error) {
286 mmci_stop_data(host);
287 mmci_request_end(host, cmd->mrq);
288 } else if (!(cmd->data->flags & MMC_DATA_READ)) {
289 mmci_start_data(host, cmd->data);
293 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
295 void __iomem *base = host->base;
298 int host_remain = host->size;
301 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);
309 readsl(base + MMCIFIFO, ptr, count >> 2);
313 host_remain -= count;
318 status = readl(base + MMCISTATUS);
319 } while (status & MCI_RXDATAAVLBL);
324 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
326 void __iomem *base = host->base;
330 unsigned int count, maxcnt;
332 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
333 count = min(remain, maxcnt);
335 writesl(base + MMCIFIFO, ptr, count >> 2);
343 status = readl(base + MMCISTATUS);
344 } while (status & MCI_TXFIFOHALFEMPTY);
350 * PIO data transfer IRQ handler.
352 static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
354 struct mmci_host *host = dev_id;
355 struct sg_mapping_iter *sg_miter = &host->sg_miter;
356 void __iomem *base = host->base;
360 status = readl(base + MMCISTATUS);
362 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
364 local_irq_save(flags);
367 unsigned int remain, len;
371 * For write, we only need to test the half-empty flag
372 * here - if the FIFO is completely empty, then by
373 * definition it is more than half empty.
375 * For read, check for data available.
377 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
380 if (!sg_miter_next(sg_miter))
383 buffer = sg_miter->addr;
384 remain = sg_miter->length;
387 if (status & MCI_RXACTIVE)
388 len = mmci_pio_read(host, buffer, remain);
389 if (status & MCI_TXACTIVE)
390 len = mmci_pio_write(host, buffer, remain, status);
392 sg_miter->consumed = len;
400 if (status & MCI_RXACTIVE)
401 flush_dcache_page(sg_miter->page);
403 status = readl(base + MMCISTATUS);
406 sg_miter_stop(sg_miter);
408 local_irq_restore(flags);
411 * If we're nearing the end of the read, switch to
412 * "any data available" mode.
414 if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
415 writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
418 * If we run out of data, disable the data IRQs; this
419 * prevents a race where the FIFO becomes empty before
420 * the chip itself has disabled the data path, and
421 * stops us racing with our data end IRQ.
423 if (host->size == 0) {
424 writel(0, base + MMCIMASK1);
425 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
432 * Handle completion of command and data transfers.
434 static irqreturn_t mmci_irq(int irq, void *dev_id)
436 struct mmci_host *host = dev_id;
440 spin_lock(&host->lock);
443 struct mmc_command *cmd;
444 struct mmc_data *data;
446 status = readl(host->base + MMCISTATUS);
447 status &= readl(host->base + MMCIMASK0);
448 writel(status, host->base + MMCICLEAR);
450 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
453 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
454 MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
455 mmci_data_irq(host, data, status);
458 if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
459 mmci_cmd_irq(host, cmd, status);
464 spin_unlock(&host->lock);
466 return IRQ_RETVAL(ret);
469 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
471 struct mmci_host *host = mmc_priv(mmc);
474 WARN_ON(host->mrq != NULL);
476 if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
477 dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
479 mrq->cmd->error = -EINVAL;
480 mmc_request_done(mmc, mrq);
484 spin_lock_irqsave(&host->lock, flags);
488 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
489 mmci_start_data(host, mrq->data);
491 mmci_start_command(host, mrq->cmd, 0);
493 spin_unlock_irqrestore(&host->lock, flags);
496 static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
498 struct mmci_host *host = mmc_priv(mmc);
502 switch (ios->power_mode) {
505 regulator_is_enabled(host->vcc))
506 regulator_disable(host->vcc);
509 #ifdef CONFIG_REGULATOR
511 /* This implicitly enables the regulator */
512 mmc_regulator_set_ocr(host->vcc, ios->vdd);
514 if (host->plat->vdd_handler)
515 pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
517 /* The ST version does not have this, fall through to POWER_ON */
518 if (host->hw_designer != AMBA_VENDOR_ST) {
527 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
528 if (host->hw_designer != AMBA_VENDOR_ST)
532 * The ST Micro variant use the ROD bit for something
533 * else and only has OD (Open Drain).
539 spin_lock_irqsave(&host->lock, flags);
541 mmci_set_clkreg(host, ios->clock);
543 if (host->pwr != pwr) {
545 writel(pwr, host->base + MMCIPOWER);
548 spin_unlock_irqrestore(&host->lock, flags);
551 static int mmci_get_ro(struct mmc_host *mmc)
553 struct mmci_host *host = mmc_priv(mmc);
555 if (host->gpio_wp == -ENOSYS)
558 return gpio_get_value(host->gpio_wp);
561 static int mmci_get_cd(struct mmc_host *mmc)
563 struct mmci_host *host = mmc_priv(mmc);
566 if (host->gpio_cd == -ENOSYS)
567 status = host->plat->status(mmc_dev(host->mmc));
569 status = gpio_get_value(host->gpio_cd);
574 static const struct mmc_host_ops mmci_ops = {
575 .request = mmci_request,
576 .set_ios = mmci_set_ios,
577 .get_ro = mmci_get_ro,
578 .get_cd = mmci_get_cd,
581 static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
583 struct mmci_platform_data *plat = dev->dev.platform_data;
584 struct variant_data *variant = id->data;
585 struct mmci_host *host;
586 struct mmc_host *mmc;
589 /* must have platform data */
595 ret = amba_request_regions(dev, DRIVER_NAME);
599 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
605 host = mmc_priv(mmc);
608 host->gpio_wp = -ENOSYS;
609 host->gpio_cd = -ENOSYS;
611 host->hw_designer = amba_manf(dev);
612 host->hw_revision = amba_rev(dev);
613 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
614 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
616 host->clk = clk_get(&dev->dev, NULL);
617 if (IS_ERR(host->clk)) {
618 ret = PTR_ERR(host->clk);
623 ret = clk_enable(host->clk);
628 host->variant = variant;
629 host->mclk = clk_get_rate(host->clk);
631 * According to the spec, mclk is max 100 MHz,
632 * so we try to adjust the clock down to this,
635 if (host->mclk > 100000000) {
636 ret = clk_set_rate(host->clk, 100000000);
639 host->mclk = clk_get_rate(host->clk);
640 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
643 host->base = ioremap(dev->res.start, resource_size(&dev->res));
649 mmc->ops = &mmci_ops;
650 mmc->f_min = (host->mclk + 511) / 512;
652 * If the platform data supplies a maximum operating
653 * frequency, this takes precedence. Else, we fall back
654 * to using the module parameter, which has a (low)
655 * default value in case it is not specified. Either
656 * value must not exceed the clock rate into the block,
660 mmc->f_max = min(host->mclk, plat->f_max);
662 mmc->f_max = min(host->mclk, fmax);
663 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
665 #ifdef CONFIG_REGULATOR
666 /* If we're using the regulator framework, try to fetch a regulator */
667 host->vcc = regulator_get(&dev->dev, "vmmc");
668 if (IS_ERR(host->vcc))
671 int mask = mmc_regulator_get_ocrmask(host->vcc);
674 dev_err(&dev->dev, "error getting OCR mask (%d)\n",
677 host->mmc->ocr_avail = (u32) mask;
680 "Provided ocr_mask/setpower will not be used "
681 "(using regulator instead)\n");
685 /* Fall back to platform data if no regulator is found */
686 if (host->vcc == NULL)
687 mmc->ocr_avail = plat->ocr_mask;
688 mmc->caps = plat->capabilities;
689 mmc->caps |= MMC_CAP_NEEDS_POLL;
694 mmc->max_hw_segs = 16;
695 mmc->max_phys_segs = NR_SG;
698 * Since we only have a 16-bit data length register, we must
699 * ensure that we don't exceed 2^16-1 bytes in a single request.
701 mmc->max_req_size = 65535;
704 * Set the maximum segment size. Since we aren't doing DMA
705 * (yet) we are only limited by the data length register.
707 mmc->max_seg_size = mmc->max_req_size;
710 * Block size can be up to 2048 bytes, but must be a power of two.
712 mmc->max_blk_size = 2048;
715 * No limit on the number of blocks transferred.
717 mmc->max_blk_count = mmc->max_req_size;
719 spin_lock_init(&host->lock);
721 writel(0, host->base + MMCIMASK0);
722 writel(0, host->base + MMCIMASK1);
723 writel(0xfff, host->base + MMCICLEAR);
725 if (gpio_is_valid(plat->gpio_cd)) {
726 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
728 ret = gpio_direction_input(plat->gpio_cd);
730 host->gpio_cd = plat->gpio_cd;
731 else if (ret != -ENOSYS)
734 if (gpio_is_valid(plat->gpio_wp)) {
735 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
737 ret = gpio_direction_input(plat->gpio_wp);
739 host->gpio_wp = plat->gpio_wp;
740 else if (ret != -ENOSYS)
744 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
748 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
752 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
754 amba_set_drvdata(dev, mmc);
758 dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
759 mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
760 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
765 free_irq(dev->irq[0], host);
767 if (host->gpio_wp != -ENOSYS)
768 gpio_free(host->gpio_wp);
770 if (host->gpio_cd != -ENOSYS)
771 gpio_free(host->gpio_cd);
775 clk_disable(host->clk);
781 amba_release_regions(dev);
786 static int __devexit mmci_remove(struct amba_device *dev)
788 struct mmc_host *mmc = amba_get_drvdata(dev);
790 amba_set_drvdata(dev, NULL);
793 struct mmci_host *host = mmc_priv(mmc);
795 mmc_remove_host(mmc);
797 writel(0, host->base + MMCIMASK0);
798 writel(0, host->base + MMCIMASK1);
800 writel(0, host->base + MMCICOMMAND);
801 writel(0, host->base + MMCIDATACTRL);
803 free_irq(dev->irq[0], host);
804 free_irq(dev->irq[1], host);
806 if (host->gpio_wp != -ENOSYS)
807 gpio_free(host->gpio_wp);
808 if (host->gpio_cd != -ENOSYS)
809 gpio_free(host->gpio_cd);
812 clk_disable(host->clk);
815 if (regulator_is_enabled(host->vcc))
816 regulator_disable(host->vcc);
817 regulator_put(host->vcc);
821 amba_release_regions(dev);
#ifdef CONFIG_PM
/* Suspend: let the core suspend the card, then mask our interrupts. */
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

/* Resume: re-enable our interrupt sources, then resume the card. */
static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif
864 static struct amba_id mmci_ids[] = {
868 .data = &variant_arm,
873 .data = &variant_arm,
875 /* ST Micro variants */
879 .data = &variant_u300,
884 .data = &variant_u300,
889 .data = &variant_ux500,
894 static struct amba_driver mmci_driver = {
899 .remove = __devexit_p(mmci_remove),
900 .suspend = mmci_suspend,
901 .resume = mmci_resume,
902 .id_table = mmci_ids,
905 static int __init mmci_init(void)
907 return amba_driver_register(&mmci_driver);
910 static void __exit mmci_exit(void)
912 amba_driver_unregister(&mmci_driver);
915 module_init(mmci_init);
916 module_exit(mmci_exit);
917 module_param(fmax, uint, 0444);
919 MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
920 MODULE_LICENSE("GPL");