2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 * Copyright (C) 2012-2016, NVIDIA CORPORATION. All rights reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or (at
10 * your option) any later version.
12 * Thanks to the following companies for their support:
14 * - JMicron (hardware and technical support)
17 #include <linux/delay.h>
18 #include <linux/highmem.h>
20 #include <linux/module.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/slab.h>
23 #include <linux/scatterlist.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/platform_device.h>
27 #include <linux/sched.h>
29 #include <linux/leds.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/slot-gpio.h>
36 #include <linux/sysedp.h>
37 #ifdef CONFIG_DEBUG_FS
38 #include <linux/debugfs.h>
39 #include <linux/ktime.h>
42 #ifdef CONFIG_EMMC_BLKTRACE
43 #include <linux/mmc/emmc-trace.h>
44 #include "../card/queue.h"
48 #define DRIVER_NAME "sdhci"
50 #define DBG(f, x...) \
51 pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
52 #define MMC_CHECK_CMDQ_MODE(host) \
53 (host && host->mmc && \
55 host->mmc->card->ext_csd.cmdq_mode_en)
57 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
58 defined(CONFIG_MMC_SDHCI_MODULE))
59 #define SDHCI_USE_LEDS_CLASS
62 #define MAX_TUNING_LOOP 40
64 #ifdef CONFIG_CMD_DUMP
65 static volatile unsigned int printk_cpu_test = UINT_MAX;
66 struct timeval cur_tv;
67 struct timeval prev_tv, curr_tv;
68 void mmc_cmd_dump(struct mmc_host *host);
69 void dbg_add_host_log(struct mmc_host *host, int type, int cmd, int arg)
72 unsigned long long nanosec_rem;
74 spin_lock_irqsave(&host->cmd_dump_lock, flags);
76 if (host->dbg_run_host_log_dat[host->dbg_host_cnt - 1].type == type &&
77 host->dbg_run_host_log_dat[host->dbg_host_cnt - 1].cmd == cmd &&
78 host->dbg_run_host_log_dat[host->dbg_host_cnt - 1].arg == arg) {
79 spin_unlock_irqrestore(&host->cmd_dump_lock, flags);
82 t = cpu_clock(printk_cpu_test);
83 nanosec_rem = do_div(t, 1000000000)/1000;
84 do_gettimeofday(&cur_tv);
85 host->dbg_run_host_log_dat[host->dbg_host_cnt].time_sec = t;
86 host->dbg_run_host_log_dat[host->dbg_host_cnt].time_usec = nanosec_rem;
87 host->dbg_run_host_log_dat[host->dbg_host_cnt].type = type;
88 host->dbg_run_host_log_dat[host->dbg_host_cnt].cmd = cmd;
89 host->dbg_run_host_log_dat[host->dbg_host_cnt].arg = arg;
91 if (host->dbg_host_cnt >= dbg_max_cnt)
92 host->dbg_host_cnt = 0;
93 spin_unlock_irqrestore(&host->cmd_dump_lock, flags);
97 /* MMC_RTPM timeout */
98 #define MMC_RTPM_MSEC_TMOUT 10
100 /* SDIO 1msec timeout, but use 10msec timeout for HZ=100 */
101 #define SDIO_CLK_GATING_TICK_TMOUT ((HZ >= 1000) ? (HZ / 1000) : 1)
102 /* 20msec EMMC delayed clock gate timeout */
103 #define EMMC_CLK_GATING_TICK_TMOUT ((HZ >= 50) ? (HZ / 50) : 2)
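/*
 * Illustrative tick arithmetic for the two timeouts above: with HZ = 100,
 * HZ / 1000 evaluates to 0, so the SDIO timeout falls back to 1 tick
 * (10 ms) while the eMMC timeout is HZ / 50 = 2 ticks (20 ms); with
 * HZ = 1000 the same macros give 1 tick (1 ms) and 20 ticks (20 ms).
 */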
105 #define IS_SDIO_CARD(host) \
106 (host->mmc->card && \
107 (host->mmc->card->type == MMC_TYPE_SDIO))
109 #define IS_EMMC_CARD(host) \
110 (host->mmc->card && \
111 (host->mmc->card->type == MMC_TYPE_MMC))
113 #define IS_SDIO_CARD_OR_EMMC(host) \
114 (host->mmc->card && \
115 ((host->mmc->card->type == MMC_TYPE_SDIO) || \
116 (host->mmc->card->type == MMC_TYPE_MMC)))
118 #define IS_DELAYED_CLK_GATE(host) \
119 ((host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE) && \
120 (IS_SDIO_CARD_OR_EMMC(host)) && \
121 (host->mmc->caps2 & MMC_CAP2_CLOCK_GATING))
123 #ifdef CONFIG_DEBUG_FS
125 #define IS_32_BIT(x) (x < (1ULL << 32))
127 #define IS_DATA_READ(flags) ((flags & MMC_DATA_READ) ? true : false)
129 #define PERF_STAT_COMPARE(stat, blk_size, blk_count, is_read) \
131 (stat->is_read == is_read) && \
132 (stat->stat_blk_size == blk_size) && \
133 (stat->stat_blks_per_transfer == blk_count) \
138 #define MIN_SDMMC_FREQ 400000
140 /* Response error index for the SD Host Controller spec-defined
141  * errors listed in the next comment
143 #define RESP_ERROR_INDEX(x) ((x & SDHCI_INT_CRC) << 1 | \
144 (x & SDHCI_INT_TIMEOUT))
146 /* based on the SD Host controller spec these three errors are logged
147  * Command CRC Error    Command Timeout Error    Kind of error
149  *         0                      1              Response Timeout Error
150  *         1                      0              Response CRC Error
151  *         1                      1              CMD line conflict
153 static char *resp_error[4] = {"No error", "Response TIMEOUT error",
154 "Reaponse CRC error",
155 "CMD LINE CONFLICT error"};
156 static unsigned int debug_quirks;
157 static unsigned int debug_quirks2;
159 static void sdhci_finish_data(struct sdhci_host *);
161 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
162 static void sdhci_finish_command(struct sdhci_host *);
163 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
164 static int sdhci_validate_sd2_0(struct mmc_host *mmc);
165 static void sdhci_tuning_timer(unsigned long data);
166 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
168 #ifdef CONFIG_PM_RUNTIME
169 static int sdhci_runtime_pm_get(struct sdhci_host *host);
170 static int sdhci_runtime_pm_put(struct sdhci_host *host);
172 static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
176 static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
180 static inline int sdhci_runtime_resume_host(struct sdhci_host *host)
184 static inline int sdhci_runtime_suspend_host(struct sdhci_host *host)
190 static void sdhci_dumpregs(struct sdhci_host *host)
192 pr_err(DRIVER_NAME ": ================== REGISTER DUMP (%s)==================\n",
193 mmc_hostname(host->mmc));
195 pr_err(DRIVER_NAME ": Sys addr[0x%03x]: 0x%08x | Version[0x%03x]: 0x%08x\n",
196 SDHCI_DMA_ADDRESS, sdhci_readl(host, SDHCI_DMA_ADDRESS),
197 SDHCI_HOST_VERSION, sdhci_readw(host, SDHCI_HOST_VERSION));
198 pr_err(DRIVER_NAME ": Blk size[0x%03x]: 0x%08x | Blk cnt[0x%03x]: 0x%08x\n",
199 SDHCI_BLOCK_SIZE, sdhci_readw(host, SDHCI_BLOCK_SIZE),
200 (host->version > SDHCI_SPEC_400) ? SDHCI_BLOCK_COUNT_32BIT :
201 SDHCI_BLOCK_COUNT, (host->version > SDHCI_SPEC_400) ?
202 sdhci_readw(host, SDHCI_BLOCK_COUNT_32BIT) :
203 sdhci_readw(host, SDHCI_BLOCK_COUNT));
204 pr_err(DRIVER_NAME ": Argument[0x%03x]: 0x%08x | Trn mode[0x%03x]: 0x%08x\n",
205 SDHCI_ARGUMENT, sdhci_readl(host, SDHCI_ARGUMENT),
206 SDHCI_TRANSFER_MODE, sdhci_readw(host, SDHCI_TRANSFER_MODE));
207 pr_err(DRIVER_NAME ": Present[0x%03x]: 0x%08x | Host ctl[0x%03x]: 0x%08x\n",
208 SDHCI_PRESENT_STATE, sdhci_readl(host, SDHCI_PRESENT_STATE),
209 SDHCI_HOST_CONTROL, sdhci_readb(host, SDHCI_HOST_CONTROL));
210 pr_err(DRIVER_NAME ": Power[0x%03x]: 0x%08x | Blk gap[0x%03x]: 0x%08x\n",
211 SDHCI_POWER_CONTROL, sdhci_readb(host, SDHCI_POWER_CONTROL),
212 SDHCI_BLOCK_GAP_CONTROL, sdhci_readb(host,
213 SDHCI_BLOCK_GAP_CONTROL));
214 pr_err(DRIVER_NAME ": Wake-up[0x%03x]: 0x%08x | Clock[0x%03x]: 0x%08x\n",
215 SDHCI_WAKE_UP_CONTROL, sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
216 SDHCI_CLOCK_CONTROL, sdhci_readw(host, SDHCI_CLOCK_CONTROL));
217 pr_err(DRIVER_NAME ": Timeout[0x%03x]: 0x%08x | Int stat[0x%03x]: 0x%08x\n",
218 SDHCI_TIMEOUT_CONTROL, sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
219 SDHCI_INT_STATUS, sdhci_readl(host, SDHCI_INT_STATUS));
220 pr_err(DRIVER_NAME ": Int enab[0x%03x]: 0x%08x | Sig enab[0x%03x]: 0x%08x\n",
221 SDHCI_INT_ENABLE, sdhci_readl(host, SDHCI_INT_ENABLE),
222 SDHCI_SIGNAL_ENABLE, sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
223 pr_err(DRIVER_NAME ": AC12 err[0x%03x]: 0x%08x | Slot int[0x%03x]: 0x%08x\n",
224 SDHCI_ACMD12_ERR, sdhci_readw(host, SDHCI_ACMD12_ERR),
225 SDHCI_SLOT_INT_STATUS, sdhci_readw(host,
226 SDHCI_SLOT_INT_STATUS));
227 pr_err(DRIVER_NAME ": Caps[0x%03x]: 0x%08x | Caps_1[0x%03x]: 0x%08x\n",
228 SDHCI_CAPABILITIES, sdhci_readl(host, SDHCI_CAPABILITIES),
229 SDHCI_CAPABILITIES_1, sdhci_readl(host, SDHCI_CAPABILITIES_1));
230 pr_err(DRIVER_NAME ": Cmd[0x%03x]: 0x%08x | Max curr[0x%03x]: 0x%08x\n",
231 SDHCI_COMMAND, sdhci_readw(host, SDHCI_COMMAND),
232 SDHCI_MAX_CURRENT, sdhci_readl(host, SDHCI_MAX_CURRENT));
233 pr_err(DRIVER_NAME ": Host ctl2[0x%03x]: 0x%08x\n",
234 SDHCI_HOST_CONTROL2, sdhci_readw(host, SDHCI_HOST_CONTROL2));
236 if (host->flags & SDHCI_USE_ADMA)
237 pr_err(DRIVER_NAME ": ADMA Err[0x%03x]: 0x%08x | ADMA Ptr[0x%03x]: 0x%08x\n",
238 SDHCI_ADMA_ERROR, readl(host->ioaddr + SDHCI_ADMA_ERROR),
239 SDHCI_ADMA_ADDRESS, readl(host->ioaddr +
240 SDHCI_ADMA_ADDRESS));
242 if (host->ops->dump_host_cust_regs)
243 host->ops->dump_host_cust_regs(host);
245 pr_err(DRIVER_NAME ": =========================================================\n");
248 /*****************************************************************************\
250 * Low level functions *
252 \*****************************************************************************/
254 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
258 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
259 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
262 static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
264 sdhci_clear_set_irqs(host, 0, irqs);
267 static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
269 sdhci_clear_set_irqs(host, irqs, 0);
272 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
276 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
277 (host->mmc->caps & MMC_CAP_NONREMOVABLE))
280 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
282 irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
285 sdhci_unmask_irqs(host, irqs);
287 sdhci_mask_irqs(host, irqs);
290 static void sdhci_enable_card_detection(struct sdhci_host *host)
292 sdhci_set_card_detection(host, true);
295 static void sdhci_disable_card_detection(struct sdhci_host *host)
297 sdhci_set_card_detection(host, false);
300 static void sdhci_reset(struct sdhci_host *host, u8 mask)
303 unsigned long timeout;
305 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
306 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
311 if (host->ops->platform_reset_enter)
312 host->ops->platform_reset_enter(host, mask);
314 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
316 if (mask & SDHCI_RESET_ALL)
319 /* Wait max 100 ms */
322 /* hw clears the bit when it's done */
323 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
325 pr_err("%s: Reset 0x%x never completed.\n",
326 mmc_hostname(host->mmc), (int)mask);
327 sdhci_dumpregs(host);
334 if (host->ops->platform_reset_exit)
335 host->ops->platform_reset_exit(host, mask);
337 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
338 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, host->ier);
340 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
341 if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
342 host->ops->enable_dma(host);
346 * The VERSION_4_EN and 64BIT_EN bits are cleared after a full reset,
347 * so they need to be re-configured after each full reset
349 if ((mask & SDHCI_RESET_ALL) && host->version >= SDHCI_SPEC_400) {
350 ctrl = sdhci_readl(host, SDHCI_ACMD12_ERR);
351 ctrl |= SDHCI_HOST_VERSION_4_EN;
352 if (host->quirks2 & SDHCI_QUIRK2_SUPPORT_64BIT_DMA)
353 ctrl |= SDHCI_ADDRESSING_64BIT_EN;
354 sdhci_writel(host, ctrl, SDHCI_ACMD12_ERR);
358 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
360 static void sdhci_init(struct sdhci_host *host, int soft)
363 sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
365 sdhci_reset(host, SDHCI_RESET_ALL);
367 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
368 SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
369 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
370 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
371 SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
374 /* force clock reconfiguration */
376 sdhci_set_ios(host->mmc, &host->mmc->ios);
380 static void sdhci_reinit(struct sdhci_host *host)
384 * When tuning mode 1 is selected, the max_block_count value is limited
385 * to 4MB as per the host specification. Default max_blk_count for a
386 * host is defined in the spec and this value should be set during
389 if (host->flags & SDHCI_USING_RETUNING_TIMER) {
390 host->flags &= ~SDHCI_USING_RETUNING_TIMER;
392 del_timer_sync(&host->tuning_timer);
393 host->flags &= ~SDHCI_NEEDS_RETUNING;
394 if (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK)
395 host->mmc->max_blk_count = 1;
397 host->mmc->max_blk_count =
398 (host->version > SDHCI_SPEC_400) ?
399 ((1ULL << BLOCK_COUNT_32BIT) - 1) :
400 ((1 << BLOCK_COUNT_16BIT) - 1);
402 sdhci_enable_card_detection(host);
405 static void sdhci_activate_led(struct sdhci_host *host)
409 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
410 ctrl |= SDHCI_CTRL_LED;
411 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
414 static void sdhci_deactivate_led(struct sdhci_host *host)
418 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
419 ctrl &= ~SDHCI_CTRL_LED;
420 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
423 #ifdef SDHCI_USE_LEDS_CLASS
424 static void sdhci_led_control(struct led_classdev *led,
425 enum led_brightness brightness)
427 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
430 spin_lock_irqsave(&host->lock, flags);
432 if (host->runtime_suspended)
435 if (brightness == LED_OFF)
436 sdhci_deactivate_led(host);
438 sdhci_activate_led(host);
440 spin_unlock_irqrestore(&host->lock, flags);
444 /*****************************************************************************\
448 \*****************************************************************************/
450 static void sdhci_read_block_pio(struct sdhci_host *host)
453 size_t blksize, len, chunk;
454 u32 uninitialized_var(scratch);
457 DBG("PIO reading\n");
459 blksize = host->data->blksz;
462 local_irq_save(flags);
465 if (!sg_miter_next(&host->sg_miter))
468 len = min(host->sg_miter.length, blksize);
471 host->sg_miter.consumed = len;
473 buf = host->sg_miter.addr;
477 scratch = sdhci_readl(host, SDHCI_BUFFER);
481 *buf = scratch & 0xFF;
490 sg_miter_stop(&host->sg_miter);
492 local_irq_restore(flags);
495 static void sdhci_write_block_pio(struct sdhci_host *host)
498 size_t blksize, len, chunk;
502 DBG("PIO writing\n");
504 blksize = host->data->blksz;
508 local_irq_save(flags);
511 if (!sg_miter_next(&host->sg_miter))
514 len = min(host->sg_miter.length, blksize);
517 host->sg_miter.consumed = len;
519 buf = host->sg_miter.addr;
522 scratch |= (u32)*buf << (chunk * 8);
528 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
529 sdhci_writel(host, scratch, SDHCI_BUFFER);
536 sg_miter_stop(&host->sg_miter);
538 local_irq_restore(flags);
541 static void sdhci_transfer_pio(struct sdhci_host *host)
547 if (host->data->flags & MMC_DATA_READ)
548 mask = SDHCI_DATA_AVAILABLE;
550 mask = SDHCI_SPACE_AVAILABLE;
553 * Some controllers (JMicron JMB38x) mess up the buffer bits
554 * for transfers < 4 bytes. As long as it is just one block,
555 * we can ignore the bits.
557 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
558 (host->data->blocks == 1))
562 * Start the transfer if the present state register indicates
563 * SDHCI_DATA_AVAILABLE or SDHCI_SPACE_AVAILABLE. The driver should
564 * transfer one complete block of data and wait for the buffer ready
565 * interrupt to transfer the next block of data.
567 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
568 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
571 if (host->data->flags & MMC_DATA_READ)
572 sdhci_read_block_pio(host);
574 sdhci_write_block_pio(host);
577 DBG("PIO transfer complete.\n");
580 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
582 local_irq_save(*flags);
583 return kmap_atomic(sg_page(sg)) + sg->offset;
586 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
588 kunmap_atomic(buffer);
589 local_irq_restore(*flags);
591 /* container to handle DMA bus widths of 32/64 bit */
593 union sdhci_dma_addr_t {
598 static void sdhci_set_adma_desc(struct sdhci_host *host, u8 *desc,
599 dma_addr_t addr, int len, unsigned cmd)
601 __le32 *dataddr = (__le32 __force *)(desc + 4);
602 __le64 *dataddr64 = (__le64 __force *)(desc + 4);
603 __le16 *cmdlen = (__le16 __force *)desc;
605 union sdhci_dma_addr_t dma_addr;
608 /* SDHCI specification says ADMA descriptors should be 4 byte
609 * aligned, so using 16 or 32bit operations should be safe. */
611 cmdlen[0] = cpu_to_le16(cmd);
612 cmdlen[1] = cpu_to_le16(len);
614 ctrl = sdhci_readl(host, SDHCI_ACMD12_ERR);
615 if (ctrl & SDHCI_ADDRESSING_64BIT_EN)
616 dataddr64[0] = cpu_to_le64(addr);
618 BUG_ON(dma_addr.a >> 32);
619 dataddr[0] = cpu_to_le32(addr);
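/*
 * For reference, the descriptor written above follows the ADMA2 layout
 * from the SDHCI specification: bytes 0-1 hold the attribute/command word
 * (e.g. 0x21 = "Tran" action with the Valid bit, 0x03 = "Nop" with End
 * and Valid), bytes 2-3 hold the 16-bit length, and bytes 4-7 hold the
 * 32-bit data address; with SDHCI_ADDRESSING_64BIT_EN the address field
 * is widened to bytes 4-11.
 */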
623 static int sdhci_adma_table_pre(struct sdhci_host *host,
624 struct mmc_data *data)
631 dma_addr_t align_addr;
634 struct scatterlist *sg;
642 * The spec does not specify endianness of descriptor table.
643 * We currently guess that it is LE.
646 if (data->flags & MMC_DATA_READ)
647 direction = DMA_FROM_DEVICE;
649 direction = DMA_TO_DEVICE;
652 * The ADMA descriptor table is mapped further down as we
653 * need to fill it with data first.
656 if (!host->use_dma_alloc) {
657 host->align_addr = dma_map_single(mmc_dev(host->mmc),
658 host->align_buffer, 128 * 8, direction);
659 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
661 BUG_ON(host->align_addr & 0x3);
664 host->sg_count = dma_map_sg(mmc_dev(host->mmc),
665 data->sg, data->sg_len, direction);
666 if (host->sg_count == 0)
669 desc = host->adma_desc;
670 align = host->align_buffer;
672 align_addr = host->align_addr;
674 ctrl = sdhci_readl(host, SDHCI_ACMD12_ERR);
675 if (ctrl & SDHCI_ADDRESSING_64BIT_EN) {
676 if (ctrl & SDHCI_HOST_VERSION_4_EN)
681 /* 32 bit DMA mode supported */
685 for_each_sg(data->sg, sg, host->sg_count, i) {
686 addr = sg_dma_address(sg);
687 len = sg_dma_len(sg);
690 * The SDHCI specification states that ADMA
691 * addresses must be 32-bit aligned. If they
692 * aren't, then we use a bounce buffer for
693 * the (up to three) bytes that screw up the
696 offset = (4 - (addr & 0x3)) & 0x3;
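/*
 * Worked example: a scatterlist entry starting at an address ending in
 * ...01 gives offset = (4 - 1) & 3 = 3, so the first three bytes go
 * through the bounce buffer and the main descriptor starts on the next
 * 4-byte boundary; an already aligned address gives offset = 0.
 */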
698 if (data->flags & MMC_DATA_WRITE) {
699 buffer = sdhci_kmap_atomic(sg, &flags);
700 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
701 memcpy(align, buffer, offset);
702 sdhci_kunmap_atomic(buffer, &flags);
706 sdhci_set_adma_desc(host, desc, align_addr, offset,
709 BUG_ON(offset > 65536);
724 sdhci_set_adma_desc(host, desc, addr, len, 0x21);
729 * If this triggers then we have a calculation bug
732 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 8);
735 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
737 * Mark the last descriptor as the terminating descriptor
739 if (desc != host->adma_desc) {
741 desc[0] |= 0x3; /* end and valid */
745 * Add a terminating entry.
748 /* nop, end, valid */
749 sdhci_set_adma_desc(host, desc, 0, 0, 0x3);
753 * Resync align buffer as we might have changed it.
755 if (data->flags & MMC_DATA_WRITE) {
756 dma_sync_single_for_device(mmc_dev(host->mmc),
757 host->align_addr, 128 * 8, direction);
760 if (!host->use_dma_alloc) {
761 host->adma_addr = dma_map_single(mmc_dev(host->mmc),
762 host->adma_desc, (128 * 2 + 1) * 8, DMA_TO_DEVICE);
763 if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
765 BUG_ON(host->adma_addr & 0x3);
771 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
772 data->sg_len, direction);
774 if (!host->use_dma_alloc)
775 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
781 static void sdhci_adma_table_post(struct sdhci_host *host,
782 struct mmc_data *data)
786 struct scatterlist *sg;
792 if (data->flags & MMC_DATA_READ)
793 direction = DMA_FROM_DEVICE;
795 direction = DMA_TO_DEVICE;
797 if (!host->use_dma_alloc) {
798 dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
799 (128 * 2 + 1) * 8, DMA_TO_DEVICE);
801 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
805 if (data->flags & MMC_DATA_READ) {
806 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
807 data->sg_len, direction);
809 align = host->align_buffer;
811 for_each_sg(data->sg, sg, host->sg_count, i) {
812 if (sg_dma_address(sg) & 0x3) {
813 size = 4 - (sg_dma_address(sg) & 0x3);
815 buffer = sdhci_kmap_atomic(sg, &flags);
816 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
817 memcpy(buffer, align, size);
818 sdhci_kunmap_atomic(buffer, &flags);
825 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
826 data->sg_len, direction);
829 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
832 struct mmc_data *data = cmd->data;
833 unsigned target_timeout, current_timeout;
836 * If the host controller provides us with an incorrect timeout
837 * value, just skip the check and use 0xE. The hardware may take
838 * longer to time out, but that's much better than having a too-short
841 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
844 /* Unspecified timeout, assume max */
845 if (!data && !cmd->cmd_timeout_ms)
850 target_timeout = cmd->cmd_timeout_ms * 1000;
852 target_timeout = data->timeout_ns / 1000;
854 target_timeout += data->timeout_clks / host->clock;
858 * Figure out needed cycles.
859 * We do this in steps in order to fit inside a 32 bit int.
860 * The first step is the minimum timeout, which will have a
861 * minimum resolution of 6 bits:
862 * (1) 2^13*1000 > 2^22,
863 * (2) host->timeout_clk < 2^16
868 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
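/*
 * Illustrative example (clock value assumed, not taken from this
 * platform): with host->timeout_clk = 52000 kHz the minimum timeout is
 * 2^13 * 1000 / 52000 ~= 157 us. For a 100 ms target the loop below
 * doubles it 10 times, giving count = 10, i.e. a hardware timeout of
 * TMCLK * 2^(13 + 10) ~= 161 ms, the smallest setting covering the
 * request.
 */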
869 while (current_timeout < target_timeout) {
871 current_timeout <<= 1;
877 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
878 mmc_hostname(host->mmc), count, cmd->opcode);
885 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
887 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
888 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
890 if (host->flags & SDHCI_REQ_USE_DMA)
891 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
893 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
896 static void sdhci_determine_transfer_mode(struct sdhci_host *host,
897 unsigned int req_size, unsigned int req_blocks)
899 /* Nothing to do if DMA modes are not supported. */
900 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
901 host->flags &= ~SDHCI_REQ_USE_DMA;
902 } else if (!host->max_pio_size || (req_size > host->max_pio_size)) {
903 host->flags |= SDHCI_REQ_USE_DMA;
904 } else if (req_size < host->max_pio_size) {
905 host->flags &= ~SDHCI_REQ_USE_DMA;
906 if (host->max_pio_blocks &&
907 (req_blocks > host->max_pio_blocks))
908 host->flags |= SDHCI_REQ_USE_DMA;
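/*
 * Illustrative behaviour (threshold values are assumptions; the platform
 * driver sets max_pio_size and max_pio_blocks): with max_pio_size = 16 KiB
 * and max_pio_blocks = 4, a 4 KiB single-block request is driven by PIO,
 * while a 64 KiB request, or a small request spanning more than 4 blocks,
 * uses DMA.
 */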
912 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
916 struct mmc_data *data = cmd->data;
918 union sdhci_dma_addr_t dma_addr;
920 if (!MMC_CHECK_CMDQ_MODE(host))
923 if (data || (cmd->flags & MMC_RSP_BUSY)) {
924 count = sdhci_calc_timeout(host, cmd);
925 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
932 BUG_ON(data->blksz * data->blocks > 524288);
933 BUG_ON(data->blksz > host->mmc->max_blk_size);
934 BUG_ON(data->blocks > 65535);
937 host->data_early = 0;
938 host->data->bytes_xfered = 0;
940 /* Select DMA or PIO mode for the transfer */
941 sdhci_determine_transfer_mode(host, data->blksz * data->blocks,
945 * FIXME: This doesn't account for merging when mapping the
948 if (host->flags & SDHCI_REQ_USE_DMA) {
950 struct scatterlist *sg;
953 if (host->flags & SDHCI_USE_ADMA) {
954 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
957 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
961 if (unlikely(broken)) {
962 for_each_sg(data->sg, sg, data->sg_len, i) {
963 if (sg->length & 0x3) {
964 DBG("Reverting to PIO because of "
965 "transfer size (%d)\n",
967 host->flags &= ~SDHCI_REQ_USE_DMA;
975 * The assumption here being that alignment is the same after
976 * translation to device address space.
978 if (host->flags & SDHCI_REQ_USE_DMA) {
980 struct scatterlist *sg;
983 if (host->flags & SDHCI_USE_ADMA) {
985 * As we use 3 byte chunks to work around
986 * alignment problems, we need to check this
989 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
992 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
996 if (unlikely(broken)) {
997 for_each_sg(data->sg, sg, data->sg_len, i) {
998 if (sg->offset & 0x3) {
999 DBG("Reverting to PIO because of "
1001 host->flags &= ~SDHCI_REQ_USE_DMA;
1008 if (host->flags & SDHCI_REQ_USE_DMA) {
1009 if (host->flags & SDHCI_USE_ADMA) {
1010 ret = sdhci_adma_table_pre(host, data);
1013 * This only happens when someone fed
1014 * us an invalid request.
1017 host->flags &= ~SDHCI_REQ_USE_DMA;
1020 (host->adma_addr & 0xFFFFFFFF),
1021 SDHCI_ADMA_ADDRESS);
1023 if ((host->version >= SDHCI_SPEC_400) &&
1025 SDHCI_QUIRK2_SUPPORT_64BIT_DMA)) {
1027 SDHCI_QUIRK2_USE_64BIT_ADDR) {
1028 dma_addr.b = host->adma_addr;
1032 SDHCI_UPPER_ADMA_ADDRESS);
1034 sdhci_writel(host, 0,
1035 SDHCI_UPPER_ADMA_ADDRESS);
1042 sg_cnt = dma_map_sg(mmc_dev(host->mmc),
1043 data->sg, data->sg_len,
1044 (data->flags & MMC_DATA_READ) ?
1049 * This only happens when someone fed
1050 * us an invalid request.
1053 host->flags &= ~SDHCI_REQ_USE_DMA;
1055 WARN_ON(sg_cnt != 1);
1056 sdhci_writel(host, sg_dma_address(data->sg),
1063 * Always adjust the DMA selection as some controllers
1064 * (e.g. JMicron) can't do PIO properly when the selection
1067 if (host->version >= SDHCI_SPEC_200) {
1068 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1069 ctrl &= ~SDHCI_CTRL_DMA_MASK;
1070 if ((host->flags & SDHCI_REQ_USE_DMA) &&
1071 (host->flags & SDHCI_USE_ADMA))
1072 ctrl |= SDHCI_CTRL_ADMA2;
1074 ctrl |= SDHCI_CTRL_SDMA;
1075 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1078 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1081 flags = SG_MITER_ATOMIC;
1082 if (host->data->flags & MMC_DATA_READ)
1083 flags |= SG_MITER_TO_SG;
1085 flags |= SG_MITER_FROM_SG;
1086 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1087 host->blocks = data->blocks;
1090 sdhci_set_transfer_irqs(host);
1092 /* Set the DMA boundary value and block size */
1093 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
1094 data->blksz), SDHCI_BLOCK_SIZE);
1095 if (host->version > SDHCI_SPEC_400)
1096 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT_32BIT);
1098 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1101 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1102 struct mmc_command *cmd)
1105 struct mmc_data *data = cmd->data;
1110 WARN_ON(!host->data);
1112 mode = SDHCI_TRNS_BLK_CNT_EN;
1113 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1114 mode |= SDHCI_TRNS_MULTI;
1116 * If we are sending CMD23, CMD12 never gets sent
1117 * on successful completion (so no Auto-CMD12).
1119 if (!MMC_CHECK_CMDQ_MODE(host)) {
1120 if (!host->mrq_cmd->sbc &&
1121 (host->flags & SDHCI_AUTO_CMD12) &&
1122 mmc_op_multi(cmd->opcode))
1123 mode |= SDHCI_TRNS_AUTO_CMD12;
1124 else if (host->mrq_cmd->sbc &&
1125 (host->flags & SDHCI_AUTO_CMD23)) {
1126 mode |= SDHCI_TRNS_AUTO_CMD23;
1128 host->mrq_cmd->sbc->arg,
1134 if (data->flags & MMC_DATA_READ)
1135 mode |= SDHCI_TRNS_READ;
1136 if (host->flags & SDHCI_REQ_USE_DMA)
1137 mode |= SDHCI_TRNS_DMA;
1139 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1142 #ifdef CONFIG_DEBUG_FS
1143 static void sdhci_div32(
1144 u32 size_in_bits_x1000, u32 time_usecs,
1147 *speed_in_kbps = DIV_ROUND_CLOSEST(size_in_bits_x1000, time_usecs);
1150 static void sdhci_div64(
1151 u64 size_in_bits_x1000, u64 time_usecs,
1156 /* convert 64 bit into 32 bits */
1158 while (!(IS_32_BIT(size_in_bits_x1000) && IS_32_BIT(time_usecs))) {
1159 /* shift both operands (size and time) right */
1160 size_in_bits_x1000 >>= 1;
1165 pr_debug("%s right shifted operands by %d, size=%lld, time=%lld usec\n",
1166 __func__, i, size_in_bits_x1000, time_usecs);
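/*
 * Both operands are halved in lockstep, so their ratio (and therefore the
 * kbps result) is preserved to within rounding before the 32-bit division
 * below.
 */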
1167 /* check for 32 bit operations first */
1169 (u32)size_in_bits_x1000, (u32)time_usecs,
1174 static void free_stats_nodes(struct sdhci_host *host)
1176 struct data_stat_entry *ptr, *ptr2;
1178 ptr = host->sdhci_data_stat.head;
1181 host->sdhci_data_stat.stat_size--;
1182 devm_kfree(host->mmc->parent, ptr);
1185 if (host->sdhci_data_stat.stat_size)
1186 pr_err("stat_size=%d after free %s\n",
1187 host->sdhci_data_stat.stat_size,
1189 host->sdhci_data_stat.head = NULL;
1192 static struct data_stat_entry *add_entry_sorted(struct sdhci_host *host,
1193 unsigned int blk_size, unsigned int blk_count,
1194 unsigned int data_flags)
1196 struct data_stat_entry *node, *ptr;
1200 pr_err("%s %s: call blk_size=%d, blk_count=%d, data_flags=0x%x\n",
1201 mmc_hostname(host->mmc), __func__,
1202 blk_size, blk_count, data_flags);
1206 node = devm_kzalloc(host->mmc->parent, sizeof(struct data_stat_entry),
1209 pr_err("%s, %s, line=%d %s: unable to allocate data_stat_entry\n",
1210 __FILE__, __func__, __LINE__, mmc_hostname(host->mmc));
1213 node->stat_blk_size = blk_size;
1214 node->stat_blks_per_transfer = blk_count;
1215 is_read = IS_DATA_READ(data_flags);
1216 node->is_read = is_read;
1217 host->sdhci_data_stat.stat_size++;
1218 /* assume the existing list is sorted and insert this new node
1219  * so that the list stays sorted in increasing order
1221 ptr = host->sdhci_data_stat.head;
1224 host->sdhci_data_stat.head = node;
1227 if (ptr && ((ptr->stat_blk_size > blk_size) ||
1228 ((ptr->stat_blk_size == blk_size) &&
1229 (ptr->stat_blks_per_transfer > blk_count)))) {
1230 host->sdhci_data_stat.head = node;
1231 /* update new head */
1236 if ((ptr->next->stat_blk_size < blk_size) ||
1237 ((ptr->next->stat_blk_size == blk_size) &&
1238 (ptr->next->stat_blks_per_transfer < blk_count)))
1244 * 1. ptr->next is null or
1245 * 2. blk_size of ptr->next is greater than new blk size, so we should
1246 * place the new node between ptr and ptr->next
1252 if ((ptr->next->stat_blk_size > blk_size) ||
1253 ((ptr->next->stat_blk_size == blk_size) &&
1254 (ptr->next->stat_blks_per_transfer > blk_count)) ||
1255 ((ptr->next->stat_blk_size == blk_size) &&
1256 (ptr->next->stat_blks_per_transfer == blk_count) &&
1257 (ptr->next->is_read != is_read))) {
1258 node->next = ptr->next;
1262 pr_err("%s %s: line=%d should be unreachable ptr-next->blk_size=%d, blks_per_xfer=%d, is_read=%d, new blk_size=%d, blks_per_xfer=%d, data_flags=0x%x\n",
1263 mmc_hostname(host->mmc), __func__, __LINE__,
1264 ptr->next->stat_blk_size, ptr->next->stat_blks_per_transfer,
1265 ptr->next->is_read, blk_size, blk_count, data_flags);
1270 static void free_data_entry(struct sdhci_host *host,
1271 unsigned int blk_size, unsigned int blk_count,
1272 unsigned int data_flags)
1274 struct data_stat_entry *ptr, *ptr2;
1277 ptr = host->sdhci_data_stat.head;
1280 is_read = IS_DATA_READ(data_flags);
1281 if (PERF_STAT_COMPARE(ptr, blk_size, blk_count, is_read)) {
1282 host->sdhci_data_stat.head = ptr->next;
1283 devm_kfree(host->mmc->parent, ptr);
1284 host->sdhci_data_stat.stat_size--;
1288 if (PERF_STAT_COMPARE(ptr->next, blk_size, blk_count,
1290 ptr2 = ptr->next->next;
1291 devm_kfree(host->mmc->parent, ptr->next);
1292 host->sdhci_data_stat.stat_size--;
1298 pr_err("Error %s %s: given blk_size=%d not found\n",
1299 mmc_hostname(host->mmc), __func__, blk_size);
1303 static void update_stat(struct sdhci_host *host, u32 blk_size, u32 blk_count,
1304 bool is_start_stat, bool is_data_error,
1305 unsigned int data_flags)
1308 struct data_stat_entry *stat;
1312 if (!host->enable_sdhci_perf_stats)
1316 pr_err("%s %s error stats case: blk_size=%d, blk_count=0, is_start_stat=%d, is_data_error=%d, data_flags=0x%x\n",
1317 mmc_hostname(host->mmc), __func__, blk_size,
1318 (int)is_start_stat, (int)is_data_error, data_flags);
1321 stat = host->sdhci_data_stat.head;
1322 is_read = IS_DATA_READ(data_flags);
1324 if (PERF_STAT_COMPARE(stat, blk_size, blk_count, is_read))
1328 /* allocation skipped in finish call */
1332 /* allocate an entry */
1333 stat = add_entry_sorted(host, blk_size, blk_count, data_flags);
1335 pr_err("%s %s line=%d: stat entry not found\n",
1336 mmc_hostname(host->mmc), __func__, __LINE__);
1341 if (is_start_stat) {
1342 stat->start_ktime = ktime_get();
1344 if (is_data_error) {
1345 pr_err("%s %s error stats case: blk_size=%d, blk_count=0, is_start_stat=%d, data Error case ... data_flags=0x%x\n",
1346 mmc_hostname(host->mmc), __func__, blk_size,
1347 (int)is_start_stat, data_flags);
1348 memset(&stat->start_ktime, 0, sizeof(ktime_t));
1349 if (!stat->total_bytes)
1350 free_data_entry(host, blk_size, blk_count,
1355 stat->duration_usecs = ktime_us_delta(t, stat->start_ktime);
1356 stat->current_transferred_bytes = (blk_size * blk_count);
1358 (((u32)stat->current_transferred_bytes << 3) * 1000),
1359 stat->duration_usecs,
1361 if (stat->max_kbps == 0) {
1362 stat->max_kbps = new_kbps;
1363 stat->min_kbps = new_kbps;
1365 if (new_kbps > stat->max_kbps)
1366 stat->max_kbps = new_kbps;
1367 if (new_kbps < stat->min_kbps)
1368 stat->min_kbps = new_kbps;
1370 /* update the total bytes figure for this entry */
1371 stat->total_usecs += stat->duration_usecs;
1372 stat->total_bytes += stat->current_transferred_bytes;
1373 stat->total_transfers++;
1380 static void sdhci_finish_data(struct sdhci_host *host)
1382 struct mmc_data *data;
1384 BUG_ON(!host->data);
1385 #ifdef CONFIG_CMD_DUMP
1386 if (IS_EMMC_CARD(host))
1387 dbg_add_host_log(host->mmc, 9, 9, (int)host->mrq_dat);
1393 if (host->flags & SDHCI_REQ_USE_DMA) {
1394 if (host->flags & SDHCI_USE_ADMA)
1395 sdhci_adma_table_post(host, data);
1397 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
1398 data->sg_len, (data->flags & MMC_DATA_READ) ?
1399 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1404 * The specification states that the block count register must
1405 * be updated, but it does not specify at what point in the
1406 * data flow. That makes the register entirely useless to read
1407 * back so we have to assume that nothing made it to the card
1408 * in the event of an error.
1411 data->bytes_xfered = 0;
1413 data->bytes_xfered = data->blksz * data->blocks;
1416 * Need to send CMD12 if -
1417 * a) open-ended multiblock transfer (no CMD23)
1418 * b) error in multiblock transfer
1422 (!MMC_CHECK_CMDQ_MODE(host) && !host->mrq_dat->sbc))) {
1425 * The controller needs a reset of internal state machines
1426 * upon error conditions.
1429 if (!MMC_CHECK_CMDQ_MODE(host))
1430 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1432 sdhci_reset(host, SDHCI_RESET_DATA);
1434 sdhci_send_command(host, data->stop);
1436 if (MMC_CHECK_CMDQ_MODE(host))
1437 tasklet_schedule(&host->finish_dat_tasklet);
1439 tasklet_schedule(&host->finish_tasklet);
1441 #ifdef CONFIG_DEBUG_FS
1442 if (data->bytes_xfered) {
1443 update_stat(host, data->blksz, data->blocks, false, false,
1446 host->no_data_transfer_count++;
1447 /* performance stats do not include data error cases */
1448 update_stat(host, data->blksz, data->blocks, false, true,
1454 static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1458 unsigned long timeout;
1464 /* Wait max 10 ms */
1467 if (!host->mrq_cmd && host->mrq_dat)
1468 host->mrq_cmd = host->mrq_dat;
1470 mask = SDHCI_CMD_INHIBIT;
1471 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
1472 mask |= SDHCI_DATA_INHIBIT;
1474 /* We shouldn't wait for data inhibit for stop commands, even
1475 though they might use busy signaling */
1476 if (host->mrq_cmd->data && (cmd == host->mrq_cmd->data->stop))
1477 mask &= ~SDHCI_DATA_INHIBIT;
1479 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1481 pr_err("%s: Controller never released "
1482 "inhibit bit(s).\n", mmc_hostname(host->mmc));
1483 sdhci_dumpregs(host);
1485 if (MMC_CHECK_CMDQ_MODE(host))
1486 tasklet_schedule(&host->finish_cmd_tasklet);
1488 tasklet_schedule(&host->finish_tasklet);
1495 if ((cmd->opcode == MMC_SWITCH) &&
1496 (((cmd->arg >> 16) & EXT_CSD_SANITIZE_START)
1497 == EXT_CSD_SANITIZE_START))
1502 mod_timer(&host->timer, jiffies + timeout * HZ);
1506 sdhci_prepare_data(host, cmd);
1508 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1510 sdhci_set_transfer_mode(host, cmd);
1512 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1513 pr_err("%s: Unsupported response type!\n",
1514 mmc_hostname(host->mmc));
1515 cmd->error = -EINVAL;
1516 if (MMC_CHECK_CMDQ_MODE(host))
1517 tasklet_schedule(&host->finish_cmd_tasklet);
1519 tasklet_schedule(&host->finish_tasklet);
1523 if (!(cmd->flags & MMC_RSP_PRESENT))
1524 flags = SDHCI_CMD_RESP_NONE;
1525 else if (cmd->flags & MMC_RSP_136)
1526 flags = SDHCI_CMD_RESP_LONG;
1527 else if (cmd->flags & MMC_RSP_BUSY)
1528 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1530 flags = SDHCI_CMD_RESP_SHORT;
1532 if (cmd->flags & MMC_RSP_CRC)
1533 flags |= SDHCI_CMD_CRC;
1534 if (cmd->flags & MMC_RSP_OPCODE)
1535 flags |= SDHCI_CMD_INDEX;
1537 /* CMD19 and CMD21 are special in that the Data Present Select should be set */
1538 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1539 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1540 flags |= SDHCI_CMD_DATA;
1542 #ifdef CONFIG_CMD_DUMP
1543 if (MMC_CHECK_CMDQ_MODE(host))
1544 dbg_add_host_log(host->mmc, 0, cmd->opcode, cmd->arg);
1546 #ifdef CONFIG_EMMC_BLKTRACE
1547 if (!MMC_CHECK_CMDQ_MODE(host)) {
1548 if (cmd->opcode == MMC_SET_BLOCK_COUNT)
1549 emmc_trace(MMC_ISSUE, host->mmc->mqrq_cur, host->mmc);
1550 else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
1551 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
1552 emmc_trace(MMC_ISSUE_DONE,
1553 host->mmc->mqrq_cur, host->mmc);
1555 if (cmd->opcode == MMC_QUEUED_TASK_ADDRESS)
1556 emmc_trace(MMC_ISSUE,
1557 &host->mmc->mq->mqrq[cmd->mrq->areq->mrq->cmd->arg >> 16],
1559 else if (cmd->opcode == MMC_EXECUTE_READ_TASK ||
1560 cmd->opcode == MMC_EXECUTE_WRITE_TASK)
1561 emmc_trace(MMC_ISSUE_DONE,
1562 &host->mmc->mq->mqrq[cmd->arg >> 16],
1566 if ((host->quirks2 & SDHCI_QUIRK2_PERIODIC_CALIBRATION) &&
1567 ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) ||
1568 (cmd->opcode == MMC_WRITE_BLOCK)) &&
1569 host->is_calibration_done) {
1570 cur_time = ktime_get();
1571 period_time = ktime_to_ms(ktime_sub(cur_time,
1573 if (period_time >= SDHCI_PERIODIC_CALIB_TIMEOUT)
1574 if (host->ops->switch_signal_voltage_exit)
1575 host->ops->switch_signal_voltage_exit(host,
1576 host->mmc->ios.signal_voltage);
1579 host->command = SDHCI_MAKE_CMD(cmd->opcode, flags);
1580 sdhci_writew(host, host->command, SDHCI_COMMAND);
1583 static void sdhci_finish_command(struct sdhci_host *host)
1587 BUG_ON(host->cmd == NULL);
1588 #ifdef CONFIG_CMD_DUMP
1589 if (IS_EMMC_CARD(host))
1590 dbg_add_host_log(host->mmc, 8, 8, (int)host->mrq_cmd);
1593 if (host->cmd->flags & MMC_RSP_PRESENT) {
1594 if (host->cmd->flags & MMC_RSP_136) {
1595 /* CRC is stripped so we need to do some shifting. */
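/*
 * (The 136-bit response is stored by the controller with the CRC byte
 * already stripped, so each 32-bit word read below is shifted left by 8
 * and its low byte is taken from the neighbouring response register byte
 * to rebuild the resp[] words in the layout the MMC core expects.)
 */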
1596 for (i = 0; i < 4; i++) {
1597 host->cmd->resp[i] = sdhci_readl(host,
1598 SDHCI_RESPONSE + (3-i)*4) << 8;
1600 host->cmd->resp[i] |=
1602 SDHCI_RESPONSE + (3-i)*4-1);
1605 host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1609 host->cmd->error = 0;
1611 #ifdef CONFIG_CMD_DUMP
1612 if (MMC_CHECK_CMDQ_MODE(host))
1613 dbg_add_host_log(host->mmc, 0,
1614 host->cmd->opcode, host->cmd->resp[0]);
1616 /* Finished CMD23, now send actual command. */
1617 if (host->cmd == host->mrq_cmd->sbc) {
1619 sdhci_send_command(host, host->mrq_cmd->cmd);
1622 /* Processed actual command. */
1623 if (host->cmd->data && host->data_early) {
1625 host->mrq_dat = host->mrq_cmd;
1626 host->mrq_cmd = NULL;
1627 sdhci_finish_data(host);
1630 if (!MMC_CHECK_CMDQ_MODE(host)) {
1631 if (!host->cmd->data)
1633 tasklet_schedule(&host->finish_tasklet);
1635 host->mrq_dat = host->mrq_cmd;
1636 host->mrq_cmd = NULL;
1640 } else if (!host->data_early) {
1641 if (!host->mrq_cmd->cmd->error &&
1642 !host->cmd->error && host->cmd->data) {
1644 host->mrq_dat = host->mrq_cmd;
1645 host->mrq_cmd = NULL;
1647 tasklet_schedule(&host->finish_cmd_tasklet);
1652 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1654 u16 ctrl, preset = 0;
1656 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1658 switch (ctrl & SDHCI_CTRL_UHS_MASK) {
1659 case SDHCI_CTRL_UHS_SDR12:
1660 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1662 case SDHCI_CTRL_UHS_SDR25:
1663 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1665 case SDHCI_CTRL_UHS_SDR50:
1666 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1668 case SDHCI_CTRL_UHS_SDR104:
1669 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1671 case SDHCI_CTRL_UHS_DDR50:
1672 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1675 pr_warn("%s: Invalid UHS-I mode selected\n",
1676 mmc_hostname(host->mmc));
1677 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
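/*
 * Per the SDHCI 3.00 specification, the preset value registers read above
 * pack an SDCLK frequency select value in bits [9:0], a clock generator
 * select bit in bit 10 and a driver strength select in bits [15:14];
 * sdhci_set_clock() and sdhci_do_set_ios() decode these fields through
 * the SDHCI_PRESET_* masks.
 */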
1683 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1685 int div = 0; /* Initialized for compiler warning */
1686 int real_div = div, clk_mul = 1;
1688 unsigned long timeout;
1691 if (clock && clock == host->clock)
1694 host->mmc->actual_clock = 0;
1696 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
1700 * If the entire clock control register is updated with zero, some
1701 * controllers might first update clock divisor fields and then update
1702 * the INT_CLK_EN and CARD_CLK_EN fields. Disable card clock first
1703 * to ensure there is no abnormal clock behavior.
1705 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1706 clk &= ~SDHCI_CLOCK_CARD_EN;
1707 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1709 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1714 if (host->version >= SDHCI_SPEC_300) {
1715 if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
1716 SDHCI_CTRL_PRESET_VAL_ENABLE) {
1719 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1720 pre_val = sdhci_get_preset_value(host);
1721 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1722 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1723 if (host->clk_mul &&
1724 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1725 clk = SDHCI_PROG_CLOCK_MODE;
1727 clk_mul = host->clk_mul;
1729 real_div = max_t(int, 1, div << 1);
1735 * Check if the Host Controller supports Programmable Clock
1738 if (host->clk_mul) {
1739 for (div = 1; div <= 1024; div++) {
1740 if ((host->max_clk * host->clk_mul / div)
1745 * Set Programmable Clock Mode in the Clock
1748 clk = SDHCI_PROG_CLOCK_MODE;
1750 clk_mul = host->clk_mul;
1753 /* Version 3.00 divisors must be a multiple of 2. */
1754 if (host->max_clk <= clock) {
1755 if (host->mmc->ios.timing ==
1756 MMC_TIMING_UHS_DDR50)
1761 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1763 if ((host->max_clk / div) <= clock)
1771 /* Version 2.00 divisors must be a power of 2. */
1772 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1773 if ((host->max_clk / div) <= clock)
1782 host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
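/*
 * Worked example (values assumed for illustration): on a v3.00 host with
 * host->max_clk = 104 MHz, no programmable clock (clk_mul = 1) and a
 * requested clock of 26 MHz, the divisor search stops at div = 4
 * (104 / 4 = 26 MHz), so actual_clock becomes 26 MHz and the divider
 * field programmed below encodes N = div / 2 = 2, matching the spec
 * formula SDCLK = base clock / (2 * N).
 */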
1784 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1785 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1786 << SDHCI_DIVIDER_HI_SHIFT;
1787 clk |= SDHCI_CLOCK_INT_EN;
1788 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1791 * On the Tegra3 sdmmc controller, the internal clock stable bit only
1792 * gets set after some other register write is done. To handle this,
1793 * do a dummy register write to the capabilities register if
1794 * SDHCI_QUIRK2_INT_CLK_STABLE_REQ_DUMMY_REG_WRITE is set.
1796 if (host->quirks2 & SDHCI_QUIRK2_INT_CLK_STABLE_REQ_DUMMY_REG_WRITE) {
1799 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
1801 sdhci_writel(host, caps, SDHCI_CAPABILITIES);
1804 /* Wait max 20 ms */
1806 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1807 & SDHCI_CLOCK_INT_STABLE)) {
1809 pr_err("%s: Internal clock never "
1810 "stabilised.\n", mmc_hostname(host->mmc));
1811 sdhci_dumpregs(host);
1818 clk |= SDHCI_CLOCK_CARD_EN;
1819 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1822 host->clock = clock;
1825 static inline void sdhci_update_clock(struct sdhci_host *host)
1829 clock = host->clock;
1831 if (host->ops->set_clock)
1832 host->ops->set_clock(host, clock);
1833 sdhci_set_clock(host, clock);
1836 static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
1840 if (power != (unsigned short)-1) {
1841 switch (1 << power) {
1842 case MMC_VDD_165_195:
1843 pwr = SDHCI_POWER_180;
1847 pwr = SDHCI_POWER_300;
1851 pwr = SDHCI_POWER_330;
1858 if (host->pwr == pwr)
1864 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1869 * Spec says that we should clear the power reg before setting
1870 * a new value. Some controllers don't seem to like this though.
1872 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1873 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1876 * At least the Marvell CaFe chip gets confused if we set the voltage
1877 * and turn on power at the same time, so set the voltage first.
1879 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1880 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1882 pwr |= SDHCI_POWER_ON;
1884 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1887 * Some controllers need an extra 10ms delay before they
1888 * can apply clock after applying power
1890 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1896 /* Execute DLL calibration once for MMC device if it is
1897 * enumerated in HS400 mode at 200MHz clock freq before
1898 * starting any data transfer.
1900 static void sdhci_post_init(struct mmc_host *mmc)
1902 struct sdhci_host *host;
1904 host = mmc_priv(mmc);
1906 sdhci_runtime_pm_get(host);
1907 if (host->ops->post_init)
1908 host->ops->post_init(host);
1909 sdhci_runtime_pm_put(host);
1911 /*****************************************************************************\
1915 \*****************************************************************************/
1917 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1919 struct sdhci_host *host;
1921 unsigned long flags;
1924 host = mmc_priv(mmc);
1926 #ifdef CONFIG_DEBUG_FS
1927 if (mrq->data && mrq->data->blocks)
1928 update_stat(host, mrq->data->blksz, mrq->data->blocks,
1929 true, false, mrq->data->flags);
1931 #ifndef CONFIG_MMC_CQ
1932 sdhci_runtime_pm_get(host);
1934 present = mmc_gpio_get_cd(host->mmc);
1936 spin_lock_irqsave(&host->lock, flags);
1938 WARN_ON(host->mrq_cmd != NULL);
1940 #ifndef SDHCI_USE_LEDS_CLASS
1941 sdhci_activate_led(host);
1945 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1946 * requests if Auto-CMD12 is enabled.
1948 if (!MMC_CHECK_CMDQ_MODE(host) && !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1950 mrq->data->stop = NULL;
1955 host->mrq_cmd = mrq;
1956 host->mrq_cmd->data_early = 0;
1959 * Firstly check card presence from cd-gpio. The return could
1960 * be one of the following possibilities:
1961 * negative: cd-gpio is not available
1962 * zero: cd-gpio is used, and card is removed
1963 * one: cd-gpio is used, and card is present
1966 /* If polling, assume that the card is always present. */
1967 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1968 if (host->ops->get_cd)
1969 present = host->ops->get_cd(host);
1973 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
1977 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1978 host->mrq_cmd->cmd->error = -ENOMEDIUM;
1979 if (MMC_CHECK_CMDQ_MODE(host))
1980 tasklet_schedule(&host->finish_cmd_tasklet);
1982 tasklet_schedule(&host->finish_tasklet);
1986 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1988 * Check if the re-tuning timer has already expired and there
1989 * is no on-going data transfer. If so, we need to execute
1990 * tuning procedure before sending command.
1992 if ((host->flags & SDHCI_NEEDS_RETUNING) &&
1993 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
1994 if (!mmc->need_tuning || !mmc->ready_tuning) {
1995 if (!mmc->need_tuning)
1996 mmc->need_tuning = 1;
2001 /* eMMC uses cmd21 but sd and sdio use cmd19 */
2003 mmc->card->type == MMC_TYPE_MMC ?
2004 MMC_SEND_TUNING_BLOCK_HS200 :
2005 MMC_SEND_TUNING_BLOCK;
2006 host->mrq_cmd = NULL;
2007 spin_unlock_irqrestore(&host->lock, flags);
2008 sdhci_execute_tuning(mmc, tuning_opcode);
2009 mmc->need_tuning = 0;
2010 mmc->ready_tuning = 0;
2011 spin_lock_irqsave(&host->lock, flags);
2014 /* Restore original mmc_request structure */
2015 host->mrq_cmd = mrq;
2019 /* For a data cmd, check for plat specific preparation */
2020 spin_unlock_irqrestore(&host->lock, flags);
2022 host->ops->platform_get_bus(host);
2023 spin_lock_irqsave(&host->lock, flags);
2025 if (!MMC_CHECK_CMDQ_MODE(host) &&
2026 (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)))
2027 sdhci_send_command(host, mrq->sbc);
2028 else if (MMC_CHECK_CMDQ_MODE(host) && mrq->sbc)
2029 sdhci_send_command(host, mrq->sbc);
2031 sdhci_send_command(host, mrq->cmd);
2036 spin_unlock_irqrestore(&host->lock, flags);
2039 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
2041 unsigned long flags;
2045 /* cancel delayed clk gate work */
2046 if (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
2047 cancel_delayed_work_sync(&host->delayed_clk_gate_wrk);
2049 /* Do any required preparations prior to setting ios */
2050 if (host->ops->platform_ios_config_enter)
2051 host->ops->platform_ios_config_enter(host, ios);
2053 spin_lock_irqsave(&host->lock, flags);
2055 if (host->flags & SDHCI_DEVICE_DEAD) {
2056 spin_unlock_irqrestore(&host->lock, flags);
2057 if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
2058 mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
2063 * Reset the chip on each power off.
2064 * Should clear out any weird states.
2066 if (ios->power_mode == MMC_POWER_OFF) {
2067 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2071 if (host->version >= SDHCI_SPEC_300 &&
2072 (ios->power_mode == MMC_POWER_UP))
2073 sdhci_enable_preset_value(host, false);
2075 if (ios->power_mode == MMC_POWER_OFF)
2076 vdd_bit = sdhci_set_power(host, -1);
2078 vdd_bit = sdhci_set_power(host, ios->vdd);
2080 if (host->vmmc && vdd_bit != -1) {
2081 spin_unlock_irqrestore(&host->lock, flags);
2082 mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
2083 spin_lock_irqsave(&host->lock, flags);
2086 sdhci_set_clock(host, ios->clock);
2088 if (host->ops->platform_send_init_74_clocks)
2089 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2092 * If your platform has 8-bit width support but is not a v3 controller,
2093 * or if it requires special setup code, you should implement that in
2094 * platform_bus_width().
2096 if (host->ops->platform_bus_width) {
2097 host->ops->platform_bus_width(host, ios->bus_width);
2099 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2100 if (ios->bus_width == MMC_BUS_WIDTH_8) {
2101 ctrl &= ~SDHCI_CTRL_4BITBUS;
2102 if (host->version >= SDHCI_SPEC_300)
2103 ctrl |= SDHCI_CTRL_8BITBUS;
2105 if (host->version >= SDHCI_SPEC_300)
2106 ctrl &= ~SDHCI_CTRL_8BITBUS;
2107 if (ios->bus_width == MMC_BUS_WIDTH_4)
2108 ctrl |= SDHCI_CTRL_4BITBUS;
2110 ctrl &= ~SDHCI_CTRL_4BITBUS;
2112 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2115 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2117 if ((ios->timing == MMC_TIMING_SD_HS ||
2118 ios->timing == MMC_TIMING_MMC_HS)
2119 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
2120 ctrl |= SDHCI_CTRL_HISPD;
2122 ctrl &= ~SDHCI_CTRL_HISPD;
2124 if (host->version >= SDHCI_SPEC_300) {
2127 /* In case of UHS-I modes, set High Speed Enable */
2128 if (((ios->timing == MMC_TIMING_MMC_HS200) ||
2129 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2130 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2131 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2132 (ios->timing == MMC_TIMING_UHS_SDR25))
2133 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
2134 ctrl |= SDHCI_CTRL_HISPD;
2136 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2137 if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
2138 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2140 * We only need to set Driver Strength if the
2141 * preset value enable is not set.
2143 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2144 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2145 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2146 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2147 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2149 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2152 * According to SDHC Spec v3.00, if the Preset Value
2153 * Enable in the Host Control 2 register is set, we
2154 * need to reset SD Clock Enable before changing High
2155 * Speed Enable to avoid generating clock glitches.
2158 /* Reset SD Clock Enable */
2159 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2160 clk &= ~SDHCI_CLOCK_CARD_EN;
2161 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2163 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2165 /* Re-enable SD Clock */
2166 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2167 clk |= SDHCI_CLOCK_CARD_EN;
2168 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2172 /* Reset SD Clock Enable */
2173 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2174 clk &= ~SDHCI_CLOCK_CARD_EN;
2175 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2177 if (host->ops->set_uhs_signaling)
2178 host->ops->set_uhs_signaling(host, ios->timing);
2180 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2181 /* Select Bus Speed Mode for host */
2182 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2183 if (ios->timing == MMC_TIMING_MMC_HS200)
2184 ctrl_2 |= SDHCI_CTRL_HS_SDR200;
2185 else if (ios->timing == MMC_TIMING_UHS_SDR12)
2186 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2187 else if (ios->timing == MMC_TIMING_UHS_SDR25)
2188 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2189 else if (ios->timing == MMC_TIMING_UHS_SDR50)
2190 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2191 else if (ios->timing == MMC_TIMING_UHS_SDR104)
2192 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2193 else if (ios->timing == MMC_TIMING_UHS_DDR50)
2194 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2195 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2198 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2199 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
2200 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2201 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2202 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2203 (ios->timing == MMC_TIMING_UHS_DDR50))) {
2206 sdhci_enable_preset_value(host, true);
2207 preset = sdhci_get_preset_value(host);
2208 ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
2209 >> SDHCI_PRESET_DRV_SHIFT;
2212 /* Re-enable SD Clock */
2213 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2214 clk |= SDHCI_CLOCK_CARD_EN;
2215 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2217 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2220 * Some (ENE) controllers go apeshit on some ios operation,
2221 * signalling timeout and CRC errors even on CMD0. Resetting
2222 * it on each ios seems to solve the problem.
2224 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2225 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2228 spin_unlock_irqrestore(&host->lock, flags);
2230 /* Platform specific handling post ios setting */
2231 if (host->ops->platform_ios_config_exit)
2232 host->ops->platform_ios_config_exit(host, ios);
2236 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2238 struct sdhci_host *host = mmc_priv(mmc);
2239 #ifndef CONFIG_MMC_CQ
2240 sdhci_runtime_pm_get(host);
2242 sdhci_do_set_ios(host, ios);
2243 #ifndef CONFIG_MMC_CQ
2244 sdhci_runtime_pm_put(host);
2248 static int sdhci_do_get_cd(struct sdhci_host *host)
2250 int gpio_cd = mmc_gpio_get_cd(host->mmc);
2252 if (host->flags & SDHCI_DEVICE_DEAD)
2255 /* If polling/nonremovable, assume that the card is always present. */
2256 if (((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2257 (!host->ops->get_cd)) ||
2258 (host->mmc->caps & MMC_CAP_NONREMOVABLE))
2261 if (host->ops->get_cd)
2262 return host->ops->get_cd(host);
2264 /* Try slot gpio detect */
2265 if (!IS_ERR_VALUE(gpio_cd))
2268 /* Host native card detect */
2269 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2272 static int sdhci_get_cd(struct mmc_host *mmc)
2274 struct sdhci_host *host = mmc_priv(mmc);
2277 sdhci_runtime_pm_get(host);
2278 ret = sdhci_do_get_cd(host);
2279 sdhci_runtime_pm_put(host);
2283 static int sdhci_check_ro(struct sdhci_host *host)
2285 unsigned long flags;
2288 spin_lock_irqsave(&host->lock, flags);
2290 if (host->flags & SDHCI_DEVICE_DEAD)
2292 else if (host->ops->get_ro) {
2293 spin_unlock_irqrestore(&host->lock, flags);
2294 is_readonly = host->ops->get_ro(host);
2295 spin_lock_irqsave(&host->lock, flags);
2298 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2299 & SDHCI_WRITE_PROTECT);
2301 spin_unlock_irqrestore(&host->lock, flags);
2303 /* This quirk needs to be replaced by a callback-function later */
2304 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2305 !is_readonly : is_readonly;
2308 #define SAMPLE_COUNT 5
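/*
 * When SDHCI_QUIRK_UNSTABLE_RO_DETECT is set, sdhci_do_get_ro() below
 * samples the write-protect state SAMPLE_COUNT times and only reports the
 * card as read-only if more than half of the samples indicate protection.
 */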
2310 static int sdhci_do_get_ro(struct sdhci_host *host)
2314 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2315 return sdhci_check_ro(host);
2318 for (i = 0; i < SAMPLE_COUNT; i++) {
2319 if (sdhci_check_ro(host)) {
2320 if (++ro_count > SAMPLE_COUNT / 2)
2328 static void sdhci_hw_reset(struct mmc_host *mmc)
2330 struct sdhci_host *host = mmc_priv(mmc);
2332 if (host->ops && host->ops->hw_reset)
2333 host->ops->hw_reset(host);
2336 static int sdhci_get_ro(struct mmc_host *mmc)
2338 struct sdhci_host *host = mmc_priv(mmc);
2341 sdhci_runtime_pm_get(host);
2342 ret = sdhci_do_get_ro(host);
2343 sdhci_runtime_pm_put(host);
2347 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2349 if (host->flags & SDHCI_DEVICE_DEAD)
2353 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
2355 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
2357 /* SDIO IRQ will be enabled as appropriate in runtime resume */
2358 if (host->runtime_suspended)
2362 sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
2364 sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
2369 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2371 struct sdhci_host *host = mmc_priv(mmc);
2372 unsigned long flags;
2374 spin_lock_irqsave(&host->lock, flags);
2375 sdhci_enable_sdio_irq_nolock(host, enable);
2376 spin_unlock_irqrestore(&host->lock, flags);
2379 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
2380 struct mmc_ios *ios)
2386 * Signal Voltage Switching is only applicable for Host Controllers
2389 if (host->version < SDHCI_SPEC_300)
2392 if (host->quirks2 & SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING) {
2393 if (host->ops->switch_signal_voltage)
2394 return host->ops->switch_signal_voltage(
2395 host, ios->signal_voltage);
2398 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2400 switch (ios->signal_voltage) {
2401 case MMC_SIGNAL_VOLTAGE_330:
2402 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2403 ctrl &= ~SDHCI_CTRL_VDD_180;
2404 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2407 ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
2409 pr_warning("%s: Switching to 3.3V signalling voltage "
2410 " failed\n", mmc_hostname(host->mmc));
2415 usleep_range(5000, 5500);
2417 /* 3.3V regulator output should be stable within 5 ms */
2418 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2419 if (!(ctrl & SDHCI_CTRL_VDD_180))
2422 pr_warning("%s: 3.3V regulator output did not became stable\n",
2423 mmc_hostname(host->mmc));
2426 case MMC_SIGNAL_VOLTAGE_180:
2428 ret = regulator_set_voltage(host->vqmmc,
2431 pr_warning("%s: Switching to 1.8V signalling voltage "
2432 " failed\n", mmc_hostname(host->mmc));
2438 * Enable 1.8V Signal Enable in the Host Control2
2441 ctrl |= SDHCI_CTRL_VDD_180;
2442 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2445 usleep_range(5000, 5500);
2447 /* 1.8V regulator output should be stable within 5 ms */
2448 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2449 if (ctrl & SDHCI_CTRL_VDD_180)
2452 pr_warning("%s: 1.8V regulator output did not became stable\n",
2453 mmc_hostname(host->mmc));
2456 case MMC_SIGNAL_VOLTAGE_120:
2458 ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000);
2460 pr_warning("%s: Switching to 1.2V signalling voltage "
2461 " failed\n", mmc_hostname(host->mmc));
2467 /* No signal voltage switch required */
2472 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2473 struct mmc_ios *ios)
2475 struct sdhci_host *host = mmc_priv(mmc);
2478 if (host->version < SDHCI_SPEC_300)
2480 sdhci_runtime_pm_get(host);
2481 err = sdhci_do_start_signal_voltage_switch(host, ios);
2482 /* Do any post voltage switch platform specific configuration */
2483 if (host->ops->switch_signal_voltage_exit)
2484 host->ops->switch_signal_voltage_exit(host,
2485 ios->signal_voltage);
2486 sdhci_runtime_pm_put(host);
2490 static int sdhci_card_busy(struct mmc_host *mmc)
2492 struct sdhci_host *host = mmc_priv(mmc);
2495 sdhci_runtime_pm_get(host);
2496 /* Check whether DAT[3:0] is 0000 */
2497 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2498 sdhci_runtime_pm_put(host);
2500 return !(present_state & SDHCI_DATA_LVL_MASK);
2503 static void sdhci_config_tap(struct mmc_host *mmc, u8 option)
2505 struct sdhci_host *host = mmc_priv(mmc);
2507 if (host->ops->config_tap_delay)
2508 host->ops->config_tap_delay(host, option);
2511 static int sdhci_validate_sd2_0(struct mmc_host *mmc)
2513 struct sdhci_host *host;
2516 host = mmc_priv(mmc);
2518 if (host->ops->validate_sd2_0)
2519 err = host->ops->validate_sd2_0(host);
2523 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2525 struct sdhci_host *host;
2528 int tuning_loop_counter = MAX_TUNING_LOOP;
2529 unsigned long timeout;
2531 bool requires_tuning_nonuhs = false;
2534 host = mmc_priv(mmc);
2536 sdhci_runtime_pm_get(host);
2537 disable_irq(host->irq);
2539 if ((host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) &&
2540 host->ops->execute_freq_tuning) {
2541 err = host->ops->execute_freq_tuning(host, opcode);
2542 enable_irq(host->irq);
2543 sdhci_runtime_pm_put(host);
2547 if ((host->quirks2 & SDHCI_QUIRK2_SKIP_TUNING) &&
2548 host->ops->is_tuning_done) {
2549 if (host->ops->is_tuning_done(host)) {
2550 enable_irq(host->irq);
2551 sdhci_runtime_pm_put(host);
2556 if ((host->quirks2 & SDHCI_QUIRK2_NON_STD_TUNING_LOOP_CNTR) &&
2557 (host->ops->get_max_tuning_loop_counter))
2558 tuning_loop_counter =
2559 host->ops->get_max_tuning_loop_counter(host);
2561 spin_lock(&host->lock);
2562 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2565 * The Host Controller needs tuning only in case of SDR104 mode
2566 * and for SDR50 mode when Use Tuning for SDR50 is set in the
2567 * Capabilities register.
2568 * If the Host Controller supports the HS200 mode then the
2569 * tuning function has to be executed.
2571 if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
2572 (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
2573 host->flags & SDHCI_HS200_NEEDS_TUNING))
2574 requires_tuning_nonuhs = true;
2576 if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
2577 requires_tuning_nonuhs)
2578 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2580 spin_unlock(&host->lock);
2581 enable_irq(host->irq);
2582 sdhci_runtime_pm_put(host);
2586 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2589 * As per the Host Controller spec v3.00, tuning command
2590 * generates Buffer Read Ready interrupt, so enable that.
2592 * Note: The spec clearly says that when tuning sequence
2593 * is being performed, the controller does not generate
2594 * interrupts other than Buffer Read Ready interrupt. But
2595 * to make sure we don't hit a controller bug, we _only_
2596 * enable Buffer Read Ready interrupt here.
2599 sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
2602 * Issue CMD19 repeatedly until Execute Tuning is cleared to 0, the number
2603 * of loops reaches 40, or a timeout of 150ms occurs.
2607 struct mmc_command cmd = {0};
2608 struct mmc_request mrq = {NULL};
2610 if (!tuning_loop_counter && !timeout)
2613 cmd.opcode = opcode;
2615 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2621 host->mrq_cmd = &mrq;
2623 if (host->quirks2 & SDHCI_QUIRK2_NON_STD_TUN_CARD_CLOCK) {
2624 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2625 clk &= ~SDHCI_CLOCK_CARD_EN;
2626 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2630 * In response to CMD19, the card sends 64 bytes of tuning
2631 * block to the Host Controller. So we set the block size
2633 * In response to CMD21, the card sends 128 bytes of tuning
2634 * block for MMC_BUS_WIDTH_8 and 64 bytes for MMC_BUS_WIDTH_4
2635 * to the Host Controller. So we set the block size to 128 or 64 accordingly.
2637 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
2638 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2639 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
2641 else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
2642 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
2645 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
2650 * The tuning block is sent by the card to the host controller.
2651 * So we set the TRNS_READ bit in the Transfer Mode register.
2652 * This also takes care of setting DMA Enable and Multi Block
2653 * Select in the same register to 0.
2655 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2657 sdhci_send_command(host, &cmd);
2660 host->mrq_cmd = NULL;
2662 spin_unlock(&host->lock);
2663 enable_irq(host->irq);
2665 if (host->quirks2 & SDHCI_QUIRK2_NON_STD_TUN_CARD_CLOCK) {
2667 sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
2668 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2669 clk |= SDHCI_CLOCK_CARD_EN;
2670 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2673 /* Wait for Buffer Read Ready interrupt */
2674 wait_event_interruptible_timeout(host->buf_ready_int,
2675 (host->tuning_done == 1),
2676 msecs_to_jiffies(50));
2677 disable_irq(host->irq);
2678 spin_lock(&host->lock);
2680 if (!host->tuning_done) {
2681 pr_info(DRIVER_NAME ": Timeout waiting for "
2682 "Buffer Read Ready interrupt during tuning "
2683 "procedure, falling back to fixed sampling "
2685 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2686 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2687 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2688 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2694 host->tuning_done = 0;
2696 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2697 tuning_loop_counter--;
2700 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
2703 * The Host Driver has exhausted the maximum number of loops allowed,
2704 * so use fixed sampling frequency.
2706 if (!tuning_loop_counter || !timeout) {
2707 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2708 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2710 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
2711 pr_info(DRIVER_NAME ": Tuning procedure"
2712 " failed, falling back to fixed sampling"
2716 sdhci_config_tap(mmc, SAVE_TUNED_TAP);
2717 pr_info("%s: tap value and tuning window after hw tuning completion ...\n",
2719 /* log tap, trim and tuning windows */
2720 if (host->ops->dump_host_cust_regs)
2721 host->ops->dump_host_cust_regs(host);
2727 * If this is the very first time we are here, start the retuning
2728 * timer. The SDHCI_NEEDS_RETUNING flag is only clear on that first
2729 * pass, so we check this condition before actually starting
2732 if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
2733 (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
2734 host->flags |= SDHCI_USING_RETUNING_TIMER;
2735 mod_timer(&host->tuning_timer, jiffies +
2736 host->tuning_count * HZ);
2737 /* Tuning mode 1 limits the maximum data length to 4MB */
2738 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
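/*
 * Worked example (illustrative, assuming the common max_blk_size of
 * 512 bytes): 4 MiB / 512 = 8192, so max_blk_count is capped at 8192
 * blocks per request while tuning mode 1 is in use.
 */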
2740 host->flags &= ~SDHCI_NEEDS_RETUNING;
2741 /* Reload the new initial value for timer */
2742 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2743 mod_timer(&host->tuning_timer, jiffies +
2744 host->tuning_count * HZ);
2748 * In case tuning fails, host controllers which support re-tuning can
2749 * try tuning again at a later time, when the re-tuning timer expires.
2750 * So for these controllers, we return 0. Since there might be other
2751 * controllers which do not have this capability, we return an error for
2752 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
2753 * a retuning timer to do the retuning for the card.
2755 if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
2758 sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
2759 spin_unlock(&host->lock);
2760 enable_irq(host->irq);
2761 sdhci_runtime_pm_put(host);
2767 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2771 /* Host Controller v3.00 defines preset value registers */
2772 if (host->version < SDHCI_SPEC_300)
2775 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2778 * We only enable or disable Preset Value if they are not already
2779 * enabled or disabled respectively. Otherwise, we bail out.
2781 if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
2782 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2783 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2784 host->flags |= SDHCI_PV_ENABLED;
2785 } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
2786 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2787 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2788 host->flags &= ~SDHCI_PV_ENABLED;
2792 static void sdhci_card_event(struct mmc_host *mmc)
2794 struct sdhci_host *host = mmc_priv(mmc);
2795 unsigned long flags;
2797 /* sdhci_runtime_pm_get cannot be called here since
2798 * tasklet/softirq context cannot call
2799 * a sleeping function like __pm_runtime_resume
2801 spin_lock_irqsave(&host->lock, flags);
2803 /* Check host->mrq_cmd first in case we are runtime suspended */
2804 if ((host->mrq_cmd || host->mrq_dat) &&
2805 /* TODO: check if clocks are already ON when
2806 * mrq_cmd or mrq_dat are enabled
2808 !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
2809 pr_err("%s: Card removed during transfer!\n",
2810 mmc_hostname(host->mmc));
2811 pr_err("%s: Resetting controller.\n",
2812 mmc_hostname(host->mmc));
2814 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2816 if (host->mrq_cmd) {
2817 host->mrq_cmd->cmd->error = -ENOMEDIUM;
2818 if (MMC_CHECK_CMDQ_MODE(host))
2819 tasklet_schedule(&host->finish_cmd_tasklet);
2821 tasklet_schedule(&host->finish_tasklet);
2823 if (host->mrq_dat) {
2824 host->mrq_dat->cmd->error = -ENOMEDIUM;
2825 if (MMC_CHECK_CMDQ_MODE(host))
2826 tasklet_schedule(&host->finish_dat_tasklet);
2828 tasklet_schedule(&host->finish_tasklet);
2832 spin_unlock_irqrestore(&host->lock, flags);
2835 int sdhci_enable(struct mmc_host *mmc)
2837 struct sdhci_host *host = mmc_priv(mmc);
2839 if (!mmc->card || !(mmc->caps2 & MMC_CAP2_CLOCK_GATING))
2842 /* cancel delayed clk gate work */
2843 if (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
2844 cancel_delayed_work_sync(&host->delayed_clk_gate_wrk);
2846 sysedp_set_state(host->sysedpc, 1);
2848 if (mmc->ios.clock) {
2849 if (host->ops->set_clock)
2850 host->ops->set_clock(host, mmc->ios.clock);
2851 sdhci_set_clock(host, mmc->ios.clock);
2857 static void mmc_host_clk_gate(struct sdhci_host *host)
2859 sdhci_set_clock(host, 0);
2860 if (host->ops->set_clock)
2861 host->ops->set_clock(host, 0);
2863 sysedp_set_state(host->sysedpc, 0);
2868 void delayed_clk_gate_cb(struct work_struct *work)
2870 struct sdhci_host *host = container_of(work, struct sdhci_host,
2871 delayed_clk_gate_wrk.work);
2873 /* power off check */
2874 if (host->mmc->ios.power_mode == MMC_POWER_OFF)
2877 mmc_host_clk_gate(host);
2881 EXPORT_SYMBOL_GPL(delayed_clk_gate_cb);
2883 int sdhci_disable(struct mmc_host *mmc)
2885 struct sdhci_host *host = mmc_priv(mmc);
2887 if (!mmc->card || !(mmc->caps2 & MMC_CAP2_CLOCK_GATING))
2890 if (IS_DELAYED_CLK_GATE(host)) {
2891 if (host->is_clk_on) {
2892 if (IS_SDIO_CARD(host))
2893 host->clk_gate_tmout_ticks =
2894 SDIO_CLK_GATING_TICK_TMOUT;
2895 else if (IS_EMMC_CARD(host))
2896 host->clk_gate_tmout_ticks =
2897 EMMC_CLK_GATING_TICK_TMOUT;
2898 if (host->clk_gate_tmout_ticks > 0)
2899 schedule_delayed_work(
2900 &host->delayed_clk_gate_wrk,
2901 host->clk_gate_tmout_ticks);
2906 mmc_host_clk_gate(host);
2911 #ifdef CONFIG_MMC_FREQ_SCALING
2913 * Wrapper functions to call any platform specific implementation for
2914 * supporting dynamic frequency scaling for SD/MMC devices.
2916 static int sdhci_gov_get_target(struct mmc_host *mmc, unsigned long *freq)
2918 struct sdhci_host *host = mmc_priv(mmc);
2920 if (host->ops->dfs_gov_get_target_freq)
2921 *freq = host->ops->dfs_gov_get_target_freq(host,
2922 mmc->devfreq_stats);
2927 static int sdhci_gov_init(struct mmc_host *mmc)
2929 struct sdhci_host *host = mmc_priv(mmc);
2931 if (host->ops->dfs_gov_init)
2932 return host->ops->dfs_gov_init(host);
2937 static void sdhci_gov_exit(struct mmc_host *mmc)
2939 struct sdhci_host *host = mmc_priv(mmc);
2941 if (host->ops->dfs_gov_exit)
2942 host->ops->dfs_gov_exit(host);
2946 static int sdhci_select_drive_strength(struct mmc_host *mmc,
2947 unsigned int max_dtr,
2951 struct sdhci_host *host = mmc_priv(mmc);
2952 unsigned char drv_type;
2954 /* return default strength if no handler in driver */
2955 if (!host->ops->get_drive_strength)
2956 return MMC_SET_DRIVER_TYPE_B;
2958 drv_type = host->ops->get_drive_strength(host, max_dtr,
2959 host_drv, card_drv);
2961 if (drv_type > MMC_SET_DRIVER_TYPE_D) {
2962 pr_err("%s: Error on getting drive strength. Got drv_type %d\n"
2963 , mmc_hostname(host->mmc), drv_type);
2964 return MMC_SET_DRIVER_TYPE_B;
2969 static void sdhci_init_card(struct mmc_host *mmc, struct mmc_card *card)
2971 struct sdhci_host *host = mmc_priv(mmc);
2974 * Get the max pio transfer limits if defined. This would be used to
2975 * dynamically choose between dma and pio modes depending on the
2976 * transfer parameters.
2978 if (host->ops->get_max_pio_transfer_limits)
2979 host->ops->get_max_pio_transfer_limits(host);
2981 static const struct mmc_host_ops sdhci_ops = {
2982 .request = sdhci_request,
2983 .set_ios = sdhci_set_ios,
2984 .get_cd = sdhci_get_cd,
2985 .get_ro = sdhci_get_ro,
2986 .hw_reset = sdhci_hw_reset,
2987 .enable = sdhci_enable,
2988 .disable = sdhci_disable,
2989 .enable_sdio_irq = sdhci_enable_sdio_irq,
2990 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2991 .execute_tuning = sdhci_execute_tuning,
2992 .validate_sd2_0 = sdhci_validate_sd2_0,
2993 .card_event = sdhci_card_event,
2994 .card_busy = sdhci_card_busy,
2995 #ifdef CONFIG_MMC_FREQ_SCALING
2996 .dfs_governor_init = sdhci_gov_init,
2997 .dfs_governor_exit = sdhci_gov_exit,
2998 .dfs_governor_get_target = sdhci_gov_get_target,
3000 .select_drive_strength = sdhci_select_drive_strength,
3001 .post_init = sdhci_post_init,
3002 .init_card = sdhci_init_card,
3005 /*****************************************************************************\
3009 \*****************************************************************************/
3011 static void sdhci_tasklet_card(unsigned long param)
3013 struct sdhci_host *host = (struct sdhci_host *)param;
3015 sdhci_card_event(host->mmc);
3016 if (host->detect_resume)
3017 mmc_detect_change(host->mmc, msecs_to_jiffies(700));
3019 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
3022 static void sdhci_tasklet_finish(unsigned long param)
3024 struct sdhci_host *host;
3025 unsigned long flags;
3026 struct mmc_request *mrq = NULL;
3028 host = (struct sdhci_host *)param;
3030 spin_lock_irqsave(&host->lock, flags);
3033 * If this tasklet gets rescheduled while running, it will
3034 * be run again afterwards but without any active request.
3036 if (!host->mrq_cmd && !host->mrq_dat) {
3037 spin_unlock_irqrestore(&host->lock, flags);
3041 del_timer(&host->timer);
3044 mrq = host->mrq_cmd;
3045 else if (host->mrq_dat)
3046 mrq = host->mrq_dat;
3049 * The controller needs a reset of internal state machines
3050 * upon error conditions.
3052 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
3053 ((mrq->cmd && mrq->cmd->error) ||
3054 (mrq->data && (mrq->data->error ||
3055 (mrq->data->stop && mrq->data->stop->error))) ||
3056 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
3058 /* Some controllers need this kick or reset won't work here */
3059 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3060 /* This is to force an update */
3061 sdhci_update_clock(host);
3063 /* Spec says we should do both at the same time */
3064 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
3067 host->mrq_cmd = NULL;
3068 host->mrq_dat = NULL;
3072 #ifndef SDHCI_USE_LEDS_CLASS
3073 sdhci_deactivate_led(host);
3077 spin_unlock_irqrestore(&host->lock, flags);
3079 mmc_request_done(host->mmc, mrq);
3080 #ifndef CONFIG_MMC_CQ
3081 sdhci_runtime_pm_put(host);
3086 * This tasklet gets scheduled to handle CMD only requests in CQ.
3088 static void sdhci_tasklet_cmd_finish(unsigned long param)
3090 struct sdhci_host *host;
3091 unsigned long flags;
3092 struct mmc_request *mrq;
3094 host = (struct sdhci_host *)param;
3096 if (!host->mrq_cmd && host->mrq_dat) {
3097 mmc_handle_queued_request(host->mmc, MMC_HANDLE_CLR_CMD);
3101 spin_lock_irqsave(&host->lock, flags);
3104 * If this tasklet gets rescheduled while running, it will
3105 * be run again afterwards but without any active request.
3107 if (!host->mrq_cmd) {
3108 spin_unlock_irqrestore(&host->lock, flags);
3112 del_timer(&host->timer);
3114 mrq = host->mrq_cmd;
3117 * The controller needs a reset of internal state machines
3118 * upon error conditions.
3120 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
3121 ((mrq->cmd && mrq->cmd->error) ||
3122 (mrq->data && (mrq->data->error ||
3123 (mrq->data->stop && mrq->data->stop->error))) ||
3124 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
3126 /* Some controllers need this kick or reset won't work here */
3127 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3128 /* This is to force an update */
3129 sdhci_update_clock(host);
3131 sdhci_reset(host, SDHCI_RESET_CMD);
3134 host->mrq_cmd = NULL;
3137 #ifndef SDHCI_USE_LEDS_CLASS
3138 sdhci_deactivate_led(host);
3142 spin_unlock_irqrestore(&host->lock, flags);
3144 mmc_request_done(host->mmc, mrq);
3145 #ifndef CONFIG_MMC_CQ
3146 sdhci_runtime_pm_put(host);
3151 * This tasklet gets scheduled to handle CMD with DATA requests in CQ.
3153 static void sdhci_tasklet_dat_finish(unsigned long param)
3155 struct sdhci_host *host;
3156 unsigned long flags;
3157 struct mmc_request *mrq;
3159 host = (struct sdhci_host *)param;
3161 spin_lock_irqsave(&host->lock, flags);
3164 * If this tasklet gets rescheduled while running, it will
3165 * be run again afterwards but without any active request.
3167 if (!host->mrq_dat) {
3168 spin_unlock_irqrestore(&host->lock, flags);
3172 del_timer(&host->timer);
3174 mrq = host->mrq_dat;
3176 if (host->data_early)
3177 mrq->data_early = 1;
3180 * The controller needs a reset of internal state machines
3181 * upon error conditions.
3183 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
3184 ((mrq->cmd && mrq->cmd->error) ||
3185 (mrq->data && (mrq->data->error ||
3186 (mrq->data->stop && mrq->data->stop->error))) ||
3187 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
3189 /* Some controllers need this kick or reset won't work here */
3190 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3191 /* This is to force an update */
3192 sdhci_update_clock(host);
3194 sdhci_reset(host, SDHCI_RESET_DATA);
3197 host->mrq_dat = NULL;
3200 #ifndef SDHCI_USE_LEDS_CLASS
3201 sdhci_deactivate_led(host);
3205 spin_unlock_irqrestore(&host->lock, flags);
3207 mmc_request_done(host->mmc, mrq);
3208 #ifndef CONFIG_MMC_CQ
3209 sdhci_runtime_pm_put(host);
3213 static void sdhci_timeout_timer(unsigned long data)
3215 struct sdhci_host *host;
3216 unsigned long flags;
3218 host = (struct sdhci_host *)data;
3220 spin_lock_irqsave(&host->lock, flags);
3222 if (host->mrq_cmd || host->mrq_dat) {
3223 pr_err("%s: Timeout waiting for hardware "
3224 "interrupt.\n", mmc_hostname(host->mmc));
3225 sdhci_dumpregs(host);
3228 host->data->error = -ETIMEDOUT;
3229 sdhci_finish_data(host);
3232 host->cmd->error = -ETIMEDOUT;
3233 else if (host->mrq_dat)
3234 host->mrq_dat->cmd->error = -ETIMEDOUT;
3236 if (MMC_CHECK_CMDQ_MODE(host))
3237 tasklet_schedule(&host->finish_cmd_tasklet);
3239 tasklet_schedule(&host->finish_tasklet);
3244 spin_unlock_irqrestore(&host->lock, flags);
3247 static void sdhci_tuning_timer(unsigned long data)
3249 struct sdhci_host *host;
3250 unsigned long flags;
3252 host = (struct sdhci_host *)data;
3254 spin_lock_irqsave(&host->lock, flags);
3256 host->flags |= SDHCI_NEEDS_RETUNING;
3258 spin_unlock_irqrestore(&host->lock, flags);
3261 /*****************************************************************************\
3263 * Interrupt handling *
3265 \*****************************************************************************/
3267 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
3269 bool skip_dump = false;
3271 BUG_ON(intmask == 0);
3274 pr_err("%s: Got command interrupt 0x%08x even "
3275 "though no command operation was in progress.\n",
3276 mmc_hostname(host->mmc), (unsigned)intmask);
3277 sdhci_dumpregs(host);
3281 if (intmask & SDHCI_INT_TIMEOUT) {
3282 host->cmd->error = -ETIMEDOUT;
3283 } else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
3285 host->cmd->error = -EILSEQ;
3287 if (host->ops->skip_register_dump)
3288 skip_dump = host->ops->skip_register_dump(host);
3290 (intmask & SDHCI_INT_INDEX))
3291 goto lbl_suppress_dump;
3293 sdhci_dumpregs(host);
3294 if (intmask & SDHCI_INT_INDEX)
3295 pr_err("%s: Command INDEX error, intmask: %x Interface clock = %uHz\n",
3296 mmc_hostname(host->mmc), intmask, host->max_clk);
3297 else if (intmask & SDHCI_INT_CRC)
3298 pr_err("%s: Command CRC error, intmask: %x Interface clock = %uHz\n",
3299 mmc_hostname(host->mmc), intmask, host->max_clk);
3300 else if (intmask & SDHCI_INT_END_BIT)
3301 pr_err("%s: Command END BIT error, intmask: %x Interface clock = %uHz\n",
3302 mmc_hostname(host->mmc), intmask, host->max_clk);
3306 if (host->cmd->error) {
3307 if (MMC_CHECK_CMDQ_MODE(host))
3308 tasklet_schedule(&host->finish_cmd_tasklet);
3310 tasklet_schedule(&host->finish_tasklet);
3315 * The host can send an interrupt when the busy state has
3316 * ended, allowing us to wait without wasting CPU cycles.
3317 * Unfortunately this is overloaded on the "data complete"
3318 * interrupt, so we need to take some care when handling
3321 * Note: The 1.0 specification is a bit ambiguous about this
3322 * feature so there might be some problems with older
3325 if (host->cmd->flags & MMC_RSP_BUSY) {
3326 if (host->cmd->data)
3327 DBG("Cannot wait for busy signal when also "
3328 "doing a data transfer");
3329 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
3332 /* The controller does not support the end-of-busy IRQ,
3333 * fall through and take the SDHCI_INT_RESPONSE */
3336 if (intmask & SDHCI_INT_RESPONSE)
3337 sdhci_finish_command(host);
3340 #ifdef CONFIG_MMC_DEBUG
3341 static void sdhci_show_adma_error(struct sdhci_host *host)
3343 const char *name = mmc_hostname(host->mmc);
3344 u8 *desc = host->adma_desc;
3351 ctrl = sdhci_readl(host, SDHCI_ACMD12_ERR);
3352 if (ctrl & SDHCI_ADDRESSING_64BIT_EN) {
3353 if (ctrl & SDHCI_HOST_VERSION_4_EN)
3358 /* 32-bit DMA mode supported */
3362 sdhci_dumpregs(host);
3365 dma = (__le32 *)(desc + 4);
3366 len = (__le16 *)(desc + 2);
3369 if (next_desc == 8) {
3370 DBG("%s: %p: DMA-32 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3371 name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
3372 } else if (next_desc == 16) {
3373 DBG("%s: %p: DMA-64 0x%16x, LEN 0x%04x, Attr=0x%02x\n",
3374 name, desc, le64_to_cpu(*((__le64 *)dma)), le16_to_cpu(*len), attr);
3382 static void sdhci_show_adma_error(struct sdhci_host *host) { }
3385 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3388 BUG_ON(intmask == 0);
3390 /* CMD19, CMD21 generates _only_ Buffer Read Ready interrupt */
3391 if (intmask & SDHCI_INT_DATA_AVAIL) {
3392 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3393 if (command == MMC_SEND_TUNING_BLOCK ||
3394 command == MMC_SEND_TUNING_BLOCK_HS200) {
3395 host->tuning_done = 1;
3396 wake_up(&host->buf_ready_int);
3403 * The "data complete" interrupt is also used to
3404 * indicate that a busy state has ended. See comment
3405 * above in sdhci_cmd_irq().
3407 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
3408 if (intmask & SDHCI_INT_DATA_END) {
3409 sdhci_finish_command(host);
3414 pr_err("%s: Got data interrupt 0x%08x even "
3415 "though no data operation was in progress.\n",
3416 mmc_hostname(host->mmc), (unsigned)intmask);
3417 sdhci_dumpregs(host);
3422 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3423 host->data->error = -ETIMEDOUT;
3424 pr_err("%s: Data Timeout error, intmask: %x Interface clock = %uHz\n",
3425 mmc_hostname(host->mmc), intmask, host->max_clk);
3426 sdhci_dumpregs(host);
3427 } else if (intmask & SDHCI_INT_DATA_END_BIT) {
3428 host->data->error = -EILSEQ;
3429 pr_err("%s: Data END Bit error, intmask: %x Interface clock = %uHz\n",
3430 mmc_hostname(host->mmc), intmask, host->max_clk);
3431 } else if ((intmask & SDHCI_INT_DATA_CRC) &&
3432 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3433 != MMC_BUS_TEST_R) {
3434 host->data->error = -EILSEQ;
3435 pr_err("%s: Data CRC error, intmask: %x Interface clock = %uHz\n",
3436 mmc_hostname(host->mmc), intmask, host->max_clk);
3437 sdhci_dumpregs(host);
3438 } else if (intmask & SDHCI_INT_ADMA_ERROR) {
3439 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
3440 sdhci_dumpregs(host);
3441 sdhci_show_adma_error(host);
3442 host->data->error = -EIO;
3443 if (host->ops->adma_workaround)
3444 host->ops->adma_workaround(host, intmask);
3447 if (host->data->error)
3448 sdhci_finish_data(host);
3450 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3451 sdhci_transfer_pio(host);
3454 * We currently don't do anything fancy with DMA
3455 * boundaries, but as we can't disable the feature
3456 * we need to at least restart the transfer.
3458 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3459 * should return a valid address to continue from, but as
3460 * some controllers are faulty, don't trust them.
3462 if (intmask & SDHCI_INT_DMA_END) {
3463 u32 dmastart, dmanow;
3464 dmastart = sg_dma_address(host->data->sg);
3465 dmanow = dmastart + host->data->bytes_xfered;
3467 * Force update to the next DMA block boundary.
3470 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3471 SDHCI_DEFAULT_BOUNDARY_SIZE;
3472 host->data->bytes_xfered = dmanow - dmastart;
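/*
 * Illustrative example (hypothetical values): with dmastart = 0x10070000
 * and 0x8000 bytes already transferred, dmanow = 0x10078000 is rounded up
 * to the next SDHCI_DEFAULT_BOUNDARY_SIZE (512 KiB) boundary, 0x10080000,
 * and written back to SDHCI_DMA_ADDRESS so the transfer resumes there.
 */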
3473 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
3475 mmc_hostname(host->mmc), dmastart,
3476 host->data->bytes_xfered, dmanow);
3477 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
3480 if (intmask & SDHCI_INT_DATA_END) {
3481 if ((!MMC_CHECK_CMDQ_MODE(host) && host->cmd) ||
3482 (MMC_CHECK_CMDQ_MODE(host) && host->cmd && (host->mrq_dat->cmd == host->cmd))) {
3485 * Data managed to finish before the
3486 * command completed. Make sure we do
3487 * things in the proper order.
3489 host->data_early = 1;
3491 sdhci_finish_data(host);
3496 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3499 struct sdhci_host *host = dev_id;
3500 u32 intmask, unexpected = 0;
3501 int cardint = 0, max_loops = 16;
3503 spin_lock(&host->lock);
3505 if (host->runtime_suspended) {
3506 spin_unlock(&host->lock);
3507 pr_warning("%s: got irq while runtime suspended\n",
3508 mmc_hostname(host->mmc));
3512 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3514 if (!intmask || intmask == 0xffffffff) {
3520 DBG("*** %s got interrupt: 0x%08x\n",
3521 mmc_hostname(host->mmc), intmask);
3523 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3524 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3528 * There is an observation on i.mx esdhc: the INSERT bit will be
3529 * immediately set again when it gets cleared, if a card is
3530 * inserted. We have to mask the irq to prevent an interrupt
3531 * storm which would freeze the system. The REMOVE bit behaves
3532 * the same way.
3534 * More testing is needed here to ensure it works for other
3537 sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
3538 SDHCI_INT_CARD_REMOVE);
3539 sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
3540 SDHCI_INT_CARD_INSERT);
3542 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3543 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3544 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
3545 tasklet_schedule(&host->card_tasklet);
3548 #ifdef CONFIG_CMD_DUMP
3549 if (mmc_hostname(host->mmc)[3] == '0')
3550 dbg_add_host_log(host->mmc, 7, intmask, 0xffffffff);
3553 if (intmask & SDHCI_INT_CMD_MASK) {
3554 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
3556 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
3559 if (intmask & SDHCI_INT_DATA_MASK) {
3560 sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
3562 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3565 if (intmask & SDHCI_INT_RETUNING_EVENT)
3566 host->flags |= SDHCI_NEEDS_RETUNING;
3568 if ((intmask & SDHCI_INT_DATA_MASK) || (intmask & SDHCI_INT_CMD_MASK))
3569 if (host->ops->sd_error_stats)
3570 host->ops->sd_error_stats(host, intmask);
3572 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
3574 intmask &= ~SDHCI_INT_ERROR;
3576 if (intmask & SDHCI_INT_BUS_POWER) {
3577 pr_err("%s: Current limit error, intmask: %x Interface clock = %uHz\n",
3578 mmc_hostname(host->mmc), intmask, host->max_clk);
3579 pr_err("%s: Card is consuming too much power!\n",
3580 mmc_hostname(host->mmc));
3581 sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
3584 /* print the errors based on the SD Host controller spec */
3585 if ((intmask & SDHCI_INT_TIMEOUT) || (intmask & SDHCI_INT_CRC)) {
3586 pr_err("%s: %s, intmask: %x Interface clock = %uHz\n",
3587 mmc_hostname(host->mmc),
3588 resp_error[RESP_ERROR_INDEX(intmask)],
3589 intmask, host->max_clk);
3592 intmask &= ~SDHCI_INT_BUS_POWER;
3594 if (intmask & SDHCI_INT_CARD_INT)
3597 intmask &= ~SDHCI_INT_CARD_INT;
3600 unexpected |= intmask;
3601 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3604 result = IRQ_HANDLED;
3606 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3607 if (intmask && --max_loops)
3610 spin_unlock(&host->lock);
3613 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3614 mmc_hostname(host->mmc), unexpected);
3615 sdhci_dumpregs(host);
3618 * We have to delay this as it calls back into the driver.
3621 mmc_signal_sdio_irq(host->mmc);
3626 /*****************************************************************************\
3630 \*****************************************************************************/
3633 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
3636 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3637 | SDHCI_WAKE_ON_INT;
3639 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3641 /* Avoid fake wake up */
3642 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
3643 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
3644 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3646 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
3648 void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3651 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3652 | SDHCI_WAKE_ON_INT;
3654 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3656 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3658 EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
3660 int sdhci_suspend_host(struct sdhci_host *host)
3663 struct mmc_host *mmc = host->mmc;
3665 host->suspend_task = current;
3667 if (host->ops->platform_suspend)
3668 host->ops->platform_suspend(host);
3670 sdhci_disable_card_detection(host);
3672 /* Disable tuning since we are suspending */
3673 if (host->flags & SDHCI_USING_RETUNING_TIMER) {
3674 del_timer_sync(&host->tuning_timer);
3675 host->flags &= ~SDHCI_NEEDS_RETUNING;
3679 * If eMMC cards are put in sleep state, Vccq can be disabled
3680 * but Vcc would still be powered on. In resume, we only restore
3681 * the controller context. So, set MMC_PM_KEEP_POWER flag.
3683 if (mmc_card_can_sleep(mmc) && !(mmc->caps2 & MMC_CAP2_NO_SLEEP_CMD))
3684 mmc->pm_flags |= MMC_PM_KEEP_POWER;
3686 ret = mmc_suspend_host(host->mmc);
3688 if (host->flags & SDHCI_USING_RETUNING_TIMER) {
3689 host->flags |= SDHCI_NEEDS_RETUNING;
3690 mod_timer(&host->tuning_timer, jiffies +
3691 host->tuning_count * HZ);
3694 sdhci_enable_card_detection(host);
3696 host->suspend_task = NULL;
3699 /* cancel delayed clk gate work */
3700 if (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
3701 cancel_delayed_work_sync(&host->delayed_clk_gate_wrk);
3704 * If host clock is disabled but the register access requires host
3705 * clock, then enable the clock, mask the interrupts and disable
3708 if (host->quirks2 & SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
3709 if ((!host->clock && host->ops->set_clock) &&
3710 (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE))
3711 host->ops->set_clock(host, max(mmc->ios.clock, mmc->f_min));
3713 if (mmc->pm_flags & MMC_PM_KEEP_POWER)
3714 host->card_int_set = host->ier &
3717 if (!device_may_wakeup(mmc_dev(host->mmc))) {
3718 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
3720 if (host->quirks2 & SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
3721 if ((!host->clock && host->ops->set_clock) &&
3722 (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE))
3723 host->ops->set_clock(host, 0);
3726 disable_irq(host->irq);
3728 sdhci_enable_irq_wakeups(host);
3729 enable_irq_wake(host->irq);
3731 if (host->quirks2 & SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
3732 if ((!host->clock && host->ops->set_clock) &&
3733 (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE))
3734 host->ops->set_clock(host, 0);
3737 host->suspend_task = NULL;
3742 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3744 int sdhci_resume_host(struct sdhci_host *host)
3747 struct mmc_host *mmc = host->mmc;
3749 host->suspend_task = current;
3752 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3753 if (host->ops->enable_dma)
3754 host->ops->enable_dma(host);
3757 if (!device_may_wakeup(mmc_dev(host->mmc))) {
3759 enable_irq(host->irq);
3761 sdhci_disable_irq_wakeups(host);
3762 disable_irq_wake(host->irq);
3765 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3766 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3767 /* Card keeps power but host controller does not */
3768 sdhci_init(host, 0);
3771 sdhci_do_set_ios(host, &host->mmc->ios);
3773 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3777 ret = mmc_resume_host(host->mmc);
3778 /* Enable card interrupt as it is overwritten in sdhci_init */
3779 if ((mmc->caps & MMC_CAP_SDIO_IRQ) &&
3780 (mmc->pm_flags & MMC_PM_KEEP_POWER))
3781 if (host->card_int_set)
3782 mmc->ops->enable_sdio_irq(mmc, true);
3784 sdhci_enable_card_detection(host);
3786 if (host->ops->platform_resume)
3787 host->ops->platform_resume(host);
3789 /* Set the re-tuning expiration flag */
3790 if (host->flags & SDHCI_USING_RETUNING_TIMER)
3791 host->flags |= SDHCI_NEEDS_RETUNING;
3793 host->suspend_task = NULL;
3798 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3799 #endif /* CONFIG_PM */
3801 #ifdef CONFIG_PM_RUNTIME
3803 static int sdhci_runtime_pm_get(struct sdhci_host *host)
3807 if (!(host->quirks2 & SDHCI_QUIRK2_MMC_RTPM))
3810 present = mmc_gpio_get_cd(host->mmc);
3812 /* If polling, assume that the card is always present. */
3813 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
3814 if (host->ops->get_cd)
3815 present = host->ops->get_cd(host);
3819 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3823 if ((present && !host->mmc->card && (host->runtime_suspended == false))
3824 || host->suspend_task == current) {
3825 pm_runtime_get_noresume(host->mmc->parent);
3829 return pm_runtime_get_sync(host->mmc->parent);
3832 static int sdhci_runtime_pm_put(struct sdhci_host *host)
3836 if (!(host->quirks2 & SDHCI_QUIRK2_MMC_RTPM))
3839 present = mmc_gpio_get_cd(host->mmc);
3841 /* If polling, assume that the card is always present. */
3842 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
3843 if (host->ops->get_cd)
3844 present = host->ops->get_cd(host);
3848 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3851 if ((present && !host->mmc->card) || host->suspend_task == current) {
3852 pm_runtime_mark_last_busy(host->mmc->parent);
3853 pm_runtime_put_noidle(host->mmc->parent);
3857 pm_runtime_mark_last_busy(host->mmc->parent);
3858 return pm_runtime_put_autosuspend(host->mmc->parent);
3861 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3863 unsigned long flags;
3866 if (!(host->quirks2 & SDHCI_QUIRK2_MMC_RTPM))
3869 if (host->quirks2 & SDHCI_QUIRK2_NON_STD_RTPM) {
3870 spin_lock_irqsave(&host->lock, flags);
3871 host->runtime_suspended = true;
3872 spin_unlock_irqrestore(&host->lock, flags);
3874 sdhci_set_clock(host, 0);
3875 if (host->ops->set_clock)
3876 host->ops->set_clock(host, 0);
3877 sysedp_set_state(host->sysedpc, 0);
3881 /* Disable tuning since we are suspending */
3882 if (host->flags & SDHCI_USING_RETUNING_TIMER) {
3883 del_timer_sync(&host->tuning_timer);
3884 host->flags &= ~SDHCI_NEEDS_RETUNING;
3887 if (host->ops->set_clock)
3888 host->ops->set_clock(host, host->mmc->f_min);
3889 sdhci_set_clock(host, host->mmc->f_min);
3891 spin_lock_irqsave(&host->lock, flags);
3892 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
3893 spin_unlock_irqrestore(&host->lock, flags);
3895 synchronize_irq(host->irq);
3897 spin_lock_irqsave(&host->lock, flags);
3898 host->runtime_suspended = true;
3899 spin_unlock_irqrestore(&host->lock, flags);
3901 sdhci_set_clock(host, 0);
3902 if (host->ops->set_clock)
3903 host->ops->set_clock(host, 0);
3908 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3910 int sdhci_runtime_resume_host(struct sdhci_host *host)
3912 unsigned long flags;
3913 int ret = 0, host_flags = host->flags;
3916 if (!(host->quirks2 & SDHCI_QUIRK2_MMC_RTPM))
3919 if (host->quirks2 & SDHCI_QUIRK2_NON_STD_RTPM) {
3920 if (host->mmc->ios.clock) {
3921 freq = host->mmc->ios.clock;
3923 if (!host->mmc->f_min)
3924 host->mmc->f_min = MIN_SDMMC_FREQ;
3925 freq = host->mmc->f_min;
3929 if (host->ops->set_clock)
3930 host->ops->set_clock(host, freq);
3931 sdhci_set_clock(host, freq);
3933 sysedp_set_state(host->sysedpc, 1);
3934 spin_lock_irqsave(&host->lock, flags);
3935 host->runtime_suspended = false;
3936 spin_unlock_irqrestore(&host->lock, flags);
3940 if (host->ops->set_clock)
3941 host->ops->set_clock(host, host->mmc->f_min);
3942 sdhci_set_clock(host, host->mmc->f_min);
3944 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3945 if (host->ops->enable_dma)
3946 host->ops->enable_dma(host);
3949 sdhci_init(host, 0);
3951 /* Force clock and power re-program */
3954 sdhci_do_set_ios(host, &host->mmc->ios);
3956 if (host->mmc->ios.clock) {
3957 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
3958 /* Do any post voltage switch platform specific configuration */
3959 if (host->ops->switch_signal_voltage_exit)
3960 host->ops->switch_signal_voltage_exit(host,
3961 host->mmc->ios.signal_voltage);
3964 if ((host_flags & SDHCI_PV_ENABLED) &&
3965 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3966 spin_lock_irqsave(&host->lock, flags);
3967 sdhci_enable_preset_value(host, true);
3968 spin_unlock_irqrestore(&host->lock, flags);
3971 /* Set the re-tuning expiration flag */
3972 if (host->flags & SDHCI_USING_RETUNING_TIMER)
3973 host->flags |= SDHCI_NEEDS_RETUNING;
3975 spin_lock_irqsave(&host->lock, flags);
3977 host->runtime_suspended = false;
3979 /* Enable SDIO IRQ */
3980 if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
3981 sdhci_enable_sdio_irq_nolock(host, true);
3983 /* Enable Card Detection */
3984 sdhci_enable_card_detection(host);
3986 spin_unlock_irqrestore(&host->lock, flags);
3991 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3995 /*****************************************************************************\
3997 * Device allocation/registration *
3999 \*****************************************************************************/
4001 struct sdhci_host *sdhci_alloc_host(struct device *dev,
4004 struct mmc_host *mmc;
4005 struct sdhci_host *host;
4007 WARN_ON(dev == NULL);
4009 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
4011 return ERR_PTR(-ENOMEM);
4013 host = mmc_priv(mmc);
4019 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
4021 #ifdef CONFIG_DEBUG_FS
4022 static int show_sdhci_perf_stats(struct seq_file *s, void *data)
4024 struct sdhci_host *host = s->private;
4027 u32 last_perf_in_class;
4028 struct data_stat_entry *stat = NULL;
4034 unsigned int overall_avg_rd_perf2;
4035 unsigned int overall_avg_wr_perf2;
4036 int rd_percent, wr_percent;
4038 seq_printf(s, "SDHCI(%s): perf statistics stat_size=%d\n",
4039 mmc_hostname(host->mmc),
4040 host->sdhci_data_stat.stat_size
4042 if (host->sdhci_data_stat.stat_size) {
4043 seq_printf(s, "SDHCI(%s): perf statistics:\n",
4044 mmc_hostname(host->mmc));
4046 "Note: Performance figures in kilo bits per sec(kbps)\n");
4048 "S.No. Block Direction Num blks/ Total Total Total Last Last usec Avg kbps Last kbps Min kbps Max kbps\n");
4050 " Size (R/W) transfer Bytes Transfers Time(usec) Bytes Duration Perf Perf Perf Perf\n");
4056 for (i = 0; i < host->sdhci_data_stat.stat_size; i++) {
4058 stat = host->sdhci_data_stat.head;
4062 pr_err("%s %s: sdhci data stat head NULL i=%d\n",
4063 mmc_hostname(host->mmc), __func__, i);
4067 ((stat->total_bytes << 3) * 1000),
4068 stat->total_usecs, &avg_perf2);
4070 (((u32)stat->current_transferred_bytes << 3) * 1000),
4071 stat->duration_usecs,
4072 &last_perf_in_class);
4073 if (stat->is_read) {
4074 total_rd_bytes += stat->total_bytes;
4075 total_rd_usecs += stat->total_usecs;
4077 total_wr_bytes += stat->total_bytes;
4078 total_wr_usecs += stat->total_usecs;
4081 "%2d %4d %c %8d %16lld %8d %16lld %8d %8d %8d %8d %8d %8d\n",
4083 stat->stat_blk_size,
4084 stat->is_read ? 'R' : 'W',
4085 stat->stat_blks_per_transfer,
4087 stat->total_transfers,
4089 stat->current_transferred_bytes,
4090 stat->duration_usecs,
4099 ((total_rd_bytes << 3) * 1000),
4100 total_rd_usecs, &overall_avg_rd_perf2);
4102 (total_rd_bytes * 1000),
4103 (total_rd_bytes + total_wr_bytes), &rd_percent);
4105 "Read Total_bytes=%lldB, time=%lldusecs, overall kbps=%d Rd percent=%d.%d\n",
4106 total_rd_bytes, total_rd_usecs,
4107 overall_avg_rd_perf2,
4108 (rd_percent / 10), (rd_percent % 10));
4111 ((total_wr_bytes << 3) * 1000),
4112 total_wr_usecs, &overall_avg_wr_perf2);
4114 (total_wr_bytes * 1000),
4115 (total_rd_bytes + total_wr_bytes), &wr_percent);
4117 "Write Total_bytes=%lldB, time=%lldusecs, overall kbps=%d, Wr percent=%d.%d\n",
4118 total_wr_bytes, total_wr_usecs,
4119 overall_avg_wr_perf2,
4120 (wr_percent / 10), (wr_percent % 10));
4126 static int sdhci_perf_stats_dump(struct inode *inode, struct file *file)
4128 return single_open(file, show_sdhci_perf_stats, inode->i_private);
4131 static const struct file_operations flush_sdhci_perf_stats_fops = {
4132 .open = sdhci_perf_stats_dump,
4134 .llseek = seq_lseek,
4135 .release = single_release,
4138 static int restart_sdhci_perf_stats(struct seq_file *s, void *data)
4140 struct sdhci_host *host = s->private;
4142 free_stats_nodes(host);
4146 static int sdhci_perf_stats_restart(struct inode *inode, struct file *file)
4148 return single_open(file, restart_sdhci_perf_stats, inode->i_private);
4151 static const struct file_operations reset_sdhci_perf_stats_fops = {
4152 .open = sdhci_perf_stats_restart,
4154 .llseek = seq_lseek,
4155 .release = single_release,
4158 static void sdhci_debugfs_init(struct sdhci_host *host)
4160 struct dentry *root = host->debugfs_root;
4163 * debugfs nodes were earlier created from sdhci-tegra;
4164 * in this change the root debugfs node is created on a first-come-first-served basis
4167 root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
4168 if (IS_ERR_OR_NULL(root))
4170 host->debugfs_root = root;
4173 if (!debugfs_create_u32("enable_sdhci_perf_stats", S_IRUGO | S_IWUSR,
4174 root, (u32 *)&host->enable_sdhci_perf_stats))
4177 if (!debugfs_create_file("reset_sdhci_perf_stats", S_IRUGO,
4178 root, host, &reset_sdhci_perf_stats_fops))
4181 if (!debugfs_create_file("sdhci_perf_stats", S_IRUGO,
4182 root, host, &flush_sdhci_perf_stats_fops))
4185 if (!debugfs_create_u32("sdhci_perf_no_data_transfer_count", S_IRUGO,
4186 root, (u32 *)&host->no_data_transfer_count))
4189 if (!debugfs_create_u32("max_pio_size", S_IRUGO | S_IWUSR,
4190 root, (u32 *)&host->max_pio_size))
4193 if (!debugfs_create_u32("max_pio_blocks", S_IRUGO | S_IWUSR,
4194 root, (u32 *)&host->max_pio_blocks))
4200 debugfs_remove_recursive(root);
4201 host->debugfs_root = NULL;
4207 /* runtime PM is not enabled before the host is added */
4208 int sdhci_add_host(struct sdhci_host *host)
4210 struct mmc_host *mmc;
4211 u32 caps[2] = {0, 0};
4212 u32 max_current_caps;
4213 unsigned int ocr_avail;
4216 WARN_ON(host == NULL);
4223 host->quirks = debug_quirks;
4225 host->quirks2 = debug_quirks2;
4227 sdhci_reset(host, SDHCI_RESET_ALL);
4229 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
4230 host->version = (host->version & SDHCI_SPEC_VER_MASK)
4231 >> SDHCI_SPEC_VER_SHIFT;
4232 if (host->version > SDHCI_SPEC_410) {
4233 pr_err("%s: Unknown controller version (%d). "
4234 "You may experience problems.\n", mmc_hostname(mmc),
4238 host->mrq_cmd = NULL;
4239 host->mrq_dat = NULL;
4240 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
4241 sdhci_readl(host, SDHCI_CAPABILITIES);
4243 if (host->version >= SDHCI_SPEC_300)
4244 caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
4246 sdhci_readl(host, SDHCI_CAPABILITIES_1);
4248 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4249 host->flags |= SDHCI_USE_SDMA;
4250 else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
4251 DBG("Controller doesn't have SDMA capability\n");
4253 host->flags |= SDHCI_USE_SDMA;
4255 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4256 (host->flags & SDHCI_USE_SDMA)) {
4257 DBG("Disabling DMA as it is marked broken\n");
4258 host->flags &= ~SDHCI_USE_SDMA;
4261 if ((host->version >= SDHCI_SPEC_200) &&
4262 (caps[0] & SDHCI_CAN_DO_ADMA2))
4263 host->flags |= SDHCI_USE_ADMA;
4265 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4266 (host->flags & SDHCI_USE_ADMA)) {
4267 DBG("Disabling ADMA as it is marked broken\n");
4268 host->flags &= ~SDHCI_USE_ADMA;
4271 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4272 if (host->ops->enable_dma) {
4273 if (host->ops->enable_dma(host)) {
4274 pr_warning("%s: No suitable DMA "
4275 "available. Falling back to PIO.\n",
4278 ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4283 if (host->flags & SDHCI_USE_ADMA) {
4285 * We need to allocate descriptors for all sg entries
4286 * (128) and potentially one alignment transfer for
4287 * each of those entries. Simply allocating 128 bits
4290 if (mmc_dev(host->mmc)->dma_mask &&
4291 mmc_dev(host->mmc)->coherent_dma_mask) {
4292 host->adma_desc = dma_alloc_coherent(
4293 mmc_dev(host->mmc), (128 * 2 + 1) * 8,
4294 &host->adma_addr, GFP_KERNEL);
4295 if (!host->adma_desc)
4298 host->align_buffer = dma_alloc_coherent(
4299 mmc_dev(host->mmc), 128 * 8,
4300 &host->align_addr, GFP_KERNEL);
4301 if (!host->align_buffer) {
4302 dma_free_coherent(mmc_dev(host->mmc),
4306 host->adma_desc = NULL;
4310 host->use_dma_alloc = true;
4312 BUG_ON(host->adma_addr & 0x3);
4313 BUG_ON(host->align_addr & 0x3);
4318 host->adma_desc = kmalloc((128 * 2 + 1) * 8, GFP_KERNEL);
4319 host->align_buffer = kmalloc(128 * 8, GFP_KERNEL);
4320 if (!host->adma_desc || !host->align_buffer) {
4321 kfree(host->adma_desc);
4322 kfree(host->align_buffer);
4323 pr_warning("%s: Unable to allocate ADMA "
4324 "buffers. Falling back to standard DMA.\n",
4326 host->flags &= ~SDHCI_USE_ADMA;
4332 * If we use DMA, then it's up to the caller to set the DMA
4333 * mask, but PIO does not need the hw shim so we set a new
4334 * mask here in that case.
4336 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4337 host->dma_mask = DMA_BIT_MASK(64);
4338 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
4341 if (host->version >= SDHCI_SPEC_300)
4342 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
4343 >> SDHCI_CLOCK_BASE_SHIFT;
4345 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
4346 >> SDHCI_CLOCK_BASE_SHIFT;
4348 host->max_clk *= 1000000;
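/*
 * Illustrative example (hypothetical value): a base-clock field of 200 in
 * the capabilities register means 200 MHz, so max_clk becomes 200000000
 * after the multiplication above.
 */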
4350 if (mmc->caps2 & MMC_CAP2_HS533)
4351 host->max_clk = MMC_HS533_MAX_DTR;
4353 if (host->max_clk == 0 || host->quirks &
4354 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4355 if (!host->ops->get_max_clock) {
4356 pr_err("%s: Hardware doesn't specify base clock "
4357 "frequency.\n", mmc_hostname(mmc));
4360 host->max_clk = host->ops->get_max_clock(host);
4364 * In case of Host Controller v3.00, find out whether clock
4365 * multiplier is supported.
4367 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
4368 SDHCI_CLOCK_MUL_SHIFT;
4371 * In case the value in Clock Multiplier is 0, then programmable
4372 * clock mode is not supported, otherwise the actual clock
4373 * multiplier is one more than the value of Clock Multiplier
4374 * in the Capabilities Register.
4380 * Set host parameters.
4382 mmc->ops = &sdhci_ops;
4383 mmc->f_max = host->max_clk;
4384 if (host->ops->get_min_clock)
4385 mmc->f_min = host->ops->get_min_clock(host);
4386 else if (host->version >= SDHCI_SPEC_300) {
4387 if (host->clk_mul) {
4388 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
4389 mmc->f_max = host->max_clk * host->clk_mul;
4391 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4393 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
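/*
 * Illustrative example: with a 208 MHz base clock, a v3.00 host without a
 * clock multiplier can divide down by SDHCI_MAX_DIV_SPEC_300 (2046) to
 * roughly 102 kHz, while a v2.00 host is limited to a divisor of
 * SDHCI_MAX_DIV_SPEC_200 (256), i.e. about 812 kHz.
 */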
4396 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
4397 if (host->timeout_clk == 0) {
4398 if (host->ops->get_timeout_clock) {
4399 host->timeout_clk = host->ops->get_timeout_clock(host);
4400 } else if (!(host->quirks &
4401 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4402 pr_err("%s: Hardware doesn't specify timeout clock "
4403 "frequency.\n", mmc_hostname(mmc));
4407 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
4408 host->timeout_clk *= 1000;
4410 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
4411 host->timeout_clk = mmc->f_max / 1000;
4413 if (!(host->quirks2 & SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO))
4414 mmc->max_discard_to = (1 << 27) / host->timeout_clk;
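/*
 * Worked example (illustrative): timeout_clk is in kHz at this point, so
 * with a 48 MHz timeout clock (48000 kHz) the longest 2^27-cycle timeout
 * is (1 << 27) / 48000, roughly 2796 ms, which becomes max_discard_to.
 */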
4416 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4417 host->flags |= SDHCI_AUTO_CMD12;
4419 /* Auto-CMD23 stuff only works in ADMA or PIO. */
4420 if ((host->version >= SDHCI_SPEC_300) &&
4421 ((host->flags & SDHCI_USE_ADMA) ||
4422 !(host->flags & SDHCI_USE_SDMA))) {
4423 host->flags |= SDHCI_AUTO_CMD23;
4424 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
4426 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
4430 * A controller may support 8-bit width, but the board itself
4431 * might not have the pins brought out. Boards that support
4432 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4433 * their platform code before calling sdhci_add_host(), and we
4434 * won't assume 8-bit width for hosts without that CAP.
4436 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4437 mmc->caps |= MMC_CAP_4_BIT_DATA;
4439 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4440 mmc->caps &= ~MMC_CAP_CMD23;
4442 if (caps[0] & SDHCI_CAN_DO_HISPD)
4443 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4445 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4446 !(host->mmc->caps & MMC_CAP_NONREMOVABLE) && !(host->ops->get_cd))
4447 mmc->caps |= MMC_CAP_NEEDS_POLL;
4449 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
4450 host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
4451 if (IS_ERR_OR_NULL(host->vqmmc)) {
4452 if (PTR_ERR(host->vqmmc) < 0) {
4453 pr_info("%s: no vqmmc regulator found\n",
4458 ret = regulator_enable(host->vqmmc);
4459 if (!regulator_is_supported_voltage(host->vqmmc, 1700000,
4461 caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
4462 SDHCI_SUPPORT_SDR50 |
4463 SDHCI_SUPPORT_DDR50);
4465 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4466 mmc_hostname(mmc), ret);
4471 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
4472 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4473 SDHCI_SUPPORT_DDR50);
4475 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4476 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4477 SDHCI_SUPPORT_DDR50))
4478 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4480 /* SDR104 support also implies SDR50 support */
4481 if (caps[1] & SDHCI_SUPPORT_SDR104)
4482 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4483 else if (caps[1] & SDHCI_SUPPORT_SDR50)
4484 mmc->caps |= MMC_CAP_UHS_SDR50;
4486 if (caps[1] & SDHCI_SUPPORT_DDR50)
4487 mmc->caps |= MMC_CAP_UHS_DDR50;
4489 /* Does the host need tuning for SDR50? */
4490 if (caps[1] & SDHCI_USE_SDR50_TUNING)
4491 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4493 /* Does the host need tuning for HS200? */
4494 if (mmc->caps2 & MMC_CAP2_HS200)
4495 host->flags |= SDHCI_HS200_NEEDS_TUNING;
4497 /* Driver Type(s) (A, C, D) supported by the host */
4498 if (caps[1] & SDHCI_DRIVER_TYPE_A)
4499 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4500 if (caps[1] & SDHCI_DRIVER_TYPE_C)
4501 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4502 if (caps[1] & SDHCI_DRIVER_TYPE_D)
4503 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4505 /* Initial value for re-tuning timer count */
4506 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
4507 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
4509 * If the re-tuning timer count value is 0xF, the timer count
4510 * information should be obtained in a non-standard way.
4512 if (host->tuning_count == 0xF) {
4513 if (host->ops->get_tuning_counter) {
4514 host->tuning_count =
4515 host->ops->get_tuning_counter(host);
4517 host->tuning_count = 0;
4522 * In case Re-tuning Timer is not disabled, the actual value of
4523 * re-tuning timer will be 2 ^ (n - 1).
4525 if (host->tuning_count)
4526 host->tuning_count = 1 << (host->tuning_count - 1);
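/*
 * Illustrative example: a re-tuning timer count field of 4 in the
 * capabilities register translates to 1 << (4 - 1) = 8 seconds between
 * re-tuning cycles; a field of 11 gives 1024 seconds.
 */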
4528 /* Re-tuning mode supported by the Host Controller */
4529 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
4530 SDHCI_RETUNING_MODE_SHIFT;
4534 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
4535 if (IS_ERR_OR_NULL(host->vmmc)) {
4536 if (PTR_ERR(host->vmmc) < 0) {
4537 pr_info("%s: no vmmc regulator found\n",
#ifdef CONFIG_REGULATOR
    /*
     * Voltage range check makes sense only if the regulator reports
     * any voltage value.
     */
    if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) {
        ret = regulator_is_supported_voltage(host->vmmc, 2700000,
            3600000);
        if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330)))
            caps[0] &= ~SDHCI_CAN_VDD_330;
        if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300)))
            caps[0] &= ~SDHCI_CAN_VDD_300;
        ret = regulator_is_supported_voltage(host->vmmc, 1700000,
            1950000);
        if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180)))
            caps[0] &= ~SDHCI_CAN_VDD_180;
    }
#endif /* CONFIG_REGULATOR */
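    /*
     * The 2.7-3.6 V and 1.7-1.95 V ranges checked above correspond to the
     * 3.3 V / 3.0 V and 1.8 V VDD capability bits that get trimmed.
     */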
    /*
     * According to SD Host Controller spec v3.00, if the Host System
     * can afford more than 150mA, the Host Driver should set XPC to 1.
     * The value is meaningful only if Voltage Support in the Capabilities
     * register is set. The actual current value is 4 times the register
     * value.
     */
    max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
    if (!max_current_caps && host->vmmc) {
        u32 curr = regulator_get_current_limit(host->vmmc);
        if (curr > 0) {

            /* convert to SDHCI_MAX_CURRENT format */
            curr = curr/1000;  /* convert to mA */
            curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

            curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
            max_current_caps =
                (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
                (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
                (curr << SDHCI_MAX_CURRENT_180_SHIFT);
        }
    }
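    /*
     * Worked example (assuming the 4 mA-per-step register format from
     * sdhci.h): a 1.8 A vmmc limit becomes 1800 / 4 = 450 steps, which
     * min_t() clamps to SDHCI_MAX_CURRENT_LIMIT, i.e. roughly 1 A.
     */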
    if (caps[0] & SDHCI_CAN_VDD_330) {
        ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

        mmc->max_current_330 = ((max_current_caps &
                SDHCI_MAX_CURRENT_330_MASK) >>
                SDHCI_MAX_CURRENT_330_SHIFT) *
                SDHCI_MAX_CURRENT_MULTIPLIER;
    }
    if (caps[0] & SDHCI_CAN_VDD_300) {
        ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

        mmc->max_current_300 = ((max_current_caps &
                SDHCI_MAX_CURRENT_300_MASK) >>
                SDHCI_MAX_CURRENT_300_SHIFT) *
                SDHCI_MAX_CURRENT_MULTIPLIER;
    }
    if (caps[0] & SDHCI_CAN_VDD_180) {
        ocr_avail |= MMC_VDD_165_195;

        mmc->max_current_180 = ((max_current_caps &
                SDHCI_MAX_CURRENT_180_MASK) >>
                SDHCI_MAX_CURRENT_180_SHIFT) *
                SDHCI_MAX_CURRENT_MULTIPLIER;
    }
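    /*
     * ocr_avail now holds every VDD window the controller claims to
     * support; below it is copied per bus type (SDIO/SD/MMC) and, where
     * the platform supplied a mask, further restricted by it.
     */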
4611 mmc->ocr_avail = ocr_avail;
4612 mmc->ocr_avail_sdio = ocr_avail;
4613 if (host->ocr_avail_sdio)
4614 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4615 mmc->ocr_avail_sd = ocr_avail;
4616 if (host->ocr_avail_sd)
4617 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4618 else /* normal SD controllers don't support 1.8V */
4619 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4620 mmc->ocr_avail_mmc = ocr_avail;
4621 if (host->ocr_avail_mmc)
4622 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
    if (mmc->ocr_avail == 0) {
        pr_err("%s: Hardware doesn't report any "
            "supported voltages.\n", mmc_hostname(mmc));
        return -ENODEV;
    }
4630 spin_lock_init(&host->lock);
    /*
     * Maximum number of segments. Depends on if the hardware
     * can do scatter/gather or not.
     */
    if (host->flags & SDHCI_USE_ADMA)
        mmc->max_segs = 128;
    else if (host->flags & SDHCI_USE_SDMA)
        mmc->max_segs = 1;
    else /* PIO */
        mmc->max_segs = 128;
    /*
     * Maximum number of sectors in one transfer. Limited by DMA boundary
     * size (512KiB).
     */
    mmc->max_req_size = 524288;
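    /* 524288 bytes = 512 KiB, i.e. 1024 sectors of 512 bytes. */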
    /*
     * Maximum segment size. Could be one segment with the maximum number
     * of bytes. When doing hardware scatter/gather, each entry cannot
     * be larger than 64 KiB though.
     */
    if (host->flags & SDHCI_USE_ADMA) {
        if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
            mmc->max_seg_size = 65535;
        else
            mmc->max_seg_size = 65536;
    } else {
        mmc->max_seg_size = mmc->max_req_size;
    }
    /*
     * Maximum block size. This varies from controller to controller and
     * is specified in the capabilities register.
     */
    if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
        mmc->max_blk_size = 2;
    } else {
        mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
                SDHCI_MAX_BLOCK_SHIFT;
        if (mmc->max_blk_size >= 3) {
            pr_info("%s: Invalid maximum block size, "
                "assuming 512 bytes\n", mmc_hostname(mmc));
            mmc->max_blk_size = 0;
        }
    }

    mmc->max_blk_size = 512 << mmc->max_blk_size;
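    /*
     * Worked example: a capabilities encoding of 2 (or the
     * FORCE_BLK_SZ_2048 quirk) yields 512 << 2 = 2048 bytes; the default
     * encoding of 0 yields the usual 512-byte blocks.
     */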
    /*
     * Maximum block count.
     */
    if (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK)
        mmc->max_blk_count = 1;
    else
        mmc->max_blk_count = (host->version > SDHCI_SPEC_400) ?
            ((1ULL << BLOCK_COUNT_32BIT) - 1) :
            ((1 << BLOCK_COUNT_16BIT) - 1);
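    /*
     * A 16-bit Block Count register caps a single request at 65535
     * blocks; hosts newer than SDHCI_SPEC_400 (a downstream version
     * define) advertise the wider 32-bit count instead.
     */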
#ifdef CONFIG_CMD_DUMP
    mmc->dbg_host_cnt = 0;
#endif
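    /*
     * Completion handling is split across tasklets: card_tasklet services
     * insert/remove events, finish_tasklet completes ordinary requests,
     * and the cmd/dat variants are used when the downstream command-queue
     * mode (see MMC_CHECK_CMDQ_MODE) splits command and data completion.
     */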
4698 tasklet_init(&host->card_tasklet,
4699 sdhci_tasklet_card, (unsigned long)host);
4700 tasklet_init(&host->finish_tasklet,
4701 sdhci_tasklet_finish, (unsigned long)host);
4702 tasklet_init(&host->finish_cmd_tasklet,
4703 sdhci_tasklet_cmd_finish, (unsigned long)host);
4704 tasklet_init(&host->finish_dat_tasklet,
4705 sdhci_tasklet_dat_finish, (unsigned long)host);
4707 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
4709 if (host->version >= SDHCI_SPEC_300) {
4710 init_waitqueue_head(&host->buf_ready_int);
4712 /* Initialize re-tuning timer */
4713 init_timer(&host->tuning_timer);
4714 host->tuning_timer.data = (unsigned long)host;
4715 host->tuning_timer.function = sdhci_tuning_timer;
    ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
        mmc_hostname(mmc), host);
    if (ret) {
        pr_err("%s: Failed to request IRQ %d: %d\n",
            mmc_hostname(mmc), host->irq, ret);
        goto untasklet;
    }
4726 sdhci_init(host, 0);
4728 host->sysedpc = sysedp_create_consumer(dev_name(mmc_dev(mmc)),
4729 dev_name(mmc_dev(mmc)));
#ifdef CONFIG_MMC_DEBUG
    sdhci_dumpregs(host);
#endif
4735 #ifdef SDHCI_USE_LEDS_CLASS
4736 snprintf(host->led_name, sizeof(host->led_name),
4737 "%s::", mmc_hostname(mmc));
4738 host->led.name = host->led_name;
4739 host->led.brightness = LED_OFF;
4740 host->led.default_trigger = mmc_hostname(mmc);
4741 host->led.brightness_set = sdhci_led_control;
    ret = led_classdev_register(mmc_dev(mmc), &host->led);
    if (ret) {
        pr_err("%s: Failed to register LED device: %d\n",
            mmc_hostname(mmc), ret);
        goto reset;
    }
#endif

    mmc_add_host(mmc);
4755 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4756 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4757 (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
4758 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4760 sdhci_enable_card_detection(host);
4762 pm_runtime_enable(mmc_dev(mmc));
4763 pm_runtime_use_autosuspend(mmc_dev(mmc));
    if (host->quirks2 & SDHCI_QUIRK2_MMC_RTPM) {
        /*
         * The autosuspend delay below can be increased/decreased
         * based on power and perf data.
         */
        pm_runtime_set_autosuspend_delay(mmc_dev(mmc),
            MMC_RTPM_MSEC_TMOUT);
    }
    host->runtime_pm_init_done = true;
#ifdef CONFIG_DEBUG_FS
    /* Add debugfs nodes */
    sdhci_debugfs_init(host);
#endif

    return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
    sdhci_reset(host, SDHCI_RESET_ALL);
    sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
    free_irq(host->irq, host);
#endif
untasklet:
    tasklet_kill(&host->card_tasklet);
    tasklet_kill(&host->finish_tasklet);
    tasklet_kill(&host->finish_cmd_tasklet);
    tasklet_kill(&host->finish_dat_tasklet);

    return ret;
}
4796 EXPORT_SYMBOL_GPL(sdhci_add_host);
void sdhci_runtime_forbid(struct sdhci_host *host)
{
    pm_runtime_forbid(mmc_dev(host->mmc));
}
EXPORT_SYMBOL_GPL(sdhci_runtime_forbid);
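/*
 * sdhci_remove_host - remove a host from the MMC layer
 * @dead: when non-zero the controller is assumed to be unreachable, so any
 *        in-flight request is failed with -ENOMEDIUM and the final reset of
 *        the hardware is skipped.
 */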
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
    unsigned long flags;

    sdhci_runtime_pm_get(host);
    if (dead) {
        spin_lock_irqsave(&host->lock, flags);

        host->flags |= SDHCI_DEVICE_DEAD;

        if (host->mrq_cmd || host->mrq_dat) {
            pr_err("%s: Controller removed during transfer!\n",
                mmc_hostname(host->mmc));

            if (host->mrq_cmd) {
                host->mrq_cmd->cmd->error = -ENOMEDIUM;
                if (MMC_CHECK_CMDQ_MODE(host))
                    tasklet_schedule(&host->finish_cmd_tasklet);
                else
                    tasklet_schedule(&host->finish_tasklet);
            }
            if (host->mrq_dat) {
                host->mrq_dat->cmd->error = -ENOMEDIUM;
                if (MMC_CHECK_CMDQ_MODE(host))
                    tasklet_schedule(&host->finish_dat_tasklet);
                else
                    tasklet_schedule(&host->finish_tasklet);
            }
        }

        spin_unlock_irqrestore(&host->lock, flags);
    }
4837 sdhci_disable_card_detection(host);
4839 mmc_remove_host(host->mmc);
#ifdef SDHCI_USE_LEDS_CLASS
    led_classdev_unregister(&host->led);
#endif

    if (!dead)
        sdhci_reset(host, SDHCI_RESET_ALL);

    sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
    free_irq(host->irq, host);
4851 del_timer_sync(&host->timer);
4853 tasklet_kill(&host->card_tasklet);
4854 tasklet_kill(&host->finish_tasklet);
4855 tasklet_kill(&host->finish_cmd_tasklet);
4856 tasklet_kill(&host->finish_dat_tasklet);
    if (host->vmmc) {
        regulator_disable(host->vmmc);
        regulator_put(host->vmmc);
    }

    if (host->vqmmc) {
        regulator_disable(host->vqmmc);
        regulator_put(host->vqmmc);
    }
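    /*
     * Release the ADMA descriptor table and the alignment bounce buffer.
     * use_dma_alloc (a downstream flag) records whether they came from
     * dma_alloc_coherent() or plain kmalloc(), so the matching free is
     * used below.
     */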
    if (host->use_dma_alloc) {
        dma_free_coherent(mmc_dev(host->mmc), (128 * 2 + 1) * 8,
            host->adma_desc, host->adma_addr);
        dma_free_coherent(mmc_dev(host->mmc), 128 * 8,
            host->align_buffer, host->align_addr);
    } else {
        kfree(host->adma_desc);
        kfree(host->align_buffer);
    }
4878 host->adma_desc = NULL;
4879 host->align_buffer = NULL;
4881 sdhci_runtime_pm_put(host);
4882 sysedp_free_consumer(host->sysedpc);
    host->sysedpc = NULL;
}
4886 EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
    mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);
4895 /*****************************************************************************\
4897 * Driver init/exit *
4899 \*****************************************************************************/
static int __init sdhci_drv_init(void)
{
    pr_info(DRIVER_NAME
        ": Secure Digital Host Controller Interface driver\n");
    pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

    return 0;
}

static void __exit sdhci_drv_exit(void)
{
}
4914 module_init(sdhci_drv_init);
4915 module_exit(sdhci_drv_exit);
4917 module_param(debug_quirks, uint, 0444);
4918 module_param(debug_quirks2, uint, 0444);
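/*
 * debug_quirks/debug_quirks2 let quirk bits be forced at load time for
 * debugging, e.g. "modprobe sdhci debug_quirks=0x1" (the value is only an
 * example; consult the quirk definitions for meaningful bits).
 */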
4920 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4921 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4922 MODULE_LICENSE("GPL");
4924 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4925 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");