]> rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/blob - drivers/mmc/host/sdhci.c
mmc: host: tegra: disable kso mode reg dump
[sojka/nv-tegra/linux-3.10.git] / drivers / mmc / host / sdhci.c
1 /*
2  *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *  Copyright (C) 2012-2015, NVIDIA CORPORATION.  All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or (at
10  * your option) any later version.
11  *
12  * Thanks to the following companies for their support:
13  *
14  *     - JMicron (hardware and technical support)
15  */
16
17 #include <linux/delay.h>
18 #include <linux/highmem.h>
19 #include <linux/io.h>
20 #include <linux/module.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/slab.h>
23 #include <linux/scatterlist.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/platform_device.h>
27 #include <linux/sched.h>
28
29 #include <linux/leds.h>
30
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/slot-gpio.h>
35
36 #include <linux/sysedp.h>
37 #ifdef CONFIG_DEBUG_FS
38 #include <linux/debugfs.h>
39 #include <linux/ktime.h>
40 #endif
41
42 #ifdef CONFIG_EMMC_BLKTRACE
43 #include <linux/mmc/emmc-trace.h>
44 #include "../card/queue.h"
45 #endif
46 #include "sdhci.h"
47
48 #define DRIVER_NAME "sdhci"
49
50 #define DBG(f, x...) \
51         pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
52 #define MMC_CHECK_CMDQ_MODE(host)                       \
53         (host && host->mmc &&                                   \
54         host->mmc->card &&                                              \
55         host->mmc->card->ext_csd.cmdq_mode_en)
56
57 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
58         defined(CONFIG_MMC_SDHCI_MODULE))
59 #define SDHCI_USE_LEDS_CLASS
60 #endif
61
62 #define MAX_TUNING_LOOP 40
63
64 #ifdef CONFIG_CMD_DUMP
65 static volatile unsigned int printk_cpu_test = UINT_MAX;
66 struct timeval cur_tv;
67 struct timeval prev_tv, curr_tv;
68 void mmc_cmd_dump(struct mmc_host *host);
69 void dbg_add_host_log(struct mmc_host *host, int type, int cmd, int arg)
70 {
71         unsigned long long t;
72         unsigned long long nanosec_rem;
73         unsigned long flags;
74         spin_lock_irqsave(&host->cmd_dump_lock, flags);
75
76         if (host->dbg_run_host_log_dat[host->dbg_host_cnt - 1].type == type &&
77                 host->dbg_run_host_log_dat[host->dbg_host_cnt - 1].cmd == cmd &&
78                 host->dbg_run_host_log_dat[host->dbg_host_cnt - 1].arg == arg) {
79                 spin_unlock_irqrestore(&host->cmd_dump_lock, flags);
80                 return;
81         }
82         t = cpu_clock(printk_cpu_test);
83         nanosec_rem = do_div(t, 1000000000)/1000;
84         do_gettimeofday(&cur_tv);
85         host->dbg_run_host_log_dat[host->dbg_host_cnt].time_sec = t;
86         host->dbg_run_host_log_dat[host->dbg_host_cnt].time_usec = nanosec_rem;
87         host->dbg_run_host_log_dat[host->dbg_host_cnt].type = type;
88         host->dbg_run_host_log_dat[host->dbg_host_cnt].cmd = cmd;
89         host->dbg_run_host_log_dat[host->dbg_host_cnt].arg = arg;
90         host->dbg_host_cnt++;
91         if (host->dbg_host_cnt >= dbg_max_cnt)
92                 host->dbg_host_cnt = 0;
93         spin_unlock_irqrestore(&host->cmd_dump_lock, flags);
94 }
95 #endif
96
97 /* MMC_RTPM timeout */
98 #define MMC_RTPM_MSEC_TMOUT 10
99
100 /* SDIO 1msec timeout, but use 10msec timeout for HZ=100 */
101 #define SDIO_CLK_GATING_TICK_TMOUT ((HZ >= 1000) ? (HZ / 1000) : 1)
102 /* 20msec EMMC delayed clock gate timeout */
103 #define EMMC_CLK_GATING_TICK_TMOUT ((HZ >= 50) ? (HZ / 50) : 2)
104
105 #define IS_SDIO_CARD(host) \
106                 (host->mmc->card && \
107                 (host->mmc->card->type == MMC_TYPE_SDIO))
108
109 #define IS_EMMC_CARD(host) \
110                 (host->mmc->card && \
111                 (host->mmc->card->type == MMC_TYPE_MMC))
112
113 #define IS_SDIO_CARD_OR_EMMC(host) \
114                 (host->mmc->card && \
115                 ((host->mmc->card->type == MMC_TYPE_SDIO) || \
116                 (host->mmc->card->type == MMC_TYPE_MMC)))
117
118 #define IS_DELAYED_CLK_GATE(host) \
119                 ((host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE) && \
120                 (IS_SDIO_CARD_OR_EMMC(host)) && \
121                 (host->mmc->caps2 & MMC_CAP2_CLOCK_GATING))
122
123 #ifdef CONFIG_DEBUG_FS
124
125 #define IS_32_BIT(x)    (x < (1ULL << 32))
126
127 #define IS_DATA_READ(flags)     ((flags & MMC_DATA_READ) ? true : false)
128
129 #define PERF_STAT_COMPARE(stat, blk_size, blk_count, is_read) \
130                 ( \
131                         (stat->is_read == is_read) && \
132                         (stat->stat_blk_size == blk_size) && \
133                         (stat->stat_blks_per_transfer == blk_count) \
134                 )
135
136 #endif
137
138 #define MIN_SDMMC_FREQ 400000
139
140 /* Response error index for SD Host controller spec
141  * defined errors listed in next comment
142  */
143 #define RESP_ERROR_INDEX(x) ((x & SDHCI_INT_CRC) << 1 | \
144                         (x & SDHCI_INT_TIMEOUT))
145
146 /* based on the SD Host controller spec these three errors are logged
147  * CommandCRC Error     Command Timeout Error         Kinds of error
148  * 0                    0                             No Error
149  * 0                    1                             Response Timeout Error
150  * 1                    0                             Response CRC Error
151  * 1                    1                             CMD line conflict
152  */
/*
 * Human-readable names for the response error classes indexed by
 * RESP_ERROR_INDEX().  Fixed typo: "Reaponse" -> "Response".
 */
static char *resp_error[4] = {"No error", "Response TIMEOUT error",
				"Response CRC error",
				"CMD LINE CONFLICT error"};
156 static unsigned int debug_quirks;
157 static unsigned int debug_quirks2;
158
159 static void sdhci_finish_data(struct sdhci_host *);
160
161 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
162 static void sdhci_finish_command(struct sdhci_host *);
163 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
164 static int sdhci_validate_sd2_0(struct mmc_host *mmc);
165 static void sdhci_tuning_timer(unsigned long data);
166 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
167
#ifdef CONFIG_PM_RUNTIME
/* Runtime-PM reference helpers; real implementations live later in this file. */
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
#else
/*
 * Without runtime PM the get/put/suspend/resume helpers collapse to
 * successful no-ops so callers need no #ifdefs of their own.
 */
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	return 0;
}
#endif
189
/*
 * sdhci_dumpregs - dump the SDHCI register file to the kernel log.
 *
 * Called on error paths (reset timeouts, unexpected interrupts) at
 * pr_err level.  Ends by giving the platform driver a hook
 * (->dump_host_cust_regs) to append vendor-specific registers.
 */
static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_err(DRIVER_NAME ": ================== REGISTER DUMP (%s)==================\n",
		mmc_hostname(host->mmc));

	pr_err(DRIVER_NAME ": Sys addr[0x%03x]: 0x%08x | Version[0x%03x]:  0x%08x\n",
		SDHCI_DMA_ADDRESS, sdhci_readl(host, SDHCI_DMA_ADDRESS),
		SDHCI_HOST_VERSION, sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_err(DRIVER_NAME ": Blk size[0x%03x]: 0x%08x | Blk cnt[0x%03x]:  0x%08x\n",
		SDHCI_BLOCK_SIZE, sdhci_readw(host, SDHCI_BLOCK_SIZE),
		SDHCI_BLOCK_COUNT, sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_err(DRIVER_NAME ": Argument[0x%03x]: 0x%08x | Trn mode[0x%03x]: 0x%08x\n",
		SDHCI_ARGUMENT, sdhci_readl(host, SDHCI_ARGUMENT),
		SDHCI_TRANSFER_MODE, sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_err(DRIVER_NAME ": Present[0x%03x]:  0x%08x | Host ctl[0x%03x]: 0x%08x\n",
		SDHCI_PRESENT_STATE, sdhci_readl(host, SDHCI_PRESENT_STATE),
		SDHCI_HOST_CONTROL, sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_err(DRIVER_NAME ": Power[0x%03x]:    0x%08x | Blk gap[0x%03x]:  0x%08x\n",
		SDHCI_POWER_CONTROL, sdhci_readb(host, SDHCI_POWER_CONTROL),
		SDHCI_BLOCK_GAP_CONTROL, sdhci_readb(host,
		SDHCI_BLOCK_GAP_CONTROL));
	pr_err(DRIVER_NAME ": Wake-up[0x%03x]:  0x%08x | Clock[0x%03x]:    0x%08x\n",
		SDHCI_WAKE_UP_CONTROL, sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		SDHCI_CLOCK_CONTROL, sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_err(DRIVER_NAME ": Timeout[0x%03x]:  0x%08x | Int stat[0x%03x]: 0x%08x\n",
		SDHCI_TIMEOUT_CONTROL, sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		SDHCI_INT_STATUS, sdhci_readl(host, SDHCI_INT_STATUS));
	pr_err(DRIVER_NAME ": Int enab[0x%03x]: 0x%08x | Sig enab[0x%03x]: 0x%08x\n",
		SDHCI_INT_ENABLE, sdhci_readl(host, SDHCI_INT_ENABLE),
		SDHCI_SIGNAL_ENABLE, sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_err(DRIVER_NAME ": AC12 err[0x%03x]: 0x%08x | Slot int[0x%03x]: 0x%08x\n",
		SDHCI_ACMD12_ERR, sdhci_readw(host, SDHCI_ACMD12_ERR),
		SDHCI_SLOT_INT_STATUS, sdhci_readw(host,
		SDHCI_SLOT_INT_STATUS));
	pr_err(DRIVER_NAME ": Caps[0x%03x]:     0x%08x | Caps_1[0x%03x]:   0x%08x\n",
		SDHCI_CAPABILITIES, sdhci_readl(host, SDHCI_CAPABILITIES),
		SDHCI_CAPABILITIES_1, sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_err(DRIVER_NAME ": Cmd[0x%03x]:      0x%08x | Max curr[0x%03x]: 0x%08x\n",
		SDHCI_COMMAND, sdhci_readw(host, SDHCI_COMMAND),
		SDHCI_MAX_CURRENT, sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_err(DRIVER_NAME ": Host ctl2[0x%03x]: 0x%08x\n",
		SDHCI_HOST_CONTROL2, sdhci_readw(host, SDHCI_HOST_CONTROL2));

	/* ADMA registers are only meaningful when ADMA is in use. */
	if (host->flags & SDHCI_USE_ADMA)
		pr_err(DRIVER_NAME ": ADMA Err[0x%03x]: 0x%08x | ADMA Ptr[0x%03x]: 0x%08x\n",
		       SDHCI_ADMA_ERROR, readl(host->ioaddr + SDHCI_ADMA_ERROR),
		       SDHCI_ADMA_ADDRESS, readl(host->ioaddr +
		       SDHCI_ADMA_ADDRESS));

	if (host->ops->dump_host_cust_regs)
		host->ops->dump_host_cust_regs(host);

	pr_err(DRIVER_NAME ": =========================================================\n");
}
244
245 /*****************************************************************************\
246  *                                                                           *
247  * Low level functions                                                       *
248  *                                                                           *
249 \*****************************************************************************/
250
251 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
252 {
253         host->ier &= ~clear;
254         host->ier |= set;
255         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
256         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
257 }
258
/* Enable the given interrupt bits without touching the others. */
static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, 0, irqs);
}
263
/* Disable the given interrupt bits without touching the others. */
static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, irqs, 0);
}
268
269 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
270 {
271         u32 present, irqs;
272
273         if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
274             (host->mmc->caps & MMC_CAP_NONREMOVABLE))
275                 return;
276
277         present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
278                               SDHCI_CARD_PRESENT;
279         irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
280
281         if (enable)
282                 sdhci_unmask_irqs(host, irqs);
283         else
284                 sdhci_mask_irqs(host, irqs);
285 }
286
/* Arm the card insert/remove interrupt (subject to slot quirks). */
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}
291
/* Disarm the card insert/remove interrupt (subject to slot quirks). */
static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
296
/*
 * sdhci_reset - issue a controller software reset and wait for completion.
 * @mask: any combination of SDHCI_RESET_ALL / SDHCI_RESET_CMD /
 *        SDHCI_RESET_DATA.
 *
 * Writes the reset bits and polls (up to 100 ms, busy-wait) for the
 * hardware to clear them.  Gives the platform driver enter/exit hooks
 * around the reset and restores interrupt enables, DMA setup and the
 * v4.00 addressing bits afterwards where required.
 */
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	u32 ctrl;
	unsigned long timeout;

	/* Some controllers can't be reset while no card is present. */
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	if (host->ops->platform_reset_enter)
		host->ops->platform_reset_enter(host, mask);

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	/* A full reset kills the clock setup; force reprogramming. */
	if (mask & SDHCI_RESET_ALL)
		host->clock = 0;

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	if (host->ops->platform_reset_exit)
		host->ops->platform_reset_exit(host, mask);

	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, host->ier);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
			host->ops->enable_dma(host);
	}

	/*
	 * VERSION_4_EN bit and 64BIT_EN bit are cleared after a full reset
	 * need to re-configure them after each full reset
	 */
	if ((mask & SDHCI_RESET_ALL) && host->version >= SDHCI_SPEC_400) {
		ctrl = sdhci_readl(host, SDHCI_ACMD12_ERR);
		ctrl |= SDHCI_HOST_VERSION_4_EN;
		if (host->quirks2 & SDHCI_QUIRK2_SUPPORT_64BIT_DMA)
			ctrl |= SDHCI_ADDRESSING_64BIT_EN;
		sdhci_writel(host, ctrl, SDHCI_ACMD12_ERR);
	}
}
354
355 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
356
357 static void sdhci_init(struct sdhci_host *host, int soft)
358 {
359         if (soft)
360                 sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
361         else
362                 sdhci_reset(host, SDHCI_RESET_ALL);
363
364         sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
365                 SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
366                 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
367                 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
368                 SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
369
370         if (soft) {
371                 /* force clock reconfiguration */
372                 host->clock = 0;
373                 sdhci_set_ios(host->mmc, &host->mmc->ios);
374         }
375 }
376
/*
 * sdhci_reinit - full reinitialisation, used e.g. on card change.
 *
 * Performs a hard init, tears down any retuning state left over from a
 * previously inserted UHS-I card, and re-arms card detection.
 */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	/*
	 * Retuning stuffs are affected by different cards inserted and only
	 * applicable to UHS-I cards. So reset these fields to their initial
	 * value when card is removed.
	 */
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
		host->flags &= ~SDHCI_USING_RETUNING_TIMER;

		del_timer_sync(&host->tuning_timer);
		host->flags &= ~SDHCI_NEEDS_RETUNING;
		host->mmc->max_blk_count =
			(host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
	}
	sdhci_enable_card_detection(host);
}
395
396 static void sdhci_activate_led(struct sdhci_host *host)
397 {
398         u8 ctrl;
399
400         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
401         ctrl |= SDHCI_CTRL_LED;
402         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
403 }
404
405 static void sdhci_deactivate_led(struct sdhci_host *host)
406 {
407         u8 ctrl;
408
409         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
410         ctrl &= ~SDHCI_CTRL_LED;
411         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
412 }
413
414 #ifdef SDHCI_USE_LEDS_CLASS
415 static void sdhci_led_control(struct led_classdev *led,
416         enum led_brightness brightness)
417 {
418         struct sdhci_host *host = container_of(led, struct sdhci_host, led);
419         unsigned long flags;
420
421         spin_lock_irqsave(&host->lock, flags);
422
423         if (host->runtime_suspended)
424                 goto out;
425
426         if (brightness == LED_OFF)
427                 sdhci_deactivate_led(host);
428         else
429                 sdhci_activate_led(host);
430 out:
431         spin_unlock_irqrestore(&host->lock, flags);
432 }
433 #endif
434
435 /*****************************************************************************\
436  *                                                                           *
437  * Core functions                                                            *
438  *                                                                           *
439 \*****************************************************************************/
440
/*
 * sdhci_read_block_pio - drain one data block from the controller FIFO.
 *
 * Reads 32-bit words from SDHCI_BUFFER and scatters the bytes into the
 * request's pages through the pre-started sg_miter.  Runs with local
 * IRQs disabled because the sg_miter mapping may use atomic kmaps.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;	/* bytes still valid in the current scratch word */

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		/* Copy at most what fits in the current sg segment. */
		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			/* Refill the scratch word from the FIFO as needed. */
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
485
/*
 * sdhci_write_block_pio - feed one data block into the controller FIFO.
 *
 * Gathers bytes from the request's pages via the pre-started sg_miter,
 * packs them into 32-bit words and writes them to SDHCI_BUFFER.  Runs
 * with local IRQs disabled because the sg_miter mapping may use atomic
 * kmaps.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;	/* bytes accumulated in the current scratch word */
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		/* Copy at most what fits in the current sg segment. */
		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* Flush a full word, or a partial one at the end. */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
531
532 static void sdhci_transfer_pio(struct sdhci_host *host)
533 {
534         u32 mask;
535
536         BUG_ON(!host->data);
537
538         if (host->data->flags & MMC_DATA_READ)
539                 mask = SDHCI_DATA_AVAILABLE;
540         else
541                 mask = SDHCI_SPACE_AVAILABLE;
542
543         /*
544          * Some controllers (JMicron JMB38x) mess up the buffer bits
545          * for transfers < 4 bytes. As long as it is just one block,
546          * we can ignore the bits.
547          */
548         if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
549                 (host->data->blocks == 1))
550                 mask = ~0;
551
552         /*
553          * Start the transfer if the present state register indicates
554          * SDHCI_DATA_AVAILABLE or SDHCI_SPACE_AVAILABLE. The driver should
555          * transfer one complete block of data and wait for the buffer ready
556          * interrupt to transfer the next block of data.
557          */
558         if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
559                 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
560                         udelay(100);
561
562                 if (host->data->flags & MMC_DATA_READ)
563                         sdhci_read_block_pio(host);
564                 else
565                         sdhci_write_block_pio(host);
566         }
567
568         DBG("PIO transfer complete.\n");
569 }
570
/*
 * sdhci_kmap_atomic - map a scatterlist page for CPU access with local
 * interrupts disabled.  Must be paired with sdhci_kunmap_atomic(), which
 * restores the interrupt flags saved in *flags.
 */
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}
576
/* Counterpart to sdhci_kmap_atomic(): unmap and restore saved IRQ flags. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
582
/*
 * sdhci_set_adma_desc - fill in one ADMA2 descriptor.
 * @desc: descriptor to write (ADMA requires at least 4-byte alignment)
 * @addr: DMA address of the data buffer
 * @len:  transfer length for this descriptor
 * @cmd:  attribute bits (e.g. 0x21 = TRAN|VALID, 0x3 = NOP END|VALID)
 *
 * The address field is written as 64 bits when the controller has
 * 64-bit addressing enabled, otherwise as 32 bits.
 */
static void sdhci_set_adma_desc(struct sdhci_host *host, u8 *desc,
				dma_addr_t addr, int len, unsigned cmd)
{
	__le32 *dataddr = (__le32 __force *)(desc + 4);
	__le64 *dataddr64 = (__le64 __force *)(desc + 4);
	__le16 *cmdlen = (__le16 __force *)desc;
	u32 ctrl;

	/* SDHCI specification says ADMA descriptors should be 4 byte
	 * aligned, so using 16 or 32bit operations should be safe. */

	cmdlen[0] = cpu_to_le16(cmd);
	cmdlen[1] = cpu_to_le16(len);

	ctrl = sdhci_readl(host, SDHCI_ACMD12_ERR);
	if (ctrl & SDHCI_ADDRESSING_64BIT_EN)
		dataddr64[0] = cpu_to_le64(addr);
	else
		dataddr[0] = cpu_to_le32(addr);
}
603
/*
 * sdhci_adma_table_pre - map a request and build its ADMA2 descriptor table.
 *
 * Maps the scatterlist (and, when buffers are not DMA-coherent, the
 * bounce/align buffer and the descriptor table itself), then writes one
 * TRAN descriptor per properly aligned chunk.  Unaligned leading bytes
 * (up to 3) are routed through the align buffer.  Returns 0 on success
 * or -EINVAL on any mapping failure, unwinding via the goto chain.
 */
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	u8 *desc;
	u8 *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;
	int next_desc;
	u32 ctrl;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/*
	 * The ADMA descriptor table is mapped further down as we
	 * need to fill it with data first.
	 */

	if (!host->use_dma_alloc) {
		host->align_addr = dma_map_single(mmc_dev(host->mmc),
			host->align_buffer, 128 * 8, direction);
		if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
			goto fail;
		BUG_ON(host->align_addr & 0x3);
	}

	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
		data->sg, data->sg_len, direction);
	if (host->sg_count == 0)
		goto unmap_align;

	desc = host->adma_desc;
	align = host->align_buffer;

	align_addr = host->align_addr;

	/* Descriptor stride depends on addressing mode and host version. */
	ctrl = sdhci_readl(host, SDHCI_ACMD12_ERR);
	if (ctrl & SDHCI_ADDRESSING_64BIT_EN) {
		if (ctrl & SDHCI_HOST_VERSION_4_EN)
			next_desc = 16;
		else
			next_desc = 12;
	} else {
		/* 32 bit DMA mode supported */
		next_desc = 8;
	}

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (4 - (addr & 0x3)) & 0x3;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_set_adma_desc(host, desc, align_addr, offset,
						0x21);

			BUG_ON(offset > 65536);

			align += 4;
			align_addr += 4;

			desc += next_desc;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		if (len > 0) {
			sdhci_set_adma_desc(host, desc, addr, len, 0x21);
			desc += next_desc;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 8);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/*
		* Mark the last descriptor as the terminating descriptor
		*/
		if (desc != host->adma_desc) {
			desc -= next_desc;
			desc[0] |= 0x3; /* end and valid*/
		}
	} else {
		/*
		* Add a terminating entry.
		*/

		/* nop, end, valid */
		sdhci_set_adma_desc(host, desc, 0, 0, 0x3);
	}

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, 128 * 8, direction);
	}

	if (!host->use_dma_alloc) {
		host->adma_addr = dma_map_single(mmc_dev(host->mmc),
			host->adma_desc, (128 * 2 + 1) * 8, DMA_TO_DEVICE);
		if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
			goto unmap_entries;
		BUG_ON(host->adma_addr & 0x3);
	}

	return 0;

unmap_entries:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
unmap_align:
	if (!host->use_dma_alloc)
		dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
				128 * 8, direction);
fail:
	return -EINVAL;
}
761
/*
 * sdhci_adma_table_post - undo sdhci_adma_table_pre() after the transfer.
 *
 * Unmaps the descriptor table and align buffer (when they were
 * dma_map_single'd), copies any bounced unaligned leading bytes back
 * into the request pages on reads, and finally unmaps the scatterlist.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	if (!host->use_dma_alloc) {
		dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
			(128 * 2 + 1) * 8, DMA_TO_DEVICE);

		dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
			128 * 8, direction);
	}

	if (data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		/* Copy bounced bytes back for each unaligned segment. */
		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += 4;
			}
		}
	}

	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
}
809
810 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
811 {
812         u8 count;
813         struct mmc_data *data = cmd->data;
814         unsigned target_timeout, current_timeout;
815
816         /*
817          * If the host controller provides us with an incorrect timeout
818          * value, just skip the check and use 0xE.  The hardware may take
819          * longer to time out, but that's much better than having a too-short
820          * timeout value.
821          */
822         if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
823                 return 0xE;
824
825         /* Unspecified timeout, assume max */
826         if (!data && !cmd->cmd_timeout_ms)
827                 return 0xE;
828
829         /* timeout in us */
830         if (!data)
831                 target_timeout = cmd->cmd_timeout_ms * 1000;
832         else {
833                 target_timeout = data->timeout_ns / 1000;
834                 if (host->clock)
835                         target_timeout += data->timeout_clks / host->clock;
836         }
837
838         /*
839          * Figure out needed cycles.
840          * We do this in steps in order to fit inside a 32 bit int.
841          * The first step is the minimum timeout, which will have a
842          * minimum resolution of 6 bits:
843          * (1) 2^13*1000 > 2^22,
844          * (2) host->timeout_clk < 2^16
845          *     =>
846          *     (1) / (2) > 2^6
847          */
848         count = 0;
849         current_timeout = (1 << 13) * 1000 / host->timeout_clk;
850         while (current_timeout < target_timeout) {
851                 count++;
852                 current_timeout <<= 1;
853                 if (count >= 0xF)
854                         break;
855         }
856
857         if (count >= 0xF) {
858                 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
859                     mmc_hostname(host->mmc), count, cmd->opcode);
860                 count = 0xE;
861         }
862
863         return count;
864 }
865
866 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
867 {
868         u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
869         u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
870
871         if (host->flags & SDHCI_REQ_USE_DMA)
872                 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
873         else
874                 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
875 }
876
877 static void sdhci_determine_transfer_mode(struct sdhci_host *host,
878         unsigned int req_size, unsigned int req_blocks)
879 {
880         /* Nothing to do if DMA modes are not supported. */
881         if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
882                 host->flags &= ~SDHCI_REQ_USE_DMA;
883         } else if (!host->max_pio_size || (req_size > host->max_pio_size)) {
884                 host->flags |= SDHCI_REQ_USE_DMA;
885         } else if (req_size < host->max_pio_size) {
886                 host->flags &= ~SDHCI_REQ_USE_DMA;
887                 if (host->max_pio_blocks &&
888                         (req_blocks > host->max_pio_blocks))
889                         host->flags |= SDHCI_REQ_USE_DMA;
890         }
891 }
892
/*
 * Program the controller for the data phase of @cmd (or for a busy-wait
 * command): set the data timeout, validate the request, pick DMA vs PIO,
 * set up the ADMA descriptor table / SDMA address / PIO iterator, and
 * finally program the block size and block count registers.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	u8 ctrl;
	struct mmc_data *data = cmd->data;
	int ret;

	/* Outside CMDQ mode only one data transfer may be in flight. */
	if (!MMC_CHECK_CMDQ_MODE(host))
		WARN_ON(host->data);

	/*
	 * A timeout must be programmed for data transfers and also for
	 * busy-signalling commands (MMC_RSP_BUSY) that carry no data.
	 */
	if (data || (cmd->flags & MMC_RSP_BUSY)) {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}

	/* Non-data command: nothing further to set up. */
	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	/* Select dma or PIO mode for transfer */
	sdhci_determine_transfer_mode(host, data->blksz * data->blocks,
		data->blocks);

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/*
		 * Hosts with the 32BIT_*_SIZE quirks cannot DMA scatterlist
		 * entries whose length is not a multiple of 4; fall back to
		 * PIO if any such entry exists.
		 */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Same fallback, this time for 4-byte buffer alignment. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			/* Build the ADMA descriptor table, then program its
			 * bus address into the controller. */
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host,
					(host->adma_addr & 0xFFFFFFFF),
					SDHCI_ADMA_ADDRESS);

				/*
				 * SDHCI 4.00+ hosts with 64-bit DMA support
				 * also need the upper 32 bits of the
				 * descriptor table address programmed (zeroed
				 * when 64-bit addressing is not in use).
				 */
				if ((host->version >= SDHCI_SPEC_400) &&
				    (host->quirks2 &
				     SDHCI_QUIRK2_SUPPORT_64BIT_DMA)) {
					if (host->quirks2 &
					    SDHCI_QUIRK2_USE_64BIT_ADDR) {

						sdhci_writel(host,
						(host->adma_addr >> 32)
							& 0xFFFFFFFF,
						SDHCI_UPPER_ADMA_ADDRESS);
					} else {
						sdhci_writel(host, 0,
						SDHCI_UPPER_ADMA_ADDRESS);
					}
				}
			}
		} else {
			int sg_cnt;

			/* SDMA: map the scatterlist; the WARN_ON below shows
			 * this path expects a single contiguous segment. */
			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
						DMA_FROM_DEVICE :
						DMA_TO_DEVICE);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA2;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	/* PIO path: set up the scatterlist iterator used by the IRQ-driven
	 * data transfer, and remember how many blocks remain. */
	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
1077
1078 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1079         struct mmc_command *cmd)
1080 {
1081         u16 mode;
1082         struct mmc_data *data = cmd->data;
1083
1084         if (data == NULL)
1085                 return;
1086
1087         WARN_ON(!host->data);
1088
1089         mode = SDHCI_TRNS_BLK_CNT_EN;
1090         if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1091                 mode |= SDHCI_TRNS_MULTI;
1092                 /*
1093                  * If we are sending CMD23, CMD12 never gets sent
1094                  * on successful completion (so no Auto-CMD12).
1095                  */
1096                 if (!MMC_CHECK_CMDQ_MODE(host)) {
1097                         if (!host->mrq_cmd->sbc &&
1098                                 (host->flags & SDHCI_AUTO_CMD12) &&
1099                                 mmc_op_multi(cmd->opcode))
1100                                         mode |= SDHCI_TRNS_AUTO_CMD12;
1101                         else if (host->mrq_cmd->sbc &&
1102                                 (host->flags & SDHCI_AUTO_CMD23)) {
1103                                         mode |= SDHCI_TRNS_AUTO_CMD23;
1104                                         sdhci_writel(host,
1105                                                 host->mrq_cmd->sbc->arg,
1106                                                 SDHCI_ARGUMENT2);
1107                         }
1108                 }
1109         }
1110
1111         if (data->flags & MMC_DATA_READ)
1112                 mode |= SDHCI_TRNS_READ;
1113         if (host->flags & SDHCI_REQ_USE_DMA)
1114                 mode |= SDHCI_TRNS_DMA;
1115
1116         sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1117 }
1118
1119 #ifdef CONFIG_DEBUG_FS
/*
 * 32-bit throughput helper: kbps = (bits * 1000) / usecs, rounded to
 * the closest integer.  The caller must ensure time_usecs is non-zero.
 */
static void sdhci_div32(
		u32 size_in_bits_x1000, u32 time_usecs,
		u32 *speed_in_kbps)
{
	*speed_in_kbps = DIV_ROUND_CLOSEST(size_in_bits_x1000, time_usecs);
}
1126
1127 static void sdhci_div64(
1128                 u64 size_in_bits_x1000, u64 time_usecs,
1129                 u32 *speed_in_kbps)
1130 {
1131         int i;
1132
1133         /* convert 64 bit into 32 bits */
1134         i = 0;
1135         while (!(IS_32_BIT(size_in_bits_x1000) && IS_32_BIT(time_usecs))) {
1136                 /* shift right both the operands bytes and time */
1137                 size_in_bits_x1000 >>= 1;
1138                 time_usecs >>= 1;
1139                 i++;
1140         }
1141         if (i)
1142                 pr_debug("%s right shifted operands by %d, size=%lld, time=%lld usec\n",
1143                         __func__, i, size_in_bits_x1000, time_usecs);
1144         /* check for 32 bit operations first */
1145         sdhci_div32(
1146                 (u32)size_in_bits_x1000, (u32)time_usecs,
1147                 speed_in_kbps);
1148         return;
1149 }
1150
1151 static void free_stats_nodes(struct sdhci_host *host)
1152 {
1153         struct data_stat_entry *ptr, *ptr2;
1154
1155         ptr = host->sdhci_data_stat.head;
1156         while (ptr) {
1157                 ptr2 = ptr->next;
1158                 host->sdhci_data_stat.stat_size--;
1159                 devm_kfree(host->mmc->parent, ptr);
1160                 ptr = ptr2;
1161         }
1162         if (host->sdhci_data_stat.stat_size)
1163                 pr_err("stat_size=%d after free %s\n",
1164                         host->sdhci_data_stat.stat_size,
1165                         __func__);
1166         host->sdhci_data_stat.head = NULL;
1167 }
1168
/*
 * Allocate a performance-stat entry for (blk_size, blk_count, direction)
 * and insert it into the singly linked list at host->sdhci_data_stat,
 * which is kept sorted by ascending (stat_blk_size,
 * stat_blks_per_transfer).
 *
 * Returns the inserted node, or NULL on bad input, allocation failure,
 * or the "should be unreachable" insertion case below.
 *
 * NOTE(review): stat_size is incremented before the insertion is known
 * to succeed, and on the unreachable error path the freshly allocated
 * node is neither linked nor freed (devm will reclaim it at device
 * teardown) - confirm whether the counter skew is acceptable.
 */
static struct data_stat_entry *add_entry_sorted(struct sdhci_host *host,
	unsigned int blk_size, unsigned int blk_count,
	unsigned int data_flags)
{
	struct data_stat_entry *node, *ptr;
	bool is_read;

	/* A zero block count cannot yield a meaningful throughput stat. */
	if (!blk_count) {
		pr_err("%s %s: call blk_size=%d, blk_count=%d, data_flags=0x%x\n",
			mmc_hostname(host->mmc), __func__,
			blk_size, blk_count, data_flags);
		goto end;
	}

	node = devm_kzalloc(host->mmc->parent, sizeof(struct data_stat_entry),
		GFP_KERNEL);
	if (!node) {
		pr_err("%s, %s, line=%d %s: unable to allocate data_stat_entry\n",
			__FILE__, __func__, __LINE__, mmc_hostname(host->mmc));
		goto end;
	}
	node->stat_blk_size = blk_size;
	node->stat_blks_per_transfer = blk_count;
	is_read = IS_DATA_READ(data_flags);
	node->is_read = is_read;
	host->sdhci_data_stat.stat_size++;
	/* assume existing list is sorted and try to insert this new node
	 * into the increasing order sorted array
	 */
	ptr = host->sdhci_data_stat.head;
	if (!ptr) {
		/* first element */
		host->sdhci_data_stat.head = node;
		return node;
	}
	/* New node sorts before the current head: it becomes the new head. */
	if (ptr && ((ptr->stat_blk_size > blk_size) ||
		((ptr->stat_blk_size == blk_size) &&
		(ptr->stat_blks_per_transfer > blk_count)))) {
		host->sdhci_data_stat.head = node;
		/* update new head */
		node->next = ptr;
		return node;
	}
	/* Advance ptr to the last node that still sorts before the new one. */
	while (ptr->next) {
		if ((ptr->next->stat_blk_size < blk_size) ||
			((ptr->next->stat_blk_size == blk_size) &&
			(ptr->next->stat_blks_per_transfer < blk_count)))
			ptr = ptr->next;
		else
			break;
	}
	/* We are here if -
	 * 1. ptr->next is null or
	 * 2. blk_size of ptr->next is greater than new blk size, so we should
	 *    place the new node between ptr and ptr->next
	 */
	if (!ptr->next) {
		/* Append at the tail. */
		ptr->next = node;
		return node;
	}
	if ((ptr->next->stat_blk_size > blk_size) ||
		((ptr->next->stat_blk_size == blk_size) &&
		(ptr->next->stat_blks_per_transfer > blk_count)) ||
		((ptr->next->stat_blk_size == blk_size) &&
		(ptr->next->stat_blks_per_transfer == blk_count) &&
		(ptr->next->is_read != is_read))) {
		/* Splice the node in between ptr and ptr->next. */
		node->next = ptr->next;
		ptr->next = node;
		return node;
	}
	pr_err("%s %s: line=%d should be unreachable ptr-next->blk_size=%d, blks_per_xfer=%d, is_read=%d, new blk_size=%d, blks_per_xfer=%d, data_flags=0x%x\n",
		mmc_hostname(host->mmc), __func__, __LINE__,
		ptr->next->stat_blk_size, ptr->next->stat_blks_per_transfer,
		ptr->next->is_read, blk_size, blk_count, data_flags);
end:
	return NULL;
}
1246
1247 static void free_data_entry(struct sdhci_host *host,
1248                                 unsigned int blk_size, unsigned int blk_count,
1249                                 unsigned int data_flags)
1250 {
1251         struct data_stat_entry *ptr, *ptr2;
1252         bool is_read;
1253
1254         ptr = host->sdhci_data_stat.head;
1255         if (!ptr)
1256                 return;
1257         is_read = IS_DATA_READ(data_flags);
1258         if (PERF_STAT_COMPARE(ptr, blk_size, blk_count, is_read)) {
1259                 host->sdhci_data_stat.head = ptr->next;
1260                 devm_kfree(host->mmc->parent, ptr);
1261                 host->sdhci_data_stat.stat_size--;
1262                 return;
1263         }
1264         while (ptr->next) {
1265                 if (PERF_STAT_COMPARE(ptr->next, blk_size, blk_count,
1266                         is_read)) {
1267                         ptr2 = ptr->next->next;
1268                         devm_kfree(host->mmc->parent, ptr->next);
1269                         host->sdhci_data_stat.stat_size--;
1270                         ptr->next = ptr2;
1271                         return;
1272                 }
1273                 ptr = ptr->next;
1274         }
1275         pr_err("Error %s %s: given blk_size=%d not found\n",
1276                 mmc_hostname(host->mmc), __func__, blk_size);
1277         return;
1278 }
1279
/*
 * Record the start or completion of a data transfer in the per-
 * (blk_size, blk_count, direction) performance statistics (debugfs).
 *
 * is_start_stat=true timestamps the transfer start (allocating the
 * entry on first use); is_start_stat=false computes the sample's
 * throughput in kbps and folds it into min/max and running totals.
 * is_data_error=true discards the sample and frees an entry that never
 * completed a transfer.
 */
static void update_stat(struct sdhci_host *host, u32 blk_size, u32 blk_count,
			bool is_start_stat, bool is_data_error,
			unsigned int data_flags)
{
	u32 new_kbps;
	struct data_stat_entry *stat;
	ktime_t t;
	bool is_read;

	/* Stats collection is opt-in. */
	if (!host->enable_sdhci_perf_stats)
		goto end;

	if (!blk_count) {
		pr_err("%s %s error stats case: blk_size=%d, blk_count=0, is_start_stat=%d, is_data_error=%d, data_flags=0x%x\n",
			mmc_hostname(host->mmc), __func__, blk_size,
			(int)is_start_stat, (int)is_data_error, data_flags);
		goto end;
	}
	/* Look up the entry matching this geometry and direction. */
	stat = host->sdhci_data_stat.head;
	is_read = IS_DATA_READ(data_flags);
	while (stat) {
		if (PERF_STAT_COMPARE(stat, blk_size, blk_count, is_read))
			break;
		stat = stat->next;
	}
	/* allocation skipped in finish call */
	if (!stat) {
		if (!is_start_stat)
			goto end;
		/* allocate an entry */
		stat = add_entry_sorted(host, blk_size, blk_count, data_flags);
		if (!stat) {
			pr_err("%s %s line=%d: stat entry not found\n",
				mmc_hostname(host->mmc), __func__, __LINE__);
			goto end;
		}
	}

	if (is_start_stat) {
		stat->start_ktime = ktime_get();
	} else {
		if (is_data_error) {
			/*
			 * NOTE(review): this message hardcodes "blk_count=0"
			 * although blk_count is non-zero on this path (checked
			 * above) - the log text is misleading, not the logic.
			 */
			pr_err("%s %s error stats case: blk_size=%d, blk_count=0, is_start_stat=%d, data Error case ... data_flags=0x%x\n",
				mmc_hostname(host->mmc), __func__, blk_size,
				(int)is_start_stat, data_flags);
			memset(&stat->start_ktime, 0, sizeof(ktime_t));
			/* Drop entries that never completed a transfer. */
			if (!stat->total_bytes)
				free_data_entry(host, blk_size, blk_count,
					data_flags);
			goto end;
		}
		/* Successful completion: compute this sample's throughput. */
		t = ktime_get();
		stat->duration_usecs = ktime_us_delta(t, stat->start_ktime);
		stat->current_transferred_bytes = (blk_size * blk_count);
		sdhci_div32(
			(((u32)stat->current_transferred_bytes << 3) * 1000),
			stat->duration_usecs,
			&new_kbps);
		if (stat->max_kbps == 0) {
			/* First sample seeds both extremes. */
			stat->max_kbps = new_kbps;
			stat->min_kbps = new_kbps;
		} else {
			if (new_kbps > stat->max_kbps)
				stat->max_kbps = new_kbps;
			if (new_kbps < stat->min_kbps)
				stat->min_kbps = new_kbps;
		}
		/* update the total bytes figure for this entry */
		stat->total_usecs += stat->duration_usecs;
		stat->total_bytes += stat->current_transferred_bytes;
		stat->total_transfers++;
	}
end:
	return;
}
1355 #endif
1356
/*
 * Complete the data phase of the current request: tear down DMA
 * mappings, set bytes_xfered, issue CMD12 where required (resetting the
 * controller first on error), and otherwise hand the request off to the
 * appropriate completion tasklet.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);
#ifdef CONFIG_CMD_DUMP
	if (IS_EMMC_CARD(host))
		dbg_add_host_log(host->mmc, 9, 9, (int)host->mrq_dat);
#endif

	data = host->data;
	host->data = NULL;

	/* Undo the DMA setup done by sdhci_prepare_data(). */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				data->sg_len, (data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     (!MMC_CHECK_CMDQ_MODE(host) && !host->mrq_dat->sbc))) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!MMC_CHECK_CMDQ_MODE(host))
				sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
			else
				sdhci_reset(host, SDHCI_RESET_DATA);
		}
		sdhci_send_command(host, data->stop);
	} else {
		/* No stop needed: schedule the matching completion tasklet. */
		if (MMC_CHECK_CMDQ_MODE(host))
			tasklet_schedule(&host->finish_dat_tasklet);
		else
			tasklet_schedule(&host->finish_tasklet);
	}
#ifdef CONFIG_DEBUG_FS
	if (data->bytes_xfered) {
		update_stat(host, data->blksz, data->blocks, false, false,
			data->flags);
	} else {
		host->no_data_transfer_count++;
		/* performance stats does not include cases of data error */
		update_stat(host, data->blksz, data->blocks, false, true,
			data->flags);
	}
#endif
}
1430
/*
 * Issue @cmd to the controller: wait for the CMD/DATA inhibit bits to
 * clear, arm the software timeout timer, program the data and transfer
 * registers, and finally write SDHCI_COMMAND (which starts execution).
 * On setup failure, cmd->error is set and the finish tasklet scheduled.
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;
	ktime_t cur_time;
	s64 period_time;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	if (!host->mrq_cmd && host->mrq_dat)
		host->mrq_cmd = host->mrq_dat;

	/* Data inhibit also matters for data commands and busy responses. */
	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq_cmd->data && (cmd == host->mrq_cmd->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	/* Busy-wait in 1 ms steps until the controller can take a command. */
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			if (MMC_CHECK_CMDQ_MODE(host))
				tasklet_schedule(&host->finish_cmd_tasklet);
			else
				tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* SANITIZE (via CMD6/SWITCH) may run long: allow 100 s, else 10 s. */
	if ((cmd->opcode == MMC_SWITCH) &&
		(((cmd->arg >> 16) & EXT_CSD_SANITIZE_START)
		== EXT_CSD_SANITIZE_START))
		timeout = 100;
	else
		timeout = 10;

	mod_timer(&host->timer, jiffies + timeout * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	/* A 136-bit response combined with busy signalling is not valid. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		if (MMC_CHECK_CMDQ_MODE(host))
			tasklet_schedule(&host->finish_cmd_tasklet);
		else
			tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/* Translate MMC response flags into the SDHCI encoding. */
	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19, CMD21 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

#ifdef CONFIG_CMD_DUMP
	if (MMC_CHECK_CMDQ_MODE(host))
		dbg_add_host_log(host->mmc, 0, cmd->opcode, cmd->arg);
#endif
#ifdef CONFIG_EMMC_BLKTRACE
	if (!MMC_CHECK_CMDQ_MODE(host)) {
		if (cmd->opcode == MMC_SET_BLOCK_COUNT)
			emmc_trace(MMC_ISSUE, host->mmc->mqrq_cur, host->mmc);
		else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
				cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
			emmc_trace(MMC_ISSUE_DONE,
				host->mmc->mqrq_cur, host->mmc);
	} else {
		if (cmd->opcode == MMC_QUEUED_TASK_ADDRESS)
			emmc_trace(MMC_ISSUE,
				&host->mmc->mq->mqrq[cmd->mrq->areq->mrq->cmd->arg >> 16],
				host->mmc);
		else if (cmd->opcode == MMC_EXECUTE_READ_TASK ||
				cmd->opcode == MMC_EXECUTE_WRITE_TASK)
			emmc_trace(MMC_ISSUE_DONE,
				&host->mmc->mq->mqrq[cmd->arg >> 16],
				host->mmc);
	}
#endif
	/*
	 * Before writes, if the periodic-calibration window has expired,
	 * invoke switch_signal_voltage_exit() - presumably to re-run pad
	 * calibration (NOTE(review): confirm against the tegra ops
	 * implementation).
	 */
	if ((host->quirks2 & SDHCI_QUIRK2_PERIODIC_CALIBRATION) &&
		((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) ||
		 (cmd->opcode == MMC_WRITE_BLOCK)) &&
		host->is_calibration_done) {
		cur_time = ktime_get();
		period_time = ktime_to_ms(ktime_sub(cur_time,
					host->timestamp));
		if (period_time >= SDHCI_PERIODIC_CALIB_TIMEOUT)
			if (host->ops->switch_signal_voltage_exit)
				host->ops->switch_signal_voltage_exit(host,
						host->mmc->ios.signal_voltage);
	}

	/* This register write starts the command on the bus. */
	host->command = SDHCI_MAKE_CMD(cmd->opcode, flags);
	sdhci_writew(host, host->command, SDHCI_COMMAND);
}
1559
/*
 * Complete the command phase of the current request.
 *
 * Reads the response registers back into cmd->resp[], then decides what
 * happens next: if this was the CMD23 (set-block-count) precursor, issue
 * the real command; otherwise hand the request over to data completion
 * and/or schedule the appropriate finish tasklet (plain vs. CMDQ mode).
 *
 * Runs from the interrupt path with host->lock held; host->cmd must be
 * the command that just completed (BUG otherwise).
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);
#ifdef CONFIG_CMD_DUMP
	if (IS_EMMC_CARD(host))
		dbg_add_host_log(host->mmc, 8, 8, (int)host->mrq_cmd);
#endif

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0; i < 4; i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

#ifdef CONFIG_CMD_DUMP
	if (MMC_CHECK_CMDQ_MODE(host))
		dbg_add_host_log(host->mmc, 0,
			host->cmd->opcode, host->cmd->resp[0]);
#endif
	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq_cmd->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq_cmd->cmd);
	} else {

		/* Processed actual command. */
		/*
		 * Data finished before the command response arrived:
		 * promote the request into the data slot and finish it.
		 *
		 * NOTE(review): host->cmd is set to NULL here, yet the
		 * !MMC_CHECK_CMDQ_MODE branch below dereferences
		 * host->cmd->data — looks like a potential NULL pointer
		 * dereference when data_early is set, unless
		 * sdhci_finish_data() repopulates host->cmd on every
		 * path (e.g. by issuing a stop command). Confirm.
		 */
		if (host->cmd->data && host->data_early) {
			host->cmd = NULL;
			host->mrq_dat = host->mrq_cmd;
			host->mrq_cmd = NULL;
			sdhci_finish_data(host);
		}

		if (!MMC_CHECK_CMDQ_MODE(host)) {
			/* Non-CMDQ: no data means the request is done. */
			if (!host->cmd->data)

				tasklet_schedule(&host->finish_tasklet);
			else {
				/* Data pending: move request to data slot. */
				host->mrq_dat = host->mrq_cmd;
				host->mrq_cmd = NULL;
			}

			host->cmd = NULL;
		} else if (!host->data_early) {
			/*
			 * CMDQ: only hand over to the data slot when both
			 * request and command completed cleanly and data
			 * transfer is still outstanding.
			 */
			if (!host->mrq_cmd->cmd->error &&
			!host->cmd->error && host->cmd->data) {
				host->cmd = NULL;
				host->mrq_dat = host->mrq_cmd;
				host->mrq_cmd = NULL;
			}
			tasklet_schedule(&host->finish_cmd_tasklet);
		}
	}
}
1628
1629 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1630 {
1631         u16 ctrl, preset = 0;
1632
1633         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1634
1635         switch (ctrl & SDHCI_CTRL_UHS_MASK) {
1636         case SDHCI_CTRL_UHS_SDR12:
1637                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1638                 break;
1639         case SDHCI_CTRL_UHS_SDR25:
1640                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1641                 break;
1642         case SDHCI_CTRL_UHS_SDR50:
1643                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1644                 break;
1645         case SDHCI_CTRL_UHS_SDR104:
1646                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1647                 break;
1648         case SDHCI_CTRL_UHS_DDR50:
1649                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1650                 break;
1651         default:
1652                 pr_warn("%s: Invalid UHS-I mode selected\n",
1653                         mmc_hostname(host->mmc));
1654                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1655                 break;
1656         }
1657         return preset;
1658 }
1659
/*
 * Program the SD bus clock to (at most) @clock Hz.
 *
 * Disables the card clock, computes the divider (preset-value,
 * programmable-clock, or divided-clock mode depending on controller
 * version and capabilities), waits for the internal clock to stabilize
 * and finally re-enables the card clock. @clock == 0 just leaves the
 * clock disabled. Updates host->clock and host->mmc->actual_clock.
 *
 * The sequence of register writes below is deliberate; several quirks
 * depend on the exact ordering.
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	unsigned long timeout;
	u32 caps;

	/* Nothing to do if the requested rate is already programmed. */
	if (clock && clock == host->clock)
		return;

	host->mmc->actual_clock = 0;

	/* Platforms with non-standard clocking handle this elsewhere. */
	if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
		return;

	/*
	 * If the entire clock control register is updated with zero, some
	 * controllers might first update clock divisor fields and then update
	 * the INT_CLK_EN and CARD_CLK_EN fields. Disable card clock first
	 * to ensure there is no abnormal clock behavior.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	clk &= ~SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
	clk = 0;
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	if (host->version >= SDHCI_SPEC_300) {
		/* Use controller-supplied preset divider when enabled. */
		if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
			SDHCI_CTRL_PRESET_VAL_ENABLE) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			real_div = div;
			clk_mul = host->clk_mul;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				if (host->mmc->ios.timing ==
					MMC_TIMING_UHS_DDR50)
					div = 2;
				else
					div = 1;
			} else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;

	/* Split the 10-bit divider across the low and high fields. */
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/*
	 * For Tegra3 sdmmc controller, internal clock will not be stable bit
	 * will get set only after some other register write is done. To
	 * handle, do a dummy reg write to the caps reg if
	 * SDHCI_QUIRK2_INT_CLK_STABLE_REQ_DUMMY_REG_WRITE is set.
	 */
	if (host->quirks2 & SDHCI_QUIRK2_INT_CLK_STABLE_REQ_DUMMY_REG_WRITE) {
		udelay(5);

		caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		caps |= 1;
		sdhci_writel(host, caps, SDHCI_CAPABILITIES);
	}

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Internal clock is stable; gate it through to the card. */
	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}
1801
1802 static inline void sdhci_update_clock(struct sdhci_host *host)
1803 {
1804         unsigned int clock;
1805
1806         clock = host->clock;
1807         host->clock = 0;
1808         if (host->ops->set_clock)
1809                 host->ops->set_clock(host, clock);
1810         sdhci_set_clock(host, clock);
1811 }
1812
/*
 * Set the bus power/voltage in the POWER_CONTROL register.
 *
 * @power is an MMC_VDD_* bit index (the ios->vdd encoding), or
 * (unsigned short)-1 to power off. Returns @power on a successful
 * change, 0 when powering off, or -1 when the register already holds
 * the requested value (no regulator update needed by the caller).
 *
 * Called with host->lock held from sdhci_do_set_ios().
 */
static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		/* Translate the VDD bit into the SDHCI voltage encoding. */
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	if (host->pwr == pwr)
		return -1;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return 0;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

	/*
	 * At least the Marvell CaFe chip gets confused if we set the voltage
	 * and set turn on power at the same time, so set the voltage first.
	 */
	if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

	/*
	 * Some controllers need an extra 10ms delay before they
	 * can apply clock after applying power
	 */
	if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
		mdelay(10);

	return power;
}
1872
1873 /* Execute DLL calibration once for MMC device if it is
1874  * enumerated in HS400 mode at 200MHz clock freq before
1875  * starting any data transfer.
1876  */
1877 static void sdhci_post_init(struct mmc_host *mmc)
1878 {
1879         struct sdhci_host *host;
1880
1881         host = mmc_priv(mmc);
1882
1883         sdhci_runtime_pm_get(host);
1884         if (host->ops->post_init)
1885                 host->ops->post_init(host);
1886         sdhci_runtime_pm_put(host);
1887 }
1888 /*****************************************************************************\
1889  *                                                                           *
1890  * MMC callbacks                                                             *
1891  *                                                                           *
1892 \*****************************************************************************/
1893
/*
 * MMC core .request callback: start processing @mrq.
 *
 * Takes a runtime-PM reference, checks card presence, optionally runs a
 * deferred retuning pass, then issues either the CMD23 precursor or the
 * request's main command. Completion is signalled later from the
 * interrupt path via the finish tasklets. If no card is present the
 * request is failed immediately with -ENOMEDIUM.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;
	u32 tuning_opcode;

	host = mmc_priv(mmc);

#ifdef CONFIG_DEBUG_FS
	if (mrq->data && mrq->data->blocks)
		update_stat(host, mrq->data->blksz, mrq->data->blocks,
			true, false, mrq->data->flags);
#endif

	sdhci_runtime_pm_get(host);

	/* Read cd-gpio before taking the lock; may not be available (<0). */
	present = mmc_gpio_get_cd(host->mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq_cmd != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!MMC_CHECK_CMDQ_MODE(host) && !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq_cmd = mrq;
	host->mrq_cmd->data_early = 0;

	/*
	 * Firstly check card presence from cd-gpio.  The return could
	 * be one of the following possibilities:
	 *     negative: cd-gpio is not available
	 *     zero: cd-gpio is used, and card is removed
	 *     one: cd-gpio is used, and card is present
	 */
	if (present < 0) {
		/* If polling, assume that the card is always present. */
		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
			if (host->ops->get_cd)
				present = host->ops->get_cd(host);
			else
				present = 1;
		else
			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
					SDHCI_CARD_PRESENT;
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		/* No medium: fail the request via the matching tasklet. */
		host->mrq_cmd->cmd->error = -ENOMEDIUM;
		if (MMC_CHECK_CMDQ_MODE(host))
			tasklet_schedule(&host->finish_cmd_tasklet);
		else
			tasklet_schedule(&host->finish_tasklet);
	} else {
		u32 present_state;

		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
		/*
		 * Check if the re-tuning timer has already expired and there
		 * is no on-going data transfer. If so, we need to execute
		 * tuning procedure before sending command.
		 */
		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
			/*
			 * Two-phase handshake with the core: first pass only
			 * flags that tuning is needed; tuning itself runs
			 * once ready_tuning is also set.
			 */
			if (!mmc->need_tuning || !mmc->ready_tuning) {
				if (!mmc->need_tuning)
					mmc->need_tuning = 1;
				goto end_tuning;
			}

			if (mmc->card) {
				/* eMMC uses cmd21 but sd and sdio use cmd19 */
				tuning_opcode =
					mmc->card->type == MMC_TYPE_MMC ?
					MMC_SEND_TUNING_BLOCK_HS200 :
					MMC_SEND_TUNING_BLOCK;
				/* Drop the lock across the tuning sequence. */
				host->mrq_cmd = NULL;
				spin_unlock_irqrestore(&host->lock, flags);
				sdhci_execute_tuning(mmc, tuning_opcode);
				mmc->need_tuning = 0;
				mmc->ready_tuning = 0;
				spin_lock_irqsave(&host->lock, flags);

/*
 * NOTE(review): this label sits inside the if (mmc->card) block and the
 * goto above jumps into it — legal C, and the restore is a no-op on
 * that path since mrq_cmd was never cleared there.
 */
end_tuning:
				/* Restore original mmc_request structure */
				host->mrq_cmd = mrq;
			}
		}

		/* For a data cmd, check for plat specific preparation */
		spin_unlock_irqrestore(&host->lock, flags);
		if (mrq->data)
			host->ops->platform_get_bus(host);
		spin_lock_irqsave(&host->lock, flags);

		/* Issue CMD23 first unless the controller auto-issues it. */
		if (!MMC_CHECK_CMDQ_MODE(host) &&
			(mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)))
				sdhci_send_command(host, mrq->sbc);
		else if (MMC_CHECK_CMDQ_MODE(host) && mrq->sbc)
			sdhci_send_command(host, mrq->sbc);
		else {
			sdhci_send_command(host, mrq->cmd);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
2015
/*
 * Apply the MMC core's requested I/O settings (@ios): power state, VDD,
 * clock rate, bus width, high-speed bit, UHS signaling mode, driver
 * strength and preset-value enable.
 *
 * The register write ordering below (notably disabling the SD clock
 * around HISPD/UHS mode changes) follows the SDHC v3.00 requirement to
 * avoid clock glitches. Runs with host->lock held except around
 * regulator calls and the platform enter/exit hooks.
 */
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	int vdd_bit = -1;
	u8 ctrl;

	/* cancel delayed clk gate work */
	if (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
		cancel_delayed_work_sync(&host->delayed_clk_gate_wrk);

	/* Do any required preparations prior to setting ios */
	if (host->ops->platform_ios_config_enter)
		host->ops->platform_ios_config_enter(host, ios);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP))
		sdhci_enable_preset_value(host, false);

	if (ios->power_mode == MMC_POWER_OFF)
		vdd_bit = sdhci_set_power(host, -1);
	else
		vdd_bit = sdhci_set_power(host, ios->vdd);

	/* Regulator call may sleep; drop the lock around it. */
	if (host->vmmc && vdd_bit != -1) {
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
		spin_lock_irqsave(&host->lock, flags);
	}

	sdhci_set_clock(host, ios->clock);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	/*
	 * If your platform has 8-bit width support but is not a v3 controller,
	 * or if it requires special setup code, you should implement that in
	 * platform_bus_width().
	 */
	if (host->ops->platform_bus_width) {
		host->ops->platform_bus_width(host, ios->bus_width);
	} else {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		if (ios->bus_width == MMC_BUS_WIDTH_8) {
			ctrl &= ~SDHCI_CTRL_4BITBUS;
			if (host->version >= SDHCI_SPEC_300)
				ctrl |= SDHCI_CTRL_8BITBUS;
		} else {
			if (host->version >= SDHCI_SPEC_300)
				ctrl &= ~SDHCI_CTRL_8BITBUS;
			if (ios->bus_width == MMC_BUS_WIDTH_4)
				ctrl |= SDHCI_CTRL_4BITBUS;
			else
				ctrl &= ~SDHCI_CTRL_4BITBUS;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if (((ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
		    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
			ctrl |= SDHCI_CTRL_HISPD;

		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock gliches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk |= SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
		}


		/*
		 * NOTE(review): the clock is disabled again here right after
		 * the re-enable in the preset-enabled branch above — the
		 * intermediate re-enable looks redundant (harmless, but worth
		 * confirming against the controller errata before cleanup).
		 */
		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		if (host->ops->set_uhs_signaling)
			host->ops->set_uhs_signaling(host, ios->timing);
		else {
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			/* Select Bus Speed Mode for host */
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
			if (ios->timing == MMC_TIMING_MMC_HS200)
				ctrl_2 |= SDHCI_CTRL_HS_SDR200;
			else if (ios->timing == MMC_TIMING_UHS_SDR12)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
			else if (ios->timing == MMC_TIMING_UHS_SDR25)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
			else if (ios->timing == MMC_TIMING_UHS_SDR50)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
			else if (ios->timing == MMC_TIMING_UHS_SDR104)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
			else if (ios->timing == MMC_TIMING_UHS_DDR50)
				ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		}

		/* In UHS modes let the controller's preset drive strength win. */
		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk |= SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Platform specific handling post ios setting */
	if (host->ops->platform_ios_config_exit)
		host->ops->platform_ios_config_exit(host, ios);

}
2212
/*
 * MMC core .set_ios callback: apply the requested I/O settings while
 * holding a runtime-PM reference on the controller.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *sdhci = mmc_priv(mmc);

	sdhci_runtime_pm_get(sdhci);
	sdhci_do_set_ios(sdhci, ios);
	sdhci_runtime_pm_put(sdhci);
}
2221
2222 static int sdhci_do_get_cd(struct sdhci_host *host)
2223 {
2224         int gpio_cd = mmc_gpio_get_cd(host->mmc);
2225
2226         if (host->flags & SDHCI_DEVICE_DEAD)
2227                 return 0;
2228
2229         /* If polling/nonremovable, assume that the card is always present. */
2230         if (((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2231             (!host->ops->get_cd)) ||
2232             (host->mmc->caps & MMC_CAP_NONREMOVABLE))
2233                 return 1;
2234
2235         if (host->ops->get_cd)
2236                 return host->ops->get_cd(host);
2237
2238         /* Try slot gpio detect */
2239         if (!IS_ERR_VALUE(gpio_cd))
2240                 return !!gpio_cd;
2241
2242         /* Host native card detect */
2243         return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2244 }
2245
/*
 * MMC core .get_cd callback: query card presence with the controller
 * runtime-resumed.
 */
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int present;

	sdhci_runtime_pm_get(host);
	present = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);

	return present;
}
2256
2257 static int sdhci_check_ro(struct sdhci_host *host)
2258 {
2259         unsigned long flags;
2260         int is_readonly;
2261
2262         spin_lock_irqsave(&host->lock, flags);
2263
2264         if (host->flags & SDHCI_DEVICE_DEAD)
2265                 is_readonly = 0;
2266         else if (host->ops->get_ro) {
2267                 spin_unlock_irqrestore(&host->lock, flags);
2268                 is_readonly = host->ops->get_ro(host);
2269                 spin_lock_irqsave(&host->lock, flags);
2270         }
2271         else
2272                 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2273                                 & SDHCI_WRITE_PROTECT);
2274
2275         spin_unlock_irqrestore(&host->lock, flags);
2276
2277         /* This quirk needs to be replaced by a callback-function later */
2278         return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2279                 !is_readonly : is_readonly;
2280 }
2281
2282 #define SAMPLE_COUNT    5
2283
2284 static int sdhci_do_get_ro(struct sdhci_host *host)
2285 {
2286         int i, ro_count;
2287
2288         if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2289                 return sdhci_check_ro(host);
2290
2291         ro_count = 0;
2292         for (i = 0; i < SAMPLE_COUNT; i++) {
2293                 if (sdhci_check_ro(host)) {
2294                         if (++ro_count > SAMPLE_COUNT / 2)
2295                                 return 1;
2296                 }
2297                 msleep(30);
2298         }
2299         return 0;
2300 }
2301
2302 static void sdhci_hw_reset(struct mmc_host *mmc)
2303 {
2304         struct sdhci_host *host = mmc_priv(mmc);
2305
2306         if (host->ops && host->ops->hw_reset)
2307                 host->ops->hw_reset(host);
2308 }
2309
/*
 * MMC core .get_ro callback: query the write-protect state with the
 * controller runtime-resumed.
 */
static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ro;

	sdhci_runtime_pm_get(host);
	ro = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);

	return ro;
}
2320
2321 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2322 {
2323         if (host->flags & SDHCI_DEVICE_DEAD)
2324                 goto out;
2325
2326         if (enable)
2327                 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
2328         else
2329                 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
2330
2331         /* SDIO IRQ will be enabled as appropriate in runtime resume */
2332         if (host->runtime_suspended)
2333                 goto out;
2334
2335         if (enable)
2336                 sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
2337         else
2338                 sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
2339 out:
2340         mmiowb();
2341 }
2342
2343 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2344 {
2345         struct sdhci_host *host = mmc_priv(mmc);
2346         unsigned long flags;
2347
2348         spin_lock_irqsave(&host->lock, flags);
2349         sdhci_enable_sdio_irq_nolock(host, enable);
2350         spin_unlock_irqrestore(&host->lock, flags);
2351 }
2352
/*
 * Switch the I/O signalling voltage (3.3V / 1.8V / 1.2V) following the
 * SDHCI v3.00 sequence: program the 1.8V enable bit in Host Control2,
 * adjust the vqmmc regulator when present, then confirm the bit stuck
 * after the 5 ms settling window.
 *
 * Returns 0 on success or when no switch is needed, -EIO on a regulator
 * failure, -EAGAIN when the controller did not latch the new level.
 */
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	/*
	 * Platforms with a fully non-standard switching sequence handle it
	 * themselves. NOTE(review): if the quirk is set but no hook is
	 * provided, execution falls through to the standard sequence below
	 * — presumably intentional; confirm with the platform drivers.
	 */
	if (host->quirks2 & SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING) {
		if (host->ops->switch_signal_voltage)
			return host->ops->switch_signal_voltage(
				host, ios->signal_voltage);
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (host->vqmmc) {
			ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
			if (ret) {
				pr_warning("%s: Switching to 3.3V signalling voltage "
						" failed\n", mmc_hostname(host->mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warning("%s: 3.3V regulator output did not became stable\n",
				mmc_hostname(host->mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		/* Set the regulator before enabling 1.8V signalling. */
		if (host->vqmmc) {
			ret = regulator_set_voltage(host->vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warning("%s: Switching to 1.8V signalling voltage "
						" failed\n", mmc_hostname(host->mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warning("%s: 1.8V regulator output did not became stable\n",
				mmc_hostname(host->mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		/* 1.2V has no Host Control2 bit; only the regulator moves. */
		if (host->vqmmc) {
			ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000);
			if (ret) {
				pr_warning("%s: Switching to 1.2V signalling voltage "
						" failed\n", mmc_hostname(host->mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
2445
2446 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2447         struct mmc_ios *ios)
2448 {
2449         struct sdhci_host *host = mmc_priv(mmc);
2450         int err;
2451
2452         if (host->version < SDHCI_SPEC_300)
2453                 return 0;
2454         sdhci_runtime_pm_get(host);
2455         err = sdhci_do_start_signal_voltage_switch(host, ios);
2456         /* Do any post voltage switch platform specific configuration */
2457         if  (host->ops->switch_signal_voltage_exit)
2458                 host->ops->switch_signal_voltage_exit(host,
2459                         ios->signal_voltage);
2460         sdhci_runtime_pm_put(host);
2461         return err;
2462 }
2463
2464 static int sdhci_card_busy(struct mmc_host *mmc)
2465 {
2466         struct sdhci_host *host = mmc_priv(mmc);
2467         u32 present_state;
2468
2469         sdhci_runtime_pm_get(host);
2470         /* Check whether DAT[3:0] is 0000 */
2471         present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2472         sdhci_runtime_pm_put(host);
2473
2474         return !(present_state & SDHCI_DATA_LVL_MASK);
2475 }
2476
2477 static void sdhci_config_tap(struct mmc_host *mmc, u8 option)
2478 {
2479         struct sdhci_host *host = mmc_priv(mmc);
2480
2481         if (host->ops->config_tap_delay)
2482                 host->ops->config_tap_delay(host, option);
2483 }
2484
2485 static int sdhci_validate_sd2_0(struct mmc_host *mmc)
2486 {
2487         struct sdhci_host *host;
2488         int err = 0;
2489
2490         host = mmc_priv(mmc);
2491
2492         if (host->ops->validate_sd2_0)
2493                 err = host->ops->validate_sd2_0(host);
2494         return err;
2495 }
2496
2497 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2498 {
2499         struct sdhci_host *host;
2500         u16 ctrl;
2501         u32 ier;
2502         int tuning_loop_counter = MAX_TUNING_LOOP;
2503         unsigned long timeout, flags;
2504         int err = 0;
2505         bool requires_tuning_nonuhs = false;
2506         u16 clk = 0;
2507
2508         host = mmc_priv(mmc);
2509
2510         sdhci_runtime_pm_get(host);
2511         local_irq_save(flags);
2512
2513         if ((host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) &&
2514                 host->ops->execute_freq_tuning) {
2515                 err = host->ops->execute_freq_tuning(host, opcode);
2516                 local_irq_restore(flags);
2517                 sdhci_runtime_pm_put(host);
2518                 return err;
2519         }
2520
2521         if ((host->quirks2 & SDHCI_QUIRK2_SKIP_TUNING) &&
2522                 host->ops->is_tuning_done) {
2523                 if(host->ops->is_tuning_done(host)) {
2524                         local_irq_restore(flags);
2525                         sdhci_runtime_pm_put(host);
2526                         return 0;
2527                 }
2528         }
2529
2530         if ((host->quirks2 & SDHCI_QUIRK2_NON_STD_TUNING_LOOP_CNTR) &&
2531                 (host->ops->get_max_tuning_loop_counter))
2532                 tuning_loop_counter =
2533                         host->ops->get_max_tuning_loop_counter(host);
2534
2535         spin_lock(&host->lock);
2536         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2537
2538         /*
2539          * The Host Controller needs tuning only in case of SDR104 mode
2540          * and for SDR50 mode when Use Tuning for SDR50 is set in the
2541          * Capabilities register.
2542          * If the Host Controller supports the HS200 mode then the
2543          * tuning function has to be executed.
2544          */
2545         if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
2546             (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
2547              host->flags & SDHCI_HS200_NEEDS_TUNING))
2548                 requires_tuning_nonuhs = true;
2549
2550         if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
2551             requires_tuning_nonuhs)
2552                 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2553         else {
2554                 spin_unlock_irqrestore(&host->lock, flags);
2555                 sdhci_runtime_pm_put(host);
2556                 return 0;
2557         }
2558
2559         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2560
2561         /*
2562          * As per the Host Controller spec v3.00, tuning command
2563          * generates Buffer Read Ready interrupt, so enable that.
2564          *
2565          * Note: The spec clearly says that when tuning sequence
2566          * is being performed, the controller does not generate
2567          * interrupts other than Buffer Read Ready interrupt. But
2568          * to make sure we don't hit a controller bug, we _only_
2569          * enable Buffer Read Ready interrupt here.
2570          */
2571         ier = host->ier;
2572         sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
2573
2574         /*
2575          * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
2576          * of loops reaches 40 times or a timeout of 150ms occurs.
2577          */
2578         timeout = 150;
2579         do {
2580                 struct mmc_command cmd = {0};
2581                 struct mmc_request mrq = {NULL};
2582
2583                 if (!tuning_loop_counter && !timeout)
2584                         break;
2585
2586                 cmd.opcode = opcode;
2587                 cmd.arg = 0;
2588                 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2589                 cmd.retries = 0;
2590                 cmd.data = NULL;
2591                 cmd.error = 0;
2592
2593                 mrq.cmd = &cmd;
2594                 host->mrq_cmd = &mrq;
2595
2596                 if (host->quirks2 & SDHCI_QUIRK2_NON_STD_TUN_CARD_CLOCK) {
2597                         clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2598                         clk &= ~SDHCI_CLOCK_CARD_EN;
2599                         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2600                 }
2601
2602                 /*
2603                  * In response to CMD19, the card sends 64 bytes of tuning
2604                  * block to the Host Controller. So we set the block size
2605                  * to 64 here.
2606                  * In response to CMD21, the card sends 128 bytes of tuning
2607                  * block for MMC_BUS_WIDTH_8 and 64 bytes for MMC_BUS_WIDTH_4
2608                  * to the Host Controller. So we set the block size to 64 here.
2609                  */
2610                 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
2611                         if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2612                                 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
2613                                              SDHCI_BLOCK_SIZE);
2614                         else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
2615                                 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
2616                                              SDHCI_BLOCK_SIZE);
2617                 } else {
2618                         sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
2619                                      SDHCI_BLOCK_SIZE);
2620                 }
2621
2622                 /*
2623                  * The tuning block is sent by the card to the host controller.
2624                  * So we set the TRNS_READ bit in the Transfer Mode register.
2625                  * This also takes care of setting DMA Enable and Multi Block
2626                  * Select in the same register to 0.
2627                  */
2628                 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2629
2630                 sdhci_send_command(host, &cmd);
2631
2632                 host->cmd = NULL;
2633                 host->mrq_cmd = NULL;
2634
2635                 spin_unlock_irqrestore(&host->lock, flags);
2636
2637                 if (host->quirks2 & SDHCI_QUIRK2_NON_STD_TUN_CARD_CLOCK) {
2638                         udelay(1);
2639                         sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
2640                         clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2641                         clk |= SDHCI_CLOCK_CARD_EN;
2642                         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2643                 }
2644
2645                 /* Wait for Buffer Read Ready interrupt */
2646                 wait_event_interruptible_timeout(host->buf_ready_int,
2647                                         (host->tuning_done == 1),
2648                                         msecs_to_jiffies(50));
2649                 spin_lock_irqsave(&host->lock, flags);
2650
2651                 if (!host->tuning_done) {
2652                         pr_info(DRIVER_NAME ": Timeout waiting for "
2653                                 "Buffer Read Ready interrupt during tuning "
2654                                 "procedure, falling back to fixed sampling "
2655                                 "clock\n");
2656                         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2657                         ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2658                         ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2659                         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2660
2661                         err = -EIO;
2662                         goto out;
2663                 }
2664
2665                 host->tuning_done = 0;
2666
2667                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2668                 tuning_loop_counter--;
2669                 timeout--;
2670                 mdelay(1);
2671         } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
2672
2673         /*
2674          * The Host Driver has exhausted the maximum number of loops allowed,
2675          * so use fixed sampling frequency.
2676          */
2677         if (!tuning_loop_counter || !timeout) {
2678                 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2679                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2680         } else {
2681                 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
2682                         pr_info(DRIVER_NAME ": Tuning procedure"
2683                                 " failed, falling back to fixed sampling"
2684                                 " clock\n");
2685                         err = -EIO;
2686                 } else {
2687                         sdhci_config_tap(mmc, SAVE_TUNED_TAP);
2688                         pr_info("%s: tap value and tuning window after hw tuning completion ...\n",
2689                                 mmc_hostname(mmc));
2690                         /* log tap, trim and tuning windows */
2691                         if (host->ops->dump_host_cust_regs)
2692                                 host->ops->dump_host_cust_regs(host);
2693                 }
2694         }
2695
2696 out:
2697         /*
2698          * If this is the very first time we are here, we start the retuning
2699          * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
2700          * flag won't be set, we check this condition before actually starting
2701          * the timer.
2702          */
2703         if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
2704             (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
2705                 host->flags |= SDHCI_USING_RETUNING_TIMER;
2706                 mod_timer(&host->tuning_timer, jiffies +
2707                         host->tuning_count * HZ);
2708                 /* Tuning mode 1 limits the maximum data length to 4MB */
2709                 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
2710         } else {
2711                 host->flags &= ~SDHCI_NEEDS_RETUNING;
2712                 /* Reload the new initial value for timer */
2713                 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2714                         mod_timer(&host->tuning_timer, jiffies +
2715                                 host->tuning_count * HZ);
2716         }
2717
2718         /*
2719          * In case tuning fails, host controllers which support re-tuning can
2720          * try tuning again at a later time, when the re-tuning timer expires.
2721          * So for these controllers, we return 0. Since there might be other
2722          * controllers who do not have this capability, we return error for
2723          * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
2724          * a retuning timer to do the retuning for the card.
2725          */
2726         if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
2727                 err = 0;
2728
2729         sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
2730         spin_unlock_irqrestore(&host->lock, flags);
2731         sdhci_runtime_pm_put(host);
2732
2733         return err;
2734 }
2735
2736
2737 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2738 {
2739         u16 ctrl;
2740
2741         /* Host Controller v3.00 defines preset value registers */
2742         if (host->version < SDHCI_SPEC_300)
2743                 return;
2744
2745         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2746
2747         /*
2748          * We only enable or disable Preset Value if they are not already
2749          * enabled or disabled respectively. Otherwise, we bail out.
2750          */
2751         if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
2752                 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2753                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2754                 host->flags |= SDHCI_PV_ENABLED;
2755         } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
2756                 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2757                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2758                 host->flags &= ~SDHCI_PV_ENABLED;
2759         }
2760 }
2761
2762 static void sdhci_card_event(struct mmc_host *mmc)
2763 {
2764         struct sdhci_host *host = mmc_priv(mmc);
2765         unsigned long flags;
2766
2767         /* sdhci_runtime_pm_get cannot be called here since
2768          * tasklet/softirq context cannot call
2769          * sleeping function like __pm_runtime_resume
2770          */
2771         spin_lock_irqsave(&host->lock, flags);
2772
2773         /* Check host->mrq_cmd first in case we are runtime suspended */
2774         if ((host->mrq_cmd || host->mrq_dat) &&
2775                 /* TODO: check if clocks are already ON when
2776                  * mrq_cmd or mrq_dat are enabled
2777                  */
2778             !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
2779                 pr_err("%s: Card removed during transfer!\n",
2780                         mmc_hostname(host->mmc));
2781                 pr_err("%s: Resetting controller.\n",
2782                         mmc_hostname(host->mmc));
2783
2784                 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2785
2786                 if (host->mrq_cmd) {
2787                         host->mrq_cmd->cmd->error = -ENOMEDIUM;
2788                         if (MMC_CHECK_CMDQ_MODE(host))
2789                                 tasklet_schedule(&host->finish_cmd_tasklet);
2790                         else
2791                                 tasklet_schedule(&host->finish_tasklet);
2792                 }
2793                 if (host->mrq_dat) {
2794                         host->mrq_dat->cmd->error = -ENOMEDIUM;
2795                         if (MMC_CHECK_CMDQ_MODE(host))
2796                                 tasklet_schedule(&host->finish_dat_tasklet);
2797                         else
2798                                 tasklet_schedule(&host->finish_tasklet);
2799                 }
2800         }
2801
2802         spin_unlock_irqrestore(&host->lock, flags);
2803 }
2804
2805 int sdhci_enable(struct mmc_host *mmc)
2806 {
2807         struct sdhci_host *host = mmc_priv(mmc);
2808
2809         if (!mmc->card || !(mmc->caps2 & MMC_CAP2_CLOCK_GATING))
2810                 return 0;
2811
2812         /* cancel delayed clk gate work */
2813         if (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
2814                 cancel_delayed_work_sync(&host->delayed_clk_gate_wrk);
2815
2816         sysedp_set_state(host->sysedpc, 1);
2817
2818         if (mmc->ios.clock) {
2819                 if (host->ops->set_clock)
2820                         host->ops->set_clock(host, mmc->ios.clock);
2821                 sdhci_set_clock(host, mmc->ios.clock);
2822         }
2823
2824         return 0;
2825 }
2826
2827 static void mmc_host_clk_gate(struct sdhci_host *host)
2828 {
2829         sdhci_set_clock(host, 0);
2830         if (host->ops->set_clock)
2831                 host->ops->set_clock(host, 0);
2832
2833         sysedp_set_state(host->sysedpc, 0);
2834
2835         return;
2836 }
2837
2838 void delayed_clk_gate_cb(struct work_struct *work)
2839 {
2840         struct sdhci_host *host = container_of(work, struct sdhci_host,
2841                                               delayed_clk_gate_wrk.work);
2842
2843         /* power off check */
2844         if (host->mmc->ios.power_mode == MMC_POWER_OFF)
2845                 goto end;
2846
2847         mmc_host_clk_gate(host);
2848 end:
2849         return;
2850 }
2851 EXPORT_SYMBOL_GPL(delayed_clk_gate_cb);
2852
2853 int sdhci_disable(struct mmc_host *mmc)
2854 {
2855         struct sdhci_host *host = mmc_priv(mmc);
2856
2857         if (!mmc->card || !(mmc->caps2 & MMC_CAP2_CLOCK_GATING))
2858                 return 0;
2859
2860         if (IS_DELAYED_CLK_GATE(host)) {
2861                 if (host->is_clk_on) {
2862                         if (IS_SDIO_CARD(host))
2863                                 host->clk_gate_tmout_ticks =
2864                                         SDIO_CLK_GATING_TICK_TMOUT;
2865                         else if (IS_EMMC_CARD(host))
2866                                 host->clk_gate_tmout_ticks =
2867                                         EMMC_CLK_GATING_TICK_TMOUT;
2868                         if (host->clk_gate_tmout_ticks > 0)
2869                                 schedule_delayed_work(
2870                                         &host->delayed_clk_gate_wrk,
2871                                         host->clk_gate_tmout_ticks);
2872                 }
2873                 return 0;
2874         }
2875
2876         mmc_host_clk_gate(host);
2877
2878         return 0;
2879 }
2880
2881 #ifdef CONFIG_MMC_FREQ_SCALING
2882 /*
2883  * Wrapper functions to call any platform specific implementation for
2884  * supporting dynamic frequency scaling for SD/MMC devices.
2885  */
2886 static int sdhci_gov_get_target(struct mmc_host *mmc, unsigned long *freq)
2887 {
2888         struct sdhci_host *host = mmc_priv(mmc);
2889
2890         if (host->ops->dfs_gov_get_target_freq)
2891                 *freq = host->ops->dfs_gov_get_target_freq(host,
2892                         mmc->devfreq_stats);
2893
2894         return 0;
2895 }
2896
2897 static int sdhci_gov_init(struct mmc_host *mmc)
2898 {
2899         struct sdhci_host *host = mmc_priv(mmc);
2900
2901         if (host->ops->dfs_gov_init)
2902                 return host->ops->dfs_gov_init(host);
2903
2904         return 0;
2905 }
2906
2907 static void sdhci_gov_exit(struct mmc_host *mmc)
2908 {
2909         struct sdhci_host *host = mmc_priv(mmc);
2910
2911         if (host->ops->dfs_gov_exit)
2912                 host->ops->dfs_gov_exit(host);
2913 }
2914 #endif
2915
2916 static int sdhci_select_drive_strength(struct mmc_host *mmc,
2917                                        unsigned int max_dtr,
2918                                        int host_drv,
2919                                        int card_drv)
2920 {
2921         struct sdhci_host *host = mmc_priv(mmc);
2922         unsigned char   drv_type;
2923
2924         /* return default strength if no handler in driver */
2925         if (!host->ops->get_drive_strength)
2926                 return MMC_SET_DRIVER_TYPE_B;
2927
2928         drv_type = host->ops->get_drive_strength(host, max_dtr,
2929                         host_drv, card_drv);
2930
2931         if (drv_type > MMC_SET_DRIVER_TYPE_D) {
2932                 pr_err("%s: Error on getting drive strength. Got drv_type %d\n"
2933                         , mmc_hostname(host->mmc), drv_type);
2934                 return MMC_SET_DRIVER_TYPE_B;
2935         }
2936
2937         return drv_type;
2938 }
2939 static void sdhci_init_card(struct mmc_host *mmc, struct mmc_card *card)
2940 {
2941         struct sdhci_host *host = mmc_priv(mmc);
2942
2943         /*
2944          * Get the max pio transfer limits if defined. This would be used to
2945          * dynamically choose between dma and pio modes depending on the
2946          * transfer parameters.
2947          */
2948         if (host->ops->get_max_pio_transfer_limits)
2949                 host->ops->get_max_pio_transfer_limits(host);
2950 }
/*
 * mmc_host_ops table wiring this SDHCI implementation into the MMC
 * core; the DFS governor hooks are only present when frequency scaling
 * is configured.
 */
static const struct mmc_host_ops sdhci_ops = {
	.request        = sdhci_request,
	.set_ios        = sdhci_set_ios,
	.get_cd         = sdhci_get_cd,
	.get_ro         = sdhci_get_ro,
	.hw_reset       = sdhci_hw_reset,
	.enable         = sdhci_enable,
	.disable        = sdhci_disable,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch    = sdhci_start_signal_voltage_switch,
	.execute_tuning                 = sdhci_execute_tuning,
	.validate_sd2_0                 = sdhci_validate_sd2_0,
	.card_event                     = sdhci_card_event,
	.card_busy      = sdhci_card_busy,
#ifdef CONFIG_MMC_FREQ_SCALING
	.dfs_governor_init              = sdhci_gov_init,
	.dfs_governor_exit              = sdhci_gov_exit,
	.dfs_governor_get_target        = sdhci_gov_get_target,
#endif
	.select_drive_strength          = sdhci_select_drive_strength,
	.post_init      = sdhci_post_init,
	.init_card      = sdhci_init_card,
};
2974
2975 /*****************************************************************************\
2976  *                                                                           *
2977  * Tasklets                                                                  *
2978  *                                                                           *
2979 \*****************************************************************************/
2980
2981 static void sdhci_tasklet_card(unsigned long param)
2982 {
2983         struct sdhci_host *host = (struct sdhci_host *)param;
2984
2985         sdhci_card_event(host->mmc);
2986         if (host->detect_resume && host->ops->get_cd(host))
2987                 mmc_detect_change(host->mmc, msecs_to_jiffies(700));
2988         else
2989                 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
2990 }
2991
/*
 * Completion tasklet for non-command-queue mode: tear down the finished
 * request, reset the controller on error conditions, and hand the
 * request back to the MMC core.
 */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq = NULL;

	host = (struct sdhci_host *)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq_cmd && !host->mrq_dat) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* Request completed: stop the software timeout timer. */
	del_timer(&host->timer);

	/* Prefer the command request; fall back to the data request. */
	if (host->mrq_cmd)
		mrq = host->mrq_cmd;
	else if (host->mrq_dat)
		mrq = host->mrq_dat;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
		 (mrq->data && (mrq->data->error ||
		  (mrq->data->stop && mrq->data->stop->error))) ||
		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			sdhci_update_clock(host);

		/* Spec says we should do both at the same time */
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	}

	/* Clear all in-flight request state while still under the lock. */
	host->mrq_cmd = NULL;
	host->mrq_dat = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Report completion outside the lock, then drop the runtime PM
	 * reference taken when the request was started.
	 */
	mmc_request_done(host->mmc, mrq);
	sdhci_runtime_pm_put(host);
}
3052
3053 /*
3054  * This tasklet gets scheduled to handle CMD only requests in CQ.
3055  */
/*
 * This tasklet gets scheduled to handle CMD only requests in CQ.
 * It completes host->mrq_cmd, resetting the CMD state machine on error
 * conditions, and returns the request to the MMC core.
 */
static void sdhci_tasklet_cmd_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host *)param;

	/*
	 * NOTE(review): this mrq_cmd/mrq_dat check runs before host->lock
	 * is taken, unlike the checks in the sibling tasklets — verify it
	 * cannot race with request submission.
	 */
	if (!host->mrq_cmd && host->mrq_dat) {
		mmc_handle_queued_request(host->mmc, MMC_HANDLE_CLR_CMD);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq_cmd) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* Request completed: stop the software timeout timer. */
	del_timer(&host->timer);

	mrq = host->mrq_cmd;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
		 (mrq->data && (mrq->data->error ||
		  (mrq->data->stop && mrq->data->stop->error))) ||
		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			sdhci_update_clock(host);

		/* CMD-only completion: only the CMD line is reset here. */
		sdhci_reset(host, SDHCI_RESET_CMD);
	}

	host->mrq_cmd = NULL;
	host->cmd = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Report completion outside the lock, then drop the runtime PM
	 * reference taken when the request was started.
	 */
	mmc_request_done(host->mmc, mrq);
	sdhci_runtime_pm_put(host);
}
3115
/*
 * This tasklet gets scheduled to handle CMD with DATA requests in CQ.
 * It owns host->mrq_dat, the data half of a queued request.
 */
static void sdhci_tasklet_dat_finish(unsigned long param)
{
        struct sdhci_host *host;
        unsigned long flags;
        struct mmc_request *mrq;

        host = (struct sdhci_host *)param;

        spin_lock_irqsave(&host->lock, flags);

        /*
         * If this tasklet gets rescheduled while running, it will
         * be run again afterwards but without any active request.
         */
        if (!host->mrq_dat) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        del_timer(&host->timer);

        mrq = host->mrq_dat;

        /* Propagate "data finished before command" to the request */
        if (host->data_early)
                mrq->data_early = 1;

        /*
         * The controller needs a reset of internal state machines
         * upon error conditions.
         */
        if (!(host->flags & SDHCI_DEVICE_DEAD) &&
            ((mrq->cmd && mrq->cmd->error) ||
                 (mrq->data && (mrq->data->error ||
                  (mrq->data->stop && mrq->data->stop->error))) ||
                   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

                /* Some controllers need this kick or reset won't work here */
                if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
                        /* This is to force an update */
                        sdhci_update_clock(host);

                /* Reset only the DAT line; the CMD path has its own tasklet */
                sdhci_reset(host, SDHCI_RESET_DATA);
        }

        host->mrq_dat = NULL;
        host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
        sdhci_deactivate_led(host);
#endif

        /* Order the MMIO writes above before dropping the lock */
        mmiowb();
        spin_unlock_irqrestore(&host->lock, flags);

        /* Completion callback and PM ref drop run without the lock held */
        mmc_request_done(host->mmc, mrq);
        sdhci_runtime_pm_put(host);
}
3176
3177 static void sdhci_timeout_timer(unsigned long data)
3178 {
3179         struct sdhci_host *host;
3180         unsigned long flags;
3181
3182         host = (struct sdhci_host *)data;
3183
3184         spin_lock_irqsave(&host->lock, flags);
3185
3186         if (host->mrq_cmd || host->mrq_dat) {
3187                 pr_err("%s: Timeout waiting for hardware "
3188                         "interrupt.\n", mmc_hostname(host->mmc));
3189                 sdhci_dumpregs(host);
3190
3191                 if (host->data) {
3192                         host->data->error = -ETIMEDOUT;
3193                         sdhci_finish_data(host);
3194                 } else {
3195                         if (host->cmd)
3196                                 host->cmd->error = -ETIMEDOUT;
3197                         else if (host->mrq_dat)
3198                                 host->mrq_dat->cmd->error = -ETIMEDOUT;
3199
3200                         if (MMC_CHECK_CMDQ_MODE(host))
3201                                 tasklet_schedule(&host->finish_cmd_tasklet);
3202                         else
3203                                 tasklet_schedule(&host->finish_tasklet);
3204                 }
3205         }
3206
3207         mmiowb();
3208         spin_unlock_irqrestore(&host->lock, flags);
3209 }
3210
3211 static void sdhci_tuning_timer(unsigned long data)
3212 {
3213         struct sdhci_host *host;
3214         unsigned long flags;
3215
3216         host = (struct sdhci_host *)data;
3217
3218         spin_lock_irqsave(&host->lock, flags);
3219
3220         host->flags |= SDHCI_NEEDS_RETUNING;
3221
3222         spin_unlock_irqrestore(&host->lock, flags);
3223 }
3224
3225 /*****************************************************************************\
3226  *                                                                           *
3227  * Interrupt handling                                                        *
3228  *                                                                           *
3229 \*****************************************************************************/
3230
/*
 * Handle the command-related interrupt status bits for the current
 * command.  Called from sdhci_irq() with host->lock held.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
        bool skip_dump = false;

        BUG_ON(intmask == 0);

        /* Spurious command interrupt: log and bail */
        if (!host->cmd) {
                pr_err("%s: Got command interrupt 0x%08x even "
                        "though no command operation was in progress.\n",
                        mmc_hostname(host->mmc), (unsigned)intmask);
                sdhci_dumpregs(host);
                return;
        }

        if (intmask & SDHCI_INT_TIMEOUT)
                host->cmd->error = -ETIMEDOUT;
        else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
                        SDHCI_INT_INDEX)) {
                host->cmd->error = -EILSEQ;

                /*
                 * Platform hook may ask to suppress the register dump;
                 * only INDEX errors are suppressed below.  NOTE(review):
                 * the reason is platform-specific (register access
                 * presumed unsafe in that state) — confirm with the
                 * platform driver.
                 */
                if (host->ops->skip_register_dump)
                        skip_dump = host->ops->skip_register_dump(host);
                if (skip_dump &&
                        (intmask & SDHCI_INT_INDEX))
                        goto lbl_suppress_dump;

                sdhci_dumpregs(host);
                if (intmask & SDHCI_INT_INDEX)
                        pr_err("%s: Command INDEX error, intmask: %x Interface clock = %uHz\n",
                        mmc_hostname(host->mmc), intmask, host->max_clk);
                else if (intmask & SDHCI_INT_CRC)
                        pr_err("%s: Command CRC error, intmask: %x Interface clock = %uHz\n",
                        mmc_hostname(host->mmc), intmask, host->max_clk);
                else if (intmask & SDHCI_INT_END_BIT)
                        pr_err("%s: Command END BIT error, intmask: %x Interface clock = %uHz\n",
                        mmc_hostname(host->mmc), intmask, host->max_clk);
        }

lbl_suppress_dump:
        if (host->cmd->error) {
                /* CMDQ mode finishes the command half on its own tasklet */
                if (MMC_CHECK_CMDQ_MODE(host))
                        tasklet_schedule(&host->finish_cmd_tasklet);
                else
                        tasklet_schedule(&host->finish_tasklet);
                return;
        }

        /*
         * The host can send an interrupt when the busy state has
         * ended, allowing us to wait without wasting CPU cycles.
         * Unfortunately this is overloaded on the "data complete"
         * interrupt, so we need to take some care when handling
         * it.
         *
         * Note: The 1.0 specification is a bit ambiguous about this
         *       feature so there might be some problems with older
         *       controllers.
         */
        if (host->cmd->flags & MMC_RSP_BUSY) {
                if (host->cmd->data)
                        DBG("Cannot wait for busy signal when also "
                                "doing a data transfer");
                else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
                        return;

                /* The controller does not support the end-of-busy IRQ,
                 * fall through and take the SDHCI_INT_RESPONSE */
        }

        if (intmask & SDHCI_INT_RESPONSE)
                sdhci_finish_command(host);
}
3303
#ifdef CONFIG_MMC_DEBUG
/*
 * Dump the ADMA descriptor table after the controller reported an ADMA
 * error.  Descriptor stride depends on the addressing mode:
 *   16 bytes - 64-bit addressing, host version 4 enabled
 *   12 bytes - 64-bit addressing, pre-version-4 layout
 *    8 bytes - 32-bit addressing
 * Every layout keeps attribute at offset 0, length at offset 2 and the
 * DMA address at offset 4 (32- or 64-bit wide).
 */
static void sdhci_show_adma_error(struct sdhci_host *host)
{
        const char *name = mmc_hostname(host->mmc);
        u8 *desc = host->adma_desc;
        __le32 *dma;
        __le16 *len;
        u8 attr;

        u32 ctrl;
        int next_desc;
        ctrl = sdhci_readl(host, SDHCI_ACMD12_ERR);
        if (ctrl & SDHCI_ADDRESSING_64BIT_EN) {
                if (ctrl & SDHCI_HOST_VERSION_4_EN)
                        next_desc = 16;
                else
                        next_desc = 12;
        } else {
                /* 32 bit DMA mode supported*/
                next_desc = 8;
        }

        sdhci_dumpregs(host);

        while (true) {
                dma = (__le32 *)(desc + 4);
                len = (__le16 *)(desc + 2);
                attr = *desc;

                if (next_desc == 8) {
                        DBG("%s: %p: DMA-32 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
                                name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
                } else {
                        /*
                         * 12- and 16-byte descriptors both carry a 64-bit
                         * address at offset 4.  Fixes two bugs: the old
                         * "%16x" conversion did not match its 64-bit
                         * argument (undefined behaviour per printf), and
                         * 12-byte descriptors were never printed at all.
                         */
                        DBG("%s: %p: DMA-64 0x%016llx, LEN 0x%04x, Attr=0x%02x\n",
                                name, desc,
                                (unsigned long long)le64_to_cpu(*((__le64 *)dma)),
                                le16_to_cpu(*len), attr);
                }
                desc += next_desc;
                /* Bit 1 of the attribute byte is the END-of-table flag */
                if (attr & 2)
                        break;
        }
}
#else
static void sdhci_show_adma_error(struct sdhci_host *host) { }
#endif
3348
/*
 * Handle the data-related interrupt status bits for the current request.
 * Called from sdhci_irq() with host->lock held.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
        u32 command;
        BUG_ON(intmask == 0);

        /* CMD19, CMD21 generates _only_ Buffer Read Ready interrupt */
        if (intmask & SDHCI_INT_DATA_AVAIL) {
                command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
                if (command == MMC_SEND_TUNING_BLOCK ||
                    command == MMC_SEND_TUNING_BLOCK_HS200) {
                        /* Wake the tuning waiter; no further handling here */
                        host->tuning_done = 1;
                        wake_up(&host->buf_ready_int);
                        return;
                }
        }

        if (!host->data) {
                /*
                 * The "data complete" interrupt is also used to
                 * indicate that a busy state has ended. See comment
                 * above in sdhci_cmd_irq().
                 */
                if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
                        if (intmask & SDHCI_INT_DATA_END) {
                                sdhci_finish_command(host);
                                return;
                        }
                }

                pr_err("%s: Got data interrupt 0x%08x even "
                        "though no data operation was in progress.\n",
                        mmc_hostname(host->mmc), (unsigned)intmask);
                sdhci_dumpregs(host);

                return;
        }

        /* Map error status bits to host->data->error */
        if (intmask & SDHCI_INT_DATA_TIMEOUT) {
                host->data->error = -ETIMEDOUT;
                pr_err("%s: Data Timeout error, intmask: %x Interface clock = %uHz\n",
                        mmc_hostname(host->mmc), intmask, host->max_clk);
                sdhci_dumpregs(host);
        } else if (intmask & SDHCI_INT_DATA_END_BIT) {
                host->data->error = -EILSEQ;
                pr_err("%s: Data END Bit error, intmask: %x Interface clock = %uHz\n",
                        mmc_hostname(host->mmc), intmask, host->max_clk);
        } else if ((intmask & SDHCI_INT_DATA_CRC) &&
                SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
                        != MMC_BUS_TEST_R) {
                /* CRC errors during CMD14 (bus test read) are not flagged */
                host->data->error = -EILSEQ;
                pr_err("%s: Data CRC error, intmask: %x Interface clock = %uHz\n",
                        mmc_hostname(host->mmc), intmask, host->max_clk);
                sdhci_dumpregs(host);
        } else if (intmask & SDHCI_INT_ADMA_ERROR) {
                pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
                sdhci_dumpregs(host);
                sdhci_show_adma_error(host);
                host->data->error = -EIO;
                if (host->ops->adma_workaround)
                        host->ops->adma_workaround(host, intmask);
        }

        if (host->data->error)
                sdhci_finish_data(host);
        else {
                if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
                        sdhci_transfer_pio(host);

                /*
                 * We currently don't do anything fancy with DMA
                 * boundaries, but as we can't disable the feature
                 * we need to at least restart the transfer.
                 *
                 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
                 * should return a valid address to continue from, but as
                 * some controllers are faulty, don't trust them.
                 */
                if (intmask & SDHCI_INT_DMA_END) {
                        u32 dmastart, dmanow;
                        dmastart = sg_dma_address(host->data->sg);
                        dmanow = dmastart + host->data->bytes_xfered;
                        /*
                         * Force update to the next DMA block boundary.
                         */
                        dmanow = (dmanow &
                                ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
                                SDHCI_DEFAULT_BOUNDARY_SIZE;
                        host->data->bytes_xfered = dmanow - dmastart;
                        DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
                                " next 0x%08x\n",
                                mmc_hostname(host->mmc), dmastart,
                                host->data->bytes_xfered, dmanow);
                        sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
                }

                if (intmask & SDHCI_INT_DATA_END) {
                        if ((!MMC_CHECK_CMDQ_MODE(host) && host->cmd) ||
                                (MMC_CHECK_CMDQ_MODE(host) && host->cmd && (host->mrq_dat->cmd == host->cmd))) {

                                /*
                                 * Data managed to finish before the
                                 * command completed. Make sure we do
                                 * things in the proper order.
                                 */
                                host->data_early = 1;
                        } else
                                sdhci_finish_data(host);
                }
        }
}
3459
/*
 * Top-level interrupt handler.  Reads SDHCI_INT_STATUS, acknowledges and
 * dispatches the command/data bits, then re-reads the status and loops
 * (bounded by max_loops) while new interrupt bits keep arriving.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
        irqreturn_t result;
        struct sdhci_host *host = dev_id;
        u32 intmask, unexpected = 0;
        int cardint = 0, max_loops = 16;

        spin_lock(&host->lock);

        /* Register access is not safe while runtime suspended */
        if (host->runtime_suspended) {
                spin_unlock(&host->lock);
                pr_warning("%s: got irq while runtime suspended\n",
                       mmc_hostname(host->mmc));
                return IRQ_HANDLED;
        }

        intmask = sdhci_readl(host, SDHCI_INT_STATUS);

        /* All-ones reads back when the controller is powered off/gone */
        if (!intmask || intmask == 0xffffffff) {
                result = IRQ_NONE;
                goto out;
        }

again:
        DBG("*** %s got interrupt: 0x%08x\n",
                mmc_hostname(host->mmc), intmask);

        if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
                u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
                              SDHCI_CARD_PRESENT;

                /*
                 * There is a observation on i.mx esdhc.  INSERT bit will be
                 * immediately set again when it gets cleared, if a card is
                 * inserted.  We have to mask the irq to prevent interrupt
                 * storm which will freeze the system.  And the REMOVE gets
                 * the same situation.
                 *
                 * More testing are needed here to ensure it works for other
                 * platforms though.
                 */
                sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
                                                SDHCI_INT_CARD_REMOVE);
                sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
                                                  SDHCI_INT_CARD_INSERT);

                sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
                             SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
                intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
                tasklet_schedule(&host->card_tasklet);
        }

#ifdef CONFIG_CMD_DUMP
        /* Log interrupt status for the first host instance only */
        if (mmc_hostname(host->mmc)[3] == '0')
                dbg_add_host_log(host->mmc, 7,  intmask, 0xffffffff);
#endif

        /* Ack-then-dispatch: clear the status bits before handling them */
        if (intmask & SDHCI_INT_CMD_MASK) {
                sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
                        SDHCI_INT_STATUS);
                sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
        }

        if (intmask & SDHCI_INT_DATA_MASK) {
                sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
                        SDHCI_INT_STATUS);
                sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
        }

        if (intmask & SDHCI_INT_RETUNING_EVENT)
                host->flags |= SDHCI_NEEDS_RETUNING;

        /* Platform hook for error statistics accounting */
        if ((intmask & SDHCI_INT_DATA_MASK) || (intmask & SDHCI_INT_CMD_MASK))
                if (host->ops->sd_error_stats)
                        host->ops->sd_error_stats(host, intmask);

        intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

        intmask &= ~SDHCI_INT_ERROR;

        if (intmask & SDHCI_INT_BUS_POWER) {
                pr_err("%s: Current limit error, intmask: %x Interface clock = %uHz\n",
                        mmc_hostname(host->mmc), intmask, host->max_clk);
                pr_err("%s: Card is consuming too much power!\n",
                        mmc_hostname(host->mmc));
                sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
        }

        /* print the errors based on the SD Host controller spec */
        if ((intmask & SDHCI_INT_TIMEOUT) || (intmask & SDHCI_INT_CRC)) {
                pr_err("%s: %s, intmask: %x Interface clock = %uHz\n",
                        mmc_hostname(host->mmc),
                        resp_error[RESP_ERROR_INDEX(intmask)],
                        intmask, host->max_clk);
        }

        intmask &= ~SDHCI_INT_BUS_POWER;

        /* SDIO card interrupt: signalled after the lock is dropped */
        if (intmask & SDHCI_INT_CARD_INT)
                cardint = 1;

        intmask &= ~SDHCI_INT_CARD_INT;

        /* Anything left over is unexpected; ack it and report below */
        if (intmask) {
                unexpected |= intmask;
                sdhci_writel(host, intmask, SDHCI_INT_STATUS);
        }

        result = IRQ_HANDLED;

        /* Re-read: keep servicing while new status arrives, up to max_loops */
        intmask = sdhci_readl(host, SDHCI_INT_STATUS);
        if (intmask && --max_loops)
                goto again;
out:
        spin_unlock(&host->lock);

        if (unexpected) {
                pr_err("%s: Unexpected interrupt 0x%08x.\n",
                           mmc_hostname(host->mmc), unexpected);
                sdhci_dumpregs(host);
        }
        /*
         * We have to delay this as it calls back into the driver.
         */
        if (cardint)
                mmc_signal_sdio_irq(host->mmc);

        return result;
}
3589
3590 /*****************************************************************************\
3591  *                                                                           *
3592  * Suspend/resume                                                            *
3593  *                                                                           *
3594 \*****************************************************************************/
3595
3596 #ifdef CONFIG_PM
3597 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
3598 {
3599         u8 val;
3600         u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3601                         | SDHCI_WAKE_ON_INT;
3602
3603         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3604         val |= mask ;
3605         /* Avoid fake wake up */
3606         if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
3607                 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
3608         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3609 }
3610 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
3611
3612 void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3613 {
3614         u8 val;
3615         u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3616                         | SDHCI_WAKE_ON_INT;
3617
3618         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3619         val &= ~mask;
3620         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3621 }
3622 EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
3623
/*
 * System suspend: stop retuning, suspend the MMC core, park the clocks
 * and either disable the IRQ or arm it as a wakeup source.  Returns 0 on
 * success, or the mmc_suspend_host() error after restoring state.
 */
int sdhci_suspend_host(struct sdhci_host *host)
{
        int ret;
        struct mmc_host *mmc = host->mmc;

        /* Lets the runtime-PM helpers recognise suspend-path calls */
        host->suspend_task = current;

        if (host->ops->platform_suspend)
                host->ops->platform_suspend(host);

        sdhci_disable_card_detection(host);

        /* Disable tuning since we are suspending */
        if (host->flags & SDHCI_USING_RETUNING_TIMER) {
                del_timer_sync(&host->tuning_timer);
                host->flags &= ~SDHCI_NEEDS_RETUNING;
        }

        /*
         * If eMMC cards are put in sleep state, Vccq can be disabled
         * but Vcc would still be powered on. In resume, we only restore
         * the controller context. So, set MMC_PM_KEEP_POWER flag.
         */
        if (mmc_card_can_sleep(mmc) && !(mmc->caps2 & MMC_CAP2_NO_SLEEP_CMD))
                mmc->pm_flags |= MMC_PM_KEEP_POWER;

        ret = mmc_suspend_host(host->mmc);
        if (ret) {
                /* Core suspend failed: re-arm retuning and card detection */
                if (host->flags & SDHCI_USING_RETUNING_TIMER) {
                        host->flags |= SDHCI_NEEDS_RETUNING;
                        mod_timer(&host->tuning_timer, jiffies +
                                        host->tuning_count * HZ);
                }

                sdhci_enable_card_detection(host);

                host->suspend_task = NULL;
                return ret;
        }
        /* cancel delayed clk gate work */
        if (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
                cancel_delayed_work_sync(&host->delayed_clk_gate_wrk);

        /*
         * If host clock is disabled but the register access requires host
         * clock, then enable the clock, mask the interrupts and disable
         * the clock.
         */
        if (host->quirks2 & SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
                if ((!host->clock && host->ops->set_clock) &&
                        (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE))
                        host->ops->set_clock(host, max(mmc->ios.clock, mmc->f_min));

        /* Remember SDIO card-interrupt state so resume can restore it */
        if (mmc->pm_flags & MMC_PM_KEEP_POWER)
                host->card_int_set = host->ier &
                        SDHCI_INT_CARD_INT;

        if (!device_may_wakeup(mmc_dev(host->mmc))) {
                /* Not a wakeup source: mask everything and turn the IRQ off */
                sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);

                if (host->quirks2 & SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
                        if ((!host->clock && host->ops->set_clock) &&
                        (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE))
                                host->ops->set_clock(host, 0);

                if (host->irq)
                        disable_irq(host->irq);
        } else {
                /* Wakeup source: arm controller wakeups, keep IRQ for wake */
                sdhci_enable_irq_wakeups(host);
                enable_irq_wake(host->irq);

                if (host->quirks2 & SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
                        if ((!host->clock && host->ops->set_clock) &&
                        (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE))
                                host->ops->set_clock(host, 0);
        }

        host->suspend_task = NULL;

        return ret;
}
3705
3706 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3707
/*
 * System resume: re-enable DMA and the IRQ, reinitialize the controller
 * (minimally when the card kept power), resume the MMC core, and restore
 * the SDIO card interrupt and retuning state.
 */
int sdhci_resume_host(struct sdhci_host *host)
{
        int ret;
        struct mmc_host *mmc = host->mmc;

        /* Lets the runtime-PM helpers recognise resume-path calls */
        host->suspend_task = current;


        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                if (host->ops->enable_dma)
                        host->ops->enable_dma(host);
        }

        /* Mirror of the suspend-side IRQ handling */
        if (!device_may_wakeup(mmc_dev(host->mmc))) {
                if (host->irq)
                        enable_irq(host->irq);
        } else {
                sdhci_disable_irq_wakeups(host);
                disable_irq_wake(host->irq);
        }

        if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
            (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
                /* Card keeps power but host controller does not */
                sdhci_init(host, 0);
                /* Zero pwr/clock to force set_ios to reprogram both */
                host->pwr = 0;
                host->clock = 0;
                sdhci_do_set_ios(host, &host->mmc->ios);
        } else {
                sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
                mmiowb();
        }

        ret = mmc_resume_host(host->mmc);
        /* Enable card interrupt as it is overwritten in sdhci_init */
        if ((mmc->caps & MMC_CAP_SDIO_IRQ) &&
                (mmc->pm_flags & MMC_PM_KEEP_POWER))
                        if (host->card_int_set)
                                mmc->ops->enable_sdio_irq(mmc, true);

        sdhci_enable_card_detection(host);

        if (host->ops->platform_resume)
                host->ops->platform_resume(host);

        /* Set the re-tuning expiration flag */
        if (host->flags & SDHCI_USING_RETUNING_TIMER)
                host->flags |= SDHCI_NEEDS_RETUNING;

        host->suspend_task = NULL;

        return ret;
}
3761
3762 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3763 #endif /* CONFIG_PM */
3764
3765 #ifdef CONFIG_PM_RUNTIME
3766
3767 static int sdhci_runtime_pm_get(struct sdhci_host *host)
3768 {
3769         int present;
3770
3771         if (!(host->quirks2 & SDHCI_QUIRK2_MMC_RTPM))
3772                 return 0;
3773
3774         present = mmc_gpio_get_cd(host->mmc);
3775         if (present < 0) {
3776                 /* If polling, assume that the card is always present. */
3777                 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
3778                         if (host->ops->get_cd)
3779                                 present = host->ops->get_cd(host);
3780                         else
3781                                 present = 1;
3782                 else
3783                         present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3784                                         SDHCI_CARD_PRESENT;
3785         }
3786
3787         if ((present && !host->mmc->card && (host->runtime_suspended == false))
3788                                         || host->suspend_task == current) {
3789                 pm_runtime_get_noresume(host->mmc->parent);
3790                 return 0;
3791         }
3792
3793         return pm_runtime_get_sync(host->mmc->parent);
3794 }
3795
3796 static int sdhci_runtime_pm_put(struct sdhci_host *host)
3797 {
3798         int present;
3799
3800         if (!(host->quirks2 & SDHCI_QUIRK2_MMC_RTPM))
3801                 return 0;
3802
3803         present = mmc_gpio_get_cd(host->mmc);
3804         if (present < 0) {
3805                 /* If polling, assume that the card is always present. */
3806                 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
3807                         if (host->ops->get_cd)
3808                                 present = host->ops->get_cd(host);
3809                         else
3810                                 present = 1;
3811                 else
3812                         present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3813                                         SDHCI_CARD_PRESENT;
3814         }
3815         if ((present && !host->mmc->card) || host->suspend_task == current) {
3816                 pm_runtime_mark_last_busy(host->mmc->parent);
3817                 pm_runtime_put_noidle(host->mmc->parent);
3818                 return 0;
3819         }
3820
3821         pm_runtime_mark_last_busy(host->mmc->parent);
3822         return pm_runtime_put_autosuspend(host->mmc->parent);
3823 }
3824
/*
 * Runtime suspend.  Non-standard RTPM platforms just gate the clocks;
 * standard ones additionally stop retuning and mask all interrupts
 * (briefly running the clock at f_min so registers can be written)
 * before gating.  Always returns 0.
 */
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
        unsigned long flags;
        int ret = 0;

        if (!(host->quirks2 & SDHCI_QUIRK2_MMC_RTPM))
                return 0;

        if (host->quirks2 & SDHCI_QUIRK2_NON_STD_RTPM) {
                spin_lock_irqsave(&host->lock, flags);
                host->runtime_suspended = true;
                spin_unlock_irqrestore(&host->lock, flags);

                sdhci_set_clock(host, 0);
                if (host->ops->set_clock)
                        host->ops->set_clock(host, 0);
                /* Report idle state to system EDP accounting */
                sysedp_set_state(host->sysedpc, 0);
                goto lbl_end;
        }

        /* Disable tuning since we are suspending */
        if (host->flags & SDHCI_USING_RETUNING_TIMER) {
                del_timer_sync(&host->tuning_timer);
                host->flags &= ~SDHCI_NEEDS_RETUNING;
        }

        /* Run a minimal clock so the interrupt-mask write below works */
        if (host->ops->set_clock)
                host->ops->set_clock(host, host->mmc->f_min);
        sdhci_set_clock(host, host->mmc->f_min);

        spin_lock_irqsave(&host->lock, flags);
        sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
        spin_unlock_irqrestore(&host->lock, flags);

        /* Let any in-flight interrupt handler finish before suspending */
        synchronize_irq(host->irq);

        spin_lock_irqsave(&host->lock, flags);
        host->runtime_suspended = true;
        spin_unlock_irqrestore(&host->lock, flags);

        sdhci_set_clock(host, 0);
        if (host->ops->set_clock)
                host->ops->set_clock(host, 0);

lbl_end:
        return ret;
}
3872 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3873
/*
 * sdhci_runtime_resume_host - restore controller state after runtime suspend
 * @host: SDHCI host to resume
 *
 * Re-enables clocks, DMA and interrupts that were shut down by
 * sdhci_runtime_suspend_host().  Returns 0 (ret is never set non-zero in
 * this function); also returns 0 immediately when the platform has not
 * opted into MMC runtime PM (SDHCI_QUIRK2_MMC_RTPM not set).
 */
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
	/* Snapshot flags before sdhci_init() below can modify them. */
	int ret = 0, host_flags = host->flags;
	unsigned int freq;

	/* Runtime PM not enabled for this host: nothing to restore. */
	if (!(host->quirks2 & SDHCI_QUIRK2_MMC_RTPM))
		return 0;

	/*
	 * Non-standard RTPM path: only the clock (and sysedp state) was
	 * dropped at suspend, so skip the full controller re-init below.
	 */
	if (host->quirks2 & SDHCI_QUIRK2_NON_STD_RTPM) {
		if (host->mmc->ios.clock) {
			freq = host->mmc->ios.clock;
		} else {
			/* No active clock request: use the minimum rate. */
			if (!host->mmc->f_min)
				host->mmc->f_min = MIN_SDMMC_FREQ;
			freq = host->mmc->f_min;
			host->clock = freq;
		}

		/* Platform clock hook first, then the generic divider setup. */
		if (host->ops->set_clock)
			host->ops->set_clock(host, freq);
		sdhci_set_clock(host, freq);

		sysedp_set_state(host->sysedpc, 1);
		spin_lock_irqsave(&host->lock, flags);
		host->runtime_suspended = false;
		spin_unlock_irqrestore(&host->lock, flags);
		goto lbl_end;
	}

	/* Standard path: bring the clock up at f_min for re-initialisation. */
	if (host->ops->set_clock)
		host->ops->set_clock(host, host->mmc->f_min);
	sdhci_set_clock(host, host->mmc->f_min);

	/* Re-enable DMA if it was in use before suspend. */
	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_set_ios(host, &host->mmc->ios);

	if (host->mmc->ios.clock) {
		sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
	/* Do any post voltage switch platform specific configuration */
		if (host->ops->switch_signal_voltage_exit)
			host->ops->switch_signal_voltage_exit(host,
				host->mmc->ios.signal_voltage);
	}

	/* Restore preset-value mode if it was enabled and is not broken. */
	if ((host_flags & SDHCI_PV_ENABLED) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	/* Set the re-tuning expiration flag */
	if (host->flags & SDHCI_USING_RETUNING_TIMER)
		host->flags |= SDHCI_NEEDS_RETUNING;

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

lbl_end:
	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3956
3957 #endif
3958
3959 /*****************************************************************************\
3960  *                                                                           *
3961  * Device allocation/registration                                            *
3962  *                                                                           *
3963 \*****************************************************************************/
3964
3965 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3966         size_t priv_size)
3967 {
3968         struct mmc_host *mmc;
3969         struct sdhci_host *host;
3970
3971         WARN_ON(dev == NULL);
3972
3973         mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3974         if (!mmc)
3975                 return ERR_PTR(-ENOMEM);
3976
3977         host = mmc_priv(mmc);
3978         host->mmc = mmc;
3979
3980         return host;
3981 }
3982
3983 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3984
3985 #ifdef CONFIG_DEBUG_FS
/*
 * show_sdhci_perf_stats - seq_file show callback for the per-host
 * "sdhci_perf_stats" debugfs node.
 *
 * Walks the host's linked list of data_stat_entry records (one per
 * block-size/direction/blocks-per-transfer class) and prints, per class,
 * the cumulative and most-recent throughput in kbps, followed by overall
 * read and write summaries.  Always returns 0.
 */
static int show_sdhci_perf_stats(struct seq_file *s, void *data)
{
	struct sdhci_host *host = s->private;
	int i;
	u32 avg_perf2;
	u32 last_perf_in_class;
	struct data_stat_entry *stat = NULL;
	char buf[250];
	u64 total_rd_bytes;
	u64 total_wr_bytes;
	u64 total_rd_usecs;
	u64 total_wr_usecs;
	unsigned int overall_avg_rd_perf2;
	unsigned int overall_avg_wr_perf2;
	/*
	 * NOTE(review): these are passed by address to sdhci_div64(), whose
	 * other call sites here use u32 */u32*/unsigned int* arguments --
	 * confirm sdhci_div64's third-parameter type matches int *.
	 */
	int rd_percent, wr_percent;

	seq_printf(s, "SDHCI(%s): perf statistics stat_size=%d\n",
		mmc_hostname(host->mmc),
		host->sdhci_data_stat.stat_size
		);
	/* Print the column headers only when there is at least one class. */
	if (host->sdhci_data_stat.stat_size) {
		seq_printf(s, "SDHCI(%s): perf statistics:\n",
			mmc_hostname(host->mmc));
		seq_puts(s,
		"Note: Performance figures in kilo bits per sec(kbps)\n");
		seq_puts(s,
		"S.No.    Block       Direction    Num blks/        Total     Total           Total          Last            Last usec          Avg kbps        Last kbps           Min kbps   Max kbps\n");
		seq_puts(s,
		"         Size        (R/W)        transfer         Bytes     Transfers       Time(usec)     Bytes           Duration           Perf            Perf                Perf       Perf\n");
	}
	total_rd_bytes = 0;
	total_wr_bytes = 0;
	total_rd_usecs = 0;
	total_wr_usecs = 0;
	/* Walk the singly linked stat list, stat_size entries expected. */
	for (i = 0; i < host->sdhci_data_stat.stat_size; i++) {
		if (!stat)
			stat = host->sdhci_data_stat.head;
		else
			stat = stat->next;
		/* Defensive: list shorter than stat_size claims. */
		if (!stat) {
			pr_err("%s %s: sdhci data stat head NULL i=%d\n",
				mmc_hostname(host->mmc), __func__, i);
			break;
		}
		/* bytes << 3 converts to bits; * 1000 converts usecs -> kbps */
		sdhci_div64(
			((stat->total_bytes << 3) * 1000),
			stat->total_usecs, &avg_perf2);
		sdhci_div32(
			(((u32)stat->current_transferred_bytes << 3) * 1000),
			stat->duration_usecs,
			&last_perf_in_class);
		if (stat->is_read) {
			total_rd_bytes += stat->total_bytes;
			total_rd_usecs += stat->total_usecs;
		} else {
			total_wr_bytes += stat->total_bytes;
			total_wr_usecs += stat->total_usecs;
		}
		snprintf(buf, 250,
			"%2d    %4d           %c       %8d    %16lld    %8d        %16lld    %8d            %8d           %8d         %8d         %8d    %8d\n",
			(i + 1),
			stat->stat_blk_size,
			stat->is_read ? 'R' : 'W',
			stat->stat_blks_per_transfer,
			stat->total_bytes,
			stat->total_transfers,
			stat->total_usecs,
			stat->current_transferred_bytes,
			stat->duration_usecs,
			avg_perf2,
			last_perf_in_class,
			stat->min_kbps,
			stat->max_kbps
			);
		seq_puts(s, buf);
	}
	/* Overall read throughput and read share in tenths of a percent. */
	sdhci_div64(
		((total_rd_bytes << 3) * 1000),
		total_rd_usecs, &overall_avg_rd_perf2);
	/*
	 * rd_percent is per-mille (x1000), so /10 gives whole percent and
	 * %10 the tenths digit.  NOTE(review): when no transfers were
	 * recorded both divisors are 0 -- presumably sdhci_div64 guards
	 * against divide-by-zero; confirm.
	 */
	sdhci_div64(
		(total_rd_bytes * 1000),
		(total_rd_bytes + total_wr_bytes), &rd_percent);
	snprintf(buf, 250,
		"Read Total_bytes=%lldB, time=%lldusecs, overall kbps=%d Rd percent=%d.%d\n",
		total_rd_bytes, total_rd_usecs,
		overall_avg_rd_perf2,
		(rd_percent / 10), (rd_percent % 10));
	seq_puts(s, buf);
	/* Same summary for writes. */
	sdhci_div64(
		((total_wr_bytes << 3) * 1000),
		total_wr_usecs, &overall_avg_wr_perf2);
	sdhci_div64(
		(total_wr_bytes * 1000),
		(total_rd_bytes + total_wr_bytes), &wr_percent);
	snprintf(buf, 250,
		"Write Total_bytes=%lldB, time=%lldusecs, overall kbps=%d, Wr percent=%d.%d\n",
		total_wr_bytes, total_wr_usecs,
		overall_avg_wr_perf2,
		(wr_percent / 10), (wr_percent % 10));
	seq_puts(s, buf);

	return 0;
}
4089
/* debugfs open callback: bind show_sdhci_perf_stats() to the seq_file. */
static int sdhci_perf_stats_dump(struct inode *inode, struct file *file)
{
	return single_open(file, show_sdhci_perf_stats, inode->i_private);
}
4094
/* File operations for the read-only "sdhci_perf_stats" debugfs node. */
static const struct file_operations flush_sdhci_perf_stats_fops = {
	.open		= sdhci_perf_stats_dump,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4101
/*
 * seq_file show callback for "reset_sdhci_perf_stats": frees all
 * accumulated stat nodes, i.e. merely opening/reading the node resets
 * the statistics.  Prints nothing and always returns 0.
 */
static int restart_sdhci_perf_stats(struct seq_file *s, void *data)
{
	struct sdhci_host *host = s->private;

	free_stats_nodes(host);
	return 0;
}
4109
/* debugfs open callback: bind restart_sdhci_perf_stats() to the seq_file. */
static int sdhci_perf_stats_restart(struct inode *inode, struct file *file)
{
	return single_open(file, restart_sdhci_perf_stats, inode->i_private);
}
4114
/* File operations for the "reset_sdhci_perf_stats" debugfs node. */
static const struct file_operations reset_sdhci_perf_stats_fops = {
	.open		= sdhci_perf_stats_restart,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4121
/*
 * sdhci_debugfs_init - create the per-host performance-stats debugfs nodes
 * @host: SDHCI host whose debugfs tree is populated
 *
 * Creates (or reuses) the host's debugfs root directory and adds the
 * enable/reset/show nodes plus the PIO tuning knobs.  On any creation
 * failure the whole root directory is removed recursively and
 * host->debugfs_root is cleared.
 */
static void sdhci_debugfs_init(struct sdhci_host *host)
{
	struct dentry *root = host->debugfs_root;

	/*
	 * debugfs nodes earlier were created from sdhci-tegra,
	 * In this change root debugfs node is created first-come-first-serve
	 */
	if (!root) {
		root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
		if (IS_ERR_OR_NULL(root))
			goto err_root;
		host->debugfs_root = root;
	}

	/*
	 * NOTE(review): the (u32 *) casts below assume each host field is at
	 * least 4 bytes wide; if e.g. enable_sdhci_perf_stats is a bool/u8,
	 * a write through debugfs would clobber adjacent bytes -- confirm
	 * against the sdhci_host definition.
	 */
	if (!debugfs_create_u32("enable_sdhci_perf_stats", S_IRUGO | S_IWUSR,
		root, (u32 *)&host->enable_sdhci_perf_stats))
		goto err_root;

	if (!debugfs_create_file("reset_sdhci_perf_stats", S_IRUGO,
		root, host, &reset_sdhci_perf_stats_fops))
		goto err_root;

	if (!debugfs_create_file("sdhci_perf_stats", S_IRUGO,
		root, host, &flush_sdhci_perf_stats_fops))
		goto err_root;

	if (!debugfs_create_u32("sdhci_perf_no_data_transfer_count", S_IRUGO,
		root, (u32 *)&host->no_data_transfer_count))
		goto err_root;

	if (!debugfs_create_u32("max_pio_size", S_IRUGO | S_IWUSR,
		root, (u32 *)&host->max_pio_size))
		goto err_root;

	if (!debugfs_create_u32("max_pio_blocks", S_IRUGO | S_IWUSR,
		root, (u32 *)&host->max_pio_blocks))
		goto err_root;

	return;

	/*
	 * NOTE(review): if the root was pre-created elsewhere (the
	 * first-come-first-serve case above), this removes that directory
	 * and any nodes other code added to it -- presumably intentional,
	 * but worth confirming.  debugfs_remove_recursive() itself tolerates
	 * a NULL/ERR_PTR dentry on this kernel.
	 */
err_root:
	debugfs_remove_recursive(root);
	host->debugfs_root = NULL;

	return;
}
4169 #endif
4170
4171 /* runtime pm is not enabled before add host */
4172 int sdhci_add_host(struct sdhci_host *host)
4173 {
4174         struct mmc_host *mmc;
4175         u32 caps[2] = {0, 0};
4176         u32 max_current_caps;
4177         unsigned int ocr_avail;
4178         int ret;
4179
4180         WARN_ON(host == NULL);
4181         if (host == NULL)
4182                 return -EINVAL;
4183
4184         mmc = host->mmc;
4185
4186         if (debug_quirks)
4187                 host->quirks = debug_quirks;
4188         if (debug_quirks2)
4189                 host->quirks2 = debug_quirks2;
4190
4191         sdhci_reset(host, SDHCI_RESET_ALL);
4192
4193         host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
4194         host->version = (host->version & SDHCI_SPEC_VER_MASK)
4195                                 >> SDHCI_SPEC_VER_SHIFT;
4196         if (host->version > SDHCI_SPEC_400) {
4197                 pr_err("%s: Unknown controller version (%d). "
4198                         "You may experience problems.\n", mmc_hostname(mmc),
4199                         host->version);
4200         }
4201
4202         host->mrq_cmd = NULL;
4203         host->mrq_dat = NULL;
4204         caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
4205                 sdhci_readl(host, SDHCI_CAPABILITIES);
4206
4207         if (host->version >= SDHCI_SPEC_300)
4208                 caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
4209                         host->caps1 :
4210                         sdhci_readl(host, SDHCI_CAPABILITIES_1);
4211
4212         if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4213                 host->flags |= SDHCI_USE_SDMA;
4214         else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
4215                 DBG("Controller doesn't have SDMA capability\n");
4216         else
4217                 host->flags |= SDHCI_USE_SDMA;
4218
4219         if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4220                 (host->flags & SDHCI_USE_SDMA)) {
4221                 DBG("Disabling DMA as it is marked broken\n");
4222                 host->flags &= ~SDHCI_USE_SDMA;
4223         }
4224
4225         if ((host->version >= SDHCI_SPEC_200) &&
4226                 (caps[0] & SDHCI_CAN_DO_ADMA2))
4227                 host->flags |= SDHCI_USE_ADMA;
4228
4229         if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4230                 (host->flags & SDHCI_USE_ADMA)) {
4231                 DBG("Disabling ADMA as it is marked broken\n");
4232                 host->flags &= ~SDHCI_USE_ADMA;
4233         }
4234
4235         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4236                 if (host->ops->enable_dma) {
4237                         if (host->ops->enable_dma(host)) {
4238                                 pr_warning("%s: No suitable DMA "
4239                                         "available. Falling back to PIO.\n",
4240                                         mmc_hostname(mmc));
4241                                 host->flags &=
4242                                         ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4243                         }
4244                 }
4245         }
4246
4247         if (host->flags & SDHCI_USE_ADMA) {
4248                 /*
4249                  * We need to allocate descriptors for all sg entries
4250                  * (128) and potentially one alignment transfer for
4251                  * each of those entries. Simply allocating 128 bits
4252                  * for each entry
4253                  */
4254                 if (mmc_dev(host->mmc)->dma_mask &&
4255                                 mmc_dev(host->mmc)->coherent_dma_mask) {
4256                         host->adma_desc = dma_alloc_coherent(
4257                                         mmc_dev(host->mmc), (128 * 2 + 1) * 8,
4258                                         &host->adma_addr, GFP_KERNEL);
4259                         if (!host->adma_desc)
4260                                 goto err_dma_alloc;
4261
4262                         host->align_buffer = dma_alloc_coherent(
4263                                         mmc_dev(host->mmc), 128 * 8,
4264                                         &host->align_addr, GFP_KERNEL);
4265                         if (!host->align_buffer) {
4266                                 dma_free_coherent(mmc_dev(host->mmc),
4267                                                 (128 * 2 + 1) * 8,
4268                                                 host->adma_desc,
4269                                                 host->adma_addr);
4270                                 host->adma_desc = NULL;
4271                                 goto err_dma_alloc;
4272                         }
4273
4274                         host->use_dma_alloc = true;
4275
4276                         BUG_ON(host->adma_addr & 0x3);
4277                         BUG_ON(host->align_addr & 0x3);
4278                         goto out_dma_alloc;
4279                 }
4280 err_dma_alloc:
4281
4282                 host->adma_desc = kmalloc((128 * 2 + 1) * 8, GFP_KERNEL);
4283                 host->align_buffer = kmalloc(128 * 8, GFP_KERNEL);
4284                 if (!host->adma_desc || !host->align_buffer) {
4285                         kfree(host->adma_desc);
4286                         kfree(host->align_buffer);
4287                         pr_warning("%s: Unable to allocate ADMA "
4288                                 "buffers. Falling back to standard DMA.\n",
4289                                 mmc_hostname(mmc));
4290                         host->flags &= ~SDHCI_USE_ADMA;
4291                 }
4292         }
4293 out_dma_alloc:
4294
4295         /*
4296          * If we use DMA, then it's up to the caller to set the DMA
4297          * mask, but PIO does not need the hw shim so we set a new
4298          * mask here in that case.
4299          */
4300         if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4301                 host->dma_mask = DMA_BIT_MASK(64);
4302                 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
4303         }
4304
4305         if (host->version >= SDHCI_SPEC_300)
4306                 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
4307                         >> SDHCI_CLOCK_BASE_SHIFT;
4308         else
4309                 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
4310                         >> SDHCI_CLOCK_BASE_SHIFT;
4311
4312         host->max_clk *= 1000000;
4313
4314         if (mmc->caps2 & MMC_CAP2_HS533)
4315                 host->max_clk = MMC_HS533_MAX_DTR;
4316
4317         if (host->max_clk == 0 || host->quirks &
4318                         SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4319                 if (!host->ops->get_max_clock) {
4320                         pr_err("%s: Hardware doesn't specify base clock "
4321                                "frequency.\n", mmc_hostname(mmc));
4322                         return -ENODEV;
4323                 }
4324                 host->max_clk = host->ops->get_max_clock(host);
4325         }
4326
4327         /*
4328          * In case of Host Controller v3.00, find out whether clock
4329          * multiplier is supported.
4330          */
4331         host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
4332                         SDHCI_CLOCK_MUL_SHIFT;
4333
4334         /*
4335          * In case the value in Clock Multiplier is 0, then programmable
4336          * clock mode is not supported, otherwise the actual clock
4337          * multiplier is one more than the value of Clock Multiplier
4338          * in the Capabilities Register.
4339          */
4340         if (host->clk_mul)
4341                 host->clk_mul += 1;
4342
4343         /*
4344          * Set host parameters.
4345          */
4346         mmc->ops = &sdhci_ops;
4347         mmc->f_max = host->max_clk;
4348         if (host->ops->get_min_clock)
4349                 mmc->f_min = host->ops->get_min_clock(host);
4350         else if (host->version >= SDHCI_SPEC_300) {
4351                 if (host->clk_mul) {
4352                         mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
4353                         mmc->f_max = host->max_clk * host->clk_mul;
4354                 } else
4355                         mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4356         } else
4357                 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4358
4359         host->timeout_clk =
4360                 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
4361         if (host->timeout_clk == 0) {
4362                 if (host->ops->get_timeout_clock) {
4363                         host->timeout_clk = host->ops->get_timeout_clock(host);
4364                 } else if (!(host->quirks &
4365                                 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4366                         pr_err("%s: Hardware doesn't specify timeout clock "
4367                                "frequency.\n", mmc_hostname(mmc));
4368                         return -ENODEV;
4369                 }
4370         }
4371         if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
4372                 host->timeout_clk *= 1000;
4373
4374         if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
4375                 host->timeout_clk = mmc->f_max / 1000;
4376
4377         if (!(host->quirks2 & SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO))
4378                 mmc->max_discard_to = (1 << 27) / host->timeout_clk;
4379
4380         if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4381                 host->flags |= SDHCI_AUTO_CMD12;
4382
4383         /* Auto-CMD23 stuff only works in ADMA or PIO. */
4384         if ((host->version >= SDHCI_SPEC_300) &&
4385             ((host->flags & SDHCI_USE_ADMA) ||
4386              !(host->flags & SDHCI_USE_SDMA))) {
4387                 host->flags |= SDHCI_AUTO_CMD23;
4388                 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
4389         } else {
4390                 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
4391         }
4392
4393         /*
4394          * A controller may support 8-bit width, but the board itself
4395          * might not have the pins brought out.  Boards that support
4396          * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4397          * their platform code before calling sdhci_add_host(), and we
4398          * won't assume 8-bit width for hosts without that CAP.
4399          */
4400         if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4401                 mmc->caps |= MMC_CAP_4_BIT_DATA;
4402
4403         if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4404                 mmc->caps &= ~MMC_CAP_CMD23;
4405
4406         if (caps[0] & SDHCI_CAN_DO_HISPD)
4407                 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4408
4409         if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4410             !(host->mmc->caps & MMC_CAP_NONREMOVABLE) && !(host->ops->get_cd))
4411                 mmc->caps |= MMC_CAP_NEEDS_POLL;
4412
4413         /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
4414         host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
4415         if (IS_ERR_OR_NULL(host->vqmmc)) {
4416                 if (PTR_ERR(host->vqmmc) < 0) {
4417                         pr_info("%s: no vqmmc regulator found\n",
4418                                 mmc_hostname(mmc));
4419                         host->vqmmc = NULL;
4420                 }
4421         } else {
4422                 ret = regulator_enable(host->vqmmc);
4423                 if (!regulator_is_supported_voltage(host->vqmmc, 1700000,
4424                         1950000))
4425                         caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
4426                                         SDHCI_SUPPORT_SDR50 |
4427                                         SDHCI_SUPPORT_DDR50);
4428                 if (ret) {
4429                         pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4430                                 mmc_hostname(mmc), ret);
4431                         host->vqmmc = NULL;
4432                 }
4433         }
4434
4435         if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
4436                 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4437                        SDHCI_SUPPORT_DDR50);
4438
4439         /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4440         if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4441                        SDHCI_SUPPORT_DDR50))
4442                 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4443
4444         /* SDR104 supports also implies SDR50 support */
4445         if (caps[1] & SDHCI_SUPPORT_SDR104)
4446                 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4447         else if (caps[1] & SDHCI_SUPPORT_SDR50)
4448                 mmc->caps |= MMC_CAP_UHS_SDR50;
4449
4450         if (caps[1] & SDHCI_SUPPORT_DDR50)
4451                 mmc->caps |= MMC_CAP_UHS_DDR50;
4452
4453         /* Does the host need tuning for SDR50? */
4454         if (caps[1] & SDHCI_USE_SDR50_TUNING)
4455                 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4456
4457         /* Does the host need tuning for HS200? */
4458         if (mmc->caps2 & MMC_CAP2_HS200)
4459                 host->flags |= SDHCI_HS200_NEEDS_TUNING;
4460
4461         /* Driver Type(s) (A, C, D) supported by the host */
4462         if (caps[1] & SDHCI_DRIVER_TYPE_A)
4463                 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4464         if (caps[1] & SDHCI_DRIVER_TYPE_C)
4465                 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4466         if (caps[1] & SDHCI_DRIVER_TYPE_D)
4467                 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4468
4469         /* Initial value for re-tuning timer count */
4470         host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
4471                               SDHCI_RETUNING_TIMER_COUNT_SHIFT;
4472         /*
4473          * If the re-tuning timer count value is 0xF, the timer count
4474          * information should be obtained in a non-standard way.
4475          */
4476         if (host->tuning_count == 0xF) {
4477                 if (host->ops->get_tuning_counter) {
4478                         host->tuning_count =
4479                                 host->ops->get_tuning_counter(host);
4480                 } else {
4481                         host->tuning_count = 0;
4482                 }
4483         }
4484
4485         /*
4486          * In case Re-tuning Timer is not disabled, the actual value of
4487          * re-tuning timer will be 2 ^ (n - 1).
4488          */
4489         if (host->tuning_count)
4490                 host->tuning_count = 1 << (host->tuning_count - 1);
4491
4492         /* Re-tuning mode supported by the Host Controller */
4493         host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
4494                              SDHCI_RETUNING_MODE_SHIFT;
4495
4496         ocr_avail = 0;
4497
4498         host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
4499         if (IS_ERR_OR_NULL(host->vmmc)) {
4500                 if (PTR_ERR(host->vmmc) < 0) {
4501                         pr_info("%s: no vmmc regulator found\n",
4502                                 mmc_hostname(mmc));
4503                         host->vmmc = NULL;
4504                 }
4505         }
4506
4507 #ifdef CONFIG_REGULATOR
4508         /*
4509          * Voltage range check makes sense only if regulator reports
4510          * any voltage value.
4511          */
4512         if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) {
4513                 ret = regulator_is_supported_voltage(host->vmmc, 2700000,
4514                         3600000);
4515                 if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330)))
4516                         caps[0] &= ~SDHCI_CAN_VDD_330;
4517                 if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300)))
4518                         caps[0] &= ~SDHCI_CAN_VDD_300;
4519                 ret = regulator_is_supported_voltage(host->vmmc, 1700000,
4520                         1950000);
4521                 if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180)))
4522                         caps[0] &= ~SDHCI_CAN_VDD_180;
4523         }
4524 #endif /* CONFIG_REGULATOR */
4525
4526         /*
4527          * According to SD Host Controller spec v3.00, if the Host System
4528          * can afford more than 150mA, Host Driver should set XPC to 1. Also
4529          * the value is meaningful only if Voltage Support in the Capabilities
4530          * register is set. The actual current value is 4 times the register
4531          * value.
4532          */
4533         max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4534         if (!max_current_caps && host->vmmc) {
4535                 u32 curr = regulator_get_current_limit(host->vmmc);
4536                 if (curr > 0) {
4537
4538                         /* convert to SDHCI_MAX_CURRENT format */
4539                         curr = curr/1000;  /* convert to mA */
4540                         curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4541
4542                         curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4543                         max_current_caps =
4544                                 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
4545                                 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
4546                                 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
4547                 }
4548         }
4549
4550         if (caps[0] & SDHCI_CAN_VDD_330) {
4551                 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4552
4553                 mmc->max_current_330 = ((max_current_caps &
4554                                    SDHCI_MAX_CURRENT_330_MASK) >>
4555                                    SDHCI_MAX_CURRENT_330_SHIFT) *
4556                                    SDHCI_MAX_CURRENT_MULTIPLIER;
4557         }
4558         if (caps[0] & SDHCI_CAN_VDD_300) {
4559                 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4560
4561                 mmc->max_current_300 = ((max_current_caps &
4562                                    SDHCI_MAX_CURRENT_300_MASK) >>
4563                                    SDHCI_MAX_CURRENT_300_SHIFT) *
4564                                    SDHCI_MAX_CURRENT_MULTIPLIER;
4565         }
4566         if (caps[0] & SDHCI_CAN_VDD_180) {
4567                 ocr_avail |= MMC_VDD_165_195;
4568
4569                 mmc->max_current_180 = ((max_current_caps &
4570                                    SDHCI_MAX_CURRENT_180_MASK) >>
4571                                    SDHCI_MAX_CURRENT_180_SHIFT) *
4572                                    SDHCI_MAX_CURRENT_MULTIPLIER;
4573         }
4574
4575         mmc->ocr_avail = ocr_avail;
4576         mmc->ocr_avail_sdio = ocr_avail;
4577         if (host->ocr_avail_sdio)
4578                 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4579         mmc->ocr_avail_sd = ocr_avail;
4580         if (host->ocr_avail_sd)
4581                 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4582         else /* normal SD controllers don't support 1.8V */
4583                 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4584         mmc->ocr_avail_mmc = ocr_avail;
4585         if (host->ocr_avail_mmc)
4586                 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4587
4588         if (mmc->ocr_avail == 0) {
4589                 pr_err("%s: Hardware doesn't report any "
4590                         "support voltages.\n", mmc_hostname(mmc));
4591                 return -ENODEV;
4592         }
4593
4594         spin_lock_init(&host->lock);
4595
4596         /*
4597          * Maximum number of segments. Depends on if the hardware
4598          * can do scatter/gather or not.
4599          */
4600         if (host->flags & SDHCI_USE_ADMA)
4601                 mmc->max_segs = 128;
4602         else if (host->flags & SDHCI_USE_SDMA)
4603                 mmc->max_segs = 1;
4604         else /* PIO */
4605                 mmc->max_segs = 128;
4606
4607         /*
4608          * Maximum number of sectors in one transfer. Limited by DMA boundary
4609          * size (512KiB).
4610          */
4611         mmc->max_req_size = 524288;
4612
4613         /*
4614          * Maximum segment size. Could be one segment with the maximum number
4615          * of bytes. When doing hardware scatter/gather, each entry cannot
4616          * be larger than 64 KiB though.
4617          */
4618         if (host->flags & SDHCI_USE_ADMA) {
4619                 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4620                         mmc->max_seg_size = 65535;
4621                 else
4622                         mmc->max_seg_size = 65536;
4623         } else {
4624                 mmc->max_seg_size = mmc->max_req_size;
4625         }
4626
4627         /*
4628          * Maximum block size. This varies from controller to controller and
4629          * is specified in the capabilities register.
4630          */
4631         if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4632                 mmc->max_blk_size = 2;
4633         } else {
4634                 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
4635                                 SDHCI_MAX_BLOCK_SHIFT;
4636                 if (mmc->max_blk_size >= 3) {
4637                         pr_info("%s: Invalid maximum block size, "
4638                                 "assuming 512 bytes\n", mmc_hostname(mmc));
4639                         mmc->max_blk_size = 0;
4640                 }
4641         }
4642
4643         mmc->max_blk_size = 512 << mmc->max_blk_size;
4644
4645         /*
4646          * Maximum block count.
4647          */
4648         mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4649 #ifdef CONFIG_CMD_DUMP
4650         mmc->dbg_host_cnt = 0;
4651 #endif
4652
4653         /*
4654          * Init tasklets.
4655          */
4656         tasklet_init(&host->card_tasklet,
4657                 sdhci_tasklet_card, (unsigned long)host);
4658         tasklet_init(&host->finish_tasklet,
4659                 sdhci_tasklet_finish, (unsigned long)host);
4660         tasklet_init(&host->finish_cmd_tasklet,
4661                 sdhci_tasklet_cmd_finish, (unsigned long)host);
4662         tasklet_init(&host->finish_dat_tasklet,
4663                 sdhci_tasklet_dat_finish, (unsigned long)host);
4664
4665         setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
4666
4667         if (host->version >= SDHCI_SPEC_300) {
4668                 init_waitqueue_head(&host->buf_ready_int);
4669
4670                 /* Initialize re-tuning timer */
4671                 init_timer(&host->tuning_timer);
4672                 host->tuning_timer.data = (unsigned long)host;
4673                 host->tuning_timer.function = sdhci_tuning_timer;
4674         }
4675
4676         ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
4677                 mmc_hostname(mmc), host);
4678         if (ret) {
4679                 pr_err("%s: Failed to request IRQ %d: %d\n",
4680                        mmc_hostname(mmc), host->irq, ret);
4681                 goto untasklet;
4682         }
4683
4684         sdhci_init(host, 0);
4685
4686         host->sysedpc = sysedp_create_consumer(dev_name(mmc_dev(mmc)),
4687                                                dev_name(mmc_dev(mmc)));
4688
4689 #ifdef CONFIG_MMC_DEBUG
4690         sdhci_dumpregs(host);
4691 #endif
4692
4693 #ifdef SDHCI_USE_LEDS_CLASS
4694         snprintf(host->led_name, sizeof(host->led_name),
4695                 "%s::", mmc_hostname(mmc));
4696         host->led.name = host->led_name;
4697         host->led.brightness = LED_OFF;
4698         host->led.default_trigger = mmc_hostname(mmc);
4699         host->led.brightness_set = sdhci_led_control;
4700
4701         ret = led_classdev_register(mmc_dev(mmc), &host->led);
4702         if (ret) {
4703                 pr_err("%s: Failed to register LED device: %d\n",
4704                        mmc_hostname(mmc), ret);
4705                 goto reset;
4706         }
4707 #endif
4708
4709         mmiowb();
4710
4711         mmc_add_host(mmc);
4712
4713         pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4714                 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4715                 (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
4716                 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4717
4718         sdhci_enable_card_detection(host);
4719
4720         pm_runtime_enable(mmc_dev(mmc));
4721         pm_runtime_use_autosuspend(mmc_dev(mmc));
4722         if (host->quirks2 & SDHCI_QUIRK2_MMC_RTPM) {
4723                 /*
4724                  * Below Autosuspend delay can be increased/decreased based on
4725                  * power and perf data
4726                  */
4727                 pm_runtime_set_autosuspend_delay(mmc_dev(mmc),
4728                         MMC_RTPM_MSEC_TMOUT);
4729         }
4730         host->runtime_pm_init_done = true;
4731
4732 #ifdef CONFIG_DEBUG_FS
4733         /* Add debugfs nodes */
4734         sdhci_debugfs_init(host);
4735 #endif
4736
4737         return 0;
4738
4739 #ifdef SDHCI_USE_LEDS_CLASS
4740 reset:
4741         sdhci_reset(host, SDHCI_RESET_ALL);
4742         sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
4743         free_irq(host->irq, host);
4744 #endif
4745 untasklet:
4746         tasklet_kill(&host->card_tasklet);
4747         tasklet_kill(&host->finish_tasklet);
4748         tasklet_kill(&host->finish_cmd_tasklet);
4749         tasklet_kill(&host->finish_dat_tasklet);
4750
4751         return ret;
4752 }
4753
4754 EXPORT_SYMBOL_GPL(sdhci_add_host);
4755
4756 void sdhci_runtime_forbid(struct sdhci_host *host)
4757 {
4758         pm_runtime_forbid(mmc_dev(host->mmc));
4759 }
4760 EXPORT_SYMBOL_GPL(sdhci_runtime_forbid);
4761
4762 void sdhci_remove_host(struct sdhci_host *host, int dead)
4763 {
4764         unsigned long flags;
4765
4766         sdhci_runtime_pm_get(host);
4767         if (dead) {
4768                 spin_lock_irqsave(&host->lock, flags);
4769
4770                 host->flags |= SDHCI_DEVICE_DEAD;
4771
4772                 if (host->mrq_cmd || host->mrq_dat) {
4773                         pr_err("%s: Controller removed during "
4774                                 " transfer!\n", mmc_hostname(host->mmc));
4775
4776                         if (host->mrq_cmd) {
4777                                 host->mrq_cmd->cmd->error = -ENOMEDIUM;
4778                                 if (MMC_CHECK_CMDQ_MODE(host))
4779                                         tasklet_schedule(&host->finish_cmd_tasklet);
4780                                 else
4781                                         tasklet_schedule(&host->finish_tasklet);
4782                         }
4783                         if (host->mrq_dat) {
4784                                 host->mrq_dat->cmd->error = -ENOMEDIUM;
4785                                 if (MMC_CHECK_CMDQ_MODE(host))
4786                                         tasklet_schedule(&host->finish_dat_tasklet);
4787                                 else
4788                                         tasklet_schedule(&host->finish_tasklet);
4789                         }
4790                 }
4791
4792                 spin_unlock_irqrestore(&host->lock, flags);
4793         }
4794
4795         sdhci_disable_card_detection(host);
4796
4797         mmc_remove_host(host->mmc);
4798
4799 #ifdef SDHCI_USE_LEDS_CLASS
4800         led_classdev_unregister(&host->led);
4801 #endif
4802
4803         if (!dead)
4804                 sdhci_reset(host, SDHCI_RESET_ALL);
4805
4806         sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
4807         free_irq(host->irq, host);
4808
4809         del_timer_sync(&host->timer);
4810
4811         tasklet_kill(&host->card_tasklet);
4812         tasklet_kill(&host->finish_tasklet);
4813         tasklet_kill(&host->finish_cmd_tasklet);
4814         tasklet_kill(&host->finish_dat_tasklet);
4815
4816         if (host->vmmc) {
4817                 regulator_disable(host->vmmc);
4818                 regulator_put(host->vmmc);
4819         }
4820
4821         if (host->vqmmc) {
4822                 regulator_disable(host->vqmmc);
4823                 regulator_put(host->vqmmc);
4824         }
4825
4826         if (host->use_dma_alloc) {
4827                 dma_free_coherent(mmc_dev(host->mmc), (128 * 2 + 1) * 8,
4828                                 host->adma_desc, host->adma_addr);
4829                 dma_free_coherent(mmc_dev(host->mmc), 128 * 8,
4830                                 host->align_buffer, host->align_addr);
4831         } else {
4832                 kfree(host->adma_desc);
4833                 kfree(host->align_buffer);
4834         }
4835
4836         host->adma_desc = NULL;
4837         host->align_buffer = NULL;
4838
4839         sdhci_runtime_pm_put(host);
4840         sysedp_free_consumer(host->sysedpc);
4841         host->sysedpc = NULL;
4842 }
4843
4844 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4845
4846 void sdhci_free_host(struct sdhci_host *host)
4847 {
4848         mmc_free_host(host->mmc);
4849 }
4850
4851 EXPORT_SYMBOL_GPL(sdhci_free_host);
4852
4853 /*****************************************************************************\
4854  *                                                                           *
4855  * Driver init/exit                                                          *
4856  *                                                                           *
4857 \*****************************************************************************/
4858
4859 static int __init sdhci_drv_init(void)
4860 {
4861         pr_info(DRIVER_NAME
4862                 ": Secure Digital Host Controller Interface driver\n");
4863         pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4864
4865         return 0;
4866 }
4867
/*
 * Module exit: intentionally empty — init only printed banners, so
 * there is nothing to undo here.
 */
static void __exit sdhci_drv_exit(void)
{
}
4871
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Read-only (0444) module parameters for forcing controller quirks
 * at load time; see debug_quirks/debug_quirks2 in this file. */
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");