/*
 * Copyright (C) 2010 Google, Inc.
 * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
17 #include <linux/err.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/platform_device.h>
21 #include <linux/clk.h>
24 #include <linux/of_device.h>
25 #include <linux/of_gpio.h>
26 #include <linux/gpio.h>
27 #include <linux/slab.h>
28 #include <linux/mmc/card.h>
29 #include <linux/mmc/host.h>
30 #include <linux/module.h>
31 #include <linux/mmc/sd.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/delay.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/tegra_pm_domains.h>
36 #include <linux/pinctrl/pinctrl.h>
37 #include <linux/pinctrl/consumer.h>
38 #include <linux/pinctrl/pinconf-tegra.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/uaccess.h>
41 #include <linux/ktime.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/reboot.h>
49 #include <linux/devfreq.h>
50 #include <linux/clk/tegra.h>
51 #include <linux/tegra-soc.h>
52 #include <linux/tegra-fuse.h>
53 #include <linux/tegra-pmc.h>
54 #include <linux/padctrl/padctrl.h>
56 #include <linux/platform_data/mmc-sdhci-tegra.h>
57 #include <linux/platform/tegra/common.h>
59 #include "sdhci-pltfm.h"
62 #define SDHCI_TEGRA_DBG(stuff...) pr_info(stuff)
64 #define SDHCI_TEGRA_DBG(stuff...) do {} while (0)
67 #define SDHCI_VNDR_CLK_CTRL 0x100
68 #define SDHCI_VNDR_CLK_CTRL_SDMMC_CLK 0x1
69 #define SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE 0x8
70 #define SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE 0x4
71 #define SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK 0x2
72 #define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT 16
73 #define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT 24
74 #define SDHCI_VNDR_CLK_CTRL_SDR50_TUNING 0x20
75 #define SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK 0x2
76 #define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK 0xFF
77 #define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK 0x1F
79 #define SDHCI_VNDR_SYS_SW_CTRL 0x104
80 #define SDHCI_VNDR_SYS_SW_CTRL_WR_CRC_USE_TMCLK 0x40000000
81 #define SDHCI_VNDR_SYS_SW_CTRL_STROBE_SHIFT 31
83 #define SDHCI_VNDR_CAP_OVERRIDES_0 0x10c
84 #define SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_SHIFT 8
85 #define SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_MASK 0x3F
87 #define SDHCI_VNDR_MISC_CTRL 0x120
88 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT 0x8
89 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT 0x10
90 #define SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT 0x200
91 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0 0x20
92 #define SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT 0x1
93 #define SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK 0x180
94 #define SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT 17
96 #define SDHCI_VNDR_DLLCAL_CFG 0x1b0
97 #define SDHCI_VNDR_DLLCAL_CFG_EN_CALIBRATE 0x80000000
99 #define SDHCI_VNDR_DLL_CTRL0_0 0x1b4
100 #define SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_SHIFT 7
101 #define SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_MASK 0x7F
102 #define SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_OFFSET 0x7C
105 #define SDHCI_VNDR_DLLCAL_CFG_STATUS 0x1bc
106 #define SDHCI_VNDR_DLLCAL_CFG_STATUS_DLL_ACTIVE 0x80000000
108 #define SDHCI_VNDR_TUN_CTRL0_0 0x1c0
109 /*MUL_M is defined in [12:6] bits*/
110 #define SDHCI_VNDR_TUN_CTRL0_0_MUL_M 0x1FC0
111 /* To Set value of [12:6] as 1 */
112 #define SDHCI_VNDR_TUN_CTRL0_0_MUL_M_VAL 0x40
113 #define SDHCI_VNDR_TUN_CTRL1_0 0x1c4
114 #define SDHCI_VNDR_TUN_STATUS0_0 0x1c8
/*
 * Enable the re-tuning request only when a CRC error is detected
 * in SDR50/SDR104/HS200 modes.
 */
118 #define SDHCI_VNDR_TUN_CTRL_RETUNE_REQ_EN 0x8000000
119 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000
120 #define TUNING_WORD_SEL_MASK 0x7
121 /*value 4 in 13 to 15 bits indicates 256 iterations*/
122 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_MASK 0x7
123 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_SHIFT 13
124 /* Value 1 in NUM_TUNING_ITERATIONS indicates 64 iterations */
125 #define HW_TUNING_64_TRIES 1
126 /* Value 2 in NUM_TUNING_ITERATIONS indicates 128 iterations */
127 #define HW_TUNING_128_TRIES 2
128 /* Value 4 in NUM_TUNING_ITERATIONS indicates 256 iterations */
129 #define HW_TUNING_256_TRIES 4
131 #define SDHCI_VNDR_TUN_CTRL1_TUN_STEP_SIZE 0x77
134 #define SDHCI_VNDR_PRESET_VAL0_0 0x1d4
135 #define SDCLK_FREQ_SEL_HS_SHIFT 20
136 #define SDCLK_FREQ_SEL_DEFAULT_SHIFT 10
138 #define SDHCI_VNDR_PRESET_VAL1_0 0x1d8
139 #define SDCLK_FREQ_SEL_SDR50_SHIFT 20
140 #define SDCLK_FREQ_SEL_SDR25_SHIFT 10
142 #define SDHCI_VNDR_PRESET_VAL2_0 0x1dc
143 #define SDCLK_FREQ_SEL_DDR50_SHIFT 10
145 #define SDMMC_SDMEMCOMPPADCTRL 0x1E0
146 #define SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK 0xF
147 #define SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK 0x80000000
149 #define SDMMC_AUTO_CAL_CONFIG 0x1E4
150 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START 0x80000000
151 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE 0x20000000
152 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_SLW_OVERRIDE 0x10000000
153 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT 0x8
154 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_STEP_OFFSET_SHIFT 0x10
156 #define SDMMC_AUTO_CAL_STATUS 0x1EC
157 #define SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE 0x80000000
158 #define SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET 24
159 #define PULLUP_ADJUSTMENT_OFFSET 20
161 #define SDMMC_VENDOR_ERR_INTR_STATUS_0 0x108
163 #define SDMMC_IO_SPARE_0 0x1F0
164 #define SPARE_OUT_3_OFFSET 19
166 #define SDMMC_VNDR_IO_TRIM_CNTRL_0 0x1AC
167 #define SDMMC_VNDR_IO_TRIM_CNTRL_0_SEL_VREG 0x4
169 /* Erratum: Version register is invalid in HW */
170 #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
171 /* Erratum: Enable block gap interrupt detection */
172 #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
173 /* Do not enable auto calibration if the platform doesn't support */
174 #define NVQUIRK_DISABLE_AUTO_CALIBRATION BIT(2)
175 /* Set Calibration Offsets */
176 #define NVQUIRK_SET_CALIBRATION_OFFSETS BIT(3)
177 /* Set Drive Strengths */
178 #define NVQUIRK_SET_DRIVE_STRENGTH BIT(4)
179 /* Enable PADPIPE CLKEN */
180 #define NVQUIRK_ENABLE_PADPIPE_CLKEN BIT(5)
181 /* DISABLE SPI_MODE CLKEN */
182 #define NVQUIRK_DISABLE_SPI_MODE_CLKEN BIT(6)
184 #define NVQUIRK_SET_TAP_DELAY BIT(7)
186 #define NVQUIRK_SET_TRIM_DELAY BIT(8)
187 /* Enable SDHOST v3.0 support */
188 #define NVQUIRK_ENABLE_SD_3_0 BIT(9)
189 /* Enable SDR50 mode */
190 #define NVQUIRK_ENABLE_SDR50 BIT(10)
191 /* Enable SDR104 mode */
192 #define NVQUIRK_ENABLE_SDR104 BIT(11)
193 /*Enable DDR50 mode */
194 #define NVQUIRK_ENABLE_DDR50 BIT(12)
195 /* Enable Frequency Tuning for SDR50 mode */
196 #define NVQUIRK_ENABLE_SDR50_TUNING BIT(13)
197 /* Enable HS200 mode */
198 #define NVQUIRK_ENABLE_HS200 BIT(14)
199 /* Enable Infinite Erase Timeout*/
200 #define NVQUIRK_INFINITE_ERASE_TIMEOUT BIT(15)
201 /* ENAABLE FEEDBACK IO CLOCK */
202 #define NVQUIRK_EN_FEEDBACK_CLK BIT(17)
203 /* Disable AUTO CMD23 */
204 #define NVQUIRK_DISABLE_AUTO_CMD23 BIT(18)
205 /* Shadow write xfer mode reg and write it alongwith CMD register */
206 #define NVQUIRK_SHADOW_XFER_MODE_REG BIT(19)
207 /* update PAD_E_INPUT_OR_E_PWRD bit */
208 #define NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD BIT(20)
209 /* Shadow write xfer mode reg and write it alongwith CMD register */
210 #define NVQUIRK_SET_PIPE_STAGES_MASK_0 BIT(21)
211 #define NVQUIRK_HIGH_FREQ_TAP_PROCEDURE BIT(22)
212 /* Disable external loopback for all sdmmc devices*/
213 #define NVQUIRK_DISABLE_EXTERNAL_LOOPBACK BIT(23)
214 /* Select fix tap hole margins */
215 #define NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS BIT(24)
216 /* Enable HS400 mode */
217 #define NVQUIRK_ENABLE_HS400 BIT(26)
218 /* Enable AUTO CMD23 */
219 #define NVQUIRK_ENABLE_AUTO_CMD23 BIT(27)
220 #define NVQUIRK_SET_SDMEMCOMP_VREF_SEL BIT(28)
221 /* Special PAD control register settings are needed for T210 */
222 #define NVQUIRK_UPDATE_PAD_CNTRL_REG BIT(29)
223 #define NVQUIRK_UPDATE_PIN_CNTRL_REG BIT(30)
224 /* Use timeout clk for write crc status data timeout counter */
225 #define NVQUIRK_USE_TMCLK_WR_CRC_TIMEOUT BIT(31)
227 /* Enable T210 specific SDMMC WAR - sd card voltage switch */
228 #define NVQUIRK2_CONFIG_PWR_DET BIT(0)
229 /* Enable T210 specific SDMMC WAR - Tuning Step Size, Tuning Iterations*/
230 #define NVQUIRK2_UPDATE_HW_TUNING_CONFG BIT(1)
231 /*controller does not support cards if 1.8 V is not supported by cards*/
232 #define NVQUIRK2_BROKEN_SD2_0_SUPPORT BIT(2)
233 #define NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH BIT(3)
234 /* Select SDR50 UHS mode for host if the device runs at SDR50 mode on T210 */
235 #define NVQUIRK2_SELECT_SDR50_MODE BIT(4)
236 #define NVQUIRK2_ADD_DELAY_AUTO_CALIBRATION BIT(5)
237 #define NVQUIRK2_SET_PAD_E_INPUT_VOL BIT(6)
239 /* Common subset of quirks for Tegra3 and later sdmmc controllers */
240 #define TEGRA_SDHCI_NVQUIRKS (NVQUIRK_ENABLE_PADPIPE_CLKEN | \
241 NVQUIRK_DISABLE_SPI_MODE_CLKEN | \
242 NVQUIRK_EN_FEEDBACK_CLK | \
243 NVQUIRK_SET_TAP_DELAY | \
244 NVQUIRK_ENABLE_SDR50_TUNING | \
245 NVQUIRK_ENABLE_SDR50 | \
246 NVQUIRK_ENABLE_SDR104 | \
247 NVQUIRK_SHADOW_XFER_MODE_REG | \
248 NVQUIRK_DISABLE_AUTO_CMD23)
250 #define TEGRA_SDHCI_QUIRKS (SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | \
251 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
252 SDHCI_QUIRK_SINGLE_POWER_WRITE | \
253 SDHCI_QUIRK_NO_HISPD_BIT | \
254 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | \
255 SDHCI_QUIRK_BROKEN_CARD_DETECTION | \
256 SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC)
258 #define TEGRA_SDHCI_QUIRKS2 (SDHCI_QUIRK2_PRESET_VALUE_BROKEN | \
259 SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING | \
260 SDHCI_QUIRK2_NON_STANDARD_TUNING | \
261 SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO | \
262 SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
264 #define IS_QUIRKS2_DELAYED_CLK_GATE(host) \
265 (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
267 /* Interface voltages */
268 #define SDHOST_1V8_OCR_MASK 0x8
269 #define SDHOST_HIGH_VOLT_MIN 2700000
270 #define SDHOST_HIGH_VOLT_MAX 3600000
271 #define SDHOST_HIGH_VOLT_2V8 2800000
272 #define SDHOST_LOW_VOLT_MIN 1800000
273 #define SDHOST_LOW_VOLT_MAX 1800000
274 #define SDHOST_HIGH_VOLT_3V2 3200000
275 #define SDHOST_HIGH_VOLT_3V3 3300000
276 #define SDHOST_MAX_VOLT_SUPPORT 3000000
278 /* Clock related definitions */
279 #define MAX_DIVISOR_VALUE 128
280 #define DEFAULT_SDHOST_FREQ 50000000
281 #define SDMMC_AHB_MAX_FREQ 115000000
282 #define SDMMC_EMC_MAX_FREQ 150000000
283 #define SDMMC_EMC_NOM_VOLT_FREQ 900000000
285 /* Tuning related definitions */
286 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8 128
287 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4 64
288 #define MAX_TAP_VALUES 255
289 #define TUNING_FREQ_COUNT 3
290 #define TUNING_VOLTAGES_COUNT 3
291 #define TUNING_RETRIES 1
292 #define DFS_FREQ_COUNT 2
293 #define NEG_MAR_CHK_WIN_COUNT 2
294 #define PRECISION_FOR_ESTIMATE 100000
295 /* Tuning core voltage requirements */
296 #define NOMINAL_VCORE_TUN BIT(0)
297 #define BOOT_VCORE_TUN BIT(1)
298 #define MIN_OVERRIDE_VCORE_TUN BIT(2)
300 /* Tap cmd sysfs commands */
301 #define TAP_CMD_TRIM_DEFAULT_VOLTAGE 1
302 #define TAP_CMD_TRIM_HIGH_VOLTAGE 2
304 /* Max number of clock parents for sdhci is fixed to 2 */
305 #define TEGRA_SDHCI_MAX_PLL_SOURCE 2
/*
 * Defines the chip-specific quirks and clock sources. For now, the used clock
 * sources vary only from chip to chip. If the sources allowed vary from
 * platform to platform, then move the clock sources list to platform data.
 * When filling the tuning_freq_list in soc_data, the number of entries should
 * be equal to TUNING_FREQ_COUNT. Depending on the number of DFS frequencies
 * supported, set the desired low, high or max frequencies and set the
 * remaining entries as 0s. The number of entries should always be equal to
 * TUNING_FREQ_COUNT in order to get the right tuning data.
 */
317 struct sdhci_tegra_soc_data {
318 const struct sdhci_pltfm_data *pdata;
321 const char *parent_clk_list[TEGRA_SDHCI_MAX_PLL_SOURCE];
322 unsigned int tuning_freq_list[TUNING_FREQ_COUNT];
324 u8 tap_hole_coeffs_count;
325 u8 tap_hole_margins_count;
326 struct tuning_t2t_coeffs *t2t_coeffs;
327 struct tap_hole_coeffs *tap_hole_coeffs;
328 struct tuning_tap_hole_margins *tap_hole_margins;
332 enum tegra_regulator_config_ops {
338 enum tegra_tuning_freq {
344 struct tuning_t2t_coeffs {
348 unsigned int t2t_vnom_slope;
349 unsigned int t2t_vnom_int;
350 unsigned int t2t_vmax_slope;
351 unsigned int t2t_vmax_int;
352 unsigned int t2t_vmin_slope;
353 unsigned int t2t_vmin_int;
356 #define SET_TUNING_COEFFS(_device_id, _vmax, _vmin, _t2t_vnom_slope, \
357 _t2t_vnom_int, _t2t_vmax_slope, _t2t_vmax_int, _t2t_vmin_slope, \
360 .dev_id = _device_id, \
363 .t2t_vnom_slope = _t2t_vnom_slope, \
364 .t2t_vnom_int = _t2t_vnom_int, \
365 .t2t_vmax_slope = _t2t_vmax_slope, \
366 .t2t_vmax_int = _t2t_vmax_int, \
367 .t2t_vmin_slope = _t2t_vmin_slope, \
368 .t2t_vmin_int = _t2t_vmin_int, \
371 static struct tuning_t2t_coeffs t11x_tuning_coeffs[] = {
372 SET_TUNING_COEFFS("sdhci-tegra.3", 1250, 950, 55, 135434,
373 73, 170493, 243, 455948),
374 SET_TUNING_COEFFS("sdhci-tegra.2", 1250, 950, 50, 129738,
375 73, 168898, 241, 453050),
376 SET_TUNING_COEFFS("sdhci-tegra.0", 1250, 950, 62, 143469,
377 82, 180096, 238, 444285),
380 static struct tuning_t2t_coeffs t12x_automotive_tuning_coeffs[] = {
381 SET_TUNING_COEFFS("sdhci-tegra.3", 1040, 950, 29, 130687,
382 29, 130687, 29, 130687),
383 SET_TUNING_COEFFS("sdhci-tegra.2", 1040, 950, 36, 148855,
384 36, 148855, 36, 148855),
385 SET_TUNING_COEFFS("sdhci-tegra.0", 1040, 950, 37, 149783,
386 37, 149783, 37, 149783),
389 static struct tuning_t2t_coeffs t12x_tuning_coeffs[] = {
390 SET_TUNING_COEFFS("sdhci-tegra.3", 1150, 950, 27, 118295,
391 27, 118295, 48, 188148),
392 SET_TUNING_COEFFS("sdhci-tegra.2", 1150, 950, 29, 124427,
393 29, 124427, 54, 203707),
394 SET_TUNING_COEFFS("sdhci-tegra.0", 1150, 950, 25, 115933,
395 25, 115933, 47, 187224),
398 struct tap_hole_coeffs {
400 unsigned int freq_khz;
401 unsigned int thole_vnom_slope;
402 unsigned int thole_vnom_int;
403 unsigned int thole_vmax_slope;
404 unsigned int thole_vmax_int;
405 unsigned int thole_vmin_slope;
406 unsigned int thole_vmin_int;
409 #define SET_TAP_HOLE_COEFFS(_device_id, _freq_khz, _thole_vnom_slope, \
410 _thole_vnom_int, _thole_vmax_slope, _thole_vmax_int, \
411 _thole_vmin_slope, _thole_vmin_int) \
413 .dev_id = _device_id, \
414 .freq_khz = _freq_khz, \
415 .thole_vnom_slope = _thole_vnom_slope, \
416 .thole_vnom_int = _thole_vnom_int, \
417 .thole_vmax_slope = _thole_vmax_slope, \
418 .thole_vmax_int = _thole_vmax_int, \
419 .thole_vmin_slope = _thole_vmin_slope, \
420 .thole_vmin_int = _thole_vmin_int, \
423 static struct tap_hole_coeffs t11x_tap_hole_coeffs[] = {
424 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 765, 102357, 507,
426 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 156000, 1042, 142044, 776,
428 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1215, 167702, 905,
430 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 1925, 284516, 1528,
431 253188, 366, 120001),
432 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 472, 53312, 318,
434 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 156000, 765, 95512, 526,
436 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 949, 121887, 656,
438 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 1901, 259035, 1334,
439 215539, 326, 100986),
440 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 411, 54495, 305,
442 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 156000, 715, 97623, 516,
444 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 905, 124579, 648,
446 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 1893, 264746, 1333,
447 221722, 354, 109880),
450 static struct tap_hole_coeffs t12x_automotive_tap_hole_coeffs[] = {
451 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 198000, 926, 107053, 926,
452 107053, 926, 107053),
453 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 189000, 985, 114635, 985,
454 114635, 985, 114635),
455 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 188000, 991, 115523, 991,
456 115523, 991, 115523),
457 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 296, 27274, 296,
459 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 94000, 1520, 196114, 1520,
460 196114, 1520, 196114),
461 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 578, 67417, 578,
463 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 94000, 1785, 219359, 1785,
464 219359, 1785, 219359),
467 static struct tap_hole_coeffs t12x_tap_hole_coeffs[] = {
468 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 1037, 106934, 1037,
470 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 198000, 1037, 106934, 1037,
472 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1703, 186307, 1703,
473 186307, 890, 130617),
474 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 100000, 2452, 275601, 2452,
475 275601, 1264, 193957),
476 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 3090, 351666, 3090,
477 351666, 1583, 247913),
478 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 468, 36031, 468,
480 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 200000, 468, 36031, 468,
482 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 1146, 117841, 1146,
484 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 100000, 1879, 206195, 1879,
485 206195, 953, 141341),
486 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 2504, 281460, 2504,
487 281460, 1262, 194452),
488 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 874, 85243, 874,
490 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 1554, 167210, 1554,
491 167210, 793, 115672),
492 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 100000, 2290, 255734, 2290,
493 255734, 1164, 178691),
494 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 2916, 331143, 2916,
495 331143, 1480, 232373),
498 struct tuning_tap_hole_margins {
500 unsigned int tap_hole_margin;
503 #define SET_TUNING_TAP_HOLE_MARGIN(_device_id, _tap_hole_margin) \
505 .dev_id = _device_id, \
506 .tap_hole_margin = _tap_hole_margin, \
509 static struct tuning_tap_hole_margins t12x_automotive_tap_hole_margins[] = {
510 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.3", 13),
511 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.2", 7),
512 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.0", 10),
515 struct freq_tuning_constraints {
516 unsigned int vcore_mask;
519 static struct freq_tuning_constraints tuning_vcore_constraints[3] = {
521 .vcore_mask = BOOT_VCORE_TUN,
524 .vcore_mask = BOOT_VCORE_TUN,
527 .vcore_mask = BOOT_VCORE_TUN,
536 enum tap_win_edge_attr {
542 struct tap_window_data {
545 enum tap_win_edge_attr win_start_attr;
546 enum tap_win_edge_attr win_end_attr;
551 struct tuning_values {
559 struct tegra_tuning_data {
560 unsigned int freq_hz;
562 int nom_best_tap_value;
563 struct freq_tuning_constraints constraints;
564 struct tap_hole_coeffs *thole_coeffs;
565 struct tuning_t2t_coeffs *t2t_coeffs;
566 struct tuning_values est_values;
567 struct tuning_values calc_values;
568 struct tap_window_data *tap_data;
569 struct tap_window_data *final_tap_data;
570 u8 num_of_valid_tap_wins;
574 bool is_partial_win_valid;
577 #ifdef CONFIG_MMC_FREQ_SCALING
578 struct freq_gov_params {
580 u8 polling_interval_ms;
581 u8 active_load_threshold;
584 static struct freq_gov_params gov_params[3] = {
586 .idle_mon_cycles = 3,
587 .polling_interval_ms = 50,
588 .active_load_threshold = 25,
591 .idle_mon_cycles = 3,
592 .polling_interval_ms = 50,
593 .active_load_threshold = 25,
596 .idle_mon_cycles = 3,
597 .polling_interval_ms = 50,
598 .active_load_threshold = 25,
603 struct tegra_freq_gov_data {
604 unsigned int curr_active_load;
605 unsigned int avg_active_load;
606 unsigned int act_load_high_threshold;
607 unsigned int max_idle_monitor_cycles;
608 unsigned int curr_freq;
609 unsigned int freqs[DFS_FREQ_COUNT];
610 unsigned int freq_switch_count;
611 bool monitor_idle_load;
614 struct sdhci_tegra_sd_stats {
615 unsigned int data_crc_count;
616 unsigned int cmd_crc_count;
617 unsigned int data_to_count;
618 unsigned int cmd_to_count;
621 struct sdhci_tegra_pll_parent {
623 unsigned long pll_rate;
626 #ifdef CONFIG_DEBUG_FS
627 struct dbg_cfg_data {
628 unsigned int tap_val;
629 unsigned int trim_val;
634 const struct tegra_sdhci_platform_data *plat;
635 const struct sdhci_tegra_soc_data *soc_data;
638 /* ensure atomic set clock calls */
639 struct mutex set_clock_mutex;
640 struct regulator *vdd_io_reg;
641 struct regulator *vdd_slot_reg;
642 struct regulator *vcore_reg;
644 unsigned int vddio_min_uv;
646 unsigned int vddio_max_uv;
647 /* DDR and low speed modes clock */
649 /* HS200, SDR104 modes clock */
651 /* Check if ddr_clk is being used */
653 /* max clk supported by the platform */
654 unsigned int max_clk_limit;
655 /* max ddr clk supported by the platform */
656 unsigned int ddr_clk_limit;
658 bool is_rail_enabled;
660 bool is_sdmmc_emc_clk_on;
662 bool is_sdmmc_sclk_on;
663 struct sdhci_tegra_sd_stats *sd_stat_head;
664 struct notifier_block reboot_notify;
665 struct sdhci_tegra_pll_parent pll_source[TEGRA_SDHCI_MAX_PLL_SOURCE];
666 bool is_parent_pll_source_1;
667 bool set_1v8_calib_offsets;
668 int nominal_vcore_mv;
669 int min_vcore_override_mv;
671 /* Tuning related structures and variables */
672 /* Tuning opcode to be used */
673 unsigned int tuning_opcode;
674 /* Tuning packet size */
675 unsigned int tuning_bsize;
676 /* Num of tuning freqs selected */
677 int tuning_freq_count;
678 unsigned int tap_cmd;
680 unsigned int tuning_status;
682 #define TUNING_STATUS_DONE 1
683 #define TUNING_STATUS_RETUNE 2
684 /* Freq tuning information for each sampling clock freq */
685 struct tegra_tuning_data tuning_data[DFS_FREQ_COUNT];
686 struct tegra_freq_gov_data *gov_data;
688 #ifdef CONFIG_DEBUG_FS
689 /* Override debug config data */
690 struct dbg_cfg_data dbg_cfg;
692 struct pinctrl_dev *pinctrl;
693 struct pinctrl *pinctrl_sdmmc;
694 struct pinctrl_state *schmitt_enable[2];
695 struct pinctrl_state *schmitt_disable[2];
696 struct pinctrl_state *drv_code_strength;
697 struct pinctrl_state *default_drv_code_strength;
698 struct pinctrl_state *sdmmc_pad_ctrl[MMC_TIMINGS_MAX_MODES];
700 unsigned int tuned_tap_delay;
701 struct padctrl *sdmmc_padctrl;
705 static unsigned int boot_volt_req_refcount;
706 static DEFINE_MUTEX(tuning_mutex);
708 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
709 struct sdhci_host *sdhci, unsigned int clock);
710 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
711 unsigned long desired_rate);
712 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
713 unsigned int tap_delay);
714 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
715 u8 option, int min_uV, int max_uV);
716 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
717 unsigned int trim_delay);
718 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
719 unsigned char signal_voltage);
720 static void tegra_sdhci_post_init(struct sdhci_host *sdhci);
721 static void tegra_sdhci_en_strobe(struct sdhci_host *sdhci);
722 static void tegra_sdhci_update_sdmmc_pinctrl_register(struct sdhci_host *sdhci,
724 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
725 int t2t_tuning_value);
726 static void tegra_sdhci_config_tap(struct sdhci_host *sdhci, u8 option);
727 static void vendor_trim_clear_sel_vreg(struct sdhci_host *host, bool enable);
728 static void sdhci_tegra_select_drive_strength(struct sdhci_host *host,
730 static void tegra_sdhci_get_clock_freq_for_mode(struct sdhci_host *sdhci,
731 unsigned int *clock);
732 static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask);
734 static void tegra_sdhci_dumpregs(struct sdhci_host *sdhci)
741 /* print tuning windows */
742 if (!(sdhci->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING)) {
743 for (i = 0; i <= TUNING_WORD_SEL_MASK; i++) {
744 reg = sdhci_readl(sdhci, SDHCI_VNDR_TUN_CTRL0_0);
745 reg &= ~TUNING_WORD_SEL_MASK;
747 sdhci_writel(sdhci, reg, SDHCI_VNDR_TUN_CTRL0_0);
748 val = sdhci_readl(sdhci, SDHCI_VNDR_TUN_STATUS0_0);
749 pr_info("%s: tuning_window[%d]: %#x\n",
750 mmc_hostname(sdhci->mmc), i, val);
753 tap_delay = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
754 trim_delay = tap_delay;
755 tap_delay >>= SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT;
756 tap_delay &= SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK;
757 trim_delay >>= SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT;
758 trim_delay &= SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK;
759 pr_info("sdhci: Tap value: %u | Trim value: %u\n", tap_delay,
761 pr_info("sdhci: SDMMC Interrupt status: 0x%08x\n", sdhci_readl(sdhci,
762 SDMMC_VENDOR_ERR_INTR_STATUS_0));
765 static bool tegra_sdhci_is_tuning_done(struct sdhci_host *sdhci)
767 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
768 struct sdhci_tegra *tegra_host = pltfm_host->priv;
770 if (tegra_host->tuning_status == TUNING_STATUS_DONE) {
771 dev_info(mmc_dev(sdhci->mmc),
772 "Tuning already done, restoring the best tap value : %u\n",
773 tegra_host->tuned_tap_delay);
774 sdhci_tegra_set_tap_delay(sdhci, tegra_host->tuned_tap_delay);
780 static int sdhci_tegra_get_max_tuning_loop_counter(struct sdhci_host *sdhci)
782 u16 hw_tuning_iterations;
785 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50)
786 hw_tuning_iterations = HW_TUNING_256_TRIES;
787 else if (sdhci->mmc->caps2 & MMC_CAP2_HS533)
788 hw_tuning_iterations = HW_TUNING_64_TRIES;
790 hw_tuning_iterations = HW_TUNING_128_TRIES;
792 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_TUN_CTRL0_0);
793 vendor_ctrl &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_MASK <<
794 SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_SHIFT);
795 vendor_ctrl |= (hw_tuning_iterations <<
796 SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_SHIFT);
797 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_TUN_CTRL0_0);
802 static int show_error_stats_dump(struct seq_file *s, void *data)
804 struct sdhci_host *host = s->private;
805 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
806 struct sdhci_tegra *tegra_host = pltfm_host->priv;
807 struct sdhci_tegra_sd_stats *head;
809 seq_printf(s, "ErrorStatistics:\n");
810 seq_printf(s, "DataCRC\tCmdCRC\tDataTimeout\tCmdTimeout\n");
811 head = tegra_host->sd_stat_head;
813 seq_printf(s, "%d\t%d\t%d\t%d\n", head->data_crc_count,
814 head->cmd_crc_count, head->data_to_count,
819 static int show_dfs_stats_dump(struct seq_file *s, void *data)
821 struct sdhci_host *host = s->private;
822 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
823 struct sdhci_tegra *tegra_host = pltfm_host->priv;
824 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
826 seq_printf(s, "DFS statistics:\n");
828 if (host->mmc->dev_stats != NULL)
829 seq_printf(s, "Polling_period: %d\n",
830 host->mmc->dev_stats->polling_interval);
832 if (gov_data != NULL) {
833 seq_printf(s, "cur_active_load: %d\n",
834 gov_data->curr_active_load);
835 seq_printf(s, "avg_active_load: %d\n",
836 gov_data->avg_active_load);
837 seq_printf(s, "act_load_high_threshold: %d\n",
838 gov_data->act_load_high_threshold);
839 seq_printf(s, "freq_switch_count: %d\n",
840 gov_data->freq_switch_count);
845 static int sdhci_error_stats_dump(struct inode *inode, struct file *file)
847 return single_open(file, show_error_stats_dump, inode->i_private);
850 static int sdhci_dfs_stats_dump(struct inode *inode, struct file *file)
852 return single_open(file, show_dfs_stats_dump, inode->i_private);
856 static const struct file_operations sdhci_host_fops = {
857 .open = sdhci_error_stats_dump,
860 .release = single_release,
863 static const struct file_operations sdhci_host_dfs_fops = {
864 .open = sdhci_dfs_stats_dump,
867 .release = single_release,
870 static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
874 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
875 /* Use wp_gpio here instead? */
876 val = readl(host->ioaddr + reg);
877 return val | SDHCI_WRITE_PROTECT;
879 return readl(host->ioaddr + reg);
882 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
884 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
885 struct sdhci_tegra *tegra_host = pltfm_host->priv;
886 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
888 if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
889 (reg == SDHCI_HOST_VERSION))) {
890 return SDHCI_SPEC_200;
892 return readw(host->ioaddr + reg);
895 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
897 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
898 struct sdhci_tegra *tegra_host = pltfm_host->priv;
899 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
901 /* Seems like we're getting spurious timeout and crc errors, so
902 * disable signalling of them. In case of real errors software
903 * timers should take care of eventually detecting them.
905 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
906 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
908 writel(val, host->ioaddr + reg);
910 if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
911 (reg == SDHCI_INT_ENABLE))) {
912 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
913 if (val & SDHCI_INT_CARD_INT)
917 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
921 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
923 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
924 struct sdhci_tegra *tegra_host = pltfm_host->priv;
925 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
927 if (soc_data->nvquirks & NVQUIRK_SHADOW_XFER_MODE_REG) {
929 case SDHCI_TRANSFER_MODE:
931 * Postpone this write, we must do it together with a
932 * command write that is down below.
934 pltfm_host->xfer_mode_shadow = val;
937 writel((val << 16) | pltfm_host->xfer_mode_shadow,
938 host->ioaddr + SDHCI_TRANSFER_MODE);
939 pltfm_host->xfer_mode_shadow = 0;
944 writew(val, host->ioaddr + reg);
947 #ifdef CONFIG_MMC_FREQ_SCALING
949 static bool disable_scaling __read_mostly;
950 module_param(disable_scaling, bool, 0644);
953 * Dynamic frequency calculation.
954 * The active load for the current period and the average active load
955 * are calculated at the end of each polling interval.
957 * If the current active load is greater than the threshold load, then the
958 * frequency is boosted(156MHz).
959 * If the active load is lower than the threshold, then the load is monitored
960 * for a max of three cycles before reducing the frequency(82MHz). If the
961 * average active load is lower, then the monitoring cycles is reduced.
963 * The active load threshold value for both eMMC and SDIO is set to 25 which
964 * is found to give the optimal power and performance. The polling interval is
967 * The polling interval and active load threshold values can be changed by
968 * the user through sysfs.
970 static unsigned long calculate_mmc_target_freq(
971 struct tegra_freq_gov_data *gov_data)
973 unsigned long desired_freq = gov_data->curr_freq;
974 unsigned int type = MMC_TYPE_MMC;
976 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
977 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
978 gov_data->monitor_idle_load = false;
979 gov_data->max_idle_monitor_cycles =
980 gov_params[type].idle_mon_cycles;
982 if (gov_data->monitor_idle_load) {
983 if (!gov_data->max_idle_monitor_cycles) {
984 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
985 gov_data->max_idle_monitor_cycles =
986 gov_params[type].idle_mon_cycles;
988 gov_data->max_idle_monitor_cycles--;
991 gov_data->monitor_idle_load = true;
992 gov_data->max_idle_monitor_cycles *=
993 gov_data->avg_active_load;
994 gov_data->max_idle_monitor_cycles /= 100;
1001 static unsigned long calculate_sdio_target_freq(
1002 struct tegra_freq_gov_data *gov_data)
1004 unsigned long desired_freq = gov_data->curr_freq;
1005 unsigned int type = MMC_TYPE_SDIO;
1007 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
1008 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
1009 gov_data->monitor_idle_load = false;
1010 gov_data->max_idle_monitor_cycles =
1011 gov_params[type].idle_mon_cycles;
1013 if (gov_data->monitor_idle_load) {
1014 if (!gov_data->max_idle_monitor_cycles) {
1015 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
1016 gov_data->max_idle_monitor_cycles =
1017 gov_params[type].idle_mon_cycles;
1019 gov_data->max_idle_monitor_cycles--;
1022 gov_data->monitor_idle_load = true;
1023 gov_data->max_idle_monitor_cycles *=
1024 gov_data->avg_active_load;
1025 gov_data->max_idle_monitor_cycles /= 100;
1029 return desired_freq;
1032 static unsigned long calculate_sd_target_freq(
1033 struct tegra_freq_gov_data *gov_data)
1035 unsigned long desired_freq = gov_data->curr_freq;
1036 unsigned int type = MMC_TYPE_SD;
1038 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
1039 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
1040 gov_data->monitor_idle_load = false;
1041 gov_data->max_idle_monitor_cycles =
1042 gov_params[type].idle_mon_cycles;
1044 if (gov_data->monitor_idle_load) {
1045 if (!gov_data->max_idle_monitor_cycles) {
1046 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
1047 gov_data->max_idle_monitor_cycles =
1048 gov_params[type].idle_mon_cycles;
1050 gov_data->max_idle_monitor_cycles--;
1053 gov_data->monitor_idle_load = true;
1054 gov_data->max_idle_monitor_cycles *=
1055 gov_data->avg_active_load;
1056 gov_data->max_idle_monitor_cycles /= 100;
1060 return desired_freq;
1063 static unsigned long sdhci_tegra_get_target_freq(struct sdhci_host *sdhci,
1064 struct devfreq_dev_status *dfs_stats)
1066 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1067 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1068 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
1069 unsigned long freq = sdhci->mmc->actual_clock;
1072 dev_err(mmc_dev(sdhci->mmc),
1073 "No gov data. Continue using current freq %ld", freq);
1077 if (disable_scaling)
1081 * If clock gating is enabled and clock is currently disabled, then
1084 if (!tegra_host->clk_enabled)
1087 if (dfs_stats->total_time) {
1088 gov_data->curr_active_load = (dfs_stats->busy_time * 100) /
1089 dfs_stats->total_time;
1091 gov_data->curr_active_load = 0;
1094 gov_data->avg_active_load += gov_data->curr_active_load;
1095 gov_data->avg_active_load >>= 1;
1097 if (sdhci->mmc->card) {
1098 if (sdhci->mmc->card->type == MMC_TYPE_SDIO)
1099 freq = calculate_sdio_target_freq(gov_data);
1100 else if (sdhci->mmc->card->type == MMC_TYPE_MMC)
1101 freq = calculate_mmc_target_freq(gov_data);
1102 else if (sdhci->mmc->card->type == MMC_TYPE_SD)
1103 freq = calculate_sd_target_freq(gov_data);
1104 if (gov_data->curr_freq != freq)
1105 gov_data->freq_switch_count++;
1106 gov_data->curr_freq = freq;
1112 static int sdhci_tegra_freq_gov_init(struct sdhci_host *sdhci)
1114 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1115 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1120 if (!((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
1121 (sdhci->mmc->ios.timing == MMC_TIMING_MMC_HS200))) {
1122 dev_info(mmc_dev(sdhci->mmc),
1123 "DFS not required for current operating mode\n");
1127 if (!tegra_host->gov_data) {
1128 tegra_host->gov_data = devm_kzalloc(mmc_dev(sdhci->mmc),
1129 sizeof(struct tegra_freq_gov_data), GFP_KERNEL);
1130 if (!tegra_host->gov_data) {
1131 dev_err(mmc_dev(sdhci->mmc),
1132 "Failed to allocate memory for dfs data\n");
1137 /* Find the supported frequencies */
1138 dev_info(mmc_dev(sdhci->mmc), "DFS supported freqs");
1139 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
1140 freq = tegra_host->tuning_data[i].freq_hz;
1142 * Check the nearest possible clock with pll_c and pll_p as
1143 * the clock sources. Choose the higher frequency.
1145 tegra_host->gov_data->freqs[i] = get_nearest_clock_freq(
1146 tegra_host->pll_source[0].pll_rate,
1148 freq = get_nearest_clock_freq(
1149 tegra_host->pll_source[1].pll_rate,
1151 if (freq > tegra_host->gov_data->freqs[i])
1152 tegra_host->gov_data->freqs[i] = freq;
1153 pr_err("%d,", tegra_host->gov_data->freqs[i]);
1156 tegra_host->gov_data->monitor_idle_load = false;
1157 tegra_host->gov_data->curr_freq = sdhci->mmc->actual_clock;
1158 if (sdhci->mmc->card) {
1159 type = sdhci->mmc->card->type;
1160 sdhci->mmc->dev_stats->polling_interval =
1161 gov_params[type].polling_interval_ms;
1162 tegra_host->gov_data->act_load_high_threshold =
1163 gov_params[type].active_load_threshold;
1164 tegra_host->gov_data->max_idle_monitor_cycles =
1165 gov_params[type].idle_mon_cycles;
1173 static unsigned int tegra_sdhci_get_cd(struct sdhci_host *sdhci)
1175 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1176 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1178 return tegra_host->card_present;
1181 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
1183 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1184 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1185 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1187 if (!gpio_is_valid(plat->wp_gpio))
1190 return gpio_get_value_cansleep(plat->wp_gpio);
/*
 * Program the requested UHS/HS bus-speed mode into HOST_CONTROL2 and apply
 * the mode-dependent vendor settings: drive strength, DQS trim for HS400,
 * the DDR50 divider/trim workaround, and finally the tap delay.
 * Always returns 0.
 */
static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
		unsigned int uhs)
{
	u16 clk, ctrl_2;
	u32 vndr_ctrl, trim_delay, best_tap_value;
	unsigned int dqs_trim_delay;
	struct tegra_tuning_data *tuning_data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = pltfm_host->priv;
	const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host
	 * For HS200 we need to set UHS_MODE_SEL to SDR104.
	 * It works as SDR 104 in SD 4-bit mode and HS200 in eMMC 8-bit mode.
	 * SDR50 mode timing seems to have issues. Programming SDR104
	 * mode for SDR50 mode for reliable transfers over interface.
	 * For HS400 we need to set UHS_MODE_SEL to HS400.
	 */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		/* Some SoCs must run SDR50 as SDR104 for reliability */
		if (soc_data->nvquirks2 & NVQUIRK2_SELECT_SDR50_MODE)
			ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		else
			ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_HS400:
		ctrl_2 |= SDHCI_CTRL_UHS_HS400;
		break;
	}
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
	sdhci_tegra_select_drive_strength(host, uhs);

	if (uhs == MMC_TIMING_MMC_HS400) {
		/* HS533 uses its own DQS trim value when supported */
		if (host->mmc->caps2 & MMC_CAP2_HS533)
			dqs_trim_delay = plat->dqs_trim_delay_hs533;
		else
			dqs_trim_delay = plat->dqs_trim_delay;

		ctrl_2 = sdhci_readl(host, SDHCI_VNDR_CAP_OVERRIDES_0);
		ctrl_2 &= ~(SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_MASK <<
			SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_SHIFT);
		ctrl_2 |= ((dqs_trim_delay &
			SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_MASK) <<
			SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_SHIFT);
		sdhci_writel(host, ctrl_2, SDHCI_VNDR_CAP_OVERRIDES_0);
	}

	if (uhs == MMC_TIMING_UHS_DDR50) {
		/* DDR50 needs a fixed divider of 1 in CLOCK_CONTROL */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT);
		clk |= 1 << SDHCI_DIVIDER_SHIFT;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		/* Set the ddr mode trim delay if required */
		if (plat->is_ddr_trim_delay) {
			trim_delay = plat->ddr_trim_delay;
			vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
			vndr_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
				SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
			vndr_ctrl |= (trim_delay <<
				SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
			sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_CLK_CTRL);
		}
	}

	/* Set the best tap value based on timing */
	if (((uhs == MMC_TIMING_MMC_HS200) ||
		(uhs == MMC_TIMING_UHS_SDR104) ||
		(uhs == MMC_TIMING_MMC_HS400) ||
		(uhs == MMC_TIMING_UHS_SDR50)) &&
		(tegra_host->tuning_status == TUNING_STATUS_DONE)) {
		if (host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) {
			tuning_data = sdhci_tegra_get_tuning_data(host,
				host->mmc->ios.clock);
			/* High-voltage trim uses the nominal tap value */
			best_tap_value = (tegra_host->tap_cmd ==
				TAP_CMD_TRIM_HIGH_VOLTAGE) ?
				tuning_data->nom_best_tap_value :
				tuning_data->best_tap_value;
		} else {
			best_tap_value = tegra_host->tuned_tap_delay;
		}
	} else if ((uhs == MMC_TIMING_UHS_DDR50) && (plat->is_ddr_tap_delay)) {
		best_tap_value = plat->ddr_tap_delay;
	} else {
		best_tap_value = tegra_host->plat->tap_delay;
	}
	sdhci_tegra_set_tap_delay(host, best_tap_value);

	return 0;
}
1302 static void sdhci_status_notify_cb(int card_present, void *dev_id)
1304 struct sdhci_host *sdhci = (struct sdhci_host *)dev_id;
1305 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1306 struct tegra_sdhci_platform_data *plat;
1307 unsigned int status, oldstat;
1309 pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc),
1312 plat = pdev->dev.platform_data;
1313 if (!plat->mmc_data.status) {
1314 if (card_present == 1) {
1315 sdhci->mmc->rescan_disable = 0;
1316 mmc_detect_change(sdhci->mmc, 0);
1317 } else if (card_present == 0) {
1318 sdhci->mmc->detect_change = 0;
1319 sdhci->mmc->rescan_disable = 1;
1324 status = plat->mmc_data.status(mmc_dev(sdhci->mmc));
1326 oldstat = plat->mmc_data.card_present;
1327 plat->mmc_data.card_present = status;
1328 if (status ^ oldstat) {
1329 pr_debug("%s: Slot status change detected (%d -> %d)\n",
1330 mmc_hostname(sdhci->mmc), oldstat, status);
1331 if (status && !plat->mmc_data.built_in)
1332 mmc_detect_change(sdhci->mmc, (5 * HZ) / 2);
1334 mmc_detect_change(sdhci->mmc, 0);
1338 static irqreturn_t carddetect_irq(int irq, void *data)
1340 struct sdhci_host *sdhost = (struct sdhci_host *)data;
1341 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
1342 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1343 struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
1344 struct tegra_sdhci_platform_data *plat;
1347 plat = pdev->dev.platform_data;
1349 tegra_host->card_present =
1350 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
1352 if (!tegra_host->card_present) {
1353 err = tegra_sdhci_configure_regulators(tegra_host,
1354 CONFIG_REG_DIS, 0 , 0);
1356 dev_err(mmc_dev(sdhost->mmc),
1357 "Failed to disable card regulators %d\n", err);
1359 * Set retune request as tuning should be done next time
1360 * a card is inserted.
1362 tegra_host->tuning_status = TUNING_STATUS_RETUNE;
1363 tegra_host->force_retune = true;
1366 tasklet_schedule(&sdhost->card_tasklet);
/*
 * Toggle SEL_VREG in the vendor IO trim control register for dynamic trim
 * supply switching. enable=true (active state) clears SEL_VREG and resets
 * the CMD/DATA lines so the new trim supply takes effect; enable=false
 * (idle state) sets SEL_VREG again.
 * NOTE(review): the short delays between write and reset follow the
 * original sequencing — confirm exact values against the full file.
 */
static void vendor_trim_clear_sel_vreg(struct sdhci_host *host, bool enable)
{
	unsigned int misc_ctrl;

	misc_ctrl = sdhci_readl(host, SDMMC_VNDR_IO_TRIM_CNTRL_0);
	if (enable) {
		misc_ctrl &= ~(SDMMC_VNDR_IO_TRIM_CNTRL_0_SEL_VREG);
		sdhci_writel(host, misc_ctrl, SDMMC_VNDR_IO_TRIM_CNTRL_0);
		udelay(3);
		tegra_sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	} else {
		misc_ctrl |= (SDMMC_VNDR_IO_TRIM_CNTRL_0_SEL_VREG);
		sdhci_writel(host, misc_ctrl, SDMMC_VNDR_IO_TRIM_CNTRL_0);
		udelay(1);
	}
}
/*
 * Re-apply all vendor-specific controller state after a full reset
 * (SDHCI_RESET_ALL clears it): error statistics, governor counters, tap
 * and trim delays, vendor clock/misc control quirks, UHS capability masks
 * and the hardware-tuning configuration. Runs only for SDHCI_RESET_ALL.
 */
static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
{
	u32 misc_ctrl;
	u32 vendor_ctrl;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = pltfm_host->priv;
	struct tegra_tuning_data *tuning_data;
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
	unsigned int best_tap_value;

	if (!(mask & SDHCI_RESET_ALL))
		return;

	/* A full reset also clears the accumulated error statistics */
	if (tegra_host->sd_stat_head != NULL) {
		tegra_host->sd_stat_head->data_crc_count = 0;
		tegra_host->sd_stat_head->cmd_crc_count = 0;
		tegra_host->sd_stat_head->data_to_count = 0;
		tegra_host->sd_stat_head->cmd_to_count = 0;
	}

	if (tegra_host->gov_data != NULL)
		tegra_host->gov_data->freq_switch_count = 0;

	/* Restore the tap delay lost by the reset */
	if (soc_data->nvquirks & NVQUIRK_SET_TAP_DELAY) {
		if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
			&& (host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
			if (host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) {
				tuning_data = sdhci_tegra_get_tuning_data(host,
					host->mmc->ios.clock);
				best_tap_value = (tegra_host->tap_cmd ==
					TAP_CMD_TRIM_HIGH_VOLTAGE) ?
					tuning_data->nom_best_tap_value :
					tuning_data->best_tap_value;
			} else {
				best_tap_value = tegra_host->tuned_tap_delay;
			}
		} else {
			best_tap_value = tegra_host->plat->tap_delay;
		}
		sdhci_tegra_set_tap_delay(host, best_tap_value);
	}

	vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
	if (soc_data->nvquirks & NVQUIRK_ENABLE_PADPIPE_CLKEN) {
		vendor_ctrl |=
			SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE;
	}
	if (soc_data->nvquirks & NVQUIRK_DISABLE_SPI_MODE_CLKEN) {
		vendor_ctrl &=
			~SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
	}
	if (soc_data->nvquirks & NVQUIRK_EN_FEEDBACK_CLK) {
		vendor_ctrl &=
			~SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
	} else {
		vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK;
	}

	/* For automotive enable feedback clock for non-tuning modes */
	if (plat->enb_feedback_clock) {
		if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
			&& (host->mmc->pm_flags &
			MMC_PM_KEEP_POWER)) {
			vendor_ctrl &=
				~SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
		}
	}

	if (soc_data->nvquirks & NVQUIRK_SET_TRIM_DELAY) {
		vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
			SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
		vendor_ctrl |= (plat->trim_delay <<
			SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
	}
	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50_TUNING)
		vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_SDR50_TUNING;
	sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);

	misc_ctrl = sdhci_readl(host, SDHCI_VNDR_MISC_CTRL);
	if (soc_data->nvquirks & NVQUIRK_ENABLE_SD_3_0)
		misc_ctrl |= SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0;
	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) {
		misc_ctrl |=
			SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT;
	}
	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) {
		misc_ctrl |=
			SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT;
	}
	/* Enable DDR mode support only for SDMMC4 */
	if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) {
		if (!(plat->uhs_mask & MMC_UHS_MASK_DDR50)) {
			misc_ctrl |=
				SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT;
		}
	}
	if (soc_data->nvquirks & NVQUIRK_INFINITE_ERASE_TIMEOUT) {
		misc_ctrl |=
			SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT;
	}
	if (soc_data->nvquirks & NVQUIRK_SET_PIPE_STAGES_MASK_0)
		misc_ctrl &= ~SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK;

	/* External loopback only in tuned + keep-power configurations */
	if (plat->enb_ext_loopback) {
		if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
			&& (host->mmc->pm_flags &
			MMC_PM_KEEP_POWER)) {
			misc_ctrl &= ~(1 <<
				SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
		} else {
			misc_ctrl |= (1 <<
				SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
		}
	}

	/* Disable External loopback for all sdmmc instances */
	if (soc_data->nvquirks & NVQUIRK_DISABLE_EXTERNAL_LOOPBACK)
		misc_ctrl &= ~(1 << SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);

	sdhci_writel(host, misc_ctrl, SDHCI_VNDR_MISC_CTRL);

	if (soc_data->nvquirks & NVQUIRK_UPDATE_PAD_CNTRL_REG) {
		misc_ctrl = sdhci_readl(host, SDMMC_IO_SPARE_0);
		misc_ctrl |= (1 << SPARE_OUT_3_OFFSET);
		sdhci_writel(host, misc_ctrl, SDMMC_IO_SPARE_0);
	}

	/* SEL_VREG should be 0 for all modes*/
	if (soc_data->nvquirks2 &
		NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH)
		vendor_trim_clear_sel_vreg(host, true);

	if (soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CMD23)
		host->flags &= ~SDHCI_AUTO_CMD23;

	/* Mask the support for any UHS modes if specified */
	if (plat->uhs_mask & MMC_UHS_MASK_SDR104)
		host->mmc->caps &= ~MMC_CAP_UHS_SDR104;

	if (plat->uhs_mask & MMC_UHS_MASK_DDR50)
		host->mmc->caps &= ~MMC_CAP_UHS_DDR50;

	if (plat->uhs_mask & MMC_UHS_MASK_SDR50)
		host->mmc->caps &= ~MMC_CAP_UHS_SDR50;

	if (plat->uhs_mask & MMC_UHS_MASK_SDR25)
		host->mmc->caps &= ~MMC_CAP_UHS_SDR25;

	if (plat->uhs_mask & MMC_UHS_MASK_SDR12)
		host->mmc->caps &= ~MMC_CAP_UHS_SDR12;

	/* Masking HS400 also disables enhanced strobe and HS533 */
	if (plat->uhs_mask & MMC_MASK_HS400) {
		host->mmc->caps2 &= ~MMC_CAP2_HS400;
		host->mmc->caps2 &= ~MMC_CAP2_EN_STROBE;
		host->mmc->caps2 &= ~MMC_CAP2_HS533;
	}

#ifdef CONFIG_MMC_SDHCI_TEGRA_HS200_DISABLE
	host->mmc->caps2 &= ~MMC_CAP2_HS200;
#else
	if (plat->uhs_mask & MMC_MASK_HS200)
		host->mmc->caps2 &= ~MMC_CAP2_HS200;
#endif

	if (soc_data->nvquirks2 & NVQUIRK2_UPDATE_HW_TUNING_CONFG) {
		vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
		vendor_ctrl &= ~(SDHCI_VNDR_TUN_CTRL0_0_MUL_M);
		vendor_ctrl |= SDHCI_VNDR_TUN_CTRL0_0_MUL_M_VAL;
		vendor_ctrl |= SDHCI_VNDR_TUN_CTRL_RETUNE_REQ_EN;
		sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_TUN_CTRL0_0);

		vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL1_0);
		vendor_ctrl &= ~(SDHCI_VNDR_TUN_CTRL1_TUN_STEP_SIZE);
		sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_TUN_CTRL1_0);
	}

	/* Use timeout clk data timeout counter for generating wr crc status */
	if (soc_data->nvquirks &
		NVQUIRK_USE_TMCLK_WR_CRC_TIMEOUT) {
		vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_SYS_SW_CTRL);
		vendor_ctrl |= SDHCI_VNDR_SYS_SW_CTRL_WR_CRC_USE_TMCLK;
		sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_SYS_SW_CTRL);
	}
}
1571 static int tegra_sdhci_buswidth(struct sdhci_host *sdhci, int bus_width)
1573 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1574 const struct tegra_sdhci_platform_data *plat;
1577 plat = pdev->dev.platform_data;
1579 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL);
1580 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
1581 ctrl &= ~SDHCI_CTRL_4BITBUS;
1582 ctrl |= SDHCI_CTRL_8BITBUS;
1584 ctrl &= ~SDHCI_CTRL_8BITBUS;
1585 if (bus_width == MMC_BUS_WIDTH_4)
1586 ctrl |= SDHCI_CTRL_4BITBUS;
1588 ctrl &= ~SDHCI_CTRL_4BITBUS;
1590 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL);
1595 * Calculation of nearest clock frequency for desired rate:
1596 * Get the divisor value, div = p / d_rate
1597 * 1. If it is nearer to ceil(p/d_rate) then increment the div value by 0.5 and
1598 * nearest_rate, i.e. result = p / (div + 0.5) = (p << 1)/((div << 1) + 1).
1599 * 2. If not, result = p / div
1600 * As the nearest clk freq should be <= to desired_rate,
1601 * 3. If result > desired_rate then increment the div by 0.5
1602 * and do, (p << 1)/((div << 1) + 1)
1603 * 4. Else return result
1604 * Here, If condtions 1 & 3 are both satisfied then to keep track of div value,
1605 * defined index variable.
1607 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
1608 unsigned long desired_rate)
1610 unsigned long result;
1614 if (pll_rate <= desired_rate)
1617 div = pll_rate / desired_rate;
1618 if (div > MAX_DIVISOR_VALUE) {
1619 div = MAX_DIVISOR_VALUE;
1620 result = pll_rate / div;
1622 if ((pll_rate % desired_rate) >= (desired_rate / 2))
1623 result = (pll_rate << 1) / ((div << 1) + index++);
1625 result = pll_rate / div;
1627 if (desired_rate < result) {
1629 * Trying to get lower clock freq than desired clock,
1630 * by increasing the divisor value by 0.5
1632 result = (pll_rate << 1) / ((div << 1) + index);
/*
 * Pick the better of the two PLL parents (pll_p/pll_c) for the requested
 * rate and reparent the SDMMC clock if the choice differs from the current
 * parent. Skipped on FPGA and when either PLL rate is unknown.
 */
static void tegra_sdhci_clock_set_parent(struct sdhci_host *host,
	unsigned long desired_rate)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = pltfm_host->priv;
	struct clk *parent_clk;
	unsigned long pll_source_1_freq;
	unsigned long pll_source_2_freq;
	struct sdhci_tegra_pll_parent *pll_source = tegra_host->pll_source;
	int rc;

	if (tegra_platform_is_fpga())
		return;

	/*
	 * Currently pll_p and pll_c are used as clock sources for SDMMC. If clk
	 * rate is missing for either of them, then no selection is needed and
	 * the default parent is used.
	 */
	if (!pll_source[0].pll_rate || !pll_source[1].pll_rate)
		return;

	pll_source_1_freq = get_nearest_clock_freq(pll_source[0].pll_rate,
			desired_rate);
	pll_source_2_freq = get_nearest_clock_freq(pll_source[1].pll_rate,
			desired_rate);

	/*
	 * For low freq requests, both the desired rates might be higher than
	 * the requested clock frequency. In such cases, select the parent
	 * with the lower frequency rate.
	 */
	if ((pll_source_1_freq > desired_rate)
		&& (pll_source_2_freq > desired_rate)) {
		if (pll_source_2_freq <= pll_source_1_freq) {
			desired_rate = pll_source_2_freq;
			pll_source_1_freq = 0;
		} else {
			desired_rate = pll_source_1_freq;
			pll_source_2_freq = 0;
		}
		rc = clk_set_rate(pltfm_host->clk, desired_rate);
	}

	if (pll_source_1_freq > pll_source_2_freq) {
		/* Already on source 1: nothing to reparent */
		if (!tegra_host->is_parent_pll_source_1) {
			parent_clk = pll_source[0].pll;
			tegra_host->is_parent_pll_source_1 = true;
			clk_set_rate(pltfm_host->clk, DEFAULT_SDHOST_FREQ);
		} else
			return;
	} else if (tegra_host->is_parent_pll_source_1) {
		parent_clk = pll_source[1].pll;
		tegra_host->is_parent_pll_source_1 = false;
		clk_set_rate(pltfm_host->clk, DEFAULT_SDHOST_FREQ);
	} else
		return;

	rc = clk_set_parent(pltfm_host->clk, parent_clk);
	if (rc)
		pr_err("%s: failed to set pll parent clock %d\n",
			mmc_hostname(host->mmc), rc);
}
1702 static void tegra_sdhci_get_clock_freq_for_mode(struct sdhci_host *sdhci,
1703 unsigned int *clock)
1705 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1706 const struct tegra_sdhci_platform_data *plat = pdev->dev.platform_data;
1707 unsigned int ios_timing = sdhci->mmc->ios.timing;
1710 if (!(plat->is_fix_clock_freq) || !(pdev->dev.of_node)
1711 || (ios_timing >= MMC_TIMINGS_MAX_MODES))
1715 * Index 0 is for ID mode and rest mapped with index being ios timings.
1716 * If the frequency for some particular mode is set as 0 then return
1717 * without updating the clock
1719 if (*clock <= 400000)
1722 index = ios_timing + 1;
1724 if (plat->fixed_clk_freq_table[index] != 0)
1725 *clock = plat->fixed_clk_freq_table[index];
1727 pr_warn("%s: The fixed_clk_freq_table entry for ios timing %d is 0. So using clock rate as requested by card\n",
1728 mmc_hostname(sdhci->mmc), ios_timing);
/*
 * Compute and program the controller clock rate for the requested card
 * clock: doubled for DDR50 (bounded by ddr_clk_limit), forced to the
 * tuning frequency for SDR50, optionally overridden by the board's fixed
 * frequency table, and clamped to max_clk_limit. Also restores the tuned
 * tap delay when frequency scaling is active.
 */
static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci,
	unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
	struct sdhci_tegra *tegra_host = pltfm_host->priv;
	unsigned int clk_rate;
#ifdef CONFIG_MMC_FREQ_SCALING
	unsigned int tap_value;
	struct tegra_tuning_data *tuning_data;
#endif

	if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
		/*
		 * In ddr mode, tegra sdmmc controller clock frequency
		 * should be double the card clock frequency.
		 */
		if (tegra_host->ddr_clk_limit &&
			(tegra_host->ddr_clk_limit < clock))
			clk_rate = tegra_host->ddr_clk_limit * 2;
		else
			clk_rate = clock * 2;
	} else {
		clk_rate = clock;
	}

	/* SDR50 must run at the tuning frequency when one is defined */
	if ((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50) &&
		tegra_host->soc_data->tuning_freq_list[0])
		clk_rate = tegra_host->soc_data->tuning_freq_list[0];

	tegra_sdhci_get_clock_freq_for_mode(sdhci, &clk_rate);

	if (tegra_host->max_clk_limit &&
		(clk_rate > tegra_host->max_clk_limit))
		clk_rate = tegra_host->max_clk_limit;

	tegra_sdhci_clock_set_parent(sdhci, clk_rate);
	clk_set_rate(pltfm_host->clk, clk_rate);
	sdhci->max_clk = clk_get_rate(pltfm_host->clk);

	/* FPGA supports 26MHz of clock for SDMMC. */
	if (tegra_platform_is_fpga())
		sdhci->max_clk = 13000000;

#ifdef CONFIG_MMC_FREQ_SCALING
	/* Set the tap delay if tuning is done and dfs is enabled */
	if (sdhci->mmc->df &&
		(tegra_host->tuning_status == TUNING_STATUS_DONE)) {
		tuning_data = sdhci_tegra_get_tuning_data(sdhci, clock);
		tap_value = (tegra_host->tap_cmd == TAP_CMD_TRIM_HIGH_VOLTAGE) ?
			tuning_data->nom_best_tap_value :
			tuning_data->best_tap_value;
		sdhci_tegra_set_tap_delay(sdhci, tap_value);
	}
#endif
}
/*
 * Enable or gate the SDMMC clocks. A non-zero request ungates the module
 * clock, restores the trim supply, programs the rate, enables the EMC and
 * SCLK floors and runs a periodic pad recalibration when due; a zero
 * request tears everything down again. Serialised by set_clock_mutex.
 */
static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
	struct sdhci_tegra *tegra_host = pltfm_host->priv;
	struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
	struct tegra_sdhci_platform_data *plat;
	u8 ctrl;
	int ret = 0;
	ktime_t cur_time;
	s64 period_time;

	mutex_lock(&tegra_host->set_clock_mutex);
	pr_debug("%s %s %u enabled=%u\n", __func__,
		mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled);
	plat = pdev->dev.platform_data;

	if (clock) {
		if (!tegra_host->clk_enabled) {
			ret = clk_prepare_enable(pltfm_host->clk);
			if (ret) {
				dev_err(mmc_dev(sdhci->mmc),
					"clock enable is failed, ret: %d\n", ret);
				mutex_unlock(&tegra_host->set_clock_mutex);
				return;
			}
			/* Delayed-clock-gating runtime PM reference */
			if (sdhci->runtime_pm_init_done &&
				IS_RTPM_DELAY_CG(plat->rtpm_type)) {
				sdhci->runtime_pm_enable_dcg = true;
				pm_runtime_get_sync(&pdev->dev);
			}
			tegra_host->clk_enabled = true;
			sdhci->is_clk_on = true;
			ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
			ctrl |= SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
			sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
			if (tegra_host->soc_data->nvquirks2 &
				NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH) {
				/* power up / active state */
				vendor_trim_clear_sel_vreg(sdhci, true);
			}
		}
		tegra_sdhci_set_clk_rate(sdhci, clock);

		if (tegra_host->emc_clk && (!tegra_host->is_sdmmc_emc_clk_on)) {
			ret = clk_prepare_enable(tegra_host->emc_clk);
			if (ret) {
				dev_err(mmc_dev(sdhci->mmc),
					"clock enable is failed, ret: %d\n", ret);
				mutex_unlock(&tegra_host->set_clock_mutex);
				return;
			}
			tegra_host->is_sdmmc_emc_clk_on = true;
		}
		if (tegra_host->sclk && (!tegra_host->is_sdmmc_sclk_on)) {
			ret = clk_prepare_enable(tegra_host->sclk);
			if (ret) {
				dev_err(mmc_dev(sdhci->mmc),
					"clock enable is failed, ret: %d\n", ret);
				mutex_unlock(&tegra_host->set_clock_mutex);
				return;
			}
			tegra_host->is_sdmmc_sclk_on = true;
		}
		/* Re-run pad calibration if the periodic timeout elapsed */
		if (plat->en_periodic_calib &&
			sdhci->is_calibration_done) {
			cur_time = ktime_get();
			period_time = ktime_to_ms(ktime_sub(cur_time,
						tegra_host->timestamp));
			if (period_time >= SDHCI_PERIODIC_CALIB_TIMEOUT)
				tegra_sdhci_do_calibration(sdhci,
						sdhci->mmc->ios.signal_voltage);
		}
	} else if (!clock && tegra_host->clk_enabled) {
		if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on) {
			clk_disable_unprepare(tegra_host->emc_clk);
			tegra_host->is_sdmmc_emc_clk_on = false;
		}
		if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on) {
			clk_disable_unprepare(tegra_host->sclk);
			tegra_host->is_sdmmc_sclk_on = false;
		}
		if (tegra_host->soc_data->nvquirks2 &
			NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH){
			/* power down / idle state */
			vendor_trim_clear_sel_vreg(sdhci, false);
		}
		ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
		ctrl &= ~SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
		sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);

		tegra_host->clk_enabled = false;
		sdhci->is_clk_on = false;
		/* Drop the delayed-clock-gating runtime PM reference */
		if (sdhci->runtime_pm_init_done &&
			sdhci->runtime_pm_enable_dcg &&
			IS_RTPM_DELAY_CG(plat->rtpm_type)) {
			sdhci->runtime_pm_enable_dcg = false;
			pm_runtime_put_sync(&pdev->dev);
		}
		clk_disable_unprepare(pltfm_host->clk);
	}
	mutex_unlock(&tegra_host->set_clock_mutex);
}
1889 static void tegra_sdhci_en_strobe(struct sdhci_host *host)
1893 vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_SYS_SW_CTRL);
1895 SDHCI_VNDR_SYS_SW_CTRL_STROBE_SHIFT);
1896 sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_SYS_SW_CTRL);
/*
 * Post-initialisation for HS400-class cards: enable enhanced strobe when
 * supported, program the HS533 TX delay offset, then kick off and poll the
 * DLL calibration (bounded, ~5 ms worst case).
 */
static void tegra_sdhci_post_init(struct sdhci_host *sdhci)
{
	u32 dll_cfg;
	u32 dll_ctrl0;
	unsigned timeout = 5;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
	struct sdhci_tegra *tegra_host = pltfm_host->priv;

	if ((sdhci->mmc->card->ext_csd.strobe_support) &&
		(sdhci->mmc->caps2 & MMC_CAP2_EN_STROBE) &&
		tegra_host->plat->en_strobe)
		tegra_sdhci_en_strobe(sdhci);

	/* Program TX_DLY_CODE_OFFSET Value for HS533 mode*/
	if (sdhci->mmc->card->state & MMC_STATE_HIGHSPEED_533) {
		dll_ctrl0 = sdhci_readl(sdhci, SDHCI_VNDR_DLL_CTRL0_0);
		dll_ctrl0 &= ~(SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_MASK <<
			SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_SHIFT);
		dll_ctrl0 |= ((SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_OFFSET &
			SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_MASK) <<
			SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_SHIFT);
		sdhci_writel(sdhci, dll_ctrl0, SDHCI_VNDR_DLL_CTRL0_0);
	}

	/* Start DLL calibration */
	dll_cfg = sdhci_readl(sdhci, SDHCI_VNDR_DLLCAL_CFG);
	dll_cfg |= SDHCI_VNDR_DLLCAL_CFG_EN_CALIBRATE;
	sdhci_writel(sdhci, dll_cfg, SDHCI_VNDR_DLLCAL_CFG);

	mdelay(1);

	/* Wait until the dll calibration is done */
	do {
		if (!(sdhci_readl(sdhci, SDHCI_VNDR_DLLCAL_CFG_STATUS) &
			SDHCI_VNDR_DLLCAL_CFG_STATUS_DLL_ACTIVE))
			break;

		mdelay(1);
		timeout--;
	} while (timeout);

	if (!timeout)
		dev_err(mmc_dev(sdhci->mmc), "DLL calibration is failed\n");
}
/*
 * Switch the SDMMC pad pinctrl configuration: with set=true apply the
 * schmitt-enable states and tuned drive code strength, with set=false
 * revert to the schmitt-disable states and default drive code strength.
 * Missing pinctrl states are skipped; failures are only warned about.
 */
static void tegra_sdhci_update_sdmmc_pinctrl_register(struct sdhci_host *sdhci,
		bool set)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
	struct sdhci_tegra *tegra_host = pltfm_host->priv;
	struct pinctrl_state *set_schmitt[2];
	int ret;
	int i;

	if (set) {
		set_schmitt[0] = tegra_host->schmitt_enable[0];
		set_schmitt[1] = tegra_host->schmitt_enable[1];

		if (!IS_ERR_OR_NULL(tegra_host->drv_code_strength)) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
				tegra_host->drv_code_strength);
			if (ret < 0)
				dev_warn(mmc_dev(sdhci->mmc),
					"setting drive code strength failed\n");
		}
	} else {
		set_schmitt[0] = tegra_host->schmitt_disable[0];
		set_schmitt[1] = tegra_host->schmitt_disable[1];

		if (!IS_ERR_OR_NULL(tegra_host->default_drv_code_strength)) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
				tegra_host->default_drv_code_strength);
			if (ret < 0)
				dev_warn(mmc_dev(sdhci->mmc),
					"setting default drive code strength failed\n");
		}
	}

	/* Apply both schmitt states, skipping any the board doesn't define */
	for (i = 0; i < 2; i++) {
		if (IS_ERR_OR_NULL(set_schmitt[i]))
			continue;
		ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
				set_schmitt[i]);
		if (ret < 0)
			dev_warn(mmc_dev(sdhci->mmc),
				"setting schmitt state failed\n");
	}
}
1988 static void tegra_sdhci_configure_e_input(struct sdhci_host *sdhci, bool enable)
1992 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
1994 val |= SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1996 val &= ~SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1997 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
/*
 * Run the pad auto-calibration sequence for the given signalling voltage:
 * power up the comparator pad, select the VREF, start auto-cal with any
 * per-board offsets/step, poll for completion (~10 ms bound), then either
 * power the pad back down or - on SoCs without working auto-cal drive
 * strength - copy the resulting pulldown/pullup codes into the pinmux.
 */
static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
	unsigned char signal_voltage)
{
	unsigned int val;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
	struct sdhci_tegra *tegra_host = pltfm_host->priv;
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	unsigned int timeout = 10;
	unsigned int calib_offsets = 0;
	unsigned int pulldown_code;
	unsigned int pullup_code;
	unsigned long pin_config;
	int err;

	/* No Calibration for sdmmc4 */
	if (tegra_host->plat->disable_auto_cal)
		return;

	if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CALIBRATION))
		return;

	val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
	val &= ~SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK;
	if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD)
		val |= SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
	if (soc_data->nvquirks & NVQUIRK_SET_SDMEMCOMP_VREF_SEL) {
		/* VREF selection depends on the pad signalling voltage */
		if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
			val |= tegra_host->plat->compad_vref_3v3;
		else if (signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			val |= tegra_host->plat->compad_vref_1v8;
	}
	sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);

	/* Wait for 1us after e_input is enabled*/
	if (soc_data->nvquirks2 & NVQUIRK2_ADD_DELAY_AUTO_CALIBRATION)
		udelay(1);

	/* Enable Auto Calibration*/
	val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
	val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
	val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
	if (tegra_host->plat->enable_autocal_slew_override)
		val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_SLW_OVERRIDE;
	if (unlikely(soc_data->nvquirks & NVQUIRK_SET_CALIBRATION_OFFSETS)) {
		if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
			calib_offsets = tegra_host->plat->calib_3v3_offsets;
		else if (signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			calib_offsets = tegra_host->plat->calib_1v8_offsets;

		if (calib_offsets) {
			/* Program Auto cal PD offset(bits 8:14) */
			val &= ~(0x7F <<
				SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
			val |= (((calib_offsets >> 8) & 0xFF) <<
				SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
			/* Program Auto cal PU offset(bits 0:6) */
			val &= ~0x7F;
			val |= (calib_offsets & 0xFF);
		}
	}
	if (tegra_host->plat->auto_cal_step) {
		val &= ~(0x7 <<
			SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_STEP_OFFSET_SHIFT);
		val |= (tegra_host->plat->auto_cal_step <<
			SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_STEP_OFFSET_SHIFT);
	}
	sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);

	/* Wait for 1us after auto calibration is enabled*/
	if (soc_data->nvquirks2 & NVQUIRK2_ADD_DELAY_AUTO_CALIBRATION)
		udelay(1);

	/* Wait until the calibration is done */
	do {
		if (!(sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) &
			SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE))
			break;

		mdelay(1);
		timeout--;
	} while (timeout);

	if (!timeout)
		dev_err(mmc_dev(sdhci->mmc), "Auto calibration failed\n");

	if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD)
		tegra_sdhci_configure_e_input(sdhci, false);

	if (unlikely(soc_data->nvquirks & NVQUIRK_SET_DRIVE_STRENGTH)) {
		/* Disable Auto calibration */
		val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
		val &= ~SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
		sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);

		if (tegra_host->pinctrl && tegra_host->drive_group_sel >= 0) {
			/* Get the pull down codes from auto cal status reg */
			pulldown_code = (
				sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) >>
				SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET);
			pin_config = TEGRA_PINCONF_PACK(
				TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH,
				pulldown_code);
			err = pinctrl_set_config_for_group_sel(tegra_host->pinctrl,
				tegra_host->drive_group_sel, pin_config);
			if (err)
				dev_err(mmc_dev(sdhci->mmc),
					"Failed to set pulldown codes %d err %d\n",
					pulldown_code, err);

			/* Calculate the pull up codes */
			pullup_code = pulldown_code + PULLUP_ADJUSTMENT_OFFSET;
			pin_config = TEGRA_PINCONF_PACK(
				TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH,
				pullup_code);
			/* Set the pull up code in the pinmux reg */
			err = pinctrl_set_config_for_group_sel(tegra_host->pinctrl,
				tegra_host->drive_group_sel, pin_config);
			if (err)
				dev_err(mmc_dev(sdhci->mmc),
					"Failed to set pullup codes %d err %d\n",
					pullup_code, err);
		}
	}

	/* Stamp the calibration time for the periodic recalibration check */
	if (tegra_host->plat->en_periodic_calib) {
		tegra_host->timestamp = ktime_get();
		sdhci->timestamp = ktime_get();
		sdhci->is_calibration_done = true;
	}
}
/*
 * tegra_sdhci_validate_sd2_0 - refuse 3.3V-only (SD 2.0) cards on SoCs whose
 * IO pad cannot tolerate 3.3V (NVQUIRK2_BROKEN_SD2_0_SUPPORT together with
 * plat->limit_vddio_max_volt): disables the IO regulators and logs an error.
 *
 * NOTE(review): several original lines are missing from this extract (gaps in
 * the embedded line numbers, e.g. the opening brace and return path); code is
 * left byte-identical to what was extracted.
 */
2134 static int tegra_sdhci_validate_sd2_0(struct sdhci_host *sdhci)
2136 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2137 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2138 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
2139 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2140 struct tegra_sdhci_platform_data *plat;
2143 plat = pdev->dev.platform_data;
2145 if ((soc_data->nvquirks2 & NVQUIRK2_BROKEN_SD2_0_SUPPORT) &&
2146 (plat->limit_vddio_max_volt)) {
2147 /* T210: Bug 1561291
2148 * Design issue where a cap connected to IO node is stressed
2149 * to 3.3v while it can only tolerate up to 1.8v.
/* Power off the IO rail; the card cannot be supported at 3.3V signalling. */
2151 rc = tegra_sdhci_configure_regulators(tegra_host,
2152 CONFIG_REG_DIS, 0, 0);
2154 dev_err(mmc_dev(sdhci->mmc),
2155 "Regulator disable failed %d\n", rc);
2156 dev_err(mmc_dev(sdhci->mmc),
2157 "SD cards with out 1.8V is not supported\n");
/*
 * tegra_sdhci_signal_voltage_switch - switch the host between 3.3V and 1.8V
 * IO signalling: update SDHCI_CTRL_VDD_180 in HOST_CONTROL2, enable the IO
 * rail if needed, set the regulator voltage (reverting to 3.3V if the 1.8V
 * switch fails), toggle the optional power GPIO, and update pad/pinctrl
 * settings on SoCs with NVQUIRK_UPDATE_PIN_CNTRL_REG.
 *
 * NOTE(review): `rc` is declared `unsigned int` yet receives negative error
 * codes from regulator helpers — works only because of the truthiness tests;
 * worth changing to `int` upstream.
 * NOTE(review): lines missing from this extract (embedded-number gaps, e.g.
 * declarations of `ctrl`/`set` and several closing braces); code untouched.
 */
2165 static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci,
2166 unsigned int signal_voltage)
2168 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2169 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2170 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
2171 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2172 struct tegra_sdhci_platform_data *plat;
2173 unsigned int min_uV = tegra_host->vddio_min_uv;
2174 unsigned int max_uV = tegra_host->vddio_max_uv;
2175 unsigned int rc = 0;
2179 plat = pdev->dev.platform_data;
2181 ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
2182 if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
2183 ctrl |= SDHCI_CTRL_VDD_180;
2184 min_uV = SDHOST_LOW_VOLT_MIN;
2185 max_uV = SDHOST_LOW_VOLT_MAX;
2186 } else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
2187 if (ctrl & SDHCI_CTRL_VDD_180)
2188 ctrl &= ~SDHCI_CTRL_VDD_180;
2191 /* Check if the slot can support the required voltage */
2192 if (min_uV > tegra_host->vddio_max_uv)
2195 /* Set/clear the 1.8V signalling */
2196 sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2198 if (soc_data->nvquirks2 & NVQUIRK2_SET_PAD_E_INPUT_VOL)
2199 tegra_sdhci_configure_e_input(sdhci, true);
/* Rail may still be off if the card was just inserted; enable it first. */
2201 if ((!tegra_host->is_rail_enabled) && (tegra_host->card_present)) {
2202 rc = tegra_sdhci_configure_regulators(tegra_host,
2203 CONFIG_REG_EN, 0, 0);
2205 dev_err(mmc_dev(sdhci->mmc),
2206 "Enable regulators failed %d\n", rc);
2210 /* Switch the I/O rail voltage */
2211 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_SET_VOLT,
2213 if (rc && (signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
2214 dev_err(mmc_dev(sdhci->mmc),
2215 "setting 1.8V failed %d. Revert to 3.3V\n", rc);
2216 signal_voltage = MMC_SIGNAL_VOLTAGE_330;
2217 rc = tegra_sdhci_configure_regulators(tegra_host,
2218 CONFIG_REG_SET_VOLT, tegra_host->vddio_min_uv,
2219 tegra_host->vddio_max_uv);
/* Optional board-level power GPIO mirrors the chosen signalling level. */
2221 if (gpio_is_valid(plat->power_gpio)) {
2222 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
2223 gpio_set_value(plat->power_gpio, 1);
2225 gpio_set_value(plat->power_gpio, 0);
2230 if (!(soc_data->nvquirks & NVQUIRK_UPDATE_PIN_CNTRL_REG))
2236 if (!plat->update_pinctrl_settings)
2239 set = (signal_voltage == MMC_SIGNAL_VOLTAGE_180) ? true : false;
2241 if (!IS_ERR_OR_NULL(tegra_host->pinctrl_sdmmc))
2242 tegra_sdhci_update_sdmmc_pinctrl_register(sdhci, set);
/*
 * tegra_sdhci_configure_regulators - central regulator/pad control for the
 * slot. @option selects the action:
 *   CONFIG_REG_EN       - enable vdd_slot/vdd_io rails (raising the pad
 *                         voltage first when the rail was last at 1.8V)
 *   CONFIG_REG_DIS      - drop signalling to 1.8V if needed, then disable
 *                         the rails
 *   CONFIG_REG_SET_VOLT - set vdd_io to [min_uV, max_uV], sequencing the
 *                         pwrdet pad control before a rise to 3.3V and
 *                         after a drop to 1.8V (NVQUIRK2_CONFIG_PWR_DET)
 *
 * NOTE(review): the `switch (option)` line, the CONFIG_REG_EN case label and
 * several braces/returns are missing from this extract (embedded-number
 * gaps); code left byte-identical.
 */
2247 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
2248 u8 option, int min_uV, int max_uV)
2251 int vddio_prev = -1;
2253 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2254 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
2255 struct sdhci_host *sdhci = dev_get_drvdata(tegra_host->dev);
2259 if (!tegra_host->is_rail_enabled) {
2260 if (soc_data->nvquirks2 & NVQUIRK2_SET_PAD_E_INPUT_VOL)
2261 tegra_sdhci_configure_e_input(sdhci, true);
2262 if (tegra_host->vdd_io_reg) {
2263 vddio_prev = regulator_get_voltage(
2264 tegra_host->vdd_io_reg);
/* Rail was parked at 1.8V: raise the pad voltage before enabling. */
2265 if (vddio_prev == SDHOST_LOW_VOLT_MAX) {
2266 if (plat->pwrdet_support &&
2267 tegra_host->sdmmc_padctrl)
2268 rc = padctrl_set_voltage(
2269 tegra_host->sdmmc_padctrl,
2273 if (tegra_host->vdd_slot_reg)
2274 rc = regulator_enable(tegra_host->vdd_slot_reg);
2275 if (tegra_host->vdd_io_reg)
2276 rc = regulator_enable(tegra_host->vdd_io_reg);
2277 tegra_host->is_rail_enabled = true;
2280 case CONFIG_REG_DIS:
2281 if (tegra_host->is_rail_enabled) {
2282 if (tegra_host->vdd_io_reg) {
2283 vddio_prev = regulator_get_voltage(
2284 tegra_host->vdd_io_reg);
/* Step the IO rail down to 1.8V signalling before cutting power. */
2285 if (vddio_prev > SDHOST_LOW_VOLT_MAX)
2286 tegra_sdhci_signal_voltage_switch(
2287 sdhci, MMC_SIGNAL_VOLTAGE_180);
2289 if (tegra_host->vdd_io_reg)
2290 rc = regulator_disable(tegra_host->vdd_io_reg);
2291 if (tegra_host->vdd_slot_reg)
2292 rc = regulator_disable(
2293 tegra_host->vdd_slot_reg);
2294 tegra_host->is_rail_enabled = false;
2297 case CONFIG_REG_SET_VOLT:
2298 if (tegra_host->vdd_io_reg) {
2299 if (soc_data->nvquirks2 & NVQUIRK2_CONFIG_PWR_DET) {
2300 vddio_prev = regulator_get_voltage(
2301 tegra_host->vdd_io_reg);
2302 /* set pwrdet sdmmc1 before set 3.3 V */
2303 if ((vddio_prev < min_uV) &&
2304 (min_uV >= SDHOST_HIGH_VOLT_2V8) &&
2305 plat->pwrdet_support &&
2306 tegra_host->sdmmc_padctrl) {
2307 rc = padctrl_set_voltage(
2308 tegra_host->sdmmc_padctrl,
2309 SDHOST_HIGH_VOLT_3V3);
2311 dev_err(mmc_dev(sdhci->mmc),
2312 "padcontrol set volt failed:"
2316 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
/* Allow the rail to settle before the pad switches (pwrdet boards). */
2319 if ((plat->pwrdet_support) &&
2320 (min_uV == SDHOST_LOW_VOLT_MIN))
2321 usleep_range(5000, 5500);
2322 if (soc_data->nvquirks2 & NVQUIRK2_CONFIG_PWR_DET) {
2323 vddio_new = regulator_get_voltage(
2324 tegra_host->vdd_io_reg);
2325 /* clear pwrdet sdmmc1 after set 1.8 V */
2326 if ((vddio_new <= vddio_prev) &&
2327 (vddio_new == SDHOST_LOW_VOLT_MAX) &&
2328 plat->pwrdet_support &&
2329 tegra_host->sdmmc_padctrl) {
2330 rc = padctrl_set_voltage(
2331 tegra_host->sdmmc_padctrl, vddio_new);
2333 dev_err(mmc_dev(sdhci->mmc),
2334 "padcontrol set volt failed:"
2341 pr_err("Invalid argument passed to reg config %d\n", option);
2347 static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask)
2349 unsigned long timeout;
2351 sdhci_writeb(sdhci, mask, SDHCI_SOFTWARE_RESET);
2353 /* Wait max 100 ms */
2356 /* hw clears the bit when it's done */
2357 while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & mask) {
2359 dev_err(mmc_dev(sdhci->mmc), "Reset 0x%x never"
2360 "completed.\n", (int)mask);
2367 tegra_sdhci_reset_exit(sdhci, mask);
/*
 * sdhci_tegra_set_tap_delay - program @tap_delay into the TAP_VALUE field of
 * SDHCI_VNDR_CLK_CTRL. The card clock is gated while the tap is changed and
 * re-enabled afterwards. On hosts using standard tuning the HW tap override
 * bit in SDHCI_VNDR_TUN_CTRL0_0 is cleared before and set after the write,
 * followed by a CMD+DATA reset.
 *
 * NOTE(review): several lines are missing from this extract (embedded-number
 * gaps, e.g. declarations of `vendor_ctrl`/`clk` and some braces); code left
 * byte-identical.
 */
2370 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
2371 unsigned int tap_delay)
2375 bool card_clk_enabled;
2377 /* Max tap delay value is 255 */
2378 if (tap_delay > MAX_TAP_VALUES) {
2379 dev_err(mmc_dev(sdhci->mmc),
2380 "Valid tap range (0-255). Setting tap value %d\n",
/* Gate the card clock before touching the tap field to avoid glitches. */
2386 card_clk_enabled = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL) &
2387 SDHCI_CLOCK_CARD_EN;
2389 if (card_clk_enabled) {
2390 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
2391 clk &= ~SDHCI_CLOCK_CARD_EN;
2392 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
2395 if (!(sdhci->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING)) {
2396 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_TUN_CTRL0_0);
2397 vendor_ctrl &= ~SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP;
2398 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_TUN_CTRL0_0);
2401 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
2402 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
2403 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
2404 vendor_ctrl |= (tap_delay << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
2405 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
2407 if (!(sdhci->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING)) {
2408 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_TUN_CTRL0_0);
2409 vendor_ctrl |= SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP;
2410 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_TUN_CTRL0_0);
2412 tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
2415 if (card_clk_enabled) {
2416 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
2417 clk |= SDHCI_CLOCK_CARD_EN;
2418 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
/*
 * sdhci_tegra_set_trim_delay - program @trim_delay into the TRIM_VALUE field
 * of SDHCI_VNDR_CLK_CTRL via read-modify-write.
 *
 * NOTE(review): the opening brace and `vendor_ctrl` declaration are missing
 * from this extract (embedded-number gap 2424-2426); code left byte-identical.
 */
2422 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
2423 unsigned int trim_delay)
2427 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
2428 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
2429 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
2430 vendor_ctrl |= (trim_delay << SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
2431 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * sdhci_tegra_sd_error_stats - bump the per-host error counters held in
 * tegra_host->sd_stat_head according to the interrupt-status bits set in
 * @int_status (command/data CRC errors and command/data timeouts).
 *
 * NOTE(review): no NULL check on `head` is visible here, and the opening
 * brace/return are missing from this extract; code left byte-identical.
 */
2434 static int sdhci_tegra_sd_error_stats(struct sdhci_host *host, u32 int_status)
2436 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2437 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2438 struct sdhci_tegra_sd_stats *head = tegra_host->sd_stat_head;
2440 if (int_status & SDHCI_INT_DATA_CRC)
2441 head->data_crc_count++;
2442 if (int_status & SDHCI_INT_CRC)
2443 head->cmd_crc_count++;
2444 if (int_status & SDHCI_INT_TIMEOUT)
2445 head->cmd_to_count++;
2446 if (int_status & SDHCI_INT_DATA_TIMEOUT)
2447 head->data_to_count++;
/*
 * sdhci_tegra_get_tuning_data - pick the tuning-data slot for @clock: if only
 * one tuning frequency is registered use slot 0; otherwise choose slot 0 for
 * clocks at or below the lowest supported frequency from the SoC's
 * tuning_freq_list, slot 1 above it.
 *
 * NOTE(review): the loop body that terminates the low-freq scan and the
 * return statement are missing from this extract; code left byte-identical.
 */
2451 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
2452 struct sdhci_host *sdhci, unsigned int clock)
2454 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2455 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2456 struct tegra_tuning_data *tuning_data;
2457 unsigned int low_freq;
2460 if (tegra_host->tuning_freq_count == 1) {
2461 tuning_data = &tegra_host->tuning_data[0];
2465 /* Get the lowest supported freq */
2466 for (i = 0; i < TUNING_FREQ_COUNT; ++i) {
2467 low_freq = tegra_host->soc_data->tuning_freq_list[i];
2472 if (clock <= low_freq)
2473 tuning_data = &tegra_host->tuning_data[0];
2475 tuning_data = &tegra_host->tuning_data[1];
/*
 * calculate_vmin_values - derive the Vmin-side tuning parameters (tap-to-tap
 * time, unit interval, and first tap-hole position) for the requested @vmin,
 * interpolating linearly between the characterized values at the tuning-data
 * Vmin and the boot voltage when @vmin lies between them. Results land in
 * tuning_data->calc_values; a summary is printed at the end.
 *
 * NOTE(review): closing braces and a few divisor lines (e.g. after the
 * thole_vmin_slope product) are missing from this extract; code left
 * byte-identical.
 */
2481 static void calculate_vmin_values(struct sdhci_host *sdhci,
2482 struct tegra_tuning_data *tuning_data, int vmin, int boot_mv)
2484 struct tuning_values *est_values = &tuning_data->est_values;
2485 struct tuning_values *calc_values = &tuning_data->calc_values;
2486 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2487 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2488 int vmin_slope, vmin_int, temp_calc_vmin;
2489 int t2t_vmax, t2t_vmin;
2490 int vmax_thole, vmin_thole;
2493 * If current vmin is equal to vmin or vmax of tuning data, use the
2494 * previously calculated estimated T2T values directly. Note that the
2495 * estimated T2T_vmax is not at Vmax specified in tuning data. It is
2496 * the T2T at the boot or max voltage for the current SKU. Hence,
2497 * boot_mv is used in place of t2t_coeffs->vmax.
2499 if (vmin == t2t_coeffs->vmin) {
2500 t2t_vmin = est_values->t2t_vmin;
2501 } else if (vmin == boot_mv) {
2502 t2t_vmin = est_values->t2t_vmax;
2505 * For any intermediate voltage between boot voltage and vmin
2506 * of tuning data, calculate the slope and intercept from the
2507 * t2t at boot_mv and vmin and calculate the actual values.
/* Work in 1/T2T space: 1/T2T is linear in voltage. */
2509 t2t_vmax = 1000 / est_values->t2t_vmax;
2510 t2t_vmin = 1000 / est_values->t2t_vmin;
2511 vmin_slope = ((t2t_vmax - t2t_vmin) * 1000) /
2512 (boot_mv - t2t_coeffs->vmin);
2513 vmin_int = (t2t_vmax * 1000 - (vmin_slope * boot_mv)) / 1000;
2514 t2t_vmin = (vmin_slope * vmin) / 1000 + vmin_int;
2515 t2t_vmin = (1000 / t2t_vmin);
/* Scale the estimated Vmin T2T by the calculated/estimated Vmax ratio. */
2518 calc_values->t2t_vmin = (t2t_vmin * calc_values->t2t_vmax) /
2519 est_values->t2t_vmax;
2521 calc_values->ui_vmin = (1000000 / (tuning_data->freq_hz / 1000000)) /
2522 calc_values->t2t_vmin;
2524 /* Calculate the vmin tap hole at vmin of tuning data */
2525 temp_calc_vmin = (est_values->t2t_vmin * calc_values->t2t_vmax) /
2526 est_values->t2t_vmax;
2527 vmin_thole = (thole_coeffs->thole_vmin_int -
2528 (thole_coeffs->thole_vmin_slope * temp_calc_vmin)) /
2530 vmax_thole = calc_values->vmax_thole;
2532 if (vmin == t2t_coeffs->vmin) {
2533 calc_values->vmin_thole = vmin_thole;
2534 } else if (vmin == boot_mv) {
2535 calc_values->vmin_thole = vmax_thole;
2538 * Interpolate the tap hole for any intermediate voltage.
2539 * Calculate the slope and intercept from the available data
2540 * and use them to calculate the actual values.
2542 vmin_slope = ((vmax_thole - vmin_thole) * 1000) /
2543 (boot_mv - t2t_coeffs->vmin);
2544 vmin_int = (vmax_thole * 1000 - (vmin_slope * boot_mv)) / 1000;
2545 calc_values->vmin_thole = (vmin_slope * vmin) / 1000 + vmin_int;
2548 /* Adjust the partial win start for Vmin boundary */
2549 if (tuning_data->is_partial_win_valid)
2550 tuning_data->final_tap_data[0].win_start =
2551 (tuning_data->final_tap_data[0].win_start *
2552 tuning_data->calc_values.t2t_vmax) /
2553 tuning_data->calc_values.t2t_vmin;
2555 pr_info("**********Tuning values*********\n");
2556 pr_info("**estimated values**\n");
2557 pr_info("T2T_Vmax %d, T2T_Vmin %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
2558 est_values->t2t_vmax, est_values->t2t_vmin,
2559 est_values->vmax_thole, est_values->ui);
2560 pr_info("**Calculated values**\n");
2561 pr_info("T2T_Vmax %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
2562 calc_values->t2t_vmax, calc_values->vmax_thole,
2564 pr_info("T2T_Vmin %d, 1'st_hole_Vmin %d, UI_Vmin %d\n",
2565 calc_values->t2t_vmin, calc_values->vmin_thole,
2566 calc_values->ui_vmin);
2567 pr_info("***********************************\n");
/*
 * slide_window_start - move a tap window's start edge inward to add safety
 * margin: boundary-start edges advance by one T2T period, hole edges advance
 * by 7% of the hole position plus a SoC-specific margin. The result is
 * clamped to MAX_TAP_VALUES.
 *
 * NOTE(review): some lines (e.g. the `tap_margin` declaration and a
 * conditional around the two T2T variants) are missing from this extract;
 * code left byte-identical.
 */
2570 static int slide_window_start(struct sdhci_host *sdhci,
2571 struct tegra_tuning_data *tuning_data,
2572 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
2576 if (edge_attr == WIN_EDGE_BOUN_START) {
2578 tap_value += (1000 / tuning_data->calc_values.t2t_vmin);
2580 tap_value += (1000 / tuning_data->calc_values.t2t_vmax);
2581 } else if (edge_attr == WIN_EDGE_HOLE) {
2582 if (tap_hole >= 0) {
2583 tap_margin = get_tuning_tap_hole_margins(sdhci,
2584 tuning_data->calc_values.t2t_vmax);
2585 tap_value += ((7 * tap_hole) / 100) + tap_margin;
2589 if (tap_value > MAX_TAP_VALUES)
2590 tap_value = MAX_TAP_VALUES;
/*
 * slide_window_end - move a tap window's end edge inward to add safety
 * margin: boundary-end edges are rescaled by the Vmax/Vmin T2T ratio and
 * pulled back by one Vmin T2T period; hole edges are pulled back by 7% of
 * the hole position plus the SoC-specific margin.
 *
 * NOTE(review): the `tap_margin` declaration and the trailing return are
 * missing from this extract; code left byte-identical.
 */
2595 static int slide_window_end(struct sdhci_host *sdhci,
2596 struct tegra_tuning_data *tuning_data,
2597 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
2601 if (edge_attr == WIN_EDGE_BOUN_END) {
2602 tap_value = (tap_value * tuning_data->calc_values.t2t_vmax) /
2603 tuning_data->calc_values.t2t_vmin;
2604 tap_value -= (1000 / tuning_data->calc_values.t2t_vmin);
2605 } else if (edge_attr == WIN_EDGE_HOLE) {
2606 if (tap_hole >= 0) {
2607 tap_value = tap_hole;
2608 tap_margin = get_tuning_tap_hole_margins(sdhci,
2609 tuning_data->calc_values.t2t_vmin);
2611 tap_value -= ((7 * tap_hole) / 100) + tap_margin;
/*
 * adjust_window_boundaries - walk every valid tap window in @temp_tap_data
 * and tighten both edges via slide_window_start()/slide_window_end(), using
 * the Vmax-side hole position for start edges and the Vmin-side hole
 * position for end edges (each advanced by (hole_pos - 1) unit intervals).
 * Prints the final windows when done.
 *
 * NOTE(review): the loop-variable declaration and some call-argument lines
 * are missing from this extract; code left byte-identical.
 */
2616 static int adjust_window_boundaries(struct sdhci_host *sdhci,
2617 struct tegra_tuning_data *tuning_data,
2618 struct tap_window_data *temp_tap_data)
2620 struct tap_window_data *tap_data;
2621 int vmin_tap_hole = -1;
2622 int vmax_tap_hole = -1;
2625 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2626 tap_data = &temp_tap_data[i];
2627 /* Update with next hole if first hole is taken care of */
2628 if (tap_data->win_start_attr == WIN_EDGE_HOLE)
2629 vmax_tap_hole = tuning_data->calc_values.vmax_thole +
2630 (tap_data->hole_pos - 1) *
2631 tuning_data->calc_values.ui;
2632 tap_data->win_start = slide_window_start(sdhci, tuning_data,
2633 tap_data->win_start, tap_data->win_start_attr,
2636 /* Update with next hole if first hole is taken care of */
2637 if (tap_data->win_end_attr == WIN_EDGE_HOLE)
2638 vmin_tap_hole = tuning_data->calc_values.vmin_thole +
2639 (tap_data->hole_pos - 1) *
2640 tuning_data->calc_values.ui_vmin;
2641 tap_data->win_end = slide_window_end(sdhci, tuning_data,
2642 tap_data->win_end, tap_data->win_end_attr,
2646 pr_info("***********final tuning windows**********\n");
2647 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2648 tap_data = &temp_tap_data[i];
2649 pr_info("win[%d]: %d - %d\n", i, tap_data->win_start,
2652 pr_info("********************************\n");
/*
 * find_best_tap_value - select the widest usable tap window and the best tap
 * inside it. A leading partial window (index 0 with is_partial_win_valid) is
 * scored as min(width, 2 * win_end). For a partial window the best tap is
 * the midpoint measured from win_end; otherwise it is placed inside the
 * window weighted by the Vmin/(Vmin+Vmax) T2T ratio. Returns the best tap
 * value, or an error when no window opens at @vmin.
 *
 * NOTE(review): a few lines (e.g. the `sel_win` assignment on a new best
 * window, the negative-clamp, and the final return) are missing from this
 * extract; code left byte-identical.
 */
2656 static int find_best_tap_value(struct tegra_tuning_data *tuning_data,
2657 struct tap_window_data *temp_tap_data, int vmin)
2659 struct tap_window_data *tap_data;
2660 u8 i = 0, sel_win = 0;
2661 int pref_win = 0, curr_win_size = 0;
2662 int best_tap_value = 0;
2664 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2665 tap_data = &temp_tap_data[i];
2666 if (!i && tuning_data->is_partial_win_valid) {
2667 pref_win = tap_data->win_end - tap_data->win_start;
2668 if ((tap_data->win_end * 2) < pref_win)
2669 pref_win = tap_data->win_end * 2;
2672 curr_win_size = tap_data->win_end - tap_data->win_start;
2673 if ((curr_win_size > 0) && (curr_win_size > pref_win)) {
2674 pref_win = curr_win_size;
2680 if (pref_win <= 0) {
2681 pr_err("No window opening for %d vmin\n", vmin);
2685 tap_data = &temp_tap_data[sel_win];
2686 if (!sel_win && tuning_data->is_partial_win_valid) {
2688 best_tap_value = tap_data->win_end - (pref_win / 2);
2689 if (best_tap_value < 0)
2692 best_tap_value = tap_data->win_start +
2693 ((tap_data->win_end - tap_data->win_start) *
2694 tuning_data->calc_values.t2t_vmin) /
2695 (tuning_data->calc_values.t2t_vmin +
2696 tuning_data->calc_values.t2t_vmax);
2699 pr_info("best tap win - (%d-%d), best tap value %d\n",
2700 tap_data->win_start, tap_data->win_end, best_tap_value);
2701 return best_tap_value;
/*
 * sdhci_tegra_calculate_best_tap - iterate over candidate Vmin values
 * (starting from the DVFS-predicted voltage for the tuning frequency, or the
 * boot vcore when prediction fails), recomputing the Vmin tuning values and
 * adjusted windows each pass, until find_best_tap_value() yields a usable
 * tap or Vmin exceeds boot vcore. On success records the tap and, if Vmin
 * changed, asks DVFS to pin fmax at the new Vmin (tolerating -EPERM/-ENOSYS
 * when overrides are disabled).
 *
 * NOTE(review): the loop header, the Vmin stepping between iterations, and
 * several closing braces/returns are missing from this extract; code left
 * byte-identical.
 */
2704 static int sdhci_tegra_calculate_best_tap(struct sdhci_host *sdhci,
2705 struct tegra_tuning_data *tuning_data)
2707 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2708 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2709 struct tap_window_data *temp_tap_data = NULL;
2710 int vmin, curr_vmin, best_tap_value = 0;
2713 curr_vmin = tegra_dvfs_predict_mv_at_hz_no_tfloor(pltfm_host->clk,
2714 tuning_data->freq_hz);
2716 curr_vmin = tegra_host->boot_vcore_mv;
2720 SDHCI_TEGRA_DBG("%s: checking for win opening with vmin %d\n",
2721 mmc_hostname(sdhci->mmc), vmin);
2722 if ((best_tap_value < 0) &&
2723 (vmin > tegra_host->boot_vcore_mv)) {
2724 dev_err(mmc_dev(sdhci->mmc),
2725 "No best tap for any vcore range\n");
2726 kfree(temp_tap_data);
2727 temp_tap_data = NULL;
2731 calculate_vmin_values(sdhci, tuning_data, vmin,
2732 tegra_host->boot_vcore_mv);
/* Scratch copy of the final windows, (re)allocated on first pass only. */
2734 if (temp_tap_data == NULL) {
2735 temp_tap_data = kzalloc(sizeof(struct tap_window_data) *
2736 tuning_data->num_of_valid_tap_wins, GFP_KERNEL);
2737 if (IS_ERR_OR_NULL(temp_tap_data)) {
2738 dev_err(mmc_dev(sdhci->mmc),
2739 "No memory for final tap value calculation\n");
2744 memcpy(temp_tap_data, tuning_data->final_tap_data,
2745 sizeof(struct tap_window_data) *
2746 tuning_data->num_of_valid_tap_wins);
2748 adjust_window_boundaries(sdhci, tuning_data, temp_tap_data);
2750 best_tap_value = find_best_tap_value(tuning_data,
2751 temp_tap_data, vmin);
2753 if (best_tap_value < 0)
2755 } while (best_tap_value < 0);
2757 tuning_data->best_tap_value = best_tap_value;
2758 tuning_data->nom_best_tap_value = best_tap_value;
2761 * Set the new vmin if there is any change. If dvfs overrides are
2762 * disabled, then print the error message but continue execution
2763 * rather than disabling tuning altogether.
2765 if ((tuning_data->best_tap_value >= 0) && (curr_vmin != vmin)) {
2766 err = tegra_dvfs_set_fmax_at_vmin(pltfm_host->clk,
2767 tuning_data->freq_hz, vmin);
2768 if ((err == -EPERM) || (err == -ENOSYS)) {
2770 * tegra_dvfs_set_fmax_at_vmin: will return EPERM or
2771 * ENOSYS, when DVFS override is not enabled, continue
2772 * tuning with default core voltage.
2775 "dvfs overrides disabled. Vmin not updated\n");
2779 kfree(temp_tap_data);
/*
 * sdhci_tegra_issue_tuning_cmd - send one tuning command (CMD19/CMD21, per
 * tegra_host->tuning_opcode) directly via register writes: wait for the
 * CMD/DATA inhibit bits, clear TUNED_CLK, set EXEC_TUNING, program block
 * size/timeout/transfer mode, issue the command, then check the interrupt
 * status. DATA_AVAIL without DATA_CRC marks a pass; otherwise CMD and DATA
 * lines are reset. Success is confirmed when EXEC_TUNING has cleared and
 * TUNED_CLK is set.
 *
 * NOTE(review): the split literals "Controller never" "released..." join
 * with no space, printing "neverreleased" — needs a space at the split.
 * NOTE(review): timeout decrements/delays, returns, and some braces are
 * missing from this extract; code left byte-identical.
 */
2783 static int sdhci_tegra_issue_tuning_cmd(struct sdhci_host *sdhci)
2785 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2786 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2790 unsigned int timeout = 10;
/* Abort early if the (removable) card was pulled mid-tuning. */
2794 if (gpio_is_valid(tegra_host->plat->cd_gpio) &&
2795 (gpio_get_value(tegra_host->plat->cd_gpio) != 0)) {
2796 dev_err(mmc_dev(sdhci->mmc), "device removed during tuning\n");
2799 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
2800 while (sdhci_readl(sdhci, SDHCI_PRESENT_STATE) & mask) {
2802 dev_err(mmc_dev(sdhci->mmc), "Controller never"
2803 "released inhibit bit(s).\n");
2811 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2812 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2813 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2815 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2816 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2817 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2820 * In response to CMD19, the card sends 64 bytes of tuning
2821 * block to the Host Controller. So we set the block size
2823 * In response to CMD21, the card sends 128 bytes of tuning
2824 * block for MMC_BUS_WIDTH_8 and 64 bytes for MMC_BUS_WIDTH_4
2825 * to the Host Controller. So we set the block size to 64 here.
2827 sdhci_writew(sdhci, SDHCI_MAKE_BLKSZ(7, tegra_host->tuning_bsize),
2830 sdhci_writeb(sdhci, 0xE, SDHCI_TIMEOUT_CONTROL);
2832 sdhci_writew(sdhci, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2834 sdhci_writel(sdhci, 0x0, SDHCI_ARGUMENT);
2836 /* Set the cmd flags */
2837 flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_DATA;
2838 /* Issue the command */
2839 sdhci->command = SDHCI_MAKE_CMD(tegra_host->tuning_opcode, flags);
2840 sdhci_writew(sdhci, sdhci->command, SDHCI_COMMAND);
2846 intstatus = sdhci_readl(sdhci, SDHCI_INT_STATUS);
/* Write-1-to-clear the latched interrupt status before inspecting it. */
2848 sdhci_writel(sdhci, intstatus, SDHCI_INT_STATUS);
2853 if ((intstatus & SDHCI_INT_DATA_AVAIL) &&
2854 !(intstatus & SDHCI_INT_DATA_CRC)) {
2856 sdhci->tuning_done = 1;
2858 tegra_sdhci_reset(sdhci, SDHCI_RESET_DATA);
2859 tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD);
2863 if (sdhci->tuning_done) {
2864 sdhci->tuning_done = 0;
2865 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2866 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING) &&
2867 (ctrl & SDHCI_CTRL_TUNED_CLK))
/*
 * sdhci_tegra_scan_tap_values - starting at @starting_tap, set each tap
 * delay in turn and issue a tuning command, stopping at the first tap whose
 * pass/fail state differs from @expect_failure (each result is retried
 * TUNING_RETRIES times before being trusted). -ENOMEDIUM from the tuning
 * command (card removed) aborts the scan. The scan runs up to
 * MAX_TAP_VALUES.
 *
 * NOTE(review): the retry-decrement branch, `*status` update and return are
 * missing from this extract; code left byte-identical.
 */
2876 static int sdhci_tegra_scan_tap_values(struct sdhci_host *sdhci,
2877 unsigned int starting_tap, bool expect_failure, int *status)
2879 unsigned int tap_value = starting_tap;
2881 unsigned int retry = TUNING_RETRIES;
2884 /* Set the tap delay */
2885 sdhci_tegra_set_tap_delay(sdhci, tap_value);
2887 /* Run frequency tuning */
2888 err = sdhci_tegra_issue_tuning_cmd(sdhci);
2889 if (err == -ENOMEDIUM) {
2897 retry = TUNING_RETRIES;
2898 if ((expect_failure && !err) ||
2899 (!expect_failure && err))
2903 } while (tap_value <= MAX_TAP_VALUES);
/*
 * calculate_actual_tuning_values - from the measured unit interval, compute
 * the actual Vmax tap-to-tap time and the first tap-hole position at
 * @voltage_mv, interpolating between the characterized Vmin/Vmax hole values
 * for intermediate voltages. Results go into tuning_data->calc_values.
 *
 * NOTE(review): divisor lines after the slope products and the closing
 * brace/return are missing from this extract; the 1250 constant in the
 * intercept is unexplained here — presumably a characterization voltage,
 * TODO confirm. Code left byte-identical.
 */
2909 static int calculate_actual_tuning_values(int speedo,
2910 struct tegra_tuning_data *tuning_data, int voltage_mv)
2912 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2913 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2914 struct tuning_values *calc_values = &tuning_data->calc_values;
2916 int vmax_thole, vmin_thole;
2918 /* T2T_Vmax = (1000000/freq_MHz)/Calc_UI */
2919 calc_values->t2t_vmax = (1000000 / (tuning_data->freq_hz / 1000000)) /
2923 * Interpolate the tap hole.
2924 * Vmax_1'st_hole = (Calc_T2T_Vmax*(-thole_slope)+thole_tint.
2926 vmax_thole = (thole_coeffs->thole_vmax_int -
2927 (thole_coeffs->thole_vmax_slope * calc_values->t2t_vmax)) /
2929 vmin_thole = (thole_coeffs->thole_vmin_int -
2930 (thole_coeffs->thole_vmin_slope * calc_values->t2t_vmax)) /
2932 if (voltage_mv == t2t_coeffs->vmin) {
2933 calc_values->vmax_thole = vmin_thole;
2934 } else if (voltage_mv == t2t_coeffs->vmax) {
2935 calc_values->vmax_thole = vmax_thole;
2937 slope = (vmax_thole - vmin_thole) /
2938 (t2t_coeffs->vmax - t2t_coeffs->vmin);
2939 inpt = ((vmax_thole * 1000) - (slope * 1250)) / 1000;
2940 calc_values->vmax_thole = slope * voltage_mv + inpt;
2947 * All coeffs are filled up in the table after multiplying by 1000. So, all
2948 * calculations should have a divide by 1000 at the end.
/*
 * calculate_estimated_tuning_values - estimate T2T, unit interval, and first
 * tap-hole at @voltage_mv from the SKU's speedo value and the fused
 * linear coefficients. 1/T2T and tap-hole are treated as linear in voltage;
 * intermediate voltages are interpolated between the Vmin and Vmax
 * characterization points. Results go into tuning_data->est_values.
 *
 * NOTE(review): some declarations (slope/inpt), closing braces, and the
 * return are missing from this extract; code left byte-identical.
 */
2950 static int calculate_estimated_tuning_values(int speedo,
2951 struct tegra_tuning_data *tuning_data, int voltage_mv)
2953 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2954 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2955 struct tuning_values *est_values = &tuning_data->est_values;
2957 int vmax_t2t, vmin_t2t;
2958 int vmax_thole, vmin_thole;
2960 /* Est_T2T_Vmax = (speedo*(-t2t_slope)+t2t_int */
2961 vmax_t2t = (t2t_coeffs->t2t_vmax_int - (speedo *
2962 t2t_coeffs->t2t_vmax_slope)) / 1000;
2963 vmin_t2t = (t2t_coeffs->t2t_vmin_int - (speedo *
2964 t2t_coeffs->t2t_vmin_slope)) / 1000;
2965 est_values->t2t_vmin = vmin_t2t;
2967 if (voltage_mv == t2t_coeffs->vmin) {
2968 est_values->t2t_vmax = vmin_t2t;
2969 } else if (voltage_mv == t2t_coeffs->vmax) {
2970 est_values->t2t_vmax = vmax_t2t;
/* Interpolate in 1/T2T space, which is linear in voltage. */
2972 vmax_t2t = PRECISION_FOR_ESTIMATE / vmax_t2t;
2973 vmin_t2t = PRECISION_FOR_ESTIMATE / vmin_t2t;
2975 * For any intermediate voltage between 0.95V and max vcore,
2976 * calculate the slope and intercept from the T2T and tap hole
2977 * values of 0.95V and max vcore and use them to calculate the
2978 * actual values. 1/T2T is a linear function of voltage.
2980 slope = ((vmax_t2t - vmin_t2t) * PRECISION_FOR_ESTIMATE) /
2981 (t2t_coeffs->vmax - t2t_coeffs->vmin);
2982 inpt = (vmax_t2t * PRECISION_FOR_ESTIMATE -
2983 (slope * t2t_coeffs->vmax)) / PRECISION_FOR_ESTIMATE;
2984 est_values->t2t_vmax = ((slope * voltage_mv) /
2985 PRECISION_FOR_ESTIMATE + inpt);
2986 est_values->t2t_vmax = (PRECISION_FOR_ESTIMATE /
2987 est_values->t2t_vmax);
2990 /* Est_UI = (1000000/freq_MHz)/Est_T2T_Vmax */
2991 est_values->ui = (1000000 / (thole_coeffs->freq_khz / 1000)) /
2992 est_values->t2t_vmax;
2995 * Est_1'st_hole = (Est_T2T_Vmax*(-thole_slope)) + thole_int.
2997 vmax_thole = (thole_coeffs->thole_vmax_int -
2998 (thole_coeffs->thole_vmax_slope * est_values->t2t_vmax)) / 1000;
2999 vmin_thole = (thole_coeffs->thole_vmin_int -
3000 (thole_coeffs->thole_vmin_slope * est_values->t2t_vmax)) / 1000;
3002 if (voltage_mv == t2t_coeffs->vmin) {
3003 est_values->vmax_thole = vmin_thole;
3004 } else if (voltage_mv == t2t_coeffs->vmax) {
3005 est_values->vmax_thole = vmax_thole;
3008 * For any intermediate voltage between 0.95V and max vcore,
3009 * calculate the slope and intercept from the t2t and tap hole
3010 * values of 0.95V and max vcore and use them to calculate the
3011 * actual values. Tap hole is a linear function of voltage.
3013 slope = ((vmax_thole - vmin_thole) * PRECISION_FOR_ESTIMATE) /
3014 (t2t_coeffs->vmax - t2t_coeffs->vmin);
3015 inpt = (vmax_thole * PRECISION_FOR_ESTIMATE -
3016 (slope * t2t_coeffs->vmax)) / PRECISION_FOR_ESTIMATE;
3017 est_values->vmax_thole = (slope * voltage_mv) /
3018 PRECISION_FOR_ESTIMATE + inpt;
3020 est_values->vmin_thole = vmin_thole;
3026 * Insert the calculated holes and get the final tap windows
3027 * with the boundaries and holes set.
/*
 * adjust_holes_in_tap_windows - split each measured tap window around the
 * predicted tap holes (first hole at calc_values->vmax_thole, subsequent
 * holes one unit interval apart), producing tuning_data->final_tap_data
 * with WIN_EDGE_HOLE edge attributes and updating num_of_valid_tap_wins.
 *
 * NOTE(review): the devm_kzalloc size is a hard-coded 42-entry table —
 * presumably a worst-case bound, TODO confirm against num_of_valid_tap_wins
 * growth. Loop header, hole_pos updates and several braces are missing from
 * this extract; code left byte-identical.
 */
3029 static int adjust_holes_in_tap_windows(struct sdhci_host *sdhci,
3030 struct tegra_tuning_data *tuning_data)
3032 struct tap_window_data *tap_data;
3033 struct tap_window_data *final_tap_data;
3034 struct tuning_values *calc_values = &tuning_data->calc_values;
3035 int tap_hole, size = 0;
3036 u8 i = 0, j = 0, num_of_wins, hole_pos = 0;
3038 tuning_data->final_tap_data =
3039 devm_kzalloc(mmc_dev(sdhci->mmc),
3040 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
3041 if (IS_ERR_OR_NULL(tuning_data->final_tap_data)) {
3042 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
3046 num_of_wins = tuning_data->num_of_valid_tap_wins;
3047 tap_hole = calc_values->vmax_thole;
3050 tap_data = &tuning_data->tap_data[i];
3051 final_tap_data = &tuning_data->final_tap_data[j];
/* Hole falls before this window: advance to the next predicted hole. */
3052 if (tap_hole < tap_data->win_start) {
3053 tap_hole += calc_values->ui;
/* Hole falls after this window: copy the window through unchanged. */
3056 } else if (tap_hole > tap_data->win_end) {
3057 memcpy(final_tap_data, tap_data,
3058 sizeof(struct tap_window_data));
/* Hole falls inside this window: split it at the hole. */
3063 } else if ((tap_hole >= tap_data->win_start) &&
3064 (tap_hole <= tap_data->win_end)) {
3065 size = tap_data->win_end - tap_data->win_start;
3068 &tuning_data->final_tap_data[j];
3069 if (tap_hole == tap_data->win_start) {
3070 final_tap_data->win_start =
3072 final_tap_data->win_start_attr =
3074 final_tap_data->hole_pos = hole_pos;
3075 tap_hole += calc_values->ui;
3078 final_tap_data->win_start =
3079 tap_data->win_start;
3080 final_tap_data->win_start_attr =
3081 WIN_EDGE_BOUN_START;
3083 if (tap_hole <= tap_data->win_end) {
3084 final_tap_data->win_end = tap_hole - 1;
3085 final_tap_data->win_end_attr =
3087 final_tap_data->hole_pos = hole_pos;
3088 tap_data->win_start = tap_hole;
3089 } else if (tap_hole > tap_data->win_end) {
3090 final_tap_data->win_end =
3092 final_tap_data->win_end_attr =
3094 tap_data->win_start =
3097 size = tap_data->win_end - tap_data->win_start;
3103 } while (num_of_wins > 0);
3105 /* Update the num of valid wins count after tap holes insertion */
3106 tuning_data->num_of_valid_tap_wins = j;
3108 pr_info("********tuning windows after inserting holes*****\n");
3109 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
3110 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
3111 final_tap_data = &tuning_data->final_tap_data[i];
3112 pr_info("win[%d]:%d(%d) - %d(%d)\n", i,
3113 final_tap_data->win_start,
3114 final_tap_data->win_start_attr,
3115 final_tap_data->win_end, final_tap_data->win_end_attr);
3117 pr_info("***********************************************\n");
3123 * Insert the boundaries from negative margin calculations into the windows
/*
 * insert_boundaries_in_tap_windows - split the measured tap windows at the
 * periodic boundary positions derived from @boun_end (repeating every unit
 * interval), writing the result back into tuning_data->tap_data and
 * updating num_of_valid_tap_wins. A boundary inside a window yields three
 * windows: before-boundary, the single-tap boundary window, and
 * after-boundary.
 *
 * NOTE(review): same hard-coded 42-entry devm_kzalloc as the hole-insertion
 * helper — presumably a worst-case bound, TODO confirm. Loop header,
 * `curr_boun` declaration and several index/brace lines are missing from
 * this extract; code left byte-identical.
 */
3126 static int insert_boundaries_in_tap_windows(struct sdhci_host *sdhci,
3127 struct tegra_tuning_data *tuning_data, u8 boun_end)
3129 struct tap_window_data *tap_data;
3130 struct tap_window_data *new_tap_data;
3131 struct tap_window_data *temp_tap_data;
3132 struct tuning_values *calc_values = &tuning_data->calc_values;
3134 u8 i = 0, j = 0, num_of_wins;
3135 bool get_next_boun = false;
3137 temp_tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
3138 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
3139 if (IS_ERR_OR_NULL(temp_tap_data)) {
3140 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
3144 num_of_wins = tuning_data->num_of_valid_tap_wins;
3145 curr_boun = boun_end % calc_values->ui;
3147 if (get_next_boun) {
3148 curr_boun += calc_values->ui;
3150 * If the boun_end exceeds the intial boundary end,
3151 * just copy remaining windows and return.
3153 if (curr_boun >= boun_end)
3154 curr_boun += MAX_TAP_VALUES;
3157 tap_data = &tuning_data->tap_data[i];
3158 new_tap_data = &temp_tap_data[j];
/* Boundary precedes this window: move on to the next boundary. */
3159 if (curr_boun <= tap_data->win_start) {
3160 get_next_boun = true;
/* Boundary is past this window: copy the window through unchanged. */
3162 } else if (curr_boun >= tap_data->win_end) {
3163 memcpy(new_tap_data, tap_data,
3164 sizeof(struct tap_window_data));
3168 get_next_boun = false;
/* Boundary lands inside this window: split into three windows. */
3170 } else if ((curr_boun >= tap_data->win_start) &&
3171 (curr_boun <= tap_data->win_end)) {
3172 new_tap_data->win_start = tap_data->win_start;
3173 new_tap_data->win_start_attr =
3174 tap_data->win_start_attr;
3175 new_tap_data->win_end = curr_boun - 1;
3176 new_tap_data->win_end_attr =
3177 tap_data->win_end_attr;
3179 new_tap_data = &temp_tap_data[j];
3180 new_tap_data->win_start = curr_boun;
3181 new_tap_data->win_end = curr_boun;
3182 new_tap_data->win_start_attr =
3183 WIN_EDGE_BOUN_START;
3184 new_tap_data->win_end_attr =
3187 new_tap_data = &temp_tap_data[j];
3188 new_tap_data->win_start = curr_boun + 1;
3189 new_tap_data->win_start_attr = WIN_EDGE_BOUN_START;
3190 new_tap_data->win_end = tap_data->win_end;
3191 new_tap_data->win_end_attr =
3192 tap_data->win_end_attr;
3196 get_next_boun = true;
3198 } while (num_of_wins > 0);
3200 /* Update the num of valid wins count after tap holes insertion */
3201 tuning_data->num_of_valid_tap_wins = j;
3203 memcpy(tuning_data->tap_data, temp_tap_data,
3204 j * sizeof(struct tap_window_data));
3205 SDHCI_TEGRA_DBG("***tuning windows after inserting boundaries***\n");
3206 SDHCI_TEGRA_DBG("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
3207 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
3208 new_tap_data = &tuning_data->tap_data[i];
3209 SDHCI_TEGRA_DBG("win[%d]:%d(%d) - %d(%d)\n", i,
3210 new_tap_data->win_start,
3211 new_tap_data->win_start_attr,
3212 new_tap_data->win_end, new_tap_data->win_end_attr);
3214 SDHCI_TEGRA_DBG("***********************************************\n");
/*
 * Scans every tap value with the tuning command to discover passing tap
 * windows, derives per-UI (unit interval) sizes from the window boundaries,
 * discredits invalid/partial UIs, computes an average calculated UI, handles
 * negative-margin adjustment of a valid partial first window, and finally
 * inserts boundary and hole entries into the window list.
 * Runs with sdhci->lock held (irqsave) during the actual tap scan.
 * NOTE(review): lossy extraction — many original lines (declarations such as
 * i/j/curr_clock, braces, returns, some assignments) are missing from this
 * view; comments only describe what the visible lines establish.
 */
3220 * Scan for all tap values and get all passing tap windows.
3222 static int sdhci_tegra_get_tap_window_data(struct sdhci_host *sdhci,
3223 struct tegra_tuning_data *tuning_data)
3225 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3226 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3227 struct tap_window_data *tap_data;
/*
 * NOTE(review): tuning_ui is fixed at 10 entries while tap_data below is
 * allocated for 42 windows; if more than 10 boundary-ended windows are
 * found, tuning_ui[num_of_uis] would overflow — verify an out-of-view
 * bound exists.
 */
3228 struct tuning_ui tuning_ui[10];
3229 int err = 0, partial_win_start = 0, temp_margin = 0, tap_value;
3230 unsigned int calc_ui = 0;
3231 u8 prev_boundary_end = 0, num_of_wins = 0;
3232 u8 num_of_uis = 0, valid_num_uis = 0;
3233 u8 ref_ui, first_valid_full_win = 0;
3234 u8 boun_end = 0, next_boun_end = 0;
3236 bool valid_ui_found = false;
3237 unsigned long flags;
3240 * Assume there are a max of 10 windows and allocate tap window
3241 * structures for the same. If there are more windows, the array
3242 * size can be adjusted later using realloc.
/*
 * NOTE(review): comment above says 10 but 42 entries are allocated;
 * devm_kzalloc() returns NULL (not ERR_PTR) on failure, so the
 * IS_ERR_OR_NULL() test is broader than needed.
 */
3244 tuning_data->tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
3245 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
3246 if (IS_ERR_OR_NULL(tuning_data->tap_data)) {
3247 dev_err(mmc_dev(sdhci->mmc), "No memory for tap data\n");
/* Scan loop: taps are probed under the host lock with IRQs saved */
3251 spin_lock_irqsave(&sdhci->lock, flags);
3254 tap_data = &tuning_data->tap_data[num_of_wins];
3255 /* Get the window start */
3256 tap_value = sdhci_tegra_scan_tap_values(sdhci, tap_value, true,
3258 if ((tap_value < 0) && (err == -ENOMEDIUM)) {
3259 spin_unlock_irqrestore(&sdhci->lock, flags);
3262 tap_data->win_start = min_t(u8, tap_value, MAX_TAP_VALUES);
3264 if (tap_value >= MAX_TAP_VALUES) {
3265 /* If it's first iteration, then all taps failed */
3267 dev_err(mmc_dev(sdhci->mmc),
3268 "All tap values(0-255) failed\n");
3269 spin_unlock_irqrestore(&sdhci->lock, flags);
3272 /* All windows obtained */
3277 /* Get the window end */
3278 tap_value = sdhci_tegra_scan_tap_values(sdhci,
3279 tap_value, false, &err);
3280 if ((tap_value < 0) && (err == -ENOMEDIUM)) {
3281 spin_unlock_irqrestore(&sdhci->lock, flags);
3284 tap_data->win_end = min_t(u8, (tap_value - 1), MAX_TAP_VALUES);
3285 tap_data->win_size = tap_data->win_end - tap_data->win_start;
3289 * If the size of window is more than 4 taps wide, then it is a
3290 * valid window. If tap value 0 has passed, then a partial
3291 * window exists. Mark all the window edges as boundary edges.
3293 if (tap_data->win_size > 4) {
3294 if (tap_data->win_start == 0)
3295 tuning_data->is_partial_win_valid = true;
3296 tap_data->win_start_attr = WIN_EDGE_BOUN_START;
3297 tap_data->win_end_attr = WIN_EDGE_BOUN_END;
3299 /* Invalid window as size is less than 5 taps */
3300 SDHCI_TEGRA_DBG("Invalid tuning win (%d-%d) ignored\n",
3301 tap_data->win_start, tap_data->win_end);
3305 /* Ignore first and last partial UIs */
/* A UI is the distance between consecutive boundary ends */
3306 if (tap_data->win_end_attr == WIN_EDGE_BOUN_END) {
3307 tuning_ui[num_of_uis].ui = tap_data->win_end -
3309 tuning_ui[num_of_uis].is_valid_ui = true;
3311 prev_boundary_end = tap_data->win_end;
3314 } while (tap_value < MAX_TAP_VALUES);
3315 spin_unlock_irqrestore(&sdhci->lock, flags);
3317 tuning_data->num_of_valid_tap_wins = num_of_wins;
3318 valid_num_uis = num_of_uis;
3320 /* Print info of all tap windows */
3321 pr_info("**********Auto tuning windows*************\n");
3322 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
3323 for (j = 0; j < tuning_data->num_of_valid_tap_wins; j++) {
3324 tap_data = &tuning_data->tap_data[j];
3325 pr_info("win[%d]: %d(%d) - %d(%d)\n",
3326 j, tap_data->win_start, tap_data->win_start_attr,
3327 tap_data->win_end, tap_data->win_end_attr);
3329 pr_info("***************************************\n");
3331 /* Mark the first last partial UIs as invalid */
/*
 * NOTE(review): if num_of_uis is 0, "num_of_uis - 1" indexes
 * tuning_ui[255] (u8 wrap) — confirm an out-of-view guard exists.
 */
3332 tuning_ui[0].is_valid_ui = false;
3333 tuning_ui[num_of_uis - 1].is_valid_ui = false;
3336 /* Discredit all uis at either end with size less than 30% of est ui */
3337 ref_ui = (30 * tuning_data->est_values.ui) / 100;
3338 for (j = 0; j < num_of_uis; j++) {
3339 if (tuning_ui[j].is_valid_ui) {
3340 tuning_ui[j].is_valid_ui = false;
3343 if (tuning_ui[j].ui > ref_ui)
/* Same 30% discredit pass, scanning from the tail end */
3347 for (j = num_of_uis; j > 0; j--) {
3348 if (tuning_ui[j - 1].ui < ref_ui) {
3349 if (tuning_ui[j - 1].is_valid_ui) {
3350 tuning_ui[j - 1].is_valid_ui = false;
3357 /* Calculate 0.75*est_UI */
3358 ref_ui = (75 * tuning_data->est_values.ui) / 100;
3361 * Check for valid UIs and discredit invalid UIs. A UI is considered
3362 * valid if it's greater than (0.75*est_UI). If an invalid UI is found,
3363 * also discredit the smaller of the two adjacent windows.
3365 for (j = 1; j < (num_of_uis - 1); j++) {
3366 if (tuning_ui[j].ui > ref_ui && tuning_ui[j].is_valid_ui) {
3367 tuning_ui[j].is_valid_ui = true;
3369 if (tuning_ui[j].is_valid_ui) {
3370 tuning_ui[j].is_valid_ui = false;
3373 if (!tuning_ui[j + 1].is_valid_ui ||
3374 !tuning_ui[j - 1].is_valid_ui) {
3375 if (tuning_ui[j - 1].is_valid_ui) {
3376 tuning_ui[j - 1].is_valid_ui = false;
3378 } else if (tuning_ui[j + 1].is_valid_ui) {
3379 tuning_ui[j + 1].is_valid_ui = false;
/* Both neighbours valid: discredit the smaller adjacent UI */
3384 if (tuning_ui[j - 1].ui > tuning_ui[j + 1].ui)
3385 tuning_ui[j + 1].is_valid_ui = false;
3387 tuning_ui[j - 1].is_valid_ui = false;
3393 /* Calculate the cumulative UI if there are valid UIs left */
3394 if (valid_num_uis) {
3395 for (j = 0; j < num_of_uis; j++)
3396 if (tuning_ui[j].is_valid_ui) {
3397 calc_ui += tuning_ui[j].ui;
3398 if (!first_valid_full_win)
3399 first_valid_full_win = j;
/* Average of surviving UIs becomes the calculated UI */
3404 tuning_data->calc_values.ui = (calc_ui / valid_num_uis);
3405 valid_ui_found = true;
/* No valid UI measured: fall back to the estimated UI */
3407 tuning_data->calc_values.ui = tuning_data->est_values.ui;
3408 valid_ui_found = false;
3411 SDHCI_TEGRA_DBG("****Tuning UIs***********\n");
3412 for (j = 0; j < num_of_uis; j++)
3413 SDHCI_TEGRA_DBG("Tuning UI[%d] : %d, Is valid[%d]\n",
3414 j, tuning_ui[j].ui, tuning_ui[j].is_valid_ui);
3415 SDHCI_TEGRA_DBG("*************************\n");
3417 /* Get the calculated tuning values */
3418 err = calculate_actual_tuning_values(tegra_host->speedo, tuning_data,
3419 tegra_host->boot_vcore_mv);
3422 * Calculate negative margin if partial win is valid. There are two
3424 * Case 1: If Avg_UI is found, then keep subtracting avg_ui from start
3425 * of first valid full window until a value <=0 is obtained.
3426 * Case 2: If Avg_UI is not found, subtract avg_ui from all boundary
3427 * starts until a value <=0 is found.
3429 if (tuning_data->is_partial_win_valid && (num_of_wins > 1)) {
3430 if (valid_ui_found) {
3432 tuning_data->tap_data[first_valid_full_win].win_start;
3433 boun_end = partial_win_start;
3434 partial_win_start %= tuning_data->calc_values.ui;
3435 partial_win_start -= tuning_data->calc_values.ui;
3437 for (j = 0; j < NEG_MAR_CHK_WIN_COUNT; j++) {
3439 tuning_data->tap_data[j + 1].win_start;
3441 boun_end = temp_margin;
3443 next_boun_end = temp_margin;
3444 temp_margin %= tuning_data->calc_values.ui;
3445 temp_margin -= tuning_data->calc_values.ui;
3446 if (!partial_win_start ||
3447 (temp_margin > partial_win_start))
3448 partial_win_start = temp_margin;
/* A non-positive margin replaces window 0's start (negative tap) */
3451 if (partial_win_start <= 0)
3452 tuning_data->tap_data[0].win_start = partial_win_start;
3456 insert_boundaries_in_tap_windows(sdhci, tuning_data, boun_end);
3458 insert_boundaries_in_tap_windows(sdhci, tuning_data, next_boun_end);
3460 /* Insert calculated holes into the windows */
3461 err = adjust_holes_in_tap_windows(sdhci, tuning_data);
/*
 * Debug helper: logs every configured tuning frequency and its frequency
 * band for this host. Output goes through SDHCI_TEGRA_DBG (pr_info when
 * tuning debug is enabled, no-op otherwise).
 */
3466 static void sdhci_tegra_dump_tuning_constraints(struct sdhci_host *sdhci)
3468 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3469 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3470 struct tegra_tuning_data *tuning_data;
3473 SDHCI_TEGRA_DBG("%s: Num of tuning frequencies%d\n",
3474 mmc_hostname(sdhci->mmc), tegra_host->tuning_freq_count);
3475 for (i = 0; i < tegra_host->tuning_freq_count; ++i) {
3476 tuning_data = &tegra_host->tuning_data[i];
3477 SDHCI_TEGRA_DBG("%s: Tuning freq[%d]: %d, freq band %d\n",
3478 mmc_hostname(sdhci->mmc), i,
3479 tuning_data->freq_hz, tuning_data->freq_band);
/*
 * Maps a tuning-voltage selector (taken from *mask — the selection/consume
 * logic sits on lines missing from this view) to the matching vcore
 * millivolt value stored on tegra_host. Falls back to the boot vcore
 * voltage for unrecognized selectors.
 */
3483 static unsigned int get_tuning_voltage(struct sdhci_tegra *tegra_host, u8 *mask)
3490 case NOMINAL_VCORE_TUN:
3491 return tegra_host->nominal_vcore_mv;
3492 case BOOT_VCORE_TUN:
3493 return tegra_host->boot_vcore_mv;
3494 case MIN_OVERRIDE_VCORE_TUN:
3495 return tegra_host->min_vcore_override_mv;
/* Default: tune at the boot-time core voltage */
3498 return tegra_host->boot_vcore_mv;
/*
 * Returns the index of the first entry in the SoC tuning frequency list
 * that is >= the host's max clock (the "frequency band" the host falls
 * into), or TUNING_MAX_FREQ when the clock exceeds every listed frequency.
 */
3501 static u8 sdhci_tegra_get_freq_point(struct sdhci_host *sdhci)
3503 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3504 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3505 const unsigned int *freq_list;
3509 curr_clock = sdhci->max_clk;
3510 freq_list = tegra_host->soc_data->tuning_freq_list;
3512 for (i = 0; i < TUNING_FREQ_COUNT; ++i)
3513 if (curr_clock <= freq_list[i])
/* No band matched: report the max-frequency band */
3516 return TUNING_MAX_FREQ;
/*
 * Returns the tap-hole margin for this controller instance. If the SoC
 * data carries fixed per-device margins (NVQUIRK_SELECT_FIXED_TAP_HOLE_
 * MARGINS), the table is searched by device name; otherwise the margin is
 * computed from the tap-to-tap tuning value.
 * NOTE(review): the visible table-walk loop never advances tap_hole — the
 * pointer-increment line is presumably among the lines lost from this
 * extraction; verify against the full source.
 */
3519 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
3520 int t2t_tuning_value)
3522 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3523 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3524 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
3525 struct tuning_tap_hole_margins *tap_hole;
3530 if (soc_data->nvquirks & NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS) {
3531 if (soc_data->tap_hole_margins) {
3532 tap_hole = soc_data->tap_hole_margins;
3533 dev_id = dev_name(mmc_dev(sdhci->mmc));
3534 for (i = 0; i < soc_data->tap_hole_margins_count; i++) {
3535 if (!strcmp(dev_id, tap_hole->dev_id))
3536 return tap_hole->tap_hole_margin;
3540 dev_info(mmc_dev(sdhci->mmc),
3541 "Fixed tap hole margins missing\n");
3545 /* if no margin are available calculate tap margin */
3546 tap_margin = (((2 * (450 / t2t_tuning_value)) +
/*
 * Looks up the characterization coefficients (tap-to-tap delay and tap-hole)
 * for every supported tuning frequency of this controller instance, matching
 * by device name (and, for hole coeffs, by frequency in kHz). Tuning cannot
 * proceed without this data; missing entries are logged and the pointer
 * cleared. Existing matches are kept unless force_retuning is set.
 */
3553 * The frequency tuning algorithm tries to calculate the tap-to-tap delay
3554 * UI and estimate holes using equations and predetermined coefficients from
3555 * the characterization data. The algorithm will not work without this data.
3557 static int find_tuning_coeffs_data(struct sdhci_host *sdhci,
3558 bool force_retuning)
3560 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3561 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3562 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
3563 struct tegra_tuning_data *tuning_data;
3564 struct tuning_t2t_coeffs *t2t_coeffs;
3565 struct tap_hole_coeffs *thole_coeffs;
3567 unsigned int freq_khz;
3569 bool coeffs_set = false;
3571 dev_id = dev_name(mmc_dev(sdhci->mmc));
3572 /* Find the coeffs data for all supported frequencies */
3573 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
3574 tuning_data = &tegra_host->tuning_data[i];
3576 /* Skip if T2T coeffs are already found */
3577 if (tuning_data->t2t_coeffs == NULL || force_retuning) {
3578 t2t_coeffs = soc_data->t2t_coeffs;
/* Match tap-to-tap coeffs table entry by device name */
3579 for (j = 0; j < soc_data->t2t_coeffs_count; j++) {
3580 if (!strcmp(dev_id, t2t_coeffs->dev_id)) {
3581 tuning_data->t2t_coeffs = t2t_coeffs;
3583 dev_info(mmc_dev(sdhci->mmc),
3584 "Found T2T coeffs data\n");
/* No match found: tuning data unusable for this freq */
3590 dev_err(mmc_dev(sdhci->mmc),
3591 "T2T coeffs data missing\n");
3592 tuning_data->t2t_coeffs = NULL;
3598 /* Skip if tap hole coeffs are already found */
3599 if (tuning_data->thole_coeffs == NULL || force_retuning) {
3600 thole_coeffs = soc_data->tap_hole_coeffs;
3601 freq_khz = tuning_data->freq_hz / 1000;
/* Hole coeffs must match both device name and frequency */
3602 for (j = 0; j < soc_data->tap_hole_coeffs_count; j++) {
3603 if (!strcmp(dev_id, thole_coeffs->dev_id) &&
3604 (freq_khz == thole_coeffs->freq_khz)) {
3605 tuning_data->thole_coeffs =
3608 dev_info(mmc_dev(sdhci->mmc),
3609 "%dMHz tap hole coeffs found\n",
3617 dev_err(mmc_dev(sdhci->mmc),
3618 "%dMHz Tap hole coeffs data missing\n",
3620 tuning_data->thole_coeffs = NULL;
/*
 * Decides how many tuning frequencies this host needs (more when DFS
 * frequency scaling is enabled and the mode is not SDR50) and fills each
 * tegra_host->tuning_data[] slot with its frequency, frequency band, and
 * the vcore-mask constraints for that band. Lower-frequency entries are
 * filled before the max-clock entry. Returns the frequency count (the
 * switch-case labels and some assignments sit on lines missing from this
 * view).
 */
3630 * Determines the numbers of frequencies required and then fills up the tuning
3631 * constraints for each of the frequencies. The data of lower frequency is
3632 * filled first and then the higher frequency data. Max supported frequencies
3635 static int setup_freq_constraints(struct sdhci_host *sdhci,
3636 const unsigned int *freq_list)
3638 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3639 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3640 struct tegra_tuning_data *tuning_data;
/* DFS needs multiple tuning frequencies except in SDR50 mode */
3644 if ((sdhci->mmc->ios.timing != MMC_TIMING_UHS_SDR50) &&
3645 (sdhci->mmc->caps2 & MMC_CAP2_FREQ_SCALING))
3646 freq_count = DFS_FREQ_COUNT;
3650 freq_band = sdhci_tegra_get_freq_point(sdhci);
3651 /* Fill up the req frequencies */
3652 switch (freq_count) {
/* Single-frequency case: tune only at the host's max clock */
3654 tuning_data = &tegra_host->tuning_data[0];
3655 tuning_data->freq_hz = sdhci->max_clk;
3656 tuning_data->freq_band = freq_band;
3657 tuning_data->constraints.vcore_mask =
3658 tuning_vcore_constraints[freq_band].vcore_mask;
3659 tuning_data->nr_voltages =
3660 hweight32(tuning_data->constraints.vcore_mask);
/* Multi-frequency case: slot 1 is max clock, slot 0 a lower band */
3663 tuning_data = &tegra_host->tuning_data[1];
3664 tuning_data->freq_hz = sdhci->max_clk;
3665 tuning_data->freq_band = freq_band;
3666 tuning_data->constraints.vcore_mask =
3667 tuning_vcore_constraints[freq_band].vcore_mask;
3668 tuning_data->nr_voltages =
3669 hweight32(tuning_data->constraints.vcore_mask);
/* Pick the highest lower band from the SoC frequency list */
3671 tuning_data = &tegra_host->tuning_data[0];
3672 for (i = (freq_band - 1); i >= 0; i--) {
3675 tuning_data->freq_hz = freq_list[i];
3676 tuning_data->freq_band = i;
3677 tuning_data->nr_voltages = 1;
3678 tuning_data->constraints.vcore_mask =
3679 tuning_vcore_constraints[i].vcore_mask;
/* nr_voltages recomputed from the mask population count */
3680 tuning_data->nr_voltages =
3681 hweight32(tuning_data->constraints.vcore_mask);
3685 dev_err(mmc_dev(sdhci->mmc), "Unsupported freq count\n");
/*
 * Entry point for gathering tuning constraints: sets up the per-frequency
 * constraint table (skipped when a valid count already exists, unless
 * force_retuning), resolves the characterization coefficients for each
 * frequency, and dumps the result for debugging.
 */
3693 * Get the supported frequencies and other tuning related constraints for each
3694 * frequency. The supported frequencies should be determined from the list of
3695 * frequencies in the soc data and also consider the platform clock limits as
3696 * well as any DFS related restrictions.
3698 static int sdhci_tegra_get_tuning_constraints(struct sdhci_host *sdhci,
3699 bool force_retuning)
3701 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3702 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3703 const unsigned int *freq_list;
3706 /* A valid freq count means freq constraints are already set up */
3707 if (!tegra_host->tuning_freq_count || force_retuning) {
3708 freq_list = tegra_host->soc_data->tuning_freq_list;
3709 tegra_host->tuning_freq_count =
3710 setup_freq_constraints(sdhci, freq_list);
3711 if (tegra_host->tuning_freq_count < 0) {
3712 dev_err(mmc_dev(sdhci->mmc),
3713 "Invalid tuning freq count\n");
/* Coefficients are required before any tuning run can start */
3718 err = find_tuning_coeffs_data(sdhci, force_retuning);
3722 sdhci_tegra_dump_tuning_constraints(sdhci);
/*
 * Applies (or clears, when voltage == 0 per the caller usage visible in
 * this file) a DVFS core-voltage override for tuning. Any previous
 * override is cleared first; EPERM/ENOSYS from the DVFS layer means
 * overrides are disabled and tuning continues at the default voltage
 * rather than failing. Nominal-voltage tuning may additionally boost the
 * EMC clock, which is reverted before returning.
 */
3728 * During boot, only boot voltage for vcore can be set. Check if the current
3729 * voltage is allowed to be used. Nominal and min override voltages can be
3730 * set once boot is done. This will be notified through late subsys init call.
3732 static int sdhci_tegra_set_tuning_voltage(struct sdhci_host *sdhci,
3733 unsigned int voltage)
3735 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3736 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3738 bool nom_emc_freq_set = false;
/* Pre-boot-complete: only the boot vcore may be requested */
3740 if (voltage && (voltage != tegra_host->boot_vcore_mv)) {
3741 SDHCI_TEGRA_DBG("%s: Override vcore %dmv not allowed\n",
3742 mmc_hostname(sdhci->mmc), voltage);
3746 SDHCI_TEGRA_DBG("%s: Setting vcore override %d\n",
3747 mmc_hostname(sdhci->mmc), voltage);
3749 * First clear any previous dvfs override settings. If dvfs overrides
3750 * are disabled, then print the error message but continue execution
3751 * rather than failing tuning altogether.
3753 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, 0);
3754 if ((err == -EPERM) || (err == -ENOSYS)) {
3756 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3757 * when DVFS override is not enabled. Continue tuning
3758 * with default core voltage
3760 SDHCI_TEGRA_DBG("dvfs overrides disabled. Nothing to clear\n");
3766 /* EMC clock freq boost might be required for nominal core voltage */
3767 if ((voltage == tegra_host->nominal_vcore_mv) &&
3768 tegra_host->plat->en_nominal_vcore_tuning &&
3769 tegra_host->emc_clk) {
3770 err = clk_set_rate(tegra_host->emc_clk,
3771 SDMMC_EMC_NOM_VOLT_FREQ);
3773 dev_err(mmc_dev(sdhci->mmc),
3774 "Failed to set emc nom clk freq %d\n", err);
3776 nom_emc_freq_set = true;
3780 * If dvfs overrides are disabled, then print the error message but
3781 * continue tuning execution rather than failing tuning altogether.
3783 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, voltage);
3784 if ((err == -EPERM) || (err == -ENOSYS)) {
3786 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3787 * when DVFS override is not enabled. Continue tuning
3788 * with default core voltage
3790 SDHCI_TEGRA_DBG("dvfs overrides disabled. No overrides set\n");
3793 dev_err(mmc_dev(sdhci->mmc),
3794 "failed to set vcore override %dmv\n", voltage);
3796 /* Revert emc clock to normal freq */
3797 if (nom_emc_freq_set) {
3798 err = clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
3800 dev_err(mmc_dev(sdhci->mmc),
3801 "Failed to revert emc nom clk freq %d\n", err);
/*
 * Runs the tap-window scan once per tuning voltage in the constraint mask:
 * for each voltage, applies the vcore override and collects the tap window
 * data at that voltage. Aborts on the first voltage that cannot be set or
 * whose window scan fails.
 */
3807 static int sdhci_tegra_run_tuning(struct sdhci_host *sdhci,
3808 struct tegra_tuning_data *tuning_data)
3810 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3811 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3814 u8 i, vcore_mask = 0;
3816 vcore_mask = tuning_data->constraints.vcore_mask;
3817 for (i = 0; i < tuning_data->nr_voltages; i++) {
/* get_tuning_voltage() consumes selectors from vcore_mask */
3818 voltage = get_tuning_voltage(tegra_host, &vcore_mask);
3819 err = sdhci_tegra_set_tuning_voltage(sdhci, voltage);
3821 dev_err(mmc_dev(sdhci->mmc),
3822 "Unable to set override voltage.\n");
3826 /* Get the tuning window info */
3827 SDHCI_TEGRA_DBG("Getting tuning windows...\n");
3828 err = sdhci_tegra_get_tap_window_data(sdhci, tuning_data);
3830 dev_err(mmc_dev(sdhci->mmc),
3831 "Failed to get tap win %d\n", err);
3834 SDHCI_TEGRA_DBG("%s: %d tuning window data obtained\n",
3835 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
/*
 * Sanity-checks the stored best tap value for the current max clock, then
 * programs it and issues one tuning command to confirm the device still
 * passes at that tap. Returns the tuning-command result; failure is the
 * caller's cue to force a full retune.
 */
3840 static int sdhci_tegra_verify_best_tap(struct sdhci_host *sdhci)
3842 struct tegra_tuning_data *tuning_data;
3845 tuning_data = sdhci_tegra_get_tuning_data(sdhci, sdhci->max_clk);
/* Reject out-of-range tap values before touching the hardware */
3846 if ((tuning_data->best_tap_value < 0) ||
3847 (tuning_data->best_tap_value > MAX_TAP_VALUES)) {
3848 dev_err(mmc_dev(sdhci->mmc),
3849 "Trying to verify invalid best tap value\n");
3852 dev_info(mmc_dev(sdhci->mmc),
3853 "%s: tuning freq %dhz, best tap %d\n",
3854 __func__, tuning_data->freq_hz,
3855 tuning_data->best_tap_value);
3858 /* Set the best tap value */
3859 sdhci_tegra_set_tap_delay(sdhci, tuning_data->best_tap_value);
3861 /* Run tuning after setting the best tap value */
3862 err = sdhci_tegra_issue_tuning_cmd(sdhci);
3864 dev_err(mmc_dev(sdhci->mmc),
3865 "%dMHz best tap value verification failed %d\n",
3866 tuning_data->freq_hz, err);
/*
 * Main tuning entry point (execute_tuning host op). Valid only for SDR104,
 * or SDR50 when the host flags require tuning, and only for 4/8-bit bus
 * widths. Under tuning_mutex it switches interrupt handling to polling,
 * reuses a previously verified best tap when possible, otherwise runs the
 * full per-frequency tuning flow (estimate values, scan windows, compute
 * and verify best tap). On exit it releases any vcore override, restores
 * interrupts, and conditionally restores external-loopback / feedback-clock
 * settings depending on success.
 */
3870 static int sdhci_tegra_execute_tuning(struct sdhci_host *sdhci, u32 opcode)
3872 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3873 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3874 struct tegra_tuning_data *tuning_data;
3879 u8 i, set_retuning = 0;
3880 bool force_retuning = false;
3882 /* Tuning is valid only in SDR104 and SDR50 modes */
3883 ctrl_2 = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
3884 if (!(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
3885 (((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
3886 (sdhci->flags & SDHCI_SDR50_NEEDS_TUNING))))
3889 /* Tuning should be done only for MMC_BUS_WIDTH_8 and MMC_BUS_WIDTH_4 */
3890 if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
3891 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8;
3892 else if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
3893 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4;
3897 SDHCI_TEGRA_DBG("%s: Starting freq tuning\n", mmc_hostname(sdhci->mmc));
/* External loopback is changed for the duration of tuning */
3898 if (tegra_host->plat->enb_ext_loopback) {
3899 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
3901 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3902 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
3904 if (tegra_host->plat->enb_feedback_clock) {
3905 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
3907 SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
3908 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/* Serializes tuning against data transfers on other SDMMC instances */
3911 mutex_lock(&tuning_mutex);
3913 /* Set the tuning command to be used */
3914 tegra_host->tuning_opcode = opcode;
3917 * Disable all interrupts signalling.Enable interrupt status
3918 * detection for buffer read ready and data crc. We use
3919 * polling for tuning as it involves less overhead.
3921 sdhci_writel(sdhci, 0, SDHCI_SIGNAL_ENABLE);
3922 sdhci_writel(sdhci, SDHCI_INT_DATA_AVAIL |
3923 SDHCI_INT_DATA_CRC, SDHCI_INT_ENABLE);
3926 * If tuning is already done and retune request is not set, then skip
3927 * best tap value calculation and use the old best tap value. If the
3928 * previous best tap value verification failed, force retuning.
3930 if (tegra_host->tuning_status == TUNING_STATUS_DONE) {
3931 err = sdhci_tegra_verify_best_tap(sdhci);
3933 dev_err(mmc_dev(sdhci->mmc),
3934 "Prev best tap failed. Re-running tuning\n");
3935 force_retuning = true;
3941 if (tegra_host->force_retune == true) {
3942 force_retuning = true;
/* One-shot flag: consume the external retune request */
3943 tegra_host->force_retune = false;
3946 tegra_host->tuning_status = 0;
3947 err = sdhci_tegra_get_tuning_constraints(sdhci, force_retuning);
3949 dev_err(mmc_dev(sdhci->mmc),
3950 "Failed to get tuning constraints\n");
/* Per-frequency tuning loop */
3954 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
3955 tuning_data = &tegra_host->tuning_data[i];
3956 if (tuning_data->tuning_done && !force_retuning)
3959 /* set clock freq also needed for MMC_RTPM */
3960 SDHCI_TEGRA_DBG("%s: Setting tuning freq%d\n",
3961 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
3962 tegra_sdhci_set_clock(sdhci, tuning_data->freq_hz);
3964 SDHCI_TEGRA_DBG("%s: Calculating estimated tuning values\n",
3965 mmc_hostname(sdhci->mmc));
3966 err = calculate_estimated_tuning_values(tegra_host->speedo,
3967 tuning_data, tegra_host->boot_vcore_mv);
3971 SDHCI_TEGRA_DBG("Running tuning...\n");
3972 err = sdhci_tegra_run_tuning(sdhci, tuning_data);
3976 SDHCI_TEGRA_DBG("calculating best tap value\n");
3977 err = sdhci_tegra_calculate_best_tap(sdhci, tuning_data);
3981 err = sdhci_tegra_verify_best_tap(sdhci);
3982 if (!err && !set_retuning) {
3983 tuning_data->tuning_done = true;
3984 tegra_host->tuning_status |= TUNING_STATUS_DONE;
3986 tegra_host->tuning_status |= TUNING_STATUS_RETUNE;
3990 /* Release any override core voltages set */
3991 sdhci_tegra_set_tuning_voltage(sdhci, 0);
3993 /* Enable interrupts. Enable full range for core voltage */
3994 sdhci_writel(sdhci, sdhci->ier, SDHCI_INT_ENABLE);
3995 sdhci_writel(sdhci, sdhci->ier, SDHCI_SIGNAL_ENABLE);
3996 mutex_unlock(&tuning_mutex);
3998 SDHCI_TEGRA_DBG("%s: Freq tuning done\n", mmc_hostname(sdhci->mmc));
/* Restore/adjust loopback depending on tuning outcome (err) */
3999 if (tegra_host->plat->enb_ext_loopback) {
4000 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
4002 /* Tuning is failed and card will try to enumerate in
4003 * Legacy High Speed mode. So, Enable External Loopback
4007 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
4010 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
4012 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
4015 if (tegra_host->plat->enb_feedback_clock) {
4016 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
4017 if (err) /* Tuning is failed disable feedback clock */
4019 ~SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
4022 SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
4023 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * Suspend path: forces the SDMMC clock off, disables the power rails when
 * a card is present (optionally parking the pins as GPIOs while doing so),
 * arms the card-detect GPIO as a wake source unless marked wake-incapable,
 * and raises the pad voltage to 3.3V on pwrdet-capable platforms. Finally
 * marks the host for detection on resume.
 */
4029 static int tegra_sdhci_suspend(struct sdhci_host *sdhci)
4031 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
4032 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4034 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
4035 const struct tegra_sdhci_platform_data *plat = pdev->dev.platform_data;
4036 unsigned int cd_irq;
4038 if (sdhci->is_clk_on) {
4039 pr_debug("%s suspend force clk off\n",
4040 mmc_hostname(sdhci->mmc));
4041 tegra_sdhci_set_clock(sdhci, 0);
4044 /* Disable the power rails if any */
4045 if (tegra_host->card_present) {
4047 /* Configure sdmmc pins to GPIO mode if needed */
/*
 * NOTE(review): gpio_request_array() return value is ignored here
 * while gpio_free_array() below frees unconditionally by pin_count —
 * verify intended against the full source.
 */
4048 if (plat->pin_count > 0)
4049 gpio_request_array(plat->gpios,
4050 ARRAY_SIZE(plat->gpios));
4052 err = tegra_sdhci_configure_regulators(tegra_host,
4053 CONFIG_REG_DIS, 0, 0);
4055 dev_err(mmc_dev(sdhci->mmc),
4056 "Regulators disable in suspend failed %d\n", err);
/* Card-detect GPIO doubles as the wakeup source across suspend */
4058 if (plat && gpio_is_valid(plat->cd_gpio)) {
4059 if (!plat->cd_wakeup_incapable) {
4060 /* Enable wake irq at end of suspend */
4061 cd_irq = gpio_to_irq(plat->cd_gpio);
4062 err = enable_irq_wake(cd_irq);
4064 dev_err(mmc_dev(sdhci->mmc),
4065 "SD card wake-up event registration for irq=%d failed with error: %d\n",
4070 if (plat->pwrdet_support && tegra_host->sdmmc_padctrl) {
4071 err = padctrl_set_voltage(tegra_host->sdmmc_padctrl,
4072 SDHOST_HIGH_VOLT_3V3);
4074 dev_err(mmc_dev(sdhci->mmc),
4075 "padcontrol set volt failed: %d\n", err);
4078 if (plat->pin_count > 0)
4079 gpio_free_array(plat->gpios, ARRAY_SIZE(plat->gpios));
/* Tell the resume path to re-run card detection */
4082 sdhci->detect_resume = 1;
/*
 * Resume path: disarms the card-detect wake IRQ and re-reads card presence
 * (active-low GPIO), turns the clock back on at the 400 kHz identification
 * rate, re-enables regulators and restores the signal voltage (1.8V vs
 * 3.3V per the OCR mask), optionally resets and re-powers the controller
 * when MMC_PM_KEEP_POWER is set, then runs pad calibration.
 */
4086 static int tegra_sdhci_resume(struct sdhci_host *sdhci)
4088 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
4089 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4090 struct platform_device *pdev;
4091 struct tegra_sdhci_platform_data *plat;
4092 unsigned int signal_voltage = 0;
4094 unsigned int cd_irq;
4096 pdev = to_platform_device(mmc_dev(sdhci->mmc));
4097 plat = pdev->dev.platform_data;
4099 if (plat && gpio_is_valid(plat->cd_gpio)) {
4100 /* disable wake capability at start of resume */
4101 if (!plat->cd_wakeup_incapable) {
4102 cd_irq = gpio_to_irq(plat->cd_gpio);
4103 disable_irq_wake(cd_irq);
/* Card-detect GPIO is active low: 0 means a card is inserted */
4105 tegra_host->card_present =
4106 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
4109 /* Setting the min identification clock of freq 400KHz */
4110 if (!sdhci->is_clk_on) {
4111 pr_debug("%s: resume force clk ON\n",
4112 mmc_hostname(sdhci->mmc));
4113 tegra_sdhci_set_clock(sdhci, 400000);
4116 /* Enable the power rails if any */
4117 if (tegra_host->card_present) {
4118 err = tegra_sdhci_configure_regulators(tegra_host,
4119 CONFIG_REG_EN, 0, 0);
4121 dev_err(mmc_dev(sdhci->mmc),
4122 "Regulators enable in resume failed %d\n", err);
4125 if (tegra_host->vdd_io_reg) {
/* OCR mask advertising 1.8V support selects the low-voltage rail */
4126 if (plat && (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK))
4127 signal_voltage = MMC_SIGNAL_VOLTAGE_180;
4129 signal_voltage = MMC_SIGNAL_VOLTAGE_330;
4130 tegra_sdhci_signal_voltage_switch(sdhci,
4135 /* Reset the controller and power on if MMC_KEEP_POWER flag is set*/
4136 if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) {
4137 tegra_sdhci_reset(sdhci, SDHCI_RESET_ALL);
4138 sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
4141 tegra_sdhci_do_calibration(sdhci, signal_voltage);
/* Resume-time card detection handled; clear the flag set in suspend */
4144 sdhci->detect_resume = 0;
/*
 * Post-resume fixups: re-runs DLL calibration (via tegra_sdhci_post_init)
 * for eMMC devices in HS400 mode, temporarily turning the clock on if it
 * was off, and gates the clock again when no card/device is present and
 * delayed clock gating runtime-PM is in use.
 */
4148 static void tegra_sdhci_post_resume(struct sdhci_host *sdhci)
4150 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
4151 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4152 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
4153 struct tegra_sdhci_platform_data *plat;
4154 bool dll_calib_req = false;
4155 bool is_sdhci_clk_turned_on = false;
4157 plat = pdev->dev.platform_data;
/* DLL recalibration is only needed for eMMC running HS400 */
4158 dll_calib_req = (sdhci->mmc->card &&
4159 (sdhci->mmc->card->type == MMC_TYPE_MMC) &&
4160 (sdhci->mmc->ios.timing == MMC_TIMING_MMC_HS400));
4161 if (dll_calib_req) {
/* Temporarily enable the clock if it is gated, restore after */
4162 if (!sdhci->is_clk_on) {
4163 if (sdhci->mmc->ios.clock) {
4164 sdhci->mmc->ops->set_ios(sdhci->mmc,
4166 is_sdhci_clk_turned_on = true;
4169 tegra_sdhci_post_init(sdhci);
4170 if (is_sdhci_clk_turned_on)
4171 tegra_sdhci_set_clock(sdhci, 0);
4174 /* Turn OFF the clocks if the device is not present */
4175 if ((!tegra_host->card_present || !sdhci->mmc->card) &&
4176 tegra_host->clk_enabled &&
4177 (IS_RTPM_DELAY_CG(plat->rtpm_type)))
4178 tegra_sdhci_set_clock(sdhci, 0);
/*
 * Busy-waits (bounded by the timeout counter) for the global tuning mutex
 * to be released before a data transfer, since tuning pins the core
 * voltage and must not overlap transfers on other SDMMC instances. Logs
 * an error if the mutex stays locked too long.
 */
4182 * For tegra specific tuning, core voltage has to be fixed at different
4183 * voltages to get the tap values. Fixing the core voltage during tuning for one
4184 * device might affect transfers of other SDMMC devices. Check if tuning mutex
4185 * is locked before starting a data transfer. The new tuning procedure might
4186 * take at max 1.5s for completion for a single run. Taking DFS into count,
4187 * setting the max timeout for tuning mutex check a 3 secs. Since tuning is
4188 * run only during boot or the first time device is inserted, there wouldn't
4189 * be any delays in cmd/xfer execution once devices enumeration is done.
4191 static void tegra_sdhci_get_bus(struct sdhci_host *sdhci)
4193 unsigned int timeout = 300;
/* Poll until the tuning mutex frees up or the timeout budget runs out */
4195 while (mutex_is_locked(&tuning_mutex)) {
4199 dev_err(mmc_dev(sdhci->mmc),
4200 "Tuning mutex locked for long time\n");
/*
 * Power-off hook: if a retune was pending (timer armed or NEEDS_RETUNING
 * flag set), cancels the tuning timer and, once the shared
 * boot_volt_req_refcount drops to zero, releases the vcore override so no
 * voltage constraint outlives the powered-off host.
 */
4207 * The host/device can be powered off before the retuning request is handled in
4208 * case of SDIDO being off if Wifi is turned off, sd card removal etc. In such
4209 * cases, cancel the pending tuning timer and remove any core voltage
4210 * constraints that are set earlier.
4212 static void tegra_sdhci_power_off(struct sdhci_host *sdhci, u8 power_mode)
4214 int retuning_req_set = 0;
4216 retuning_req_set = (timer_pending(&sdhci->tuning_timer) ||
4217 (sdhci->flags & SDHCI_NEEDS_RETUNING));
4219 if (retuning_req_set) {
4220 del_timer_sync(&sdhci->tuning_timer);
/* Refcount shared across hosts that requested the boot voltage */
4222 if (boot_volt_req_refcount)
4223 --boot_volt_req_refcount;
4225 if (!boot_volt_req_refcount) {
4226 sdhci_tegra_set_tuning_voltage(sdhci, 0);
4227 SDHCI_TEGRA_DBG("%s: Release override as host is off\n",
4228 mmc_hostname(sdhci->mmc));
/* debugfs getter: reports the devfreq stats polling interval, if present */
4233 static int show_polling_period(void *data, u64 *value)
4235 struct sdhci_host *host = (struct sdhci_host *)data;
4237 if (host->mmc->dev_stats != NULL)
4238 *value = host->mmc->dev_stats->polling_interval;
/*
 * debugfs setter: updates the devfreq stats polling interval (the clamp to
 * the 1-second maximum mentioned below sits on lines missing from this
 * view).
 */
4243 static int set_polling_period(void *data, u64 value)
4245 struct sdhci_host *host = (struct sdhci_host *)data;
4247 if (host->mmc->dev_stats != NULL) {
4248 /* Limiting the maximum polling period to 1 sec */
4251 host->mmc->dev_stats->polling_interval = value;
/* debugfs getter: reports the frequency governor's high-load threshold */
4256 static int show_active_load_high_threshold(void *data, u64 *value)
4258 struct sdhci_host *host = (struct sdhci_host *)data;
4259 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4260 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4261 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
4263 if (gov_data != NULL)
4264 *value = gov_data->act_load_high_threshold;
/*
 * set_active_load_high_threshold - debugfs "set" hook updating the
 * devfreq governor's active-load high threshold.
 * NOTE(review): the extract is truncated - the upper-bound check implied
 * by the "Maximum threshold load percentage is 100" comment and the
 * return statement are missing from this copy.
 */
4269 static int set_active_load_high_threshold(void *data, u64 value)
4271 struct sdhci_host *host = (struct sdhci_host *)data;
4272 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4273 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4274 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
4276 if (gov_data != NULL) {
4277 /* Maximum threshold load percentage is 100.*/
4280 gov_data->act_load_high_threshold = value;
4286 static int show_disableclkgating_value(void *data, u64 *value)
4288 struct sdhci_host *host;
4289 struct sdhci_pltfm_host *pltfm_host;
4290 struct sdhci_tegra *tegra_host;
4292 host = (struct sdhci_host *)data;
4294 pltfm_host = sdhci_priv(host);
4295 if (pltfm_host != NULL) {
4296 tegra_host = pltfm_host->priv;
4297 if (tegra_host != NULL)
4298 *value = tegra_host->dbg_cfg.clk_ungated;
/*
 * set_disableclkgating_value - debugfs "set" hook that force-disables or
 * re-enables MMC clock gating for debug, mirroring the choice in
 * dbg_cfg.clk_ungated and (for delayed-clock-gate runtime PM) in the
 * MMC_CAP2_CLOCK_GATING capability bit.
 * NOTE(review): the extract is truncated - the value test selecting the
 * enable vs disable branch, the caps2 assignment left-hand sides and the
 * return statement are missing from this copy.
 */
4304 static int set_disableclkgating_value(void *data, u64 value)
4306 struct sdhci_host *host;
4307 struct platform_device *pdev;
4308 struct tegra_sdhci_platform_data *plat;
4309 struct sdhci_pltfm_host *pltfm_host;
4310 struct sdhci_tegra *tegra_host;
4312 host = (struct sdhci_host *)data;
4314 pdev = to_platform_device(mmc_dev(host->mmc));
4315 plat = pdev->dev.platform_data;
4316 pltfm_host = sdhci_priv(host);
4317 if (pltfm_host != NULL) {
4318 tegra_host = pltfm_host->priv;
4319 /* Set the CAPS2 register to reflect
4320 * the clk gating value
4322 if (tegra_host != NULL) {
/* Re-apply current ios so the new gating policy takes effect. */
4324 host->mmc->ops->set_ios(host->mmc,
4326 tegra_host->dbg_cfg.clk_ungated = true;
4327 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
4329 ~MMC_CAP2_CLOCK_GATING;
4331 tegra_host->dbg_cfg.clk_ungated = false;
4332 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
4334 MMC_CAP2_CLOCK_GATING;
/*
 * set_trim_override_value - debugfs "set" hook overriding the outbound
 * (trim) delay.  Only applied when clock gating has been disabled via
 * debugfs and the host clock is running; otherwise the user is told to
 * disable clock gating first.
 * NOTE(review): the extract is truncated - the assignment right-hand
 * side at the dbg_cfg.trim_val line and the return statement are
 * missing from this copy.
 */
4342 static int set_trim_override_value(void *data, u64 value)
4344 struct sdhci_host *host;
4345 struct sdhci_pltfm_host *pltfm_host;
4346 struct sdhci_tegra *tegra_host;
4348 host = (struct sdhci_host *)data;
4350 pltfm_host = sdhci_priv(host);
4351 if (pltfm_host != NULL) {
4352 tegra_host = pltfm_host->priv;
4353 if (tegra_host != NULL) {
4354 /* Make sure clock gating is disabled */
4355 if ((tegra_host->dbg_cfg.clk_ungated) &&
4356 (tegra_host->clk_enabled)) {
4357 sdhci_tegra_set_trim_delay(host, value);
4358 tegra_host->dbg_cfg.trim_val =
4361 pr_info("%s: Disable clock gating before setting value\n",
4362 mmc_hostname(host->mmc));
4370 static int show_trim_override_value(void *data, u64 *value)
4372 struct sdhci_host *host;
4373 struct sdhci_pltfm_host *pltfm_host;
4374 struct sdhci_tegra *tegra_host;
4376 host = (struct sdhci_host *)data;
4378 pltfm_host = sdhci_priv(host);
4379 if (pltfm_host != NULL) {
4380 tegra_host = pltfm_host->priv;
4381 if (tegra_host != NULL)
4382 *value = tegra_host->dbg_cfg.trim_val;
4388 static int show_tap_override_value(void *data, u64 *value)
4390 struct sdhci_host *host;
4391 struct sdhci_pltfm_host *pltfm_host;
4392 struct sdhci_tegra *tegra_host;
4394 host = (struct sdhci_host *)data;
4396 pltfm_host = sdhci_priv(host);
4397 if (pltfm_host != NULL) {
4398 tegra_host = pltfm_host->priv;
4399 if (tegra_host != NULL)
4400 *value = tegra_host->dbg_cfg.tap_val;
/*
 * set_tap_override_value - debugfs "set" hook overriding the inbound
 * (tap) delay.  Only applied when clock gating has been disabled via
 * debugfs and the host clock is running; otherwise the user is told to
 * disable clock gating first.
 * NOTE(review): the extract is truncated - the return statement is
 * missing from this copy.
 */
4406 static int set_tap_override_value(void *data, u64 value)
4408 struct sdhci_host *host;
4409 struct sdhci_pltfm_host *pltfm_host;
4410 struct sdhci_tegra *tegra_host;
4412 host = (struct sdhci_host *)data;
4414 pltfm_host = sdhci_priv(host);
4415 if (pltfm_host != NULL) {
4416 tegra_host = pltfm_host->priv;
4417 if (tegra_host != NULL) {
4418 /* Make sure clock gating is disabled */
4419 if ((tegra_host->dbg_cfg.clk_ungated) &&
4420 (tegra_host->clk_enabled)) {
4421 sdhci_tegra_set_tap_delay(host, value);
4422 tegra_host->dbg_cfg.tap_val = value;
4424 pr_info("%s: Disable clock gating before setting value\n",
4425 mmc_hostname(host->mmc));
/*
 * debugfs file_operations for the simple u64 get/set attributes above
 * (polling period, governor threshold, clock-gating override, trim/tap
 * delay overrides), generated via DEFINE_SIMPLE_ATTRIBUTE.
 */
4432 DEFINE_SIMPLE_ATTRIBUTE(sdhci_polling_period_fops, show_polling_period,
4433 set_polling_period, "%llu\n");
4434 DEFINE_SIMPLE_ATTRIBUTE(sdhci_active_load_high_threshold_fops,
4435 show_active_load_high_threshold,
4436 set_active_load_high_threshold, "%llu\n");
4437 DEFINE_SIMPLE_ATTRIBUTE(sdhci_disable_clkgating_fops,
4438 show_disableclkgating_value,
4439 set_disableclkgating_value, "%llu\n");
4440 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_trim_data_fops,
4441 show_trim_override_value,
4442 set_trim_override_value, "%llu\n");
4443 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_tap_data_fops,
4444 show_tap_override_value,
4445 set_tap_override_value, "%llu\n");
/*
 * sdhci_tegra_error_stats_debugfs - build the per-host debugfs tree:
 * a root dir named after the device, with error_stats, dfs_stats_dir
 * (polling/threshold files) and override_data (clock gate, tap, trim)
 * entries, plus clk_gate_tmout_ticks when delayed clock gating is used.
 * On any failure, records __LINE__ in saved_line, tears down the whole
 * tree and logs the failing line.
 * NOTE(review): the extract is truncated - the "goto err" jumps after
 * each saved_line assignment, the early-return on success and the err:
 * label are missing from this copy.
 */
4447 static void sdhci_tegra_error_stats_debugfs(struct sdhci_host *host)
4449 struct dentry *root = host->debugfs_root;
4450 struct dentry *dfs_root;
4451 unsigned saved_line;
4454 root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
4455 if (IS_ERR_OR_NULL(root)) {
4456 saved_line = __LINE__;
4459 host->debugfs_root = root;
4462 dfs_root = debugfs_create_dir("dfs_stats_dir", root);
4463 if (IS_ERR_OR_NULL(dfs_root)) {
4464 saved_line = __LINE__;
4468 if (!debugfs_create_file("error_stats", S_IRUSR, root, host,
4469 &sdhci_host_fops)) {
4470 saved_line = __LINE__;
4473 if (!debugfs_create_file("dfs_stats", S_IRUSR, dfs_root, host,
4474 &sdhci_host_dfs_fops)) {
4475 saved_line = __LINE__;
4478 if (!debugfs_create_file("polling_period", 0644, dfs_root, (void *)host,
4479 &sdhci_polling_period_fops)) {
4480 saved_line = __LINE__;
4483 if (!debugfs_create_file("active_load_high_threshold", 0644,
4484 dfs_root, (void *)host,
4485 &sdhci_active_load_high_threshold_fops)) {
4486 saved_line = __LINE__;
4490 dfs_root = debugfs_create_dir("override_data", root);
4491 if (IS_ERR_OR_NULL(dfs_root)) {
4492 saved_line = __LINE__;
4496 if (!debugfs_create_file("clk_gate_disabled", 0644,
4497 dfs_root, (void *)host,
4498 &sdhci_disable_clkgating_fops)) {
4499 saved_line = __LINE__;
4503 if (!debugfs_create_file("tap_value", 0644,
4504 dfs_root, (void *)host,
4505 &sdhci_override_tap_data_fops)) {
4506 saved_line = __LINE__;
4510 if (!debugfs_create_file("trim_value", 0644,
4511 dfs_root, (void *)host,
4512 &sdhci_override_trim_data_fops)) {
4513 saved_line = __LINE__;
4516 if (IS_QUIRKS2_DELAYED_CLK_GATE(host)) {
4517 host->clk_gate_tmout_ticks = -1;
4518 if (!debugfs_create_u32("clk_gate_tmout_ticks",
4520 root, (u32 *)&host->clk_gate_tmout_ticks)) {
4521 saved_line = __LINE__;
/* Error path: remove everything created so far and report the line. */
4529 debugfs_remove_recursive(root);
4530 host->debugfs_root = NULL;
4532 pr_err("%s %s: Failed to initialize debugfs functionality at line=%d\n", __func__,
4533 mmc_hostname(host->mmc), saved_line);
4538 * Simulate the card remove and insert
4539 * set req to true to insert the card
4540 * set req to false to remove the card
/*
 * sdhci_tegra_carddetect - simulate card removal/insertion from debugfs.
 * @req: true to "insert" the card, false to "remove" it.
 * Refuses when the CD GPIO shows no card physically present, or when the
 * card is already in the requested state.  On insert it enables the card
 * regulators and waits for power-up; on remove it disables them, waits
 * for power-down, and forces retuning on the next real insertion.
 * Finally schedules the card tasklet to notify the core.
 * NOTE(review): the extract is truncated - the err declaration, the
 * error-return statements after each dev_err, and the final return are
 * missing from this copy.
 */
4542 static int sdhci_tegra_carddetect(struct sdhci_host *sdhost, bool req)
4544 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
4545 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4546 struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
4547 struct tegra_sdhci_platform_data *plat;
4550 plat = pdev->dev.platform_data;
4552 *check if card is inserted physically before performing
4553 *virtual remove or insertion
4555 if (gpio_is_valid(plat->cd_gpio) &&
4556 (gpio_get_value_cansleep(plat->cd_gpio) != 0)) {
4558 dev_err(mmc_dev(sdhost->mmc),
4559 "Card not inserted in slot\n");
4563 /* Ignore the request if card already in requested state*/
4564 if (tegra_host->card_present == req) {
4565 dev_info(mmc_dev(sdhost->mmc),
4566 "Card already in requested state\n");
4569 tegra_host->card_present = req;
4571 if (tegra_host->card_present) {
4572 err = tegra_sdhci_configure_regulators(tegra_host,
4573 CONFIG_REG_EN, 0, 0);
4575 dev_err(mmc_dev(sdhost->mmc),
4576 "Failed to enable card regulators %d\n", err);
4579 /*sdcard power up time max 37msec*/
4580 usleep_range(40000, 41000);
4582 err = tegra_sdhci_configure_regulators(tegra_host,
4583 CONFIG_REG_DIS, 0 , 0);
4585 dev_err(mmc_dev(sdhost->mmc),
4586 "Failed to disable card regulators %d\n", err);
4589 /*sdcard power down time min 1ms*/
4590 usleep_range(1000, 2000);
4593 * Set retune request as tuning should be done next time
4594 * a card is inserted.
4596 tegra_host->tuning_status = TUNING_STATUS_RETUNE;
4597 tegra_host->force_retune = true;
4599 tasklet_schedule(&sdhost->card_tasklet);
4604 static int get_card_insert(void *data, u64 *val)
4606 struct sdhci_host *sdhost = data;
4607 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
4608 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4610 *val = tegra_host->card_present;
/*
 * set_card_insert - debugfs "set" hook driving the simulated card
 * insert/remove: 1 inserts, 0 removes.  Rejects other values and
 * non-removable (built-in) hosts, then delegates to
 * sdhci_tegra_carddetect().
 * NOTE(review): the extract is truncated - the err declaration, the
 * value-range test guarding the first dev_err, the error returns and the
 * final return are missing from this copy.
 */
4615 static int set_card_insert(void *data, u64 val)
4617 struct sdhci_host *sdhost = data;
4622 dev_err(mmc_dev(sdhost->mmc),
4623 "Usage error. Use 0 to remove, 1 to insert %d\n", err);
4627 if (sdhost->mmc->caps & MMC_CAP_NONREMOVABLE) {
4629 dev_err(mmc_dev(sdhost->mmc),
4630 "usage error, Supports only SDCARD hosts only %d\n", err);
4634 err = sdhci_tegra_carddetect(sdhost, val == 1);
/*
 * get_bus_timing - debugfs read handler returning the current bus timing
 * mode (ios.timing) as a human-readable string.
 * NOTE(review): the extract is truncated - the local 'buf' declaration
 * used by snprintf/simple_read_from_buffer is missing from this copy.
 */
4639 static ssize_t get_bus_timing(struct file *file, char __user *user_buf,
4640 size_t count, loff_t *ppos)
4642 struct sdhci_host *host = file->private_data;
4643 unsigned int len = 0;
/* Lookup table mapping MMC_TIMING_* values to display names. */
4646 static const char *const sdhci_tegra_timing[] = {
4647 [MMC_TIMING_LEGACY] = "legacy",
4648 [MMC_TIMING_MMC_HS] = "highspeed",
4649 [MMC_TIMING_SD_HS] = "highspeed",
4650 [MMC_TIMING_UHS_SDR12] = "SDR12",
4651 [MMC_TIMING_UHS_SDR25] = "SDR25",
4652 [MMC_TIMING_UHS_SDR50] = "SDR50",
4653 [MMC_TIMING_UHS_SDR104] = "SDR104",
4654 [MMC_TIMING_UHS_DDR50] = "DDR50",
4655 [MMC_TIMING_MMC_HS200] = "HS200",
4656 [MMC_TIMING_MMC_HS400] = "HS400",
4659 len = snprintf(buf, sizeof(buf), "%s\n",
4660 sdhci_tegra_timing[host->mmc->ios.timing]);
4661 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
/*
 * set_bus_timing - debugfs write handler limiting the host's advertised
 * bus timing capabilities to the mode named by the user ("legacy",
 * "highspeed", "SDR12", "SDR25", "SDR50", "SDR104" or "DDR50").
 * Only permitted while no card is present.  Builds a caps mask that
 * keeps the requested mode and every slower one, validates the request
 * against the host's original capabilities (caps_timing_orig, saved in
 * sdhci_tegra_misc_debugfs), then applies it; the limit takes effect at
 * the next card insertion.
 * NOTE(review): the extract is truncated - the local buf/mask/timing_req
 * /err declarations, several error-return statements and the final
 * 'return count' are missing from this copy.
 */
4664 static ssize_t set_bus_timing(struct file *file,
4665 const char __user *userbuf,
4666 size_t count, loff_t *ppos)
4668 struct sdhci_host *sdhost = file->private_data;
4669 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
4670 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4676 /* Ignore the request if card is not yet removed*/
4677 if (tegra_host->card_present != 0) {
4678 dev_err(mmc_dev(sdhost->mmc),
4679 "Sdcard not removed. Set bus timing denied\n");
4684 if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) {
4689 buf[count-1] = '\0';
4691 /*prepare the temp mask to mask higher host timing modes wrt user
4694 mask = ~(MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_DDR50
4695 | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25
4696 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104);
4697 if (strcmp(buf, "highspeed") == 0) {
4698 timing_req = MMC_CAP_SD_HIGHSPEED;
4699 mask |= MMC_CAP_SD_HIGHSPEED;
4700 } else if (strcmp(buf, "SDR12") == 0) {
4701 timing_req = MMC_CAP_UHS_SDR12;
4702 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12);
4703 } else if (strcmp(buf, "SDR25") == 0) {
4704 timing_req = MMC_CAP_UHS_SDR25;
4705 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12
4706 | MMC_CAP_UHS_SDR25);
4707 } else if (strcmp(buf, "SDR50") == 0) {
4708 timing_req = MMC_CAP_UHS_SDR50;
4709 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12
4710 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50);
4711 } else if (strcmp(buf, "SDR104") == 0) {
4712 timing_req = MMC_CAP_UHS_SDR104;
4713 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12
4714 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
4715 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50);
4716 } else if (strcmp(buf, "DDR50") == 0) {
4717 timing_req = MMC_CAP_UHS_DDR50;
4718 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12
4719 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
4720 | MMC_CAP_UHS_DDR50);
4721 } else if (strcmp(buf, "legacy")) {
/* Anything other than the known names (incl. "legacy") is rejected. */
4723 dev_err(mmc_dev(sdhost->mmc),
4724 "Invalid bus timing requested %d\n", err);
4728 /*Checks if user requested mode is supported by host*/
4729 if (timing_req && (!(sdhost->caps_timing_orig & timing_req))) {
4731 dev_err(mmc_dev(sdhost->mmc),
4732 "Timing not supported by Host %d\n", err);
4737 *Limit the capability of host upto user requested timing
4739 sdhost->mmc->caps |= sdhost->caps_timing_orig;
4740 sdhost->mmc->caps &= mask;
4742 dev_dbg(mmc_dev(sdhost->mmc),
4743 "Host Bus Timing limited to %s mode\n", buf);
4744 dev_dbg(mmc_dev(sdhost->mmc),
4745 "when sdcard is inserted next time, bus timing");
4746 dev_dbg(mmc_dev(sdhost->mmc),
4747 "gets selected based on card speed caps");
/* debugfs file_operations for the bus_timing file (free-form text read/
 * write) and the card_insert simple attribute defined above. */
4755 static const struct file_operations sdhci_host_bus_timing_fops = {
4756 .read = get_bus_timing,
4757 .write = set_bus_timing,
4758 .open = simple_open,
4759 .owner = THIS_MODULE,
4760 .llseek = default_llseek,
4763 DEFINE_SIMPLE_ATTRIBUTE(sdhci_tegra_card_insert_fops, get_card_insert,
4764 set_card_insert, "%llu\n");
/*
 * sdhci_tegra_misc_debugfs - create the bus_timing and card_insert
 * debugfs files.  Saves the host's original timing capability bits in
 * caps_timing_orig first, since set_bus_timing() may later mask them.
 * On failure, records __LINE__, removes the tree and logs the line.
 * NOTE(review): the extract is truncated - the "goto err" jumps, the
 * success return and the err: label are missing from this copy.
 */
4765 static void sdhci_tegra_misc_debugfs(struct sdhci_host *host)
4767 struct dentry *root = host->debugfs_root;
4768 unsigned saved_line;
4770 *backup original host timing capabilities as debugfs
4771 *may override it later
4773 host->caps_timing_orig = host->mmc->caps &
4774 (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_DDR50
4775 | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25
4776 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104);
4779 root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
4780 if (IS_ERR_OR_NULL(root)) {
4781 saved_line = __LINE__;
4784 host->debugfs_root = root;
4787 if (!debugfs_create_file("bus_timing", S_IRUSR | S_IWUSR, root, host,
4788 &sdhci_host_bus_timing_fops)) {
4789 saved_line = __LINE__;
4793 if (!debugfs_create_file("card_insert", S_IRUSR | S_IWUSR, root, host,
4794 &sdhci_tegra_card_insert_fops)) {
4795 saved_line = __LINE__;
4802 debugfs_remove_recursive(root);
4803 host->debugfs_root = NULL;
4805 pr_err("%s %s:Failed to initialize debugfs functionality at line=%d\n",
4806 __func__, mmc_hostname(host->mmc), saved_line);
/*
 * sdhci_handle_boost_mode_tap - sysfs store handler for the cmd_state
 * attribute.  Accepts 1 (TAP_CMD_TRIM_DEFAULT_VOLTAGE) or 2
 * (TAP_CMD_TRIM_HIGH_VOLTAGE) and reprograms the tap delay from the
 * tuning data accordingly.  Only meaningful for UHS SD / HS200 cards;
 * a no-op if the requested state equals the current one.  Ensures the
 * host clock is on, waits for in-flight data, and applies the tap value
 * under host->lock.
 * NOTE(review): the extract is truncated - the tap_cmd/present_state
 * declarations, several return statements, the busy-wait delay inside
 * the DOING_WRITE/DOING_READ loop, the switch statement head and the
 * final 'return count' are missing from this copy.
 */
4810 static ssize_t sdhci_handle_boost_mode_tap(struct device *dev,
4811 struct device_attribute *attr, const char *buf, size_t count)
4814 struct mmc_card *card;
4815 char *p = (char *)buf;
4816 struct sdhci_host *host = dev_get_drvdata(dev);
4817 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4818 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4819 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
4820 struct tegra_sdhci_platform_data *plat;
4821 struct tegra_tuning_data *tuning_data;
4825 tap_cmd = memparse(p, &p);
4827 card = host->mmc->card;
4831 /* if not uhs -- no tuning and no tap value to set */
4832 if (!mmc_sd_card_uhs(card) && !mmc_card_hs200(card))
4835 /* if no change in tap value -- just exit */
4836 if (tap_cmd == tegra_host->tap_cmd)
4839 if ((tap_cmd != TAP_CMD_TRIM_DEFAULT_VOLTAGE) &&
4840 (tap_cmd != TAP_CMD_TRIM_HIGH_VOLTAGE)) {
4841 pr_info("echo 1 > cmd_state # to set normal voltage\n");
4842 pr_info("echo 2 > cmd_state # to set high voltage\n");
4846 tegra_host->tap_cmd = tap_cmd;
4847 plat = pdev->dev.platform_data;
4848 tuning_data = sdhci_tegra_get_tuning_data(host, host->max_clk);
4849 /* Check if host clock is enabled */
4850 if (!tegra_host->clk_enabled) {
4851 /* Nothing to do if the host is not powered ON */
4852 if (host->mmc->ios.power_mode != MMC_POWER_ON)
4854 else if (IS_RTPM_DELAY_CG(plat->rtpm_type))
4855 tegra_sdhci_set_clock(host, host->mmc->ios.clock);
4858 /* Wait for any on-going data transfers */
4859 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
4860 while (present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) {
4865 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
4868 spin_lock(&host->lock);
4870 case TAP_CMD_TRIM_DEFAULT_VOLTAGE:
4871 /* set tap value for voltage range 1.1 to 1.25 */
4872 sdhci_tegra_set_tap_delay(host, tuning_data->best_tap_value);
4875 case TAP_CMD_TRIM_HIGH_VOLTAGE:
4876 /* set tap value for voltage range 1.25 to 1.39 */
4877 sdhci_tegra_set_tap_delay(host,
4878 tuning_data->nom_best_tap_value);
4881 spin_unlock(&host->lock);
/* Undo the temporary clock enable taken above for delayed CG rtpm. */
4882 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
4883 tegra_sdhci_set_clock(host, 0);
4887 static ssize_t sdhci_show_turbo_mode(struct device *dev,
4888 struct device_attribute *attr, char *buf)
4890 struct sdhci_host *host = dev_get_drvdata(dev);
4891 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4892 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4894 return sprintf(buf, "%d\n", tegra_host->tap_cmd);
/* sysfs attribute "cmd_state" (0644): read = current tap command state,
 * write = request default/high-voltage tap trim. */
4897 static DEVICE_ATTR(cmd_state, 0644, sdhci_show_turbo_mode,
4898 sdhci_handle_boost_mode_tap);
/*
 * tegra_sdhci_reboot_notify - reboot notifier callback; disables the
 * card regulators on shutdown/restart so rails are off across reboot.
 * NOTE(review): the extract is truncated - the event-type switch, the
 * err declaration and the NOTIFY_* return are missing from this copy.
 */
4900 static int tegra_sdhci_reboot_notify(struct notifier_block *nb,
4901 unsigned long event, void *data)
4903 struct sdhci_tegra *tegra_host =
4904 container_of(nb, struct sdhci_tegra, reboot_notify);
4910 err = tegra_sdhci_configure_regulators(tegra_host,
4911 CONFIG_REG_DIS, 0, 0);
4913 pr_err("Disable regulator in reboot notify failed %d\n",
/*
 * tegra_sdhci_ios_config_enter - pre-ios hook.  Tegra controllers need
 * the clock running for register access, so ensure a clock (at least
 * f_min) is on before ios processing; also switch between the DDR and
 * SDR clock sources when entering/leaving DDR50 timing, restoring the
 * previous frequency afterwards.
 * NOTE(review): the extract is truncated - returns after the early
 * checks and the change_clk assignment/guard lines are missing from
 * this copy.
 */
4921 static void tegra_sdhci_ios_config_enter(struct sdhci_host *sdhci,
4922 struct mmc_ios *ios)
4924 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
4925 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4926 struct clk *new_mode_clk;
4927 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
4928 struct tegra_sdhci_platform_data *plat;
4929 bool change_clk = false;
4932 * Tegra sdmmc controllers require clock to be enabled for any register
4933 * access. Set the minimum controller clock if no clock is requested.
4935 plat = pdev->dev.platform_data;
4936 if (!IS_RTPM_DELAY_CG(plat->rtpm_type)) {
4937 if (ios->clock && (ios->clock != sdhci->clock))
4938 tegra_sdhci_set_clock(sdhci, ios->clock);
4940 if (!sdhci->clock && !ios->clock) {
4941 tegra_sdhci_set_clock(sdhci, sdhci->mmc->f_min);
4942 sdhci->clock = sdhci->mmc->f_min;
4943 } else if (ios->clock && (ios->clock != sdhci->clock)) {
4944 tegra_sdhci_set_clock(sdhci, ios->clock);
4949 * Check for DDR50 mode setting and set ddr_clk if not already
4950 * done. Return if only one clock option is available.
4952 if (!tegra_host->ddr_clk || !tegra_host->sdr_clk) {
4955 if ((ios->timing == MMC_TIMING_UHS_DDR50) &&
4956 !tegra_host->is_ddr_clk_set) {
4958 new_mode_clk = tegra_host->ddr_clk;
4959 } else if ((ios->timing != MMC_TIMING_UHS_DDR50) &&
4960 tegra_host->is_ddr_clk_set) {
4962 new_mode_clk = tegra_host->sdr_clk;
4966 /* below clock on/off also needed for MMC_RTPM */
4967 tegra_sdhci_set_clock(sdhci, 0);
4968 pltfm_host->clk = new_mode_clk;
4969 /* Restore the previous frequency */
4970 tegra_sdhci_set_clock(sdhci, sdhci->max_clk);
4971 tegra_host->is_ddr_clk_set =
4972 !tegra_host->is_ddr_clk_set;
/*
 * tegra_sdhci_ios_config_exit - post-ios hook.  On MMC_POWER_OFF it runs
 * the retuning cleanup (tegra_sdhci_power_off) and disables the card
 * regulators; otherwise it (re-)enables them.  Finally, if no clock was
 * requested and host clock gating is not being skipped, turns the
 * controller clock off for delayed-clock-gate runtime PM.
 * NOTE(review): the extract is truncated - the err declaration and the
 * else keyword joining the enable/disable regulator branches are missing
 * from this copy.
 */
4977 static void tegra_sdhci_ios_config_exit(struct sdhci_host *sdhci,
4978 struct mmc_ios *ios)
4980 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
4981 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4983 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
4984 struct tegra_sdhci_platform_data *plat;
4986 plat = pdev->dev.platform_data;
4988 * Do any required handling for retuning requests before powering off
4991 if (ios->power_mode == MMC_POWER_OFF) {
4992 tegra_sdhci_power_off(sdhci, ios->power_mode);
4993 err = tegra_sdhci_configure_regulators(tegra_host,
4994 CONFIG_REG_DIS, 0, 0);
4996 pr_err("Disable regulators failed in ios:%d\n", err);
4998 err = tegra_sdhci_configure_regulators(tegra_host,
4999 CONFIG_REG_EN, 0, 0);
5001 pr_err("Enable regulator failed in ios:%d\n", err);
5005 * In case of power off, turn off controller clock now as all the
5006 * required register accesses are already done.
5008 if (!ios->clock && !sdhci->mmc->skip_host_clkgate) {
5009 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
5010 tegra_sdhci_set_clock(sdhci, 0);
5014 static int tegra_sdhci_get_drive_strength(struct sdhci_host *sdhci,
5015 unsigned int max_dtr, int host_drv, int card_drv)
5017 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
5018 struct sdhci_tegra *tegra_host = pltfm_host->priv;
5019 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
5021 return plat->default_drv_type;
/*
 * tegra_sdhci_config_tap - save or apply tap delay values.
 * SAVE_TUNED_TAP reads the current tap field out of the vendor clock
 * control register and caches it as tuned_tap_delay (marking tuning
 * done); SET_DEFAULT_TAP applies the platform default; the remaining
 * (visible) branch re-applies the cached tuned value.
 * NOTE(review): the extract is truncated - the tap_delay declaration,
 * the switch head, break statements and the SET_TUNED_TAP case label
 * are missing from this copy.
 */
5024 static void tegra_sdhci_config_tap(struct sdhci_host *sdhci, u8 option)
5026 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
5027 struct sdhci_tegra *tegra_host = pltfm_host->priv;
5031 case SAVE_TUNED_TAP:
5032 tap_delay = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
5033 tap_delay >>= SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT;
5034 tap_delay &= SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK;
5035 tegra_host->tuned_tap_delay = tap_delay;
5036 tegra_host->tuning_status = TUNING_STATUS_DONE;
5038 case SET_DEFAULT_TAP:
5039 sdhci_tegra_set_tap_delay(sdhci, tegra_host->plat->tap_delay);
5042 sdhci_tegra_set_tap_delay(sdhci, tegra_host->tuned_tap_delay);
5045 dev_err(mmc_dev(sdhci->mmc),
5046 "Invalid argument passed to tap config\n");
/*
 * sdhci_tegra_select_drive_strength - apply the per-UHS-mode pinctrl pad
 * drive strength state, if one was provided in DT; warns on failure and
 * logs a debug message when no state exists for the mode.
 * NOTE(review): the extract is truncated - the uhs parameter in the
 * signature, the ret declaration and several closing braces are missing
 * from this copy.
 */
5050 static void sdhci_tegra_select_drive_strength(struct sdhci_host *host,
5053 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5054 struct sdhci_tegra *tegra_host = pltfm_host->priv;
5057 if (!IS_ERR_OR_NULL(tegra_host->pinctrl_sdmmc)) {
5058 if (!IS_ERR_OR_NULL(tegra_host->sdmmc_pad_ctrl[uhs])) {
5059 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
5060 tegra_host->sdmmc_pad_ctrl[uhs]);
5062 dev_warn(mmc_dev(host->mmc),
5063 "setting pad strength for sdcard mode %d failed\n", uhs);
5066 dev_dbg(mmc_dev(host->mmc),
5067 "No custom pad-ctrl strength settings present for sdcard %d mode\n", uhs);
5073 * Set the max pio transfer limits to allow for dynamic switching between dma
5074 * and pio modes if the platform data indicates support for it. Option to set
5075 * different limits for different interfaces.
5077 static void tegra_sdhci_set_max_pio_transfer_limits(struct sdhci_host *sdhci)
5079 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
5080 struct sdhci_tegra *tegra_host = pltfm_host->priv;
/* No-op unless DT enabled dynamic dma/pio switching and a card exists. */
5082 if (!tegra_host->plat->dynamic_dma_pio_switch || !sdhci->mmc->card)
5085 switch (sdhci->mmc->card->type) {
/* NOTE(review): the case labels (per card type) are missing from this
 * truncated extract; all visible branches zero both limits. */
5087 sdhci->max_pio_size = 0;
5088 sdhci->max_pio_blocks = 0;
5091 sdhci->max_pio_size = 0;
5092 sdhci->max_pio_blocks = 0;
5095 sdhci->max_pio_size = 0;
5096 sdhci->max_pio_blocks = 0;
5099 dev_err(mmc_dev(sdhci->mmc),
5100 "Unknown device type. No max pio limits set\n");
/* sdhci host-op vtable wiring the Tegra-specific implementations into
 * the generic sdhci core (register access, clocking, tuning, PM). */
5104 static const struct sdhci_ops tegra_sdhci_ops = {
5105 .get_ro = tegra_sdhci_get_ro,
5106 .get_cd = tegra_sdhci_get_cd,
5107 .read_l = tegra_sdhci_readl,
5108 .read_w = tegra_sdhci_readw,
5109 .write_l = tegra_sdhci_writel,
5110 .write_w = tegra_sdhci_writew,
5111 .platform_bus_width = tegra_sdhci_buswidth,
5112 .set_clock = tegra_sdhci_set_clock,
5113 .suspend = tegra_sdhci_suspend,
5114 .resume = tegra_sdhci_resume,
5115 .platform_resume = tegra_sdhci_post_resume,
5116 .platform_reset_exit = tegra_sdhci_reset_exit,
5117 .platform_get_bus = tegra_sdhci_get_bus,
5118 .platform_ios_config_enter = tegra_sdhci_ios_config_enter,
5119 .platform_ios_config_exit = tegra_sdhci_ios_config_exit,
5120 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
5121 .switch_signal_voltage = tegra_sdhci_signal_voltage_switch,
5122 .validate_sd2_0 = tegra_sdhci_validate_sd2_0,
5123 .switch_signal_voltage_exit = tegra_sdhci_do_calibration,
5124 .execute_freq_tuning = sdhci_tegra_execute_tuning,
5125 .sd_error_stats = sdhci_tegra_sd_error_stats,
5126 #ifdef CONFIG_MMC_FREQ_SCALING
5127 .dfs_gov_init = sdhci_tegra_freq_gov_init,
5128 .dfs_gov_get_target_freq = sdhci_tegra_get_target_freq,
5130 .get_drive_strength = tegra_sdhci_get_drive_strength,
5131 .post_init = tegra_sdhci_post_init,
5132 .dump_host_cust_regs = tegra_sdhci_dumpregs,
5133 .get_max_tuning_loop_counter = sdhci_tegra_get_max_tuning_loop_counter,
5134 .config_tap_delay = tegra_sdhci_config_tap,
5135 .is_tuning_done = tegra_sdhci_is_tuning_done,
5136 .get_max_pio_transfer_limits = tegra_sdhci_set_max_pio_transfer_limits,
/* Tegra114 (T11x) platform data and SoC quirk/tuning tables. */
5139 static struct sdhci_pltfm_data sdhci_tegra11_pdata = {
5140 .quirks = TEGRA_SDHCI_QUIRKS,
5141 .quirks2 = TEGRA_SDHCI_QUIRKS2,
5142 .ops = &tegra_sdhci_ops,
5145 static struct sdhci_tegra_soc_data soc_data_tegra11 = {
5146 .pdata = &sdhci_tegra11_pdata,
5147 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
5148 NVQUIRK_SET_DRIVE_STRENGTH |
5149 NVQUIRK_SET_TRIM_DELAY |
5150 NVQUIRK_ENABLE_DDR50 |
5151 NVQUIRK_ENABLE_HS200 |
5152 NVQUIRK_ENABLE_AUTO_CMD23 |
5153 NVQUIRK_INFINITE_ERASE_TIMEOUT,
5154 .parent_clk_list = {"pll_p", "pll_c"},
5155 .tuning_freq_list = {81600000, 156000000, 200000000},
5156 .t2t_coeffs = t11x_tuning_coeffs,
5157 .t2t_coeffs_count = 3,
5158 .tap_hole_coeffs = t11x_tap_hole_coeffs,
5159 .tap_hole_coeffs_count = 12,
/* Tegra124 (T12x) platform data and SoC quirk/tuning tables; adds
 * 64-bit DMA/addressing and host-off-card-on support over T11x. */
5162 static struct sdhci_pltfm_data sdhci_tegra12_pdata = {
5163 .quirks = TEGRA_SDHCI_QUIRKS,
5164 .quirks2 = TEGRA_SDHCI_QUIRKS2 |
5165 SDHCI_QUIRK2_HOST_OFF_CARD_ON |
5166 SDHCI_QUIRK2_SUPPORT_64BIT_DMA |
5167 SDHCI_QUIRK2_USE_64BIT_ADDR,
5168 .ops = &tegra_sdhci_ops,
5171 static struct sdhci_tegra_soc_data soc_data_tegra12 = {
5172 .pdata = &sdhci_tegra12_pdata,
5173 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
5174 NVQUIRK_SET_TRIM_DELAY |
5175 NVQUIRK_ENABLE_DDR50 |
5176 NVQUIRK_ENABLE_HS200 |
5177 NVQUIRK_ENABLE_AUTO_CMD23 |
5178 NVQUIRK_INFINITE_ERASE_TIMEOUT |
5179 NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD |
5180 NVQUIRK_HIGH_FREQ_TAP_PROCEDURE |
5181 NVQUIRK_SET_CALIBRATION_OFFSETS,
5182 .parent_clk_list = {"pll_p", "pll_c"},
5183 .tuning_freq_list = {81600000, 136000000, 200000000},
5184 .t2t_coeffs = t12x_tuning_coeffs,
5185 .t2t_coeffs_count = 3,
5186 .tap_hole_coeffs = t12x_tap_hole_coeffs,
5187 .tap_hole_coeffs_count = 14,
/* Tegra210 (T21x) platform data and SoC quirk tables; uses a fully
 * custom quirks2 set (non-standard tuning, voltage switching, RTPM)
 * instead of the common TEGRA_SDHCI_QUIRKS2, plus HS400 support. */
5190 static struct sdhci_pltfm_data sdhci_tegra21_pdata = {
5191 .quirks = TEGRA_SDHCI_QUIRKS,
5192 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
5193 SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING |
5194 SDHCI_QUIRK2_NON_STD_TUNING_LOOP_CNTR |
5195 SDHCI_QUIRK2_SKIP_TUNING |
5196 SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO |
5197 SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK |
5198 SDHCI_QUIRK2_HOST_OFF_CARD_ON |
5199 SDHCI_QUIRK2_USE_64BIT_ADDR |
5200 SDHCI_QUIRK2_NON_STD_TUN_CARD_CLOCK |
5201 SDHCI_QUIRK2_NON_STD_RTPM |
5202 SDHCI_QUIRK2_SUPPORT_64BIT_DMA,
5203 .ops = &tegra_sdhci_ops,
5206 static struct sdhci_tegra_soc_data soc_data_tegra21 = {
5207 .pdata = &sdhci_tegra21_pdata,
5208 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
5209 NVQUIRK_SET_TRIM_DELAY |
5210 NVQUIRK_ENABLE_DDR50 |
5211 NVQUIRK_ENABLE_HS200 |
5212 NVQUIRK_ENABLE_HS400 |
5213 NVQUIRK_ENABLE_AUTO_CMD23 |
5214 NVQUIRK_INFINITE_ERASE_TIMEOUT |
5215 NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD |
5216 NVQUIRK_SET_SDMEMCOMP_VREF_SEL |
5217 NVQUIRK_HIGH_FREQ_TAP_PROCEDURE |
5218 NVQUIRK_SET_CALIBRATION_OFFSETS |
5219 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK |
5220 NVQUIRK_UPDATE_PAD_CNTRL_REG |
5221 NVQUIRK_UPDATE_PIN_CNTRL_REG,
5222 .nvquirks2 = NVQUIRK2_UPDATE_HW_TUNING_CONFG |
5223 NVQUIRK2_CONFIG_PWR_DET |
5224 NVQUIRK2_BROKEN_SD2_0_SUPPORT |
5225 NVQUIRK2_SELECT_SDR50_MODE |
5226 NVQUIRK2_ADD_DELAY_AUTO_CALIBRATION |
5227 NVQUIRK2_SET_PAD_E_INPUT_VOL |
5228 NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH,
/* Device-tree compatible strings mapped to the per-SoC data above. */
5231 static const struct of_device_id sdhci_tegra_dt_match[] = {
5232 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra21 },
5233 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra12 },
5234 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra11 },
5237 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
/*
 * sdhci_tegra_dt_parse_pdata - build a tegra_sdhci_platform_data from
 * the device-tree node: GPIOs (cd/wp/power and the named sdmmc pin
 * list), bus width, tap/trim delays (SDR and DDR), clock limits,
 * calibration and pad settings, OCR mask, fixed-clock table, runtime-PM
 * type and assorted boolean feature flags.
 * NOTE(review): the extract is truncated - local declarations (val,
 * bus_width, i, label, ret), the NULL-np early return, the allocation-
 * failure return, the bus-width assignment, the OCR value comparisons
 * selecting each MMC_OCR_*_MASK and the final 'return plat' are missing
 * from this copy.
 */
5239 static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata(
5240 struct platform_device *pdev)
5244 struct tegra_sdhci_platform_data *plat;
5245 struct device_node *np = pdev->dev.of_node;
5253 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
5255 dev_err(&pdev->dev, "Can't allocate platform data\n");
5259 plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
5260 plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
5261 plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
5263 if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
5267 of_property_read_u32(np, "tap-delay", &plat->tap_delay);
5268 plat->is_ddr_tap_delay = of_property_read_bool(np,
5269 "nvidia,is-ddr-tap-delay");
5270 of_property_read_u32(np, "nvidia,ddr-tap-delay", &plat->ddr_tap_delay);
5271 of_property_read_u32(np, "trim-delay", &plat->trim_delay);
5272 plat->is_ddr_trim_delay = of_property_read_bool(np,
5273 "nvidia,is-ddr-trim-delay");
5274 of_property_read_u32(np, "ddr-trim-delay", &plat->ddr_trim_delay);
5275 of_property_read_u32(np, "ddr-clk-limit", &plat->ddr_clk_limit);
5276 of_property_read_u32(np, "max-clk-limit", &plat->max_clk_limit);
5277 of_property_read_u32(np, "id", &plat->id);
5278 of_property_read_u32(np, "dqs-trim-delay", &plat->dqs_trim_delay);
5279 of_property_read_u32(np, "dqs-trim-delay-hs533", &plat->dqs_trim_delay_hs533);
5281 of_property_read_u32(np, "compad-vref-3v3", &plat->compad_vref_3v3);
5282 of_property_read_u32(np, "compad-vref-1v8", &plat->compad_vref_1v8);
5283 of_property_read_u32(np, "uhs-mask", &plat->uhs_mask);
5284 of_property_read_u32(np, "calib-3v3-offsets", &plat->calib_3v3_offsets);
5285 of_property_read_u32(np, "calib-1v8-offsets", &plat->calib_1v8_offsets);
5286 of_property_read_u32(np, "auto-cal-step", &plat->auto_cal_step);
5287 plat->disable_auto_cal = of_property_read_bool(np,
5288 "nvidia,disable-auto-cal");
5290 plat->power_off_rail = of_property_read_bool(np,
5293 plat->pwr_off_during_lp0 = of_property_read_bool(np,
5294 "pwr-off-during-lp0");
5296 plat->limit_vddio_max_volt = of_property_read_bool(np,
5297 "nvidia,limit-vddio-max-volt");
5298 plat->cd_wakeup_incapable = of_property_read_bool(np,
5299 "cd_wakeup_incapable");
5301 plat->mmc_data.built_in = of_property_read_bool(np, "built-in");
5302 plat->update_pinctrl_settings = of_property_read_bool(np,
5303 "nvidia,update-pinctrl-settings");
5304 plat->dll_calib_needed = of_property_read_bool(np,
5305 "nvidia,dll-calib-needed");
5306 plat->enb_ext_loopback = of_property_read_bool(np,
5307 "nvidia,enable-ext-loopback");
5308 plat->disable_clock_gate = of_property_read_bool(np,
5309 "disable-clock-gate");
5310 plat->enable_hs533_mode =
5311 of_property_read_bool(np, "nvidia,enable-hs533-mode");
5312 of_property_read_u32(np, "default-drv-type", &plat->default_drv_type);
5313 plat->en_io_trim_volt = of_property_read_bool(np,
5314 "nvidia,en-io-trim-volt");
5315 plat->is_emmc = of_property_read_bool(np, "nvidia,is-emmc");
5316 plat->is_sd_device = of_property_read_bool(np, "nvidia,sd-device");
5318 of_property_read_bool(np, "nvidia,enable-strobe-mode");
/* Map the DT ocr value onto one of the predefined voltage masks. */
5320 if (!of_property_read_u32(np, "mmc-ocr-mask", &val)) {
5322 plat->mmc_data.ocr_mask = MMC_OCR_1V8_MASK;
5324 plat->mmc_data.ocr_mask = MMC_OCR_2V8_MASK;
5326 plat->mmc_data.ocr_mask = MMC_OCR_3V2_MASK;
5328 plat->mmc_data.ocr_mask = MMC_OCR_3V3_MASK;
5330 plat->pwrdet_support = of_property_read_bool(np, "pwrdet-support");
5331 if (of_find_property(np, "fixed-clock-freq", NULL)) {
5332 plat->is_fix_clock_freq = true;
5333 of_property_read_u32_array(np,
5335 (u32 *)&plat->fixed_clk_freq_table,
5336 MMC_TIMINGS_MAX_MODES);
5338 plat->enable_autocal_slew_override = of_property_read_bool(np,
5339 "nvidia,auto-cal-slew-override");
5341 ret = of_property_read_u32(np, "nvidia,runtime-pm-type",
5343 /* use delayed clock gate if runtime type not specified explicitly */
5345 plat->rtpm_type = RTPM_TYPE_DELAY_CG;
5348 of_property_read_bool(np, "nvidia,enable-cq");
5350 plat->en_periodic_calib = of_property_read_bool(np,
5351 "nvidia,en-periodic-calib");
/* Collect the optional list of sdmmc pin GPIOs, driven high at init. */
5352 plat->pin_count = of_gpio_named_count(np, "nvidia,sdmmc-pin-gpios");
5353 for (i = 0; i < plat->pin_count; ++i) {
5354 val = of_get_named_gpio(np, "nvidia,sdmmc-pin-gpios", i);
5355 if (gpio_is_valid(val)) {
5356 plat->gpios[i].gpio = val;
5357 plat->gpios[i].flags = GPIOF_OUT_INIT_HIGH;
5358 sprintf(label, "sdmmc_pin%d", i);
5359 plat->gpios[i].label = label;
/*
 * sdhci_tegra_get_pll_from_dt - read the optional "pll_source" string
 * list from the device tree into @parent_clk_list (at most @size
 * entries, warning if the DT provides more).
 * NOTE(review): the extract is truncated - the i/cnt declarations, the
 * early-return when the property is absent, the cnt<0 error return, the
 * cnt clamping against @size and the final return are missing from this
 * copy.
 */
5365 static int sdhci_tegra_get_pll_from_dt(struct platform_device *pdev,
5366 const char **parent_clk_list, int size)
5368 struct device_node *np = pdev->dev.of_node;
5369 const char *pll_str;
5375 if (!of_find_property(np, "pll_source", NULL))
5378 cnt = of_property_count_strings(np, "pll_source");
5383 dev_warn(&pdev->dev,
5384 "pll list provide in DT exceeds max supported\n");
5388 for (i = 0; i < cnt; i++) {
5389 of_property_read_string_index(np, "pll_source", i, &pll_str);
5390 parent_clk_list[i] = pll_str;
5396 * sdhci_tegra_check_bondout
5398 * check whether the specified SDHCI instance is bonded out
5400 * do not validate ID itself, instead, just make sure it's less
5401 * than 4, so that we do not index beyond the end of position array
5403 * non-zero return value means bond-out, so that instance doesn't exist
5405 static inline int sdhci_tegra_check_bondout(unsigned int id)
5407 #ifdef CONFIG_ARCH_TEGRA_21x_SOC
5408 enum tegra_bondout_dev dev[4] = {
5416 return tegra_bonded_out_dev(dev[id]);
5424 static int sdhci_tegra_init_pinctrl_info(struct device *dev,
5425 struct sdhci_tegra *tegra_host,
5426 struct tegra_sdhci_platform_data *plat)
5428 struct device_node *np = dev->of_node;
5429 const char *drive_gname;
5432 struct pinctrl_state *pctl_state;
5437 if (plat->pwrdet_support) {
5438 tegra_host->sdmmc_padctrl = devm_padctrl_get(dev, "sdmmc");
5439 if (IS_ERR(tegra_host->sdmmc_padctrl)) {
5440 ret = PTR_ERR(tegra_host->sdmmc_padctrl);
5441 tegra_host->sdmmc_padctrl = NULL;
5445 if (plat->update_pinctrl_settings) {
5446 tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
5447 if (IS_ERR_OR_NULL(tegra_host->pinctrl_sdmmc)) {
5448 dev_err(dev, "Missing pinctrl info\n");
5452 tegra_host->schmitt_enable[0] =
5453 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5454 "sdmmc_schmitt_enable");
5455 if (IS_ERR_OR_NULL(tegra_host->schmitt_enable[0]))
5456 dev_dbg(dev, "Missing schmitt enable state\n");
5458 tegra_host->schmitt_enable[1] =
5459 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5460 "sdmmc_clk_schmitt_enable");
5461 if (IS_ERR_OR_NULL(tegra_host->schmitt_enable[1]))
5462 dev_dbg(dev, "Missing clk schmitt enable state\n");
5464 tegra_host->schmitt_disable[0] =
5465 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5466 "sdmmc_schmitt_disable");
5467 if (IS_ERR_OR_NULL(tegra_host->schmitt_disable[0]))
5468 dev_dbg(dev, "Missing schmitt disable state\n");
5470 tegra_host->schmitt_disable[1] =
5471 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5472 "sdmmc_clk_schmitt_disable");
5473 if (IS_ERR_OR_NULL(tegra_host->schmitt_disable[1]))
5474 dev_dbg(dev, "Missing clk schmitt disable state\n");
5476 for (i = 0; i < 2; i++) {
5477 if (!IS_ERR_OR_NULL(tegra_host->schmitt_disable[i])) {
5478 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
5479 tegra_host->schmitt_disable[i]);
5481 dev_warn(dev, "setting schmitt state failed\n");
5484 tegra_host->drv_code_strength =
5485 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5487 if (IS_ERR_OR_NULL(tegra_host->drv_code_strength))
5488 dev_dbg(dev, "Missing sdmmc drive code state\n");
5490 tegra_host->default_drv_code_strength =
5491 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5492 "sdmmc_default_drv_code");
5493 if (IS_ERR_OR_NULL(tegra_host->default_drv_code_strength))
5494 dev_dbg(dev, "Missing sdmmc default drive code state\n");
5496 /* Apply the default_mode settings to all modes of SD/MMC
5497 initially and then later update the pad strengths depending
5498 upon the states specified if any */
5499 pctl_state = pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5501 if (IS_ERR_OR_NULL(pctl_state)) {
5502 dev_dbg(dev, "Missing default mode pad control state\n");
5505 for (i = 0; i < MMC_TIMINGS_MAX_MODES; i++)
5506 tegra_host->sdmmc_pad_ctrl[i] = pctl_state;
5509 pctl_state = pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5511 if (IS_ERR_OR_NULL(pctl_state)) {
5512 dev_dbg(dev, "Missing sdr50 pad control state\n");
5515 tegra_host->sdmmc_pad_ctrl[MMC_TIMING_UHS_SDR50] = pctl_state;
5516 tegra_host->sdmmc_pad_ctrl[MMC_TIMING_UHS_DDR50] = pctl_state;
5519 pctl_state = pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5521 if (IS_ERR_OR_NULL(pctl_state)) {
5522 dev_dbg(dev, "Missing sdr104 pad control state\n");
5525 tegra_host->sdmmc_pad_ctrl[MMC_TIMING_UHS_SDR104] = pctl_state;
5528 /*Select the default state*/
5529 if (!IS_ERR_OR_NULL(tegra_host->sdmmc_pad_ctrl[MMC_TIMING_MMC_HS])) {
5530 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
5531 tegra_host->sdmmc_pad_ctrl[MMC_TIMING_MMC_HS]);
5533 dev_warn(dev, "setting default pad state failed\n");
5537 tegra_host->pinctrl = pinctrl_get_dev_from_of_property(np,
5538 "drive-pin-pinctrl");
5539 if (!tegra_host->pinctrl)
5542 drive_gname = of_get_property(np, "drive-pin-name", NULL);
5543 tegra_host->drive_group_sel = pinctrl_get_selector_from_group_name(
5544 tegra_host->pinctrl, drive_gname);
5548 static int sdhci_tegra_probe(struct platform_device *pdev)
5550 const struct of_device_id *match;
5551 const struct sdhci_tegra_soc_data *soc_data;
5552 struct sdhci_host *host;
5553 struct sdhci_pltfm_host *pltfm_host;
5554 struct tegra_sdhci_platform_data *plat;
5555 struct sdhci_tegra *tegra_host;
5556 unsigned int low_freq;
5557 unsigned int signal_voltage = 0;
5558 const char *parent_clk_list[TEGRA_SDHCI_MAX_PLL_SOURCE];
5561 u32 opt_subrevision;
5563 for (i = 0; i < ARRAY_SIZE(parent_clk_list); i++)
5564 parent_clk_list[i] = NULL;
5565 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
5567 soc_data = match->data;
5569 /* Use id tables and remove the following chip defines */
5570 #if defined(CONFIG_ARCH_TEGRA_11x_SOC)
5571 soc_data = &soc_data_tegra11;
5572 #elif defined(CONFIG_ARCH_TEGRA_21x_SOC)
5573 soc_data = &soc_data_tegra21;
5575 soc_data = &soc_data_tegra12;
5579 host = sdhci_pltfm_init(pdev, soc_data->pdata);
5582 return PTR_ERR(host);
5584 pltfm_host = sdhci_priv(host);
5586 plat = pdev->dev.platform_data;
5589 plat = sdhci_tegra_dt_parse_pdata(pdev);
5591 dev_err(mmc_dev(host->mmc), "missing platform data\n");
5595 pr_info("%s: %s line=%d runtime pm type=%s, disable-clock-gate=%d\n",
5596 mmc_hostname(host->mmc), __func__, __LINE__,
5597 GET_RTPM_TYPE(plat->rtpm_type),
5598 plat->disable_clock_gate);
5600 pr_err("%s using board files instead of DT\n",
5601 mmc_hostname(host->mmc));
5602 plat->rtpm_type = RTPM_TYPE_DELAY_CG;
5605 /* sdio delayed clock gate quirk in sdhci_host used */
5606 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
5607 host->quirks2 |= SDHCI_QUIRK2_DELAYED_CLK_GATE;
5608 if (IS_MMC_RTPM(plat->rtpm_type))
5609 host->quirks2 |= SDHCI_QUIRK2_MMC_RTPM;
5611 if (sdhci_tegra_check_bondout(plat->id)) {
5612 dev_err(mmc_dev(host->mmc), "bonded out\n");
5617 /* FIXME: This is for until dma-mask binding is supported in DT.
5618 * Set coherent_dma_mask for each Tegra SKUs.
5619 * If dma_mask is NULL, set it to coherent_dma_mask. */
5620 if (soc_data == &soc_data_tegra11)
5621 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
5623 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
5625 if (!pdev->dev.dma_mask)
5626 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
5628 tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
5630 dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
5635 tegra_host->dev = &pdev->dev;
5636 tegra_host->plat = plat;
5637 pdev->dev.platform_data = plat;
5639 tegra_host->sd_stat_head = devm_kzalloc(&pdev->dev,
5640 sizeof(struct sdhci_tegra_sd_stats), GFP_KERNEL);
5641 if (!tegra_host->sd_stat_head) {
5642 dev_err(mmc_dev(host->mmc), "failed to allocate sd_stat_head\n");
5647 tegra_host->soc_data = soc_data;
5648 pltfm_host->priv = tegra_host;
5650 /* check if DT provide list possible pll parents */
5651 if (sdhci_tegra_get_pll_from_dt(pdev,
5652 &parent_clk_list[0], ARRAY_SIZE(parent_clk_list))) {
5653 parent_clk_list[0] = soc_data->parent_clk_list[0];
5654 parent_clk_list[1] = soc_data->parent_clk_list[1];
5657 for (i = 0; i < ARRAY_SIZE(parent_clk_list); i++) {
5658 if (!parent_clk_list[i])
5660 tegra_host->pll_source[i].pll = clk_get_sys(NULL,
5661 parent_clk_list[i]);
5662 if (IS_ERR(tegra_host->pll_source[i].pll)) {
5663 rc = PTR_ERR(tegra_host->pll_source[i].pll);
5664 dev_err(mmc_dev(host->mmc),
5665 "clk[%d] error in getting %s: %d\n",
5666 i, parent_clk_list[i], rc);
5669 tegra_host->pll_source[i].pll_rate =
5670 clk_get_rate(tegra_host->pll_source[i].pll);
5672 dev_info(mmc_dev(host->mmc), "Parent select= %s rate=%ld\n",
5673 parent_clk_list[i], tegra_host->pll_source[i].pll_rate);
5676 #ifdef CONFIG_MMC_EMBEDDED_SDIO
5677 if (plat->mmc_data.embedded_sdio)
5678 mmc_set_embedded_sdio_data(host->mmc,
5679 &plat->mmc_data.embedded_sdio->cis,
5680 &plat->mmc_data.embedded_sdio->cccr,
5681 plat->mmc_data.embedded_sdio->funcs,
5682 plat->mmc_data.embedded_sdio->num_funcs);
5685 if (gpio_is_valid(plat->power_gpio)) {
5686 rc = gpio_request(plat->power_gpio, "sdhci_power");
5688 dev_err(mmc_dev(host->mmc),
5689 "failed to allocate power gpio\n");
5692 gpio_direction_output(plat->power_gpio, 1);
5695 if (gpio_is_valid(plat->cd_gpio)) {
5696 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
5698 dev_err(mmc_dev(host->mmc),
5699 "failed to allocate cd gpio\n");
5702 gpio_direction_input(plat->cd_gpio);
5704 tegra_host->card_present =
5705 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
5707 } else if (plat->mmc_data.register_status_notify) {
5708 plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host);
5711 if (plat->mmc_data.status) {
5712 plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc));
5715 if (gpio_is_valid(plat->wp_gpio)) {
5716 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
5718 dev_err(mmc_dev(host->mmc),
5719 "failed to allocate wp gpio\n");
5722 gpio_direction_input(plat->wp_gpio);
5726 * If there is no card detect gpio, assume that the
5727 * card is always present.
5729 if (!gpio_is_valid(plat->cd_gpio))
5730 tegra_host->card_present = 1;
5732 tegra_pd_add_device(&pdev->dev);
5733 /* Get the ddr clock */
5734 tegra_host->ddr_clk = clk_get(mmc_dev(host->mmc), "ddr");
5735 if (IS_ERR(tegra_host->ddr_clk)) {
5736 dev_err(mmc_dev(host->mmc), "ddr clk err\n");
5737 tegra_host->ddr_clk = NULL;
5740 /* Get high speed clock */
5741 tegra_host->sdr_clk = clk_get(mmc_dev(host->mmc), NULL);
5742 if (IS_ERR(tegra_host->sdr_clk)) {
5743 dev_err(mmc_dev(host->mmc), "sdr clk err\n");
5744 tegra_host->sdr_clk = NULL;
5745 /* If both ddr and sdr clks are missing, then fail probe */
5746 if (!tegra_host->ddr_clk && !tegra_host->sdr_clk) {
5747 dev_err(mmc_dev(host->mmc),
5748 "Failed to get ddr and sdr clks\n");
5754 if (tegra_host->sdr_clk) {
5755 pltfm_host->clk = tegra_host->sdr_clk;
5756 tegra_host->is_ddr_clk_set = false;
5758 pltfm_host->clk = tegra_host->ddr_clk;
5759 tegra_host->is_ddr_clk_set = true;
5762 if (clk_get_parent(pltfm_host->clk) == tegra_host->pll_source[0].pll)
5763 tegra_host->is_parent_pll_source_1 = true;
5765 /* enable clocks first time */
5766 rc = clk_prepare_enable(pltfm_host->clk);
5770 /* Reset the sdhci controller to clear all previous status.*/
5771 tegra_periph_reset_assert(pltfm_host->clk);
5773 tegra_periph_reset_deassert(pltfm_host->clk);
5775 tegra_host->emc_clk = devm_clk_get(mmc_dev(host->mmc), "emc");
5776 if (IS_ERR_OR_NULL(tegra_host->emc_clk)) {
5777 dev_err(mmc_dev(host->mmc), "Can't get emc clk\n");
5778 tegra_host->emc_clk = NULL;
5780 clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
5783 tegra_host->sclk = devm_clk_get(mmc_dev(host->mmc), "sclk");
5784 if (IS_ERR_OR_NULL(tegra_host->sclk)) {
5785 dev_err(mmc_dev(host->mmc), "Can't get sclk clock\n");
5786 tegra_host->sclk = NULL;
5788 clk_set_rate(tegra_host->sclk, SDMMC_AHB_MAX_FREQ);
5790 pltfm_host->priv = tegra_host;
5791 tegra_host->clk_enabled = true;
5792 host->is_clk_on = true;
5793 mutex_init(&tegra_host->set_clock_mutex);
5795 tegra_host->max_clk_limit = plat->max_clk_limit;
5796 tegra_host->ddr_clk_limit = plat->ddr_clk_limit;
5798 sdhci_tegra_init_pinctrl_info(&pdev->dev, tegra_host, plat);
5800 if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) {
5801 tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN;
5802 tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX;
5803 } else if (plat->mmc_data.ocr_mask & MMC_OCR_2V8_MASK) {
5804 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_2V8;
5805 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
5806 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V2_MASK) {
5807 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V2;
5808 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
5809 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V3_MASK) {
5810 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V3;
5811 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
5814 * Set the minV and maxV to default
5815 * voltage range of 2.7V - 3.6V
5817 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN;
5818 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
5821 if (plat->is_sd_device &&
5822 (tegra_get_chipid() == TEGRA_CHIPID_TEGRA21) &&
5823 (tegra_chip_get_revision() == TEGRA_REVISION_A01)) {
5824 opt_subrevision = tegra_get_fuse_opt_subrevision();
5825 if ((opt_subrevision == 0) || (opt_subrevision == 1))
5826 plat->limit_vddio_max_volt = true;
5829 if (plat->limit_vddio_max_volt) {
5830 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_2V8;
5831 tegra_host->vddio_max_uv = SDHOST_MAX_VOLT_SUPPORT;
5834 tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc),
5836 tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc),
5839 if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) {
5840 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
5841 "Assuming vddio_sdmmc is not required.\n",
5842 "vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg));
5843 tegra_host->vdd_io_reg = NULL;
5846 if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) {
5847 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
5848 " Assuming vddio_sd_slot is not required.\n",
5849 "vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg));
5850 tegra_host->vdd_slot_reg = NULL;
5853 if ((tegra_host->vdd_slot_reg || tegra_host->vdd_io_reg) &&
5854 (tegra_host->card_present)) {
5855 rc = tegra_sdhci_configure_regulators(tegra_host,
5856 CONFIG_REG_EN, 0, 0);
5858 dev_err(mmc_dev(host->mmc),
5859 "Enable regulators failed in probe %d\n", rc);
5863 if (plat && (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK))
5864 signal_voltage = MMC_SIGNAL_VOLTAGE_180;
5866 signal_voltage = MMC_SIGNAL_VOLTAGE_330;
5867 rc = tegra_sdhci_signal_voltage_switch(host, signal_voltage);
5869 dev_err(mmc_dev(host->mmc),
5870 "Init volt(%duV-%duV) setting failed %d\n",
5871 tegra_host->vddio_min_uv,
5872 tegra_host->vddio_max_uv, rc);
5873 regulator_put(tegra_host->vdd_io_reg);
5874 tegra_host->vdd_io_reg = NULL;
5878 tegra_host->tap_cmd = TAP_CMD_TRIM_DEFAULT_VOLTAGE;
5879 tegra_host->speedo = tegra_soc_speedo_0_value();
5880 dev_info(mmc_dev(host->mmc), "Speedo value %d\n", tegra_host->speedo);
5882 /* update t2t and tap_hole for automotive speedo */
5883 if (tegra_is_soc_automotive_speedo() &&
5884 (soc_data == &soc_data_tegra12)) {
5885 soc_data_tegra12.t2t_coeffs = t12x_automotive_tuning_coeffs;
5886 soc_data_tegra12.t2t_coeffs_count =
5887 ARRAY_SIZE(t12x_automotive_tuning_coeffs);
5888 soc_data_tegra12.tap_hole_coeffs =
5889 t12x_automotive_tap_hole_coeffs;
5890 soc_data_tegra12.tap_hole_coeffs_count =
5891 ARRAY_SIZE(t12x_automotive_tap_hole_coeffs);
5892 /* For automotive SDR50 mode POR frequency is 99Mhz */
5893 soc_data_tegra12.tuning_freq_list[0] = 99000000;
5894 soc_data_tegra12.nvquirks |=
5895 NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS;
5896 soc_data_tegra12.tap_hole_margins =
5897 t12x_automotive_tap_hole_margins;
5898 soc_data_tegra12.tap_hole_margins_count =
5899 ARRAY_SIZE(t12x_automotive_tap_hole_margins);
5900 /* feedback clock need to be enabled for non-tuning timing */
5901 if (plat->enb_ext_loopback)
5902 plat->enb_feedback_clock = true;
5904 host->mmc->pm_caps |= plat->pm_caps;
5905 host->mmc->pm_flags |= plat->pm_flags;
5906 host->mmc->caps |= MMC_CAP_ERASE;
5907 /* enable 1/8V DDR capable */
5908 host->mmc->caps |= MMC_CAP_1_8V_DDR;
5910 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
5911 host->mmc->caps |= MMC_CAP_SDIO_IRQ;
5912 host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
5913 if (plat->mmc_data.built_in) {
5914 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
5916 host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
5918 if (plat->cd_wakeup_incapable)
5919 host->mmc->pm_flags &= ~MMC_PM_IGNORE_PM_NOTIFY;
5921 /* disable access to boot partitions */
5922 host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
5924 if (soc_data->nvquirks & NVQUIRK_ENABLE_HS200)
5925 host->mmc->caps2 |= MMC_CAP2_HS200;
5927 if (soc_data->nvquirks & NVQUIRK_ENABLE_HS400)
5928 host->mmc->caps2 |= MMC_CAP2_HS400;
5930 if ((plat->enable_hs533_mode) && (host->mmc->caps2 & MMC_CAP2_HS400))
5931 host->mmc->caps2 |= MMC_CAP2_HS533;
5933 if (soc_data->nvquirks & NVQUIRK_ENABLE_AUTO_CMD23)
5934 host->mmc->caps |= MMC_CAP_CMD23;
5936 if ((host->mmc->caps2 & MMC_CAP2_HS400) && (plat->en_strobe))
5937 host->mmc->caps2 |= MMC_CAP2_EN_STROBE;
5939 if (plat->enable_cq)
5940 host->mmc->caps2 |= MMC_CAP2_CQ;
5942 host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
5943 host->mmc->caps2 |= MMC_CAP2_PACKED_CMD;
5946 * Enable dyamic frequency scaling support only if the platform clock
5947 * limit is higher than the lowest supported frequency by tuning.
5949 for (i = 0; i < TUNING_FREQ_COUNT; i++) {
5950 low_freq = soc_data->tuning_freq_list[i];
5954 if (plat->en_freq_scaling && (plat->max_clk_limit > low_freq))
5955 host->mmc->caps2 |= MMC_CAP2_FREQ_SCALING;
5957 if (plat->en_periodic_calib)
5958 host->quirks2 |= SDHCI_QUIRK2_PERIODIC_CALIBRATION;
5960 if (plat->pwr_off_during_lp0)
5961 host->mmc->caps2 |= MMC_CAP2_NO_SLEEP_CMD;
5963 if (IS_RTPM_DELAY_CG(plat->rtpm_type) && (!plat->disable_clock_gate))
5964 host->mmc->caps2 |= MMC_CAP2_CLOCK_GATING;
5965 tegra_host->nominal_vcore_mv =
5966 tegra_dvfs_get_core_nominal_millivolts();
5967 tegra_host->min_vcore_override_mv =
5968 tegra_dvfs_get_core_override_floor();
5969 tegra_host->boot_vcore_mv = tegra_dvfs_get_core_boot_level();
5970 dev_info(mmc_dev(host->mmc),
5971 "Tuning constraints: nom_mv %d, boot_mv %d, min_or_mv %d\n",
5972 tegra_host->nominal_vcore_mv, tegra_host->boot_vcore_mv,
5973 tegra_host->min_vcore_override_mv);
5976 * If nominal voltage is equal to boot voltage, there is no need for
5977 * nominal voltage tuning.
5979 if (tegra_host->nominal_vcore_mv <= tegra_host->boot_vcore_mv)
5980 plat->en_nominal_vcore_tuning = false;
5982 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
5983 INIT_DELAYED_WORK(&host->delayed_clk_gate_wrk,
5984 delayed_clk_gate_cb);
5985 rc = sdhci_add_host(host);
5989 if (gpio_is_valid(plat->cd_gpio)) {
5990 rc = request_threaded_irq(gpio_to_irq(plat->cd_gpio), NULL,
5992 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
5993 mmc_hostname(host->mmc), host);
5995 dev_err(mmc_dev(host->mmc), "request irq error\n");
5996 goto err_cd_irq_req;
5999 sdhci_tegra_error_stats_debugfs(host);
6000 sdhci_tegra_misc_debugfs(host);
6001 device_create_file(&pdev->dev, &dev_attr_cmd_state);
6003 /* Enable async suspend/resume to reduce LP0 latency */
6004 device_enable_async_suspend(&pdev->dev);
6006 if (plat->power_off_rail) {
6007 tegra_host->reboot_notify.notifier_call =
6008 tegra_sdhci_reboot_notify;
6009 register_reboot_notifier(&tegra_host->reboot_notify);
6011 #ifdef CONFIG_DEBUG_FS
6012 tegra_host->dbg_cfg.tap_val =
6014 tegra_host->dbg_cfg.trim_val =
6015 plat->ddr_trim_delay;
6016 tegra_host->dbg_cfg.clk_ungated =
6017 plat->disable_clock_gate;
6022 if (gpio_is_valid(plat->cd_gpio))
6023 gpio_free(plat->cd_gpio);
6025 if (tegra_host->is_ddr_clk_set)
6026 clk_disable_unprepare(tegra_host->ddr_clk);
6028 clk_disable_unprepare(tegra_host->sdr_clk);
6031 if (tegra_host->ddr_clk)
6032 clk_put(tegra_host->ddr_clk);
6033 if (tegra_host->sdr_clk)
6034 clk_put(tegra_host->sdr_clk);
6036 if (gpio_is_valid(plat->wp_gpio))
6037 gpio_free(plat->wp_gpio);
6039 if (gpio_is_valid(plat->cd_gpio))
6040 free_irq(gpio_to_irq(plat->cd_gpio), host);
6042 if (gpio_is_valid(plat->power_gpio))
6043 gpio_free(plat->power_gpio);
6046 sdhci_pltfm_free(pdev);
6050 static int sdhci_tegra_remove(struct platform_device *pdev)
6052 struct sdhci_host *host = platform_get_drvdata(pdev);
6053 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
6054 struct sdhci_tegra *tegra_host = pltfm_host->priv;
6055 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
6056 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
6059 sdhci_remove_host(host, dead);
6061 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_DIS, 0, 0);
6063 dev_err(mmc_dev(host->mmc),
6064 "Regulator disable in remove failed %d\n", rc);
6066 if (tegra_host->vdd_slot_reg)
6067 regulator_put(tegra_host->vdd_slot_reg);
6068 if (tegra_host->vdd_io_reg)
6069 regulator_put(tegra_host->vdd_io_reg);
6071 if (gpio_is_valid(plat->wp_gpio))
6072 gpio_free(plat->wp_gpio);
6074 if (gpio_is_valid(plat->cd_gpio)) {
6075 free_irq(gpio_to_irq(plat->cd_gpio), host);
6076 gpio_free(plat->cd_gpio);
6079 if (gpio_is_valid(plat->power_gpio))
6080 gpio_free(plat->power_gpio);
6082 if (tegra_host->clk_enabled) {
6083 if (tegra_host->is_ddr_clk_set)
6084 clk_disable_unprepare(tegra_host->ddr_clk);
6086 clk_disable_unprepare(tegra_host->sdr_clk);
6089 if (tegra_host->ddr_clk)
6090 clk_put(tegra_host->ddr_clk);
6091 if (tegra_host->sdr_clk)
6092 clk_put(tegra_host->sdr_clk);
6094 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on)
6095 clk_disable_unprepare(tegra_host->emc_clk);
6096 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on)
6097 clk_disable_unprepare(tegra_host->sclk);
6098 if (plat->power_off_rail)
6099 unregister_reboot_notifier(&tegra_host->reboot_notify);
6101 sdhci_pltfm_free(pdev);
6106 static void sdhci_tegra_shutdown(struct platform_device *pdev)
6108 #ifdef CONFIG_MMC_RTPM
6109 struct sdhci_host *host = platform_get_drvdata(pdev);
6110 dev_dbg(&pdev->dev, " %s shutting down\n",
6111 mmc_hostname(host->mmc));
6112 /* applies to delayed clock gate RTPM and MMC RTPM cases */
6113 sdhci_runtime_forbid(host);
6117 static struct platform_driver sdhci_tegra_driver = {
6119 .name = "sdhci-tegra",
6120 .owner = THIS_MODULE,
6121 .of_match_table = sdhci_tegra_dt_match,
6122 .pm = SDHCI_PLTFM_PMOPS,
6124 .probe = sdhci_tegra_probe,
6125 .remove = sdhci_tegra_remove,
6126 .shutdown = sdhci_tegra_shutdown,
6129 module_platform_driver(sdhci_tegra_driver);
6131 MODULE_DESCRIPTION("SDHCI driver for Tegra");
6132 MODULE_AUTHOR("Google, Inc.");
6133 MODULE_LICENSE("GPL v2");