2 * Copyright (C) 2010 Google, Inc.
4 * Copyright (c) 2012-2016, NVIDIA CORPORATION. All rights reserved.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/err.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/platform_device.h>
21 #include <linux/clk.h>
24 #include <linux/of_device.h>
25 #include <linux/of_gpio.h>
26 #include <linux/gpio.h>
27 #include <linux/slab.h>
28 #include <linux/mmc/card.h>
29 #include <linux/mmc/host.h>
30 #include <linux/module.h>
31 #include <linux/mmc/sd.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/delay.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/tegra_pm_domains.h>
36 #include <linux/pinctrl/pinctrl.h>
37 #include <linux/pinctrl/consumer.h>
38 #include <linux/pinctrl/pinconf-tegra.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/uaccess.h>
41 #include <linux/ktime.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/reboot.h>
49 #include <linux/devfreq.h>
50 #include <linux/clk/tegra.h>
51 #include <linux/tegra-soc.h>
52 #include <linux/tegra-fuse.h>
53 #include <linux/tegra-pmc.h>
54 #include <linux/padctrl/padctrl.h>
56 #include <linux/platform_data/mmc-sdhci-tegra.h>
57 #include <linux/platform/tegra/common.h>
59 #include "sdhci-pltfm.h"
62 #define SDHCI_TEGRA_DBG(stuff...) pr_info(stuff)
64 #define SDHCI_TEGRA_DBG(stuff...) do {} while (0)
67 #define SDHCI_VNDR_CLK_CTRL 0x100
68 #define SDHCI_VNDR_CLK_CTRL_SDMMC_CLK 0x1
69 #define SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE 0x8
70 #define SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE 0x4
71 #define SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK 0x2
72 #define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT 16
73 #define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT 24
74 #define SDHCI_VNDR_CLK_CTRL_SDR50_TUNING 0x20
75 #define SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK 0x2
76 #define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK 0xFF
77 #define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK 0x1F
79 #define SDHCI_VNDR_SYS_SW_CTRL 0x104
80 #define SDHCI_VNDR_SYS_SW_CTRL_WR_CRC_USE_TMCLK 0x40000000
81 #define SDHCI_VNDR_SYS_SW_CTRL_STROBE_SHIFT 31
83 #define SDHCI_VNDR_CAP_OVERRIDES_0 0x10c
84 #define SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_SHIFT 8
85 #define SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_MASK 0x3F
87 #define SDHCI_VNDR_MISC_CTRL 0x120
88 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT 0x8
89 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT 0x10
90 #define SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT 0x200
91 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0 0x20
92 #define SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT 0x1
93 #define SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK 0x180
94 #define SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT 17
96 #define SDHCI_VNDR_DLLCAL_CFG 0x1b0
97 #define SDHCI_VNDR_DLLCAL_CFG_EN_CALIBRATE 0x80000000
99 #define SDHCI_VNDR_DLL_CTRL0_0 0x1b4
100 #define SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_SHIFT 7
101 #define SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_MASK 0x7F
102 #define SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_OFFSET 0x7C
105 #define SDHCI_VNDR_DLLCAL_CFG_STATUS 0x1bc
106 #define SDHCI_VNDR_DLLCAL_CFG_STATUS_DLL_ACTIVE 0x80000000
108 #define SDHCI_VNDR_TUN_CTRL0_0 0x1c0
109 /* MUL_M occupies bits [12:6] */
110 #define SDHCI_VNDR_TUN_CTRL0_0_MUL_M 0x1FC0
111 /* Set the value of bits [12:6] to 1 */
112 #define SDHCI_VNDR_TUN_CTRL0_0_MUL_M_VAL 0x40
113 #define SDHCI_VNDR_TUN_CTRL1_0 0x1c4
114 #define SDHCI_VNDR_TUN_STATUS0_0 0x1c8
115 /* Enable Re-tuning request only when CRC error is detected
116 * in SDR50/SDR104/HS200 modes
118 #define SDHCI_VNDR_TUN_CTRL_RETUNE_REQ_EN 0x8000000
119 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000
120 #define TUNING_WORD_SEL_MASK 0x7
121 /* Value 4 in bits [15:13] indicates 256 iterations */
122 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_MASK 0x7
123 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_SHIFT 13
124 /* Value 1 in NUM_TUNING_ITERATIONS indicates 64 iterations */
125 #define HW_TUNING_64_TRIES 1
126 /* Value 2 in NUM_TUNING_ITERATIONS indicates 128 iterations */
127 #define HW_TUNING_128_TRIES 2
128 /* Value 4 in NUM_TUNING_ITERATIONS indicates 256 iterations */
129 #define HW_TUNING_256_TRIES 4
131 #define SDHCI_VNDR_TUN_CTRL1_TUN_STEP_SIZE 0x77
134 #define SDHCI_VNDR_PRESET_VAL0_0 0x1d4
135 #define SDCLK_FREQ_SEL_HS_SHIFT 20
136 #define SDCLK_FREQ_SEL_DEFAULT_SHIFT 10
138 #define SDHCI_VNDR_PRESET_VAL1_0 0x1d8
139 #define SDCLK_FREQ_SEL_SDR50_SHIFT 20
140 #define SDCLK_FREQ_SEL_SDR25_SHIFT 10
142 #define SDHCI_VNDR_PRESET_VAL2_0 0x1dc
143 #define SDCLK_FREQ_SEL_DDR50_SHIFT 10
145 #define SDMMC_SDMEMCOMPPADCTRL 0x1E0
146 #define SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK 0xF
147 #define SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK 0x80000000
149 #define SDMMC_AUTO_CAL_CONFIG 0x1E4
150 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START 0x80000000
151 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE 0x20000000
152 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_SLW_OVERRIDE 0x10000000
153 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT 0x8
154 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_STEP_OFFSET_SHIFT 0x10
156 #define SDMMC_AUTO_CAL_STATUS 0x1EC
157 #define SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE 0x80000000
158 #define SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET 24
159 #define PULLUP_ADJUSTMENT_OFFSET 20
161 #define SDMMC_VENDOR_ERR_INTR_STATUS_0 0x108
163 #define SDMMC_IO_SPARE_0 0x1F0
164 #define SPARE_OUT_3_OFFSET 19
166 #define SDMMC_VNDR_IO_TRIM_CNTRL_0 0x1AC
167 #define SDMMC_VNDR_IO_TRIM_CNTRL_0_SEL_VREG 0x4
169 /* Erratum: Version register is invalid in HW */
170 #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
171 /* Erratum: Enable block gap interrupt detection */
172 #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
173 /* Do not enable auto calibration if the platform doesn't support */
174 #define NVQUIRK_DISABLE_AUTO_CALIBRATION BIT(2)
175 /* Set Calibration Offsets */
176 #define NVQUIRK_SET_CALIBRATION_OFFSETS BIT(3)
177 /* Set Drive Strengths */
178 #define NVQUIRK_SET_DRIVE_STRENGTH BIT(4)
179 /* Enable PADPIPE CLKEN */
180 #define NVQUIRK_ENABLE_PADPIPE_CLKEN BIT(5)
181 /* DISABLE SPI_MODE CLKEN */
182 #define NVQUIRK_DISABLE_SPI_MODE_CLKEN BIT(6)
184 #define NVQUIRK_SET_TAP_DELAY BIT(7)
186 #define NVQUIRK_SET_TRIM_DELAY BIT(8)
187 /* Enable SDHOST v3.0 support */
188 #define NVQUIRK_ENABLE_SD_3_0 BIT(9)
189 /* Enable SDR50 mode */
190 #define NVQUIRK_ENABLE_SDR50 BIT(10)
191 /* Enable SDR104 mode */
192 #define NVQUIRK_ENABLE_SDR104 BIT(11)
193 /*Enable DDR50 mode */
194 #define NVQUIRK_ENABLE_DDR50 BIT(12)
195 /* Enable Frequency Tuning for SDR50 mode */
196 #define NVQUIRK_ENABLE_SDR50_TUNING BIT(13)
197 /* Enable HS200 mode */
198 #define NVQUIRK_ENABLE_HS200 BIT(14)
199 /* Enable Infinite Erase Timeout*/
200 #define NVQUIRK_INFINITE_ERASE_TIMEOUT BIT(15)
202 * ENABLE FEEDBACK IO CLOCK
202 #define NVQUIRK_EN_FEEDBACK_CLK BIT(17)
203 /* Disable AUTO CMD23 */
204 #define NVQUIRK_DISABLE_AUTO_CMD23 BIT(18)
205 /* Shadow write xfer mode reg and write it along with CMD register */
206 #define NVQUIRK_SHADOW_XFER_MODE_REG BIT(19)
207 /* update PAD_E_INPUT_OR_E_PWRD bit */
208 #define NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD BIT(20)
209 /* Set the PIPE_STAGES mask field (see SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK) */
210 #define NVQUIRK_SET_PIPE_STAGES_MASK_0 BIT(21)
211 #define NVQUIRK_HIGH_FREQ_TAP_PROCEDURE BIT(22)
212 /* Disable external loopback for all sdmmc devices*/
213 #define NVQUIRK_DISABLE_EXTERNAL_LOOPBACK BIT(23)
214 /* Select fix tap hole margins */
215 #define NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS BIT(24)
216 /* Enable HS400 mode */
217 #define NVQUIRK_ENABLE_HS400 BIT(26)
218 /* Enable AUTO CMD23 */
219 #define NVQUIRK_ENABLE_AUTO_CMD23 BIT(27)
220 #define NVQUIRK_SET_SDMEMCOMP_VREF_SEL BIT(28)
221 /* Special PAD control register settings are needed for T210 */
222 #define NVQUIRK_UPDATE_PAD_CNTRL_REG BIT(29)
223 #define NVQUIRK_UPDATE_PIN_CNTRL_REG BIT(30)
224 /* Use timeout clk for write crc status data timeout counter */
225 #define NVQUIRK_USE_TMCLK_WR_CRC_TIMEOUT BIT(31)
227 /* Enable T210 specific SDMMC WAR - sd card voltage switch */
228 #define NVQUIRK2_CONFIG_PWR_DET BIT(0)
229 /* Enable T210 specific SDMMC WAR - Tuning Step Size, Tuning Iterations*/
230 #define NVQUIRK2_UPDATE_HW_TUNING_CONFG BIT(1)
231 /* The controller does not support cards that lack 1.8 V support */
232 #define NVQUIRK2_BROKEN_SD2_0_SUPPORT BIT(2)
233 #define NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH BIT(3)
234 /* Select SDR50 UHS mode for host if the device runs at SDR50 mode on T210 */
235 #define NVQUIRK2_SELECT_SDR50_MODE BIT(4)
236 #define NVQUIRK2_ADD_DELAY_AUTO_CALIBRATION BIT(5)
237 #define NVQUIRK2_SET_PAD_E_INPUT_VOL BIT(6)
239 /* Common subset of quirks for Tegra3 and later sdmmc controllers */
240 #define TEGRA_SDHCI_NVQUIRKS (NVQUIRK_ENABLE_PADPIPE_CLKEN | \
241 NVQUIRK_DISABLE_SPI_MODE_CLKEN | \
242 NVQUIRK_EN_FEEDBACK_CLK | \
243 NVQUIRK_SET_TAP_DELAY | \
244 NVQUIRK_ENABLE_SDR50_TUNING | \
245 NVQUIRK_ENABLE_SDR50 | \
246 NVQUIRK_ENABLE_SDR104 | \
247 NVQUIRK_SHADOW_XFER_MODE_REG | \
248 NVQUIRK_DISABLE_AUTO_CMD23)
250 #define TEGRA_SDHCI_QUIRKS (SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | \
251 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
252 SDHCI_QUIRK_SINGLE_POWER_WRITE | \
253 SDHCI_QUIRK_NO_HISPD_BIT | \
254 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | \
255 SDHCI_QUIRK_BROKEN_CARD_DETECTION | \
256 SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC)
258 #define TEGRA_SDHCI_QUIRKS2 (SDHCI_QUIRK2_PRESET_VALUE_BROKEN | \
259 SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING | \
260 SDHCI_QUIRK2_NON_STANDARD_TUNING | \
261 SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO | \
262 SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
264 #define IS_QUIRKS2_DELAYED_CLK_GATE(host) \
265 (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
267 /* Interface voltages */
268 #define SDHOST_1V8_OCR_MASK 0x8
269 #define SDHOST_HIGH_VOLT_MIN 2700000
270 #define SDHOST_HIGH_VOLT_MAX 3600000
271 #define SDHOST_HIGH_VOLT_2V8 2800000
272 #define SDHOST_LOW_VOLT_MIN 1800000
273 #define SDHOST_LOW_VOLT_MAX 1800000
274 #define SDHOST_HIGH_VOLT_3V2 3200000
275 #define SDHOST_HIGH_VOLT_3V3 3300000
276 #define SDHOST_MAX_VOLT_SUPPORT 3000000
278 /* Clock related definitions */
279 #define MAX_DIVISOR_VALUE 128
280 #define DEFAULT_SDHOST_FREQ 50000000
281 #define SDMMC_AHB_MAX_FREQ 115000000
282 #define SDMMC_EMC_MAX_FREQ 150000000
283 #define SDMMC_EMC_NOM_VOLT_FREQ 900000000
285 /* Tuning related definitions */
286 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8 128
287 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4 64
288 #define MAX_TAP_VALUES 255
289 #define TUNING_FREQ_COUNT 3
290 #define TUNING_VOLTAGES_COUNT 3
291 #define TUNING_RETRIES 1
292 #define DFS_FREQ_COUNT 2
293 #define NEG_MAR_CHK_WIN_COUNT 2
294 #define PRECISION_FOR_ESTIMATE 100000
295 /* Tuning core voltage requirements */
296 #define NOMINAL_VCORE_TUN BIT(0)
297 #define BOOT_VCORE_TUN BIT(1)
298 #define MIN_OVERRIDE_VCORE_TUN BIT(2)
300 /* Tap cmd sysfs commands */
301 #define TAP_CMD_TRIM_DEFAULT_VOLTAGE 1
302 #define TAP_CMD_TRIM_HIGH_VOLTAGE 2
304 /* Max number of clock parents for sdhci is fixed to 2 */
305 #define TEGRA_SDHCI_MAX_PLL_SOURCE 2
307 * Defined the chip specific quirks and clock sources. For now, the used clock
308 * sources vary only from chip to chip. If the sources allowed varies from
309 * platform to platform, then move the clock sources list to platform data.
310 * When filling the tuning_freq_list in soc_data, the number of entries should
311 * be equal to TUNING_FREQ_COUNT. Depending on the number of DFS frequencies supported,
312 * set the desired low, high or max frequencies and set the remaining entries
313 * as 0s. The number of entries should always be equal to TUNING_FREQ_COUNT
314 * in order to get the right tuning data.
317 struct sdhci_tegra_soc_data {
318 const struct sdhci_pltfm_data *pdata;
321 const char *parent_clk_list[TEGRA_SDHCI_MAX_PLL_SOURCE];
322 unsigned int tuning_freq_list[TUNING_FREQ_COUNT];
324 u8 tap_hole_coeffs_count;
325 u8 tap_hole_margins_count;
326 struct tuning_t2t_coeffs *t2t_coeffs;
327 struct tap_hole_coeffs *tap_hole_coeffs;
328 struct tuning_tap_hole_margins *tap_hole_margins;
332 enum tegra_regulator_config_ops {
338 enum tegra_tuning_freq {
344 struct tuning_t2t_coeffs {
348 unsigned int t2t_vnom_slope;
349 unsigned int t2t_vnom_int;
350 unsigned int t2t_vmax_slope;
351 unsigned int t2t_vmax_int;
352 unsigned int t2t_vmin_slope;
353 unsigned int t2t_vmin_int;
356 #define SET_TUNING_COEFFS(_device_id, _vmax, _vmin, _t2t_vnom_slope, \
357 _t2t_vnom_int, _t2t_vmax_slope, _t2t_vmax_int, _t2t_vmin_slope, \
360 .dev_id = _device_id, \
363 .t2t_vnom_slope = _t2t_vnom_slope, \
364 .t2t_vnom_int = _t2t_vnom_int, \
365 .t2t_vmax_slope = _t2t_vmax_slope, \
366 .t2t_vmax_int = _t2t_vmax_int, \
367 .t2t_vmin_slope = _t2t_vmin_slope, \
368 .t2t_vmin_int = _t2t_vmin_int, \
371 static struct tuning_t2t_coeffs t11x_tuning_coeffs[] = {
372 SET_TUNING_COEFFS("sdhci-tegra.3", 1250, 950, 55, 135434,
373 73, 170493, 243, 455948),
374 SET_TUNING_COEFFS("sdhci-tegra.2", 1250, 950, 50, 129738,
375 73, 168898, 241, 453050),
376 SET_TUNING_COEFFS("sdhci-tegra.0", 1250, 950, 62, 143469,
377 82, 180096, 238, 444285),
380 static struct tuning_t2t_coeffs t12x_automotive_tuning_coeffs[] = {
381 SET_TUNING_COEFFS("sdhci-tegra.3", 1040, 950, 29, 130687,
382 29, 130687, 29, 130687),
383 SET_TUNING_COEFFS("sdhci-tegra.2", 1040, 950, 36, 148855,
384 36, 148855, 36, 148855),
385 SET_TUNING_COEFFS("sdhci-tegra.0", 1040, 950, 37, 149783,
386 37, 149783, 37, 149783),
389 static struct tuning_t2t_coeffs t12x_tuning_coeffs[] = {
390 SET_TUNING_COEFFS("sdhci-tegra.3", 1150, 950, 27, 118295,
391 27, 118295, 48, 188148),
392 SET_TUNING_COEFFS("sdhci-tegra.2", 1150, 950, 29, 124427,
393 29, 124427, 54, 203707),
394 SET_TUNING_COEFFS("sdhci-tegra.0", 1150, 950, 25, 115933,
395 25, 115933, 47, 187224),
398 struct tap_hole_coeffs {
400 unsigned int freq_khz;
401 unsigned int thole_vnom_slope;
402 unsigned int thole_vnom_int;
403 unsigned int thole_vmax_slope;
404 unsigned int thole_vmax_int;
405 unsigned int thole_vmin_slope;
406 unsigned int thole_vmin_int;
409 #define SET_TAP_HOLE_COEFFS(_device_id, _freq_khz, _thole_vnom_slope, \
410 _thole_vnom_int, _thole_vmax_slope, _thole_vmax_int, \
411 _thole_vmin_slope, _thole_vmin_int) \
413 .dev_id = _device_id, \
414 .freq_khz = _freq_khz, \
415 .thole_vnom_slope = _thole_vnom_slope, \
416 .thole_vnom_int = _thole_vnom_int, \
417 .thole_vmax_slope = _thole_vmax_slope, \
418 .thole_vmax_int = _thole_vmax_int, \
419 .thole_vmin_slope = _thole_vmin_slope, \
420 .thole_vmin_int = _thole_vmin_int, \
423 static struct tap_hole_coeffs t11x_tap_hole_coeffs[] = {
424 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 765, 102357, 507,
426 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 156000, 1042, 142044, 776,
428 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1215, 167702, 905,
430 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 1925, 284516, 1528,
431 253188, 366, 120001),
432 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 472, 53312, 318,
434 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 156000, 765, 95512, 526,
436 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 949, 121887, 656,
438 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 1901, 259035, 1334,
439 215539, 326, 100986),
440 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 411, 54495, 305,
442 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 156000, 715, 97623, 516,
444 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 905, 124579, 648,
446 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 1893, 264746, 1333,
447 221722, 354, 109880),
450 static struct tap_hole_coeffs t12x_automotive_tap_hole_coeffs[] = {
451 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 198000, 926, 107053, 926,
452 107053, 926, 107053),
453 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 189000, 985, 114635, 985,
454 114635, 985, 114635),
455 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 188000, 991, 115523, 991,
456 115523, 991, 115523),
457 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 296, 27274, 296,
459 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 94000, 1520, 196114, 1520,
460 196114, 1520, 196114),
461 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 578, 67417, 578,
463 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 94000, 1785, 219359, 1785,
464 219359, 1785, 219359),
467 static struct tap_hole_coeffs t12x_tap_hole_coeffs[] = {
468 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 1037, 106934, 1037,
470 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 198000, 1037, 106934, 1037,
472 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1703, 186307, 1703,
473 186307, 890, 130617),
474 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 100000, 2452, 275601, 2452,
475 275601, 1264, 193957),
476 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 3090, 351666, 3090,
477 351666, 1583, 247913),
478 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 468, 36031, 468,
480 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 200000, 468, 36031, 468,
482 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 1146, 117841, 1146,
484 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 100000, 1879, 206195, 1879,
485 206195, 953, 141341),
486 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 2504, 281460, 2504,
487 281460, 1262, 194452),
488 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 874, 85243, 874,
490 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 1554, 167210, 1554,
491 167210, 793, 115672),
492 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 100000, 2290, 255734, 2290,
493 255734, 1164, 178691),
494 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 2916, 331143, 2916,
495 331143, 1480, 232373),
498 struct tuning_tap_hole_margins {
500 unsigned int tap_hole_margin;
503 #define SET_TUNING_TAP_HOLE_MARGIN(_device_id, _tap_hole_margin) \
505 .dev_id = _device_id, \
506 .tap_hole_margin = _tap_hole_margin, \
509 static struct tuning_tap_hole_margins t12x_automotive_tap_hole_margins[] = {
510 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.3", 13),
511 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.2", 7),
512 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.0", 10),
515 struct freq_tuning_constraints {
516 unsigned int vcore_mask;
519 static struct freq_tuning_constraints tuning_vcore_constraints[3] = {
521 .vcore_mask = BOOT_VCORE_TUN,
524 .vcore_mask = BOOT_VCORE_TUN,
527 .vcore_mask = BOOT_VCORE_TUN,
536 enum tap_win_edge_attr {
542 struct tap_window_data {
545 enum tap_win_edge_attr win_start_attr;
546 enum tap_win_edge_attr win_end_attr;
551 struct tuning_values {
559 struct tegra_tuning_data {
560 unsigned int freq_hz;
562 int nom_best_tap_value;
563 struct freq_tuning_constraints constraints;
564 struct tap_hole_coeffs *thole_coeffs;
565 struct tuning_t2t_coeffs *t2t_coeffs;
566 struct tuning_values est_values;
567 struct tuning_values calc_values;
568 struct tap_window_data *tap_data;
569 struct tap_window_data *final_tap_data;
570 u8 num_of_valid_tap_wins;
574 bool is_partial_win_valid;
577 #ifdef CONFIG_MMC_FREQ_SCALING
578 struct freq_gov_params {
580 u8 polling_interval_ms;
581 u8 active_load_threshold;
584 static struct freq_gov_params gov_params[3] = {
586 .idle_mon_cycles = 3,
587 .polling_interval_ms = 50,
588 .active_load_threshold = 25,
591 .idle_mon_cycles = 3,
592 .polling_interval_ms = 50,
593 .active_load_threshold = 25,
596 .idle_mon_cycles = 3,
597 .polling_interval_ms = 50,
598 .active_load_threshold = 25,
603 struct tegra_freq_gov_data {
604 unsigned int curr_active_load;
605 unsigned int avg_active_load;
606 unsigned int act_load_high_threshold;
607 unsigned int max_idle_monitor_cycles;
608 unsigned int curr_freq;
609 unsigned int freqs[DFS_FREQ_COUNT];
610 unsigned int freq_switch_count;
611 bool monitor_idle_load;
614 struct sdhci_tegra_sd_stats {
615 unsigned int data_crc_count;
616 unsigned int cmd_crc_count;
617 unsigned int data_to_count;
618 unsigned int cmd_to_count;
621 struct sdhci_tegra_pll_parent {
623 unsigned long pll_rate;
626 #ifdef CONFIG_DEBUG_FS
627 struct dbg_cfg_data {
628 unsigned int tap_val;
629 unsigned int trim_val;
634 /* These dummy functions are defined because the dvfs functions
635 are not available. They will be removed once the dvfs
636 functions are available for use.
639 #ifdef CONFIG_ARCH_TEGRA_18x_SOC
640 static int tegra_dvfs_predict_mv_at_hz_no_tfloor(struct clk *c, unsigned long rate) {
644 static int tegra_dvfs_set_fmax_at_vmin(struct clk *c, unsigned long f_max, int v_min) {
648 static int tegra_dvfs_get_core_nominal_millivolts(void) {
652 static int tegra_dvfs_get_core_override_floor(void) {
656 static int tegra_dvfs_get_core_boot_level(void) {
659 static int tegra_soc_speedo_0_value(void) {
665 const struct tegra_sdhci_platform_data *plat;
666 const struct sdhci_tegra_soc_data *soc_data;
669 /* ensure atomic set clock calls */
670 struct mutex set_clock_mutex;
671 struct regulator *vdd_io_reg;
672 struct regulator *vdd_slot_reg;
673 struct regulator *vcore_reg;
675 unsigned int vddio_min_uv;
677 unsigned int vddio_max_uv;
678 /* DDR and low speed modes clock */
680 /* HS200, SDR104 modes clock */
682 /* Check if ddr_clk is being used */
684 /* max clk supported by the platform */
685 unsigned int max_clk_limit;
686 /* max ddr clk supported by the platform */
687 unsigned int ddr_clk_limit;
689 bool is_rail_enabled;
691 bool is_sdmmc_emc_clk_on;
693 bool is_sdmmc_sclk_on;
694 struct sdhci_tegra_sd_stats *sd_stat_head;
695 struct notifier_block reboot_notify;
696 struct sdhci_tegra_pll_parent pll_source[TEGRA_SDHCI_MAX_PLL_SOURCE];
697 bool is_parent_pll_source_1;
698 bool set_1v8_calib_offsets;
699 int nominal_vcore_mv;
700 int min_vcore_override_mv;
702 /* Tuning related structures and variables */
703 /* Tuning opcode to be used */
704 unsigned int tuning_opcode;
705 /* Tuning packet size */
706 unsigned int tuning_bsize;
707 /* Num of tuning freqs selected */
708 int tuning_freq_count;
709 unsigned int tap_cmd;
711 unsigned int tuning_status;
713 #define TUNING_STATUS_DONE 1
714 #define TUNING_STATUS_RETUNE 2
715 /* Freq tuning information for each sampling clock freq */
716 struct tegra_tuning_data tuning_data[DFS_FREQ_COUNT];
717 struct tegra_freq_gov_data *gov_data;
719 #ifdef CONFIG_DEBUG_FS
720 /* Override debug config data */
721 struct dbg_cfg_data dbg_cfg;
723 struct pinctrl_dev *pinctrl;
724 struct pinctrl *pinctrl_sdmmc;
725 struct pinctrl_state *schmitt_enable[2];
726 struct pinctrl_state *schmitt_disable[2];
727 struct pinctrl_state *drv_code_strength;
728 struct pinctrl_state *default_drv_code_strength;
729 struct pinctrl_state *sdmmc_pad_ctrl[MMC_TIMINGS_MAX_MODES];
731 unsigned int tuned_tap_delay;
732 struct padctrl *sdmmc_padctrl;
736 static unsigned int boot_volt_req_refcount;
737 static DEFINE_MUTEX(tuning_mutex);
739 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
740 struct sdhci_host *sdhci, unsigned int clock);
741 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
742 unsigned long desired_rate);
743 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
744 unsigned int tap_delay);
745 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
746 u8 option, int min_uV, int max_uV);
747 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
748 unsigned int trim_delay);
749 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
750 unsigned char signal_voltage);
751 static void tegra_sdhci_post_init(struct sdhci_host *sdhci);
752 static void tegra_sdhci_en_strobe(struct sdhci_host *sdhci);
753 static void tegra_sdhci_update_sdmmc_pinctrl_register(struct sdhci_host *sdhci,
755 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
756 int t2t_tuning_value);
757 static void tegra_sdhci_config_tap(struct sdhci_host *sdhci, u8 option);
758 static void vendor_trim_clear_sel_vreg(struct sdhci_host *host, bool enable);
759 static void sdhci_tegra_select_drive_strength(struct sdhci_host *host,
761 static void tegra_sdhci_get_clock_freq_for_mode(struct sdhci_host *sdhci,
762 unsigned int *clock);
763 static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask);
765 static void tegra_sdhci_dumpregs(struct sdhci_host *sdhci)
772 /* print tuning windows */
773 if (!(sdhci->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING)) {
774 for (i = 0; i <= TUNING_WORD_SEL_MASK; i++) {
775 reg = sdhci_readl(sdhci, SDHCI_VNDR_TUN_CTRL0_0);
776 reg &= ~TUNING_WORD_SEL_MASK;
778 sdhci_writel(sdhci, reg, SDHCI_VNDR_TUN_CTRL0_0);
779 val = sdhci_readl(sdhci, SDHCI_VNDR_TUN_STATUS0_0);
780 pr_info("%s: tuning_window[%d]: %#x\n",
781 mmc_hostname(sdhci->mmc), i, val);
784 tap_delay = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
785 trim_delay = tap_delay;
786 tap_delay >>= SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT;
787 tap_delay &= SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK;
788 trim_delay >>= SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT;
789 trim_delay &= SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK;
790 pr_info("sdhci: Tap value: %u | Trim value: %u\n", tap_delay,
792 pr_info("sdhci: SDMMC Interrupt status: 0x%08x\n", sdhci_readl(sdhci,
793 SDMMC_VENDOR_ERR_INTR_STATUS_0));
796 static bool tegra_sdhci_is_tuning_done(struct sdhci_host *sdhci)
798 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
799 struct sdhci_tegra *tegra_host = pltfm_host->priv;
801 if (tegra_host->tuning_status == TUNING_STATUS_DONE) {
802 dev_info(mmc_dev(sdhci->mmc),
803 "Tuning already done, restoring the best tap value : %u\n",
804 tegra_host->tuned_tap_delay);
805 sdhci_tegra_set_tap_delay(sdhci, tegra_host->tuned_tap_delay);
811 static int sdhci_tegra_get_max_tuning_loop_counter(struct sdhci_host *sdhci)
813 u16 hw_tuning_iterations;
816 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50)
817 hw_tuning_iterations = HW_TUNING_256_TRIES;
818 else if (sdhci->mmc->caps2 & MMC_CAP2_HS533)
819 hw_tuning_iterations = HW_TUNING_64_TRIES;
821 hw_tuning_iterations = HW_TUNING_128_TRIES;
823 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_TUN_CTRL0_0);
824 vendor_ctrl &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_MASK <<
825 SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_SHIFT);
826 vendor_ctrl |= (hw_tuning_iterations <<
827 SDHCI_VNDR_TUN_CTRL0_TUN_ITERATIONS_SHIFT);
828 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_TUN_CTRL0_0);
833 static int show_error_stats_dump(struct seq_file *s, void *data)
835 struct sdhci_host *host = s->private;
836 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
837 struct sdhci_tegra *tegra_host = pltfm_host->priv;
838 struct sdhci_tegra_sd_stats *head;
840 seq_printf(s, "ErrorStatistics:\n");
841 seq_printf(s, "DataCRC\tCmdCRC\tDataTimeout\tCmdTimeout\n");
842 head = tegra_host->sd_stat_head;
844 seq_printf(s, "%d\t%d\t%d\t%d\n", head->data_crc_count,
845 head->cmd_crc_count, head->data_to_count,
850 static int show_dfs_stats_dump(struct seq_file *s, void *data)
852 struct sdhci_host *host = s->private;
853 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
854 struct sdhci_tegra *tegra_host = pltfm_host->priv;
855 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
857 seq_printf(s, "DFS statistics:\n");
859 if (host->mmc->dev_stats != NULL)
860 seq_printf(s, "Polling_period: %d\n",
861 host->mmc->dev_stats->polling_interval);
863 if (gov_data != NULL) {
864 seq_printf(s, "cur_active_load: %d\n",
865 gov_data->curr_active_load);
866 seq_printf(s, "avg_active_load: %d\n",
867 gov_data->avg_active_load);
868 seq_printf(s, "act_load_high_threshold: %d\n",
869 gov_data->act_load_high_threshold);
870 seq_printf(s, "freq_switch_count: %d\n",
871 gov_data->freq_switch_count);
876 static int sdhci_error_stats_dump(struct inode *inode, struct file *file)
878 return single_open(file, show_error_stats_dump, inode->i_private);
881 static int sdhci_dfs_stats_dump(struct inode *inode, struct file *file)
883 return single_open(file, show_dfs_stats_dump, inode->i_private);
887 static const struct file_operations sdhci_host_fops = {
888 .open = sdhci_error_stats_dump,
891 .release = single_release,
894 static const struct file_operations sdhci_host_dfs_fops = {
895 .open = sdhci_dfs_stats_dump,
898 .release = single_release,
901 static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
905 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
906 /* Use wp_gpio here instead? */
907 val = readl(host->ioaddr + reg);
908 return val | SDHCI_WRITE_PROTECT;
910 return readl(host->ioaddr + reg);
913 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
915 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
916 struct sdhci_tegra *tegra_host = pltfm_host->priv;
917 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
919 if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
920 (reg == SDHCI_HOST_VERSION))) {
921 return SDHCI_SPEC_200;
923 return readw(host->ioaddr + reg);
926 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
928 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
929 struct sdhci_tegra *tegra_host = pltfm_host->priv;
930 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
932 /* Seems like we're getting spurious timeout and crc errors, so
933 * disable signalling of them. In case of real errors software
934 * timers should take care of eventually detecting them.
936 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
937 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
939 writel(val, host->ioaddr + reg);
941 if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
942 (reg == SDHCI_INT_ENABLE))) {
943 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
944 if (val & SDHCI_INT_CARD_INT)
948 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
952 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
954 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
955 struct sdhci_tegra *tegra_host = pltfm_host->priv;
956 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
958 if (soc_data->nvquirks & NVQUIRK_SHADOW_XFER_MODE_REG) {
960 case SDHCI_TRANSFER_MODE:
962 * Postpone this write, we must do it together with a
963 * command write that is down below.
965 pltfm_host->xfer_mode_shadow = val;
968 writel((val << 16) | pltfm_host->xfer_mode_shadow,
969 host->ioaddr + SDHCI_TRANSFER_MODE);
970 pltfm_host->xfer_mode_shadow = 0;
975 writew(val, host->ioaddr + reg);
978 #ifdef CONFIG_MMC_FREQ_SCALING
980 static bool disable_scaling __read_mostly;
981 module_param(disable_scaling, bool, 0644);
984 * Dynamic frequency calculation.
985 * The active load for the current period and the average active load
986 * are calculated at the end of each polling interval.
988 * If the current active load is greater than the threshold load, then the
989 * frequency is boosted(156MHz).
990 * If the active load is lower than the threshold, then the load is monitored
991 * for a max of three cycles before reducing the frequency(82MHz). If the
992 * average active load is lower, then the monitoring cycles is reduced.
994 * The active load threshold value for both eMMC and SDIO is set to 25 which
995 * is found to give the optimal power and performance. The polling interval is
998 * The polling interval and active load threshold values can be changed by
999 * the user through sysfs.
1001 static unsigned long calculate_mmc_target_freq(
1002 struct tegra_freq_gov_data *gov_data)
1004 unsigned long desired_freq = gov_data->curr_freq;
1005 unsigned int type = MMC_TYPE_MMC;
1007 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
1008 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
1009 gov_data->monitor_idle_load = false;
1010 gov_data->max_idle_monitor_cycles =
1011 gov_params[type].idle_mon_cycles;
1013 if (gov_data->monitor_idle_load) {
1014 if (!gov_data->max_idle_monitor_cycles) {
1015 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
1016 gov_data->max_idle_monitor_cycles =
1017 gov_params[type].idle_mon_cycles;
1019 gov_data->max_idle_monitor_cycles--;
1022 gov_data->monitor_idle_load = true;
1023 gov_data->max_idle_monitor_cycles *=
1024 gov_data->avg_active_load;
1025 gov_data->max_idle_monitor_cycles /= 100;
1029 return desired_freq;
1032 static unsigned long calculate_sdio_target_freq(
1033 struct tegra_freq_gov_data *gov_data)
1035 unsigned long desired_freq = gov_data->curr_freq;
1036 unsigned int type = MMC_TYPE_SDIO;
1038 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
1039 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
1040 gov_data->monitor_idle_load = false;
1041 gov_data->max_idle_monitor_cycles =
1042 gov_params[type].idle_mon_cycles;
1044 if (gov_data->monitor_idle_load) {
1045 if (!gov_data->max_idle_monitor_cycles) {
1046 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
1047 gov_data->max_idle_monitor_cycles =
1048 gov_params[type].idle_mon_cycles;
1050 gov_data->max_idle_monitor_cycles--;
1053 gov_data->monitor_idle_load = true;
1054 gov_data->max_idle_monitor_cycles *=
1055 gov_data->avg_active_load;
1056 gov_data->max_idle_monitor_cycles /= 100;
1060 return desired_freq;
1063 static unsigned long calculate_sd_target_freq(
1064 struct tegra_freq_gov_data *gov_data)
1066 unsigned long desired_freq = gov_data->curr_freq;
1067 unsigned int type = MMC_TYPE_SD;
1069 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
1070 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
1071 gov_data->monitor_idle_load = false;
1072 gov_data->max_idle_monitor_cycles =
1073 gov_params[type].idle_mon_cycles;
1075 if (gov_data->monitor_idle_load) {
1076 if (!gov_data->max_idle_monitor_cycles) {
1077 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
1078 gov_data->max_idle_monitor_cycles =
1079 gov_params[type].idle_mon_cycles;
1081 gov_data->max_idle_monitor_cycles--;
1084 gov_data->monitor_idle_load = true;
1085 gov_data->max_idle_monitor_cycles *=
1086 gov_data->avg_active_load;
1087 gov_data->max_idle_monitor_cycles /= 100;
1091 return desired_freq;
1094 static unsigned long sdhci_tegra_get_target_freq(struct sdhci_host *sdhci,
1095 struct devfreq_dev_status *dfs_stats)
1097 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1098 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1099 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
1100 unsigned long freq = sdhci->mmc->actual_clock;
1103 dev_err(mmc_dev(sdhci->mmc),
1104 "No gov data. Continue using current freq %ld", freq);
1108 if (disable_scaling)
1112 * If clock gating is enabled and clock is currently disabled, then
1115 if (!tegra_host->clk_enabled)
1118 if (dfs_stats->total_time) {
1119 gov_data->curr_active_load = (dfs_stats->busy_time * 100) /
1120 dfs_stats->total_time;
1122 gov_data->curr_active_load = 0;
1125 gov_data->avg_active_load += gov_data->curr_active_load;
1126 gov_data->avg_active_load >>= 1;
1128 if (sdhci->mmc->card) {
1129 if (sdhci->mmc->card->type == MMC_TYPE_SDIO)
1130 freq = calculate_sdio_target_freq(gov_data);
1131 else if (sdhci->mmc->card->type == MMC_TYPE_MMC)
1132 freq = calculate_mmc_target_freq(gov_data);
1133 else if (sdhci->mmc->card->type == MMC_TYPE_SD)
1134 freq = calculate_sd_target_freq(gov_data);
1135 if (gov_data->curr_freq != freq)
1136 gov_data->freq_switch_count++;
1137 gov_data->curr_freq = freq;
1143 static int sdhci_tegra_freq_gov_init(struct sdhci_host *sdhci)
1145 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1146 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1151 if (!((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
1152 (sdhci->mmc->ios.timing == MMC_TIMING_MMC_HS200))) {
1153 dev_info(mmc_dev(sdhci->mmc),
1154 "DFS not required for current operating mode\n");
1158 if (!tegra_host->gov_data) {
1159 tegra_host->gov_data = devm_kzalloc(mmc_dev(sdhci->mmc),
1160 sizeof(struct tegra_freq_gov_data), GFP_KERNEL);
1161 if (!tegra_host->gov_data) {
1162 dev_err(mmc_dev(sdhci->mmc),
1163 "Failed to allocate memory for dfs data\n");
1168 /* Find the supported frequencies */
1169 dev_info(mmc_dev(sdhci->mmc), "DFS supported freqs");
1170 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
1171 freq = tegra_host->tuning_data[i].freq_hz;
1173 * Check the nearest possible clock with pll_c and pll_p as
1174 * the clock sources. Choose the higher frequency.
1176 tegra_host->gov_data->freqs[i] = get_nearest_clock_freq(
1177 tegra_host->pll_source[0].pll_rate,
1179 freq = get_nearest_clock_freq(
1180 tegra_host->pll_source[1].pll_rate,
1182 if (freq > tegra_host->gov_data->freqs[i])
1183 tegra_host->gov_data->freqs[i] = freq;
1184 pr_err("%d,", tegra_host->gov_data->freqs[i]);
1187 tegra_host->gov_data->monitor_idle_load = false;
1188 tegra_host->gov_data->curr_freq = sdhci->mmc->actual_clock;
1189 if (sdhci->mmc->card) {
1190 type = sdhci->mmc->card->type;
1191 sdhci->mmc->dev_stats->polling_interval =
1192 gov_params[type].polling_interval_ms;
1193 tegra_host->gov_data->act_load_high_threshold =
1194 gov_params[type].active_load_threshold;
1195 tegra_host->gov_data->max_idle_monitor_cycles =
1196 gov_params[type].idle_mon_cycles;
1204 static unsigned int tegra_sdhci_get_cd(struct sdhci_host *sdhci)
1206 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1207 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1209 return tegra_host->card_present;
1212 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
1214 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1215 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1216 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1218 if (!gpio_is_valid(plat->wp_gpio))
1221 return gpio_get_value_cansleep(plat->wp_gpio);
1224 static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1228 u32 vndr_ctrl, trim_delay, best_tap_value;
1229 unsigned int dqs_trim_delay;
1230 struct tegra_tuning_data *tuning_data;
1231 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1232 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1233 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1234 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1236 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1238 /* Select Bus Speed Mode for host
1239 * For HS200 we need to set UHS_MODE_SEL to SDR104.
1240 * It works as SDR 104 in SD 4-bit mode and HS200 in eMMC 8-bit mode.
1241 * SDR50 mode timing seems to have issues. Programming SDR104
1242 * mode for SDR50 mode for reliable transfers over interface.
1243 * For HS400 we need to set UHS_MODE_SEL to HS400.
1245 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1247 case MMC_TIMING_UHS_SDR12:
1248 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1250 case MMC_TIMING_UHS_SDR25:
1251 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1253 case MMC_TIMING_UHS_SDR50:
1254 if (soc_data->nvquirks2 & NVQUIRK2_SELECT_SDR50_MODE)
1255 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1257 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1259 case MMC_TIMING_UHS_SDR104:
1260 case MMC_TIMING_MMC_HS200:
1261 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1263 case MMC_TIMING_UHS_DDR50:
1264 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1266 case MMC_TIMING_MMC_HS400:
1267 ctrl_2 |= SDHCI_CTRL_UHS_HS400;
1271 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1272 sdhci_tegra_select_drive_strength(host, uhs);
1274 if (uhs == MMC_TIMING_MMC_HS400) {
1275 if (host->mmc->caps2 & MMC_CAP2_HS533)
1276 dqs_trim_delay = plat->dqs_trim_delay_hs533;
1278 dqs_trim_delay = plat->dqs_trim_delay;
1280 ctrl_2 = sdhci_readl(host, SDHCI_VNDR_CAP_OVERRIDES_0);
1281 ctrl_2 &= ~(SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_MASK <<
1282 SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_SHIFT);
1283 ctrl_2 |= ((dqs_trim_delay &
1284 SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_MASK) <<
1285 SDHCI_VNDR_CAP_OVERRIDES_0_DQS_TRIM_SHIFT);
1286 sdhci_writel(host, ctrl_2, SDHCI_VNDR_CAP_OVERRIDES_0);
1289 if (uhs == MMC_TIMING_UHS_DDR50) {
1290 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1291 clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT);
1292 clk |= 1 << SDHCI_DIVIDER_SHIFT;
1293 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1295 /* Set the ddr mode trim delay if required */
1296 if (plat->is_ddr_trim_delay) {
1297 trim_delay = plat->ddr_trim_delay;
1298 vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1299 vndr_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1300 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1301 vndr_ctrl |= (trim_delay <<
1302 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1303 sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_CLK_CTRL);
1307 /* Set the best tap value based on timing */
1308 if (((uhs == MMC_TIMING_MMC_HS200) ||
1309 (uhs == MMC_TIMING_UHS_SDR104) ||
1310 (uhs == MMC_TIMING_MMC_HS400) ||
1311 (uhs == MMC_TIMING_UHS_SDR50)) &&
1312 (tegra_host->tuning_status == TUNING_STATUS_DONE)) {
1313 if (host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) {
1314 tuning_data = sdhci_tegra_get_tuning_data(host,
1315 host->mmc->ios.clock);
1316 best_tap_value = (tegra_host->tap_cmd ==
1317 TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1318 tuning_data->nom_best_tap_value :
1319 tuning_data->best_tap_value;
1321 best_tap_value = tegra_host->tuned_tap_delay;
1323 } else if ((uhs == MMC_TIMING_UHS_DDR50) && (plat->is_ddr_tap_delay)) {
1324 best_tap_value = plat->ddr_tap_delay;
1326 best_tap_value = tegra_host->plat->tap_delay;
1328 sdhci_tegra_set_tap_delay(host, best_tap_value);
1333 static void sdhci_status_notify_cb(int card_present, void *dev_id)
1335 struct sdhci_host *sdhci = (struct sdhci_host *)dev_id;
1336 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1337 struct tegra_sdhci_platform_data *plat;
1338 unsigned int status, oldstat;
1340 pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc),
1343 plat = pdev->dev.platform_data;
1344 if (!plat->mmc_data.status) {
1345 if (card_present == 1) {
1346 sdhci->mmc->rescan_disable = 0;
1347 mmc_detect_change(sdhci->mmc, 0);
1348 } else if (card_present == 0) {
1349 sdhci->mmc->detect_change = 0;
1350 sdhci->mmc->rescan_disable = 1;
1355 status = plat->mmc_data.status(mmc_dev(sdhci->mmc));
1357 oldstat = plat->mmc_data.card_present;
1358 plat->mmc_data.card_present = status;
1359 if (status ^ oldstat) {
1360 pr_debug("%s: Slot status change detected (%d -> %d)\n",
1361 mmc_hostname(sdhci->mmc), oldstat, status);
1362 if (status && !plat->mmc_data.built_in)
1363 mmc_detect_change(sdhci->mmc, (5 * HZ) / 2);
1365 mmc_detect_change(sdhci->mmc, 0);
1369 static irqreturn_t carddetect_irq(int irq, void *data)
1371 struct sdhci_host *sdhost = (struct sdhci_host *)data;
1372 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
1373 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1374 struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
1375 struct tegra_sdhci_platform_data *plat;
1378 plat = pdev->dev.platform_data;
1380 tegra_host->card_present =
1381 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
1383 if (!tegra_host->card_present) {
1384 err = tegra_sdhci_configure_regulators(tegra_host,
1385 CONFIG_REG_DIS, 0 , 0);
1387 dev_err(mmc_dev(sdhost->mmc),
1388 "Failed to disable card regulators %d\n", err);
1390 * Set retune request as tuning should be done next time
1391 * a card is inserted.
1393 tegra_host->tuning_status = TUNING_STATUS_RETUNE;
1394 tegra_host->force_retune = true;
1395 sdhost->is_calibration_done = false;
1398 tasklet_schedule(&sdhost->card_tasklet);
1402 static void vendor_trim_clear_sel_vreg(struct sdhci_host *host, bool enable)
1404 unsigned int misc_ctrl;
1406 misc_ctrl = sdhci_readl(host, SDMMC_VNDR_IO_TRIM_CNTRL_0);
1408 misc_ctrl &= ~(SDMMC_VNDR_IO_TRIM_CNTRL_0_SEL_VREG);
1409 sdhci_writel(host, misc_ctrl, SDMMC_VNDR_IO_TRIM_CNTRL_0);
1411 tegra_sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1413 misc_ctrl |= (SDMMC_VNDR_IO_TRIM_CNTRL_0_SEL_VREG);
1414 sdhci_writel(host, misc_ctrl, SDMMC_VNDR_IO_TRIM_CNTRL_0);
1419 static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
1423 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1424 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1425 struct tegra_tuning_data *tuning_data;
1426 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1427 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1428 unsigned int best_tap_value;
1430 if (!(mask & SDHCI_RESET_ALL))
1433 if (tegra_host->sd_stat_head != NULL) {
1434 tegra_host->sd_stat_head->data_crc_count = 0;
1435 tegra_host->sd_stat_head->cmd_crc_count = 0;
1436 tegra_host->sd_stat_head->data_to_count = 0;
1437 tegra_host->sd_stat_head->cmd_to_count = 0;
1440 if (tegra_host->gov_data != NULL)
1441 tegra_host->gov_data->freq_switch_count = 0;
1443 if (soc_data->nvquirks & NVQUIRK_SET_TAP_DELAY) {
1444 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
1445 && (host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
1446 if (host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) {
1447 tuning_data = sdhci_tegra_get_tuning_data(host,
1448 host->mmc->ios.clock);
1449 best_tap_value = (tegra_host->tap_cmd ==
1450 TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1451 tuning_data->nom_best_tap_value :
1452 tuning_data->best_tap_value;
1454 best_tap_value = tegra_host->tuned_tap_delay;
1457 best_tap_value = tegra_host->plat->tap_delay;
1459 sdhci_tegra_set_tap_delay(host, best_tap_value);
1462 vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1463 if (soc_data->nvquirks & NVQUIRK_ENABLE_PADPIPE_CLKEN) {
1465 SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE;
1467 if (soc_data->nvquirks & NVQUIRK_DISABLE_SPI_MODE_CLKEN) {
1469 ~SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
1471 if (soc_data->nvquirks & NVQUIRK_EN_FEEDBACK_CLK) {
1473 ~SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
1475 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK;
1477 /* For automotive enable feedback clock for non-tuning modes */
1478 if (plat->enb_feedback_clock) {
1479 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
1480 && (host->mmc->pm_flags &
1481 MMC_PM_KEEP_POWER)) {
1483 SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
1487 if (soc_data->nvquirks & NVQUIRK_SET_TRIM_DELAY) {
1488 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1489 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1490 vendor_ctrl |= (plat->trim_delay <<
1491 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1493 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50_TUNING)
1494 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_SDR50_TUNING;
1495 sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
1497 misc_ctrl = sdhci_readl(host, SDHCI_VNDR_MISC_CTRL);
1498 if (soc_data->nvquirks & NVQUIRK_ENABLE_SD_3_0)
1499 misc_ctrl |= SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0;
1500 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) {
1502 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT;
1504 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) {
1506 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT;
1508 /* Enable DDR mode support only for SDMMC4 */
1509 if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) {
1510 if (!(plat->uhs_mask & MMC_UHS_MASK_DDR50)) {
1512 SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT;
1515 if (soc_data->nvquirks & NVQUIRK_INFINITE_ERASE_TIMEOUT) {
1517 SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT;
1519 if (soc_data->nvquirks & NVQUIRK_SET_PIPE_STAGES_MASK_0)
1520 misc_ctrl &= ~SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK;
1522 if (plat->enb_ext_loopback) {
1523 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
1524 && (host->mmc->pm_flags &
1525 MMC_PM_KEEP_POWER)) {
1527 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
1530 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
1533 /* Disable External loopback for all sdmmc instances */
1534 if (soc_data->nvquirks & NVQUIRK_DISABLE_EXTERNAL_LOOPBACK)
1535 misc_ctrl &= ~(1 << SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
1537 sdhci_writel(host, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
1539 if (soc_data->nvquirks & NVQUIRK_UPDATE_PAD_CNTRL_REG) {
1540 misc_ctrl = sdhci_readl(host, SDMMC_IO_SPARE_0);
1541 misc_ctrl |= (1 << SPARE_OUT_3_OFFSET);
1542 sdhci_writel(host, misc_ctrl, SDMMC_IO_SPARE_0);
1545 /* SEL_VREG should be 0 for all modes*/
1546 if (soc_data->nvquirks2 &
1547 NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH)
1548 vendor_trim_clear_sel_vreg(host, true);
1550 if (soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CMD23)
1551 host->flags &= ~SDHCI_AUTO_CMD23;
1553 /* Mask the support for any UHS modes if specified */
1554 if (plat->uhs_mask & MMC_UHS_MASK_SDR104)
1555 host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
1557 if (plat->uhs_mask & MMC_UHS_MASK_DDR50)
1558 host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
1560 if (plat->uhs_mask & MMC_UHS_MASK_SDR50)
1561 host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
1563 if (plat->uhs_mask & MMC_UHS_MASK_SDR25)
1564 host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
1566 if (plat->uhs_mask & MMC_UHS_MASK_SDR12)
1567 host->mmc->caps &= ~MMC_CAP_UHS_SDR12;
1569 if (plat->uhs_mask & MMC_MASK_HS400) {
1570 host->mmc->caps2 &= ~MMC_CAP2_HS400;
1571 host->mmc->caps2 &= ~MMC_CAP2_EN_STROBE;
1572 host->mmc->caps2 &= ~MMC_CAP2_HS533;
1575 #ifdef CONFIG_MMC_SDHCI_TEGRA_HS200_DISABLE
1576 host->mmc->caps2 &= ~MMC_CAP2_HS200;
1578 if (plat->uhs_mask & MMC_MASK_HS200)
1579 host->mmc->caps2 &= ~MMC_CAP2_HS200;
1582 if (soc_data->nvquirks2 & NVQUIRK2_UPDATE_HW_TUNING_CONFG) {
1583 vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1584 vendor_ctrl &= ~(SDHCI_VNDR_TUN_CTRL0_0_MUL_M);
1585 vendor_ctrl |= SDHCI_VNDR_TUN_CTRL0_0_MUL_M_VAL;
1586 vendor_ctrl |= SDHCI_VNDR_TUN_CTRL_RETUNE_REQ_EN;
1587 sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_TUN_CTRL0_0);
1589 vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL1_0);
1590 vendor_ctrl &= ~(SDHCI_VNDR_TUN_CTRL1_TUN_STEP_SIZE);
1591 sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_TUN_CTRL1_0);
1594 /* Use timeout clk data timeout counter for generating wr crc status */
1595 if (soc_data->nvquirks &
1596 NVQUIRK_USE_TMCLK_WR_CRC_TIMEOUT) {
1597 vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_SYS_SW_CTRL);
1598 vendor_ctrl |= SDHCI_VNDR_SYS_SW_CTRL_WR_CRC_USE_TMCLK;
1599 sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_SYS_SW_CTRL);
1603 static int tegra_sdhci_buswidth(struct sdhci_host *sdhci, int bus_width)
1605 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1606 const struct tegra_sdhci_platform_data *plat;
1609 plat = pdev->dev.platform_data;
1611 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL);
1612 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
1613 ctrl &= ~SDHCI_CTRL_4BITBUS;
1614 ctrl |= SDHCI_CTRL_8BITBUS;
1616 ctrl &= ~SDHCI_CTRL_8BITBUS;
1617 if (bus_width == MMC_BUS_WIDTH_4)
1618 ctrl |= SDHCI_CTRL_4BITBUS;
1620 ctrl &= ~SDHCI_CTRL_4BITBUS;
1622 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL);
1627 * Calculation of nearest clock frequency for desired rate:
1628 * Get the divisor value, div = p / d_rate
1629 * 1. If it is nearer to ceil(p/d_rate) then increment the div value by 0.5 and
1630 * nearest_rate, i.e. result = p / (div + 0.5) = (p << 1)/((div << 1) + 1).
1631 * 2. If not, result = p / div
1632 * As the nearest clk freq should be <= to desired_rate,
1633 * 3. If result > desired_rate then increment the div by 0.5
1634 * and do, (p << 1)/((div << 1) + 1)
1635 * 4. Else return result
1636 * Here, If condtions 1 & 3 are both satisfied then to keep track of div value,
1637 * defined index variable.
1639 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
1640 unsigned long desired_rate)
1642 unsigned long result;
1646 if (pll_rate <= desired_rate)
1649 div = pll_rate / desired_rate;
1650 if (div > MAX_DIVISOR_VALUE) {
1651 div = MAX_DIVISOR_VALUE;
1652 result = pll_rate / div;
1654 if ((pll_rate % desired_rate) >= (desired_rate / 2))
1655 result = (pll_rate << 1) / ((div << 1) + index++);
1657 result = pll_rate / div;
1659 if (desired_rate < result) {
1661 * Trying to get lower clock freq than desired clock,
1662 * by increasing the divisor value by 0.5
1664 result = (pll_rate << 1) / ((div << 1) + index);
1671 static void tegra_sdhci_clock_set_parent(struct sdhci_host *host,
1672 unsigned long desired_rate)
1674 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1675 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1676 struct clk *parent_clk;
1677 unsigned long pll_source_1_freq;
1678 unsigned long pll_source_2_freq;
1679 struct sdhci_tegra_pll_parent *pll_source = tegra_host->pll_source;
1682 if (tegra_platform_is_fpga())
1685 * Currently pll_p and pll_c are used as clock sources for SDMMC. If clk
1686 * rate is missing for either of them, then no selection is needed and
1687 * the default parent is used.
1689 if (!pll_source[0].pll_rate || !pll_source[1].pll_rate)
1692 pll_source_1_freq = get_nearest_clock_freq(pll_source[0].pll_rate,
1694 pll_source_2_freq = get_nearest_clock_freq(pll_source[1].pll_rate,
1698 * For low freq requests, both the desired rates might be higher than
1699 * the requested clock frequency. In such cases, select the parent
1700 * with the lower frequency rate.
1702 if ((pll_source_1_freq > desired_rate)
1703 && (pll_source_2_freq > desired_rate)) {
1704 if (pll_source_2_freq <= pll_source_1_freq) {
1705 desired_rate = pll_source_2_freq;
1706 pll_source_1_freq = 0;
1708 desired_rate = pll_source_1_freq;
1709 pll_source_2_freq = 0;
1711 rc = clk_set_rate(pltfm_host->clk, desired_rate);
1714 if (pll_source_1_freq > pll_source_2_freq) {
1715 if (!tegra_host->is_parent_pll_source_1) {
1716 parent_clk = pll_source[0].pll;
1717 tegra_host->is_parent_pll_source_1 = true;
1718 clk_set_rate(pltfm_host->clk, DEFAULT_SDHOST_FREQ);
1721 } else if (tegra_host->is_parent_pll_source_1) {
1722 parent_clk = pll_source[1].pll;
1723 tegra_host->is_parent_pll_source_1 = false;
1724 clk_set_rate(pltfm_host->clk, DEFAULT_SDHOST_FREQ);
1728 rc = clk_set_parent(pltfm_host->clk, parent_clk);
1730 pr_err("%s: failed to set pll parent clock %d\n",
1731 mmc_hostname(host->mmc), rc);
1734 static void tegra_sdhci_get_clock_freq_for_mode(struct sdhci_host *sdhci,
1735 unsigned int *clock)
1737 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1738 const struct tegra_sdhci_platform_data *plat = pdev->dev.platform_data;
1739 unsigned int ios_timing = sdhci->mmc->ios.timing;
1742 if (!(plat->is_fix_clock_freq) || !(pdev->dev.of_node)
1743 || (ios_timing >= MMC_TIMINGS_MAX_MODES))
1747 * Index 0 is for ID mode and rest mapped with index being ios timings.
1748 * If the frequency for some particular mode is set as 0 then return
1749 * without updating the clock
1751 if (*clock <= 400000)
1754 index = ios_timing + 1;
1756 if (plat->fixed_clk_freq_table[index] != 0)
1757 *clock = plat->fixed_clk_freq_table[index];
1759 pr_warn("%s: The fixed_clk_freq_table entry for ios timing %d is 0. So using clock rate as requested by card\n",
1760 mmc_hostname(sdhci->mmc), ios_timing);
1763 static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci,
1766 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1767 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1768 unsigned int clk_rate;
1769 #ifdef CONFIG_MMC_FREQ_SCALING
1770 unsigned int tap_value;
1771 struct tegra_tuning_data *tuning_data;
1774 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
1776 * In ddr mode, tegra sdmmc controller clock frequency
1777 * should be double the card clock frequency.
1779 if (tegra_host->ddr_clk_limit &&
1780 (tegra_host->ddr_clk_limit < clock))
1781 clk_rate = tegra_host->ddr_clk_limit * 2;
1783 clk_rate = clock * 2;
1788 if ((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50) &&
1789 tegra_host->soc_data->tuning_freq_list[0])
1790 clk_rate = tegra_host->soc_data->tuning_freq_list[0];
1792 tegra_sdhci_get_clock_freq_for_mode(sdhci, &clk_rate);
1794 if (tegra_host->max_clk_limit &&
1795 (clk_rate > tegra_host->max_clk_limit))
1796 clk_rate = tegra_host->max_clk_limit;
1798 tegra_sdhci_clock_set_parent(sdhci, clk_rate);
1799 clk_set_rate(pltfm_host->clk, clk_rate);
1800 sdhci->max_clk = clk_get_rate(pltfm_host->clk);
1802 #ifdef CONFIG_MMC_FREQ_SCALING
1803 /* Set the tap delay if tuning is done and dfs is enabled */
1804 if (sdhci->mmc->df &&
1805 (tegra_host->tuning_status == TUNING_STATUS_DONE)) {
1806 tuning_data = sdhci_tegra_get_tuning_data(sdhci, clock);
1807 tap_value = (tegra_host->tap_cmd == TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1808 tuning_data->nom_best_tap_value :
1809 tuning_data->best_tap_value;
1810 sdhci_tegra_set_tap_delay(sdhci, tap_value);
1815 static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
1817 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1818 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1819 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1820 struct tegra_sdhci_platform_data *plat;
1826 mutex_lock(&tegra_host->set_clock_mutex);
1827 pr_debug("%s %s %u enabled=%u\n", __func__,
1828 mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled);
1829 plat = pdev->dev.platform_data;
1831 if (!tegra_host->clk_enabled) {
1832 ret = clk_prepare_enable(pltfm_host->clk);
1834 dev_err(mmc_dev(sdhci->mmc),
1835 "clock enable is failed, ret: %d\n", ret);
1836 mutex_unlock(&tegra_host->set_clock_mutex);
1839 if (sdhci->runtime_pm_init_done &&
1840 IS_RTPM_DELAY_CG(plat->rtpm_type)) {
1841 sdhci->runtime_pm_enable_dcg = true;
1842 pm_runtime_get_sync(&pdev->dev);
1844 tegra_host->clk_enabled = true;
1845 sdhci->is_clk_on = true;
1846 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1847 ctrl |= SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1848 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1849 if (tegra_host->soc_data->nvquirks2 &
1850 NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH) {
1851 /* power up / active state */
1852 vendor_trim_clear_sel_vreg(sdhci, true);
1855 tegra_sdhci_set_clk_rate(sdhci, clock);
1857 if (tegra_host->emc_clk && (!tegra_host->is_sdmmc_emc_clk_on)) {
1858 ret = clk_prepare_enable(tegra_host->emc_clk);
1860 dev_err(mmc_dev(sdhci->mmc),
1861 "clock enable is failed, ret: %d\n", ret);
1862 mutex_unlock(&tegra_host->set_clock_mutex);
1865 tegra_host->is_sdmmc_emc_clk_on = true;
1867 if (tegra_host->sclk && (!tegra_host->is_sdmmc_sclk_on)) {
1868 ret = clk_prepare_enable(tegra_host->sclk);
1870 dev_err(mmc_dev(sdhci->mmc),
1871 "clock enable is failed, ret: %d\n", ret);
1872 mutex_unlock(&tegra_host->set_clock_mutex);
1875 tegra_host->is_sdmmc_sclk_on = true;
1877 if (plat->en_periodic_calib &&
1878 sdhci->is_calibration_done) {
1879 cur_time = ktime_get();
1880 period_time = ktime_to_ms(ktime_sub(cur_time,
1881 tegra_host->timestamp));
1882 if (period_time >= SDHCI_PERIODIC_CALIB_TIMEOUT)
1883 tegra_sdhci_do_calibration(sdhci,
1884 sdhci->mmc->ios.signal_voltage);
1886 } else if (!clock && tegra_host->clk_enabled) {
1887 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on) {
1888 clk_disable_unprepare(tegra_host->emc_clk);
1889 tegra_host->is_sdmmc_emc_clk_on = false;
1891 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on) {
1892 clk_disable_unprepare(tegra_host->sclk);
1893 tegra_host->is_sdmmc_sclk_on = false;
1895 if (tegra_host->soc_data->nvquirks2 &
1896 NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH){
1897 /* power down / idle state */
1898 vendor_trim_clear_sel_vreg(sdhci, false);
1900 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1901 ctrl &= ~SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1902 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1904 tegra_host->clk_enabled = false;
1905 sdhci->is_clk_on = false;
1906 if (sdhci->runtime_pm_init_done &&
1907 sdhci->runtime_pm_enable_dcg &&
1908 IS_RTPM_DELAY_CG(plat->rtpm_type)) {
1909 sdhci->runtime_pm_enable_dcg = false;
1910 pm_runtime_put_sync(&pdev->dev);
1912 clk_disable_unprepare(pltfm_host->clk);
1914 mutex_unlock(&tegra_host->set_clock_mutex);
1917 static void tegra_sdhci_en_strobe(struct sdhci_host *host)
1921 vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_SYS_SW_CTRL);
1923 SDHCI_VNDR_SYS_SW_CTRL_STROBE_SHIFT);
1924 sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_SYS_SW_CTRL);
1927 static void tegra_sdhci_post_init(struct sdhci_host *sdhci)
1931 unsigned timeout = 5;
1932 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1933 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1935 if ((sdhci->mmc->card->ext_csd.strobe_support) &&
1936 (sdhci->mmc->caps2 & MMC_CAP2_EN_STROBE) &&
1937 tegra_host->plat->en_strobe)
1938 tegra_sdhci_en_strobe(sdhci);
1940 /* Program TX_DLY_CODE_OFFSET Value for HS533 mode*/
1941 if (sdhci->mmc->card->state & MMC_STATE_HIGHSPEED_533) {
1942 dll_ctrl0 = sdhci_readl(sdhci, SDHCI_VNDR_DLL_CTRL0_0);
1943 dll_ctrl0 &= ~(SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_MASK <<
1944 SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_SHIFT);
1945 dll_ctrl0 |= ((SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_OFFSET &
1946 SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_MASK) <<
1947 SDHCI_VNDR_DLL_CTRL0_0_TX_DLY_SHIFT);
1948 sdhci_writel(sdhci, dll_ctrl0, SDHCI_VNDR_DLL_CTRL0_0);
1951 dll_cfg = sdhci_readl(sdhci, SDHCI_VNDR_DLLCAL_CFG);
1952 dll_cfg |= SDHCI_VNDR_DLLCAL_CFG_EN_CALIBRATE;
1953 sdhci_writel(sdhci, dll_cfg, SDHCI_VNDR_DLLCAL_CFG);
1957 /* Wait until the dll calibration is done */
1959 if (!(sdhci_readl(sdhci, SDHCI_VNDR_DLLCAL_CFG_STATUS) &
1960 SDHCI_VNDR_DLLCAL_CFG_STATUS_DLL_ACTIVE))
1968 dev_err(mmc_dev(sdhci->mmc), "DLL calibration is failed\n");
1972 static void tegra_sdhci_update_sdmmc_pinctrl_register(struct sdhci_host *sdhci,
1975 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1976 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1977 struct pinctrl_state *set_schmitt[2];
1982 set_schmitt[0] = tegra_host->schmitt_enable[0];
1983 set_schmitt[1] = tegra_host->schmitt_enable[1];
1985 if (!IS_ERR_OR_NULL(tegra_host->drv_code_strength)) {
1986 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
1987 tegra_host->drv_code_strength);
1989 dev_warn(mmc_dev(sdhci->mmc),
1990 "setting drive code strength failed\n");
1993 set_schmitt[0] = tegra_host->schmitt_disable[0];
1994 set_schmitt[1] = tegra_host->schmitt_disable[1];
1996 if (!IS_ERR_OR_NULL(tegra_host->default_drv_code_strength)) {
1997 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
1998 tegra_host->default_drv_code_strength);
2000 dev_warn(mmc_dev(sdhci->mmc),
2001 "setting default drive code strength failed\n");
2005 for (i = 0; i < 2; i++) {
2006 if (IS_ERR_OR_NULL(set_schmitt[i]))
2008 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
2011 dev_warn(mmc_dev(sdhci->mmc),
2012 "setting schmitt state failed\n");
2016 static void tegra_sdhci_configure_e_input(struct sdhci_host *sdhci, bool enable)
2020 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
2022 val |= SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
2024 val &= ~SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
2025 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
/*
 * tegra_sdhci_do_calibration - run the SDMMC pad auto-calibration sequence
 * for the given signalling voltage, then (on SoCs with the drive-strength
 * quirk) copy the resulting pull-down/pull-up codes into the pinmux.
 * NOTE(review): this extract elides interior lines (gaps in the embedded
 * numbering: braces, 'return's, delays, poll loop body) — not buildable as-is.
 */
2030 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
2031 unsigned char signal_voltage)
2034 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2035 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2036 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2037 unsigned int timeout = 10;
2038 unsigned int calib_offsets = 0;
2039 unsigned int pulldown_code;
2040 unsigned int pullup_code;
2041 unsigned long pin_config;
2043 bool card_clk_enabled;
/* Bail out early when calibration is disabled by platform data or quirk. */
2046 /* No Calibration for sdmmc4 */
2047 if (tegra_host->plat->disable_auto_cal)
2050 if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CALIBRATION))
/* Gate the card clock while calibrating; restored further below. */
2053 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
2054 card_clk_enabled = clk & SDHCI_CLOCK_CARD_EN;
2055 if (card_clk_enabled) {
2056 clk &= ~SDHCI_CLOCK_CARD_EN;
2057 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
/* Program pad VREF selection for the active signalling voltage. */
2060 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
2061 val &= ~SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK;
2062 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD)
2063 val |= SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
2064 if (soc_data->nvquirks & NVQUIRK_SET_SDMEMCOMP_VREF_SEL) {
2065 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
2066 val |= tegra_host->plat->compad_vref_3v3;
2067 else if (signal_voltage == MMC_SIGNAL_VOLTAGE_180)
2068 val |= tegra_host->plat->compad_vref_1v8;
2072 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
2074 /* Wait for 1us after e_input is enabled*/
2075 if (soc_data->nvquirks2 & NVQUIRK2_ADD_DELAY_AUTO_CALIBRATION)
2078 /* Enable Auto Calibration*/
2079 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
2080 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
2081 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
2082 if (tegra_host->plat->enable_autocal_slew_override)
2083 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_SLW_OVERRIDE;
/* Optional per-voltage PD/PU offsets packed into calib_offsets (PD high byte). */
2084 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_CALIBRATION_OFFSETS)) {
2085 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
2086 calib_offsets = tegra_host->plat->calib_3v3_offsets;
2087 else if (signal_voltage == MMC_SIGNAL_VOLTAGE_180)
2088 calib_offsets = tegra_host->plat->calib_1v8_offsets;
2090 if (calib_offsets) {
2091 /* Program Auto cal PD offset(bits 8:14) */
2093 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
2094 val |= (((calib_offsets >> 8) & 0xFF) <<
2095 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
2096 /* Program Auto cal PU offset(bits 0:6) */
2098 val |= (calib_offsets & 0xFF);
2101 if (tegra_host->plat->auto_cal_step) {
2103 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_STEP_OFFSET_SHIFT);
2104 val |= (tegra_host->plat->auto_cal_step <<
2105 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_STEP_OFFSET_SHIFT);
2107 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
2109 /* Wait for 2us after auto calibration is enabled*/
2110 if (soc_data->nvquirks2 & NVQUIRK2_ADD_DELAY_AUTO_CALIBRATION)
/* Poll AUTO_CAL_STATUS until the ACTIVE bit drops ('timeout' bounds the loop). */
2113 /* Wait until the calibration is done */
2115 if (!(sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) &
2116 SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE))
2124 dev_err(mmc_dev(sdhci->mmc), "Auto calibration failed\n");
2126 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD)
2127 tegra_sdhci_configure_e_input(sdhci, false);
/* Re-enable the card clock if we gated it above. */
2129 if (card_clk_enabled) {
2130 clk |= SDHCI_CLOCK_CARD_EN;
2131 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
2134 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_DRIVE_STRENGTH)) {
2135 /* Disable Auto calibration */
2136 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
2137 val &= ~SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
2138 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
2140 if (tegra_host->pinctrl && tegra_host->drive_group_sel >= 0) {
2141 /* Get the pull down codes from auto cal status reg */
2143 sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) >>
2144 SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET);
2145 pin_config = TEGRA_PINCONF_PACK(
2146 TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH,
2148 err = pinctrl_set_config_for_group_sel(tegra_host->pinctrl,
2149 tegra_host->drive_group_sel, pin_config);
2151 dev_err(mmc_dev(sdhci->mmc),
2152 "Failed to set pulldown codes %d err %d\n",
2153 pulldown_code, err);
2155 /* Calculate the pull up codes */
2156 pullup_code = pulldown_code + PULLUP_ADJUSTMENT_OFFSET;
2157 pin_config = TEGRA_PINCONF_PACK(
2158 TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH,
2160 /* Set the pull up code in the pinmux reg */
2161 err = pinctrl_set_config_for_group_sel(tegra_host->pinctrl,
2162 tegra_host->drive_group_sel, pin_config);
2164 dev_err(mmc_dev(sdhci->mmc),
2165 "Failed to set pullup codes %d err %d\n",
/* Record when this calibration ran so periodic re-calibration can be timed. */
2170 if (tegra_host->plat->en_periodic_calib && tegra_host->card_present) {
2171 tegra_host->timestamp = ktime_get();
2172 sdhci->timestamp = ktime_get();
2173 sdhci->is_calibration_done = true;
/*
 * tegra_sdhci_validate_sd2_0 - reject SD 2.0 (3.3V-only) cards on SoCs where
 * the IO pad cannot tolerate 3.3V (NVQUIRK2_BROKEN_SD2_0_SUPPORT with
 * limit_vddio_max_volt set): the rail is disabled and an error is reported.
 * NOTE(review): braces/returns elided in this extract (numbering gaps).
 */
2177 static int tegra_sdhci_validate_sd2_0(struct sdhci_host *sdhci)
2179 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2180 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2181 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
2182 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2183 struct tegra_sdhci_platform_data *plat;
2186 plat = pdev->dev.platform_data;
2188 if ((soc_data->nvquirks2 & NVQUIRK2_BROKEN_SD2_0_SUPPORT) &&
2189 (plat->limit_vddio_max_volt)) {
2190 /* T210: Bug 1561291
2191 * Design issue where a cap connected to IO node is stressed
2192 * to 3.3v while it can only tolerate up to 1.8v.
/* Cut power to the slot rather than risk over-stressing the cap. */
2194 rc = tegra_sdhci_configure_regulators(tegra_host,
2195 CONFIG_REG_DIS, 0, 0);
2197 dev_err(mmc_dev(sdhci->mmc),
2198 "Regulator disable failed %d\n", rc);
2199 dev_err(mmc_dev(sdhci->mmc),
2200 "SD cards with out 1.8V is not supported\n");
/*
 * tegra_sdhci_signal_voltage_switch - switch host signalling between 3.3V
 * and 1.8V: update SDHCI_HOST_CONTROL2, enable pad e_input where the SoC
 * quirk requires it, power the IO rail, set the regulator voltage (falling
 * back to 3.3V if the 1.8V switch fails), toggle the optional power GPIO,
 * and update pinctrl settings on SoCs with NVQUIRK_UPDATE_PIN_CNTRL_REG.
 * NOTE(review): interior lines elided (numbering gaps) — not buildable as-is.
 */
2208 static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci,
2209 unsigned int signal_voltage)
2211 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2212 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2213 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
2214 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2215 struct tegra_sdhci_platform_data *plat;
2216 unsigned int min_uV = tegra_host->vddio_min_uv;
2217 unsigned int max_uV = tegra_host->vddio_max_uv;
2218 unsigned int rc = 0;
2222 plat = pdev->dev.platform_data;
2224 ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
2225 if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
2226 ctrl |= SDHCI_CTRL_VDD_180;
2227 min_uV = SDHOST_LOW_VOLT_MIN;
2228 max_uV = SDHOST_LOW_VOLT_MAX;
2229 } else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
2230 if (ctrl & SDHCI_CTRL_VDD_180)
2231 ctrl &= ~SDHCI_CTRL_VDD_180;
2234 /* Check if the slot can support the required voltage */
2235 if (min_uV > tegra_host->vddio_max_uv)
2238 /* Set/clear the 1.8V signalling */
2239 sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2241 if (soc_data->nvquirks2 & NVQUIRK2_SET_PAD_E_INPUT_VOL)
2242 tegra_sdhci_configure_e_input(sdhci, true);
2244 if ((!tegra_host->is_rail_enabled) && (tegra_host->card_present)) {
2245 rc = tegra_sdhci_configure_regulators(tegra_host,
2246 CONFIG_REG_EN, 0, 0);
2248 dev_err(mmc_dev(sdhci->mmc),
2249 "Enable regulators failed %d\n", rc);
2253 /* Switch the I/O rail voltage */
2254 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_SET_VOLT,
/* On 1.8V failure, revert to the full 3.3V window rather than leaving the
 * rail in an undefined state. */
2256 if (rc && (signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
2257 dev_err(mmc_dev(sdhci->mmc),
2258 "setting 1.8V failed %d. Revert to 3.3V\n", rc);
2259 signal_voltage = MMC_SIGNAL_VOLTAGE_330;
2260 rc = tegra_sdhci_configure_regulators(tegra_host,
2261 CONFIG_REG_SET_VOLT, tegra_host->vddio_min_uv,
2262 tegra_host->vddio_max_uv);
/* Board-level power GPIO mirrors the selected voltage: 1 = 3.3V, 0 = 1.8V. */
2264 if (gpio_is_valid(plat->power_gpio)) {
2265 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
2266 gpio_set_value(plat->power_gpio, 1);
2268 gpio_set_value(plat->power_gpio, 0);
2273 if (!(soc_data->nvquirks & NVQUIRK_UPDATE_PIN_CNTRL_REG))
2279 if (!plat->update_pinctrl_settings)
2282 set = (signal_voltage == MMC_SIGNAL_VOLTAGE_180) ? true : false;
2284 if (!IS_ERR_OR_NULL(tegra_host->pinctrl_sdmmc))
2285 tegra_sdhci_update_sdmmc_pinctrl_register(sdhci, set);
/*
 * tegra_sdhci_configure_regulators - central switch for slot/IO rail power:
 *  CONFIG_REG_EN        enable vdd_slot/vdd_io (plus pad e_input / pwrdet prep)
 *  CONFIG_REG_DIS       drop to 1.8V signalling if needed, then disable rails
 *  CONFIG_REG_SET_VOLT  change vdd_io to [min_uV, max_uV], managing the
 *                       pwrdet pad-voltage setting around the transition
 * NOTE(review): the switch(option) line, braces and returns are elided in
 * this extract (numbering gaps) — restore from the original before building.
 */
2290 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
2291 u8 option, int min_uV, int max_uV)
2294 int vddio_prev = -1;
2296 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2297 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
2298 struct sdhci_host *sdhci = dev_get_drvdata(tegra_host->dev);
/* CONFIG_REG_EN path (case label elided in this extract). */
2302 if (!tegra_host->is_rail_enabled) {
2303 if (soc_data->nvquirks2 & NVQUIRK2_SET_PAD_E_INPUT_VOL)
2304 tegra_sdhci_configure_e_input(sdhci, true);
2305 if (tegra_host->vdd_io_reg) {
2306 vddio_prev = regulator_get_voltage(
2307 tegra_host->vdd_io_reg);
2308 if (vddio_prev == SDHOST_LOW_VOLT_MAX) {
2309 if (plat->pwrdet_support &&
2310 tegra_host->sdmmc_padctrl)
2311 rc = padctrl_set_voltage(
2312 tegra_host->sdmmc_padctrl,
2316 if (tegra_host->vdd_slot_reg)
2317 rc = regulator_enable(tegra_host->vdd_slot_reg);
2318 if (tegra_host->vdd_io_reg)
2319 rc = regulator_enable(tegra_host->vdd_io_reg);
2320 tegra_host->is_rail_enabled = true;
2323 case CONFIG_REG_DIS:
2324 if (tegra_host->is_rail_enabled) {
/* Drop signalling to 1.8V before cutting power when the rail is above it. */
2325 if (tegra_host->vdd_io_reg) {
2326 vddio_prev = regulator_get_voltage(
2327 tegra_host->vdd_io_reg);
2328 if (vddio_prev > SDHOST_LOW_VOLT_MAX)
2329 tegra_sdhci_signal_voltage_switch(
2330 sdhci, MMC_SIGNAL_VOLTAGE_180);
2332 if (tegra_host->vdd_io_reg)
2333 rc = regulator_disable(tegra_host->vdd_io_reg);
2334 if (tegra_host->vdd_slot_reg)
2335 rc = regulator_disable(
2336 tegra_host->vdd_slot_reg);
2337 tegra_host->is_rail_enabled = false;
2340 case CONFIG_REG_SET_VOLT:
2341 if (tegra_host->vdd_io_reg) {
2342 if (soc_data->nvquirks2 & NVQUIRK2_CONFIG_PWR_DET) {
2343 vddio_prev = regulator_get_voltage(
2344 tegra_host->vdd_io_reg);
2345 /* set pwrdet sdmmc1 before set 3.3 V */
2346 if ((vddio_prev < min_uV) &&
2347 (min_uV >= SDHOST_HIGH_VOLT_2V8) &&
2348 plat->pwrdet_support &&
2349 tegra_host->sdmmc_padctrl) {
2350 rc = padctrl_set_voltage(
2351 tegra_host->sdmmc_padctrl,
2352 SDHOST_HIGH_VOLT_3V3);
2354 dev_err(mmc_dev(sdhci->mmc),
2355 "padcontrol set volt failed:"
2359 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
/* Settling delay after dropping to the low-voltage window with pwrdet. */
2362 if ((plat->pwrdet_support) &&
2363 (min_uV == SDHOST_LOW_VOLT_MIN))
2364 usleep_range(5000, 5500);
2365 if (soc_data->nvquirks2 & NVQUIRK2_CONFIG_PWR_DET) {
2366 vddio_new = regulator_get_voltage(
2367 tegra_host->vdd_io_reg);
2368 /* clear pwrdet sdmmc1 after set 1.8 V */
2369 if ((vddio_new <= vddio_prev) &&
2370 (vddio_new == SDHOST_LOW_VOLT_MAX) &&
2371 plat->pwrdet_support &&
2372 tegra_host->sdmmc_padctrl) {
2373 rc = padctrl_set_voltage(
2374 tegra_host->sdmmc_padctrl, vddio_new);
2376 dev_err(mmc_dev(sdhci->mmc),
2377 "padcontrol set volt failed:"
2384 pr_err("Invalid argument passed to reg config %d\n", option);
/*
 * tegra_sdhci_reset - issue an SDHCI software reset for 'mask'
 * (SDHCI_RESET_ALL/CMD/DATA), poll until hardware clears the bit (the
 * elided lines presumably bound the wait to the "max 100 ms" noted below),
 * then run Tegra-specific post-reset fixups via tegra_sdhci_reset_exit().
 */
2390 static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask)
2392 unsigned long timeout;
2394 sdhci_writeb(sdhci, mask, SDHCI_SOFTWARE_RESET);
2396 /* Wait max 100 ms */
2399 /* hw clears the bit when it's done */
2400 while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & mask) {
2402 dev_err(mmc_dev(sdhci->mmc), "Reset 0x%x never"
2403 "completed.\n", (int)mask);
2410 tegra_sdhci_reset_exit(sdhci, mask);
/*
 * sdhci_tegra_set_tap_delay - program the RX tap delay into the vendor
 * clock-control register. The card clock is gated around the update, the
 * hardware-tap bit in VNDR_TUN_CTRL0 is cleared before and restored after
 * the write (standard-tuning mode only), and CMD/DATA lines are reset so
 * the new tap takes effect cleanly.
 * NOTE(review): braces/returns elided in this extract (numbering gaps).
 */
2413 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
2414 unsigned int tap_delay)
2418 bool card_clk_enabled;
2420 /* Max tap delay value is 255 */
2421 if (tap_delay > MAX_TAP_VALUES) {
2422 dev_err(mmc_dev(sdhci->mmc),
2423 "Valid tap range (0-255). Setting tap value %d\n",
2429 card_clk_enabled = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL) &
2430 SDHCI_CLOCK_CARD_EN;
/* Gate the card clock while the tap value changes. */
2432 if (card_clk_enabled) {
2433 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
2434 clk &= ~SDHCI_CLOCK_CARD_EN;
2435 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
2438 if (!(sdhci->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING)) {
2439 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_TUN_CTRL0_0);
2440 vendor_ctrl &= ~SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP;
2441 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_TUN_CTRL0_0);
/* Replace the tap field (mask then or-in the new value). */
2444 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
2445 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
2446 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
2447 vendor_ctrl |= (tap_delay << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
2448 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
2450 if (!(sdhci->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING)) {
2451 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_TUN_CTRL0_0);
2452 vendor_ctrl |= SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP;
2453 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_TUN_CTRL0_0);
2455 tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
2458 if (card_clk_enabled) {
2459 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
2460 clk |= SDHCI_CLOCK_CARD_EN;
2461 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
/*
 * sdhci_tegra_set_trim_delay - program the TX trim delay field of the
 * vendor clock-control register (read-modify-write of the TRIM_VALUE bits).
 */
2465 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
2466 unsigned int trim_delay)
2470 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
2471 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
2472 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
2473 vendor_ctrl |= (trim_delay << SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
2474 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * sdhci_tegra_sd_error_stats - bump the per-host error counters that match
 * the CRC/timeout bits set in 'int_status' (counters live in
 * tegra_host->sd_stat_head, exposed elsewhere via debugfs).
 * NOTE(review): the return statement is elided in this extract.
 */
2477 static int sdhci_tegra_sd_error_stats(struct sdhci_host *host, u32 int_status)
2479 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2480 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2481 struct sdhci_tegra_sd_stats *head = tegra_host->sd_stat_head;
2483 if (int_status & SDHCI_INT_DATA_CRC)
2484 head->data_crc_count++;
2485 if (int_status & SDHCI_INT_CRC)
2486 head->cmd_crc_count++;
2487 if (int_status & SDHCI_INT_TIMEOUT)
2488 head->cmd_to_count++;
2489 if (int_status & SDHCI_INT_DATA_TIMEOUT)
2490 head->data_to_count++;
/*
 * sdhci_tegra_get_tuning_data - pick the tuning-data slot for 'clock':
 * slot 0 when only one tuning frequency exists or the clock is at/below
 * the lowest supported tuning frequency, slot 1 otherwise.
 * NOTE(review): loop body/returns elided in this extract (numbering gaps).
 */
2494 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
2495 struct sdhci_host *sdhci, unsigned int clock)
2497 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2498 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2499 struct tegra_tuning_data *tuning_data;
2500 unsigned int low_freq;
2503 if (tegra_host->tuning_freq_count == 1) {
2504 tuning_data = &tegra_host->tuning_data[0];
2508 /* Get the lowest supported freq */
2509 for (i = 0; i < TUNING_FREQ_COUNT; ++i) {
2510 low_freq = tegra_host->soc_data->tuning_freq_list[i];
2515 if (clock <= low_freq)
2516 tuning_data = &tegra_host->tuning_data[0];
2518 tuning_data = &tegra_host->tuning_data[1];
/*
 * calculate_vmin_values - derive the Vmin-side tuning numbers (tap-to-tap
 * delay, UI, first tap hole) for the requested 'vmin', interpolating
 * linearly between the estimated values at tuning-data Vmin and at the
 * boot voltage when vmin falls between them. Results land in
 * tuning_data->calc_values and are dumped via pr_info at the end.
 * NOTE(review): some divisor lines and braces are elided in this extract.
 */
2524 static void calculate_vmin_values(struct sdhci_host *sdhci,
2525 struct tegra_tuning_data *tuning_data, int vmin, int boot_mv)
2527 struct tuning_values *est_values = &tuning_data->est_values;
2528 struct tuning_values *calc_values = &tuning_data->calc_values;
2529 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2530 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2531 int vmin_slope, vmin_int, temp_calc_vmin;
2532 int t2t_vmax, t2t_vmin;
2533 int vmax_thole, vmin_thole;
2536 * If current vmin is equal to vmin or vmax of tuning data, use the
2537 * previously calculated estimated T2T values directly. Note that the
2538 * estimated T2T_vmax is not at Vmax specified in tuning data. It is
2539 * the T2T at the boot or max voltage for the current SKU. Hence,
2540 * boot_mv is used in place of t2t_coeffs->vmax.
2542 if (vmin == t2t_coeffs->vmin) {
2543 t2t_vmin = est_values->t2t_vmin;
2544 } else if (vmin == boot_mv) {
2545 t2t_vmin = est_values->t2t_vmax;
2548 * For any intermediate voltage between boot voltage and vmin
2549 * of tuning data, calculate the slope and intercept from the
2550 * t2t at boot_mv and vmin and calculate the actual values.
/* Work in 1/T2T space: 1/T2T is linear in voltage, T2T itself is not. */
2552 t2t_vmax = 1000 / est_values->t2t_vmax;
2553 t2t_vmin = 1000 / est_values->t2t_vmin;
2554 vmin_slope = ((t2t_vmax - t2t_vmin) * 1000) /
2555 (boot_mv - t2t_coeffs->vmin);
2556 vmin_int = (t2t_vmax * 1000 - (vmin_slope * boot_mv)) / 1000;
2557 t2t_vmin = (vmin_slope * vmin) / 1000 + vmin_int;
2558 t2t_vmin = (1000 / t2t_vmin);
/* Scale the estimated Vmin T2T by the calculated/estimated Vmax ratio. */
2561 calc_values->t2t_vmin = (t2t_vmin * calc_values->t2t_vmax) /
2562 est_values->t2t_vmax;
2564 calc_values->ui_vmin = (1000000 / (tuning_data->freq_hz / 1000000)) /
2565 calc_values->t2t_vmin;
2567 /* Calculate the vmin tap hole at vmin of tuning data */
2568 temp_calc_vmin = (est_values->t2t_vmin * calc_values->t2t_vmax) /
2569 est_values->t2t_vmax;
2570 vmin_thole = (thole_coeffs->thole_vmin_int -
2571 (thole_coeffs->thole_vmin_slope * temp_calc_vmin)) /
2573 vmax_thole = calc_values->vmax_thole;
2575 if (vmin == t2t_coeffs->vmin) {
2576 calc_values->vmin_thole = vmin_thole;
2577 } else if (vmin == boot_mv) {
2578 calc_values->vmin_thole = vmax_thole;
2581 * Interpolate the tap hole for any intermediate voltage.
2582 * Calculate the slope and intercept from the available data
2583 * and use them to calculate the actual values.
2585 vmin_slope = ((vmax_thole - vmin_thole) * 1000) /
2586 (boot_mv - t2t_coeffs->vmin);
2587 vmin_int = (vmax_thole * 1000 - (vmin_slope * boot_mv)) / 1000;
2588 calc_values->vmin_thole = (vmin_slope * vmin) / 1000 + vmin_int;
2591 /* Adjust the partial win start for Vmin boundary */
2592 if (tuning_data->is_partial_win_valid)
2593 tuning_data->final_tap_data[0].win_start =
2594 (tuning_data->final_tap_data[0].win_start *
2595 tuning_data->calc_values.t2t_vmax) /
2596 tuning_data->calc_values.t2t_vmin;
2598 pr_info("**********Tuning values*********\n");
2599 pr_info("**estimated values**\n");
2600 pr_info("T2T_Vmax %d, T2T_Vmin %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
2601 est_values->t2t_vmax, est_values->t2t_vmin,
2602 est_values->vmax_thole, est_values->ui);
2603 pr_info("**Calculated values**\n");
2604 pr_info("T2T_Vmax %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
2605 calc_values->t2t_vmax, calc_values->vmax_thole,
2607 pr_info("T2T_Vmin %d, 1'st_hole_Vmin %d, UI_Vmin %d\n",
2608 calc_values->t2t_vmin, calc_values->vmin_thole,
2609 calc_values->ui_vmin);
2610 pr_info("***********************************\n");
/*
 * slide_window_start - move a tap-window start inward to add safety margin:
 * boundary starts advance by one T2T period; hole starts advance past the
 * hole by 7% of the hole position plus a SoC-specific margin. The result is
 * clamped to MAX_TAP_VALUES.
 * NOTE(review): braces/conditions partially elided in this extract.
 */
2613 static int slide_window_start(struct sdhci_host *sdhci,
2614 struct tegra_tuning_data *tuning_data,
2615 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
2619 if (edge_attr == WIN_EDGE_BOUN_START) {
2621 tap_value += (1000 / tuning_data->calc_values.t2t_vmin);
2623 tap_value += (1000 / tuning_data->calc_values.t2t_vmax);
2624 } else if (edge_attr == WIN_EDGE_HOLE) {
2625 if (tap_hole >= 0) {
2626 tap_margin = get_tuning_tap_hole_margins(sdhci,
2627 tuning_data->calc_values.t2t_vmax);
2628 tap_value += ((7 * tap_hole) / 100) + tap_margin;
2632 if (tap_value > MAX_TAP_VALUES)
2633 tap_value = MAX_TAP_VALUES;
/*
 * slide_window_end - move a tap-window end inward (mirror of
 * slide_window_start): boundary ends are rescaled from Vmax to Vmin T2T and
 * retreat by one T2T period; hole ends retreat below the hole by 7% of the
 * hole position plus the SoC margin.
 * NOTE(review): the return statement is elided in this extract.
 */
2638 static int slide_window_end(struct sdhci_host *sdhci,
2639 struct tegra_tuning_data *tuning_data,
2640 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
2644 if (edge_attr == WIN_EDGE_BOUN_END) {
2645 tap_value = (tap_value * tuning_data->calc_values.t2t_vmax) /
2646 tuning_data->calc_values.t2t_vmin;
2647 tap_value -= (1000 / tuning_data->calc_values.t2t_vmin);
2648 } else if (edge_attr == WIN_EDGE_HOLE) {
2649 if (tap_hole >= 0) {
2650 tap_value = tap_hole;
2651 tap_margin = get_tuning_tap_hole_margins(sdhci,
2652 tuning_data->calc_values.t2t_vmin);
2654 tap_value -= ((7 * tap_hole) / 100) + tap_margin;
/*
 * adjust_window_boundaries - shrink every valid tap window in
 * temp_tap_data by sliding its start (Vmax-side) and end (Vmin-side)
 * inward via slide_window_start/slide_window_end, advancing the expected
 * hole position by one UI per hole already handled. Prints the final
 * windows; the (elided) return is presumably a status code.
 */
2659 static int adjust_window_boundaries(struct sdhci_host *sdhci,
2660 struct tegra_tuning_data *tuning_data,
2661 struct tap_window_data *temp_tap_data)
2663 struct tap_window_data *tap_data;
2664 int vmin_tap_hole = -1;
2665 int vmax_tap_hole = -1;
2668 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2669 tap_data = &temp_tap_data[i];
2670 /* Update with next hole if first hole is taken care of */
2671 if (tap_data->win_start_attr == WIN_EDGE_HOLE)
2672 vmax_tap_hole = tuning_data->calc_values.vmax_thole +
2673 (tap_data->hole_pos - 1) *
2674 tuning_data->calc_values.ui;
2675 tap_data->win_start = slide_window_start(sdhci, tuning_data,
2676 tap_data->win_start, tap_data->win_start_attr,
2679 /* Update with next hole if first hole is taken care of */
2680 if (tap_data->win_end_attr == WIN_EDGE_HOLE)
2681 vmin_tap_hole = tuning_data->calc_values.vmin_thole +
2682 (tap_data->hole_pos - 1) *
2683 tuning_data->calc_values.ui_vmin;
2684 tap_data->win_end = slide_window_end(sdhci, tuning_data,
2685 tap_data->win_end, tap_data->win_end_attr,
2689 pr_info("***********final tuning windows**********\n");
2690 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2691 tap_data = &temp_tap_data[i];
2692 pr_info("win[%d]: %d - %d\n", i, tap_data->win_start,
2695 pr_info("********************************\n");
/*
 * find_best_tap_value - choose the widest usable tap window and return a
 * tap inside it. A valid leading partial window (index 0) is sized as
 * min(win_end - win_start, 2 * win_end). For a full window, the tap is
 * placed between start and end weighted by the Vmin/Vmax T2T ratio; for a
 * partial window it is win_end - pref_win/2. Returns a negative value
 * (elided here) when no window opens at this vmin.
 */
2699 static int find_best_tap_value(struct tegra_tuning_data *tuning_data,
2700 struct tap_window_data *temp_tap_data, int vmin)
2702 struct tap_window_data *tap_data;
2703 u8 i = 0, sel_win = 0;
2704 int pref_win = 0, curr_win_size = 0;
2705 int best_tap_value = 0;
2707 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2708 tap_data = &temp_tap_data[i];
2709 if (!i && tuning_data->is_partial_win_valid) {
2710 pref_win = tap_data->win_end - tap_data->win_start;
2711 if ((tap_data->win_end * 2) < pref_win)
2712 pref_win = tap_data->win_end * 2;
2715 curr_win_size = tap_data->win_end - tap_data->win_start;
2716 if ((curr_win_size > 0) && (curr_win_size > pref_win)) {
2717 pref_win = curr_win_size;
/* sel_win = i presumably recorded here (line elided in extract). */
2723 if (pref_win <= 0) {
2724 pr_err("No window opening for %d vmin\n", vmin);
2728 tap_data = &temp_tap_data[sel_win];
2729 if (!sel_win && tuning_data->is_partial_win_valid) {
2731 best_tap_value = tap_data->win_end - (pref_win / 2);
2732 if (best_tap_value < 0)
2735 best_tap_value = tap_data->win_start +
2736 ((tap_data->win_end - tap_data->win_start) *
2737 tuning_data->calc_values.t2t_vmin) /
2738 (tuning_data->calc_values.t2t_vmin +
2739 tuning_data->calc_values.t2t_vmax);
2742 pr_info("best tap win - (%d-%d), best tap value %d\n",
2743 tap_data->win_start, tap_data->win_end, best_tap_value);
2744 return best_tap_value;
/*
 * sdhci_tegra_calculate_best_tap - search for a best tap value, starting at
 * the DVFS-predicted vmin for this frequency (falling back to the boot vcore
 * if prediction fails) and, per the loop structure, presumably raising vmin
 * until a window opens or the boot vcore is exceeded. On success stores the
 * tap in tuning_data and pushes the new fmax-at-vmin constraint to DVFS
 * (tolerating -EPERM/-ENOSYS when overrides are disabled).
 * NOTE(review): loop header, returns and error paths elided in this extract.
 */
2747 static int sdhci_tegra_calculate_best_tap(struct sdhci_host *sdhci,
2748 struct tegra_tuning_data *tuning_data)
2750 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2751 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2752 struct tap_window_data *temp_tap_data = NULL;
2753 int vmin, curr_vmin, best_tap_value = 0;
2756 curr_vmin = tegra_dvfs_predict_mv_at_hz_no_tfloor(pltfm_host->clk,
2757 tuning_data->freq_hz);
2759 curr_vmin = tegra_host->boot_vcore_mv;
2763 SDHCI_TEGRA_DBG("%s: checking for win opening with vmin %d\n",
2764 mmc_hostname(sdhci->mmc), vmin);
2765 if ((best_tap_value < 0) &&
2766 (vmin > tegra_host->boot_vcore_mv)) {
2767 dev_err(mmc_dev(sdhci->mmc),
2768 "No best tap for any vcore range\n");
2769 kfree(temp_tap_data);
2770 temp_tap_data = NULL;
2774 calculate_vmin_values(sdhci, tuning_data, vmin,
2775 tegra_host->boot_vcore_mv);
/* Scratch copy of the final tap windows, allocated once and reused
 * across vmin iterations. */
2777 if (temp_tap_data == NULL) {
2778 temp_tap_data = kzalloc(sizeof(struct tap_window_data) *
2779 tuning_data->num_of_valid_tap_wins, GFP_KERNEL);
2780 if (IS_ERR_OR_NULL(temp_tap_data)) {
2781 dev_err(mmc_dev(sdhci->mmc),
2782 "No memory for final tap value calculation\n");
2787 memcpy(temp_tap_data, tuning_data->final_tap_data,
2788 sizeof(struct tap_window_data) *
2789 tuning_data->num_of_valid_tap_wins);
2791 adjust_window_boundaries(sdhci, tuning_data, temp_tap_data);
2793 best_tap_value = find_best_tap_value(tuning_data,
2794 temp_tap_data, vmin);
2796 if (best_tap_value < 0)
2798 } while (best_tap_value < 0);
2800 tuning_data->best_tap_value = best_tap_value;
2801 tuning_data->nom_best_tap_value = best_tap_value;
2804 * Set the new vmin if there is any change. If dvfs overrides are
2805 * disabled, then print the error message but continue execution
2806 * rather than disabling tuning altogether.
2808 if ((tuning_data->best_tap_value >= 0) && (curr_vmin != vmin)) {
2809 err = tegra_dvfs_set_fmax_at_vmin(pltfm_host->clk,
2810 tuning_data->freq_hz, vmin);
2811 if ((err == -EPERM) || (err == -ENOSYS)) {
2813 * tegra_dvfs_set_fmax_at_vmin: will return EPERM or
2814 * ENOSYS, when DVFS override is not enabled, continue
2815 * tuning with default core voltage.
2818 "dvfs overrides disabled. Vmin not updated\n");
2822 kfree(temp_tap_data);
/*
 * sdhci_tegra_issue_tuning_cmd - issue one tuning command (CMD19/CMD21,
 * stored in tegra_host->tuning_opcode) by driving the SDHCI registers
 * directly: wait for CMD/DAT inhibit to clear, arm EXEC_TUNING, program
 * block size/transfer mode, write the command, then check the interrupt
 * status for a clean DATA_AVAIL (no data CRC) and reset CMD/DATA on
 * failure. Returns -ENOMEDIUM when the card was pulled mid-tuning.
 * NOTE(review): waits, returns and some braces elided in this extract.
 */
2826 static int sdhci_tegra_issue_tuning_cmd(struct sdhci_host *sdhci)
2828 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2829 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2833 unsigned int timeout = 10;
2837 if (gpio_is_valid(tegra_host->plat->cd_gpio) &&
2838 (gpio_get_value(tegra_host->plat->cd_gpio) != 0)) {
2839 dev_err(mmc_dev(sdhci->mmc), "device removed during tuning\n");
2842 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
2843 while (sdhci_readl(sdhci, SDHCI_PRESENT_STATE) & mask) {
2845 dev_err(mmc_dev(sdhci->mmc), "Controller never"
2846 "released inhibit bit(s).\n");
/* Clear any previously-latched tuned clock, then start a fresh execution. */
2854 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2855 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2856 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2858 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2859 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2860 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2863 * In response to CMD19, the card sends 64 bytes of tuning
2864 * block to the Host Controller. So we set the block size
2866 * In response to CMD21, the card sends 128 bytes of tuning
2867 * block for MMC_BUS_WIDTH_8 and 64 bytes for MMC_BUS_WIDTH_4
2868 * to the Host Controller. So we set the block size to 64 here.
2870 sdhci_writew(sdhci, SDHCI_MAKE_BLKSZ(7, tegra_host->tuning_bsize),
2873 sdhci_writeb(sdhci, 0xE, SDHCI_TIMEOUT_CONTROL);
2875 sdhci_writew(sdhci, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2877 sdhci_writel(sdhci, 0x0, SDHCI_ARGUMENT);
2879 /* Set the cmd flags */
2880 flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_DATA;
2881 /* Issue the command */
2882 sdhci->command = SDHCI_MAKE_CMD(tegra_host->tuning_opcode, flags);
2883 sdhci_writew(sdhci, sdhci->command, SDHCI_COMMAND);
/* Read and acknowledge the raw interrupt status for this command. */
2889 intstatus = sdhci_readl(sdhci, SDHCI_INT_STATUS);
2891 sdhci_writel(sdhci, intstatus, SDHCI_INT_STATUS);
2896 if ((intstatus & SDHCI_INT_DATA_AVAIL) &&
2897 !(intstatus & SDHCI_INT_DATA_CRC)) {
2899 sdhci->tuning_done = 1;
/* Failure path: flush the stuck CMD/DATA state machines. */
2901 tegra_sdhci_reset(sdhci, SDHCI_RESET_DATA);
2902 tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD);
2906 if (sdhci->tuning_done) {
2907 sdhci->tuning_done = 0;
2908 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2909 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING) &&
2910 (ctrl & SDHCI_CTRL_TUNED_CLK))
/*
 * sdhci_tegra_scan_tap_values - sweep tap values upward from 'starting_tap',
 * issuing a tuning command at each tap, and stop at the first pass/fail
 * transition ('expect_failure' selects which direction counts). -ENOMEDIUM
 * from the tuning command (card removed) is handled specially; retries are
 * re-armed per tap via TUNING_RETRIES.
 * NOTE(review): the tap increment, '*status' update and return are elided.
 */
2919 static int sdhci_tegra_scan_tap_values(struct sdhci_host *sdhci,
2920 unsigned int starting_tap, bool expect_failure, int *status)
2922 unsigned int tap_value = starting_tap;
2924 unsigned int retry = TUNING_RETRIES;
2927 /* Set the tap delay */
2928 sdhci_tegra_set_tap_delay(sdhci, tap_value);
2930 /* Run frequency tuning */
2931 err = sdhci_tegra_issue_tuning_cmd(sdhci);
2932 if (err == -ENOMEDIUM) {
2940 retry = TUNING_RETRIES;
2941 if ((expect_failure && !err) ||
2942 (!expect_failure && err))
2946 } while (tap_value <= MAX_TAP_VALUES);
/*
 * calculate_actual_tuning_values - compute the calculated (as opposed to
 * estimated) Vmax-side values: T2T_Vmax from the measured UI, and the first
 * tap hole at Vmax, interpolated linearly when voltage_mv lies strictly
 * between the coefficient table's vmin and vmax.
 * NOTE(review): divisor lines (the '/ 1000' continuations) and the return
 * are elided in this extract (numbering gaps).
 */
2952 static int calculate_actual_tuning_values(int speedo,
2953 struct tegra_tuning_data *tuning_data, int voltage_mv)
2955 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2956 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2957 struct tuning_values *calc_values = &tuning_data->calc_values;
2959 int vmax_thole, vmin_thole;
2961 /* T2T_Vmax = (1000000/freq_MHz)/Calc_UI */
2962 calc_values->t2t_vmax = (1000000 / (tuning_data->freq_hz / 1000000)) /
2966 * Interpolate the tap hole.
2967 * Vmax_1'st_hole = (Calc_T2T_Vmax*(-thole_slope)+thole_tint.
2969 vmax_thole = (thole_coeffs->thole_vmax_int -
2970 (thole_coeffs->thole_vmax_slope * calc_values->t2t_vmax)) /
2972 vmin_thole = (thole_coeffs->thole_vmin_int -
2973 (thole_coeffs->thole_vmin_slope * calc_values->t2t_vmax)) /
2975 if (voltage_mv == t2t_coeffs->vmin) {
2976 calc_values->vmax_thole = vmin_thole;
2977 } else if (voltage_mv == t2t_coeffs->vmax) {
2978 calc_values->vmax_thole = vmax_thole;
2980 slope = (vmax_thole - vmin_thole) /
2981 (t2t_coeffs->vmax - t2t_coeffs->vmin);
/* NOTE(review): the constant 1250 here looks like a hard-coded vmax (mV);
 * confirm against the coefficient tables in the full source. */
2982 inpt = ((vmax_thole * 1000) - (slope * 1250)) / 1000;
2983 calc_values->vmax_thole = slope * voltage_mv + inpt;
2990 * All coeffs are filled up in the table after multiplying by 1000. So, all
2991 * calculations should have a divide by 1000 at the end.
/*
 * calculate_estimated_tuning_values - derive speedo-based estimates of
 * T2T (vmin/vmax), UI, and the first tap hole for 'voltage_mv' from the
 * per-SKU coefficient tables. Intermediate voltages are interpolated in
 * 1/T2T space (linear in voltage) using PRECISION_FOR_ESTIMATE scaling.
 * NOTE(review): braces and the return are elided in this extract.
 */
2993 static int calculate_estimated_tuning_values(int speedo,
2994 struct tegra_tuning_data *tuning_data, int voltage_mv)
2996 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2997 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2998 struct tuning_values *est_values = &tuning_data->est_values;
3000 int vmax_t2t, vmin_t2t;
3001 int vmax_thole, vmin_thole;
3003 /* Est_T2T_Vmax = (speedo*(-t2t_slope)+t2t_int */
3004 vmax_t2t = (t2t_coeffs->t2t_vmax_int - (speedo *
3005 t2t_coeffs->t2t_vmax_slope)) / 1000;
3006 vmin_t2t = (t2t_coeffs->t2t_vmin_int - (speedo *
3007 t2t_coeffs->t2t_vmin_slope)) / 1000;
3008 est_values->t2t_vmin = vmin_t2t;
3010 if (voltage_mv == t2t_coeffs->vmin) {
3011 est_values->t2t_vmax = vmin_t2t;
3012 } else if (voltage_mv == t2t_coeffs->vmax) {
3013 est_values->t2t_vmax = vmax_t2t;
3015 vmax_t2t = PRECISION_FOR_ESTIMATE / vmax_t2t;
3016 vmin_t2t = PRECISION_FOR_ESTIMATE / vmin_t2t;
3018 * For any intermediate voltage between 0.95V and max vcore,
3019 * calculate the slope and intercept from the T2T and tap hole
3020 * values of 0.95V and max vcore and use them to calculate the
3021 * actual values. 1/T2T is a linear function of voltage.
3023 slope = ((vmax_t2t - vmin_t2t) * PRECISION_FOR_ESTIMATE) /
3024 (t2t_coeffs->vmax - t2t_coeffs->vmin);
3025 inpt = (vmax_t2t * PRECISION_FOR_ESTIMATE -
3026 (slope * t2t_coeffs->vmax)) / PRECISION_FOR_ESTIMATE;
3027 est_values->t2t_vmax = ((slope * voltage_mv) /
3028 PRECISION_FOR_ESTIMATE + inpt);
3029 est_values->t2t_vmax = (PRECISION_FOR_ESTIMATE /
3030 est_values->t2t_vmax);
3033 /* Est_UI = (1000000/freq_MHz)/Est_T2T_Vmax */
3034 est_values->ui = (1000000 / (thole_coeffs->freq_khz / 1000)) /
3035 est_values->t2t_vmax;
3038 * Est_1'st_hole = (Est_T2T_Vmax*(-thole_slope)) + thole_int.
3040 vmax_thole = (thole_coeffs->thole_vmax_int -
3041 (thole_coeffs->thole_vmax_slope * est_values->t2t_vmax)) / 1000;
3042 vmin_thole = (thole_coeffs->thole_vmin_int -
3043 (thole_coeffs->thole_vmin_slope * est_values->t2t_vmax)) / 1000;
3045 if (voltage_mv == t2t_coeffs->vmin) {
3046 est_values->vmax_thole = vmin_thole;
3047 } else if (voltage_mv == t2t_coeffs->vmax) {
3048 est_values->vmax_thole = vmax_thole;
3051 * For any intermediate voltage between 0.95V and max vcore,
3052 * calculate the slope and intercept from the t2t and tap hole
3053 * values of 0.95V and max vcore and use them to calculate the
3054 * actual values. Tap hole is a linear function of voltage.
3056 slope = ((vmax_thole - vmin_thole) * PRECISION_FOR_ESTIMATE) /
3057 (t2t_coeffs->vmax - t2t_coeffs->vmin);
3058 inpt = (vmax_thole * PRECISION_FOR_ESTIMATE -
3059 (slope * t2t_coeffs->vmax)) / PRECISION_FOR_ESTIMATE;
3060 est_values->vmax_thole = (slope * voltage_mv) /
3061 PRECISION_FOR_ESTIMATE + inpt;
3063 est_values->vmin_thole = vmin_thole;
3069 * Insert the calculated holes and get the final tap windows
3070 * with the boundaries and holes set.
/*
 * adjust_holes_in_tap_windows - walk the raw tap windows, splitting any
 * window that contains the (periodically repeating, one-UI-spaced) tap hole
 * into sub-windows, and write the result to tuning_data->final_tap_data
 * (devm-allocated for up to 42 windows). Updates num_of_valid_tap_wins and
 * dumps the result.
 * NOTE(review): loop counters, hole_pos updates and several braces are
 * elided in this extract (numbering gaps) — not buildable as-is.
 */
3072 static int adjust_holes_in_tap_windows(struct sdhci_host *sdhci,
3073 struct tegra_tuning_data *tuning_data)
3075 struct tap_window_data *tap_data;
3076 struct tap_window_data *final_tap_data;
3077 struct tuning_values *calc_values = &tuning_data->calc_values;
3078 int tap_hole, size = 0;
3079 u8 i = 0, j = 0, num_of_wins, hole_pos = 0;
3081 tuning_data->final_tap_data =
3082 devm_kzalloc(mmc_dev(sdhci->mmc),
3083 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
3084 if (IS_ERR_OR_NULL(tuning_data->final_tap_data)) {
3085 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
3089 num_of_wins = tuning_data->num_of_valid_tap_wins;
3090 tap_hole = calc_values->vmax_thole;
3093 tap_data = &tuning_data->tap_data[i];
3094 final_tap_data = &tuning_data->final_tap_data[j];
/* Hole entirely before this window: advance it by one UI and re-test. */
3095 if (tap_hole < tap_data->win_start) {
3096 tap_hole += calc_values->ui;
/* Hole entirely after this window: copy the window through unchanged. */
3099 } else if (tap_hole > tap_data->win_end) {
3100 memcpy(final_tap_data, tap_data,
3101 sizeof(struct tap_window_data));
/* Hole inside the window: split around it. */
3106 } else if ((tap_hole >= tap_data->win_start) &&
3107 (tap_hole <= tap_data->win_end)) {
3108 size = tap_data->win_end - tap_data->win_start;
3111 &tuning_data->final_tap_data[j];
3112 if (tap_hole == tap_data->win_start) {
3113 final_tap_data->win_start =
3115 final_tap_data->win_start_attr =
3117 final_tap_data->hole_pos = hole_pos;
3118 tap_hole += calc_values->ui;
3121 final_tap_data->win_start =
3122 tap_data->win_start;
3123 final_tap_data->win_start_attr =
3124 WIN_EDGE_BOUN_START;
3126 if (tap_hole <= tap_data->win_end) {
3127 final_tap_data->win_end = tap_hole - 1;
3128 final_tap_data->win_end_attr =
3130 final_tap_data->hole_pos = hole_pos;
3131 tap_data->win_start = tap_hole;
3132 } else if (tap_hole > tap_data->win_end) {
3133 final_tap_data->win_end =
3135 final_tap_data->win_end_attr =
3137 tap_data->win_start =
3140 size = tap_data->win_end - tap_data->win_start;
3146 } while (num_of_wins > 0);
3148 /* Update the num of valid wins count after tap holes insertion */
3149 tuning_data->num_of_valid_tap_wins = j;
3151 pr_info("********tuning windows after inserting holes*****\n");
3152 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
3153 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
3154 final_tap_data = &tuning_data->final_tap_data[i];
3155 pr_info("win[%d]:%d(%d) - %d(%d)\n", i,
3156 final_tap_data->win_start,
3157 final_tap_data->win_start_attr,
3158 final_tap_data->win_end, final_tap_data->win_end_attr);
3160 pr_info("***********************************************\n");
3166 * Insert the boundaries from negative margin calculations into the windows
/*
 * NOTE(review): this listing is line-sampled; interior statements (braces,
 * the do/while head, several returns) are missing. Comments below describe
 * only what the visible lines establish.
 *
 * Walks the valid tap windows and splits any window that straddles a
 * boundary position (boun_end, advanced one UI at a time) into:
 *   [win_start .. boun-1]  (kept, original end attr replaced at split),
 *   [boun .. boun]         (a one-tap WIN_EDGE_BOUN_START/"boundary" entry),
 *   [boun+1 .. win_end]    (remainder, marked WIN_EDGE_BOUN_START).
 * Results are built in a temp array and copied back over tap_data, and
 * num_of_valid_tap_wins is updated to the new count.
 */
3169 static int insert_boundaries_in_tap_windows(struct sdhci_host *sdhci,
3170 struct tegra_tuning_data *tuning_data, u8 boun_end)
3172 struct tap_window_data *tap_data;
3173 struct tap_window_data *new_tap_data;
3174 struct tap_window_data *temp_tap_data;
3175 struct tuning_values *calc_values = &tuning_data->calc_values;
3177 u8 i = 0, j = 0, num_of_wins;
3178 bool get_next_boun = false;
/*
 * NOTE(review): scratch buffer is devm-allocated (freed only at device
 * detach) and sized for a fixed 42 windows — confirm both against the
 * full source; a plain kzalloc/kfree pair would avoid accumulating
 * per-call allocations.
 */
3180 temp_tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
3181 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
3182 if (IS_ERR_OR_NULL(temp_tap_data)) {
3183 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
3187 num_of_wins = tuning_data->num_of_valid_tap_wins;
/* Boundary tracking starts at boun_end folded into one UI period */
3188 curr_boun = boun_end % calc_values->ui;
3190 if (get_next_boun) {
3191 curr_boun += calc_values->ui;
3193 * If the boun_end exceeds the intial boundary end,
3194 * just copy remaining windows and return.
3196 if (curr_boun >= boun_end)
/* Push boundary out of reach so remaining windows copy through */
3197 curr_boun += MAX_TAP_VALUES;
3200 tap_data = &tuning_data->tap_data[i];
3201 new_tap_data = &temp_tap_data[j];
3202 if (curr_boun <= tap_data->win_start) {
/* Boundary lies before this window: advance to the next boundary */
3203 get_next_boun = true;
3205 } else if (curr_boun >= tap_data->win_end) {
/* Boundary past this window: copy the window through unchanged */
3206 memcpy(new_tap_data, tap_data,
3207 sizeof(struct tap_window_data));
3211 get_next_boun = false;
3213 } else if ((curr_boun >= tap_data->win_start) &&
3214 (curr_boun <= tap_data->win_end)) {
/* Boundary inside the window: split into three entries */
3215 new_tap_data->win_start = tap_data->win_start;
3216 new_tap_data->win_start_attr =
3217 tap_data->win_start_attr;
3218 new_tap_data->win_end = curr_boun - 1;
3219 new_tap_data->win_end_attr =
3220 tap_data->win_end_attr;
/* Single-tap entry marking the boundary itself */
3222 new_tap_data = &temp_tap_data[j];
3223 new_tap_data->win_start = curr_boun;
3224 new_tap_data->win_end = curr_boun;
3225 new_tap_data->win_start_attr =
3226 WIN_EDGE_BOUN_START;
3227 new_tap_data->win_end_attr =
/* Remainder of the original window, after the boundary */
3230 new_tap_data = &temp_tap_data[j];
3231 new_tap_data->win_start = curr_boun + 1;
3232 new_tap_data->win_start_attr = WIN_EDGE_BOUN_START;
3233 new_tap_data->win_end = tap_data->win_end;
3234 new_tap_data->win_end_attr =
3235 tap_data->win_end_attr;
3239 get_next_boun = true;
3241 } while (num_of_wins > 0);
3243 /* Update the num of valid wins count after tap holes insertion */
3244 tuning_data->num_of_valid_tap_wins = j;
/* Commit the rebuilt window list over the original array */
3246 memcpy(tuning_data->tap_data, temp_tap_data,
3247 j * sizeof(struct tap_window_data));
3248 SDHCI_TEGRA_DBG("***tuning windows after inserting boundaries***\n");
3249 SDHCI_TEGRA_DBG("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
3250 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
3251 new_tap_data = &tuning_data->tap_data[i];
3252 SDHCI_TEGRA_DBG("win[%d]:%d(%d) - %d(%d)\n", i,
3253 new_tap_data->win_start,
3254 new_tap_data->win_start_attr,
3255 new_tap_data->win_end, new_tap_data->win_end_attr);
3257 SDHCI_TEGRA_DBG("***********************************************\n");
3263 * Scan for all tap values and get all passing tap windows.
/*
 * NOTE(review): sampled listing — loop heads, braces and returns are
 * missing; comments only reflect the visible lines.
 *
 * Under sdhci->lock, repeatedly scans tap values 0..MAX_TAP_VALUES to find
 * passing windows (start = first pass, end = last pass before a failure),
 * derives per-window UIs (tap-to-tap periods between boundary ends),
 * discredits partial/undersized/oversized UIs, averages the survivors into
 * calc_values.ui, computes a negative margin for a partial first window,
 * then inserts boundaries and calculated tap holes into the window list.
 */
3265 static int sdhci_tegra_get_tap_window_data(struct sdhci_host *sdhci,
3266 struct tegra_tuning_data *tuning_data)
3268 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3269 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3270 struct tap_window_data *tap_data;
3271 struct tuning_ui tuning_ui[10];
3272 int err = 0, partial_win_start = 0, temp_margin = 0, tap_value;
3273 unsigned int calc_ui = 0;
3274 u8 prev_boundary_end = 0, num_of_wins = 0;
3275 u8 num_of_uis = 0, valid_num_uis = 0;
3276 u8 ref_ui, first_valid_full_win = 0;
3277 u8 boun_end = 0, next_boun_end = 0;
3279 bool valid_ui_found = false;
3282 * Assume there are a max of 10 windows and allocate tap window
3283 * structures for the same. If there are more windows, the array
3284 * size can be adjusted later using realloc.
/*
 * NOTE(review): comment above says 10 windows but the allocation is for
 * 42 entries while tuning_ui[] has only 10 slots — bounds of num_of_wins
 * vs num_of_uis look inconsistent; verify against the full source.
 */
3286 tuning_data->tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
3287 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
3288 if (IS_ERR_OR_NULL(tuning_data->tap_data)) {
3289 dev_err(mmc_dev(sdhci->mmc), "No memory for tap data\n");
/* Scan loop runs with the host lock held; tuning uses polling */
3293 spin_lock(&sdhci->lock);
3296 tap_data = &tuning_data->tap_data[num_of_wins];
3297 /* Get the window start */
3298 tap_value = sdhci_tegra_scan_tap_values(sdhci, tap_value, true,
/* -ENOMEDIUM: card vanished mid-scan — abort with lock released */
3300 if ((tap_value < 0) && (err == -ENOMEDIUM)) {
3301 spin_unlock(&sdhci->lock);
3304 tap_data->win_start = min_t(u8, tap_value, MAX_TAP_VALUES);
3306 if (tap_value >= MAX_TAP_VALUES) {
3307 /* If it's first iteration, then all taps failed */
3309 dev_err(mmc_dev(sdhci->mmc),
3310 "All tap values(0-255) failed\n");
3311 spin_unlock(&sdhci->lock);
3314 /* All windows obtained */
3319 /* Get the window end */
3320 tap_value = sdhci_tegra_scan_tap_values(sdhci,
3321 tap_value, false, &err);
3322 if ((tap_value < 0) && (err == -ENOMEDIUM)) {
3323 spin_unlock(&sdhci->lock);
3326 tap_data->win_end = min_t(u8, (tap_value - 1), MAX_TAP_VALUES);
3327 tap_data->win_size = tap_data->win_end - tap_data->win_start;
3331 * If the size of window is more than 4 taps wide, then it is a
3332 * valid window. If tap value 0 has passed, then a partial
3333 * window exists. Mark all the window edges as boundary edges.
3335 if (tap_data->win_size > 4) {
3336 if (tap_data->win_start == 0)
3337 tuning_data->is_partial_win_valid = true;
3338 tap_data->win_start_attr = WIN_EDGE_BOUN_START;
3339 tap_data->win_end_attr = WIN_EDGE_BOUN_END;
3341 /* Invalid window as size is less than 5 taps */
3342 SDHCI_TEGRA_DBG("Invalid tuning win (%d-%d) ignored\n",
3343 tap_data->win_start, tap_data->win_end);
3347 /* Ignore first and last partial UIs */
/* UI = distance between consecutive boundary window ends */
3348 if (tap_data->win_end_attr == WIN_EDGE_BOUN_END) {
3349 tuning_ui[num_of_uis].ui = tap_data->win_end -
3351 tuning_ui[num_of_uis].is_valid_ui = true;
3353 prev_boundary_end = tap_data->win_end;
3356 } while (tap_value < MAX_TAP_VALUES);
3357 spin_unlock(&sdhci->lock);
3359 tuning_data->num_of_valid_tap_wins = num_of_wins;
3360 valid_num_uis = num_of_uis;
3362 /* Print info of all tap windows */
3363 pr_info("**********Auto tuning windows*************\n");
3364 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
3365 for (j = 0; j < tuning_data->num_of_valid_tap_wins; j++) {
3366 tap_data = &tuning_data->tap_data[j];
3367 pr_info("win[%d]: %d(%d) - %d(%d)\n",
3368 j, tap_data->win_start, tap_data->win_start_attr,
3369 tap_data->win_end, tap_data->win_end_attr);
3371 pr_info("***************************************\n");
3373 /* Mark the first and last partial UIs as invalid */
3374 tuning_ui[0].is_valid_ui = false;
3375 tuning_ui[num_of_uis - 1].is_valid_ui = false;
3378 /* Discredit all uis at either end with size less than 30% of est ui */
3379 ref_ui = (30 * tuning_data->est_values.ui) / 100;
3380 for (j = 0; j < num_of_uis; j++) {
3381 if (tuning_ui[j].is_valid_ui) {
3382 tuning_ui[j].is_valid_ui = false;
3385 if (tuning_ui[j].ui > ref_ui)
/* Same 30% filter applied from the tail end */
3389 for (j = num_of_uis; j > 0; j--) {
3390 if (tuning_ui[j - 1].ui < ref_ui) {
3391 if (tuning_ui[j - 1].is_valid_ui) {
3392 tuning_ui[j - 1].is_valid_ui = false;
3399 /* Calculate 0.75*est_UI */
3400 ref_ui = (75 * tuning_data->est_values.ui) / 100;
3403 * Check for valid UIs and discredit invalid UIs. A UI is considered
3404 * valid if it's greater than (0.75*est_UI). If an invalid UI is found,
3405 * also discredit the smaller of the two adjacent windows.
3407 for (j = 1; j < (num_of_uis - 1); j++) {
3408 if (tuning_ui[j].ui > ref_ui && tuning_ui[j].is_valid_ui) {
3409 tuning_ui[j].is_valid_ui = true;
3411 if (tuning_ui[j].is_valid_ui) {
3412 tuning_ui[j].is_valid_ui = false;
3415 if (!tuning_ui[j + 1].is_valid_ui ||
3416 !tuning_ui[j - 1].is_valid_ui) {
3417 if (tuning_ui[j - 1].is_valid_ui) {
3418 tuning_ui[j - 1].is_valid_ui = false;
3420 } else if (tuning_ui[j + 1].is_valid_ui) {
3421 tuning_ui[j + 1].is_valid_ui = false;
/* Both neighbours valid: drop the smaller of the two */
3426 if (tuning_ui[j - 1].ui > tuning_ui[j + 1].ui)
3427 tuning_ui[j + 1].is_valid_ui = false;
3429 tuning_ui[j - 1].is_valid_ui = false;
3435 /* Calculate the cumulative UI if there are valid UIs left */
3436 if (valid_num_uis) {
3437 for (j = 0; j < num_of_uis; j++)
3438 if (tuning_ui[j].is_valid_ui) {
3439 calc_ui += tuning_ui[j].ui;
3440 if (!first_valid_full_win)
3441 first_valid_full_win = j;
/* Average of surviving UIs becomes the calculated UI */
3446 tuning_data->calc_values.ui = (calc_ui / valid_num_uis);
3447 valid_ui_found = true;
/* No valid UI: fall back to the estimated UI */
3449 tuning_data->calc_values.ui = tuning_data->est_values.ui;
3450 valid_ui_found = false;
3453 SDHCI_TEGRA_DBG("****Tuning UIs***********\n");
3454 for (j = 0; j < num_of_uis; j++)
3455 SDHCI_TEGRA_DBG("Tuning UI[%d] : %d, Is valid[%d]\n",
3456 j, tuning_ui[j].ui, tuning_ui[j].is_valid_ui);
3457 SDHCI_TEGRA_DBG("*************************\n");
3459 /* Get the calculated tuning values */
3460 err = calculate_actual_tuning_values(tegra_host->speedo, tuning_data,
3461 tegra_host->boot_vcore_mv);
3464 * Calculate negative margin if partial win is valid. There are two
3466 * Case 1: If Avg_UI is found, then keep subtracting avg_ui from start
3467 * of first valid full window until a value <=0 is obtained.
3468 * Case 2: If Avg_UI is not found, subtract avg_ui from all boundary
3469 * starts until a value <=0 is found.
3471 if (tuning_data->is_partial_win_valid && (num_of_wins > 1)) {
3472 if (valid_ui_found) {
3474 tuning_data->tap_data[first_valid_full_win].win_start;
3475 boun_end = partial_win_start;
3476 partial_win_start %= tuning_data->calc_values.ui;
3477 partial_win_start -= tuning_data->calc_values.ui;
3479 for (j = 0; j < NEG_MAR_CHK_WIN_COUNT; j++) {
3481 tuning_data->tap_data[j + 1].win_start;
3483 boun_end = temp_margin;
3484 else if (!next_boun_end)
3485 next_boun_end = temp_margin;
3486 temp_margin %= tuning_data->calc_values.ui;
3487 temp_margin -= tuning_data->calc_values.ui;
3488 if (!partial_win_start ||
3489 (temp_margin > partial_win_start))
3490 partial_win_start = temp_margin;
/* A non-positive margin replaces the first window's start */
3493 if (partial_win_start <= 0)
3494 tuning_data->tap_data[0].win_start = partial_win_start;
3498 insert_boundaries_in_tap_windows(sdhci, tuning_data, boun_end);
3500 insert_boundaries_in_tap_windows(sdhci, tuning_data, next_boun_end);
3502 /* Insert calculated holes into the windows */
3503 err = adjust_holes_in_tap_windows(sdhci, tuning_data);
/* Debug helper: log every per-frequency tuning constraint entry. */
3508 static void sdhci_tegra_dump_tuning_constraints(struct sdhci_host *sdhci)
3510 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3511 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3512 struct tegra_tuning_data *tuning_data;
3515 SDHCI_TEGRA_DBG("%s: Num of tuning frequencies%d\n",
3516 mmc_hostname(sdhci->mmc), tegra_host->tuning_freq_count);
/* One line per supported tuning frequency: rate in Hz and its band */
3517 for (i = 0; i < tegra_host->tuning_freq_count; ++i) {
3518 tuning_data = &tegra_host->tuning_data[i];
3519 SDHCI_TEGRA_DBG("%s: Tuning freq[%d]: %d, freq band %d\n",
3520 mmc_hostname(sdhci->mmc), i,
3521 tuning_data->freq_hz, tuning_data->freq_band);
/*
 * Map a vcore tuning constraint selector to the corresponding core
 * voltage (mV) stored on the host. Falls through to the boot vcore
 * when the selector matches none of the known cases.
 * NOTE(review): the switch head and the manipulation of *mask are not
 * visible in this sampled listing — confirm which bit of @mask selects
 * the case and whether it is consumed/cleared here.
 */
3525 static unsigned int get_tuning_voltage(struct sdhci_tegra *tegra_host, u8 *mask)
3532 case NOMINAL_VCORE_TUN:
3533 return tegra_host->nominal_vcore_mv;
3534 case BOOT_VCORE_TUN:
3535 return tegra_host->boot_vcore_mv;
3536 case MIN_OVERRIDE_VCORE_TUN:
3537 return tegra_host->min_vcore_override_mv;
/* Default: boot-time core voltage */
3540 return tegra_host->boot_vcore_mv;
/*
 * Classify the host's max clock into a tuning frequency band: returns the
 * index of the first entry in the SoC tuning_freq_list that is >= max_clk
 * (the return inside the loop is not visible in this sampled listing),
 * or TUNING_MAX_FREQ when the clock exceeds every listed frequency.
 */
3543 static u8 sdhci_tegra_get_freq_point(struct sdhci_host *sdhci)
3545 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3546 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3547 const unsigned int *freq_list;
3551 curr_clock = sdhci->max_clk;
3552 freq_list = tegra_host->soc_data->tuning_freq_list;
3554 for (i = 0; i < TUNING_FREQ_COUNT; ++i)
3555 if (curr_clock <= freq_list[i])
/* Clock higher than all listed frequencies */
3558 return TUNING_MAX_FREQ;
/*
 * Return the tap-hole margin for this controller instance.
 * If the SoC declares fixed per-device margins (quirk
 * NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS), look the margin up by device
 * name; otherwise compute it from the tap-to-tap tuning value.
 */
3561 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
3562 int t2t_tuning_value)
3564 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3565 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3566 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
3567 struct tuning_tap_hole_margins *tap_hole;
3572 if (soc_data->nvquirks & NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS) {
3573 if (soc_data->tap_hole_margins) {
3574 tap_hole = soc_data->tap_hole_margins;
/* Match the fixed-margin table entry by device name */
3575 dev_id = dev_name(mmc_dev(sdhci->mmc));
3576 for (i = 0; i < soc_data->tap_hole_margins_count; i++) {
3577 if (!strcmp(dev_id, tap_hole->dev_id))
3578 return tap_hole->tap_hole_margin;
3582 dev_info(mmc_dev(sdhci->mmc),
3583 "Fixed tap hole margins missing\n");
3587 /* if no margin are available calculate tap margin */
/* NOTE(review): constant 450 and the rest of this expression are cut
 * off by the sampling — verify the formula against the full source. */
3588 tap_margin = (((2 * (450 / t2t_tuning_value)) +
3595 * The frequency tuning algorithm tries to calculate the tap-to-tap delay
3596 * UI and estimate holes using equations and predetermined coefficients from
3597 * the characterization data. The algorithm will not work without this data.
/*
 * For every supported tuning frequency, locate the characterization
 * coefficient tables matching this controller instance (by device name,
 * and for tap-hole coeffs also by frequency in kHz) and cache the
 * pointers in tuning_data. Missing data is logged and the pointer is
 * reset to NULL (the associated error return is not visible in this
 * sampled listing).
 */
3599 static int find_tuning_coeffs_data(struct sdhci_host *sdhci,
3600 bool force_retuning)
3602 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3603 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3604 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
3605 struct tegra_tuning_data *tuning_data;
3606 struct tuning_t2t_coeffs *t2t_coeffs;
3607 struct tap_hole_coeffs *thole_coeffs;
3609 unsigned int freq_khz;
3611 bool coeffs_set = false;
3613 dev_id = dev_name(mmc_dev(sdhci->mmc));
3614 /* Find the coeffs data for all supported frequencies */
3615 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
3616 tuning_data = &tegra_host->tuning_data[i];
3618 /* Skip if T2T coeffs are already found */
3619 if (tuning_data->t2t_coeffs == NULL || force_retuning) {
3620 t2t_coeffs = soc_data->t2t_coeffs;
3621 for (j = 0; j < soc_data->t2t_coeffs_count; j++) {
3622 if (!strcmp(dev_id, t2t_coeffs->dev_id)) {
3623 tuning_data->t2t_coeffs = t2t_coeffs;
3625 dev_info(mmc_dev(sdhci->mmc),
3626 "Found T2T coeffs data\n");
/* No T2T table for this device: clear pointer and report */
3632 dev_err(mmc_dev(sdhci->mmc),
3633 "T2T coeffs data missing\n");
3634 tuning_data->t2t_coeffs = NULL;
3640 /* Skip if tap hole coeffs are already found */
3641 if (tuning_data->thole_coeffs == NULL || force_retuning) {
3642 thole_coeffs = soc_data->tap_hole_coeffs;
3643 freq_khz = tuning_data->freq_hz / 1000;
/* Tap-hole coeffs must match both device name and frequency */
3644 for (j = 0; j < soc_data->tap_hole_coeffs_count; j++) {
3645 if (!strcmp(dev_id, thole_coeffs->dev_id) &&
3646 (freq_khz == thole_coeffs->freq_khz)) {
3647 tuning_data->thole_coeffs =
3650 dev_info(mmc_dev(sdhci->mmc),
3651 "%dMHz tap hole coeffs found\n",
3659 dev_err(mmc_dev(sdhci->mmc),
3660 "%dMHz Tap hole coeffs data missing\n",
3662 tuning_data->thole_coeffs = NULL;
3672 * Determines the numbers of frequencies required and then fills up the tuning
3673 * constraints for each of the frequencies. The data of lower frequency is
3674 * filled first and then the higher frequency data. Max supported frequencies
/*
 * Returns the number of tuning frequencies filled in (the actual return
 * statements are not visible in this sampled listing), or an error for
 * an unsupported frequency count. With DFS frequency scaling enabled
 * (and not in SDR50) multiple entries are populated; otherwise a single
 * entry at max_clk is used.
 */
3677 static int setup_freq_constraints(struct sdhci_host *sdhci,
3678 const unsigned int *freq_list)
3680 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3681 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3682 struct tegra_tuning_data *tuning_data;
3686 if ((sdhci->mmc->ios.timing != MMC_TIMING_UHS_SDR50) &&
3687 (sdhci->mmc->caps2 & MMC_CAP2_FREQ_SCALING))
3688 freq_count = DFS_FREQ_COUNT;
3692 freq_band = sdhci_tegra_get_freq_point(sdhci);
3693 /* Fill up the req frequencies */
3694 switch (freq_count) {
/* Single-frequency case: tune only at max_clk */
3696 tuning_data = &tegra_host->tuning_data[0];
3697 tuning_data->freq_hz = sdhci->max_clk;
3698 tuning_data->freq_band = freq_band;
3699 tuning_data->constraints.vcore_mask =
3700 tuning_vcore_constraints[freq_band].vcore_mask;
3701 tuning_data->nr_voltages =
3702 hweight32(tuning_data->constraints.vcore_mask);
/* DFS case: entry [1] holds max_clk, entry [0] a lower frequency */
3705 tuning_data = &tegra_host->tuning_data[1];
3706 tuning_data->freq_hz = sdhci->max_clk;
3707 tuning_data->freq_band = freq_band;
3708 tuning_data->constraints.vcore_mask =
3709 tuning_vcore_constraints[freq_band].vcore_mask;
3710 tuning_data->nr_voltages =
3711 hweight32(tuning_data->constraints.vcore_mask);
3713 tuning_data = &tegra_host->tuning_data[0];
/* Pick the highest listed frequency below the current band */
3714 for (i = (freq_band - 1); i >= 0; i--) {
3717 tuning_data->freq_hz = freq_list[i];
3718 tuning_data->freq_band = i;
3719 tuning_data->nr_voltages = 1;
3720 tuning_data->constraints.vcore_mask =
3721 tuning_vcore_constraints[i].vcore_mask;
3722 tuning_data->nr_voltages =
3723 hweight32(tuning_data->constraints.vcore_mask);
3727 dev_err(mmc_dev(sdhci->mmc), "Unsupported freq count\n");
3735 * Get the supported frequencies and other tuning related constraints for each
3736 * frequency. The supported frequencies should be determined from the list of
3737 * frequencies in the soc data and also consider the platform clock limits as
3738 * well as any DFS related restrictions.
3740 static int sdhci_tegra_get_tuning_constraints(struct sdhci_host *sdhci,
3741 bool force_retuning)
3743 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3744 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3745 const unsigned int *freq_list;
3748 /* A valid freq count means freq constraints are already set up */
3749 if (!tegra_host->tuning_freq_count || force_retuning) {
3750 freq_list = tegra_host->soc_data->tuning_freq_list;
3751 tegra_host->tuning_freq_count =
3752 setup_freq_constraints(sdhci, freq_list);
3753 if (tegra_host->tuning_freq_count < 0) {
3754 dev_err(mmc_dev(sdhci->mmc),
3755 "Invalid tuning freq count\n");
/* Resolve characterization coefficient tables for each frequency */
3760 err = find_tuning_coeffs_data(sdhci, force_retuning);
3764 sdhci_tegra_dump_tuning_constraints(sdhci);
3770 * During boot, only boot voltage for vcore can be set. Check if the current
3771 * voltage is allowed to be used. Nominal and min override voltages can be
3772 * set once boot is done. This will be notified through late subsys init call.
/*
 * Apply (or clear, when @voltage is 0) a DVFS core-voltage override for
 * tuning. EPERM/ENOSYS from the DVFS layer means overrides are disabled;
 * that is logged and tolerated rather than failing tuning. Nominal-vcore
 * tuning may additionally require an EMC clock boost, which is reverted
 * on the error path.
 */
3774 static int sdhci_tegra_set_tuning_voltage(struct sdhci_host *sdhci,
3775 unsigned int voltage)
3777 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3778 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3780 bool nom_emc_freq_set = false;
/* Before late init only the boot vcore may be requested */
3782 if (voltage && (voltage != tegra_host->boot_vcore_mv)) {
3783 SDHCI_TEGRA_DBG("%s: Override vcore %dmv not allowed\n",
3784 mmc_hostname(sdhci->mmc), voltage);
3788 SDHCI_TEGRA_DBG("%s: Setting vcore override %d\n",
3789 mmc_hostname(sdhci->mmc), voltage);
3791 * First clear any previous dvfs override settings. If dvfs overrides
3792 * are disabled, then print the error message but continue execution
3793 * rather than failing tuning altogether.
3795 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, 0);
3796 if ((err == -EPERM) || (err == -ENOSYS)) {
3798 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3799 * when DVFS override is not enabled. Continue tuning
3800 * with default core voltage
3802 SDHCI_TEGRA_DBG("dvfs overrides disabled. Nothing to clear\n");
3808 /* EMC clock freq boost might be required for nominal core voltage */
3809 if ((voltage == tegra_host->nominal_vcore_mv) &&
3810 tegra_host->plat->en_nominal_vcore_tuning &&
3811 tegra_host->emc_clk) {
3812 err = clk_set_rate(tegra_host->emc_clk,
3813 SDMMC_EMC_NOM_VOLT_FREQ);
3815 dev_err(mmc_dev(sdhci->mmc),
3816 "Failed to set emc nom clk freq %d\n", err);
3818 nom_emc_freq_set = true;
3822 * If dvfs overrides are disabled, then print the error message but
3823 * continue tuning execution rather than failing tuning altogether.
3825 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, voltage);
3826 if ((err == -EPERM) || (err == -ENOSYS)) {
3828 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3829 * when DVFS override is not enabled. Continue tuning
3830 * with default core voltage
3832 SDHCI_TEGRA_DBG("dvfs overrides disabled. No overrides set\n");
3835 dev_err(mmc_dev(sdhci->mmc),
3836 "failed to set vcore override %dmv\n", voltage);
3838 /* Revert emc clock to normal freq */
/* Error path: undo the EMC boost applied above */
3839 if (nom_emc_freq_set) {
3840 err = clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
3842 dev_err(mmc_dev(sdhci->mmc),
3843 "Failed to revert emc nom clk freq %d\n", err);
/*
 * Run the tap-window scan for one tuning frequency: for each voltage in
 * the constraint mask, set the core-voltage override and collect the tap
 * window data at that voltage.
 */
3849 static int sdhci_tegra_run_tuning(struct sdhci_host *sdhci,
3850 struct tegra_tuning_data *tuning_data)
3852 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3853 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3856 u8 i, vcore_mask = 0;
3858 vcore_mask = tuning_data->constraints.vcore_mask;
3859 for (i = 0; i < tuning_data->nr_voltages; i++) {
/* get_tuning_voltage() consumes bits from vcore_mask per iteration */
3860 voltage = get_tuning_voltage(tegra_host, &vcore_mask);
3861 err = sdhci_tegra_set_tuning_voltage(sdhci, voltage);
3863 dev_err(mmc_dev(sdhci->mmc),
3864 "Unable to set override voltage.\n");
3868 /* Get the tuning window info */
3869 SDHCI_TEGRA_DBG("Getting tuning windows...\n");
3870 err = sdhci_tegra_get_tap_window_data(sdhci, tuning_data);
3872 dev_err(mmc_dev(sdhci->mmc),
3873 "Failed to get tap win %d\n", err);
3876 SDHCI_TEGRA_DBG("%s: %d tuning window data obtained\n",
3877 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
/*
 * Program the previously computed best tap value and confirm it by
 * issuing the tuning command once. A failure here is reported with the
 * frequency so the caller can force a full re-tune.
 */
3882 static int sdhci_tegra_verify_best_tap(struct sdhci_host *sdhci)
3884 struct tegra_tuning_data *tuning_data;
3887 tuning_data = sdhci_tegra_get_tuning_data(sdhci, sdhci->max_clk);
/* Sanity-check the stored tap before programming it */
3888 if ((tuning_data->best_tap_value < 0) ||
3889 (tuning_data->best_tap_value > MAX_TAP_VALUES)) {
3890 dev_err(mmc_dev(sdhci->mmc),
3891 "Trying to verify invalid best tap value\n");
3894 dev_info(mmc_dev(sdhci->mmc),
3895 "%s: tuning freq %dhz, best tap %d\n",
3896 __func__, tuning_data->freq_hz,
3897 tuning_data->best_tap_value);
3900 /* Set the best tap value */
3901 sdhci_tegra_set_tap_delay(sdhci, tuning_data->best_tap_value);
3903 /* Run tuning after setting the best tap value */
3904 err = sdhci_tegra_issue_tuning_cmd(sdhci);
3906 dev_err(mmc_dev(sdhci->mmc),
3907 "%dMHz best tap value verification failed %d\n",
3908 tuning_data->freq_hz, err);
/*
 * Tegra-specific .execute_tuning entry point.
 * Only runs for SDR104 (and SDR50 when the host flags require tuning),
 * on 4- or 8-bit buses. Under tuning_mutex: switches to polled interrupt
 * status, optionally re-uses a previously found best tap, otherwise
 * derives constraints, runs the window scan per frequency, computes and
 * verifies the best tap, then restores voltage overrides, interrupts,
 * external-loopback and feedback-clock settings.
 * NOTE(review): sampled listing — several returns/labels are missing.
 */
3912 static int sdhci_tegra_execute_tuning(struct sdhci_host *sdhci, u32 opcode)
3914 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3915 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3916 struct tegra_tuning_data *tuning_data;
3921 u8 i, set_retuning = 0;
3922 bool force_retuning = false;
3924 /* Tuning is valid only in SDR104 and SDR50 modes */
3925 ctrl_2 = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
3926 if (!(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
3927 (((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
3928 (sdhci->flags & SDHCI_SDR50_NEEDS_TUNING))))
3931 /* Tuning should be done only for MMC_BUS_WIDTH_8 and MMC_BUS_WIDTH_4 */
3932 if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
3933 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8;
3934 else if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
3935 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4;
3939 SDHCI_TEGRA_DBG("%s: Starting freq tuning\n", mmc_hostname(sdhci->mmc));
/* Disable external loopback for the duration of tuning if enabled */
3940 if (tegra_host->plat->enb_ext_loopback) {
3941 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
3943 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3944 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
3946 if (tegra_host->plat->enb_feedback_clock) {
3947 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
3949 SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
3950 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/* Serializes tuning against other SDMMC instances (vcore override) */
3953 mutex_lock(&tuning_mutex);
3955 /* Set the tuning command to be used */
3956 tegra_host->tuning_opcode = opcode;
3959 * Disable all interrupts signalling. Enable interrupt status
3960 * detection for buffer read ready and data crc. We use
3961 * polling for tuning as it involves less overhead.
3963 sdhci_writel(sdhci, 0, SDHCI_SIGNAL_ENABLE);
3964 sdhci_writel(sdhci, SDHCI_INT_DATA_AVAIL |
3965 SDHCI_INT_DATA_CRC, SDHCI_INT_ENABLE);
3968 * If tuning is already done and retune request is not set, then skip
3969 * best tap value calculation and use the old best tap value. If the
3970 * previous best tap value verification failed, force retuning.
3972 if (tegra_host->tuning_status == TUNING_STATUS_DONE) {
3973 err = sdhci_tegra_verify_best_tap(sdhci);
3975 dev_err(mmc_dev(sdhci->mmc),
3976 "Prev best tap failed. Re-running tuning\n");
3977 force_retuning = true;
3983 if (tegra_host->force_retune == true) {
3984 force_retuning = true;
/* one-shot flag: consume the external retune request */
3985 tegra_host->force_retune = false;
3988 tegra_host->tuning_status = 0;
3989 err = sdhci_tegra_get_tuning_constraints(sdhci, force_retuning);
3991 dev_err(mmc_dev(sdhci->mmc),
3992 "Failed to get tuning constraints\n");
/* Tune each supported frequency in turn */
3996 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
3997 tuning_data = &tegra_host->tuning_data[i];
3998 if (tuning_data->tuning_done && !force_retuning)
4001 /* set clock freq also needed for MMC_RTPM */
4002 SDHCI_TEGRA_DBG("%s: Setting tuning freq%d\n",
4003 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
4004 tegra_sdhci_set_clock(sdhci, tuning_data->freq_hz);
4006 SDHCI_TEGRA_DBG("%s: Calculating estimated tuning values\n",
4007 mmc_hostname(sdhci->mmc));
4008 err = calculate_estimated_tuning_values(tegra_host->speedo,
4009 tuning_data, tegra_host->boot_vcore_mv);
4013 SDHCI_TEGRA_DBG("Running tuning...\n");
4014 err = sdhci_tegra_run_tuning(sdhci, tuning_data);
4018 SDHCI_TEGRA_DBG("calculating best tap value\n");
4019 err = sdhci_tegra_calculate_best_tap(sdhci, tuning_data);
4023 err = sdhci_tegra_verify_best_tap(sdhci);
4024 if (!err && !set_retuning) {
4025 tuning_data->tuning_done = true;
4026 tegra_host->tuning_status |= TUNING_STATUS_DONE;
4028 tegra_host->tuning_status |= TUNING_STATUS_RETUNE;
4032 /* Release any override core voltages set */
4033 sdhci_tegra_set_tuning_voltage(sdhci, 0);
4035 /* Enable interrupts. Enable full range for core voltage */
4036 sdhci_writel(sdhci, sdhci->ier, SDHCI_INT_ENABLE);
4037 sdhci_writel(sdhci, sdhci->ier, SDHCI_SIGNAL_ENABLE);
4038 mutex_unlock(&tuning_mutex);
4040 SDHCI_TEGRA_DBG("%s: Freq tuning done\n", mmc_hostname(sdhci->mmc));
/* Restore external-loopback state according to the tuning outcome */
4041 if (tegra_host->plat->enb_ext_loopback) {
4042 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
4044 /* Tuning is failed and card will try to enumerate in
4045 * Legacy High Speed mode. So, Enable External Loopback
4049 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
4052 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
4054 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
4057 if (tegra_host->plat->enb_feedback_clock) {
4058 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
4059 if (err) /* Tuning is failed disable feedback clock */
4061 ~SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
4064 SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
4065 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * Suspend handler: gates the controller clock, disables power rails when
 * a card is present (switching SDMMC pins to GPIO mode around the rail
 * change if the platform declares pins), arms the card-detect GPIO as a
 * wake source, and raises the pad voltage to 3.3V for pwrdet platforms.
 */
4071 static int tegra_sdhci_suspend(struct sdhci_host *sdhci)
4073 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
4074 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4076 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
4077 const struct tegra_sdhci_platform_data *plat = pdev->dev.platform_data;
4078 unsigned int cd_irq;
4080 if (sdhci->is_clk_on) {
4081 pr_debug("%s suspend force clk off\n",
4082 mmc_hostname(sdhci->mmc));
4083 tegra_sdhci_set_clock(sdhci, 0);
4086 /* Disable the power rails if any */
4087 if (tegra_host->card_present) {
4089 /* Configure sdmmc pins to GPIO mode if needed */
/* NOTE(review): gpio_request_array() return value is ignored here
 * but checked paths are cut from this listing — verify. */
4090 if (plat && plat->pin_count > 0)
4091 gpio_request_array(plat->gpios,
4092 ARRAY_SIZE(plat->gpios));
4094 err = tegra_sdhci_configure_regulators(tegra_host,
4095 CONFIG_REG_DIS, 0, 0);
4097 dev_err(mmc_dev(sdhci->mmc),
4098 "Regulators disable in suspend failed %d\n", err);
/* Force pad recalibration on the next resume */
4099 sdhci->is_calibration_done = false;
4101 if (plat && gpio_is_valid(plat->cd_gpio)) {
4102 if (!plat->cd_wakeup_incapable) {
4103 /* Enable wake irq at end of suspend */
4104 cd_irq = gpio_to_irq(plat->cd_gpio);
4105 err = enable_irq_wake(cd_irq);
4107 dev_err(mmc_dev(sdhci->mmc),
4108 "SD card wake-up event registration for irq=%d failed with error: %d\n",
4113 if (plat && plat->pwrdet_support && tegra_host->sdmmc_padctrl) {
4114 err = padctrl_set_voltage(tegra_host->sdmmc_padctrl,
4115 SDHOST_HIGH_VOLT_3V3);
4117 dev_err(mmc_dev(sdhci->mmc),
4118 "padcontrol set volt failed: %d\n", err);
/* Release the GPIO-mode pins claimed above */
4121 if (plat && plat->pin_count > 0)
4122 gpio_free_array(plat->gpios, ARRAY_SIZE(plat->gpios));
4124 pr_err("%s %s line=%d - null plat\n",
4125 mmc_hostname(sdhci->mmc), __func__, __LINE__);
4128 sdhci->detect_resume = 1;
/*
 * Resume handler: disarms the card-detect wake IRQ, re-reads card
 * presence (cd_gpio is active-low), restarts the clock at the 400 kHz
 * identification rate, re-enables power rails, restores the IO signal
 * voltage, optionally resets/powers the controller when MMC_PM_KEEP_POWER
 * is set, and re-runs pad calibration.
 */
4132 static int tegra_sdhci_resume(struct sdhci_host *sdhci)
4134 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
4135 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4136 struct platform_device *pdev;
4137 struct tegra_sdhci_platform_data *plat;
4138 unsigned int signal_voltage = 0;
4140 unsigned int cd_irq;
4142 pdev = to_platform_device(mmc_dev(sdhci->mmc));
4143 plat = pdev->dev.platform_data;
4145 if (plat && gpio_is_valid(plat->cd_gpio)) {
4146 /* disable wake capability at start of resume */
4147 if (!plat->cd_wakeup_incapable) {
4148 cd_irq = gpio_to_irq(plat->cd_gpio);
4149 disable_irq_wake(cd_irq);
/* cd_gpio is active-low: 0 means a card is inserted */
4151 tegra_host->card_present =
4152 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
4155 /* Setting the min identification clock of freq 400KHz */
4156 if (!sdhci->is_clk_on) {
4157 pr_debug("%s: resume force clk ON\n",
4158 mmc_hostname(sdhci->mmc));
4159 tegra_sdhci_set_clock(sdhci, 400000);
4162 /* Enable the power rails if any */
4163 if (tegra_host->card_present) {
4164 err = tegra_sdhci_configure_regulators(tegra_host,
4165 CONFIG_REG_EN, 0, 0);
4167 dev_err(mmc_dev(sdhci->mmc),
4168 "Regulators enable in resume failed %d\n", err);
/* Restore IO voltage: 1.8V if the OCR mask says so, else 3.3V */
4171 if (tegra_host->vdd_io_reg) {
4172 if (plat && (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK))
4173 signal_voltage = MMC_SIGNAL_VOLTAGE_180;
4175 signal_voltage = MMC_SIGNAL_VOLTAGE_330;
4176 tegra_sdhci_signal_voltage_switch(sdhci,
4181 /* Reset the controller and power on if MMC_KEEP_POWER flag is set*/
4182 if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) {
4183 tegra_sdhci_reset(sdhci, SDHCI_RESET_ALL);
4184 sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
4187 tegra_sdhci_do_calibration(sdhci, signal_voltage);
4190 sdhci->detect_resume = 0;
/*
 * Post-resume hook: re-runs DLL calibration (via tegra_sdhci_post_init)
 * for an eMMC card in HS400 mode, temporarily turning the controller
 * clock on if needed, then gates the clock again when no card is present
 * and delayed-clock-gating runtime PM is in use.
 */
4194 static void tegra_sdhci_post_resume(struct sdhci_host *sdhci)
4196 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
4197 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4198 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
4199 struct tegra_sdhci_platform_data *plat;
4200 bool dll_calib_req = false;
4201 bool is_sdhci_clk_turned_on = false;
4203 plat = pdev->dev.platform_data;
/* DLL calibration is only needed for eMMC running HS400 */
4204 dll_calib_req = (sdhci->mmc->card &&
4205 (sdhci->mmc->card->type == MMC_TYPE_MMC) &&
4206 (sdhci->mmc->ios.timing == MMC_TIMING_MMC_HS400));
4207 if (dll_calib_req) {
/* Temporarily ungate the clock if it is currently off */
4208 if (!sdhci->is_clk_on) {
4209 if (sdhci->mmc->ios.clock) {
4210 sdhci->mmc->ops->set_ios(sdhci->mmc,
4212 is_sdhci_clk_turned_on = true;
4215 tegra_sdhci_post_init(sdhci);
4216 if (is_sdhci_clk_turned_on)
4217 tegra_sdhci_set_clock(sdhci, 0);
4220 /* Turn OFF the clocks if the device is not present */
4221 if ((!tegra_host->card_present || !sdhci->mmc->card) &&
4222 tegra_host->clk_enabled &&
4223 (IS_RTPM_DELAY_CG(plat->rtpm_type)))
4224 tegra_sdhci_set_clock(sdhci, 0);
4228 * For tegra specific tuning, core voltage has to be fixed at different
4229 * voltages to get the tap values. Fixing the core voltage during tuning for one
4230 * device might affect transfers of other SDMMC devices. Check if tuning mutex
4231 * is locked before starting a data transfer. The new tuning procedure might
4232 * take at max 1.5s for completion for a single run. Taking DFS into count,
4233 * setting the max timeout for tuning mutex check a 3 secs. Since tuning is
4234 * run only during boot or the first time device is inserted, there wouldn't
4235 * be any delays in cmd/xfer execution once devices enumeration is done.
4237 static void tegra_sdhci_get_bus(struct sdhci_host *sdhci)
/* ~300 polls; the per-iteration delay is cut from this listing */
4239 unsigned int timeout = 300;
/* Busy-wait until the tuning mutex is free (never acquires it) */
4241 while (mutex_is_locked(&tuning_mutex)) {
4245 dev_err(mmc_dev(sdhci->mmc),
4246 "Tuning mutex locked for long time\n");
4253 * The host/device can be powered off before the retuning request is handled in
4254 * case of SDIDO being off if Wifi is turned off, sd card removal etc. In such
4255 * cases, cancel the pending tuning timer and remove any core voltage
4256 * constraints that are set earlier.
4258 static void tegra_sdhci_power_off(struct sdhci_host *sdhci, u8 power_mode)
4260 int retuning_req_set = 0;
4262 retuning_req_set = (timer_pending(&sdhci->tuning_timer) ||
4263 (sdhci->flags & SDHCI_NEEDS_RETUNING));
4265 if (retuning_req_set) {
4266 del_timer_sync(&sdhci->tuning_timer);
/* Drop this host's hold on the boot-voltage refcount; the last
 * holder releases the core-voltage override entirely. */
4268 if (boot_volt_req_refcount)
4269 --boot_volt_req_refcount;
4271 if (!boot_volt_req_refcount) {
4272 sdhci_tegra_set_tuning_voltage(sdhci, 0);
4273 SDHCI_TEGRA_DBG("%s: Release override as host is off\n",
4274 mmc_hostname(sdhci->mmc));
/*
 * debugfs read: report the devfreq stats polling interval for this host.
 * NOTE(review): units of polling_interval not visible here — presumably
 * milliseconds; confirm against the dev_stats definition.
 */
4279 static int show_polling_period(void *data, u64 *value)
4281 struct sdhci_host *host = (struct sdhci_host *)data;
4283 if (host->mmc->dev_stats != NULL)
4284 *value = host->mmc->dev_stats->polling_interval;
/*
 * debugfs write: set the devfreq stats polling interval. The clamp implied
 * by the comment below ("maximum polling period 1 sec") is applied on a
 * line not visible in this view.
 */
4289 static int set_polling_period(void *data, u64 value)
4291 struct sdhci_host *host = (struct sdhci_host *)data;
4293 if (host->mmc->dev_stats != NULL) {
4294 /* Limiting the maximum polling period to 1 sec */
4297 host->mmc->dev_stats->polling_interval = value;
/* debugfs read: report the DFS governor's active-load high threshold (%). */
4302 static int show_active_load_high_threshold(void *data, u64 *value)
4304 struct sdhci_host *host = (struct sdhci_host *)data;
4305 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4306 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4307 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
/* gov_data is only allocated when frequency scaling is enabled. */
4309 if (gov_data != NULL)
4310 *value = gov_data->act_load_high_threshold;
/*
 * debugfs write: set the DFS governor's active-load high threshold.
 * The 100% upper-bound check implied by the comment below is applied on
 * a line not visible in this view.
 */
4315 static int set_active_load_high_threshold(void *data, u64 value)
4317 struct sdhci_host *host = (struct sdhci_host *)data;
4318 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4319 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4320 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
4322 if (gov_data != NULL) {
4323 /* Maximum threshold load percentage is 100.*/
4326 gov_data->act_load_high_threshold = value;
/* debugfs read: report whether clock gating is currently disabled (ungated). */
4332 static int show_disableclkgating_value(void *data, u64 *value)
4334 struct sdhci_host *host;
4335 struct sdhci_pltfm_host *pltfm_host;
4336 struct sdhci_tegra *tegra_host;
4338 host = (struct sdhci_host *)data;
4340 pltfm_host = sdhci_priv(host);
4341 if (pltfm_host != NULL) {
4342 tegra_host = pltfm_host->priv;
4343 if (tegra_host != NULL)
4344 *value = tegra_host->dbg_cfg.clk_ungated;
/*
 * debugfs write: enable/disable clock gating for debug. When ungating,
 * the current ios is re-applied (to turn the clock on) and the
 * MMC_CAP2_CLOCK_GATING capability is cleared; when re-enabling gating
 * the capability is restored. Only meaningful for delayed-clock-gate
 * runtime-PM hosts (IS_RTPM_DELAY_CG).
 */
4350 static int set_disableclkgating_value(void *data, u64 value)
4352 struct sdhci_host *host;
4353 struct platform_device *pdev;
4354 struct tegra_sdhci_platform_data *plat;
4355 struct sdhci_pltfm_host *pltfm_host;
4356 struct sdhci_tegra *tegra_host;
4358 host = (struct sdhci_host *)data;
4360 pdev = to_platform_device(mmc_dev(host->mmc));
4361 plat = pdev->dev.platform_data;
4362 pltfm_host = sdhci_priv(host);
4363 if (pltfm_host != NULL) {
4364 tegra_host = pltfm_host->priv;
4365 /* Set the CAPS2 register to reflect
4366 * the clk gating value
4368 if (tegra_host != NULL) {
/* value != 0 path (condition line not visible): ungate clocks. */
4370 host->mmc->ops->set_ios(host->mmc,
4372 tegra_host->dbg_cfg.clk_ungated = true;
4373 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
4375 ~MMC_CAP2_CLOCK_GATING;
/* else path: restore gating capability. */
4377 tegra_host->dbg_cfg.clk_ungated = false;
4378 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
4380 MMC_CAP2_CLOCK_GATING;
/*
 * debugfs write: override the outbound trim delay. Requires clock gating
 * to be disabled first (via clk_gate_disabled) so the vendor register can
 * be written while the clock is guaranteed on; otherwise the request is
 * rejected with a hint message.
 */
4388 static int set_trim_override_value(void *data, u64 value)
4390 struct sdhci_host *host;
4391 struct sdhci_pltfm_host *pltfm_host;
4392 struct sdhci_tegra *tegra_host;
4394 host = (struct sdhci_host *)data;
4396 pltfm_host = sdhci_priv(host);
4397 if (pltfm_host != NULL) {
4398 tegra_host = pltfm_host->priv;
4399 if (tegra_host != NULL) {
4400 /* Make sure clock gating is disabled */
4401 if ((tegra_host->dbg_cfg.clk_ungated) &&
4402 (tegra_host->clk_enabled)) {
4403 sdhci_tegra_set_trim_delay(host, value);
4404 tegra_host->dbg_cfg.trim_val =
4407 pr_info("%s: Disable clock gating before setting value\n",
4408 mmc_hostname(host->mmc));
/* debugfs read: report the last trim delay value set via the override. */
4416 static int show_trim_override_value(void *data, u64 *value)
4418 struct sdhci_host *host;
4419 struct sdhci_pltfm_host *pltfm_host;
4420 struct sdhci_tegra *tegra_host;
4422 host = (struct sdhci_host *)data;
4424 pltfm_host = sdhci_priv(host);
4425 if (pltfm_host != NULL) {
4426 tegra_host = pltfm_host->priv;
4427 if (tegra_host != NULL)
4428 *value = tegra_host->dbg_cfg.trim_val;
/* debugfs read: report the last tap delay value set via the override. */
4434 static int show_tap_override_value(void *data, u64 *value)
4436 struct sdhci_host *host;
4437 struct sdhci_pltfm_host *pltfm_host;
4438 struct sdhci_tegra *tegra_host;
4440 host = (struct sdhci_host *)data;
4442 pltfm_host = sdhci_priv(host);
4443 if (pltfm_host != NULL) {
4444 tegra_host = pltfm_host->priv;
4445 if (tegra_host != NULL)
4446 *value = tegra_host->dbg_cfg.tap_val;
/*
 * debugfs write: override the inbound tap delay. Mirrors
 * set_trim_override_value(): clock gating must be disabled and the clock
 * running before the vendor register is touched.
 */
4452 static int set_tap_override_value(void *data, u64 value)
4454 struct sdhci_host *host;
4455 struct sdhci_pltfm_host *pltfm_host;
4456 struct sdhci_tegra *tegra_host;
4458 host = (struct sdhci_host *)data;
4460 pltfm_host = sdhci_priv(host);
4461 if (pltfm_host != NULL) {
4462 tegra_host = pltfm_host->priv;
4463 if (tegra_host != NULL) {
4464 /* Make sure clock gating is disabled */
4465 if ((tegra_host->dbg_cfg.clk_ungated) &&
4466 (tegra_host->clk_enabled)) {
4467 sdhci_tegra_set_tap_delay(host, value);
4468 tegra_host->dbg_cfg.tap_val = value;
4470 pr_info("%s: Disable clock gating before setting value\n",
4471 mmc_hostname(host->mmc));
/*
 * debugfs simple-attribute wrappers pairing the show/set helpers above.
 * All expose a u64 formatted as "%llu".
 */
4478 DEFINE_SIMPLE_ATTRIBUTE(sdhci_polling_period_fops, show_polling_period,
4479 set_polling_period, "%llu\n");
4480 DEFINE_SIMPLE_ATTRIBUTE(sdhci_active_load_high_threshold_fops,
4481 show_active_load_high_threshold,
4482 set_active_load_high_threshold, "%llu\n");
4483 DEFINE_SIMPLE_ATTRIBUTE(sdhci_disable_clkgating_fops,
4484 show_disableclkgating_value,
4485 set_disableclkgating_value, "%llu\n");
4486 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_trim_data_fops,
4487 show_trim_override_value,
4488 set_trim_override_value, "%llu\n");
4489 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_tap_data_fops,
4490 show_tap_override_value,
4491 set_tap_override_value, "%llu\n");
/*
 * Create the per-host debugfs tree: error_stats, the dfs_stats_dir
 * subtree (dfs_stats, polling_period, active_load_high_threshold), the
 * override_data subtree (clk_gate_disabled, tap_value, trim_value) and,
 * for delayed-clock-gate hosts, clk_gate_tmout_ticks. On any failure the
 * whole tree is removed (error path at the bottom) and the failing
 * __LINE__ is reported; callers tolerate debugfs being absent.
 */
4493 static void sdhci_tegra_error_stats_debugfs(struct sdhci_host *host)
4495 struct dentry *root = host->debugfs_root;
4496 struct dentry *dfs_root;
4497 unsigned saved_line;
4500 root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
4501 if (IS_ERR_OR_NULL(root)) {
4502 saved_line = __LINE__;
4505 host->debugfs_root = root;
4508 dfs_root = debugfs_create_dir("dfs_stats_dir", root);
4509 if (IS_ERR_OR_NULL(dfs_root)) {
4510 saved_line = __LINE__;
4514 if (!debugfs_create_file("error_stats", S_IRUSR, root, host,
4515 &sdhci_host_fops)) {
4516 saved_line = __LINE__;
4519 if (!debugfs_create_file("dfs_stats", S_IRUSR, dfs_root, host,
4520 &sdhci_host_dfs_fops)) {
4521 saved_line = __LINE__;
4524 if (!debugfs_create_file("polling_period", 0644, dfs_root, (void *)host,
4525 &sdhci_polling_period_fops)) {
4526 saved_line = __LINE__;
4529 if (!debugfs_create_file("active_load_high_threshold", 0644,
4530 dfs_root, (void *)host,
4531 &sdhci_active_load_high_threshold_fops)) {
4532 saved_line = __LINE__;
/* Reuse dfs_root for the second subtree. */
4536 dfs_root = debugfs_create_dir("override_data", root);
4537 if (IS_ERR_OR_NULL(dfs_root)) {
4538 saved_line = __LINE__;
4542 if (!debugfs_create_file("clk_gate_disabled", 0644,
4543 dfs_root, (void *)host,
4544 &sdhci_disable_clkgating_fops)) {
4545 saved_line = __LINE__;
4549 if (!debugfs_create_file("tap_value", 0644,
4550 dfs_root, (void *)host,
4551 &sdhci_override_tap_data_fops)) {
4552 saved_line = __LINE__;
4556 if (!debugfs_create_file("trim_value", 0644,
4557 dfs_root, (void *)host,
4558 &sdhci_override_trim_data_fops)) {
4559 saved_line = __LINE__;
4562 if (IS_QUIRKS2_DELAYED_CLK_GATE(host)) {
4563 host->clk_gate_tmout_ticks = -1;
4564 if (!debugfs_create_u32("clk_gate_tmout_ticks",
4566 root, (u32 *)&host->clk_gate_tmout_ticks)) {
4567 saved_line = __LINE__;
/* Common error path: tear down everything created above. */
4575 debugfs_remove_recursive(root);
4576 host->debugfs_root = NULL;
4578 pr_err("%s %s: Failed to initialize debugfs functionality at line=%d\n", __func__,
4579 mmc_hostname(host->mmc), saved_line);
4584 * Simulate the card remove and insert
4585 * set req to true to insert the card
4586 * set req to false to remove the card
/*
 * Simulate card removal/insertion from debugfs (req=true inserts,
 * req=false removes). Refuses the request when a CD gpio says no card is
 * physically present, or when the card is already in the requested
 * state. Insertion enables the card regulators (and waits out the 37 ms
 * power-up ramp); removal disables them, forces a retune on next insert
 * and kicks the card-detect tasklet.
 */
4588 static int sdhci_tegra_carddetect(struct sdhci_host *sdhost, bool req)
4590 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
4591 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4592 struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
4593 struct tegra_sdhci_platform_data *plat;
4596 plat = pdev->dev.platform_data;
4598 *check if card is inserted physically before performing
4599 *virtual remove or insertion
/* NOTE(review): CD gpio polarity assumed active-low here — confirm. */
4601 if (gpio_is_valid(plat->cd_gpio) &&
4602 (gpio_get_value_cansleep(plat->cd_gpio) != 0)) {
4604 dev_err(mmc_dev(sdhost->mmc),
4605 "Card not inserted in slot\n");
4609 /* Ignore the request if card already in requested state*/
4610 if (tegra_host->card_present == req) {
4611 dev_info(mmc_dev(sdhost->mmc),
4612 "Card already in requested state\n");
4615 tegra_host->card_present = req;
4617 if (tegra_host->card_present) {
4618 err = tegra_sdhci_configure_regulators(tegra_host,
4619 CONFIG_REG_EN, 0, 0);
4621 dev_err(mmc_dev(sdhost->mmc),
4622 "Failed to enable card regulators %d\n", err);
4625 /*sdcard power up time max 37msec*/
4626 usleep_range(40000, 41000);
4628 err = tegra_sdhci_configure_regulators(tegra_host,
4629 CONFIG_REG_DIS, 0 , 0);
4631 dev_err(mmc_dev(sdhost->mmc),
4632 "Failed to disable card regulators %d\n", err);
4635 /*sdcard power down time min 1ms*/
4636 usleep_range(1000, 2000);
4639 * Set retune request as tuning should be done next time
4640 * a card is inserted.
4642 tegra_host->tuning_status = TUNING_STATUS_RETUNE;
4643 tegra_host->force_retune = true;
/* Let the core rescan the slot with the new (virtual) card state. */
4645 tasklet_schedule(&sdhost->card_tasklet);
/* debugfs read: report the virtual card-present state (0/1). */
4650 static int get_card_insert(void *data, u64 *val)
4652 struct sdhci_host *sdhost = data;
4653 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
4654 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4656 *val = tegra_host->card_present;
/*
 * debugfs write: 0 = virtual remove, 1 = virtual insert. Rejected for
 * values other than 0/1 (range check on a line not visible here) and for
 * non-removable (eMMC/built-in) hosts.
 */
4661 static int set_card_insert(void *data, u64 val)
4663 struct sdhci_host *sdhost = data;
4668 dev_err(mmc_dev(sdhost->mmc),
4669 "Usage error. Use 0 to remove, 1 to insert %d\n", err);
4673 if (sdhost->mmc->caps & MMC_CAP_NONREMOVABLE) {
4675 dev_err(mmc_dev(sdhost->mmc),
4676 "usage error, Supports only SDCARD hosts only %d\n", err);
4680 err = sdhci_tegra_carddetect(sdhost, val == 1);
/*
 * debugfs read: print the current bus timing mode as a human-readable
 * name (e.g. "SDR104"). The lookup table is indexed by the mmc core's
 * MMC_TIMING_* value currently in ios.timing.
 */
4685 static ssize_t get_bus_timing(struct file *file, char __user *user_buf,
4686 size_t count, loff_t *ppos)
4688 struct sdhci_host *host = file->private_data;
4689 unsigned int len = 0;
4692 static const char *const sdhci_tegra_timing[] = {
4693 [MMC_TIMING_LEGACY] = "legacy",
4694 [MMC_TIMING_MMC_HS] = "highspeed",
4695 [MMC_TIMING_SD_HS] = "highspeed",
4696 [MMC_TIMING_UHS_SDR12] = "SDR12",
4697 [MMC_TIMING_UHS_SDR25] = "SDR25",
4698 [MMC_TIMING_UHS_SDR50] = "SDR50",
4699 [MMC_TIMING_UHS_SDR104] = "SDR104",
4700 [MMC_TIMING_UHS_DDR50] = "DDR50",
4701 [MMC_TIMING_MMC_HS200] = "HS200",
4702 [MMC_TIMING_MMC_HS400] = "HS400",
4705 len = snprintf(buf, sizeof(buf), "%s\n",
4706 sdhci_tegra_timing[host->mmc->ios.timing]);
4707 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
/*
 * debugfs write: cap the host's bus-timing capability at the mode named
 * by the user ("legacy", "highspeed", "SDR12".."SDR104", "DDR50"). Only
 * allowed while no card is present; the new cap takes effect at the next
 * card insertion. Each accepted mode builds a mask that keeps that mode
 * and everything slower, then re-applies it over caps_timing_orig.
 */
4710 static ssize_t set_bus_timing(struct file *file,
4711 const char __user *userbuf,
4712 size_t count, loff_t *ppos)
4714 struct sdhci_host *sdhost = file->private_data;
4715 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
4716 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4722 /* Ignore the request if card is not yet removed*/
4723 if (tegra_host->card_present != 0) {
4724 dev_err(mmc_dev(sdhost->mmc),
4725 "Sdcard not removed. Set bus timing denied\n");
4730 if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) {
/*
 * FIXME(review): buf[count-1] can index past buf when count >
 * sizeof(buf) — the copy above is clamped to sizeof(buf) but the
 * terminator index is not. Should be
 * buf[min(count, sizeof(buf)) - 1] = '\0'; verify against buf's
 * declaration (not visible in this view).
 */
4735 buf[count-1] = '\0';
4737 /*prepare the temp mask to mask higher host timing modes wrt user
4740 mask = ~(MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_DDR50
4741 | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25
4742 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104);
4743 if (strcmp(buf, "highspeed") == 0) {
4744 timing_req = MMC_CAP_SD_HIGHSPEED;
4745 mask |= MMC_CAP_SD_HIGHSPEED;
4746 } else if (strcmp(buf, "SDR12") == 0) {
4747 timing_req = MMC_CAP_UHS_SDR12;
4748 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12);
4749 } else if (strcmp(buf, "SDR25") == 0) {
4750 timing_req = MMC_CAP_UHS_SDR25;
4751 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12
4752 | MMC_CAP_UHS_SDR25);
4753 } else if (strcmp(buf, "SDR50") == 0) {
4754 timing_req = MMC_CAP_UHS_SDR50;
4755 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12
4756 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50);
4757 } else if (strcmp(buf, "SDR104") == 0) {
4758 timing_req = MMC_CAP_UHS_SDR104;
4759 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12
4760 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
4761 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50);
4762 } else if (strcmp(buf, "DDR50") == 0) {
4763 timing_req = MMC_CAP_UHS_DDR50;
4764 mask |= (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_SDR12
4765 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
4766 | MMC_CAP_UHS_DDR50);
/* Anything that is not "legacy" at this point is invalid input. */
4767 } else if (strcmp(buf, "legacy")) {
4769 dev_err(mmc_dev(sdhost->mmc),
4770 "Invalid bus timing requested %d\n", err);
4774 /*Checks if user requested mode is supported by host*/
4775 if (timing_req && (!(sdhost->caps_timing_orig & timing_req))) {
4777 dev_err(mmc_dev(sdhost->mmc),
4778 "Timing not supported by Host %d\n", err);
4783 *Limit the capability of host upto user requested timing
4785 sdhost->mmc->caps |= sdhost->caps_timing_orig;
4786 sdhost->mmc->caps &= mask;
4788 dev_dbg(mmc_dev(sdhost->mmc),
4789 "Host Bus Timing limited to %s mode\n", buf);
4790 dev_dbg(mmc_dev(sdhost->mmc),
4791 "when sdcard is inserted next time, bus timing");
4792 dev_dbg(mmc_dev(sdhost->mmc),
4793 "gets selected based on card speed caps");
/* debugfs file ops for bus_timing (text read/write, see get/set above). */
4801 static const struct file_operations sdhci_host_bus_timing_fops = {
4802 .read = get_bus_timing,
4803 .write = set_bus_timing,
4804 .open = simple_open,
4805 .owner = THIS_MODULE,
4806 .llseek = default_llseek,
/* debugfs attribute for virtual card insert/remove. */
4809 DEFINE_SIMPLE_ATTRIBUTE(sdhci_tegra_card_insert_fops, get_card_insert, set_card_insert,
/*
 * Create the misc debugfs entries (bus_timing, card_insert). Snapshots
 * the host's timing-related caps into caps_timing_orig first, since the
 * bus_timing file mutates mmc->caps. Mirrors the error handling of
 * sdhci_tegra_error_stats_debugfs(): remove the tree and log the failing
 * line on any error.
 */
4811 static void sdhci_tegra_misc_debugfs(struct sdhci_host *host)
4813 struct dentry *root = host->debugfs_root;
4814 unsigned saved_line;
4816 /*backup original host timing capabilities as debugfs may override it later*/
4817 host->caps_timing_orig = host->mmc->caps &
4818 (MMC_CAP_SD_HIGHSPEED | MMC_CAP_UHS_DDR50
4819 | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25
4820 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104);
4823 root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
4824 if (IS_ERR_OR_NULL(root)) {
4825 saved_line = __LINE__;
4828 host->debugfs_root = root;
4831 if (!debugfs_create_file("bus_timing", S_IRUSR | S_IWUSR, root, host,
4832 &sdhci_host_bus_timing_fops)) {
4833 saved_line = __LINE__;
4837 if (!debugfs_create_file("card_insert", S_IRUSR | S_IWUSR, root, host,
4838 &sdhci_tegra_card_insert_fops)) {
4839 saved_line = __LINE__;
/* Error path: tear down and report. */
4846 debugfs_remove_recursive(root);
4847 host->debugfs_root = NULL;
4849 pr_err("%s %s: Failed to initialize debugfs functionality at line=%d\n", __func__,
4850 mmc_hostname(host->mmc), saved_line);
/*
 * sysfs 'cmd_state' store: switch the tap delay between the value tuned
 * for the default core-voltage range (cmd 1) and the one tuned for the
 * high range (cmd 2). Only meaningful for UHS/HS200 cards that have
 * tuning data. Ensures the controller clock is on, waits for in-flight
 * data transfers to drain, applies the tap under host->lock, then
 * restores the clock state for delayed-clock-gate hosts.
 */
4854 static ssize_t sdhci_handle_boost_mode_tap(struct device *dev,
4855 struct device_attribute *attr, const char *buf, size_t count)
4858 struct mmc_card *card;
4859 char *p = (char *)buf;
4860 struct sdhci_host *host = dev_get_drvdata(dev);
4861 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4862 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4863 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
4864 struct tegra_sdhci_platform_data *plat;
4865 struct tegra_tuning_data *tuning_data;
4869 tap_cmd = memparse(p, &p);
4871 card = host->mmc->card;
4875 /* if not uhs -- no tuning and no tap value to set */
4876 if (!mmc_sd_card_uhs(card) && !mmc_card_hs200(card))
4879 /* if no change in tap value -- just exit */
4880 if (tap_cmd == tegra_host->tap_cmd)
4883 if ((tap_cmd != TAP_CMD_TRIM_DEFAULT_VOLTAGE) &&
4884 (tap_cmd != TAP_CMD_TRIM_HIGH_VOLTAGE)) {
4885 pr_info("echo 1 > cmd_state # to set normal voltage\n");
4886 pr_info("echo 2 > cmd_state # to set high voltage\n");
4890 tegra_host->tap_cmd = tap_cmd;
4891 plat = pdev->dev.platform_data;
4892 tuning_data = sdhci_tegra_get_tuning_data(host, host->max_clk);
4893 /* Check if host clock is enabled */
4894 if (!tegra_host->clk_enabled) {
4895 /* Nothing to do if the host is not powered ON */
4896 if (host->mmc->ios.power_mode != MMC_POWER_ON)
4898 else if (IS_RTPM_DELAY_CG(plat->rtpm_type))
4899 tegra_sdhci_set_clock(host, host->mmc->ios.clock);
4902 /* Wait for any on-going data transfers */
4903 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
4904 while (present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) {
4909 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
4912 spin_lock(&host->lock);
4914 case TAP_CMD_TRIM_DEFAULT_VOLTAGE:
4915 /* set tap value for voltage range 1.1 to 1.25 */
4916 sdhci_tegra_set_tap_delay(host, tuning_data->best_tap_value);
4919 case TAP_CMD_TRIM_HIGH_VOLTAGE:
4920 /* set tap value for voltage range 1.25 to 1.39 */
4921 sdhci_tegra_set_tap_delay(host,
4922 tuning_data->nom_best_tap_value);
4925 spin_unlock(&host->lock);
/* Undo the temporary clock-on done above for delayed-CG hosts. */
4926 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
4927 tegra_sdhci_set_clock(host, 0);
/* sysfs 'cmd_state' show: report the currently selected tap command. */
4931 static ssize_t sdhci_show_turbo_mode(struct device *dev,
4932 struct device_attribute *attr, char *buf)
4934 struct sdhci_host *host = dev_get_drvdata(dev);
4935 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4936 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4938 return sprintf(buf, "%d\n", tegra_host->tap_cmd);
/* rw sysfs attribute backed by the two handlers above. */
4941 static DEVICE_ATTR(cmd_state, 0644, sdhci_show_turbo_mode,
4942 sdhci_handle_boost_mode_tap);
/*
 * Reboot notifier: disable the card regulators and forbid runtime PM so
 * the controller is left in a quiescent state across reboot.
 */
4944 static int tegra_sdhci_reboot_notify(struct notifier_block *nb,
4945 unsigned long event, void *data)
4947 struct sdhci_tegra *tegra_host =
4948 container_of(nb, struct sdhci_tegra, reboot_notify);
4950 struct sdhci_host *sdhci = dev_get_drvdata(tegra_host->dev);
4955 err = tegra_sdhci_configure_regulators(tegra_host,
4956 CONFIG_REG_DIS, 0, 0);
4958 pr_err("Disable regulator in reboot notify failed %d\n",
4961 /* disable runtime pm callbacks */
4962 pr_debug("%s: %s line=%d\n",
4963 mmc_hostname(sdhci->mmc), __func__, __LINE__);
4964 sdhci_runtime_forbid(sdhci);
/*
 * Pre-ios hook: make sure the controller clock is running before the
 * core touches registers (Tegra SDMMC needs a live clock for register
 * access), and swap between the DDR and SDR parent clocks when entering
 * or leaving DDR50 timing. The clock swap is done with the bus clock
 * gated and the previous frequency restored afterwards.
 */
4971 static void tegra_sdhci_ios_config_enter(struct sdhci_host *sdhci,
4972 struct mmc_ios *ios)
4974 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
4975 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4976 struct clk *new_mode_clk;
4977 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
4978 struct tegra_sdhci_platform_data *plat;
4979 bool change_clk = false;
4982 * Tegra sdmmc controllers require clock to be enabled for any register
4983 * access. Set the minimum controller clock if no clock is requested.
4985 plat = pdev->dev.platform_data;
4986 if (!IS_RTPM_DELAY_CG(plat->rtpm_type)) {
4987 if (ios->clock && (ios->clock != sdhci->clock))
4988 tegra_sdhci_set_clock(sdhci, ios->clock);
4990 if (!sdhci->clock && !ios->clock) {
4991 tegra_sdhci_set_clock(sdhci, sdhci->mmc->f_min);
4992 sdhci->clock = sdhci->mmc->f_min;
4993 } else if (ios->clock && (ios->clock != sdhci->clock)) {
4994 tegra_sdhci_set_clock(sdhci, ios->clock);
4999 * Check for DDR50 mode setting and set ddr_clk if not already
5000 * done. Return if only one clock option is available.
5002 if (!tegra_host->ddr_clk || !tegra_host->sdr_clk) {
5005 if ((ios->timing == MMC_TIMING_UHS_DDR50) &&
5006 !tegra_host->is_ddr_clk_set) {
5008 new_mode_clk = tegra_host->ddr_clk;
5009 } else if ((ios->timing != MMC_TIMING_UHS_DDR50) &&
5010 tegra_host->is_ddr_clk_set) {
5012 new_mode_clk = tegra_host->sdr_clk;
5016 /* below clock on/off also needed for MMC_RTPM */
5017 tegra_sdhci_set_clock(sdhci, 0);
5018 pltfm_host->clk = new_mode_clk;
5019 /* Restore the previous frequency */
5020 tegra_sdhci_set_clock(sdhci, sdhci->max_clk);
/* Toggle the bookkeeping flag to match the now-active clock. */
5021 tegra_host->is_ddr_clk_set =
5022 !tegra_host->is_ddr_clk_set;
/*
 * Post-ios hook: on MMC_POWER_OFF run the retuning cleanup and disable
 * the card regulators (re-enabling them on other power modes — the else
 * branch line is not visible here), then gate the controller clock once
 * all register accesses for this ios change are done.
 */
5027 static void tegra_sdhci_ios_config_exit(struct sdhci_host *sdhci,
5028 struct mmc_ios *ios)
5030 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
5031 struct sdhci_tegra *tegra_host = pltfm_host->priv;
5033 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
5034 struct tegra_sdhci_platform_data *plat;
5036 plat = pdev->dev.platform_data;
5038 * Do any required handling for retuning requests before powering off
5041 if (ios->power_mode == MMC_POWER_OFF) {
5042 tegra_sdhci_power_off(sdhci, ios->power_mode);
5043 err = tegra_sdhci_configure_regulators(tegra_host,
5044 CONFIG_REG_DIS, 0, 0);
5046 pr_err("Disable regulators failed in ios:%d\n", err);
5048 err = tegra_sdhci_configure_regulators(tegra_host,
5049 CONFIG_REG_EN, 0, 0);
5051 pr_err("Enable regulator failed in ios:%d\n", err);
5055 * In case of power off, turn off controller clock now as all the
5056 * required register accesses are already done.
5058 if (!ios->clock && !sdhci->mmc->skip_host_clkgate) {
5059 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
5060 tegra_sdhci_set_clock(sdhci, 0);
/*
 * Drive-strength selection hook: ignore the negotiated host/card values
 * and always use the platform-data default for this board.
 */
5064 static int tegra_sdhci_get_drive_strength(struct sdhci_host *sdhci,
5065 unsigned int max_dtr, int host_drv, int card_drv)
5067 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
5068 struct sdhci_tegra *tegra_host = pltfm_host->priv;
5069 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
5071 return plat->default_drv_type;
/*
 * Tap-delay management: SAVE_TUNED_TAP snapshots the tap currently in
 * the vendor clock-control register and marks tuning done;
 * SET_DEFAULT_TAP programs the platform-data tap; the remaining case
 * (SET_TUNED_TAP, label not visible here) re-applies the saved value.
 */
5074 static void tegra_sdhci_config_tap(struct sdhci_host *sdhci, u8 option)
5076 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
5077 struct sdhci_tegra *tegra_host = pltfm_host->priv;
5081 case SAVE_TUNED_TAP:
5082 tap_delay = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
5083 tap_delay >>= SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT;
5084 tap_delay &= SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK;
5085 tegra_host->tuned_tap_delay = tap_delay;
5086 tegra_host->tuning_status = TUNING_STATUS_DONE;
5088 case SET_DEFAULT_TAP:
5089 sdhci_tegra_set_tap_delay(sdhci, tegra_host->plat->tap_delay);
5092 sdhci_tegra_set_tap_delay(sdhci, tegra_host->tuned_tap_delay);
5095 dev_err(mmc_dev(sdhci->mmc),
5096 "Invalid argument passed to tap config\n");
/*
 * Apply the pinctrl pad-strength state registered for the given UHS mode,
 * if the platform provided one; failures are logged but non-fatal.
 */
5100 static void sdhci_tegra_select_drive_strength(struct sdhci_host *host,
5103 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5104 struct sdhci_tegra *tegra_host = pltfm_host->priv;
5107 if (!IS_ERR_OR_NULL(tegra_host->pinctrl_sdmmc)) {
5108 if (!IS_ERR_OR_NULL(tegra_host->sdmmc_pad_ctrl[uhs])) {
5109 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
5110 tegra_host->sdmmc_pad_ctrl[uhs]);
5112 dev_warn(mmc_dev(host->mmc),
5113 "setting pad strength for sdcard mode %d failed\n", uhs);
5116 dev_dbg(mmc_dev(host->mmc),
5117 "No custom pad-ctrl strength settings present for sdcard %d mode\n", uhs);
5123 * Set the max pio transfer limits to allow for dynamic switching between dma
5124 * and pio modes if the platform data indicates support for it. Option to set
5125 * different limits for different interfaces.
5127 static void tegra_sdhci_set_max_pio_transfer_limits(struct sdhci_host *sdhci)
5129 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
5130 struct sdhci_tegra *tegra_host = pltfm_host->priv;
/* Nothing to do unless the board opted in and a card is attached. */
5132 if (!tegra_host->plat->dynamic_dma_pio_switch || !sdhci->mmc->card)
/* Per-card-type limits; all currently zero (case labels not visible). */
5135 switch (sdhci->mmc->card->type) {
5137 sdhci->max_pio_size = 0;
5138 sdhci->max_pio_blocks = 0;
5141 sdhci->max_pio_size = 0;
5142 sdhci->max_pio_blocks = 0;
5145 sdhci->max_pio_size = 0;
5146 sdhci->max_pio_blocks = 0;
5149 dev_err(mmc_dev(sdhci->mmc),
5150 "Unknown device type. No max pio limits set\n");
/*
 * Decide whether the host register dump should be suppressed for the
 * current error. Broadcom SDIO Wifi in KSO sleep mode triggers benign
 * CMD52 failures with known argument patterns; when the platform sets
 * bcm_sdio_suppress_kso_dump, match those and skip the noisy dump.
 */
5154 static bool sdhci_tegra_skip_register_dump(struct sdhci_host *host)
5156 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
5157 struct sdhci_tegra *tegra_host = pltfm_host->priv;
5158 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
5162 const unsigned short kso_cmd52_pattern = 0x341a;
5163 const unsigned int arg_kso_patterns[] = {
5169 * Case: "bcm_sdio_suppress_kso_dump"
5170 * For KSO sleep mode error pattern in host register dump corresponding
5171 * to CMD52 case would be either of the following:
5173 Argument: 0x92003e01 (Write)
5174 Cmd: 0x0000341a (CMD52 indicated by 0x34)
5176 Argument: 0x12003e00 (Read)
5177 Cmd: 0x0000341a (CMD52 indicated by 0x34)
5180 if (plat->bcm_sdio_suppress_kso_dump) {
5181 arg = sdhci_readl(host, SDHCI_ARGUMENT);
5182 cmd = sdhci_readw(host, SDHCI_COMMAND);
5183 if (cmd == kso_cmd52_pattern) {
5184 for (i = 0; i < ARRAY_SIZE(arg_kso_patterns); i++)
5185 if (arg_kso_patterns[i] == arg)
/*
 * SDHCI host-operation table wiring all the Tegra-specific hooks defined
 * in this file into the generic sdhci core.
 */
5192 static const struct sdhci_ops tegra_sdhci_ops = {
5193 .get_ro = tegra_sdhci_get_ro,
5194 .get_cd = tegra_sdhci_get_cd,
5195 .read_l = tegra_sdhci_readl,
5196 .read_w = tegra_sdhci_readw,
5197 .write_l = tegra_sdhci_writel,
5198 .write_w = tegra_sdhci_writew,
5199 .platform_bus_width = tegra_sdhci_buswidth,
5200 .set_clock = tegra_sdhci_set_clock,
5201 .suspend = tegra_sdhci_suspend,
5202 .resume = tegra_sdhci_resume,
5203 .platform_resume = tegra_sdhci_post_resume,
5204 .platform_reset_exit = tegra_sdhci_reset_exit,
5205 .platform_get_bus = tegra_sdhci_get_bus,
5206 .platform_ios_config_enter = tegra_sdhci_ios_config_enter,
5207 .platform_ios_config_exit = tegra_sdhci_ios_config_exit,
5208 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
5209 .switch_signal_voltage = tegra_sdhci_signal_voltage_switch,
5210 .validate_sd2_0 = tegra_sdhci_validate_sd2_0,
5211 .switch_signal_voltage_exit = tegra_sdhci_do_calibration,
5212 .execute_freq_tuning = sdhci_tegra_execute_tuning,
5213 .sd_error_stats = sdhci_tegra_sd_error_stats,
5214 #ifdef CONFIG_MMC_FREQ_SCALING
5215 .dfs_gov_init = sdhci_tegra_freq_gov_init,
5216 .dfs_gov_get_target_freq = sdhci_tegra_get_target_freq,
5218 .get_drive_strength = tegra_sdhci_get_drive_strength,
5219 .post_init = tegra_sdhci_post_init,
5220 .dump_host_cust_regs = tegra_sdhci_dumpregs,
5221 .get_max_tuning_loop_counter = sdhci_tegra_get_max_tuning_loop_counter,
5222 .config_tap_delay = tegra_sdhci_config_tap,
5223 .is_tuning_done = tegra_sdhci_is_tuning_done,
5224 .get_max_pio_transfer_limits = tegra_sdhci_set_max_pio_transfer_limits,
5225 .skip_register_dump = sdhci_tegra_skip_register_dump,
/* Tegra114 (T11x): platform quirks plus SoC tuning coefficients. */
5228 static struct sdhci_pltfm_data sdhci_tegra11_pdata = {
5229 .quirks = TEGRA_SDHCI_QUIRKS,
5230 .quirks2 = TEGRA_SDHCI_QUIRKS2,
5231 .ops = &tegra_sdhci_ops,
5234 static struct sdhci_tegra_soc_data soc_data_tegra11 = {
5235 .pdata = &sdhci_tegra11_pdata,
5236 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
5237 NVQUIRK_SET_DRIVE_STRENGTH |
5238 NVQUIRK_SET_TRIM_DELAY |
5239 NVQUIRK_ENABLE_DDR50 |
5240 NVQUIRK_ENABLE_HS200 |
5241 NVQUIRK_ENABLE_AUTO_CMD23 |
5242 NVQUIRK_INFINITE_ERASE_TIMEOUT,
5243 .parent_clk_list = {"pll_p", "pll_c"},
5244 .tuning_freq_list = {81600000, 156000000, 200000000},
5245 .t2t_coeffs = t11x_tuning_coeffs,
5246 .t2t_coeffs_count = 3,
5247 .tap_hole_coeffs = t11x_tap_hole_coeffs,
5248 .tap_hole_coeffs_count = 12,
/* Tegra124 (T12x): adds 64-bit DMA addressing and host-off-card-on. */
5251 static struct sdhci_pltfm_data sdhci_tegra12_pdata = {
5252 .quirks = TEGRA_SDHCI_QUIRKS,
5253 .quirks2 = TEGRA_SDHCI_QUIRKS2 |
5254 SDHCI_QUIRK2_HOST_OFF_CARD_ON |
5255 SDHCI_QUIRK2_SUPPORT_64BIT_DMA |
5256 SDHCI_QUIRK2_USE_64BIT_ADDR,
5257 .ops = &tegra_sdhci_ops,
5260 static struct sdhci_tegra_soc_data soc_data_tegra12 = {
5261 .pdata = &sdhci_tegra12_pdata,
5262 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
5263 NVQUIRK_SET_TRIM_DELAY |
5264 NVQUIRK_ENABLE_DDR50 |
5265 NVQUIRK_ENABLE_HS200 |
5266 NVQUIRK_ENABLE_AUTO_CMD23 |
5267 NVQUIRK_INFINITE_ERASE_TIMEOUT |
5268 NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD |
5269 NVQUIRK_HIGH_FREQ_TAP_PROCEDURE |
5270 NVQUIRK_SET_CALIBRATION_OFFSETS,
5271 .parent_clk_list = {"pll_p", "pll_c"},
5272 .tuning_freq_list = {81600000, 136000000, 200000000},
5273 .t2t_coeffs = t12x_tuning_coeffs,
5274 .t2t_coeffs_count = 3,
5275 .tap_hole_coeffs = t12x_tap_hole_coeffs,
5276 .tap_hole_coeffs_count = 14,
/* Tegra210 (T21x): HS400 capable, non-standard tuning/RTPM, HW tuning. */
5279 static struct sdhci_pltfm_data sdhci_tegra21_pdata = {
5280 .quirks = TEGRA_SDHCI_QUIRKS,
5281 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
5282 SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING |
5283 SDHCI_QUIRK2_NON_STD_TUNING_LOOP_CNTR |
5284 SDHCI_QUIRK2_SKIP_TUNING |
5285 SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO |
5286 SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK |
5287 SDHCI_QUIRK2_HOST_OFF_CARD_ON |
5288 SDHCI_QUIRK2_USE_64BIT_ADDR |
5289 SDHCI_QUIRK2_NON_STD_TUN_CARD_CLOCK |
5290 SDHCI_QUIRK2_NON_STD_RTPM |
5291 SDHCI_QUIRK2_SUPPORT_64BIT_DMA,
5292 .ops = &tegra_sdhci_ops,
5295 static struct sdhci_tegra_soc_data soc_data_tegra21 = {
5296 .pdata = &sdhci_tegra21_pdata,
5297 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
5298 NVQUIRK_SET_TRIM_DELAY |
5299 NVQUIRK_ENABLE_DDR50 |
5300 NVQUIRK_ENABLE_HS200 |
5301 NVQUIRK_ENABLE_HS400 |
5302 NVQUIRK_ENABLE_AUTO_CMD23 |
5303 NVQUIRK_INFINITE_ERASE_TIMEOUT |
5304 NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD |
5305 NVQUIRK_SET_SDMEMCOMP_VREF_SEL |
5306 NVQUIRK_HIGH_FREQ_TAP_PROCEDURE |
5307 NVQUIRK_SET_CALIBRATION_OFFSETS |
5308 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK |
5309 NVQUIRK_UPDATE_PAD_CNTRL_REG |
5310 NVQUIRK_UPDATE_PIN_CNTRL_REG,
5311 .nvquirks2 = NVQUIRK2_UPDATE_HW_TUNING_CONFG |
5312 NVQUIRK2_CONFIG_PWR_DET |
5313 NVQUIRK2_BROKEN_SD2_0_SUPPORT |
5314 NVQUIRK2_SELECT_SDR50_MODE |
5315 NVQUIRK2_ADD_DELAY_AUTO_CALIBRATION |
5316 NVQUIRK2_SET_PAD_E_INPUT_VOL |
5317 NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH,
/*
 * Tegra186 (T18x): same quirk set as T210 minus SKIP_TUNING and the
 * pad-e-input-voltage nvquirk.
 */
5320 static struct sdhci_pltfm_data sdhci_tegra18_pdata = {
5321 .quirks = TEGRA_SDHCI_QUIRKS,
5322 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
5323 SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING |
5324 SDHCI_QUIRK2_NON_STD_TUNING_LOOP_CNTR |
5325 SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO |
5326 SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK |
5327 SDHCI_QUIRK2_HOST_OFF_CARD_ON |
5328 SDHCI_QUIRK2_USE_64BIT_ADDR |
5329 SDHCI_QUIRK2_NON_STD_TUN_CARD_CLOCK |
5330 SDHCI_QUIRK2_NON_STD_RTPM |
5331 SDHCI_QUIRK2_SUPPORT_64BIT_DMA,
5332 .ops = &tegra_sdhci_ops,
5335 static struct sdhci_tegra_soc_data soc_data_tegra18 = {
5336 .pdata = &sdhci_tegra18_pdata,
5337 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
5338 NVQUIRK_SET_TRIM_DELAY |
5339 NVQUIRK_ENABLE_DDR50 |
5340 NVQUIRK_ENABLE_HS200 |
5341 NVQUIRK_ENABLE_HS400 |
5342 NVQUIRK_ENABLE_AUTO_CMD23 |
5343 NVQUIRK_INFINITE_ERASE_TIMEOUT |
5344 NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD |
5345 NVQUIRK_SET_SDMEMCOMP_VREF_SEL |
5346 NVQUIRK_HIGH_FREQ_TAP_PROCEDURE |
5347 NVQUIRK_SET_CALIBRATION_OFFSETS |
5348 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK |
5349 NVQUIRK_UPDATE_PAD_CNTRL_REG |
5350 NVQUIRK_UPDATE_PIN_CNTRL_REG,
5351 .nvquirks2 = NVQUIRK2_UPDATE_HW_TUNING_CONFG |
5352 NVQUIRK2_CONFIG_PWR_DET |
5353 NVQUIRK2_BROKEN_SD2_0_SUPPORT |
5354 NVQUIRK2_SELECT_SDR50_MODE |
5355 NVQUIRK2_ADD_DELAY_AUTO_CALIBRATION |
5356 NVQUIRK2_DYNAMIC_TRIM_SUPPLY_SWITCH,
/* DT match table: newest SoC first, mapping compatibles to soc_data. */
5359 static const struct of_device_id sdhci_tegra_dt_match[] = {
5360 { .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra18 },
5361 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra21 },
5362 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra12 },
5363 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra11 },
5366 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
/*
 * sdhci_tegra_dt_parse_pdata - build platform data from the device tree.
 *
 * Allocates a tegra_sdhci_platform_data with devm_kzalloc() (so all fields
 * default to zero/false) and fills it from optional DT properties of the
 * controller node.  of_property_read_u32() leaves the target field untouched
 * when the property is absent, so every numeric field keeps its zeroed
 * default unless the DT overrides it.
 *
 * NOTE(review): this extract omits several original lines (gaps in the
 * embedded line numbering), including the allocation-failure return path
 * and some if-bodies; the code below is kept byte-identical.
 */
5368 static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata(
5369 struct platform_device *pdev)
5373 struct tegra_sdhci_platform_data *plat;
5374 struct device_node *np = pdev->dev.of_node;
5382 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
5384 dev_err(&pdev->dev, "Can't allocate platform data\n");
/* GPIO lines: card-detect, write-protect, slot power. */
5388 plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
5389 plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
5390 plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
5392 if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
/* Tap/trim delay calibration values (separate values for DDR modes). */
5396 of_property_read_u32(np, "tap-delay", &plat->tap_delay);
5397 plat->is_ddr_tap_delay = of_property_read_bool(np,
5398 "nvidia,is-ddr-tap-delay");
5399 of_property_read_u32(np, "nvidia,ddr-tap-delay", &plat->ddr_tap_delay);
5400 of_property_read_u32(np, "trim-delay", &plat->trim_delay);
5401 plat->is_ddr_trim_delay = of_property_read_bool(np,
5402 "nvidia,is-ddr-trim-delay");
5403 of_property_read_u32(np, "ddr-trim-delay", &plat->ddr_trim_delay);
/* Clock limits and instance id. */
5404 of_property_read_u32(np, "ddr-clk-limit", &plat->ddr_clk_limit);
5405 of_property_read_u32(np, "max-clk-limit", &plat->max_clk_limit);
5406 of_property_read_u32(np, "id", &plat->id);
5407 of_property_read_u32(np, "dqs-trim-delay", &plat->dqs_trim_delay);
5408 of_property_read_u32(np, "dqs-trim-delay-hs533", &plat->dqs_trim_delay_hs533);
/* Pad compensation vref settings and auto-calibration offsets. */
5410 of_property_read_u32(np, "compad-vref-3v3", &plat->compad_vref_3v3);
5411 of_property_read_u32(np, "compad-vref-1v8", &plat->compad_vref_1v8);
5412 of_property_read_u32(np, "uhs-mask", &plat->uhs_mask);
5413 of_property_read_u32(np, "calib-3v3-offsets", &plat->calib_3v3_offsets);
5414 of_property_read_u32(np, "calib-1v8-offsets", &plat->calib_1v8_offsets);
5415 of_property_read_u32(np, "auto-cal-step", &plat->auto_cal_step);
5416 plat->disable_auto_cal = of_property_read_bool(np,
5417 "nvidia,disable-auto-cal");
5419 plat->power_off_rail = of_property_read_bool(np,
5422 plat->pwr_off_during_lp0 = of_property_read_bool(np,
5423 "pwr-off-during-lp0");
5425 plat->limit_vddio_max_volt = of_property_read_bool(np,
5426 "nvidia,limit-vddio-max-volt");
5427 plat->cd_wakeup_incapable = of_property_read_bool(np,
5428 "cd_wakeup_incapable");
/* Misc boolean capability/behavior flags. */
5430 plat->mmc_data.built_in = of_property_read_bool(np, "built-in");
5431 plat->update_pinctrl_settings = of_property_read_bool(np,
5432 "nvidia,update-pinctrl-settings");
5433 plat->dll_calib_needed = of_property_read_bool(np,
5434 "nvidia,dll-calib-needed");
5435 plat->enb_ext_loopback = of_property_read_bool(np,
5436 "nvidia,enable-ext-loopback");
5437 plat->disable_clock_gate = of_property_read_bool(np,
5438 "disable-clock-gate");
5439 plat->enable_hs533_mode =
5440 of_property_read_bool(np, "nvidia,enable-hs533-mode");
5441 of_property_read_u32(np, "default-drv-type", &plat->default_drv_type);
5442 plat->en_io_trim_volt = of_property_read_bool(np,
5443 "nvidia,en-io-trim-volt");
5444 plat->is_emmc = of_property_read_bool(np, "nvidia,is-emmc");
5445 plat->is_sd_device = of_property_read_bool(np, "nvidia,sd-device");
5447 of_property_read_bool(np, "nvidia,enable-strobe-mode");
/*
 * Translate a numeric "mmc-ocr-mask" selector into one of the driver's
 * OCR mask constants.  NOTE(review): the comparisons on 'val' selecting
 * each branch are among the omitted lines.
 */
5449 if (!of_property_read_u32(np, "mmc-ocr-mask", &val)) {
5451 plat->mmc_data.ocr_mask = MMC_OCR_1V8_MASK;
5453 plat->mmc_data.ocr_mask = MMC_OCR_2V8_MASK;
5455 plat->mmc_data.ocr_mask = MMC_OCR_3V2_MASK;
5457 plat->mmc_data.ocr_mask = MMC_OCR_3V3_MASK;
5459 plat->pwrdet_support = of_property_read_bool(np, "pwrdet-support");
/* Optional per-timing fixed clock frequency table. */
5460 if (of_find_property(np, "fixed-clock-freq", NULL)) {
5461 plat->is_fix_clock_freq = true;
5462 of_property_read_u32_array(np,
5464 (u32 *)&plat->fixed_clk_freq_table,
5465 MMC_TIMINGS_MAX_MODES);
5467 plat->enable_autocal_slew_override = of_property_read_bool(np,
5468 "nvidia,auto-cal-slew-override");
5470 ret = of_property_read_u32(np, "nvidia,runtime-pm-type",
5472 /* use delayed clock gate if runtime type not specified explicitly */
5474 plat->rtpm_type = RTPM_TYPE_DELAY_CG;
5477 of_property_read_bool(np, "nvidia,enable-cq");
5479 plat->en_periodic_calib = of_property_read_bool(np,
5480 "nvidia,en-periodic-calib");
/*
 * Collect sdmmc pin GPIOs and set each to output-high.
 * NOTE(review): 'label' points at a buffer reused across iterations
 * (sprintf into the same storage each pass) — confirm the full file
 * copies the string, otherwise all entries alias the last label.
 */
5481 plat->pin_count = of_gpio_named_count(np, "nvidia,sdmmc-pin-gpios");
5482 for (i = 0; i < plat->pin_count; ++i) {
5483 val = of_get_named_gpio(np, "nvidia,sdmmc-pin-gpios", i);
5484 if (gpio_is_valid(val)) {
5485 plat->gpios[i].gpio = val;
5486 plat->gpios[i].flags = GPIOF_OUT_INIT_HIGH;
5487 sprintf(label, "sdmmc_pin%d", i);
5488 plat->gpios[i].label = label;
5491 plat->bcm_sdio_suppress_kso_dump =
5492 of_property_read_bool(np, "nvidia,bcm-sdio-suppress-kso-dump");
/*
 * sdhci_tegra_get_pll_from_dt - read the optional "pll_source" string list
 * from the controller's DT node into @parent_clk_list.
 *
 * Returns early (non-zero, per the caller's usage in probe) when the
 * property is absent so the caller can fall back to the SoC defaults.
 * Emits a warning when the DT list exceeds @size; the visible code does
 * not clamp 'cnt' here — NOTE(review): the clamp/return logic sits in the
 * omitted lines, confirm against the full file.
 */
5497 static int sdhci_tegra_get_pll_from_dt(struct platform_device *pdev,
5498 const char **parent_clk_list, int size)
5500 struct device_node *np = pdev->dev.of_node;
5501 const char *pll_str;
5507 if (!of_find_property(np, "pll_source", NULL))
5510 cnt = of_property_count_strings(np, "pll_source");
5515 dev_warn(&pdev->dev,
5516 "pll list provide in DT exceeds max supported\n");
/* Copy each PLL name pointer into the caller-provided array. */
5520 for (i = 0; i < cnt; i++) {
5521 of_property_read_string_index(np, "pll_source", i, &pll_str);
5522 parent_clk_list[i] = pll_str;
 * sdhci_tegra_check_bondout
 *
 * check whether the specified SDHCI instance is bonded out
 *
 * do not validate ID itself, instead, just make sure it's less
 * than 4, so that we do not index beyond the end of position array
 *
 * non-zero return value means bond-out, so that instance doesn't exist
/*
 * NOTE(review): only the CONFIG_ARCH_TEGRA_21x_SOC arm is visible here;
 * the #else (presumably "return 0") and #endif are in the omitted lines.
 */
5537 static inline int sdhci_tegra_check_bondout(unsigned int id)
5539 #ifdef CONFIG_ARCH_TEGRA_21x_SOC
/* Map instance id -> bond-out fuse identifier (4 SDMMC instances). */
5540 enum tegra_bondout_dev dev[4] = {
5548 return tegra_bonded_out_dev(dev[id]);
/*
 * sdhci_tegra_init_pinctrl_info - look up and cache pinctrl/padctrl state
 * handles used later for dynamic pad reconfiguration.
 *
 * When plat->pwrdet_support is set, grabs the "sdmmc" padctrl handle (for
 * IO rail power detect).  When plat->update_pinctrl_settings is set, caches
 * schmitt enable/disable states, drive-strength states, and per-timing pad
 * control states; missing states are logged at dbg level and simply left
 * as error pointers — callers must test with IS_ERR_OR_NULL before use.
 *
 * NOTE(review): several closing braces / error returns are in omitted
 * lines; code kept byte-identical.
 */
5556 static int sdhci_tegra_init_pinctrl_info(struct device *dev,
5557 struct sdhci_tegra *tegra_host,
5558 struct tegra_sdhci_platform_data *plat)
5560 struct device_node *np = dev->of_node;
5561 const char *drive_gname;
5564 struct pinctrl_state *pctl_state;
/* Optional SDMMC pad controller for power-detect support. */
5569 if (plat->pwrdet_support) {
5570 tegra_host->sdmmc_padctrl = devm_padctrl_get(dev, "sdmmc");
5571 if (IS_ERR(tegra_host->sdmmc_padctrl)) {
5572 ret = PTR_ERR(tegra_host->sdmmc_padctrl);
5573 tegra_host->sdmmc_padctrl = NULL;
5577 if (plat->update_pinctrl_settings) {
5578 tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
5579 if (IS_ERR_OR_NULL(tegra_host->pinctrl_sdmmc)) {
5580 dev_err(dev, "Missing pinctrl info\n");
/* Schmitt trigger states: [0] = data/cmd pads, [1] = clock pad. */
5584 tegra_host->schmitt_enable[0] =
5585 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5586 "sdmmc_schmitt_enable");
5587 if (IS_ERR_OR_NULL(tegra_host->schmitt_enable[0]))
5588 dev_dbg(dev, "Missing schmitt enable state\n");
5590 tegra_host->schmitt_enable[1] =
5591 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5592 "sdmmc_clk_schmitt_enable");
5593 if (IS_ERR_OR_NULL(tegra_host->schmitt_enable[1]))
5594 dev_dbg(dev, "Missing clk schmitt enable state\n");
5596 tegra_host->schmitt_disable[0] =
5597 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5598 "sdmmc_schmitt_disable");
5599 if (IS_ERR_OR_NULL(tegra_host->schmitt_disable[0]))
5600 dev_dbg(dev, "Missing schmitt disable state\n")
5602 tegra_host->schmitt_disable[1] =
5603 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5604 "sdmmc_clk_schmitt_disable");
5605 if (IS_ERR_OR_NULL(tegra_host->schmitt_disable[1]))
5606 dev_dbg(dev, "Missing clk schmitt disable state\n");
/* Default to schmitt-disabled on both pad groups when available. */
5608 for (i = 0; i < 2; i++) {
5609 if (!IS_ERR_OR_NULL(tegra_host->schmitt_disable[i])) {
5610 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
5611 tegra_host->schmitt_disable[i]);
5613 dev_warn(dev, "setting schmitt state failed\n");
/* Drive-strength code states (tuning-time vs default). */
5616 tegra_host->drv_code_strength =
5617 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5619 if (IS_ERR_OR_NULL(tegra_host->drv_code_strength))
5620 dev_dbg(dev, "Missing sdmmc drive code state\n");
5622 tegra_host->default_drv_code_strength =
5623 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5624 "sdmmc_default_drv_code");
5625 if (IS_ERR_OR_NULL(tegra_host->default_drv_code_strength))
5626 dev_dbg(dev, "Missing sdmmc default drive code state\n");
5628 /* Apply the default_mode settings to all modes of SD/MMC
5629 initially and then later update the pad strengths depending
5630 upon the states specified if any */
5631 pctl_state = pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5633 if (IS_ERR_OR_NULL(pctl_state)) {
5634 dev_dbg(dev, "Missing default mode pad control state\n");
5637 for (i = 0; i < MMC_TIMINGS_MAX_MODES; i++)
5638 tegra_host->sdmmc_pad_ctrl[i] = pctl_state;
/* SDR50 pad state also covers DDR50 (same pad requirements here). */
5641 pctl_state = pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5643 if (IS_ERR_OR_NULL(pctl_state)) {
5644 dev_dbg(dev, "Missing sdr50 pad control state\n");
5647 tegra_host->sdmmc_pad_ctrl[MMC_TIMING_UHS_SDR50] = pctl_state;
5648 tegra_host->sdmmc_pad_ctrl[MMC_TIMING_UHS_DDR50] = pctl_state;
5651 pctl_state = pinctrl_lookup_state(tegra_host->pinctrl_sdmmc,
5653 if (IS_ERR_OR_NULL(pctl_state)) {
5654 dev_dbg(dev, "Missing sdr104 pad control state\n");
5657 tegra_host->sdmmc_pad_ctrl[MMC_TIMING_UHS_SDR104] = pctl_state;
5660 /*Select the default state*/
5661 if (!IS_ERR_OR_NULL(tegra_host->sdmmc_pad_ctrl[MMC_TIMING_MMC_HS])) {
5662 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
5663 tegra_host->sdmmc_pad_ctrl[MMC_TIMING_MMC_HS]);
5665 dev_warn(dev, "setting default pad state failed\n");
/* Drive-group selector for per-timing drive strength programming. */
5669 tegra_host->pinctrl = pinctrl_get_dev_from_of_property(np,
5670 "drive-pin-pinctrl");
5671 if (!tegra_host->pinctrl)
5674 drive_gname = of_get_property(np, "drive-pin-name", NULL);
5675 tegra_host->drive_group_sel = pinctrl_get_selector_from_group_name(
5676 tegra_host->pinctrl, drive_gname);
/*
 * Match table passed to genpd_dev_pm_add() in probe to attach the device
 * to its power domain via DT.  NOTE(review): the sentinel entry and
 * closing brace are not visible in this extract.
 */
5680 static const struct of_device_id sdhci_tegra_device_match[] = {
5681 { .compatible = "nvidia,tegra124-sdhci", },
/*
 * sdhci_tegra_probe - platform driver probe.
 *
 * High-level sequence (as visible in this extract):
 *   1. Pick per-SoC data from the OF match table (compile-time fallback).
 *   2. sdhci_pltfm_init() and platform data (board file or DT parse).
 *   3. Apply runtime-PM quirks, reject bonded-out instances.
 *   4. Set DMA masks, allocate tegra_host + stats, resolve PLL parents.
 *   5. Request power/cd/wp GPIOs, attach power domain, acquire clocks,
 *      reset the controller, init pinctrl and IO-voltage ranges.
 *   6. Acquire regulators, do the initial signal-voltage switch.
 *   7. Patch soc_data for automotive speedo parts, set mmc caps/caps2.
 *   8. sdhci_add_host(), card-detect IRQ, debugfs, reboot notifier.
 *
 * NOTE(review): this extract has many omitted lines (error-path labels,
 * returns, braces).  The error-unwind tail starting around the
 * gpio_free/clk_put sequence belongs to goto labels whose names are not
 * all visible here.  Code kept byte-identical.
 */
5685 static int sdhci_tegra_probe(struct platform_device *pdev)
5687 const struct of_device_id *match;
5688 const struct sdhci_tegra_soc_data *soc_data;
5689 struct sdhci_host *host;
5690 struct sdhci_pltfm_host *pltfm_host;
5691 struct tegra_sdhci_platform_data *plat;
5692 struct sdhci_tegra *tegra_host;
5693 unsigned int low_freq;
5694 unsigned int signal_voltage = 0;
5695 const char *parent_clk_list[TEGRA_SDHCI_MAX_PLL_SOURCE];
5698 u32 opt_subrevision;
5701 for (i = 0; i < ARRAY_SIZE(parent_clk_list); i++)
5702 parent_clk_list[i] = NULL;
/* Prefer OF match data; fall back to compile-time SoC selection. */
5703 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
5705 soc_data = match->data;
5707 /* Use id tables and remove the following chip defines */
5708 #if defined(CONFIG_ARCH_TEGRA_11x_SOC)
5709 soc_data = &soc_data_tegra11;
5710 #elif defined(CONFIG_ARCH_TEGRA_21x_SOC)
5711 soc_data = &soc_data_tegra21;
5713 soc_data = &soc_data_tegra12;
5717 host = sdhci_pltfm_init(pdev, soc_data->pdata);
5720 return PTR_ERR(host);
5722 pltfm_host = sdhci_priv(host);
/* Platform data: board file if present, otherwise parsed from DT. */
5724 plat = pdev->dev.platform_data;
5727 plat = sdhci_tegra_dt_parse_pdata(pdev);
5729 dev_err(mmc_dev(host->mmc), "missing platform data\n");
5733 pr_info("%s: %s line=%d runtime pm type=%s, disable-clock-gate=%d\n",
5734 mmc_hostname(host->mmc), __func__, __LINE__,
5735 GET_RTPM_TYPE(plat->rtpm_type),
5736 plat->disable_clock_gate);
/* Board-file path: force the delayed-clock-gate runtime-PM type. */
5738 pr_err("%s using board files instead of DT\n",
5739 mmc_hostname(host->mmc));
5740 plat->rtpm_type = RTPM_TYPE_DELAY_CG;
5743 /* sdio delayed clock gate quirk in sdhci_host used */
5744 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
5745 host->quirks2 |= SDHCI_QUIRK2_DELAYED_CLK_GATE;
5746 if (IS_MMC_RTPM(plat->rtpm_type))
5747 host->quirks2 |= SDHCI_QUIRK2_MMC_RTPM;
/* This SDMMC instance may be fused away on this part. */
5749 if (sdhci_tegra_check_bondout(plat->id)) {
5750 dev_err(mmc_dev(host->mmc), "bonded out\n");
5755 /* FIXME: This is for until dma-mask binding is supported in DT.
5756 * Set coherent_dma_mask for each Tegra SKUs.
5757 * If dma_mask is NULL, set it to coherent_dma_mask. */
5758 if (soc_data == &soc_data_tegra11)
5759 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
5761 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
5763 if (!pdev->dev.dma_mask)
5764 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
5766 tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
5768 dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
5773 tegra_host->dev = &pdev->dev;
5774 tegra_host->plat = plat;
5775 pdev->dev.platform_data = plat;
/* Per-host error statistics block (exposed via debugfs later). */
5777 tegra_host->sd_stat_head = devm_kzalloc(&pdev->dev,
5778 sizeof(struct sdhci_tegra_sd_stats), GFP_KERNEL);
5779 if (!tegra_host->sd_stat_head) {
5780 dev_err(mmc_dev(host->mmc), "failed to allocate sd_stat_head\n");
5785 tegra_host->soc_data = soc_data;
5786 pltfm_host->priv = tegra_host;
5788 /* check if DT provide list possible pll parents */
5789 if (sdhci_tegra_get_pll_from_dt(pdev,
5790 &parent_clk_list[0], ARRAY_SIZE(parent_clk_list))) {
5791 parent_clk_list[0] = soc_data->parent_clk_list[0];
5792 parent_clk_list[1] = soc_data->parent_clk_list[1];
/* Resolve each candidate parent PLL and record its current rate. */
5795 for (i = 0; i < ARRAY_SIZE(parent_clk_list); i++) {
5796 if (!parent_clk_list[i])
5798 tegra_host->pll_source[i].pll = clk_get_sys(NULL,
5799 parent_clk_list[i]);
5800 if (IS_ERR(tegra_host->pll_source[i].pll)) {
5801 rc = PTR_ERR(tegra_host->pll_source[i].pll);
5802 dev_err(mmc_dev(host->mmc),
5803 "clk[%d] error in getting %s: %d\n",
5804 i, parent_clk_list[i], rc);
5807 tegra_host->pll_source[i].pll_rate =
5808 clk_get_rate(tegra_host->pll_source[i].pll);
5810 dev_info(mmc_dev(host->mmc), "Parent select= %s rate=%ld\n",
5811 parent_clk_list[i], tegra_host->pll_source[i].pll_rate);
5814 #ifdef CONFIG_MMC_EMBEDDED_SDIO
5815 if (plat->mmc_data.embedded_sdio)
5816 mmc_set_embedded_sdio_data(host->mmc,
5817 &plat->mmc_data.embedded_sdio->cis,
5818 &plat->mmc_data.embedded_sdio->cccr,
5819 plat->mmc_data.embedded_sdio->funcs,
5820 plat->mmc_data.embedded_sdio->num_funcs);
/* Slot power GPIO: request and drive high to power the slot. */
5823 if (gpio_is_valid(plat->power_gpio)) {
5824 rc = gpio_request(plat->power_gpio, "sdhci_power");
5826 dev_err(mmc_dev(host->mmc),
5827 "failed to allocate power gpio\n");
5830 gpio_direction_output(plat->power_gpio, 1);
/* Card-detect GPIO is active-low: present when the line reads 0. */
5833 if (gpio_is_valid(plat->cd_gpio)) {
5834 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
5836 dev_err(mmc_dev(host->mmc),
5837 "failed to allocate cd gpio\n");
5840 gpio_direction_input(plat->cd_gpio);
5842 tegra_host->card_present =
5843 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
5845 } else if (plat->mmc_data.register_status_notify) {
5846 plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host);
5849 if (plat->mmc_data.status) {
5850 plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc));
5853 if (gpio_is_valid(plat->wp_gpio)) {
5854 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
5856 dev_err(mmc_dev(host->mmc),
5857 "failed to allocate wp gpio\n");
5860 gpio_direction_input(plat->wp_gpio);
5864 * If there is no card detect gpio, assume that the
5865 * card is always present.
5867 if (!gpio_is_valid(plat->cd_gpio))
5868 tegra_host->card_present = 1;
/* Attach to the power domain: DT genpd first, legacy API as fallback. */
5870 ret = genpd_dev_pm_add(sdhci_tegra_device_match, &pdev->dev);
5872 pr_err("Could not add %s to power domain using device tree\n",
5873 dev_name(&pdev->dev));
5876 tegra_pd_add_device(&pdev->dev);
5877 /* Get the ddr clock */
5878 tegra_host->ddr_clk = clk_get(mmc_dev(host->mmc), "ddr");
5879 if (IS_ERR(tegra_host->ddr_clk)) {
5880 dev_err(mmc_dev(host->mmc), "ddr clk err\n");
5881 tegra_host->ddr_clk = NULL;
5884 /* Get high speed clock */
5885 tegra_host->sdr_clk = clk_get(mmc_dev(host->mmc), NULL);
5886 if (IS_ERR(tegra_host->sdr_clk)) {
5887 dev_err(mmc_dev(host->mmc), "sdr clk err\n");
5888 tegra_host->sdr_clk = NULL;
5889 /* If both ddr and sdr clks are missing, then fail probe */
5890 if (!tegra_host->ddr_clk && !tegra_host->sdr_clk) {
5891 dev_err(mmc_dev(host->mmc),
5892 "Failed to get ddr and sdr clks\n");
/* Prefer the SDR clock as the working clock when available. */
5898 if (tegra_host->sdr_clk) {
5899 pltfm_host->clk = tegra_host->sdr_clk;
5900 tegra_host->is_ddr_clk_set = false;
5902 pltfm_host->clk = tegra_host->ddr_clk;
5903 tegra_host->is_ddr_clk_set = true;
5906 if (clk_get_parent(pltfm_host->clk) == tegra_host->pll_source[0].pll)
5907 tegra_host->is_parent_pll_source_1 = true;
5909 /* enable clocks first time */
5910 rc = clk_prepare_enable(pltfm_host->clk);
5914 /* Reset the sdhci controller to clear all previous status.*/
5915 tegra_periph_reset_assert(pltfm_host->clk);
5917 tegra_periph_reset_deassert(pltfm_host->clk);
/* Optional EMC/AHB bandwidth clocks; absence is tolerated. */
5919 tegra_host->emc_clk = devm_clk_get(mmc_dev(host->mmc), "emc");
5920 if (IS_ERR_OR_NULL(tegra_host->emc_clk)) {
5921 dev_err(mmc_dev(host->mmc), "Can't get emc clk\n");
5922 tegra_host->emc_clk = NULL;
5924 clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
5927 tegra_host->sclk = devm_clk_get(mmc_dev(host->mmc), "sclk");
5928 if (IS_ERR_OR_NULL(tegra_host->sclk)) {
5929 dev_err(mmc_dev(host->mmc), "Can't get sclk clock\n");
5930 tegra_host->sclk = NULL;
5932 clk_set_rate(tegra_host->sclk, SDMMC_AHB_MAX_FREQ);
5934 pltfm_host->priv = tegra_host;
5935 tegra_host->clk_enabled = true;
5936 host->is_clk_on = true;
5937 mutex_init(&tegra_host->set_clock_mutex);
5939 tegra_host->max_clk_limit = plat->max_clk_limit;
5940 tegra_host->ddr_clk_limit = plat->ddr_clk_limit;
5942 sdhci_tegra_init_pinctrl_info(&pdev->dev, tegra_host, plat);
/* Derive the vddio voltage window (uV) from the OCR mask. */
5944 if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) {
5945 tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN;
5946 tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX;
5947 } else if (plat->mmc_data.ocr_mask & MMC_OCR_2V8_MASK) {
5948 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_2V8;
5949 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
5950 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V2_MASK) {
5951 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V2;
5952 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
5953 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V3_MASK) {
5954 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V3;
5955 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
5958 * Set the minV and maxV to default
5959 * voltage range of 2.7V - 3.6V
5961 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN;
5962 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
/* Tegra21 A01 early-subrevision parts need the vddio cap (fuse check). */
5965 if (plat->is_sd_device &&
5966 (tegra_get_chipid() == TEGRA_CHIPID_TEGRA21) &&
5967 (tegra_chip_get_revision() == TEGRA_REVISION_A01)) {
5968 opt_subrevision = tegra_get_fuse_opt_subrevision();
5969 if ((opt_subrevision == 0) || (opt_subrevision == 1))
5970 plat->limit_vddio_max_volt = true;
5973 if (plat->limit_vddio_max_volt) {
5974 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_2V8;
5975 tegra_host->vddio_max_uv = SDHOST_MAX_VOLT_SUPPORT;
/* Both regulators are optional; missing ones are logged and NULLed. */
5978 tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc),
5980 tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc),
5983 if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) {
5984 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
5985 "Assuming vddio_sdmmc is not required.\n",
5986 "vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg));
5987 tegra_host->vdd_io_reg = NULL;
5990 if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) {
5991 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
5992 " Assuming vddio_sd_slot is not required.\n",
5993 "vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg));
5994 tegra_host->vdd_slot_reg = NULL;
/* Power the card only if something is (or may be) in the slot. */
5997 if ((tegra_host->vdd_slot_reg || tegra_host->vdd_io_reg) &&
5998 (tegra_host->card_present)) {
5999 rc = tegra_sdhci_configure_regulators(tegra_host,
6000 CONFIG_REG_EN, 0, 0);
6002 dev_err(mmc_dev(host->mmc),
6003 "Enable regulators failed in probe %d\n", rc);
/* Initial signal voltage: 1.8V for low-volt OCR masks, else 3.3V. */
6007 if (plat && (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK))
6008 signal_voltage = MMC_SIGNAL_VOLTAGE_180;
6010 signal_voltage = MMC_SIGNAL_VOLTAGE_330;
6011 rc = tegra_sdhci_signal_voltage_switch(host, signal_voltage);
6013 dev_err(mmc_dev(host->mmc),
6014 "Init volt(%duV-%duV) setting failed %d\n",
6015 tegra_host->vddio_min_uv,
6016 tegra_host->vddio_max_uv, rc);
6017 regulator_put(tegra_host->vdd_io_reg);
6018 tegra_host->vdd_io_reg = NULL;
6022 tegra_host->tap_cmd = TAP_CMD_TRIM_DEFAULT_VOLTAGE;
6023 tegra_host->speedo = tegra_soc_speedo_0_value();
6024 dev_info(mmc_dev(host->mmc), "Speedo value %d\n", tegra_host->speedo);
6026 /* update t2t and tap_hole for automotive speedo */
6027 if (tegra_is_soc_automotive_speedo() &&
6028 (soc_data == &soc_data_tegra12)) {
6029 soc_data_tegra12.t2t_coeffs = t12x_automotive_tuning_coeffs;
6030 soc_data_tegra12.t2t_coeffs_count =
6031 ARRAY_SIZE(t12x_automotive_tuning_coeffs);
6032 soc_data_tegra12.tap_hole_coeffs =
6033 t12x_automotive_tap_hole_coeffs;
6034 soc_data_tegra12.tap_hole_coeffs_count =
6035 ARRAY_SIZE(t12x_automotive_tap_hole_coeffs);
6036 /* For automotive SDR50 mode POR frequency is 99Mhz */
6037 soc_data_tegra12.tuning_freq_list[0] = 99000000;
6038 soc_data_tegra12.nvquirks |=
6039 NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS;
6040 soc_data_tegra12.tap_hole_margins =
6041 t12x_automotive_tap_hole_margins;
6042 soc_data_tegra12.tap_hole_margins_count =
6043 ARRAY_SIZE(t12x_automotive_tap_hole_margins);
6044 /* feedback clock need to be enabled for non-tuning timing */
6045 if (plat->enb_ext_loopback)
6046 plat->enb_feedback_clock = true;
/* Compose MMC core capabilities from platform data and SoC quirks. */
6048 host->mmc->pm_caps |= plat->pm_caps;
6049 host->mmc->pm_flags |= plat->pm_flags;
6050 host->mmc->caps |= MMC_CAP_ERASE;
6051 /* enable 1/8V DDR capable */
6052 host->mmc->caps |= MMC_CAP_1_8V_DDR;
6054 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
6055 host->mmc->caps |= MMC_CAP_SDIO_IRQ;
6056 host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
6057 if (plat->mmc_data.built_in) {
6058 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
6060 host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
6062 if (plat->cd_wakeup_incapable)
6063 host->mmc->pm_flags &= ~MMC_PM_IGNORE_PM_NOTIFY;
6065 host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
6067 /* disable access to boot partitions */
6068 host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
6070 if (soc_data->nvquirks & NVQUIRK_ENABLE_HS200)
6071 host->mmc->caps2 |= MMC_CAP2_HS200;
6073 if (soc_data->nvquirks & NVQUIRK_ENABLE_HS400)
6074 host->mmc->caps2 |= MMC_CAP2_HS400;
/* HS533 is only meaningful on top of HS400 support. */
6076 if ((plat->enable_hs533_mode) && (host->mmc->caps2 & MMC_CAP2_HS400))
6077 host->mmc->caps2 |= MMC_CAP2_HS533;
6079 if (soc_data->nvquirks & NVQUIRK_ENABLE_AUTO_CMD23)
6080 host->mmc->caps |= MMC_CAP_CMD23;
6082 if ((host->mmc->caps2 & MMC_CAP2_HS400) && (plat->en_strobe))
6083 host->mmc->caps2 |= MMC_CAP2_EN_STROBE;
6085 if (plat->enable_cq)
6086 host->mmc->caps2 |= MMC_CAP2_CQ;
6088 host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
6089 host->mmc->caps2 |= MMC_CAP2_PACKED_CMD;
6092 * Enable dyamic frequency scaling support only if the platform clock
6093 * limit is higher than the lowest supported frequency by tuning.
6095 for (i = 0; i < TUNING_FREQ_COUNT; i++) {
6096 low_freq = soc_data->tuning_freq_list[i];
6100 if (plat->en_freq_scaling && (plat->max_clk_limit > low_freq))
6101 host->mmc->caps2 |= MMC_CAP2_FREQ_SCALING;
6103 if (plat->en_periodic_calib)
6104 host->quirks2 |= SDHCI_QUIRK2_PERIODIC_CALIBRATION;
6106 if (plat->pwr_off_during_lp0)
6107 host->mmc->caps2 |= MMC_CAP2_NO_SLEEP_CMD;
6109 if (IS_RTPM_DELAY_CG(plat->rtpm_type) && (!plat->disable_clock_gate))
6110 host->mmc->caps2 |= MMC_CAP2_CLOCK_GATING;
/* Record vcore DVFS levels used as constraints during tuning. */
6111 tegra_host->nominal_vcore_mv =
6112 tegra_dvfs_get_core_nominal_millivolts();
6113 tegra_host->min_vcore_override_mv =
6114 tegra_dvfs_get_core_override_floor();
6115 tegra_host->boot_vcore_mv = tegra_dvfs_get_core_boot_level();
6116 dev_info(mmc_dev(host->mmc),
6117 "Tuning constraints: nom_mv %d, boot_mv %d, min_or_mv %d\n",
6118 tegra_host->nominal_vcore_mv, tegra_host->boot_vcore_mv,
6119 tegra_host->min_vcore_override_mv);
6122 * If nominal voltage is equal to boot voltage, there is no need for
6123 * nominal voltage tuning.
6125 if (tegra_host->nominal_vcore_mv <= tegra_host->boot_vcore_mv)
6126 plat->en_nominal_vcore_tuning = false;
6128 if (IS_RTPM_DELAY_CG(plat->rtpm_type))
6129 INIT_DELAYED_WORK(&host->delayed_clk_gate_wrk,
6130 delayed_clk_gate_cb)
6131 rc = sdhci_add_host(host);
/* Threaded IRQ on both card-detect edges (insert and remove). */
6135 if (gpio_is_valid(plat->cd_gpio)) {
6136 rc = request_threaded_irq(gpio_to_irq(plat->cd_gpio), NULL,
6138 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
6139 mmc_hostname(host->mmc), host);
6141 dev_err(mmc_dev(host->mmc), "request irq error\n");
6142 goto err_cd_irq_req;
6145 sdhci_tegra_error_stats_debugfs(host);
6146 sdhci_tegra_misc_debugfs(host);
6147 device_create_file(&pdev->dev, &dev_attr_cmd_state);
6149 if (plat->power_off_rail) {
6150 tegra_host->reboot_notify.notifier_call =
6151 tegra_sdhci_reboot_notify;
6152 register_reboot_notifier(&tegra_host->reboot_notify);
6154 #ifdef CONFIG_DEBUG_FS
6155 tegra_host->dbg_cfg.tap_val =
6157 tegra_host->dbg_cfg.trim_val =
6158 plat->ddr_trim_delay;
6159 tegra_host->dbg_cfg.clk_ungated =
6160 plat->disable_clock_gate;
6162 mmc_of_parse(host->mmc);
/*
 * Error unwind (labels omitted in this extract): release cd GPIO,
 * disable/put clocks, free wp/power GPIOs, free the cd IRQ, and
 * finally sdhci_pltfm_free().
 */
6166 if (gpio_is_valid(plat->cd_gpio))
6167 gpio_free(plat->cd_gpio);
6169 if (tegra_host->is_ddr_clk_set)
6170 clk_disable_unprepare(tegra_host->ddr_clk);
6172 clk_disable_unprepare(tegra_host->sdr_clk);
6175 if (tegra_host->ddr_clk)
6176 clk_put(tegra_host->ddr_clk);
6177 if (tegra_host->sdr_clk)
6178 clk_put(tegra_host->sdr_clk);
6180 if (gpio_is_valid(plat->wp_gpio))
6181 gpio_free(plat->wp_gpio);
6183 if (gpio_is_valid(plat->cd_gpio))
6184 free_irq(gpio_to_irq(plat->cd_gpio), host);
6186 if (gpio_is_valid(plat->power_gpio))
6187 gpio_free(plat->power_gpio);
6190 sdhci_pltfm_free(pdev);
/*
 * sdhci_tegra_remove - undo probe in reverse order.
 *
 * Reads SDHCI_INT_STATUS first: all-ones means the controller is gone
 * (e.g. card/power already removed), which is passed to sdhci_remove_host()
 * as 'dead' so it skips hardware access.  Then disables regulators, frees
 * GPIOs/IRQ, disables and releases clocks, and unregisters the reboot
 * notifier.  tegra_host/plat/sd_stat_head were devm-allocated and are
 * released automatically.
 */
6194 static int sdhci_tegra_remove(struct platform_device *pdev)
6196 struct sdhci_host *host = platform_get_drvdata(pdev);
6197 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
6198 struct sdhci_tegra *tegra_host = pltfm_host->priv;
6199 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
6200 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
6203 sdhci_remove_host(host, dead);
6205 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_DIS, 0, 0);
6207 dev_err(mmc_dev(host->mmc),
6208 "Regulator disable in remove failed %d\n", rc);
6210 if (tegra_host->vdd_slot_reg)
6211 regulator_put(tegra_host->vdd_slot_reg);
6212 if (tegra_host->vdd_io_reg)
6213 regulator_put(tegra_host->vdd_io_reg);
6215 if (gpio_is_valid(plat->wp_gpio))
6216 gpio_free(plat->wp_gpio);
6218 if (gpio_is_valid(plat->cd_gpio)) {
6219 free_irq(gpio_to_irq(plat->cd_gpio), host);
6220 gpio_free(plat->cd_gpio);
6223 if (gpio_is_valid(plat->power_gpio))
6224 gpio_free(plat->power_gpio);
/* Only the clock actually selected as pltfm clk is currently enabled. */
6226 if (tegra_host->clk_enabled) {
6227 if (tegra_host->is_ddr_clk_set)
6228 clk_disable_unprepare(tegra_host->ddr_clk);
6230 clk_disable_unprepare(tegra_host->sdr_clk);
6233 if (tegra_host->ddr_clk)
6234 clk_put(tegra_host->ddr_clk);
6235 if (tegra_host->sdr_clk)
6236 clk_put(tegra_host->sdr_clk);
/* emc/sclk are devm clocks; just disable them if still running. */
6238 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on)
6239 clk_disable_unprepare(tegra_host->emc_clk);
6240 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on)
6241 clk_disable_unprepare(tegra_host->sclk);
6242 if (plat->power_off_rail)
6243 unregister_reboot_notifier(&tegra_host->reboot_notify);
6245 sdhci_pltfm_free(pdev);
/*
 * sdhci_tegra_shutdown - platform shutdown hook.
 *
 * With CONFIG_MMC_RTPM enabled, forbid runtime PM so no clock-gating or
 * suspend activity races the system shutdown.  Without it the function
 * body is empty.  NOTE(review): the matching #endif is in omitted lines.
 */
6250 static void sdhci_tegra_shutdown(struct platform_device *pdev)
6252 #ifdef CONFIG_MMC_RTPM
6253 struct sdhci_host *host = platform_get_drvdata(pdev);
6254 dev_dbg(&pdev->dev, " %s shutting down\n",
6255 mmc_hostname(host->mmc));
6256 /* applies to delayed clock gate RTPM and MMC RTPM cases */
6257 sdhci_runtime_forbid(host);
/*
 * Platform driver registration.  PM callbacks come from the shared
 * sdhci-pltfm SDHCI_PLTFM_PMOPS; probe/remove/shutdown are defined above.
 * module_platform_driver() expands to module init/exit that register and
 * unregister the driver.
 */
6261 static struct platform_driver sdhci_tegra_driver = {
6263 .name = "sdhci-tegra",
6264 .owner = THIS_MODULE,
6265 .of_match_table = sdhci_tegra_dt_match,
6266 .pm = SDHCI_PLTFM_PMOPS,
6268 .probe = sdhci_tegra_probe,
6269 .remove = sdhci_tegra_remove,
6270 .shutdown = sdhci_tegra_shutdown,
6273 module_platform_driver(sdhci_tegra_driver);
6275 MODULE_DESCRIPTION("SDHCI driver for Tegra");
6276 MODULE_AUTHOR("Google, Inc.");
6277 MODULE_LICENSE("GPL v2");