2 * Copyright (C) 2010 Google, Inc.
4 * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/err.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/platform_device.h>
21 #include <linux/clk.h>
24 #include <linux/of_device.h>
25 #include <linux/of_gpio.h>
26 #include <linux/gpio.h>
27 #include <linux/slab.h>
28 #include <linux/mmc/card.h>
29 #include <linux/mmc/host.h>
30 #include <linux/module.h>
31 #include <linux/mmc/sd.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/delay.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/tegra_pm_domains.h>
36 #include <linux/dma-mapping.h>
41 #include <linux/debugfs.h>
42 #include <linux/seq_file.h>
43 #include <linux/reboot.h>
44 #include <linux/devfreq.h>
45 #include <linux/clk/tegra.h>
46 #include <linux/tegra-soc.h>
48 #include <linux/platform_data/mmc-sdhci-tegra.h>
49 #include <mach/pinmux.h>
51 #include "sdhci-pltfm.h"
54 #define SDHCI_TEGRA_DBG(stuff...) pr_info(stuff)
56 #define SDHCI_TEGRA_DBG(stuff...) do {} while (0)
59 #define SDHCI_VNDR_CLK_CTRL 0x100
60 #define SDHCI_VNDR_CLK_CTRL_SDMMC_CLK 0x1
61 #define SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE 0x8
62 #define SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE 0x4
63 #define SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK 0x2
64 #define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT 16
65 #define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT 24
66 #define SDHCI_VNDR_CLK_CTRL_SDR50_TUNING 0x20
67 #define SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK 0x2
68 #define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK 0xFF
69 #define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK 0x1F
71 #define SDHCI_VNDR_MISC_CTRL 0x120
72 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT 0x8
73 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT 0x10
74 #define SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT 0x200
75 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0 0x20
76 #define SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT 0x1
77 #define SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK 0x180
78 #define SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT 17
80 #define SDHCI_VNDR_PRESET_VAL0_0 0x1d4
81 #define SDCLK_FREQ_SEL_HS_SHIFT 20
82 #define SDCLK_FREQ_SEL_DEFAULT_SHIFT 10
84 #define SDHCI_VNDR_PRESET_VAL1_0 0x1d8
85 #define SDCLK_FREQ_SEL_SDR50_SHIFT 20
86 #define SDCLK_FREQ_SEL_SDR25_SHIFT 10
88 #define SDHCI_VNDR_PRESET_VAL2_0 0x1dc
89 #define SDCLK_FREQ_SEL_DDR50_SHIFT 10
91 #define SDMMC_SDMEMCOMPPADCTRL 0x1E0
92 #define SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK 0xF
93 #define SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK 0x80000000
95 #define SDMMC_AUTO_CAL_CONFIG 0x1E4
96 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START 0x80000000
97 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE 0x20000000
98 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT 0x8
100 #define SDMMC_AUTO_CAL_STATUS 0x1EC
101 #define SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE 0x80000000
102 #define SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET 24
103 #define PULLUP_ADJUSTMENT_OFFSET 20
105 #define SDMMC_VENDOR_ERR_INTR_STATUS_0 0x108
107 #define SDMMC_IO_SPARE_0 0x1F0
108 #define SPARE_OUT_3_OFFSET 19
110 #define SDMMC_VENDOR_IO_TRIM_CNTRL_0 0x1AC
111 #define SDMMC_VENDOR_IO_TRIM_CNTRL_0_SEL_VREG_MASK 0x4
113 /* Erratum: Version register is invalid in HW */
114 #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
115 /* Erratum: Enable block gap interrupt detection */
116 #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
117 /* Do not enable auto calibration if the platform doesn't support */
118 #define NVQUIRK_DISABLE_AUTO_CALIBRATION BIT(2)
119 /* Set Calibration Offsets */
120 #define NVQUIRK_SET_CALIBRATION_OFFSETS BIT(3)
121 /* Set Drive Strengths */
122 #define NVQUIRK_SET_DRIVE_STRENGTH BIT(4)
123 /* Enable PADPIPE CLKEN */
124 #define NVQUIRK_ENABLE_PADPIPE_CLKEN BIT(5)
125 /* DISABLE SPI_MODE CLKEN */
126 #define NVQUIRK_DISABLE_SPI_MODE_CLKEN BIT(6)
128 #define NVQUIRK_SET_TAP_DELAY BIT(7)
130 #define NVQUIRK_SET_TRIM_DELAY BIT(8)
131 /* Enable SDHOST v3.0 support */
132 #define NVQUIRK_ENABLE_SD_3_0 BIT(9)
133 /* Enable SDR50 mode */
134 #define NVQUIRK_ENABLE_SDR50 BIT(10)
135 /* Enable SDR104 mode */
136 #define NVQUIRK_ENABLE_SDR104 BIT(11)
137 /*Enable DDR50 mode */
138 #define NVQUIRK_ENABLE_DDR50 BIT(12)
139 /* Enable Frequency Tuning for SDR50 mode */
140 #define NVQUIRK_ENABLE_SDR50_TUNING BIT(13)
141 /* Enable HS200 mode */
142 #define NVQUIRK_ENABLE_HS200 BIT(14)
143 /* Enable Infinite Erase Timeout*/
144 #define NVQUIRK_INFINITE_ERASE_TIMEOUT BIT(15)
145 /* No Calibration for sdmmc4 */
146 #define NVQUIRK_DISABLE_SDMMC4_CALIB BIT(16)
147 /* ENABLE FEEDBACK IO CLOCK */
148 #define NVQUIRK_EN_FEEDBACK_CLK BIT(17)
149 /* Disable AUTO CMD23 */
150 #define NVQUIRK_DISABLE_AUTO_CMD23 BIT(18)
151 /* Shadow write xfer mode reg and write it along with CMD register */
152 #define NVQUIRK_SHADOW_XFER_MODE_REG BIT(19)
153 /* update PAD_E_INPUT_OR_E_PWRD bit */
154 #define NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD BIT(20)
155 /* Set the pipe stages mask bits (comment was a copy-paste of the shadow xfer note) */
156 #define NVQUIRK_SET_PIPE_STAGES_MASK_0 BIT(21)
157 #define NVQUIRK_HIGH_FREQ_TAP_PROCEDURE BIT(22)
158 /* Disable SDMMC3 external loopback */
159 #define NVQUIRK_DISABLE_EXTERNAL_LOOPBACK BIT(23)
160 /* Select fix tap hole margins */
161 #define NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS BIT(24)
162 /* Enable HS400 mode */
163 #define NVQUIRK_ENABLE_HS400 BIT(26)
164 /* Enable AUTO CMD23 */
165 #define NVQUIRK_ENABLE_AUTO_CMD23 BIT(27)
166 #define NVQUIRK_SET_SDMEMCOMP_VREF_SEL BIT(28)
167 /* Special PAD control register settings are needed for T210 */
168 #define NVQUIRK_UPDATE_PAD_CNTRL_REG BIT(29)
169 #define NVQUIRK_UPDATE_PIN_CNTRL_REG BIT(30)
170 /* Use timeout clk for write crc status data timeout counter */
171 #define NVQUIRK_USE_TMCLK_WR_CRC_TIMEOUT BIT(31)
173 /* Enable T210 specific SDMMC WAR - sd card voltage switch */
174 #define NVQUIRK2_CONFIG_PWR_DET BIT(0)
175 /* Enable T210 specific SDMMC WAR - Tuning Step Size, Tuning Iterations*/
176 #define NVQUIRK2_UPDATE_HW_TUNING_CONFG BIT(1)
178 /* Common subset of quirks for Tegra3 and later sdmmc controllers */
179 #define TEGRA_SDHCI_NVQUIRKS (NVQUIRK_ENABLE_PADPIPE_CLKEN | \
180 NVQUIRK_DISABLE_SPI_MODE_CLKEN | \
181 NVQUIRK_EN_FEEDBACK_CLK | \
182 NVQUIRK_SET_TAP_DELAY | \
183 NVQUIRK_ENABLE_SDR50_TUNING | \
184 NVQUIRK_ENABLE_SDR50 | \
185 NVQUIRK_ENABLE_SDR104 | \
186 NVQUIRK_SHADOW_XFER_MODE_REG | \
187 NVQUIRK_DISABLE_AUTO_CMD23)
189 #define TEGRA_SDHCI_QUIRKS (SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | \
190 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
191 SDHCI_QUIRK_SINGLE_POWER_WRITE | \
192 SDHCI_QUIRK_NO_HISPD_BIT | \
193 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | \
194 SDHCI_QUIRK_BROKEN_CARD_DETECTION)
196 #define TEGRA_SDHCI_QUIRKS2 (SDHCI_QUIRK2_PRESET_VALUE_BROKEN | \
197 SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING | \
198 SDHCI_QUIRK2_NON_STANDARD_TUNING | \
199 SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO | \
200 SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
202 #define IS_QUIRKS2_DELAYED_CLK_GATE(host) \
203 (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
205 /* Interface voltages */
206 #define SDHOST_1V8_OCR_MASK 0x8
207 #define SDHOST_HIGH_VOLT_MIN 2700000
208 #define SDHOST_HIGH_VOLT_MAX 3600000
209 #define SDHOST_HIGH_VOLT_2V8 2800000
210 #define SDHOST_LOW_VOLT_MIN 1800000
211 #define SDHOST_LOW_VOLT_MAX 1800000
212 #define SDHOST_HIGH_VOLT_3V2 3200000
213 #define SDHOST_HIGH_VOLT_3V3 3300000
215 /* Clock related definitions */
216 #define MAX_DIVISOR_VALUE 128
217 #define DEFAULT_SDHOST_FREQ 50000000
218 #define SDMMC_AHB_MAX_FREQ 150000000
219 #define SDMMC_EMC_MAX_FREQ 150000000
220 #define SDMMC_EMC_NOM_VOLT_FREQ 900000000
222 /* Tuning related definitions */
223 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8 128
224 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4 64
225 #define MAX_TAP_VALUES 255
226 #define TUNING_FREQ_COUNT 3
227 #define TUNING_VOLTAGES_COUNT 3
228 #define TUNING_RETRIES 1
229 #define DFS_FREQ_COUNT 2
230 #define NEG_MAR_CHK_WIN_COUNT 2
231 /* Tuning core voltage requirements */
232 #define NOMINAL_VCORE_TUN BIT(0)
233 #define BOOT_VCORE_TUN BIT(1)
234 #define MIN_OVERRIDE_VCORE_TUN BIT(2)
236 /* Tap cmd sysfs commands */
237 #define TAP_CMD_TRIM_DEFAULT_VOLTAGE 1
238 #define TAP_CMD_TRIM_HIGH_VOLTAGE 2
241 * Defined the chip specific quirks and clock sources. For now, the used clock
242 * sources vary only from chip to chip. If the sources allowed varies from
243 * platform to platform, then move the clock sources list to platform data.
244 * When filling the tuning_freq_list in soc_data, the number of entries should
245 * be equal to TUNNG_FREQ_COUNT. Depending on number DFS frequencies supported,
246 * set the desired low, high or max frequencies and set the remaining entries
247 * as 0s. The number of entries should always be equal to TUNING_FREQ_COUNT
248 * in order to get the right tuning data.
250 struct sdhci_tegra_soc_data {
251 const struct sdhci_pltfm_data *pdata;
253 const char *parent_clk_list[2];
254 unsigned int tuning_freq_list[TUNING_FREQ_COUNT];
256 u8 tap_hole_coeffs_count;
257 u8 tap_hole_margins_count;
258 struct tuning_t2t_coeffs *t2t_coeffs;
259 struct tap_hole_coeffs *tap_hole_coeffs;
260 struct tuning_tap_hole_margins *tap_hole_margins;
264 enum tegra_regulator_config_ops {
270 enum tegra_tuning_freq {
276 struct tuning_t2t_coeffs {
280 unsigned int t2t_vnom_slope;
281 unsigned int t2t_vnom_int;
282 unsigned int t2t_vmax_slope;
283 unsigned int t2t_vmax_int;
284 unsigned int t2t_vmin_slope;
285 unsigned int t2t_vmin_int;
288 #define SET_TUNING_COEFFS(_device_id, _vmax, _vmin, _t2t_vnom_slope, \
289 _t2t_vnom_int, _t2t_vmax_slope, _t2t_vmax_int, _t2t_vmin_slope, \
292 .dev_id = _device_id, \
295 .t2t_vnom_slope = _t2t_vnom_slope, \
296 .t2t_vnom_int = _t2t_vnom_int, \
297 .t2t_vmax_slope = _t2t_vmax_slope, \
298 .t2t_vmax_int = _t2t_vmax_int, \
299 .t2t_vmin_slope = _t2t_vmin_slope, \
300 .t2t_vmin_int = _t2t_vmin_int, \
303 struct tuning_t2t_coeffs t11x_tuning_coeffs[] = {
304 SET_TUNING_COEFFS("sdhci-tegra.3", 1250, 950, 55, 135434,
305 73, 170493, 243, 455948),
306 SET_TUNING_COEFFS("sdhci-tegra.2", 1250, 950, 50, 129738,
307 73, 168898, 241, 453050),
308 SET_TUNING_COEFFS("sdhci-tegra.0", 1250, 950, 62, 143469,
309 82, 180096, 238, 444285),
312 struct tuning_t2t_coeffs t12x_tuning_coeffs[] = {
313 SET_TUNING_COEFFS("sdhci-tegra.3", 1150, 950, 27, 118295,
314 27, 118295, 48, 188148),
315 SET_TUNING_COEFFS("sdhci-tegra.2", 1150, 950, 29, 124427,
316 29, 124427, 54, 203707),
317 SET_TUNING_COEFFS("sdhci-tegra.0", 1150, 950, 25, 115933,
318 25, 115933, 47, 187224),
321 struct tap_hole_coeffs {
323 unsigned int freq_khz;
324 unsigned int thole_vnom_slope;
325 unsigned int thole_vnom_int;
326 unsigned int thole_vmax_slope;
327 unsigned int thole_vmax_int;
328 unsigned int thole_vmin_slope;
329 unsigned int thole_vmin_int;
332 #define SET_TAP_HOLE_COEFFS(_device_id, _freq_khz, _thole_vnom_slope, \
333 _thole_vnom_int, _thole_vmax_slope, _thole_vmax_int, \
334 _thole_vmin_slope, _thole_vmin_int) \
336 .dev_id = _device_id, \
337 .freq_khz = _freq_khz, \
338 .thole_vnom_slope = _thole_vnom_slope, \
339 .thole_vnom_int = _thole_vnom_int, \
340 .thole_vmax_slope = _thole_vmax_slope, \
341 .thole_vmax_int = _thole_vmax_int, \
342 .thole_vmin_slope = _thole_vmin_slope, \
343 .thole_vmin_int = _thole_vmin_int, \
346 struct tap_hole_coeffs t11x_tap_hole_coeffs[] = {
347 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 765, 102357, 507,
349 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 156000, 1042, 142044, 776,
351 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1215, 167702, 905,
353 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 1925, 284516, 1528,
354 253188, 366, 120001),
355 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 472, 53312, 318,
357 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 156000, 765, 95512, 526,
359 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 949, 121887, 656,
361 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 1901, 259035, 1334,
362 215539, 326, 100986),
363 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 411, 54495, 305,
365 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 156000, 715, 97623, 516,
367 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 905, 124579, 648,
369 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 1893, 264746, 1333,
370 221722, 354, 109880),
373 struct tap_hole_coeffs t12x_tap_hole_coeffs[] = {
374 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 1037, 106934, 1037,
376 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1703, 186307, 1703,
377 186307, 890, 130617),
378 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 100000, 2452, 275601, 2452,
379 275601, 1264, 193957),
380 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 3090, 351666, 3090,
381 351666, 1583, 247913),
382 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 468, 36031, 468,
384 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 200000, 468, 36031, 468,
386 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 1146, 117841, 1146,
388 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 100000, 1879, 206195, 1879,
389 206195, 953, 141341),
390 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 2504, 281460, 2504,
391 281460, 1262, 194452),
392 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 874, 85243, 874,
394 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 1554, 167210, 1554,
395 167210, 793, 115672),
396 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 100000, 2290, 255734, 2290,
397 255734, 1164, 178691),
398 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 2916, 331143, 2916,
399 331143, 1480, 232373),
402 struct tuning_tap_hole_margins {
404 unsigned int tap_hole_margin;
407 #define SET_TUNING_TAP_HOLE_MARGIN(_device_id, _tap_hole_margin) \
409 .dev_id = _device_id, \
410 .tap_hole_margin = _tap_hole_margin, \
413 struct tuning_tap_hole_margins t12x_automotive_tap_hole_margins[] = {
414 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.3", 13),
415 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.2", 7),
416 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.0", 10),
419 struct freq_tuning_constraints {
420 unsigned int vcore_mask;
423 static struct freq_tuning_constraints tuning_vcore_constraints[3] = {
425 .vcore_mask = BOOT_VCORE_TUN,
428 .vcore_mask = BOOT_VCORE_TUN,
431 .vcore_mask = BOOT_VCORE_TUN,
440 enum tap_win_edge_attr {
446 struct tap_window_data {
449 enum tap_win_edge_attr win_start_attr;
450 enum tap_win_edge_attr win_end_attr;
455 struct tuning_values {
463 struct tegra_tuning_data {
464 unsigned int freq_hz;
466 int nom_best_tap_value;
467 struct freq_tuning_constraints constraints;
468 struct tap_hole_coeffs *thole_coeffs;
469 struct tuning_t2t_coeffs *t2t_coeffs;
470 struct tuning_values est_values;
471 struct tuning_values calc_values;
472 struct tap_window_data *tap_data;
473 struct tap_window_data *final_tap_data;
474 u8 num_of_valid_tap_wins;
478 bool is_partial_win_valid;
481 #ifdef CONFIG_MMC_FREQ_SCALING
482 struct freq_gov_params {
484 u8 polling_interval_ms;
485 u8 active_load_threshold;
488 static struct freq_gov_params gov_params[3] = {
490 .idle_mon_cycles = 3,
491 .polling_interval_ms = 50,
492 .active_load_threshold = 25,
495 .idle_mon_cycles = 3,
496 .polling_interval_ms = 50,
497 .active_load_threshold = 25,
500 .idle_mon_cycles = 3,
501 .polling_interval_ms = 50,
502 .active_load_threshold = 25,
507 struct tegra_freq_gov_data {
508 unsigned int curr_active_load;
509 unsigned int avg_active_load;
510 unsigned int act_load_high_threshold;
511 unsigned int max_idle_monitor_cycles;
512 unsigned int curr_freq;
513 unsigned int freqs[DFS_FREQ_COUNT];
514 unsigned int freq_switch_count;
515 bool monitor_idle_load;
518 struct sdhci_tegra_sd_stats {
519 unsigned int data_crc_count;
520 unsigned int cmd_crc_count;
521 unsigned int data_to_count;
522 unsigned int cmd_to_count;
525 #ifdef CONFIG_DEBUG_FS
526 struct dbg_cfg_data {
527 unsigned int tap_val;
528 unsigned int trim_val;
533 const struct tegra_sdhci_platform_data *plat;
534 const struct sdhci_tegra_soc_data *soc_data;
536 /* ensure atomic set clock calls */
537 struct mutex set_clock_mutex;
538 struct regulator *vdd_io_reg;
539 struct regulator *vdd_slot_reg;
540 struct regulator *vcore_reg;
541 /* Host controller instance */
542 unsigned int instance;
544 unsigned int vddio_min_uv;
546 unsigned int vddio_max_uv;
547 /* DDR and low speed modes clock */
549 /* HS200, SDR104 modes clock */
551 /* Check if ddr_clk is being used */
553 /* max clk supported by the platform */
554 unsigned int max_clk_limit;
555 /* max ddr clk supported by the platform */
556 unsigned int ddr_clk_limit;
558 bool is_rail_enabled;
560 bool is_sdmmc_emc_clk_on;
562 bool is_sdmmc_sclk_on;
563 struct sdhci_tegra_sd_stats *sd_stat_head;
564 struct notifier_block reboot_notify;
566 bool set_1v8_calib_offsets;
567 int nominal_vcore_mv;
568 int min_vcore_override_mv;
570 /* Tuning related structures and variables */
571 /* Tuning opcode to be used */
572 unsigned int tuning_opcode;
573 /* Tuning packet size */
574 unsigned int tuning_bsize;
575 /* Num of tuning freqs selected */
576 int tuning_freq_count;
577 unsigned int tap_cmd;
579 unsigned int tuning_status;
581 #define TUNING_STATUS_DONE 1
582 #define TUNING_STATUS_RETUNE 2
583 /* Freq tuning information for each sampling clock freq */
584 struct tegra_tuning_data tuning_data[DFS_FREQ_COUNT];
585 struct tegra_freq_gov_data *gov_data;
587 #ifdef CONFIG_DEBUG_FS
588 /* Override debug config data */
589 struct dbg_cfg_data dbg_cfg;
593 static struct clk *pll_c;
594 static struct clk *pll_p;
595 static unsigned long pll_c_rate;
596 static unsigned long pll_p_rate;
597 static bool vcore_overrides_allowed;
598 static bool maintain_boot_voltage;
599 static unsigned int boot_volt_req_refcount;
600 DEFINE_MUTEX(tuning_mutex);
602 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
603 struct sdhci_host *sdhci, unsigned int clock);
604 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
605 unsigned long desired_rate);
606 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
607 unsigned int tap_delay);
608 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
609 u8 option, int min_uV, int max_uV);
610 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
611 unsigned int trim_delay);
612 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
613 unsigned char signal_voltage);
614 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
615 int t2t_tuning_value);
617 static int show_error_stats_dump(struct seq_file *s, void *data)
619 struct sdhci_host *host = s->private;
620 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
621 struct sdhci_tegra *tegra_host = pltfm_host->priv;
622 struct sdhci_tegra_sd_stats *head;
624 seq_printf(s, "ErrorStatistics:\n");
625 seq_printf(s, "DataCRC\tCmdCRC\tDataTimeout\tCmdTimeout\n");
626 head = tegra_host->sd_stat_head;
628 seq_printf(s, "%d\t%d\t%d\t%d\n", head->data_crc_count,
629 head->cmd_crc_count, head->data_to_count,
634 static int show_dfs_stats_dump(struct seq_file *s, void *data)
636 struct sdhci_host *host = s->private;
637 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
638 struct sdhci_tegra *tegra_host = pltfm_host->priv;
639 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
641 seq_printf(s, "DFS statistics:\n");
643 if (host->mmc->dev_stats != NULL)
644 seq_printf(s, "Polling_period: %d\n",
645 host->mmc->dev_stats->polling_interval);
647 if (gov_data != NULL) {
648 seq_printf(s, "cur_active_load: %d\n",
649 gov_data->curr_active_load);
650 seq_printf(s, "avg_active_load: %d\n",
651 gov_data->avg_active_load);
652 seq_printf(s, "act_load_high_threshold: %d\n",
653 gov_data->act_load_high_threshold);
654 seq_printf(s, "freq_switch_count: %d\n",
655 gov_data->freq_switch_count);
660 static int sdhci_error_stats_dump(struct inode *inode, struct file *file)
662 return single_open(file, show_error_stats_dump, inode->i_private);
665 static int sdhci_dfs_stats_dump(struct inode *inode, struct file *file)
667 return single_open(file, show_dfs_stats_dump, inode->i_private);
/* debugfs file_operations for the error-statistics node (seq_file based).
 * NOTE(review): the .read/.llseek entries (normally seq_read/seq_lseek for
 * a single_open file) are missing from this extraction — confirm against
 * the original file. */
671 static const struct file_operations sdhci_host_fops = {
672 .open = sdhci_error_stats_dump,
675 .release = single_release,
/* debugfs file_operations for the DFS-statistics node (seq_file based).
 * NOTE(review): .read/.llseek entries appear to be missing from this
 * extraction — confirm against the original file. */
678 static const struct file_operations sdhci_host_dfs_fops = {
679 .open = sdhci_dfs_stats_dump,
682 .release = single_release,
685 static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
689 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
690 /* Use wp_gpio here instead? */
691 val = readl(host->ioaddr + reg);
692 return val | SDHCI_WRITE_PROTECT;
694 return readl(host->ioaddr + reg);
697 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
699 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
700 struct sdhci_tegra *tegra_host = pltfm_host->priv;
701 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
703 if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
704 (reg == SDHCI_HOST_VERSION))) {
705 return SDHCI_SPEC_200;
707 return readw(host->ioaddr + reg);
710 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
712 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
713 struct sdhci_tegra *tegra_host = pltfm_host->priv;
714 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
716 /* Seems like we're getting spurious timeout and crc errors, so
717 * disable signalling of them. In case of real errors software
718 * timers should take care of eventually detecting them.
720 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
721 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
723 writel(val, host->ioaddr + reg);
725 if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
726 (reg == SDHCI_INT_ENABLE))) {
727 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
728 if (val & SDHCI_INT_CARD_INT)
732 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
736 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
738 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
739 struct sdhci_tegra *tegra_host = pltfm_host->priv;
740 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
742 if (soc_data->nvquirks & NVQUIRK_SHADOW_XFER_MODE_REG) {
744 case SDHCI_TRANSFER_MODE:
746 * Postpone this write, we must do it together with a
747 * command write that is down below.
749 pltfm_host->xfer_mode_shadow = val;
752 writel((val << 16) | pltfm_host->xfer_mode_shadow,
753 host->ioaddr + SDHCI_TRANSFER_MODE);
754 pltfm_host->xfer_mode_shadow = 0;
759 writew(val, host->ioaddr + reg);
762 #ifdef CONFIG_MMC_FREQ_SCALING
764 static bool disable_scaling __read_mostly;
765 module_param(disable_scaling, bool, 0644);
768 * Dynamic frequency calculation.
769 * The active load for the current period and the average active load
770 * are calculated at the end of each polling interval.
772 * If the current active load is greater than the threshold load, then the
773 * frequency is boosted(156MHz).
774 * If the active load is lower than the threshold, then the load is monitored
775 * for a max of three cycles before reducing the frequency(82MHz). If the
776 * average active load is lower, then the monitoring cycles is reduced.
778 * The active load threshold value for both eMMC and SDIO is set to 25 which
779 * is found to give the optimal power and performance. The polling interval is
782 * The polling interval and active load threshold values can be changed by
783 * the user through sysfs.
785 static unsigned long calculate_mmc_target_freq(
786 struct tegra_freq_gov_data *gov_data)
788 unsigned long desired_freq = gov_data->curr_freq;
789 unsigned int type = MMC_TYPE_MMC;
791 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
792 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
793 gov_data->monitor_idle_load = false;
794 gov_data->max_idle_monitor_cycles =
795 gov_params[type].idle_mon_cycles;
797 if (gov_data->monitor_idle_load) {
798 if (!gov_data->max_idle_monitor_cycles) {
799 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
800 gov_data->max_idle_monitor_cycles =
801 gov_params[type].idle_mon_cycles;
803 gov_data->max_idle_monitor_cycles--;
806 gov_data->monitor_idle_load = true;
807 gov_data->max_idle_monitor_cycles *=
808 gov_data->avg_active_load;
809 gov_data->max_idle_monitor_cycles /= 100;
816 static unsigned long calculate_sdio_target_freq(
817 struct tegra_freq_gov_data *gov_data)
819 unsigned long desired_freq = gov_data->curr_freq;
820 unsigned int type = MMC_TYPE_SDIO;
822 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
823 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
824 gov_data->monitor_idle_load = false;
825 gov_data->max_idle_monitor_cycles =
826 gov_params[type].idle_mon_cycles;
828 if (gov_data->monitor_idle_load) {
829 if (!gov_data->max_idle_monitor_cycles) {
830 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
831 gov_data->max_idle_monitor_cycles =
832 gov_params[type].idle_mon_cycles;
834 gov_data->max_idle_monitor_cycles--;
837 gov_data->monitor_idle_load = true;
838 gov_data->max_idle_monitor_cycles *=
839 gov_data->avg_active_load;
840 gov_data->max_idle_monitor_cycles /= 100;
847 static unsigned long calculate_sd_target_freq(
848 struct tegra_freq_gov_data *gov_data)
850 unsigned long desired_freq = gov_data->curr_freq;
851 unsigned int type = MMC_TYPE_SD;
853 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
854 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
855 gov_data->monitor_idle_load = false;
856 gov_data->max_idle_monitor_cycles =
857 gov_params[type].idle_mon_cycles;
859 if (gov_data->monitor_idle_load) {
860 if (!gov_data->max_idle_monitor_cycles) {
861 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
862 gov_data->max_idle_monitor_cycles =
863 gov_params[type].idle_mon_cycles;
865 gov_data->max_idle_monitor_cycles--;
868 gov_data->monitor_idle_load = true;
869 gov_data->max_idle_monitor_cycles *=
870 gov_data->avg_active_load;
871 gov_data->max_idle_monitor_cycles /= 100;
/*
 * devfreq callback: compute the next clock frequency for this host from
 * the busy/total time reported in dfs_stats.
 * NOTE(review): several original lines (the no-gov-data early return, the
 * clock-gated early return, else/brace lines) are missing from this
 * extraction; comments below describe only the visible code.
 */
878 static unsigned long sdhci_tegra_get_target_freq(struct sdhci_host *sdhci,
879 	struct devfreq_dev_status *dfs_stats)
881 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
882 	struct sdhci_tegra *tegra_host = pltfm_host->priv;
883 	struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
884 	unsigned long freq = sdhci->mmc->actual_clock;
/* No governor state: complain and keep the current frequency. */
887 	dev_err(mmc_dev(sdhci->mmc),
888 		"No gov data. Continue using current freq %ld", freq);
896 	 * If clock gating is enabled and clock is currently disabled, then
899 	if (!tegra_host->clk_enabled)
/* Active load = busy time as a percentage of the polling window. */
902 	if (dfs_stats->total_time) {
903 		gov_data->curr_active_load = (dfs_stats->busy_time * 100) /
904 			dfs_stats->total_time;
906 		gov_data->curr_active_load = 0;
/* Exponential moving average of the load (equal weight to new sample). */
909 	gov_data->avg_active_load += gov_data->curr_active_load;
910 	gov_data->avg_active_load >>= 1;
/* Dispatch to the per-card-type governor and track frequency switches. */
912 	if (sdhci->mmc->card) {
913 		if (sdhci->mmc->card->type == MMC_TYPE_SDIO)
914 			freq = calculate_sdio_target_freq(gov_data);
915 		else if (sdhci->mmc->card->type == MMC_TYPE_MMC)
916 			freq = calculate_mmc_target_freq(gov_data);
917 		else if (sdhci->mmc->card->type == MMC_TYPE_SD)
918 			freq = calculate_sd_target_freq(gov_data);
919 		if (gov_data->curr_freq != freq)
920 			gov_data->freq_switch_count++;
921 		gov_data->curr_freq = freq;
/*
 * One-time setup of the frequency-scaling governor for a host: allocate
 * governor state, compute the achievable DFS frequencies from pll_c/pll_p,
 * and seed thresholds from the per-card-type gov_params table.
 * DFS is only engaged for SDR104/HS200 timings.
 * NOTE(review): return statements, braces and some declarations (i, freq,
 * type) are missing from this extraction; comments describe visible code.
 */
927 static int sdhci_tegra_freq_gov_init(struct sdhci_host *sdhci)
929 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
930 	struct sdhci_tegra *tegra_host = pltfm_host->priv;
/* DFS only makes sense in the high-speed UHS/HS200 timings. */
935 	if (!((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
936 		(sdhci->mmc->ios.timing == MMC_TIMING_MMC_HS200))) {
937 		dev_info(mmc_dev(sdhci->mmc),
938 			"DFS not required for current operating mode\n");
/* Allocate governor state once; devm ties lifetime to the device. */
942 	if (!tegra_host->gov_data) {
943 		tegra_host->gov_data = devm_kzalloc(mmc_dev(sdhci->mmc),
944 			sizeof(struct tegra_freq_gov_data), GFP_KERNEL);
945 		if (!tegra_host->gov_data) {
946 			dev_err(mmc_dev(sdhci->mmc),
947 				"Failed to allocate memory for dfs data\n");
952 	/* Find the supported frequencies */
953 	dev_info(mmc_dev(sdhci->mmc), "DFS supported freqs");
954 	for (i = 0; i < tegra_host->tuning_freq_count; i++) {
955 		freq = tegra_host->tuning_data[i].freq_hz;
957 		 * Check the nearest possible clock with pll_c and pll_p as
958 		 * the clock sources. Choose the higher frequency.
960 		tegra_host->gov_data->freqs[i] =
961 			get_nearest_clock_freq(pll_c_rate, freq);
962 		freq = get_nearest_clock_freq(pll_p_rate, freq);
963 		if (freq > tegra_host->gov_data->freqs[i])
964 			tegra_host->gov_data->freqs[i] = freq;
/* NOTE(review): pr_err for an informational list looks wrong — likely
 * should be pr_info/dev_info; confirm before changing log level. */
965 		pr_err("%d,", tegra_host->gov_data->freqs[i]);
968 	tegra_host->gov_data->monitor_idle_load = false;
969 	tegra_host->gov_data->curr_freq = sdhci->mmc->actual_clock;
/* Seed polling interval and thresholds from the card-type parameters. */
970 	if (sdhci->mmc->card) {
971 		type = sdhci->mmc->card->type;
972 		sdhci->mmc->dev_stats->polling_interval =
973 			gov_params[type].polling_interval_ms;
974 		tegra_host->gov_data->act_load_high_threshold =
975 			gov_params[type].active_load_threshold;
976 		tegra_host->gov_data->max_idle_monitor_cycles =
977 			gov_params[type].idle_mon_cycles;
985 static unsigned int tegra_sdhci_get_cd(struct sdhci_host *sdhci)
987 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
988 struct sdhci_tegra *tegra_host = pltfm_host->priv;
990 return tegra_host->card_present;
993 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
995 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
996 struct sdhci_tegra *tegra_host = pltfm_host->priv;
997 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
999 if (!gpio_is_valid(plat->wp_gpio))
1002 return gpio_get_value_cansleep(plat->wp_gpio);
/*
 * Program HOST_CONTROL2 bus-speed mode plus Tegra vendor clock-control
 * trim/tap delays for the requested UHS timing.
 * NOTE(review): switch braces, break statements and some else lines are
 * missing from this extraction; comments below describe only visible code.
 */
1005 static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1009 	u32 vndr_ctrl, trim_delay, best_tap_value;
1010 	struct tegra_tuning_data *tuning_data;
1011 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1012 	struct sdhci_tegra *tegra_host = pltfm_host->priv;
1013 	const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1015 	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1017 	/* Select Bus Speed Mode for host
1018 	 * For HS200 we need to set UHS_MODE_SEL to SDR104.
1019 	 * It works as SDR 104 in SD 4-bit mode and HS200 in eMMC 8-bit mode.
1020 	 * SDR50 mode timing seems to have issues. Programming SDR104
1021 	 * mode for SDR50 mode for reliable transfers over interface.
1023 	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1025 	case MMC_TIMING_UHS_SDR12:
1026 		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1028 	case MMC_TIMING_UHS_SDR25:
1029 		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
/* SDR50 deliberately programmed as SDR104 — see block comment above. */
1031 	case MMC_TIMING_UHS_SDR50:
1032 		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1034 	case MMC_TIMING_UHS_SDR104:
1035 	case MMC_TIMING_MMC_HS200:
1036 		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1038 	case MMC_TIMING_UHS_DDR50:
1039 		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1043 	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
/* DDR50: force clock divider to 2 (divider field = 1). */
1045 	if (uhs == MMC_TIMING_UHS_DDR50) {
1046 		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1047 		clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT);
1048 		clk |= 1 << SDHCI_DIVIDER_SHIFT;
1049 		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1051 		/* Set the ddr mode trim delay if required */
1052 		if (plat->ddr_trim_delay != -1) {
1053 			trim_delay = plat->ddr_trim_delay;
1054 			vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1055 			vndr_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1056 				SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1057 			vndr_ctrl |= (trim_delay <<
1058 				SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1059 			sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_CLK_CTRL);
1062 	/* Set the best tap value based on timing */
/* Tuned tap value is only trusted once tuning completed for this host. */
1063 	if (((uhs == MMC_TIMING_MMC_HS200) ||
1064 		(uhs == MMC_TIMING_UHS_SDR104) ||
1065 		(uhs == MMC_TIMING_UHS_SDR50)) &&
1066 		(tegra_host->tuning_status == TUNING_STATUS_DONE)) {
1067 		tuning_data = sdhci_tegra_get_tuning_data(host,
1068 			host->mmc->ios.clock);
1069 		best_tap_value = (tegra_host->tap_cmd ==
1070 			TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1071 			tuning_data->nom_best_tap_value :
1072 			tuning_data->best_tap_value;
/* Fallback: platform-data tap delay when tuning data is unavailable. */
1074 		best_tap_value = tegra_host->plat->tap_delay;
1076 	vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1077 	vndr_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
1078 		SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1079 	vndr_ctrl |= (best_tap_value <<
1080 		SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1081 	sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * Card-status notification callback (registered through platform data).
 * @card_present: 1 = card inserted, 0 = card removed.
 * @dev_id: the struct sdhci_host passed at registration time.
 */
1085 static void sdhci_status_notify_cb(int card_present, void *dev_id)
1087 struct sdhci_host *sdhci = (struct sdhci_host *)dev_id;
1088 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1089 struct tegra_sdhci_platform_data *plat;
1090 unsigned int status, oldstat;
1092 pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc),
1095 plat = pdev->dev.platform_data;
/* No platform status hook: trust the card_present argument directly. */
1096 if (!plat->mmc_data.status) {
1097 if (card_present == 1) {
/* Insertion: re-enable rescanning and trigger an immediate detect. */
1098 sdhci->mmc->rescan_disable = 0;
1099 mmc_detect_change(sdhci->mmc, 0);
1100 } else if (card_present == 0) {
/* Removal: suppress further rescans until the next insertion. */
1101 sdhci->mmc->detect_change = 0;
1102 sdhci->mmc->rescan_disable = 1;
/* Platform hook available: poll it and compare against the cached state. */
1107 status = plat->mmc_data.status(mmc_dev(sdhci->mmc));
1109 oldstat = plat->mmc_data.card_present;
1110 plat->mmc_data.card_present = status;
/* XOR detects any transition, in either direction. */
1111 if (status ^ oldstat) {
1112 pr_debug("%s: Slot status change detected (%d -> %d)\n",
1113 mmc_hostname(sdhci->mmc), oldstat, status);
/* Removable-card insertion gets a 2.5 s debounce; everything else is immediate. */
1114 if (status && !plat->mmc_data.built_in)
1115 mmc_detect_change(sdhci->mmc, (5 * HZ) / 2);
1117 mmc_detect_change(sdhci->mmc, 0);
/*
 * Card-detect GPIO interrupt handler: latches the new slot state,
 * switches the slot regulators accordingly, flags a retune for the
 * next card, and kicks the sdhci core's card tasklet.
 */
1121 static irqreturn_t carddetect_irq(int irq, void *data)
1123 struct sdhci_host *sdhost = (struct sdhci_host *)data;
1124 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
1125 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1126 struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
1127 struct tegra_sdhci_platform_data *plat;
1130 plat = pdev->dev.platform_data;
/* CD GPIO is active-low here: reading 0 means a card is in the slot. */
1132 tegra_host->card_present =
1133 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
1135 if (tegra_host->card_present) {
/* Card inserted: power up the slot rails. */
1136 err = tegra_sdhci_configure_regulators(tegra_host,
1137 CONFIG_REG_EN, 0, 0);
1139 dev_err(mmc_dev(sdhost->mmc),
1140 "Failed to enable card regulators %d\n", err);
/* Card removed: power down the slot rails. */
1142 err = tegra_sdhci_configure_regulators(tegra_host,
1143 CONFIG_REG_DIS, 0 , 0);
1145 dev_err(mmc_dev(sdhost->mmc),
1146 "Failed to disable card regulators %d\n", err);
1148 * Set retune request as tuning should be done next time
1149 * a card is inserted.
1151 tegra_host->tuning_status = TUNING_STATUS_RETUNE;
1152 tegra_host->force_retune = true;
/* Hand the insertion/removal event to the sdhci core. */
1155 tasklet_schedule(&sdhost->card_tasklet);
/*
 * Post-reset fixups. Runs after a controller reset; on a full reset
 * (SDHCI_RESET_ALL) it clears the driver's error statistics and
 * re-applies every SoC-specific vendor-register quirk, tap/trim delay
 * and UHS capability mask that the reset wiped out.
 */
1159 static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
1163 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1164 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1165 struct tegra_tuning_data *tuning_data;
1166 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1167 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1168 unsigned int best_tap_value;
/* Only a full reset clobbers the vendor registers below. */
1170 if (!(mask & SDHCI_RESET_ALL))
/* A full reset also restarts the CRC/timeout error bookkeeping. */
1173 if (tegra_host->sd_stat_head != NULL) {
1174 tegra_host->sd_stat_head->data_crc_count = 0;
1175 tegra_host->sd_stat_head->cmd_crc_count = 0;
1176 tegra_host->sd_stat_head->data_to_count = 0;
1177 tegra_host->sd_stat_head->cmd_to_count = 0;
/* Frequency-scaling governor statistics start over as well. */
1180 if (tegra_host->gov_data != NULL)
1181 tegra_host->gov_data->freq_switch_count = 0;
/* Rebuild the vendor clock-control register from the SoC quirk flags. */
1183 vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1184 if (soc_data->nvquirks & NVQUIRK_ENABLE_PADPIPE_CLKEN) {
1186 SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE;
1188 if (soc_data->nvquirks & NVQUIRK_DISABLE_SPI_MODE_CLKEN) {
1190 ~SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
1192 if (soc_data->nvquirks & NVQUIRK_EN_FEEDBACK_CLK) {
1194 ~SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
1196 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK;
/*
 * Restore the tap delay: use the tuned value if tuning survived
 * (KEEP_POWER suspend), otherwise fall back to the platform default.
 */
1199 if (soc_data->nvquirks & NVQUIRK_SET_TAP_DELAY) {
1200 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
1201 && (host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
1202 tuning_data = sdhci_tegra_get_tuning_data(host,
1203 host->mmc->ios.clock);
1204 best_tap_value = (tegra_host->tap_cmd ==
1205 TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1206 tuning_data->nom_best_tap_value :
1207 tuning_data->best_tap_value;
1209 best_tap_value = tegra_host->plat->tap_delay;
1211 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
1212 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1213 vendor_ctrl |= (best_tap_value <<
1214 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
/* Trim delay always comes from platform data. */
1217 if (soc_data->nvquirks & NVQUIRK_SET_TRIM_DELAY) {
1218 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1219 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1220 vendor_ctrl |= (plat->trim_delay <<
1221 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1223 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50_TUNING)
1224 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_SDR50_TUNING;
1225 sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/* Rebuild the vendor misc-control register (mode support bits). */
1227 misc_ctrl = sdhci_readl(host, SDHCI_VNDR_MISC_CTRL);
1228 if (soc_data->nvquirks & NVQUIRK_ENABLE_SD_3_0)
1229 misc_ctrl |= SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0;
1230 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) {
1232 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT;
1234 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) {
1236 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT;
1238 /* Enable DDR mode support only for SDMMC4 */
1239 if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) {
1240 if (tegra_host->instance == 3) {
1242 SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT;
1245 if (soc_data->nvquirks & NVQUIRK_INFINITE_ERASE_TIMEOUT) {
1247 SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT;
1249 if (soc_data->nvquirks & NVQUIRK_SET_PIPE_STAGES_MASK_0)
1250 misc_ctrl &= ~SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK;
1252 /* External loopback is valid for sdmmc3 only */
1253 if ((soc_data->nvquirks & NVQUIRK_DISABLE_EXTERNAL_LOOPBACK) &&
1254 (tegra_host->instance == 2)) {
1255 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
1256 && (host->mmc->pm_flags &
1257 MMC_PM_KEEP_POWER)) {
1259 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
1262 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
1265 sdhci_writel(host, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
1267 if (soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CMD23)
1268 host->flags &= ~SDHCI_AUTO_CMD23;
1270 /* Mask the support for any UHS modes if specified */
1271 if (plat->uhs_mask & MMC_UHS_MASK_SDR104)
1272 host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
1274 if (plat->uhs_mask & MMC_UHS_MASK_DDR50)
1275 host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
1277 if (plat->uhs_mask & MMC_UHS_MASK_SDR50)
1278 host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
1280 if (plat->uhs_mask & MMC_UHS_MASK_SDR25)
1281 host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
1283 if (plat->uhs_mask & MMC_UHS_MASK_SDR12)
1284 host->mmc->caps &= ~MMC_CAP_UHS_SDR12;
/* HS200 can be disabled either at build time or via the platform mask. */
1286 #ifdef CONFIG_MMC_SDHCI_TEGRA_HS200_DISABLE
1287 host->mmc->caps2 &= ~MMC_CAP2_HS200;
1289 if (plat->uhs_mask & MMC_MASK_HS200)
1290 host->mmc->caps2 &= ~MMC_CAP2_HS200;
/*
 * Program the data bus width bits in SDHCI_HOST_CONTROL.
 * 8-bit mode is honoured only when the platform data declares the
 * slot as 8-bit capable; otherwise fall back to 4-bit/1-bit.
 */
1294 static int tegra_sdhci_buswidth(struct sdhci_host *sdhci, int bus_width)
1296 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1297 const struct tegra_sdhci_platform_data *plat;
1300 plat = pdev->dev.platform_data;
1302 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL);
1303 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
1304 ctrl &= ~SDHCI_CTRL_4BITBUS;
1305 ctrl |= SDHCI_CTRL_8BITBUS;
1307 ctrl &= ~SDHCI_CTRL_8BITBUS;
1308 if (bus_width == MMC_BUS_WIDTH_4)
1309 ctrl |= SDHCI_CTRL_4BITBUS;
/* Neither 8-bit nor 4-bit requested: 1-bit bus. */
1311 ctrl &= ~SDHCI_CTRL_4BITBUS;
1313 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL);
1318 * Calculation of nearest clock frequency for desired rate:
1319 * Get the divisor value, div = p / d_rate
1320 * 1. If it is nearer to ceil(p/d_rate) then increment the div value by 0.5 and
1321 * nearest_rate, i.e. result = p / (div + 0.5) = (p << 1)/((div << 1) + 1).
1322 * 2. If not, result = p / div
1323 * As the nearest clk freq should be <= to desired_rate,
1324 * 3. If result > desired_rate then increment the div by 0.5
1325 * and do, (p << 1)/((div << 1) + 1)
1326 * 4. Else return result
1327 * Here, if conditions 1 & 3 are both satisfied then, to keep track of the div value,
1328 * defined index variable.
/*
 * Return the achievable clock frequency closest to (and not above)
 * desired_rate, given a fixed parent PLL rate. Half-step divisors are
 * modelled as (pll << 1) / ((div << 1) + 1); see the algorithm comment
 * above this function.
 */
1330 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
1331 unsigned long desired_rate)
1333 unsigned long result;
1337 div = pll_rate / desired_rate;
/* Hardware divisor is bounded; clamp and accept the resulting rate. */
1338 if (div > MAX_DIVISOR_VALUE) {
1339 div = MAX_DIVISOR_VALUE;
1340 result = pll_rate / div;
/* Remainder >= half the target: the +0.5 divisor is the nearer choice. */
1342 if ((pll_rate % desired_rate) >= (desired_rate / 2))
1343 result = (pll_rate << 1) / ((div << 1) + index++);
1345 result = pll_rate / div;
1347 if (desired_rate < result) {
1349 * Trying to get lower clock freq than desired clock,
1350 * by increasing the divisor value by 0.5
1352 result = (pll_rate << 1) / ((div << 1) + index);
/*
 * Pick the SDMMC parent clock (pll_c vs pll_p) that can get closest to
 * desired_rate, and reparent the controller clock if the choice differs
 * from the current parent.
 */
1359 static void tegra_sdhci_clock_set_parent(struct sdhci_host *host,
1360 unsigned long desired_rate)
1362 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1363 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1364 struct clk *parent_clk;
1365 unsigned long pll_c_freq;
1366 unsigned long pll_p_freq;
/* Parent switching is skipped on FPGA platforms. */
1369 #ifdef CONFIG_TEGRA_FPGA_PLATFORM
1373 * Currently pll_p and pll_c are used as clock sources for SDMMC. If clk
1374 * rate is missing for either of them, then no selection is needed and
1375 * the default parent is used.
1377 if (!pll_c_rate || !pll_p_rate)
/* Best achievable rate from each candidate parent. */
1380 pll_c_freq = get_nearest_clock_freq(pll_c_rate, desired_rate);
1381 pll_p_freq = get_nearest_clock_freq(pll_p_rate, desired_rate);
1384 * For low freq requests, both the desired rates might be higher than
1385 * the requested clock frequency. In such cases, select the parent
1386 * with the lower frequency rate.
1388 if ((pll_c_freq > desired_rate) && (pll_p_freq > desired_rate)) {
1389 if (pll_p_freq <= pll_c_freq) {
1390 desired_rate = pll_p_freq;
1393 desired_rate = pll_c_freq;
1396 rc = clk_set_rate(pltfm_host->clk, desired_rate);
/*
 * Switch parent only on a change; drop the rate to a safe default
 * before reparenting to pll_c.
 */
1399 if (pll_c_freq > pll_p_freq) {
1400 if (!tegra_host->is_parent_pllc) {
1402 tegra_host->is_parent_pllc = true;
1403 clk_set_rate(pltfm_host->clk, DEFAULT_SDHOST_FREQ);
1406 } else if (tegra_host->is_parent_pllc) {
1408 tegra_host->is_parent_pllc = false;
1412 rc = clk_set_parent(pltfm_host->clk, parent_clk);
1414 pr_err("%s: failed to set pll parent clock %d\n",
1415 mmc_hostname(host->mmc), rc);
/*
 * Translate the requested card clock into a controller clock rate,
 * apply platform/hardware clamps, reparent, and program the clock.
 * Also refreshes sdhci->max_clk with the rate actually achieved.
 */
1418 static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci,
1421 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1422 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1423 unsigned int clk_rate;
1424 #ifdef CONFIG_MMC_FREQ_SCALING
1425 unsigned int tap_value;
1426 struct tegra_tuning_data *tuning_data;
1429 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
1431 * In ddr mode, tegra sdmmc controller clock frequency
1432 * should be double the card clock frequency.
1434 if (tegra_host->ddr_clk_limit)
1435 clk_rate = tegra_host->ddr_clk_limit * 2;
1437 clk_rate = clock * 2;
/* SDR50 runs at the first tuning frequency from the SoC data. */
1442 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50)
1443 clk_rate = tegra_host->soc_data->tuning_freq_list[0];
/* Respect the platform clock cap, then the clock framework's max. */
1445 if (tegra_host->max_clk_limit &&
1446 (clk_rate > tegra_host->max_clk_limit))
1447 clk_rate = tegra_host->max_clk_limit;
1449 if (clk_rate > clk_get_max_rate(pltfm_host->clk))
1450 clk_rate = clk_get_max_rate(pltfm_host->clk);
1452 tegra_sdhci_clock_set_parent(sdhci, clk_rate);
1453 clk_set_rate(pltfm_host->clk, clk_rate);
/* Record the rate that was actually granted. */
1454 sdhci->max_clk = clk_get_rate(pltfm_host->clk);
1456 /* FPGA supports 26MHz of clock for SDMMC. */
1457 if (tegra_platform_is_fpga())
1458 sdhci->max_clk = 26000000;
1460 #ifdef CONFIG_MMC_FREQ_SCALING
1461 /* Set the tap delay if tuning is done and dfs is enabled */
1462 if (sdhci->mmc->df &&
1463 (tegra_host->tuning_status == TUNING_STATUS_DONE)) {
1464 tuning_data = sdhci_tegra_get_tuning_data(sdhci, clock);
1465 tap_value = (tegra_host->tap_cmd == TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1466 tuning_data->nom_best_tap_value :
1467 tuning_data->best_tap_value;
1468 sdhci_tegra_set_tap_delay(sdhci, tap_value);
/*
 * Gate/ungate the controller clocks. A non-zero clock request powers up
 * the runtime-PM domain, enables the module/EMC/system clocks and sets
 * the rate; a zero request tears everything down in reverse order.
 * Serialized with set_clock_mutex.
 */
1473 static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
1475 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1476 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1477 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1481 mutex_lock(&tegra_host->set_clock_mutex);
1482 pr_debug("%s %s %u enabled=%u\n", __func__,
1483 mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled);
/* Clock-on path: only runs when clocks are currently off. */
1485 if (!tegra_host->clk_enabled) {
1486 pm_runtime_get_sync(&pdev->dev);
1487 ret = clk_prepare_enable(pltfm_host->clk);
1489 dev_err(mmc_dev(sdhci->mmc),
1490 "clock enable is failed, ret: %d\n", ret);
1493 tegra_host->clk_enabled = true;
1494 sdhci->is_clk_on = tegra_host->clk_enabled;
/* Ungate the SDMMC clock in the vendor clock-control register. */
1495 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1496 ctrl |= SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1497 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1499 tegra_sdhci_set_clk_rate(sdhci, clock);
/* Optional EMC and system bus clocks, enabled at most once. */
1501 if (tegra_host->emc_clk && (!tegra_host->is_sdmmc_emc_clk_on)) {
1502 ret = clk_prepare_enable(tegra_host->emc_clk);
1504 dev_err(mmc_dev(sdhci->mmc),
1505 "clock enable is failed, ret: %d\n", ret);
1508 tegra_host->is_sdmmc_emc_clk_on = true;
1510 if (tegra_host->sclk && (!tegra_host->is_sdmmc_sclk_on)) {
1511 ret = clk_prepare_enable(tegra_host->sclk);
1513 dev_err(mmc_dev(sdhci->mmc),
1514 "clock enable is failed, ret: %d\n", ret);
1517 tegra_host->is_sdmmc_sclk_on = true;
/* Clock-off path: reverse of the above, then release runtime PM. */
1519 } else if (!clock && tegra_host->clk_enabled) {
1520 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on) {
1521 clk_disable_unprepare(tegra_host->emc_clk);
1522 tegra_host->is_sdmmc_emc_clk_on = false;
1524 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on) {
1525 clk_disable_unprepare(tegra_host->sclk);
1526 tegra_host->is_sdmmc_sclk_on = false;
1528 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1529 ctrl &= ~SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1530 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1531 clk_disable_unprepare(pltfm_host->clk);
1532 tegra_host->clk_enabled = false;
1533 sdhci->is_clk_on = tegra_host->clk_enabled;
1534 pm_runtime_put_sync(&pdev->dev);
1536 mutex_unlock(&tegra_host->set_clock_mutex);
/*
 * Run the pad auto-calibration sequence for the current signal voltage.
 * Applies per-SoC quirks: skip for SDMMC4, program fixed PD/PU offsets,
 * or derive pinmux drive strengths from the auto-cal result.
 */
1539 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
1540 unsigned char signal_voltage)
1543 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1544 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1545 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1546 unsigned int timeout = 10;
1547 unsigned int calib_offsets = 0;
1549 /* No Calibration for sdmmc4 */
1550 if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_SDMMC4_CALIB) &&
1551 (tegra_host->instance == 3))
1554 if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CALIBRATION))
/* Select the VREF and (per quirk) power up the comp pad. */
1557 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
1558 val &= ~SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK;
1559 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD)
1560 val |= SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1562 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
1564 /* Enable Auto Calibration*/
1565 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
1566 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
1567 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
/* Fixed offsets come from platform data, keyed by signal voltage. */
1568 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_CALIBRATION_OFFSETS)) {
1569 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1570 calib_offsets = tegra_host->plat->calib_3v3_offsets;
1571 else if (signal_voltage == MMC_SIGNAL_VOLTAGE_180)
1572 calib_offsets = tegra_host->plat->calib_1v8_offsets;
1573 if (calib_offsets) {
1574 /* Program Auto cal PD offset(bits 8:14) */
1576 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
1577 val |= (((calib_offsets >> 8) & 0xFF) <<
1578 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
1579 /* Program Auto cal PU offset(bits 0:6) */
1581 val |= (calib_offsets & 0xFF);
1584 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
1586 /* Wait until the calibration is done */
1588 if (!(sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) &
1589 SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE))
1597 dev_err(mmc_dev(sdhci->mmc), "Auto calibration failed\n");
/* Power the comp pad back down once calibration has finished. */
1599 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD) {
1600 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
1601 val &= ~SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1602 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
/*
 * Drive-strength quirk: freeze auto-cal and copy the calibrated
 * pull-down code into the pinmux, deriving the pull-up from it.
 */
1605 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_DRIVE_STRENGTH)) {
1606 unsigned int pulldown_code;
1607 unsigned int pullup_code;
1611 /* Disable Auto calibration */
1612 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
1613 val &= ~SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
1614 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
1616 pg = tegra_drive_get_pingroup(mmc_dev(sdhci->mmc));
1618 /* Get the pull down codes from auto cal status reg */
1620 sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) >>
1621 SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET);
1622 /* Set the pull down in the pinmux reg */
1623 err = tegra_drive_pinmux_set_pull_down(pg,
1626 dev_err(mmc_dev(sdhci->mmc),
1627 "Failed to set pulldown codes %d err %d\n",
1628 pulldown_code, err);
1630 /* Calculate the pull up codes */
1631 pullup_code = pulldown_code + PULLUP_ADJUSTMENT_OFFSET;
1632 if (pullup_code >= TEGRA_MAX_PULL)
1633 pullup_code = TEGRA_MAX_PULL - 1;
1634 /* Set the pull up code in the pinmux reg */
1635 err = tegra_drive_pinmux_set_pull_up(pg, pullup_code);
1637 dev_err(mmc_dev(sdhci->mmc),
1638 "Failed to set pullup codes %d err %d\n",
/*
 * Switch the I/O signalling voltage: set/clear the host's 1.8V control
 * bit and move the vddio rail, falling back to 3.3V if the 1.8V switch
 * fails.
 */
1644 static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci,
1645 unsigned int signal_voltage)
1647 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1648 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1649 unsigned int min_uV = tegra_host->vddio_min_uv;
1650 unsigned int max_uV = tegra_host->vddio_max_uv;
1651 unsigned int rc = 0;
1655 ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
1656 if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1657 ctrl |= SDHCI_CTRL_VDD_180;
1658 min_uV = SDHOST_LOW_VOLT_MIN;
1659 max_uV = SDHOST_LOW_VOLT_MAX;
1660 } else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1661 if (ctrl & SDHCI_CTRL_VDD_180)
1662 ctrl &= ~SDHCI_CTRL_VDD_180;
1665 /* Check if the slot can support the required voltage */
1666 if (min_uV > tegra_host->vddio_max_uv)
1669 /* Set/clear the 1.8V signalling */
1670 sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2);
1672 /* Switch the I/O rail voltage */
1673 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_SET_VOLT,
/* Failed 1.8V switch: restore the high-voltage range so the card still works. */
1675 if (rc && (signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
1676 dev_err(mmc_dev(sdhci->mmc),
1677 "setting 1.8V failed %d. Revert to 3.3V\n", rc);
1678 rc = tegra_sdhci_configure_regulators(tegra_host,
1679 CONFIG_REG_SET_VOLT, SDHOST_HIGH_VOLT_MIN,
1680 SDHOST_HIGH_VOLT_MAX);
/*
 * Regulator helper dispatching on @option:
 *  CONFIG_REG_EN  - enable slot + io rails (once),
 *  CONFIG_REG_DIS - disable them (once),
 *  CONFIG_REG_SET_VOLT - set the io rail to [min_uV, max_uV].
 * Returns the last regulator call's result.
 */
1686 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
1687 u8 option, int min_uV, int max_uV)
1693 if (!tegra_host->is_rail_enabled) {
1694 if (tegra_host->vdd_slot_reg)
1695 rc = regulator_enable(tegra_host->vdd_slot_reg);
1696 if (tegra_host->vdd_io_reg)
1697 rc = regulator_enable(tegra_host->vdd_io_reg);
1698 tegra_host->is_rail_enabled = true;
1701 case CONFIG_REG_DIS:
1702 if (tegra_host->is_rail_enabled) {
/* Disable in reverse order of enabling: io rail first, slot second. */
1703 if (tegra_host->vdd_io_reg)
1704 rc = regulator_disable(tegra_host->vdd_io_reg);
1705 if (tegra_host->vdd_slot_reg)
1706 rc = regulator_disable(
1707 tegra_host->vdd_slot_reg);
1708 tegra_host->is_rail_enabled = false;
1711 case CONFIG_REG_SET_VOLT:
1712 if (tegra_host->vdd_io_reg)
1713 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
1717 pr_err("Invalid argument passed to reg config %d\n", option);
/*
 * Issue a software reset for the bits in @mask and poll until the
 * hardware clears them, then re-apply vendor settings via
 * tegra_sdhci_reset_exit().
 */
1723 static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask)
1725 unsigned long timeout;
1727 sdhci_writeb(sdhci, mask, SDHCI_SOFTWARE_RESET);
1729 /* Wait max 100 ms */
1732 /* hw clears the bit when it's done */
1733 while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & mask) {
/*
 * NOTE(review): the split string literal concatenates to
 * "Reset 0x%x nevercompleted." - a space is missing before
 * "completed".
 */
1735 dev_err(mmc_dev(sdhci->mmc), "Reset 0x%x never"
1736 "completed.\n", (int)mask);
1743 tegra_sdhci_reset_exit(sdhci, mask);
/*
 * Program a new tap delay in the vendor clock-control register.
 * The card clock is gated around the update so the new tap value
 * takes effect cleanly.
 */
1746 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
1747 unsigned int tap_delay)
1752 /* Max tap delay value is 255 */
1753 if (tap_delay > MAX_TAP_VALUES) {
1754 dev_err(mmc_dev(sdhci->mmc),
1755 "Valid tap range (0-255). Setting tap value %d\n",
/* Gate the card clock before touching the tap field. */
1761 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
1762 clk &= ~SDHCI_CLOCK_CARD_EN;
1763 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
1765 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
1766 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
1767 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1768 vendor_ctrl |= (tap_delay << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1769 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/* Re-enable the card clock. */
1771 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
1772 clk |= SDHCI_CLOCK_CARD_EN;
1773 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
/* Program a new trim delay in the vendor clock-control register. */
1777 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
1778 unsigned int trim_delay)
1782 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
1783 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1784 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1785 vendor_ctrl |= (trim_delay << SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1786 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * Bump the per-host error counters for whichever CRC/timeout bits are
 * set in @int_status (interrupt status snapshot).
 */
1789 static int sdhci_tegra_sd_error_stats(struct sdhci_host *host, u32 int_status)
1791 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1792 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1793 struct sdhci_tegra_sd_stats *head = tegra_host->sd_stat_head;
1795 if (int_status & SDHCI_INT_DATA_CRC)
1796 head->data_crc_count++;
1797 if (int_status & SDHCI_INT_CRC)
1798 head->cmd_crc_count++;
1799 if (int_status & SDHCI_INT_TIMEOUT)
1800 head->cmd_to_count++;
1801 if (int_status & SDHCI_INT_DATA_TIMEOUT)
1802 head->data_to_count++;
/*
 * Map a clock frequency onto one of the (at most two) stored tuning
 * data sets: index 0 for the low frequency, index 1 otherwise.
 */
1806 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
1807 struct sdhci_host *sdhci, unsigned int clock)
1809 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1810 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1811 struct tegra_tuning_data *tuning_data;
1812 unsigned int low_freq;
/* Single tuning frequency: only one data set exists. */
1815 if (tegra_host->tuning_freq_count == 1) {
1816 tuning_data = &tegra_host->tuning_data[0];
1820 /* Get the lowest supported freq */
1821 for (i = 0; i < TUNING_FREQ_COUNT; ++i) {
1822 low_freq = tegra_host->soc_data->tuning_freq_list[i];
1827 if (clock <= low_freq)
1828 tuning_data = &tegra_host->tuning_data[0];
1830 tuning_data = &tegra_host->tuning_data[1];
/*
 * Derive the calculated T2T (tap-to-tap), UI and tap-hole values at the
 * requested @vmin, interpolating between the estimates taken at the
 * tuning data's vmin and at @boot_mv. Results are written into
 * tuning_data->calc_values; the summary is logged.
 */
1836 static void calculate_vmin_values(struct sdhci_host *sdhci,
1837 struct tegra_tuning_data *tuning_data, int vmin, int boot_mv)
1839 struct tuning_values *est_values = &tuning_data->est_values;
1840 struct tuning_values *calc_values = &tuning_data->calc_values;
1841 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
1842 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
1843 int vmin_slope, vmin_int, temp_calc_vmin;
1844 int t2t_vmax, t2t_vmin;
1845 int vmax_thole, vmin_thole;
1848 * If current vmin is equal to vmin or vmax of tuning data, use the
1849 * previously calculated estimated T2T values directly. Note that the
1850 * estimated T2T_vmax is not at Vmax specified in tuning data. It is
1851 * the T2T at the boot or max voltage for the current SKU. Hence,
1852 * boot_mv is used in place of t2t_coeffs->vmax.
1854 if (vmin == t2t_coeffs->vmin) {
1855 t2t_vmin = est_values->t2t_vmin;
1856 } else if (vmin == boot_mv) {
1857 t2t_vmin = est_values->t2t_vmax;
1860 * For any intermediate voltage between boot voltage and vmin
1861 * of tuning data, calculate the slope and intercept from the
1862 * t2t at boot_mv and vmin and calculate the actual values.
1864 t2t_vmax = 1000 / est_values->t2t_vmax;
1865 t2t_vmin = 1000 / est_values->t2t_vmin;
1866 vmin_slope = ((t2t_vmax - t2t_vmin) * 1000) /
1867 (boot_mv - t2t_coeffs->vmin);
1868 vmin_int = (t2t_vmax * 1000 - (vmin_slope * boot_mv)) / 1000;
1869 t2t_vmin = (vmin_slope * vmin) / 1000 + vmin_int;
1870 t2t_vmin = (1000 / t2t_vmin);
/* Scale the estimate by the measured-vs-estimated vmax ratio. */
1873 calc_values->t2t_vmin = (t2t_vmin * calc_values->t2t_vmax) /
1874 est_values->t2t_vmax;
/* UI at vmin: one clock period (in the same units) over T2T. */
1876 calc_values->ui_vmin = (1000000 / (tuning_data->freq_hz / 1000000)) /
1877 calc_values->t2t_vmin;
1879 /* Calculate the vmin tap hole at vmin of tuning data */
1880 temp_calc_vmin = (est_values->t2t_vmin * calc_values->t2t_vmax) /
1881 est_values->t2t_vmax;
1882 vmin_thole = (thole_coeffs->thole_vmin_int -
1883 (thole_coeffs->thole_vmin_slope * temp_calc_vmin)) /
1885 vmax_thole = calc_values->vmax_thole;
1887 if (vmin == t2t_coeffs->vmin) {
1888 calc_values->vmin_thole = vmin_thole;
1889 } else if (vmin == boot_mv) {
1890 calc_values->vmin_thole = vmax_thole;
1893 * Interpolate the tap hole for any intermediate voltage.
1894 * Calculate the slope and intercept from the available data
1895 * and use them to calculate the actual values.
1897 vmin_slope = ((vmax_thole - vmin_thole) * 1000) /
1898 (boot_mv - t2t_coeffs->vmin);
1899 vmin_int = (vmax_thole * 1000 - (vmin_slope * boot_mv)) / 1000;
1900 calc_values->vmin_thole = (vmin_slope * vmin) / 1000 + vmin_int;
1903 /* Adjust the partial win start for Vmin boundary */
1904 if (tuning_data->is_partial_win_valid)
1905 tuning_data->final_tap_data[0].win_start =
1906 (tuning_data->final_tap_data[0].win_start *
1907 tuning_data->calc_values.t2t_vmax) /
1908 tuning_data->calc_values.t2t_vmin;
1910 pr_info("**********Tuning values*********\n");
1911 pr_info("**estimated values**\n");
1912 pr_info("T2T_Vmax %d, T2T_Vmin %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
1913 est_values->t2t_vmax, est_values->t2t_vmin,
1914 est_values->vmax_thole, est_values->ui);
1915 pr_info("**Calculated values**\n");
1916 pr_info("T2T_Vmax %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
1917 calc_values->t2t_vmax, calc_values->vmax_thole,
1919 pr_info("T2T_Vmin %d, 1'st_hole_Vmin %d, UI_Vmin %d\n",
1920 calc_values->t2t_vmin, calc_values->vmin_thole,
1921 calc_values->ui_vmin);
1922 pr_info("***********************************\n");
/*
 * Shrink a tap window from its start edge, depending on what kind of
 * edge it is (partial/full boundary or tap hole), and clamp the result
 * to MAX_TAP_VALUES.
 */
1925 static int slide_window_start(struct sdhci_host *sdhci,
1926 struct tegra_tuning_data *tuning_data,
1927 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
1931 if (edge_attr == WIN_EDGE_BOUN_START) {
/* Boundary edge: advance by one T2T period (1000/t2t). */
1933 tap_value += (1000 / tuning_data->calc_values.t2t_vmin);
1935 tap_value += (1000 / tuning_data->calc_values.t2t_vmax);
1936 } else if (edge_attr == WIN_EDGE_HOLE) {
1937 if (tap_hole >= 0) {
/* Hole edge: step past the hole by 7% of its position plus a margin. */
1938 tap_margin = get_tuning_tap_hole_margins(sdhci,
1939 tuning_data->calc_values.t2t_vmax);
1940 tap_value += ((7 * tap_hole) / 100) + tap_margin;
1944 if (tap_value > MAX_TAP_VALUES)
1945 tap_value = MAX_TAP_VALUES;
/*
 * Shrink a tap window from its end edge: rescale boundary ends from
 * vmax to vmin timing, or back off before a tap hole by 7% of the hole
 * position plus a margin.
 */
1950 static int slide_window_end(struct sdhci_host *sdhci,
1951 struct tegra_tuning_data *tuning_data,
1952 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
1956 if (edge_attr == WIN_EDGE_BOUN_END) {
/* Rescale the end to vmin timing, then pull in one T2T period. */
1957 tap_value = (tap_value * tuning_data->calc_values.t2t_vmax) /
1958 tuning_data->calc_values.t2t_vmin;
1959 tap_value -= (1000 / tuning_data->calc_values.t2t_vmin);
1960 } else if (edge_attr == WIN_EDGE_HOLE) {
1961 if (tap_hole >= 0) {
1962 tap_value = tap_hole;
1963 tap_margin = get_tuning_tap_hole_margins(sdhci,
1964 tuning_data->calc_values.t2t_vmin);
1966 tap_value -= ((7 * tap_hole) / 100) + tap_margin;
/*
 * Walk every valid tap window in @temp_tap_data and tighten both its
 * edges via slide_window_start()/slide_window_end(), tracking which tap
 * hole each edge corresponds to. Logs the final windows.
 */
1971 static int adjust_window_boundaries(struct sdhci_host *sdhci,
1972 struct tegra_tuning_data *tuning_data,
1973 struct tap_window_data *temp_tap_data)
1975 struct tap_window_data *tap_data;
1976 int vmin_tap_hole = 0;
1977 int vmax_tap_hole = 0;
1980 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
1981 tap_data = &temp_tap_data[i];
1982 /* Update with next hole if first hole is taken care of */
1983 if (tap_data->win_start_attr == WIN_EDGE_HOLE)
/* Hole position at vmax: first hole plus (n-1) UI steps. */
1984 vmax_tap_hole = tuning_data->calc_values.vmax_thole +
1985 (tap_data->hole_pos - 1) *
1986 tuning_data->calc_values.ui;
1987 tap_data->win_start = slide_window_start(sdhci, tuning_data,
1988 tap_data->win_start, tap_data->win_start_attr,
1991 /* Update with next hole if first hole is taken care of */
1992 if (tap_data->win_end_attr == WIN_EDGE_HOLE)
/* Same computation at vmin for the end edge. */
1993 vmin_tap_hole = tuning_data->calc_values.vmin_thole +
1994 (tap_data->hole_pos - 1) *
1995 tuning_data->calc_values.ui_vmin;
1996 tap_data->win_end = slide_window_end(sdhci, tuning_data,
1997 tap_data->win_end, tap_data->win_end_attr,
2001 pr_info("***********final tuning windows**********\n");
2002 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2003 tap_data = &temp_tap_data[i];
2004 pr_info("win[%d]: %d - %d\n", i, tap_data->win_start,
2007 pr_info("********************************\n");
/*
 * Choose the widest usable tap window and return a tap value inside it,
 * weighted toward the vmin side. A leading partial window is scored by
 * min(width, 2 * win_end). Returns a negative/zero value if no window
 * opens at this @vmin.
 */
2011 static int find_best_tap_value(struct tegra_tuning_data *tuning_data,
2012 struct tap_window_data *temp_tap_data, int vmin)
2014 struct tap_window_data *tap_data;
2015 u8 i = 0, sel_win = 0;
2016 int pref_win = 0, curr_win_size = 0;
2017 int best_tap_value = 0;
2019 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2020 tap_data = &temp_tap_data[i];
/* Partial first window: cap its effective size at twice its end. */
2021 if (!i && tuning_data->is_partial_win_valid) {
2022 pref_win = tap_data->win_end - tap_data->win_start;
2023 if ((tap_data->win_end * 2) < pref_win)
2024 pref_win = tap_data->win_end * 2;
2027 curr_win_size = tap_data->win_end - tap_data->win_start;
2028 if ((curr_win_size > 0) && (curr_win_size > pref_win)) {
2029 pref_win = curr_win_size;
2035 if (pref_win <= 0) {
2036 pr_err("No window opening for %d vmin\n", vmin);
2040 tap_data = &temp_tap_data[sel_win];
2041 if (!sel_win && tuning_data->is_partial_win_valid) {
/* Partial window: centre on the usable half before the end. */
2043 best_tap_value = tap_data->win_end - (pref_win / 2);
2044 if (best_tap_value < 0)
/* Full window: split by the t2t_vmin : t2t_vmax ratio. */
2047 best_tap_value = tap_data->win_start +
2048 ((tap_data->win_end - tap_data->win_start) *
2049 tuning_data->calc_values.t2t_vmin) /
2050 (tuning_data->calc_values.t2t_vmin +
2051 tuning_data->calc_values.t2t_vmax);
2054 pr_info("best tap win - (%d-%d), best tap value %d\n",
2055 tap_data->win_start, tap_data->win_end, best_tap_value);
2056 return best_tap_value;
/*
 * Iterate over candidate vmin voltages until a tap window opens,
 * recomputing the vmin-dependent values each pass, then store the best
 * tap value and (if vmin changed) update the DVFS fmax-at-vmin floor.
 * Allocates a scratch copy of the tap windows; freed on all exits.
 */
2059 static int sdhci_tegra_calculate_best_tap(struct sdhci_host *sdhci,
2060 struct tegra_tuning_data *tuning_data)
2062 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2063 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2064 struct tap_window_data *temp_tap_data = NULL;
2065 int vmin, curr_vmin, best_tap_value = 0;
/* Starting vmin: the DVFS prediction for this tuning frequency. */
2068 curr_vmin = tegra_dvfs_predict_millivolts(pltfm_host->clk,
2069 tuning_data->freq_hz);
2071 curr_vmin = tegra_host->boot_vcore_mv;
2075 SDHCI_TEGRA_DBG("%s: checking for win opening with vmin %d\n",
2076 mmc_hostname(sdhci->mmc), vmin);
/* Gave up: no window opened even at/above the boot vcore voltage. */
2077 if ((best_tap_value < 0) &&
2078 (vmin > tegra_host->boot_vcore_mv)) {
2079 dev_err(mmc_dev(sdhci->mmc),
2080 "No best tap for any vcore range\n");
2081 kfree(temp_tap_data);
2082 temp_tap_data = NULL;
2086 calculate_vmin_values(sdhci, tuning_data, vmin,
2087 tegra_host->boot_vcore_mv);
/* Scratch buffer is allocated lazily on the first iteration. */
2089 if (temp_tap_data == NULL) {
2090 temp_tap_data = kzalloc(sizeof(struct tap_window_data) *
2091 tuning_data->num_of_valid_tap_wins, GFP_KERNEL);
2092 if (IS_ERR_OR_NULL(temp_tap_data)) {
2093 dev_err(mmc_dev(sdhci->mmc),
2094 "No memory for final tap value calculation\n");
/* Work on a fresh copy each pass; adjustment mutates the windows. */
2099 memcpy(temp_tap_data, tuning_data->final_tap_data,
2100 sizeof(struct tap_window_data) *
2101 tuning_data->num_of_valid_tap_wins);
2103 adjust_window_boundaries(sdhci, tuning_data, temp_tap_data);
2105 best_tap_value = find_best_tap_value(tuning_data,
2106 temp_tap_data, vmin);
2108 if (best_tap_value < 0)
2110 } while (best_tap_value < 0);
2112 tuning_data->best_tap_value = best_tap_value;
2113 tuning_data->nom_best_tap_value = best_tap_value;
2116 * Set the new vmin if there is any change. If dvfs overrides are
2117 * disabled, then print the error message but continue execution
2118 * rather than disabling tuning altogether.
2120 if ((tuning_data->best_tap_value >= 0) && (curr_vmin != vmin)) {
2121 err = tegra_dvfs_set_fmax_at_vmin(pltfm_host->clk,
2122 tuning_data->freq_hz, vmin);
2123 if ((err == -EPERM) || (err == -ENOSYS)) {
2125 * tegra_dvfs_set_fmax_at_vmin: will return EPERM or
2126 * ENOSYS, when DVFS override is not enabled, continue
2127 * tuning with default core voltage.
2130 "dvfs overrides disabled. Vmin not updated\n");
2134 kfree(temp_tap_data);
/*
 * Issue one tuning command (CMD19/CMD21, saved in tegra_host->tuning_opcode)
 * directly via register writes and poll SDHCI_INT_STATUS for the result.
 * NOTE(review): excerpted listing — intermediate original lines (variable
 * declarations, delays, returns, closing braces) are missing, so the full
 * control flow cannot be confirmed from this view.
 */
2138 static int sdhci_tegra_issue_tuning_cmd(struct sdhci_host *sdhci)
2140 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2141 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2145 unsigned int timeout = 10;
/* Wait for the CMD/DAT inhibit bits to clear before issuing a command */
2149 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
2150 while (sdhci_readl(sdhci, SDHCI_PRESENT_STATE) & mask) {
2152 dev_err(mmc_dev(sdhci->mmc), "Controller never"
2153 "released inhibit bit(s).\n");
/* Drop any previously latched tuned clock before a fresh tuning pass */
2161 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2162 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2163 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
/* Kick off the controller's Execute Tuning sequence */
2165 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2166 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2167 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2170 * In response to CMD19, the card sends 64 bytes of tuning
2171 * block to the Host Controller. So we set the block size
2173 * In response to CMD21, the card sends 128 bytes of tuning
2174 * block for MMC_BUS_WIDTH_8 and 64 bytes for MMC_BUS_WIDTH_4
2175 * to the Host Controller. So we set the block size to 64 here.
2177 sdhci_writew(sdhci, SDHCI_MAKE_BLKSZ(7, tegra_host->tuning_bsize),
/* 0xE is the data timeout value written to SDHCI_TIMEOUT_CONTROL */
2180 sdhci_writeb(sdhci, 0xE, SDHCI_TIMEOUT_CONTROL);
2182 sdhci_writew(sdhci, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2184 sdhci_writel(sdhci, 0x0, SDHCI_ARGUMENT);
2186 /* Set the cmd flags */
2187 flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_DATA;
2188 /* Issue the command */
2189 sdhci_writew(sdhci, SDHCI_MAKE_CMD(
2190 tegra_host->tuning_opcode, flags), SDHCI_COMMAND);
/* Read and acknowledge (write-1-to-clear) the raised interrupt bits */
2196 intstatus = sdhci_readl(sdhci, SDHCI_INT_STATUS);
2198 sdhci_writel(sdhci, intstatus, SDHCI_INT_STATUS);
/*
 * Data became available without a CRC error: the tuning block was
 * received, mark tuning done; otherwise reset the DATA/CMD lines
 * to recover from the failed attempt.
 */
2203 if ((intstatus & SDHCI_INT_DATA_AVAIL) &&
2204 !(intstatus & SDHCI_INT_DATA_CRC)) {
2206 sdhci->tuning_done = 1;
2208 tegra_sdhci_reset(sdhci, SDHCI_RESET_DATA);
2209 tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD);
/*
 * Success requires the controller to have cleared EXEC_TUNING and
 * latched TUNED_CLK in HOST_CONTROL2.
 */
2213 if (sdhci->tuning_done) {
2214 sdhci->tuning_done = 0;
2215 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2216 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING) &&
2217 (ctrl & SDHCI_CTRL_TUNED_CLK))
/*
 * Walk tap values starting at @starting_tap, issuing a tuning command at
 * each one, until the pass/fail state flips relative to @expect_failure
 * (expect_failure=true scans for the first passing tap, i.e. a window
 * start; false scans for the first failing tap, i.e. a window end).
 * NOTE(review): excerpted listing — tap_value increment, retry handling
 * and the return statement are in missing lines; returns the tap value
 * where the transition occurred, presumably.
 */
2227 static int sdhci_tegra_scan_tap_values(struct sdhci_host *sdhci,
2228 unsigned int starting_tap, bool expect_failure)
2230 unsigned int tap_value = starting_tap;
2232 unsigned int retry = TUNING_RETRIES;
2235 /* Set the tap delay */
2236 sdhci_tegra_set_tap_delay(sdhci, tap_value);
2238 /* Run frequency tuning */
2239 err = sdhci_tegra_issue_tuning_cmd(sdhci);
/* Reset the retry budget once the expected transition is observed */
2244 retry = TUNING_RETRIES;
2245 if ((expect_failure && !err) ||
2246 (!expect_failure && err))
2250 } while (tap_value <= MAX_TAP_VALUES);
/*
 * Derive the "actual" (calculated) tuning values at @voltage_mv from the
 * measured UI: tap-to-tap delay at Vmax and the first tap-hole position,
 * interpolating linearly between the characterized Vmin/Vmax endpoints
 * for intermediate voltages.
 * NOTE(review): excerpted listing — some statements/braces are missing.
 */
2255 static int calculate_actual_tuning_values(int speedo,
2256 struct tegra_tuning_data *tuning_data, int voltage_mv)
2258 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2259 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2260 struct tuning_values *calc_values = &tuning_data->calc_values;
2262 int vmax_thole, vmin_thole;
2264 /* T2T_Vmax = (1000000/freq_MHz)/Calc_UI */
2265 calc_values->t2t_vmax = (1000000 / (tuning_data->freq_hz / 1000000)) /
2269 * Interpolate the tap hole.
2270 * Vmax_1'st_hole = (Calc_T2T_Vmax*(-thole_slope)+thole_tint.
2272 vmax_thole = (thole_coeffs->thole_vmax_int -
2273 (thole_coeffs->thole_vmax_slope * calc_values->t2t_vmax)) /
2275 vmin_thole = (thole_coeffs->thole_vmin_int -
2276 (thole_coeffs->thole_vmin_slope * calc_values->t2t_vmax)) /
/* At an exact characterization endpoint, use that endpoint's hole directly */
2278 if (voltage_mv == t2t_coeffs->vmin) {
2279 calc_values->vmax_thole = vmin_thole;
2280 } else if (voltage_mv == t2t_coeffs->vmax) {
2281 calc_values->vmax_thole = vmax_thole;
2283 slope = (vmax_thole - vmin_thole) /
2284 (t2t_coeffs->vmax - t2t_coeffs->vmin);
/* NOTE(review): 1250 looks like a hard-coded vmax in mV — confirm it
 * should not be t2t_coeffs->vmax instead. */
2285 inpt = ((vmax_thole * 1000) - (slope * 1250)) / 1000;
2286 calc_values->vmax_thole = slope * voltage_mv + inpt;
2293 * All coeffs are filled up in the table after multiplying by 1000. So, all
2294 * calculations should have a divide by 1000 at the end.
/*
 * Estimate T2T delay, UI and first tap-hole at @voltage_mv purely from
 * the chip's speedo value and the characterization coefficient tables
 * (before any windows are actually measured). Intermediate voltages are
 * linearly interpolated: 1/T2T and the tap hole are linear in voltage.
 * NOTE(review): excerpted listing — some statements/braces are missing.
 */
2296 static int calculate_estimated_tuning_values(int speedo,
2297 struct tegra_tuning_data *tuning_data, int voltage_mv)
2299 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2300 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2301 struct tuning_values *est_values = &tuning_data->est_values;
2303 int vmax_t2t, vmin_t2t;
2304 int vmax_thole, vmin_thole;
2306 /* Est_T2T_Vmax = (speedo*(-t2t_slope)+t2t_int */
2307 vmax_t2t = (t2t_coeffs->t2t_vmax_int - (speedo *
2308 t2t_coeffs->t2t_vmax_slope)) / 1000;
2309 vmin_t2t = (t2t_coeffs->t2t_vmin_int - (speedo *
2310 t2t_coeffs->t2t_vmin_slope)) / 1000;
2311 est_values->t2t_vmin = vmin_t2t;
2313 if (voltage_mv == t2t_coeffs->vmin) {
2314 est_values->t2t_vmax = vmin_t2t;
2315 } else if (voltage_mv == t2t_coeffs->vmax) {
2316 est_values->t2t_vmax = vmax_t2t;
/* Work in 1/T2T space, where the dependence on voltage is linear */
2318 vmax_t2t = 1000 / vmax_t2t;
2319 vmin_t2t = 1000 / vmin_t2t;
2321 * For any intermediate voltage between 0.95V and 1.25V,
2322 * calculate the slope and intercept from the T2T and tap hole
2323 * values of 0.95V and 1.25V and use them to calculate the
2324 * actual values. 1/T2T is a linear function of voltage.
2326 slope = ((vmax_t2t - vmin_t2t) * 1000) /
2327 (t2t_coeffs->vmax - t2t_coeffs->vmin);
2328 inpt = (vmax_t2t * 1000 - (slope * t2t_coeffs->vmax)) / 1000;
2329 est_values->t2t_vmax = (slope * voltage_mv) / 1000 + inpt;
2330 est_values->t2t_vmax = (1000 / est_values->t2t_vmax);
2333 /* Est_UI = (1000000/freq_MHz)/Est_T2T_Vmax */
2334 est_values->ui = (1000000 / (thole_coeffs->freq_khz / 1000)) /
2335 est_values->t2t_vmax;
2338 * Est_1'st_hole = (Est_T2T_Vmax*(-thole_slope)) + thole_int.
2340 vmax_thole = (thole_coeffs->thole_vmax_int -
2341 (thole_coeffs->thole_vmax_slope * est_values->t2t_vmax)) / 1000;
2342 vmin_thole = (thole_coeffs->thole_vmin_int -
2343 (thole_coeffs->thole_vmin_slope * est_values->t2t_vmax)) / 1000;
2345 if (voltage_mv == t2t_coeffs->vmin) {
2346 est_values->vmax_thole = vmin_thole;
2347 } else if (voltage_mv == t2t_coeffs->vmax) {
2348 est_values->vmax_thole = vmax_thole;
2351 * For any intermediate voltage between 0.95V and 1.25V,
2352 * calculate the slope and intercept from the t2t and tap hole
2353 * values of 0.95V and 1.25V and use them to calculate the
2354 * actual values. Tap hole is a linear function of voltage.
2356 slope = ((vmax_thole - vmin_thole) * 1000) /
2357 (t2t_coeffs->vmax - t2t_coeffs->vmin);
2358 inpt = (vmax_thole * 1000 - (slope * t2t_coeffs->vmax)) / 1000;
2359 est_values->vmax_thole = (slope * voltage_mv) / 1000 + inpt;
2361 est_values->vmin_thole = vmin_thole;
2367 * Insert the calculated holes and get the final tap windows
2368 * with the boundaries and holes set.
/*
 * Split each measured tap window around the calculated tap holes
 * (starting at calc_values->vmax_thole, stepping by one UI), producing
 * tuning_data->final_tap_data and updating num_of_valid_tap_wins.
 * NOTE(review): excerpted listing — loop-counter updates and several
 * assignment RHS lines are in missing original lines.
 */
2370 static int adjust_holes_in_tap_windows(struct sdhci_host *sdhci,
2371 struct tegra_tuning_data *tuning_data)
2373 struct tap_window_data *tap_data;
2374 struct tap_window_data *final_tap_data;
2375 struct tuning_values *calc_values = &tuning_data->calc_values;
2376 int tap_hole, size = 0;
2377 u8 i = 0, j = 0, num_of_wins, hole_pos = 0;
/* NOTE(review): fixed size 42 — presumably the worst-case window count;
 * confirm against MAX_TAP_VALUES / minimum window width. */
2379 tuning_data->final_tap_data =
2380 devm_kzalloc(mmc_dev(sdhci->mmc),
2381 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
2382 if (IS_ERR_OR_NULL(tuning_data->final_tap_data)) {
2383 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
2387 num_of_wins = tuning_data->num_of_valid_tap_wins;
2388 tap_hole = calc_values->vmax_thole;
2391 tap_data = &tuning_data->tap_data[i];
2392 final_tap_data = &tuning_data->final_tap_data[j];
/* Hole precedes this window: advance the hole by one UI and re-check */
2393 if (tap_hole < tap_data->win_start) {
2394 tap_hole += calc_values->ui;
/* Hole is past this window: copy the window through unchanged */
2397 } else if (tap_hole > tap_data->win_end) {
2398 memcpy(final_tap_data, tap_data,
2399 sizeof(struct tap_window_data));
/* Hole lands inside this window: split it around the hole */
2404 } else if ((tap_hole >= tap_data->win_start) &&
2405 (tap_hole <= tap_data->win_end)) {
2406 size = tap_data->win_end - tap_data->win_start;
2409 &tuning_data->final_tap_data[j];
2410 if (tap_hole == tap_data->win_start) {
2411 final_tap_data->win_start =
2413 final_tap_data->win_start_attr =
2415 final_tap_data->hole_pos = hole_pos;
2416 tap_hole += calc_values->ui;
2419 final_tap_data->win_start =
2420 tap_data->win_start;
2421 final_tap_data->win_start_attr =
2422 WIN_EDGE_BOUN_START;
2424 if (tap_hole <= tap_data->win_end) {
2425 final_tap_data->win_end = tap_hole - 1;
2426 final_tap_data->win_end_attr =
2428 final_tap_data->hole_pos = hole_pos;
2429 tap_data->win_start = tap_hole;
2430 } else if (tap_hole > tap_data->win_end) {
2431 final_tap_data->win_end =
2433 final_tap_data->win_end_attr =
2435 tap_data->win_start =
2438 size = tap_data->win_end - tap_data->win_start;
2444 } while (num_of_wins > 0);
2446 /* Update the num of valid wins count after tap holes insertion */
2447 tuning_data->num_of_valid_tap_wins = j;
2449 pr_info("********tuning windows after inserting holes*****\n");
2450 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
2451 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2452 final_tap_data = &tuning_data->final_tap_data[i];
2453 pr_info("win[%d]:%d(%d) - %d(%d)\n", i,
2454 final_tap_data->win_start,
2455 final_tap_data->win_start_attr,
2456 final_tap_data->win_end, final_tap_data->win_end_attr);
2458 pr_info("***********************************************\n");
2464 * Insert the boundaries from negative margin calculations into the windows
/*
 * Split the measured tap windows at every UI-spaced boundary position
 * derived from @boun_end, writing the result back into
 * tuning_data->tap_data and updating num_of_valid_tap_wins.
 * NOTE(review): excerpted listing — loop-index updates and some
 * declarations (e.g. curr_boun) are in missing original lines.
 */
2467 static int insert_boundaries_in_tap_windows(struct sdhci_host *sdhci,
2468 struct tegra_tuning_data *tuning_data, u8 boun_end)
2470 struct tap_window_data *tap_data;
2471 struct tap_window_data *new_tap_data;
2472 struct tap_window_data *temp_tap_data;
2473 struct tuning_values *calc_values = &tuning_data->calc_values;
2475 u8 i = 0, j = 0, num_of_wins;
2476 bool get_next_boun = false;
/* Scratch array for the rebuilt window list (42 = worst-case count,
 * same fixed size used elsewhere in this file) */
2478 temp_tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
2479 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
2480 if (IS_ERR_OR_NULL(temp_tap_data)) {
2481 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
2485 num_of_wins = tuning_data->num_of_valid_tap_wins;
/* First boundary is boun_end folded into the first UI period */
2486 curr_boun = boun_end % calc_values->ui;
2488 if (get_next_boun) {
2489 curr_boun += calc_values->ui;
2491 * If the boun_end exceeds the intial boundary end,
2492 * just copy remaining windows and return.
2494 if (curr_boun >= boun_end)
2495 curr_boun += MAX_TAP_VALUES;
2498 tap_data = &tuning_data->tap_data[i];
2499 new_tap_data = &temp_tap_data[j];
/* Boundary before this window: fetch next boundary */
2500 if (curr_boun <= tap_data->win_start) {
2501 get_next_boun = true;
/* Boundary past this window: copy the window through unchanged */
2503 } else if (curr_boun >= tap_data->win_end) {
2504 memcpy(new_tap_data, tap_data,
2505 sizeof(struct tap_window_data));
2509 get_next_boun = false;
/* Boundary inside this window: split into pre-boundary window,
 * single-tap boundary window, and post-boundary window */
2511 } else if ((curr_boun >= tap_data->win_start) &&
2512 (curr_boun <= tap_data->win_end)) {
2513 new_tap_data->win_start = tap_data->win_start;
2514 new_tap_data->win_start_attr =
2515 tap_data->win_start_attr;
2516 new_tap_data->win_end = curr_boun - 1;
2517 new_tap_data->win_end_attr =
2518 tap_data->win_end_attr;
2520 new_tap_data = &temp_tap_data[j];
2521 new_tap_data->win_start = curr_boun;
2522 new_tap_data->win_end = curr_boun;
2523 new_tap_data->win_start_attr =
2524 WIN_EDGE_BOUN_START;
2525 new_tap_data->win_end_attr =
2528 new_tap_data = &temp_tap_data[j];
2529 new_tap_data->win_start = curr_boun + 1;
2530 new_tap_data->win_start_attr = WIN_EDGE_BOUN_START;
2531 new_tap_data->win_end = tap_data->win_end;
2532 new_tap_data->win_end_attr =
2533 tap_data->win_end_attr;
2537 get_next_boun = true;
2539 } while (num_of_wins > 0);
2541 /* Update the num of valid wins count after tap holes insertion */
2542 tuning_data->num_of_valid_tap_wins = j;
2544 memcpy(tuning_data->tap_data, temp_tap_data,
2545 j * sizeof(struct tap_window_data));
2546 SDHCI_TEGRA_DBG("***tuning windows after inserting boundaries***\n");
2547 SDHCI_TEGRA_DBG("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
2548 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2549 new_tap_data = &tuning_data->tap_data[i];
2550 SDHCI_TEGRA_DBG("win[%d]:%d(%d) - %d(%d)\n", i,
2551 new_tap_data->win_start,
2552 new_tap_data->win_start_attr,
2553 new_tap_data->win_end, new_tap_data->win_end_attr);
2555 SDHCI_TEGRA_DBG("***********************************************\n");
2561 * Scan for all tap values and get all passing tap windows.
/*
 * Measure all passing tap windows at the current clock/voltage, validate
 * the unit intervals (UIs) between window boundaries, compute the
 * calculated tuning values, handle a valid partial first window by
 * computing a negative margin, and finally insert boundaries and holes
 * into the window list.
 * NOTE(review): excerpted listing — many original lines (declarations,
 * counters, early returns, closing braces) are missing; comments below
 * describe only the visible statements.
 */
2563 static int sdhci_tegra_get_tap_window_data(struct sdhci_host *sdhci,
2564 struct tegra_tuning_data *tuning_data)
2566 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2567 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2568 struct tap_window_data *tap_data;
2569 struct tuning_ui tuning_ui[10];
2570 int err = 0, partial_win_start = 0, temp_margin = 0;
2571 unsigned int tap_value, calc_ui = 0;
2572 u8 prev_boundary_end = 0, num_of_wins = 0;
2573 u8 num_of_uis = 0, valid_num_uis = 0;
2574 u8 ref_ui, first_valid_full_win = 0;
2575 u8 boun_end = 0, next_boun_end = 0;
2577 bool valid_ui_found = false;
2580 * Assume there are a max of 10 windows and allocate tap window
2581 * structures for the same. If there are more windows, the array
2582 * size can be adjusted later using realloc.
/* NOTE(review): comment above says 10, allocation below uses 42 and
 * tuning_ui[] is sized 10 — confirm the intended bounds agree. */
2584 tuning_data->tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
2585 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
2586 if (IS_ERR_OR_NULL(tuning_data->tap_data)) {
2587 dev_err(mmc_dev(sdhci->mmc), "No memory for tap data\n");
/* Hold the host lock while scanning taps via raw register accesses */
2591 spin_lock(&sdhci->lock);
2594 tap_data = &tuning_data->tap_data[num_of_wins];
2595 /* Get the window start */
2596 tap_value = sdhci_tegra_scan_tap_values(sdhci, tap_value, true);
2597 tap_data->win_start = min_t(u8, tap_value, MAX_TAP_VALUES);
2599 if (tap_value >= MAX_TAP_VALUES) {
2600 /* If it's first iteration, then all taps failed */
2602 dev_err(mmc_dev(sdhci->mmc),
2603 "All tap values(0-255) failed\n");
2604 spin_unlock(&sdhci->lock);
2607 /* All windows obtained */
2612 /* Get the window end */
2613 tap_value = sdhci_tegra_scan_tap_values(sdhci,
2615 tap_data->win_end = min_t(u8, (tap_value - 1), MAX_TAP_VALUES);
2616 tap_data->win_size = tap_data->win_end - tap_data->win_start;
2620 * If the size of window is more than 4 taps wide, then it is a
2621 * valid window. If tap value 0 has passed, then a partial
2622 * window exists. Mark all the window edges as boundary edges.
2624 if (tap_data->win_size > 4) {
2625 if (tap_data->win_start == 0)
2626 tuning_data->is_partial_win_valid = true;
2627 tap_data->win_start_attr = WIN_EDGE_BOUN_START;
2628 tap_data->win_end_attr = WIN_EDGE_BOUN_END;
2630 /* Invalid window as size is less than 5 taps */
2631 SDHCI_TEGRA_DBG("Invalid tuning win (%d-%d) ignored\n",
2632 tap_data->win_start, tap_data->win_end);
2636 /* Ignore first and last partial UIs */
2637 if (tap_data->win_end_attr == WIN_EDGE_BOUN_END) {
2638 tuning_ui[num_of_uis].ui = tap_data->win_end -
2640 tuning_ui[num_of_uis].is_valid_ui = true;
2642 prev_boundary_end = tap_data->win_end;
2645 } while (tap_value < MAX_TAP_VALUES);
2646 spin_unlock(&sdhci->lock);
2648 tuning_data->num_of_valid_tap_wins = num_of_wins;
2649 valid_num_uis = num_of_uis;
2651 /* Print info of all tap windows */
2652 pr_info("**********Auto tuning windows*************\n");
2653 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
2654 for (j = 0; j < tuning_data->num_of_valid_tap_wins; j++) {
2655 tap_data = &tuning_data->tap_data[j];
2656 pr_info("win[%d]: %d(%d) - %d(%d)\n",
2657 j, tap_data->win_start, tap_data->win_start_attr,
2658 tap_data->win_end, tap_data->win_end_attr);
2660 pr_info("***************************************\n");
2662 /* Mark the first last partial UIs as invalid */
2663 tuning_ui[0].is_valid_ui = false;
/* NOTE(review): if num_of_uis is 0 this indexes tuning_ui[-1]; a guard
 * may exist on a missing line — verify in the full source. */
2664 tuning_ui[num_of_uis - 1].is_valid_ui = false;
2667 /* Discredit all uis at either end with size less than 30% of est ui */
2668 ref_ui = (30 * tuning_data->est_values.ui) / 100;
2669 for (j = 0; j < num_of_uis; j++) {
2670 if (tuning_ui[j].is_valid_ui) {
2671 tuning_ui[j].is_valid_ui = false;
2674 if (tuning_ui[j].ui > ref_ui)
/* Same check from the tail end of the UI list */
2678 for (j = num_of_uis; j > 0; j--) {
2679 if (tuning_ui[j - 1].ui < ref_ui) {
2680 if (tuning_ui[j - 1].is_valid_ui) {
2681 tuning_ui[j - 1].is_valid_ui = false;
2688 /* Calculate 0.75*est_UI */
2689 ref_ui = (75 * tuning_data->est_values.ui) / 100;
2692 * Check for valid UIs and discredit invalid UIs. A UI is considered
2693 * valid if it's greater than (0.75*est_UI). If an invalid UI is found,
2694 * also discredit the smaller of the two adjacent windows.
2696 for (j = 1; j < (num_of_uis - 1); j++) {
2697 if (tuning_ui[j].ui > ref_ui && tuning_ui[j].is_valid_ui) {
2698 tuning_ui[j].is_valid_ui = true;
2700 if (tuning_ui[j].is_valid_ui) {
2701 tuning_ui[j].is_valid_ui = false;
/* Invalid UI: also discredit whichever neighbour is weaker */
2704 if (!tuning_ui[j + 1].is_valid_ui ||
2705 !tuning_ui[j - 1].is_valid_ui) {
2706 if (tuning_ui[j - 1].is_valid_ui) {
2707 tuning_ui[j - 1].is_valid_ui = false;
2709 } else if (tuning_ui[j + 1].is_valid_ui) {
2710 tuning_ui[j + 1].is_valid_ui = false;
2715 if (tuning_ui[j - 1].ui > tuning_ui[j + 1].ui)
2716 tuning_ui[j + 1].is_valid_ui = false;
2718 tuning_ui[j - 1].is_valid_ui = false;
2724 /* Calculate the cumulative UI if there are valid UIs left */
2725 if (valid_num_uis) {
2726 for (j = 0; j < num_of_uis; j++)
2727 if (tuning_ui[j].is_valid_ui) {
2728 calc_ui += tuning_ui[j].ui;
2729 if (!first_valid_full_win)
2730 first_valid_full_win = j;
/* Average the surviving UIs; fall back to the estimate if none */
2735 tuning_data->calc_values.ui = (calc_ui / valid_num_uis);
2736 valid_ui_found = true;
2738 tuning_data->calc_values.ui = tuning_data->est_values.ui;
2739 valid_ui_found = false;
2742 SDHCI_TEGRA_DBG("****Tuning UIs***********\n");
2743 for (j = 0; j < num_of_uis; j++)
2744 SDHCI_TEGRA_DBG("Tuning UI[%d] : %d, Is valid[%d]\n",
2745 j, tuning_ui[j].ui, tuning_ui[j].is_valid_ui);
2746 SDHCI_TEGRA_DBG("*************************\n");
2748 /* Get the calculated tuning values */
2749 err = calculate_actual_tuning_values(tegra_host->speedo, tuning_data,
2750 tegra_host->boot_vcore_mv);
2753 * Calculate negative margin if partial win is valid. There are two
2755 * Case 1: If Avg_UI is found, then keep subtracting avg_ui from start
2756 * of first valid full window until a value <=0 is obtained.
2757 * Case 2: If Avg_UI is not found, subtract avg_ui from all boundary
2758 * starts until a value <=0 is found.
2760 if (tuning_data->is_partial_win_valid && (num_of_wins > 1)) {
2761 if (valid_ui_found) {
2763 tuning_data->tap_data[first_valid_full_win].win_start;
2764 boun_end = partial_win_start;
2765 partial_win_start %= tuning_data->calc_values.ui;
2766 partial_win_start -= tuning_data->calc_values.ui;
2768 for (j = 0; j < NEG_MAR_CHK_WIN_COUNT; j++) {
2770 tuning_data->tap_data[j + 1].win_start;
2772 boun_end = temp_margin;
2773 else if (!next_boun_end)
2774 next_boun_end = temp_margin;
2775 temp_margin %= tuning_data->calc_values.ui;
2776 temp_margin -= tuning_data->calc_values.ui;
2777 if (!partial_win_start ||
2778 (temp_margin > partial_win_start))
2779 partial_win_start = temp_margin;
/* Extend window 0 leftwards by the computed negative margin */
2782 if (partial_win_start <= 0)
2783 tuning_data->tap_data[0].win_start = partial_win_start;
2787 insert_boundaries_in_tap_windows(sdhci, tuning_data, boun_end);
2789 insert_boundaries_in_tap_windows(sdhci, tuning_data, next_boun_end);
2791 /* Insert calculated holes into the windows */
2792 err = adjust_holes_in_tap_windows(sdhci, tuning_data);
/*
 * Debug helper: log the number of tuning frequencies and, for each one,
 * its frequency and frequency band. Output only appears when
 * SDHCI_TEGRA_DBG is compiled as pr_info.
 */
2797 static void sdhci_tegra_dump_tuning_constraints(struct sdhci_host *sdhci)
2799 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2800 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2801 struct tegra_tuning_data *tuning_data;
2804 SDHCI_TEGRA_DBG("%s: Num of tuning frequencies%d\n",
2805 mmc_hostname(sdhci->mmc), tegra_host->tuning_freq_count);
2806 for (i = 0; i < tegra_host->tuning_freq_count; ++i) {
2807 tuning_data = &tegra_host->tuning_data[i];
2808 SDHCI_TEGRA_DBG("%s: Tuning freq[%d]: %d, freq band %d\n",
2809 mmc_hostname(sdhci->mmc), i,
2810 tuning_data->freq_hz, tuning_data->freq_band);
/*
 * Map a tuning vcore constraint (selected via @mask) to the matching
 * voltage in mV; defaults to the boot vcore for unknown cases.
 * NOTE(review): excerpted listing — the switch statement and the logic
 * that consumes/updates @mask are on missing lines.
 */
2814 static unsigned int get_tuning_voltage(struct sdhci_tegra *tegra_host, u8 *mask)
2821 case NOMINAL_VCORE_TUN:
2822 return tegra_host->nominal_vcore_mv;
2823 case BOOT_VCORE_TUN:
2824 return tegra_host->boot_vcore_mv;
2825 case MIN_OVERRIDE_VCORE_TUN:
2826 return tegra_host->min_vcore_override_mv;
/* Fallback: unrecognized constraint uses the boot-time core voltage */
2829 return tegra_host->boot_vcore_mv;
/*
 * Classify the host's current max clock into a tuning frequency band by
 * finding the first entry in the SoC's tuning_freq_list that is >= the
 * clock; returns TUNING_MAX_FREQ if none matches.
 * NOTE(review): excerpted listing — the in-loop return of index i is on
 * a missing line, presumably.
 */
2832 static u8 sdhci_tegra_get_freq_point(struct sdhci_host *sdhci)
2834 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2835 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2836 const unsigned int *freq_list;
2840 curr_clock = sdhci->max_clk;
2841 freq_list = tegra_host->soc_data->tuning_freq_list;
2843 for (i = 0; i < TUNING_FREQ_COUNT; ++i)
2844 if (curr_clock <= freq_list[i])
2847 return TUNING_MAX_FREQ;
/*
 * Return the tap-hole margin for this controller instance: a fixed
 * per-device value from the SoC table when the
 * NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS quirk is set, otherwise a value
 * computed from the T2T tuning result.
 * NOTE(review): excerpted listing — the table-iterator advance, the
 * tail of the tap_margin formula, and the final return are on missing
 * lines.
 */
2850 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
2851 int t2t_tuning_value)
2853 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2854 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2855 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2856 struct tuning_tap_hole_margins *tap_hole;
2861 if (soc_data->nvquirks & NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS) {
2862 if (soc_data->tap_hole_margins) {
2863 tap_hole = soc_data->tap_hole_margins;
/* Look up this instance by its device name */
2864 dev_id = dev_name(mmc_dev(sdhci->mmc));
2865 for (i = 0; i < soc_data->tap_hole_margins_count; i++) {
2866 if (!strcmp(dev_id, tap_hole->dev_id))
2867 return tap_hole->tap_hole_margin;
2871 dev_info(mmc_dev(sdhci->mmc),
2872 "Fixed tap hole margins missing\n");
2876 /* if no margin are available calculate tap margin */
2877 tap_margin = (((2 * (450 / t2t_tuning_value)) +
2884 * The frequency tuning algorithm tries to calculate the tap-to-tap delay
2885 * UI and estimate holes using equations and predetermined coefficients from
2886 * the characterization data. The algorithm will not work without this data.
/*
 * For every supported tuning frequency, locate the T2T and tap-hole
 * coefficient entries matching this controller instance (by device name,
 * and for tap holes also by frequency) in the SoC characterization
 * tables. Clears the pointer and reports an error when an entry is
 * missing.
 * NOTE(review): excerpted listing — table-pointer increments, the
 * coeffs_set bookkeeping and the error-path returns are on missing
 * lines.
 */
2888 static int find_tuning_coeffs_data(struct sdhci_host *sdhci,
2889 bool force_retuning)
2891 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2892 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2893 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2894 struct tegra_tuning_data *tuning_data;
2895 struct tuning_t2t_coeffs *t2t_coeffs;
2896 struct tap_hole_coeffs *thole_coeffs;
2898 unsigned int freq_khz;
2900 bool coeffs_set = false;
2902 dev_id = dev_name(mmc_dev(sdhci->mmc));
2903 /* Find the coeffs data for all supported frequencies */
2904 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
2905 tuning_data = &tegra_host->tuning_data[i];
2907 /* Skip if T2T coeffs are already found */
2908 if (tuning_data->t2t_coeffs == NULL || force_retuning) {
2909 t2t_coeffs = soc_data->t2t_coeffs;
2910 for (j = 0; j < soc_data->t2t_coeffs_count; j++) {
2911 if (!strcmp(dev_id, t2t_coeffs->dev_id)) {
2912 tuning_data->t2t_coeffs = t2t_coeffs;
2914 dev_info(mmc_dev(sdhci->mmc),
2915 "Found T2T coeffs data\n");
/* No T2T entry for this device: tuning cannot proceed for it */
2921 dev_err(mmc_dev(sdhci->mmc),
2922 "T2T coeffs data missing\n");
2923 tuning_data->t2t_coeffs = NULL;
2929 /* Skip if tap hole coeffs are already found */
2930 if (tuning_data->thole_coeffs == NULL || force_retuning) {
2931 thole_coeffs = soc_data->tap_hole_coeffs;
2932 freq_khz = tuning_data->freq_hz / 1000;
2933 for (j = 0; j < soc_data->tap_hole_coeffs_count; j++) {
2934 if (!strcmp(dev_id, thole_coeffs->dev_id) &&
2935 (freq_khz == thole_coeffs->freq_khz)) {
2936 tuning_data->thole_coeffs =
2939 dev_info(mmc_dev(sdhci->mmc),
2940 "%dMHz tap hole coeffs found\n",
/* No tap-hole entry for this device/frequency pair */
2948 dev_err(mmc_dev(sdhci->mmc),
2949 "%dMHz Tap hole coeffs data missing\n",
2951 tuning_data->thole_coeffs = NULL;
2961 * Determines the numbers of frequencies required and then fills up the tuning
2962 * constraints for each of the frequencies. The data of lower frequency is
2963 * filled first and then the higher frequency data. Max supported frequencies
/*
 * Populate tegra_host->tuning_data[] with one entry per tuning frequency
 * (single frequency, or DFS_FREQ_COUNT when frequency scaling applies),
 * each carrying its frequency, band and vcore constraint mask.
 * Returns the frequency count, presumably (the return statement is not
 * visible in this excerpt).
 * NOTE(review): excerpted listing — switch case labels and default
 * freq_count assignment are on missing lines.
 */
2966 static int setup_freq_constraints(struct sdhci_host *sdhci,
2967 const unsigned int *freq_list)
2969 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2970 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2971 struct tegra_tuning_data *tuning_data;
/* DFS needs multiple tuning frequencies, except in SDR50 mode */
2975 if ((sdhci->mmc->ios.timing != MMC_TIMING_UHS_SDR50) &&
2976 (sdhci->mmc->caps2 & MMC_CAP2_FREQ_SCALING))
2977 freq_count = DFS_FREQ_COUNT;
2981 freq_band = sdhci_tegra_get_freq_point(sdhci);
2982 /* Fill up the req frequencies */
2983 switch (freq_count) {
/* Single-frequency case: only the current max clock is tuned */
2985 tuning_data = &tegra_host->tuning_data[0];
2986 tuning_data->freq_hz = sdhci->max_clk;
2987 tuning_data->freq_band = freq_band;
2988 tuning_data->constraints.vcore_mask =
2989 tuning_vcore_constraints[freq_band].vcore_mask;
2990 tuning_data->nr_voltages =
2991 hweight32(tuning_data->constraints.vcore_mask);
/* DFS case: entry 1 is the max clock... */
2994 tuning_data = &tegra_host->tuning_data[1];
2995 tuning_data->freq_hz = sdhci->max_clk;
2996 tuning_data->freq_band = freq_band;
2997 tuning_data->constraints.vcore_mask =
2998 tuning_vcore_constraints[freq_band].vcore_mask;
2999 tuning_data->nr_voltages =
3000 hweight32(tuning_data->constraints.vcore_mask);
/* ...entry 0 is the highest lower-band frequency from freq_list */
3002 tuning_data = &tegra_host->tuning_data[0];
3003 for (i = (freq_band - 1); i >= 0; i--) {
3006 tuning_data->freq_hz = freq_list[i];
3007 tuning_data->freq_band = i;
3008 tuning_data->nr_voltages = 1;
3009 tuning_data->constraints.vcore_mask =
3010 tuning_vcore_constraints[i].vcore_mask;
3011 tuning_data->nr_voltages =
3012 hweight32(tuning_data->constraints.vcore_mask);
3016 dev_err(mmc_dev(sdhci->mmc), "Unsupported freq count\n");
3024 * Get the supported frequencies and other tuning related constraints for each
3025 * frequency. The supported frequencies should be determined from the list of
3026 * frequencies in the soc data and also consider the platform clock limits as
3027 * well as any DFS related restrictions.
/*
 * Top-level constraint setup: build the per-frequency table (once, unless
 * @force_retuning) and then resolve the characterization coefficients for
 * each frequency. Dumps the constraints for debugging before returning.
 * NOTE(review): excerpted listing — error-path returns and braces are on
 * missing lines.
 */
3029 static int sdhci_tegra_get_tuning_constraints(struct sdhci_host *sdhci,
3030 bool force_retuning)
3032 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3033 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3034 const unsigned int *freq_list;
3037 /* A valid freq count means freq constraints are already set up */
3038 if (!tegra_host->tuning_freq_count || force_retuning) {
3039 freq_list = tegra_host->soc_data->tuning_freq_list;
3040 tegra_host->tuning_freq_count =
3041 setup_freq_constraints(sdhci, freq_list);
3042 if (tegra_host->tuning_freq_count < 0) {
3043 dev_err(mmc_dev(sdhci->mmc),
3044 "Invalid tuning freq count\n");
3049 err = find_tuning_coeffs_data(sdhci, force_retuning);
3053 sdhci_tegra_dump_tuning_constraints(sdhci);
3059 * During boot, only boot voltage for vcore can be set. Check if the current
3060 * voltage is allowed to be used. Nominal and min override voltages can be
3061 * set once boot is done. This will be notified through late subsys init call.
/*
 * Apply a vcore override of @voltage mV for tuning: clear any previous
 * DVFS override, optionally boost the EMC clock for nominal-voltage
 * tuning, then set the new override. DVFS returning -EPERM/-ENOSYS
 * (overrides disabled) is treated as non-fatal and tuning continues at
 * the default core voltage.
 * NOTE(review): excerpted listing — several returns/braces are on
 * missing lines.
 */
3063 static int sdhci_tegra_set_tuning_voltage(struct sdhci_host *sdhci,
3064 unsigned int voltage)
3066 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3067 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3069 bool nom_emc_freq_set = false;
/* Non-boot voltages are only permitted once overrides are allowed
 * (vcore_overrides_allowed is set after late subsys init) */
3071 if (voltage && (voltage != tegra_host->boot_vcore_mv) &&
3072 !vcore_overrides_allowed) {
3073 SDHCI_TEGRA_DBG("%s: Override vcore %dmv not allowed\n",
3074 mmc_hostname(sdhci->mmc), voltage);
3078 SDHCI_TEGRA_DBG("%s: Setting vcore override %d\n",
3079 mmc_hostname(sdhci->mmc), voltage);
3081 * First clear any previous dvfs override settings. If dvfs overrides
3082 * are disabled, then print the error message but continue execution
3083 * rather than failing tuning altogether.
3085 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, 0);
3086 if ((err == -EPERM) || (err == -ENOSYS)) {
3088 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3089 * when DVFS override is not enabled. Continue tuning
3090 * with default core voltage
3092 SDHCI_TEGRA_DBG("dvfs overrides disabled. Nothing to clear\n");
3098 /* EMC clock freq boost might be required for nominal core voltage */
3099 if ((voltage == tegra_host->nominal_vcore_mv) &&
3100 tegra_host->plat->en_nominal_vcore_tuning &&
3101 tegra_host->emc_clk) {
3102 err = clk_set_rate(tegra_host->emc_clk,
3103 SDMMC_EMC_NOM_VOLT_FREQ);
3105 dev_err(mmc_dev(sdhci->mmc),
3106 "Failed to set emc nom clk freq %d\n", err);
3108 nom_emc_freq_set = true;
3112 * If dvfs overrides are disabled, then print the error message but
3113 * continue tuning execution rather than failing tuning altogether.
3115 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, voltage);
3116 if ((err == -EPERM) || (err == -ENOSYS)) {
3118 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3119 * when DVFS override is not enabled. Continue tuning
3120 * with default core voltage
3122 SDHCI_TEGRA_DBG("dvfs overrides disabled. No overrides set\n");
3125 dev_err(mmc_dev(sdhci->mmc),
3126 "failed to set vcore override %dmv\n", voltage);
3128 /* Revert emc clock to normal freq */
3129 if (nom_emc_freq_set) {
3130 err = clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
3132 dev_err(mmc_dev(sdhci->mmc),
3133 "Failed to revert emc nom clk freq %d\n", err);
/*
 * Run the tuning window scan at each voltage required by this frequency's
 * vcore constraint mask: set the override voltage, then collect tap
 * window data at that voltage.
 * NOTE(review): excerpted listing — error returns, braces and the final
 * return are on missing lines.
 */
3139 static int sdhci_tegra_run_tuning(struct sdhci_host *sdhci,
3140 struct tegra_tuning_data *tuning_data)
3142 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3143 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3146 u8 i, vcore_mask = 0;
3148 vcore_mask = tuning_data->constraints.vcore_mask;
3149 for (i = 0; i < tuning_data->nr_voltages; i++) {
/* get_tuning_voltage consumes bits from vcore_mask per iteration */
3150 voltage = get_tuning_voltage(tegra_host, &vcore_mask);
3151 err = sdhci_tegra_set_tuning_voltage(sdhci, voltage);
3153 dev_err(mmc_dev(sdhci->mmc),
3154 "Unable to set override voltage.\n");
3158 /* Get the tuning window info */
3159 SDHCI_TEGRA_DBG("Getting tuning windows...\n");
3160 err = sdhci_tegra_get_tap_window_data(sdhci, tuning_data);
3162 dev_err(mmc_dev(sdhci->mmc),
3163 "Failed to get tap win %d\n", err);
3166 SDHCI_TEGRA_DBG("%s: %d tuning window data obtained\n",
3167 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
/*
 * Sanity-check the stored best tap value for the current clock, program
 * it into the controller, and confirm it by issuing one tuning command.
 * NOTE(review): excerpted listing — the error return for an invalid tap
 * and the final return are on missing lines.
 */
3172 static int sdhci_tegra_verify_best_tap(struct sdhci_host *sdhci)
3174 struct tegra_tuning_data *tuning_data;
3177 tuning_data = sdhci_tegra_get_tuning_data(sdhci, sdhci->max_clk);
/* Reject out-of-range tap values before touching the hardware */
3178 if ((tuning_data->best_tap_value < 0) ||
3179 (tuning_data->best_tap_value > MAX_TAP_VALUES)) {
3180 dev_err(mmc_dev(sdhci->mmc),
3181 "Trying to verify invalid best tap value\n");
3184 dev_info(mmc_dev(sdhci->mmc),
3185 "%s: tuning freq %dhz, best tap %d\n",
3186 __func__, tuning_data->freq_hz,
3187 tuning_data->best_tap_value);
3190 /* Set the best tap value */
3191 sdhci_tegra_set_tap_delay(sdhci, tuning_data->best_tap_value);
3193 /* Run tuning after setting the best tap value */
3194 err = sdhci_tegra_issue_tuning_cmd(sdhci);
3196 dev_err(mmc_dev(sdhci->mmc),
3197 "%dMHz best tap value verification failed %d\n",
3198 tuning_data->freq_hz, err);
3202 static int sdhci_tegra_execute_tuning(struct sdhci_host *sdhci, u32 opcode)
3204 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3205 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3206 struct tegra_tuning_data *tuning_data;
3207 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
3212 u8 i, set_retuning = 0;
3213 bool force_retuning = false;
3216 /* Tuning is valid only in SDR104 and SDR50 modes */
3217 ctrl_2 = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
3218 if (!(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
3219 (((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
3220 (sdhci->flags & SDHCI_SDR50_NEEDS_TUNING))))
3223 /* Tuning should be done only for MMC_BUS_WIDTH_8 and MMC_BUS_WIDTH_4 */
3224 if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
3225 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8;
3226 else if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
3227 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4;
3231 SDHCI_TEGRA_DBG("%s: Starting freq tuning\n", mmc_hostname(sdhci->mmc));
3232 enable_lb_clk = (soc_data->nvquirks &
3233 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK) &&
3234 (tegra_host->instance == 2);
3235 if (enable_lb_clk) {
3236 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
3238 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3239 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
3241 mutex_lock(&tuning_mutex);
3243 /* Set the tuning command to be used */
3244 tegra_host->tuning_opcode = opcode;
3247 * Disable all interrupts signalling.Enable interrupt status
3248 * detection for buffer read ready and data crc. We use
3249 * polling for tuning as it involves less overhead.
3251 ier = sdhci_readl(sdhci, SDHCI_INT_ENABLE);
3252 sdhci_writel(sdhci, 0, SDHCI_SIGNAL_ENABLE);
3253 sdhci_writel(sdhci, SDHCI_INT_DATA_AVAIL |
3254 SDHCI_INT_DATA_CRC, SDHCI_INT_ENABLE);
3257 * If tuning is already done and retune request is not set, then skip
3258 * best tap value calculation and use the old best tap value. If the
3259 * previous best tap value verification failed, force retuning.
3261 if (tegra_host->tuning_status == TUNING_STATUS_DONE) {
3262 err = sdhci_tegra_verify_best_tap(sdhci);
3264 dev_err(mmc_dev(sdhci->mmc),
3265 "Prev best tap failed. Re-running tuning\n");
3266 force_retuning = true;
3272 if (tegra_host->force_retune == true) {
3273 force_retuning = true;
3274 tegra_host->force_retune = false;
3277 tegra_host->tuning_status = 0;
3278 err = sdhci_tegra_get_tuning_constraints(sdhci, force_retuning);
3280 dev_err(mmc_dev(sdhci->mmc),
3281 "Failed to get tuning constraints\n");
3285 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
3286 tuning_data = &tegra_host->tuning_data[i];
3287 if (tuning_data->tuning_done && !force_retuning)
3290 SDHCI_TEGRA_DBG("%s: Setting tuning freq%d\n",
3291 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
3292 tegra_sdhci_set_clock(sdhci, tuning_data->freq_hz);
3294 SDHCI_TEGRA_DBG("%s: Calculating estimated tuning values\n",
3295 mmc_hostname(sdhci->mmc));
3296 err = calculate_estimated_tuning_values(tegra_host->speedo,
3297 tuning_data, tegra_host->boot_vcore_mv);
3301 SDHCI_TEGRA_DBG("Running tuning...\n");
3302 err = sdhci_tegra_run_tuning(sdhci, tuning_data);
3306 SDHCI_TEGRA_DBG("calculating best tap value\n");
3307 err = sdhci_tegra_calculate_best_tap(sdhci, tuning_data);
3311 err = sdhci_tegra_verify_best_tap(sdhci);
3312 if (!err && !set_retuning) {
3313 tuning_data->tuning_done = true;
3314 tegra_host->tuning_status |= TUNING_STATUS_DONE;
3316 tegra_host->tuning_status |= TUNING_STATUS_RETUNE;
3320 /* Release any override core voltages set */
3321 sdhci_tegra_set_tuning_voltage(sdhci, 0);
3323 /* Enable interrupts. Enable full range for core voltage */
3324 sdhci_writel(sdhci, ier, SDHCI_INT_ENABLE);
3325 sdhci_writel(sdhci, ier, SDHCI_SIGNAL_ENABLE);
3326 mutex_unlock(&tuning_mutex);
3328 SDHCI_TEGRA_DBG("%s: Freq tuning done\n", mmc_hostname(sdhci->mmc));
3329 if (enable_lb_clk) {
3330 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
3332 /* Tuning is failed and card will try to enumerate in
3333 * Legacy High Speed mode. So, Enable External Loopback
3337 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3340 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3342 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
/*
 * Late-init hook: once the system has fully booted, allow core-voltage
 * overrides during tuning and stop pinning the boot voltage. Registered
 * via late_initcall so it runs after all earlier init levels.
 */
3347 static int __init sdhci_tegra_enable_vcore_override_tuning(void)
3349 vcore_overrides_allowed = true;
3350 maintain_boot_voltage = false;
3353 late_initcall(sdhci_tegra_enable_vcore_override_tuning);
/*
 * tegra_sdhci_suspend() - host .suspend op.
 *
 * Gates the controller clock, disables the slot power rails when a card
 * is present, and — if a card-detect GPIO exists and is wake-capable —
 * arms it as a wakeup IRQ so card insertion/removal can wake the system.
 * NOTE(review): interior lines (error checks, return path, closing
 * braces) are missing from this listing.
 */
3355 static int tegra_sdhci_suspend(struct sdhci_host *sdhci)
3357 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3358 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3360 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
3361 const struct tegra_sdhci_platform_data *plat;
3362 unsigned int cd_irq;
3364 tegra_sdhci_set_clock(sdhci, 0);
3366 /* Disable the power rails if any */
3367 if (tegra_host->card_present) {
3368 err = tegra_sdhci_configure_regulators(tegra_host,
3369 CONFIG_REG_DIS, 0, 0);
3371 dev_err(mmc_dev(sdhci->mmc),
3372 "Regulators disable in suspend failed %d\n", err);
3374 plat = pdev->dev.platform_data;
3375 if (plat && gpio_is_valid(plat->cd_gpio)) {
3376 if (!plat->cd_wakeup_incapable) {
3377 /* Enable wake irq at end of suspend */
3378 cd_irq = gpio_to_irq(plat->cd_gpio);
3379 err = enable_irq_wake(cd_irq);
3381 dev_err(mmc_dev(sdhci->mmc),
3382 "SD card wake-up event registration for irq=%d failed with error: %d\n",
/*
 * tegra_sdhci_resume() - host .resume op; mirror of tegra_sdhci_suspend.
 *
 * Disarms the card-detect wake IRQ, re-samples card presence from the CD
 * GPIO (active low), restarts the clock at the 400 kHz identification
 * frequency, re-enables power rails when a card is present, restores the
 * signalling voltage from the OCR mask, and — when MMC_PM_KEEP_POWER is
 * set — resets the controller and re-asserts SDHCI_POWER_ON before
 * running pad calibration.
 * NOTE(review): interior lines are missing from this listing.
 */
3389 static int tegra_sdhci_resume(struct sdhci_host *sdhci)
3391 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3392 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3393 struct platform_device *pdev;
3394 struct tegra_sdhci_platform_data *plat;
3395 unsigned int signal_voltage = 0;
3397 unsigned int cd_irq;
3399 pdev = to_platform_device(mmc_dev(sdhci->mmc));
3400 plat = pdev->dev.platform_data;
3402 if (plat && gpio_is_valid(plat->cd_gpio)) {
3403 /* disable wake capability at start of resume */
3404 if (!plat->cd_wakeup_incapable) {
3405 cd_irq = gpio_to_irq(plat->cd_gpio);
3406 disable_irq_wake(cd_irq);
/* CD GPIO is active low: 0 == card present. */
3408 tegra_host->card_present =
3409 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
3412 /* Setting the min identification clock of freq 400KHz */
3413 tegra_sdhci_set_clock(sdhci, 400000);
3415 /* Enable the power rails if any */
3416 if (tegra_host->card_present) {
3417 err = tegra_sdhci_configure_regulators(tegra_host,
3418 CONFIG_REG_EN, 0, 0);
3420 dev_err(mmc_dev(sdhci->mmc),
3421 "Regulators enable in resume failed %d\n", err);
3424 if (tegra_host->vdd_io_reg) {
3425 if (plat && (plat->mmc_data.ocr_mask &
3426 SDHOST_1V8_OCR_MASK))
3427 signal_voltage = MMC_SIGNAL_VOLTAGE_180;
3429 signal_voltage = MMC_SIGNAL_VOLTAGE_330;
3430 tegra_sdhci_signal_voltage_switch(sdhci,
3435 /* Reset the controller and power on if MMC_KEEP_POWER flag is set*/
3436 if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3437 tegra_sdhci_reset(sdhci, SDHCI_RESET_ALL);
3438 sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
3441 tegra_sdhci_do_calibration(sdhci, signal_voltage);
/*
 * tegra_sdhci_post_resume() - .platform_resume op. After resume, gate
 * the controller clock again when no card is present (or the card never
 * enumerated) and the clock was left running.
 */
3447 static void tegra_sdhci_post_resume(struct sdhci_host *sdhci)
3449 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3450 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3452 /* Turn OFF the clocks if the device is not present */
3453 if ((!tegra_host->card_present || !sdhci->mmc->card) &&
3454 tegra_host->clk_enabled)
3455 tegra_sdhci_set_clock(sdhci, 0);
3459 * For tegra specific tuning, core voltage has to be fixed at different
3460 * voltages to get the tap values. Fixing the core voltage during tuning for one
3461 * device might affect transfers of other SDMMC devices. Check if tuning mutex
3462 * is locked before starting a data transfer. The new tuning procedure might
3463 * take at max 1.5s for completion for a single run. Taking DFS into count,
3464 * setting the max timeout for tuning mutex check a 3 secs. Since tuning is
3465 * run only during boot or the first time device is inserted, there wouldn't
3466 * be any delays in cmd/xfer execution once devices enumeration is done.
/*
 * NOTE(review): the polling body (delay + timeout decrement) is missing
 * from this listing; only the mutex_is_locked() loop head and the
 * timeout error message are visible. timeout starts at 300 iterations.
 */
3468 static void tegra_sdhci_get_bus(struct sdhci_host *sdhci)
3470 unsigned int timeout = 300;
3472 while (mutex_is_locked(&tuning_mutex)) {
3476 dev_err(mmc_dev(sdhci->mmc),
3477 "Tuning mutex locked for long time\n");
3484 * The host/device can be powered off before the retuning request is handled in
3485 * case of SDIDO being off if Wifi is turned off, sd card removal etc. In such
3486 * cases, cancel the pending tuning timer and remove any core voltage
3487 * constraints that are set earlier.
3489 static void tegra_sdhci_power_off(struct sdhci_host *sdhci, u8 power_mode)
3491 int retuning_req_set = 0;
/* A retune is considered pending if the timer is armed or the flag is set. */
3493 retuning_req_set = (timer_pending(&sdhci->tuning_timer) ||
3494 (sdhci->flags & SDHCI_NEEDS_RETUNING));
3496 if (retuning_req_set) {
3497 del_timer_sync(&sdhci->tuning_timer);
/* Drop our boot-voltage reference; release the override on last ref. */
3499 if (boot_volt_req_refcount)
3500 --boot_volt_req_refcount;
3502 if (!boot_volt_req_refcount) {
3503 sdhci_tegra_set_tuning_voltage(sdhci, 0);
3504 SDHCI_TEGRA_DBG("%s: Release override as host is off\n",
3505 mmc_hostname(sdhci->mmc));
/*
 * debugfs "polling_period" read hook: report the DFS polling interval
 * from the host's device statistics, if present.
 */
3510 static int show_polling_period(void *data, u64 *value)
3512 struct sdhci_host *host = (struct sdhci_host *)data;
3514 if (host->mmc->dev_stats != NULL)
3515 *value = host->mmc->dev_stats->polling_interval;
/*
 * debugfs "polling_period" write hook: set the DFS polling interval.
 * The (missing-in-listing) clamp limits the period to 1 second.
 */
3520 static int set_polling_period(void *data, u64 value)
3522 struct sdhci_host *host = (struct sdhci_host *)data;
3524 if (host->mmc->dev_stats != NULL) {
3525 /* Limiting the maximum polling period to 1 sec */
3528 host->mmc->dev_stats->polling_interval = value;
/*
 * debugfs "active_load_high_threshold" read hook: report the frequency
 * governor's high-load threshold (percent), if governor data exists.
 */
3533 static int show_active_load_high_threshold(void *data, u64 *value)
3535 struct sdhci_host *host = (struct sdhci_host *)data;
3536 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3537 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3538 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
3540 if (gov_data != NULL)
3541 *value = gov_data->act_load_high_threshold;
/*
 * debugfs "active_load_high_threshold" write hook: set the frequency
 * governor's high-load threshold. The (missing-in-listing) clamp caps
 * the value at 100 percent.
 */
3546 static int set_active_load_high_threshold(void *data, u64 value)
3548 struct sdhci_host *host = (struct sdhci_host *)data;
3549 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3550 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3551 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
3553 if (gov_data != NULL) {
3554 /* Maximum threshold load percentage is 100.*/
3557 gov_data->act_load_high_threshold = value;
/*
 * debugfs "clk_gate_disabled" read hook: report whether clock gating is
 * currently disabled (dbg_cfg.clk_ungated) for this host.
 */
3563 static int show_disableclkgating_value(void *data, u64 *value)
3565 struct sdhci_host *host = (struct sdhci_host *)data;
3567 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3568 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3569 if (tegra_host != NULL)
3570 *value = tegra_host->dbg_cfg.clk_ungated;
/*
 * debugfs "clk_gate_disabled" write hook: toggle MMC_CAP2_CLOCK_GATING
 * on the host's caps2 and record the state in dbg_cfg.clk_ungated, then
 * push the change via set_ios. NOTE(review): the branch structure and
 * the caps2 assignment lines are partially missing from this listing.
 */
3575 static int set_disableclkgating_value(void *data, u64 value)
3577 struct sdhci_host *host = (struct sdhci_host *)data;
3579 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3580 if (pltfm_host != NULL) {
3581 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3582 /* Set the CAPS2 register to reflect
3583 * the clk gating value
3585 if (tegra_host != NULL) {
3587 host->mmc->ops->set_ios(host->mmc,
3589 tegra_host->dbg_cfg.clk_ungated = true;
3591 ~MMC_CAP2_CLOCK_GATING;
3593 tegra_host->dbg_cfg.clk_ungated = false;
3595 MMC_CAP2_CLOCK_GATING;
/*
 * debugfs "trim_value" write hook: program a trim delay override.
 * Requires clock gating to be disabled and the clock running, so the
 * vendor register write actually takes effect; otherwise prints a hint.
 */
3603 static int set_trim_override_value(void *data, u64 value)
3605 struct sdhci_host *host = (struct sdhci_host *)data;
3607 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3608 if (pltfm_host != NULL) {
3609 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3610 if (tegra_host != NULL) {
3611 /* Make sure clock gating is disabled */
3612 if ((tegra_host->dbg_cfg.clk_ungated) &&
3613 (tegra_host->clk_enabled)) {
3614 sdhci_tegra_set_trim_delay(host, value);
3615 tegra_host->dbg_cfg.trim_val =
3618 pr_info("%s: Disable clock gating before setting value\n",
3619 mmc_hostname(host->mmc));
/*
 * debugfs "trim_value" read hook: report the last trim delay override
 * recorded in dbg_cfg.trim_val.
 */
3627 static int show_trim_override_value(void *data, u64 *value)
3629 struct sdhci_host *host = (struct sdhci_host *)data;
3631 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3632 if (pltfm_host != NULL) {
3633 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3634 if (tegra_host != NULL)
3635 *value = tegra_host->dbg_cfg.trim_val;
/*
 * debugfs "tap_value" read hook: report the last tap delay override
 * recorded in dbg_cfg.tap_val.
 */
3641 static int show_tap_override_value(void *data, u64 *value)
3643 struct sdhci_host *host = (struct sdhci_host *)data;
3645 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3646 if (pltfm_host != NULL) {
3647 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3648 if (tegra_host != NULL)
3649 *value = tegra_host->dbg_cfg.tap_val;
/*
 * debugfs "tap_value" write hook: program a tap delay override.
 * Like set_trim_override_value(), this requires clock gating disabled
 * and the clock running; otherwise it prints a hint and does nothing.
 */
3655 static int set_tap_override_value(void *data, u64 value)
3657 struct sdhci_host *host = (struct sdhci_host *)data;
3659 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3660 if (pltfm_host != NULL) {
3661 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3662 if (tegra_host != NULL) {
3663 /* Make sure clock gating is disabled */
3664 if ((tegra_host->dbg_cfg.clk_ungated) &&
3665 (tegra_host->clk_enabled)) {
3666 sdhci_tegra_set_tap_delay(host, value);
3667 tegra_host->dbg_cfg.tap_val = value;
3669 pr_info("%s: Disable clock gating before setting value\n",
3670 mmc_hostname(host->mmc));
/*
 * debugfs file_operations for the show_*/set_* pairs above, built with
 * DEFINE_SIMPLE_ATTRIBUTE ("%llu\n" formatting). Wired up in
 * sdhci_tegra_error_stats_debugfs().
 */
3677 DEFINE_SIMPLE_ATTRIBUTE(sdhci_polling_period_fops, show_polling_period,
3678 set_polling_period, "%llu\n");
3679 DEFINE_SIMPLE_ATTRIBUTE(sdhci_active_load_high_threshold_fops,
3680 show_active_load_high_threshold,
3681 set_active_load_high_threshold, "%llu\n");
3682 DEFINE_SIMPLE_ATTRIBUTE(sdhci_disable_clkgating_fops,
3683 show_disableclkgating_value,
3684 set_disableclkgating_value, "%llu\n");
3685 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_trim_data_fops,
3686 show_trim_override_value,
3687 set_trim_override_value, "%llu\n");
3688 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_tap_data_fops,
3689 show_tap_override_value,
3690 set_tap_override_value, "%llu\n");
/*
 * sdhci_tegra_error_stats_debugfs() - build the per-host debugfs tree:
 * <dev>/error_stats, <dev>/dfs_stats_dir/{dfs_stats,polling_period,
 * active_load_high_threshold}, <dev>/override_data/{clk_gate_disabled,
 * tap_value,trim_value}, plus clk_gate_tmout_ticks when the delayed
 * clock-gate quirk is active. On any creation failure, saved_line
 * records __LINE__ and the whole tree is torn down with
 * debugfs_remove_recursive(); the error path logs the failing line.
 * NOTE(review): the goto-to-cleanup jumps are among the missing lines.
 */
3692 static void sdhci_tegra_error_stats_debugfs(struct sdhci_host *host)
3694 struct dentry *root = host->debugfs_root;
3695 struct dentry *dfs_root;
3696 unsigned saved_line;
3699 root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
3700 if (IS_ERR_OR_NULL(root)) {
3701 saved_line = __LINE__;
3704 host->debugfs_root = root;
3707 dfs_root = debugfs_create_dir("dfs_stats_dir", root);
3708 if (IS_ERR_OR_NULL(dfs_root)) {
3709 saved_line = __LINE__;
3713 if (!debugfs_create_file("error_stats", S_IRUSR, root, host,
3714 &sdhci_host_fops)) {
3715 saved_line = __LINE__;
3718 if (!debugfs_create_file("dfs_stats", S_IRUSR, dfs_root, host,
3719 &sdhci_host_dfs_fops)) {
3720 saved_line = __LINE__;
3723 if (!debugfs_create_file("polling_period", 0644, dfs_root, (void *)host,
3724 &sdhci_polling_period_fops)) {
3725 saved_line = __LINE__;
3728 if (!debugfs_create_file("active_load_high_threshold", 0644,
3729 dfs_root, (void *)host,
3730 &sdhci_active_load_high_threshold_fops)) {
3731 saved_line = __LINE__;
/* Second sub-directory: manual tap/trim override knobs. */
3735 dfs_root = debugfs_create_dir("override_data", root);
3736 if (IS_ERR_OR_NULL(dfs_root)) {
3737 saved_line = __LINE__;
3741 if (!debugfs_create_file("clk_gate_disabled", 0644,
3742 dfs_root, (void *)host,
3743 &sdhci_disable_clkgating_fops)) {
3744 saved_line = __LINE__;
3748 if (!debugfs_create_file("tap_value", 0644,
3749 dfs_root, (void *)host,
3750 &sdhci_override_tap_data_fops)) {
3751 saved_line = __LINE__;
3755 if (!debugfs_create_file("trim_value", 0644,
3756 dfs_root, (void *)host,
3757 &sdhci_override_trim_data_fops)) {
3758 saved_line = __LINE__;
3761 if (IS_QUIRKS2_DELAYED_CLK_GATE(host)) {
3762 host->clk_gate_tmout_ticks = -1;
3763 if (!debugfs_create_u32("clk_gate_tmout_ticks",
3765 root, (u32 *)&host->clk_gate_tmout_ticks)) {
3766 saved_line = __LINE__;
/* Error path: tear down everything created so far and log the line. */
3774 debugfs_remove_recursive(root);
3775 host->debugfs_root = NULL;
3777 pr_err("%s %s: Failed to initialize debugfs functionality at line=%d\n", __func__,
3778 mmc_hostname(host->mmc), saved_line);
/*
 * sdhci_handle_boost_mode_tap() - sysfs "cmd_state" store handler.
 *
 * Parses a tap command from the buffer: TAP_CMD_TRIM_DEFAULT_VOLTAGE
 * selects the best tap value for the normal core-voltage range, and
 * TAP_CMD_TRIM_HIGH_VOLTAGE selects the nominal (high-voltage) best tap
 * value. Only acts for UHS SD or HS200 eMMC cards and when the value
 * actually changes. If the host clock is gated, it is temporarily
 * enabled to program the tap register; the function waits out any
 * in-flight read/write before taking host->lock for the switch.
 * NOTE(review): several guard/return lines are missing in this listing.
 */
3782 static ssize_t sdhci_handle_boost_mode_tap(struct device *dev,
3783 struct device_attribute *attr, const char *buf, size_t count)
3786 struct mmc_card *card;
3787 char *p = (char *)buf;
3788 struct sdhci_host *host = dev_get_drvdata(dev);
3789 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3790 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3791 struct tegra_tuning_data *tuning_data;
3794 bool clk_set_for_tap_prog = false;
3796 tap_cmd = memparse(p, &p);
3798 card = host->mmc->card;
3802 /* if not uhs -- no tuning and no tap value to set */
3803 if (!mmc_sd_card_uhs(card) && !mmc_card_hs200(card))
3806 /* if no change in tap value -- just exit */
3807 if (tap_cmd == tegra_host->tap_cmd)
3810 if ((tap_cmd != TAP_CMD_TRIM_DEFAULT_VOLTAGE) &&
3811 (tap_cmd != TAP_CMD_TRIM_HIGH_VOLTAGE)) {
3812 pr_info("echo 1 > cmd_state # to set normal voltage\n");
3813 pr_info("echo 2 > cmd_state # to set high voltage\n");
3817 tegra_host->tap_cmd = tap_cmd;
3818 tuning_data = sdhci_tegra_get_tuning_data(host, host->max_clk);
3819 /* Check if host clock is enabled */
3820 if (!tegra_host->clk_enabled) {
3821 /* Nothing to do if the host is not powered ON */
3822 if (host->mmc->ios.power_mode != MMC_POWER_ON)
3825 tegra_sdhci_set_clock(host, host->mmc->ios.clock);
3826 clk_set_for_tap_prog = true;
3830 /* Wait for any on-going data transfers */
3831 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
3832 while (present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) {
3837 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
3840 spin_lock(&host->lock);
3842 case TAP_CMD_TRIM_DEFAULT_VOLTAGE:
3843 /* set tap value for voltage range 1.1 to 1.25 */
3844 sdhci_tegra_set_tap_delay(host, tuning_data->best_tap_value);
3847 case TAP_CMD_TRIM_HIGH_VOLTAGE:
3848 /* set tap value for voltage range 1.25 to 1.39 */
3849 sdhci_tegra_set_tap_delay(host,
3850 tuning_data->nom_best_tap_value);
3853 spin_unlock(&host->lock);
/* If we enabled the clock just for programming, gate it again. */
3854 if (clk_set_for_tap_prog) {
3855 tegra_sdhci_set_clock(host, 0);
3856 clk_set_for_tap_prog = false;
/*
 * sdhci_show_turbo_mode() - sysfs "cmd_state" show handler: print the
 * current tap command (see sdhci_handle_boost_mode_tap for the values).
 */
3861 static ssize_t sdhci_show_turbo_mode(struct device *dev,
3862 struct device_attribute *attr, char *buf)
3864 struct sdhci_host *host = dev_get_drvdata(dev);
3865 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3866 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3868 return sprintf(buf, "%d\n", tegra_host->tap_cmd);
/* sysfs attribute "cmd_state" (0644): show/store tap command handlers above. */
3871 static DEVICE_ATTR(cmd_state, 0644, sdhci_show_turbo_mode,
3872 sdhci_handle_boost_mode_tap);
/*
 * tegra_sdhci_reboot_notify() - reboot-notifier callback: disable the
 * slot regulators on reboot/shutdown so the card powers down cleanly.
 * NOTE(review): the event switch and return value are among the lines
 * missing from this listing.
 */
3874 static int tegra_sdhci_reboot_notify(struct notifier_block *nb,
3875 unsigned long event, void *data)
3877 struct sdhci_tegra *tegra_host =
3878 container_of(nb, struct sdhci_tegra, reboot_notify);
3884 err = tegra_sdhci_configure_regulators(tegra_host,
3885 CONFIG_REG_DIS, 0, 0);
3887 pr_err("Disable regulator in reboot notify failed %d\n",
/*
 * tegra_sdhci_ios_config_enter() - .platform_ios_config_enter op, run
 * before the core applies new ios settings.
 *
 * Tegra SDMMC register access requires a running controller clock, so a
 * minimum clock (f_min) is forced when neither the host nor the request
 * has one. Also swaps pltfm_host->clk between the dedicated DDR and SDR
 * clocks when entering/leaving MMC_TIMING_UHS_DDR50, bouncing the clock
 * off and back on around the switch; skipped if only one clock exists.
 */
3894 void tegra_sdhci_ios_config_enter(struct sdhci_host *sdhci, struct mmc_ios *ios)
3896 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3897 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3898 struct clk *new_mode_clk;
3899 bool change_clk = false;
3902 * Tegra sdmmc controllers require clock to be enabled for any register
3903 * access. Set the minimum controller clock if no clock is requested.
3905 if (!sdhci->clock && !ios->clock) {
3906 tegra_sdhci_set_clock(sdhci, sdhci->mmc->f_min);
3907 sdhci->clock = sdhci->mmc->f_min;
3908 } else if (ios->clock && (ios->clock != sdhci->clock)) {
3909 tegra_sdhci_set_clock(sdhci, ios->clock);
3913 * Check for DDR50 mode setting and set ddr_clk if not already
3914 * done. Return if only one clock option is available.
3916 if (!tegra_host->ddr_clk || !tegra_host->sdr_clk) {
3919 if ((ios->timing == MMC_TIMING_UHS_DDR50) &&
3920 !tegra_host->is_ddr_clk_set) {
3922 new_mode_clk = tegra_host->ddr_clk;
3923 } else if ((ios->timing != MMC_TIMING_UHS_DDR50) &&
3924 tegra_host->is_ddr_clk_set) {
3926 new_mode_clk = tegra_host->sdr_clk;
3930 tegra_sdhci_set_clock(sdhci, 0);
3931 pltfm_host->clk = new_mode_clk;
3932 /* Restore the previous frequency */
3933 tegra_sdhci_set_clock(sdhci, sdhci->max_clk);
3934 tegra_host->is_ddr_clk_set =
3935 !tegra_host->is_ddr_clk_set;
/*
 * tegra_sdhci_ios_config_exit() - .platform_ios_config_exit op, run
 * after ios settings are applied: handle retuning/voltage cleanup on
 * power-off, then gate the controller clock once no register accesses
 * remain (unless the host asked to skip clock gating).
 */
3940 void tegra_sdhci_ios_config_exit(struct sdhci_host *sdhci, struct mmc_ios *ios)
3943 * Do any required handling for retuning requests before powering off
3946 if (ios->power_mode == MMC_POWER_OFF)
3947 tegra_sdhci_power_off(sdhci, ios->power_mode);
3950 * In case of power off, turn off controller clock now as all the
3951 * required register accesses are already done.
3953 if (!ios->clock && !sdhci->mmc->skip_host_clkgate)
3954 tegra_sdhci_set_clock(sdhci, 0);
/*
 * tegra_sdhci_get_drive_strength() - .get_drive_strength op: ignore the
 * negotiated host/card capabilities and always return the platform
 * data's default drive type.
 */
3957 static int tegra_sdhci_get_drive_strength(struct sdhci_host *sdhci,
3958 unsigned int max_dtr, int host_drv, int card_drv)
3960 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3961 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3962 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
3964 return plat->default_drv_type;
/*
 * sdhci_ops vtable shared by all Tegra SoC variants below; the DFS
 * governor hooks are compiled in only with CONFIG_MMC_FREQ_SCALING.
 */
3967 static const struct sdhci_ops tegra_sdhci_ops = {
3968 .get_ro = tegra_sdhci_get_ro,
3969 .get_cd = tegra_sdhci_get_cd,
3970 .read_l = tegra_sdhci_readl,
3971 .read_w = tegra_sdhci_readw,
3972 .write_l = tegra_sdhci_writel,
3973 .write_w = tegra_sdhci_writew,
3974 .platform_bus_width = tegra_sdhci_buswidth,
3975 .set_clock = tegra_sdhci_set_clock,
3976 .suspend = tegra_sdhci_suspend,
3977 .resume = tegra_sdhci_resume,
3978 .platform_resume = tegra_sdhci_post_resume,
3979 .platform_reset_exit = tegra_sdhci_reset_exit,
3980 .platform_get_bus = tegra_sdhci_get_bus,
3981 .platform_ios_config_enter = tegra_sdhci_ios_config_enter,
3982 .platform_ios_config_exit = tegra_sdhci_ios_config_exit,
3983 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
3984 .switch_signal_voltage = tegra_sdhci_signal_voltage_switch,
3985 .switch_signal_voltage_exit = tegra_sdhci_do_calibration,
3986 .execute_freq_tuning = sdhci_tegra_execute_tuning,
3987 .sd_error_stats = sdhci_tegra_sd_error_stats,
3988 #ifdef CONFIG_MMC_FREQ_SCALING
3989 .dfs_gov_init = sdhci_tegra_freq_gov_init,
3990 .dfs_gov_get_target_freq = sdhci_tegra_get_target_freq,
3992 .get_drive_strength = tegra_sdhci_get_drive_strength,
/* Tegra114 (T11x) sdhci-pltfm platform data: common quirks + shared ops. */
3995 static struct sdhci_pltfm_data sdhci_tegra11_pdata = {
3996 .quirks = TEGRA_SDHCI_QUIRKS,
3997 .quirks2 = TEGRA_SDHCI_QUIRKS2,
3998 .ops = &tegra_sdhci_ops,
/*
 * Tegra114 SoC data: NV-specific quirks, parent-clock candidates, the
 * three tuning frequencies, and the T11x tuning coefficient tables.
 */
4001 static struct sdhci_tegra_soc_data soc_data_tegra11 = {
4002 .pdata = &sdhci_tegra11_pdata,
4003 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
4004 NVQUIRK_SET_DRIVE_STRENGTH |
4005 NVQUIRK_SET_TRIM_DELAY |
4006 NVQUIRK_ENABLE_DDR50 |
4007 NVQUIRK_ENABLE_HS200 |
4008 NVQUIRK_INFINITE_ERASE_TIMEOUT |
4009 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK |
4010 NVQUIRK_DISABLE_SDMMC4_CALIB,
4011 .parent_clk_list = {"pll_p", "pll_c"},
4012 .tuning_freq_list = {81600000, 156000000, 200000000},
4013 .t2t_coeffs = t11x_tuning_coeffs,
4014 .t2t_coeffs_count = 3,
4015 .tap_hole_coeffs = t11x_tap_hole_coeffs,
4016 .tap_hole_coeffs_count = 12,
/*
 * Tegra124 (T12x) sdhci-pltfm platform data: common quirks plus
 * host-off-card-on and 64-bit DMA addressing support.
 */
4019 static struct sdhci_pltfm_data sdhci_tegra12_pdata = {
4020 .quirks = TEGRA_SDHCI_QUIRKS,
4021 .quirks2 = TEGRA_SDHCI_QUIRKS2 |
4022 SDHCI_QUIRK2_HOST_OFF_CARD_ON |
4023 SDHCI_QUIRK2_SUPPORT_64BIT_DMA |
4024 SDHCI_QUIRK2_USE_64BIT_ADDR,
4025 .ops = &tegra_sdhci_ops,
/*
 * Tegra124 SoC data: note the middle tuning frequency differs from
 * T11x (136 MHz vs 156 MHz) and the T12x coefficient tables are used.
 */
4028 static struct sdhci_tegra_soc_data soc_data_tegra12 = {
4029 .pdata = &sdhci_tegra12_pdata,
4030 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
4031 NVQUIRK_SET_TRIM_DELAY |
4032 NVQUIRK_ENABLE_DDR50 |
4033 NVQUIRK_ENABLE_HS200 |
4034 NVQUIRK_INFINITE_ERASE_TIMEOUT |
4035 NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD |
4036 NVQUIRK_HIGH_FREQ_TAP_PROCEDURE |
4037 NVQUIRK_SET_CALIBRATION_OFFSETS |
4038 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK,
4039 .parent_clk_list = {"pll_p", "pll_c"},
4040 .tuning_freq_list = {81600000, 136000000, 200000000},
4041 .t2t_coeffs = t12x_tuning_coeffs,
4042 .t2t_coeffs_count = 3,
4043 .tap_hole_coeffs = t12x_tap_hole_coeffs,
4044 .tap_hole_coeffs_count = 13,
/*
 * Device-tree match table consumed by of_match_device() in probe.
 * NOTE(review): the terminating empty sentinel entry and closing brace
 * are among the lines missing from this listing.
 */
4047 static const struct of_device_id sdhci_tegra_dt_match[] = {
4048 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra12 },
4049 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra11 },
/*
 * Export the OF match table for module autoloading. BUG FIX: the second
 * argument must name the table actually declared above
 * (sdhci_tegra_dt_match); the previous "sdhci_dt_ids" is an undeclared
 * identifier, which would fail to compile as a module and break the
 * modalias metadata udev uses for autoload.
 */
4052 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
/*
 * sdhci_tegra_dt_parse_pdata() - build tegra_sdhci_platform_data from
 * the device-tree node: CD/WP/power GPIOs, bus width, tap/trim delays,
 * clock limits, UHS mask, built-in flag, and an OCR mask selected from
 * the "mmc-ocr-mask" code (0..3 -> 1.8V/2.8V/3.2V/3.3V masks —
 * presumably; the case labels are among the missing lines). Uses
 * devm_kzalloc, so the allocation is device-managed.
 * NOTE(review): the NULL check after allocation, the ocr-mask switch
 * structure, and the return statement are missing from this listing.
 */
4054 static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata(
4055 struct platform_device *pdev)
4058 struct tegra_sdhci_platform_data *plat;
4059 struct device_node *np = pdev->dev.of_node;
4065 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
4067 dev_err(&pdev->dev, "Can't allocate platform data\n");
4071 plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
4072 plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
4073 plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
4075 if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
4079 of_property_read_u32(np, "tap-delay", &plat->tap_delay);
4080 of_property_read_u32(np, "trim-delay", &plat->trim_delay);
4081 of_property_read_u32(np, "ddr-clk-limit", &plat->ddr_clk_limit);
4082 of_property_read_u32(np, "max-clk-limit", &plat->max_clk_limit);
4084 of_property_read_u32(np, "uhs_mask", &plat->uhs_mask);
4086 if (of_find_property(np, "built-in", NULL))
4087 plat->mmc_data.built_in = 1;
4089 if (!of_property_read_u32(np, "mmc-ocr-mask", &val)) {
4091 plat->mmc_data.ocr_mask = MMC_OCR_1V8_MASK;
4093 plat->mmc_data.ocr_mask = MMC_OCR_2V8_MASK;
4095 plat->mmc_data.ocr_mask = MMC_OCR_3V2_MASK;
4097 plat->mmc_data.ocr_mask = MMC_OCR_3V3_MASK;
4102 static int sdhci_tegra_probe(struct platform_device *pdev)
4104 const struct of_device_id *match;
4105 const struct sdhci_tegra_soc_data *soc_data;
4106 struct sdhci_host *host;
4107 struct sdhci_pltfm_host *pltfm_host;
4108 struct tegra_sdhci_platform_data *plat;
4109 struct sdhci_tegra *tegra_host;
4110 unsigned int low_freq;
4114 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
4116 soc_data = match->data;
4118 /* Use id tables and remove the following chip defines */
4119 #if defined(CONFIG_ARCH_TEGRA_11x_SOC)
4120 soc_data = &soc_data_tegra11;
4122 soc_data = &soc_data_tegra12;
4126 host = sdhci_pltfm_init(pdev, soc_data->pdata);
4128 /* sdio delayed clock gate quirk in sdhci_host used */
4129 host->quirks2 |= SDHCI_QUIRK2_DELAYED_CLK_GATE;
4132 return PTR_ERR(host);
4134 pltfm_host = sdhci_priv(host);
4136 plat = pdev->dev.platform_data;
4139 plat = sdhci_tegra_dt_parse_pdata(pdev);
4142 dev_err(mmc_dev(host->mmc), "missing platform data\n");
4147 /* FIXME: This is for until dma-mask binding is supported in DT.
4148 * Set coherent_dma_mask for each Tegra SKUs.
4149 * If dma_mask is NULL, set it to coherent_dma_mask. */
4150 if (soc_data == &soc_data_tegra11)
4151 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
4153 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
4155 if (!pdev->dev.dma_mask)
4156 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
4158 tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
4160 dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
4165 tegra_host->plat = plat;
4166 pdev->dev.platform_data = plat;
4168 tegra_host->sd_stat_head = devm_kzalloc(&pdev->dev,
4169 sizeof(struct sdhci_tegra_sd_stats), GFP_KERNEL);
4170 if (!tegra_host->sd_stat_head) {
4171 dev_err(mmc_dev(host->mmc), "failed to allocate sd_stat_head\n");
4176 tegra_host->soc_data = soc_data;
4177 pltfm_host->priv = tegra_host;
4179 for (i = 0; i < ARRAY_SIZE(soc_data->parent_clk_list); i++) {
4180 if (!soc_data->parent_clk_list[i])
4182 if (!strcmp(soc_data->parent_clk_list[i], "pll_c")) {
4183 pll_c = clk_get_sys(NULL, "pll_c");
4184 if (IS_ERR(pll_c)) {
4185 rc = PTR_ERR(pll_c);
4186 dev_err(mmc_dev(host->mmc),
4187 "clk error in getting pll_c: %d\n", rc);
4189 pll_c_rate = clk_get_rate(pll_c);
4192 if (!strcmp(soc_data->parent_clk_list[i], "pll_p")) {
4193 pll_p = clk_get_sys(NULL, "pll_p");
4194 if (IS_ERR(pll_p)) {
4195 rc = PTR_ERR(pll_p);
4196 dev_err(mmc_dev(host->mmc),
4197 "clk error in getting pll_p: %d\n", rc);
4199 pll_p_rate = clk_get_rate(pll_p);
4203 #ifdef CONFIG_MMC_EMBEDDED_SDIO
4204 if (plat->mmc_data.embedded_sdio)
4205 mmc_set_embedded_sdio_data(host->mmc,
4206 &plat->mmc_data.embedded_sdio->cis,
4207 &plat->mmc_data.embedded_sdio->cccr,
4208 plat->mmc_data.embedded_sdio->funcs,
4209 plat->mmc_data.embedded_sdio->num_funcs);
4212 if (gpio_is_valid(plat->power_gpio)) {
4213 rc = gpio_request(plat->power_gpio, "sdhci_power");
4215 dev_err(mmc_dev(host->mmc),
4216 "failed to allocate power gpio\n");
4219 gpio_direction_output(plat->power_gpio, 1);
4222 if (gpio_is_valid(plat->cd_gpio)) {
4223 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
4225 dev_err(mmc_dev(host->mmc),
4226 "failed to allocate cd gpio\n");
4229 gpio_direction_input(plat->cd_gpio);
4231 tegra_host->card_present =
4232 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
4234 } else if (plat->mmc_data.register_status_notify) {
4235 plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host);
4238 if (plat->mmc_data.status) {
4239 plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc));
4242 if (gpio_is_valid(plat->wp_gpio)) {
4243 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
4245 dev_err(mmc_dev(host->mmc),
4246 "failed to allocate wp gpio\n");
4249 gpio_direction_input(plat->wp_gpio);
4253 * If there is no card detect gpio, assume that the
4254 * card is always present.
4256 if (!gpio_is_valid(plat->cd_gpio))
4257 tegra_host->card_present = 1;
4259 if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) {
4260 tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN;
4261 tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX;
4262 } else if (plat->mmc_data.ocr_mask & MMC_OCR_2V8_MASK) {
4263 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_2V8;
4264 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4265 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V2_MASK) {
4266 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V2;
4267 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4268 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V3_MASK) {
4269 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V3;
4270 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4273 * Set the minV and maxV to default
4274 * voltage range of 2.7V - 3.6V
4276 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN;
4277 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4280 tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc),
4282 if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) {
4283 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
4284 "Assuming vddio_sdmmc is not required.\n",
4285 "vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg));
4286 tegra_host->vdd_io_reg = NULL;
4288 rc = tegra_sdhci_configure_regulators(tegra_host,
4289 CONFIG_REG_SET_VOLT,
4290 tegra_host->vddio_min_uv,
4291 tegra_host->vddio_max_uv);
4293 dev_err(mmc_dev(host->mmc),
4294 "Init volt(%duV-%duV) setting failed %d\n",
4295 tegra_host->vddio_min_uv,
4296 tegra_host->vddio_max_uv, rc);
4297 regulator_put(tegra_host->vdd_io_reg);
4298 tegra_host->vdd_io_reg = NULL;
4302 tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc),
4304 if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) {
4305 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
4306 " Assuming vddio_sd_slot is not required.\n",
4307 "vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg));
4308 tegra_host->vdd_slot_reg = NULL;
4311 if (tegra_host->card_present) {
4312 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_EN,
4315 dev_err(mmc_dev(host->mmc),
4316 "Enable regulators failed in probe %d\n", rc);
4321 tegra_pd_add_device(&pdev->dev);
4322 pm_runtime_enable(&pdev->dev);
4324 /* Get the ddr clock */
4325 tegra_host->ddr_clk = clk_get(mmc_dev(host->mmc), "ddr");
4326 if (IS_ERR(tegra_host->ddr_clk)) {
4327 dev_err(mmc_dev(host->mmc), "ddr clk err\n");
4328 tegra_host->ddr_clk = NULL;
/*
 * NOTE(review): this is the tail of the probe routine; the listing here is
 * non-contiguous (embedded original line numbers jump), so closing braces,
 * else-arms and goto labels fall in the gaps between the visible lines.
 */
/* Acquire the SDR (non-DDR) interface clock; NULL clk id selects the
 * controller's default clock. */
4331 /* Get high speed clock */
4332 tegra_host->sdr_clk = clk_get(mmc_dev(host->mmc), NULL);
4333 if (IS_ERR(tegra_host->sdr_clk)) {
4334 dev_err(mmc_dev(host->mmc), "sdr clk err\n");
4335 tegra_host->sdr_clk = NULL;
4336 /* If both ddr and sdr clks are missing, then fail probe */
4337 if (!tegra_host->ddr_clk && !tegra_host->sdr_clk) {
4338 dev_err(mmc_dev(host->mmc),
4339 "Failed to get ddr and sdr clks\n");
/* Prefer the SDR clock as the platform clock when available; otherwise
 * fall back to the DDR clock (the else-arm line is not visible here). */
4345 if (tegra_host->sdr_clk) {
4346 pltfm_host->clk = tegra_host->sdr_clk;
4347 tegra_host->is_ddr_clk_set = false;
4349 pltfm_host->clk = tegra_host->ddr_clk;
4350 tegra_host->is_ddr_clk_set = true;
/* Remember whether the chosen clock is parented to PLL_C so later clock
 * reconfiguration can account for it. */
4353 if (clk_get_parent(pltfm_host->clk) == pll_c)
4354 tegra_host->is_parent_pllc = true;
/* Take a runtime-PM reference before enabling the controller clock. */
4356 pm_runtime_get_sync(&pdev->dev);
4357 rc = clk_prepare_enable(pltfm_host->clk);
/* Optional EMC (external memory controller) clock; probe continues
 * without it, capped at SDMMC_EMC_MAX_FREQ when present. */
4361 tegra_host->emc_clk = devm_clk_get(mmc_dev(host->mmc), "emc");
4362 if (IS_ERR_OR_NULL(tegra_host->emc_clk)) {
4363 dev_err(mmc_dev(host->mmc), "Can't get emc clk\n");
4364 tegra_host->emc_clk = NULL;
4366 clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
/* Optional AHB system clock, same best-effort policy as the EMC clock. */
4369 tegra_host->sclk = devm_clk_get(mmc_dev(host->mmc), "sclk");
4370 if (IS_ERR_OR_NULL(tegra_host->sclk)) {
4371 dev_err(mmc_dev(host->mmc), "Can't get sclk clock\n");
4372 tegra_host->sclk = NULL;
4374 clk_set_rate(tegra_host->sclk, SDMMC_AHB_MAX_FREQ);
/* Publish driver state and seed per-instance settings from platform data. */
4376 pltfm_host->priv = tegra_host;
4377 tegra_host->clk_enabled = true;
4378 host->is_clk_on = tegra_host->clk_enabled;
4379 mutex_init(&tegra_host->set_clock_mutex);
4381 tegra_host->max_clk_limit = plat->max_clk_limit;
4382 tegra_host->ddr_clk_limit = plat->ddr_clk_limit;
4383 tegra_host->instance = pdev->id;
4384 tegra_host->tap_cmd = TAP_CMD_TRIM_DEFAULT_VOLTAGE;
4385 tegra_host->speedo = plat->cpu_speedo;
4386 dev_info(mmc_dev(host->mmc), "Speedo value %d\n", tegra_host->speedo);
4387 host->mmc->pm_caps |= plat->pm_caps;
4388 host->mmc->pm_flags |= plat->pm_flags;
/* Advertise MMC core capabilities for this host. */
4390 host->mmc->caps |= MMC_CAP_ERASE;
4391 /* enable 1/8V DDR capable */
4392 host->mmc->caps |= MMC_CAP_1_8V_DDR;
4394 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
4395 host->mmc->caps |= MMC_CAP_SDIO_IRQ;
4396 host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
/* Built-in (soldered) devices are non-removable and skip PM notifiers. */
4397 if (plat->mmc_data.built_in) {
4398 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4400 host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
4402 /* disable access to boot partitions */
4403 host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
/* HS200 is only offered on SoCs newer than Tegra2/Tegra3, gated by an
 * NVQUIRK flag (and forced on for FPGA platforms). */
4405 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
4406 if (soc_data->nvquirks & NVQUIRK_ENABLE_HS200)
4407 host->mmc->caps2 |= MMC_CAP2_HS200;
4408 #ifdef CONFIG_TEGRA_FPGA_PLATFORM
4409 /* Enable HS200 mode */
4410 host->mmc->caps2 |= MMC_CAP2_HS200;
4412 host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
4413 host->mmc->caps |= MMC_CAP_CMD23;
4414 host->mmc->caps2 |= MMC_CAP2_PACKED_CMD;
4419 * Enable dyamic frequency scaling support only if the platform clock
4420 * limit is higher than the lowest supported frequency by tuning.
/* Scan the SoC tuning frequency list; loop body lines 4424-4426 are not
 * visible here — presumably they select low_freq. TODO confirm. */
4422 for (i = 0; i < TUNING_FREQ_COUNT; i++) {
4423 low_freq = soc_data->tuning_freq_list[i];
4427 if (plat->en_freq_scaling && (plat->max_clk_limit > low_freq))
4428 host->mmc->caps2 |= MMC_CAP2_FREQ_SCALING;
4430 if (!plat->disable_clock_gate)
4431 host->mmc->caps2 |= MMC_CAP2_CLOCK_GATING;
/* Vcore tuning constraints; only non-zero platform values override. */
4433 if (plat->nominal_vcore_mv)
4434 tegra_host->nominal_vcore_mv = plat->nominal_vcore_mv;
4435 if (plat->min_vcore_override_mv)
4436 tegra_host->min_vcore_override_mv = plat->min_vcore_override_mv;
4437 if (plat->boot_vcore_mv)
4438 tegra_host->boot_vcore_mv = plat->boot_vcore_mv;
4439 dev_info(mmc_dev(host->mmc),
4440 "Tuning constraints: nom_mv %d, boot_mv %d, min_or_mv %d\n",
4441 tegra_host->nominal_vcore_mv, tegra_host->boot_vcore_mv,
4442 tegra_host->min_vcore_override_mv);
4445 * If nominal voltage is equal to boot voltage, there is no need for
4446 * nominal voltage tuning.
4448 if (plat->nominal_vcore_mv <= plat->boot_vcore_mv)
4449 plat->en_nominal_vcore_tuning = false;
/* Register the host with the SDHCI core; delayed work handles deferred
 * clock gating. */
4451 INIT_DELAYED_WORK(&host->delayed_clk_gate_wrk, delayed_clk_gate_cb);
4452 rc = sdhci_add_host(host);
/* Card-detect GPIO: threaded IRQ on both edges (insert/remove). The
 * handler argument line is in a listing gap. */
4456 if (gpio_is_valid(plat->cd_gpio)) {
4457 rc = request_threaded_irq(gpio_to_irq(plat->cd_gpio), NULL,
4459 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
4460 mmc_hostname(host->mmc), host);
4462 dev_err(mmc_dev(host->mmc), "request irq error\n");
4463 goto err_cd_irq_req;
4466 sdhci_tegra_error_stats_debugfs(host);
4467 device_create_file(&pdev->dev, &dev_attr_cmd_state);
4469 /* Enable async suspend/resume to reduce LP0 latency */
4470 device_enable_async_suspend(&pdev->dev);
/* When the rail is powered off at shutdown, hook reboot notification. */
4472 if (plat->power_off_rail) {
4473 tegra_host->reboot_notify.notifier_call =
4474 tegra_sdhci_reboot_notify;
4475 register_reboot_notifier(&tegra_host->reboot_notify);
/* Snapshot debug configuration (tap/trim/clock-gating) for debugfs.
 * The RHS of tap_val (original line 4479) is in a listing gap. */
4477 #ifdef CONFIG_DEBUG_FS
4478 tegra_host->dbg_cfg.tap_val =
4480 tegra_host->dbg_cfg.trim_val =
4481 plat->ddr_trim_delay;
4482 tegra_host->dbg_cfg.clk_ungated =
4483 plat->disable_clock_gate;
/*
 * Error unwind path (labels live in the listing gaps): release the CD
 * GPIO/IRQ, disable and release whichever clock was enabled, drop the
 * runtime-PM reference, free remaining GPIOs, then free the pltfm host.
 */
4488 if (gpio_is_valid(plat->cd_gpio))
4489 gpio_free(plat->cd_gpio);
4491 if (tegra_host->is_ddr_clk_set)
4492 clk_disable_unprepare(tegra_host->ddr_clk);
4494 clk_disable_unprepare(tegra_host->sdr_clk);
4495 pm_runtime_put_sync(&pdev->dev);
4497 if (tegra_host->ddr_clk)
4498 clk_put(tegra_host->ddr_clk);
4499 if (tegra_host->sdr_clk)
4500 clk_put(tegra_host->sdr_clk);
4502 if (gpio_is_valid(plat->wp_gpio))
4503 gpio_free(plat->wp_gpio);
4505 if (gpio_is_valid(plat->cd_gpio))
4506 free_irq(gpio_to_irq(plat->cd_gpio), host);
4508 if (gpio_is_valid(plat->power_gpio))
4509 gpio_free(plat->power_gpio);
4512 sdhci_pltfm_free(pdev);
/*
 * sdhci_tegra_remove - tear down the Tegra SDHCI host on device removal.
 *
 * Unwinds probe in reverse order: unregister the host from the SDHCI
 * core, disable and release regulators, free GPIOs and the card-detect
 * IRQ, disable/put all clocks, drop the runtime-PM reference, and free
 * the platform host. NOTE(review): listing is non-contiguous; the `rc`
 * declaration, else-arms and the final return fall in the gaps.
 */
4516 static int sdhci_tegra_remove(struct platform_device *pdev)
4518 struct sdhci_host *host = platform_get_drvdata(pdev);
4519 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4520 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4521 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
/* An all-ones read of SDHCI_INT_STATUS means the register bus no longer
 * responds: treat the controller as dead so the core skips touching it. */
4522 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
4525 sdhci_remove_host(host, dead);
/* Best-effort regulator disable: a failure is logged but does not abort
 * the rest of the teardown. */
4527 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_DIS, 0, 0);
4529 dev_err(mmc_dev(host->mmc),
4530 "Regulator disable in remove failed %d\n", rc);
4532 if (tegra_host->vdd_slot_reg)
4533 regulator_put(tegra_host->vdd_slot_reg);
4534 if (tegra_host->vdd_io_reg)
4535 regulator_put(tegra_host->vdd_io_reg);
/* Release write-protect, card-detect (with its IRQ) and power GPIOs. */
4537 if (gpio_is_valid(plat->wp_gpio))
4538 gpio_free(plat->wp_gpio);
4540 if (gpio_is_valid(plat->cd_gpio)) {
4541 free_irq(gpio_to_irq(plat->cd_gpio), host);
4542 gpio_free(plat->cd_gpio);
4545 if (gpio_is_valid(plat->power_gpio))
4546 gpio_free(plat->power_gpio);
/* Disable whichever interface clock (DDR or SDR) is currently set, then
 * balance the runtime-PM reference taken in probe. */
4548 if (tegra_host->clk_enabled) {
4549 if (tegra_host->is_ddr_clk_set)
4550 clk_disable_unprepare(tegra_host->ddr_clk);
4552 clk_disable_unprepare(tegra_host->sdr_clk);
4553 pm_runtime_put_sync(&pdev->dev);
/* Drop the clk_get() references (emc/sclk were devm-managed in probe,
 * so only ddr/sdr need explicit clk_put). */
4556 if (tegra_host->ddr_clk)
4557 clk_put(tegra_host->ddr_clk);
4558 if (tegra_host->sdr_clk)
4559 clk_put(tegra_host->sdr_clk);
4561 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on)
4562 clk_disable_unprepare(tegra_host->emc_clk);
4563 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on)
4564 clk_disable_unprepare(tegra_host->sclk);
/* Mirror probe: only unregister the reboot notifier if it was added. */
4565 if (plat->power_off_rail)
4566 unregister_reboot_notifier(&tegra_host->reboot_notify);
4568 sdhci_pltfm_free(pdev);
/*
 * Platform driver glue: binds "sdhci-tegra" platform devices (and DT
 * matches via sdhci_tegra_dt_match) to the probe/remove routines above,
 * reusing the common sdhci-pltfm PM operations. NOTE(review): the
 * `.driver = {` nesting lines fall in a listing gap here.
 */
4573 static struct platform_driver sdhci_tegra_driver = {
4575 .name = "sdhci-tegra",
4576 .owner = THIS_MODULE,
4577 .of_match_table = sdhci_tegra_dt_match,
4578 .pm = SDHCI_PLTFM_PMOPS,
4580 .probe = sdhci_tegra_probe,
4581 .remove = sdhci_tegra_remove,
/* Expands to module init/exit that register/unregister the driver. */
4584 module_platform_driver(sdhci_tegra_driver);
4586 MODULE_DESCRIPTION("SDHCI driver for Tegra");
4587 MODULE_AUTHOR("Google, Inc.");
4588 MODULE_LICENSE("GPL v2");