2 * Copyright (C) 2010 Google, Inc.
4 * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/err.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/platform_device.h>
21 #include <linux/clk.h>
24 #include <linux/of_device.h>
25 #include <linux/of_gpio.h>
26 #include <linux/gpio.h>
27 #include <linux/slab.h>
28 #include <linux/mmc/card.h>
29 #include <linux/mmc/host.h>
30 #include <linux/module.h>
31 #include <linux/mmc/sd.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/delay.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/tegra_pm_domains.h>
36 #include <linux/dma-mapping.h>
41 #include <linux/debugfs.h>
42 #include <linux/seq_file.h>
43 #include <linux/reboot.h>
44 #include <linux/devfreq.h>
45 #include <linux/clk/tegra.h>
46 #include <linux/tegra-soc.h>
48 #include <linux/platform_data/mmc-sdhci-tegra.h>
49 #include <mach/pinmux.h>
51 #include "sdhci-pltfm.h"
54 #define SDHCI_TEGRA_DBG(stuff...) pr_info(stuff)
56 #define SDHCI_TEGRA_DBG(stuff...) do {} while (0)
59 #define SDHCI_VNDR_CLK_CTRL 0x100
60 #define SDHCI_VNDR_CLK_CTRL_SDMMC_CLK 0x1
61 #define SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE 0x8
62 #define SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE 0x4
63 #define SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK 0x2
64 #define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT 16
65 #define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT 24
66 #define SDHCI_VNDR_CLK_CTRL_SDR50_TUNING 0x20
67 #define SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK 0x2
68 #define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK 0xFF
69 #define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK 0x1F
71 #define SDHCI_VNDR_MISC_CTRL 0x120
72 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT 0x8
73 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT 0x10
74 #define SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT 0x200
75 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0 0x20
76 #define SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT 0x1
77 #define SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK 0x180
78 #define SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT 17
80 #define SDHCI_VNDR_PRESET_VAL0_0 0x1d4
81 #define SDCLK_FREQ_SEL_HS_SHIFT 20
82 #define SDCLK_FREQ_SEL_DEFAULT_SHIFT 10
84 #define SDHCI_VNDR_PRESET_VAL1_0 0x1d8
85 #define SDCLK_FREQ_SEL_SDR50_SHIFT 20
86 #define SDCLK_FREQ_SEL_SDR25_SHIFT 10
88 #define SDHCI_VNDR_PRESET_VAL2_0 0x1dc
89 #define SDCLK_FREQ_SEL_DDR50_SHIFT 10
91 #define SDMMC_SDMEMCOMPPADCTRL 0x1E0
92 #define SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK 0xF
93 #define SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK 0x80000000
95 #define SDMMC_AUTO_CAL_CONFIG 0x1E4
96 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START 0x80000000
97 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE 0x20000000
98 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT 0x8
100 #define SDMMC_AUTO_CAL_STATUS 0x1EC
101 #define SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE 0x80000000
102 #define SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET 24
103 #define PULLUP_ADJUSTMENT_OFFSET 20
105 #define SDMMC_VENDOR_ERR_INTR_STATUS_0 0x108
107 #define SDMMC_IO_SPARE_0 0x1F0
108 #define SPARE_OUT_3_OFFSET 19
110 #define SDMMC_VENDOR_IO_TRIM_CNTRL_0 0x1AC
111 #define SDMMC_VENDOR_IO_TRIM_CNTRL_0_SEL_VREG_MASK 0x4
113 /* Erratum: Version register is invalid in HW */
114 #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
115 /* Erratum: Enable block gap interrupt detection */
116 #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
117 /* Do not enable auto calibration if the platform doesn't support */
118 #define NVQUIRK_DISABLE_AUTO_CALIBRATION BIT(2)
119 /* Set Calibration Offsets */
120 #define NVQUIRK_SET_CALIBRATION_OFFSETS BIT(3)
121 /* Set Drive Strengths */
122 #define NVQUIRK_SET_DRIVE_STRENGTH BIT(4)
123 /* Enable PADPIPE CLKEN */
124 #define NVQUIRK_ENABLE_PADPIPE_CLKEN BIT(5)
125 /* DISABLE SPI_MODE CLKEN */
126 #define NVQUIRK_DISABLE_SPI_MODE_CLKEN BIT(6)
128 #define NVQUIRK_SET_TAP_DELAY BIT(7)
130 #define NVQUIRK_SET_TRIM_DELAY BIT(8)
131 /* Enable SDHOST v3.0 support */
132 #define NVQUIRK_ENABLE_SD_3_0 BIT(9)
133 /* Enable SDR50 mode */
134 #define NVQUIRK_ENABLE_SDR50 BIT(10)
135 /* Enable SDR104 mode */
136 #define NVQUIRK_ENABLE_SDR104 BIT(11)
137 /* Enable DDR50 mode */
138 #define NVQUIRK_ENABLE_DDR50 BIT(12)
139 /* Enable Frequency Tuning for SDR50 mode */
140 #define NVQUIRK_ENABLE_SDR50_TUNING BIT(13)
141 /* Enable HS200 mode */
142 #define NVQUIRK_ENABLE_HS200 BIT(14)
143 /* Enable infinite erase timeout */
144 #define NVQUIRK_INFINITE_ERASE_TIMEOUT BIT(15)
145 /* No Calibration for sdmmc4 */
146 #define NVQUIRK_DISABLE_SDMMC4_CALIB BIT(16)
147 /* ENABLE FEEDBACK IO CLOCK */
148 #define NVQUIRK_EN_FEEDBACK_CLK BIT(17)
149 /* Disable AUTO CMD23 */
150 #define NVQUIRK_DISABLE_AUTO_CMD23 BIT(18)
151 /* Shadow write xfer mode reg and write it alongwith CMD register */
152 #define NVQUIRK_SHADOW_XFER_MODE_REG BIT(19)
153 /* update PAD_E_INPUT_OR_E_PWRD bit */
154 #define NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD BIT(20)
155 /* Set the PIPE_STAGES mask in the vendor misc control register */
156 #define NVQUIRK_SET_PIPE_STAGES_MASK_0 BIT(21)
157 #define NVQUIRK_HIGH_FREQ_TAP_PROCEDURE BIT(22)
158 /* Disable SDMMC3 external loopback */
159 #define NVQUIRK_DISABLE_EXTERNAL_LOOPBACK BIT(23)
160 /* Select fix tap hole margins */
161 #define NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS BIT(24)
162 /* Enable HS400 mode */
163 #define NVQUIRK_ENABLE_HS400 BIT(26)
164 /* Enable AUTO CMD23 */
165 #define NVQUIRK_ENABLE_AUTO_CMD23 BIT(27)
166 #define NVQUIRK_SET_SDMEMCOMP_VREF_SEL BIT(28)
167 /* Special PAD control register settings are needed for T210 */
168 #define NVQUIRK_UPDATE_PAD_CNTRL_REG BIT(29)
169 #define NVQUIRK_UPDATE_PIN_CNTRL_REG BIT(30)
170 /* Use timeout clk for write crc status data timeout counter */
171 #define NVQUIRK_USE_TMCLK_WR_CRC_TIMEOUT BIT(31)
173 /* Enable T210 specific SDMMC WAR - sd card voltage switch */
174 #define NVQUIRK2_CONFIG_PWR_DET BIT(0)
175 /* Enable T210 specific SDMMC WAR - Tuning Step Size, Tuning Iterations*/
176 #define NVQUIRK2_UPDATE_HW_TUNING_CONFG BIT(1)
178 /* Common subset of quirks for Tegra3 and later sdmmc controllers */
179 #define TEGRA_SDHCI_NVQUIRKS (NVQUIRK_ENABLE_PADPIPE_CLKEN | \
180 NVQUIRK_DISABLE_SPI_MODE_CLKEN | \
181 NVQUIRK_EN_FEEDBACK_CLK | \
182 NVQUIRK_SET_TAP_DELAY | \
183 NVQUIRK_ENABLE_SDR50_TUNING | \
184 NVQUIRK_ENABLE_SDR50 | \
185 NVQUIRK_ENABLE_SDR104 | \
186 NVQUIRK_SHADOW_XFER_MODE_REG | \
187 NVQUIRK_DISABLE_AUTO_CMD23)
189 #define TEGRA_SDHCI_QUIRKS (SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | \
190 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
191 SDHCI_QUIRK_SINGLE_POWER_WRITE | \
192 SDHCI_QUIRK_NO_HISPD_BIT | \
193 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | \
194 SDHCI_QUIRK_BROKEN_CARD_DETECTION)
196 #define TEGRA_SDHCI_QUIRKS2 (SDHCI_QUIRK2_PRESET_VALUE_BROKEN | \
197 SDHCI_QUIRK2_NON_STD_VOLTAGE_SWITCHING | \
198 SDHCI_QUIRK2_NON_STANDARD_TUNING | \
199 SDHCI_QUIRK2_NO_CALC_MAX_DISCARD_TO | \
200 SDHCI_QUIRK2_REG_ACCESS_REQ_HOST_CLK)
202 #define IS_QUIRKS2_DELAYED_CLK_GATE(host) \
203 (host->quirks2 & SDHCI_QUIRK2_DELAYED_CLK_GATE)
205 /* Interface voltages */
206 #define SDHOST_1V8_OCR_MASK 0x8
207 #define SDHOST_HIGH_VOLT_MIN 2700000
208 #define SDHOST_HIGH_VOLT_MAX 3600000
209 #define SDHOST_HIGH_VOLT_2V8 2800000
210 #define SDHOST_LOW_VOLT_MIN 1800000
211 #define SDHOST_LOW_VOLT_MAX 1800000
212 #define SDHOST_HIGH_VOLT_3V2 3200000
213 #define SDHOST_HIGH_VOLT_3V3 3300000
215 /* Clock related definitions */
216 #define MAX_DIVISOR_VALUE 128
217 #define DEFAULT_SDHOST_FREQ 50000000
218 #define SDMMC_AHB_MAX_FREQ 150000000
219 #define SDMMC_EMC_MAX_FREQ 150000000
220 #define SDMMC_EMC_NOM_VOLT_FREQ 900000000
222 /* Tuning related definitions */
223 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8 128
224 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4 64
225 #define MAX_TAP_VALUES 255
226 #define TUNING_FREQ_COUNT 3
227 #define TUNING_VOLTAGES_COUNT 3
228 #define TUNING_RETRIES 1
229 #define DFS_FREQ_COUNT 2
230 #define NEG_MAR_CHK_WIN_COUNT 2
231 /* Tuning core voltage requirements */
232 #define NOMINAL_VCORE_TUN BIT(0)
233 #define BOOT_VCORE_TUN BIT(1)
234 #define MIN_OVERRIDE_VCORE_TUN BIT(2)
236 /* Tap cmd sysfs commands */
237 #define TAP_CMD_TRIM_DEFAULT_VOLTAGE 1
238 #define TAP_CMD_TRIM_HIGH_VOLTAGE 2
241 * Defined the chip specific quirks and clock sources. For now, the used clock
242 * sources vary only from chip to chip. If the sources allowed varies from
243 * platform to platform, then move the clock sources list to platform data.
244 * When filling the tuning_freq_list in soc_data, the number of entries should
245 * be equal to TUNING_FREQ_COUNT. Depending on the number of DFS frequencies
246 * set the desired low, high or max frequencies and set the remaining entries
247 * as 0s. The number of entries should always be equal to TUNING_FREQ_COUNT
248 * in order to get the right tuning data.
/*
 * Per-SoC configuration: platform quirks, candidate parent clocks, and the
 * tuning coefficient tables selected at probe time.
 */
250 struct sdhci_tegra_soc_data {
251 const struct sdhci_pltfm_data *pdata;
/* Candidate parent clock names; presumably the driver picks one at probe — TODO confirm */
253 const char *parent_clk_list[2];
/* DFS sampling frequencies; unused entries are 0 (see file header comment) */
254 unsigned int tuning_freq_list[TUNING_FREQ_COUNT];
256 u8 tap_hole_coeffs_count;
257 u8 tap_hole_margins_count;
/* Voltage-dependent tuning coefficient tables (see t11x/t12x tables below) */
258 struct tuning_t2t_coeffs *t2t_coeffs;
259 struct tap_hole_coeffs *tap_hole_coeffs;
260 struct tuning_tap_hole_margins *tap_hole_margins;
/* Regulator configuration operations (enumerators not visible in this view). */
264 enum tegra_regulator_config_ops {
/* DFS tuning frequency indices (enumerators not visible in this view). */
270 enum tegra_tuning_freq {
/*
 * Tap-to-tap (t2t) delay model coefficients: slope/intercept pairs for
 * nominal, maximum and minimum core voltage, keyed by device instance.
 */
276 struct tuning_t2t_coeffs {
280 unsigned int t2t_vnom_slope;
281 unsigned int t2t_vnom_int;
282 unsigned int t2t_vmax_slope;
283 unsigned int t2t_vmax_int;
284 unsigned int t2t_vmin_slope;
285 unsigned int t2t_vmin_int;
/* Initializer helper for tuning_t2t_coeffs table entries. */
288 #define SET_TUNING_COEFFS(_device_id, _vmax, _vmin, _t2t_vnom_slope, \
289 _t2t_vnom_int, _t2t_vmax_slope, _t2t_vmax_int, _t2t_vmin_slope, \
292 .dev_id = _device_id, \
295 .t2t_vnom_slope = _t2t_vnom_slope, \
296 .t2t_vnom_int = _t2t_vnom_int, \
297 .t2t_vmax_slope = _t2t_vmax_slope, \
298 .t2t_vmax_int = _t2t_vmax_int, \
299 .t2t_vmin_slope = _t2t_vmin_slope, \
300 .t2t_vmin_int = _t2t_vmin_int, \
/* T11x per-instance t2t coefficients (vmax/vmin in mV, then slope/intercept pairs). */
303 struct tuning_t2t_coeffs t11x_tuning_coeffs[] = {
304 SET_TUNING_COEFFS("sdhci-tegra.3", 1250, 950, 55, 135434,
305 73, 170493, 243, 455948),
306 SET_TUNING_COEFFS("sdhci-tegra.2", 1250, 950, 50, 129738,
307 73, 168898, 241, 453050),
308 SET_TUNING_COEFFS("sdhci-tegra.0", 1250, 950, 62, 143469,
309 82, 180096, 238, 444285),
/* T12x per-instance t2t coefficients; note vnom == vmax pairs on this SoC. */
312 struct tuning_t2t_coeffs t12x_tuning_coeffs[] = {
313 SET_TUNING_COEFFS("sdhci-tegra.3", 1150, 950, 27, 118295,
314 27, 118295, 48, 188148),
315 SET_TUNING_COEFFS("sdhci-tegra.2", 1150, 950, 29, 124427,
316 29, 124427, 54, 203707),
317 SET_TUNING_COEFFS("sdhci-tegra.0", 1150, 950, 25, 115933,
318 25, 115933, 47, 187224),
/*
 * Tap-hole location model coefficients, keyed by device instance and
 * interface frequency (kHz): slope/intercept pairs for nominal, max and
 * min core voltage.
 */
321 struct tap_hole_coeffs {
323 unsigned int freq_khz;
324 unsigned int thole_vnom_slope;
325 unsigned int thole_vnom_int;
326 unsigned int thole_vmax_slope;
327 unsigned int thole_vmax_int;
328 unsigned int thole_vmin_slope;
329 unsigned int thole_vmin_int;
/* Initializer helper for tap_hole_coeffs table entries. */
332 #define SET_TAP_HOLE_COEFFS(_device_id, _freq_khz, _thole_vnom_slope, \
333 _thole_vnom_int, _thole_vmax_slope, _thole_vmax_int, \
334 _thole_vmin_slope, _thole_vmin_int) \
336 .dev_id = _device_id, \
337 .freq_khz = _freq_khz, \
338 .thole_vnom_slope = _thole_vnom_slope, \
339 .thole_vnom_int = _thole_vnom_int, \
340 .thole_vmax_slope = _thole_vmax_slope, \
341 .thole_vmax_int = _thole_vmax_int, \
342 .thole_vmin_slope = _thole_vmin_slope, \
343 .thole_vmin_int = _thole_vmin_int, \
/* T11x tap-hole coefficients per instance/frequency (some entries elided in this view). */
346 struct tap_hole_coeffs t11x_tap_hole_coeffs[] = {
347 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 765, 102357, 507,
349 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 156000, 1042, 142044, 776,
351 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1215, 167702, 905,
353 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 1925, 284516, 1528,
354 253188, 366, 120001),
355 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 472, 53312, 318,
357 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 156000, 765, 95512, 526,
359 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 949, 121887, 656,
361 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 1901, 259035, 1334,
362 215539, 326, 100986),
363 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 411, 54495, 305,
365 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 156000, 715, 97623, 516,
367 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 905, 124579, 648,
369 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 1893, 264746, 1333,
370 221722, 354, 109880),
/* T12x tap-hole coefficients; vnom and vmax coefficients are identical here. */
373 struct tap_hole_coeffs t12x_tap_hole_coeffs[] = {
374 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 200000, 1037, 106934, 1037,
376 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 136000, 1703, 186307, 1703,
377 186307, 890, 130617),
378 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 100000, 2452, 275601, 2452,
379 275601, 1264, 193957),
380 SET_TAP_HOLE_COEFFS("sdhci-tegra.3", 81600, 3090, 351666, 3090,
381 351666, 1583, 247913),
382 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 204000, 468, 36031, 468,
384 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 200000, 468, 36031, 468,
386 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 136000, 1146, 117841, 1146,
388 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 100000, 1879, 206195, 1879,
389 206195, 953, 141341),
390 SET_TAP_HOLE_COEFFS("sdhci-tegra.2", 81600, 2504, 281460, 2504,
391 281460, 1262, 194452),
392 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 874, 85243, 874,
394 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 1554, 167210, 1554,
395 167210, 793, 115672),
396 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 100000, 2290, 255734, 2290,
397 255734, 1164, 178691),
398 SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 81600, 2916, 331143, 2916,
399 331143, 1480, 232373),
/* Fixed tap-hole safety margin per controller instance (automotive profile). */
402 struct tuning_tap_hole_margins {
404 unsigned int tap_hole_margin;
/* Initializer helper for tuning_tap_hole_margins table entries. */
407 #define SET_TUNING_TAP_HOLE_MARGIN(_device_id, _tap_hole_margin) \
409 .dev_id = _device_id, \
410 .tap_hole_margin = _tap_hole_margin, \
413 struct tuning_tap_hole_margins t12x_automotive_tap_hole_margins[] = {
414 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.3", 13),
415 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.2", 7),
416 SET_TUNING_TAP_HOLE_MARGIN("sdhci-tegra.0", 10),
/* Core-voltage requirement mask (NOMINAL/BOOT/MIN_OVERRIDE bits) per tuning frequency. */
419 struct freq_tuning_constraints {
420 unsigned int vcore_mask;
/* All entries currently require only the boot-time core voltage. */
423 static struct freq_tuning_constraints tuning_vcore_constraints[3] = {
425 .vcore_mask = BOOT_VCORE_TUN,
428 .vcore_mask = BOOT_VCORE_TUN,
431 .vcore_mask = BOOT_VCORE_TUN,
/* Attribute of a tap window edge (enumerators not visible in this view). */
440 enum tap_win_edge_attr {
/* One passing tap window discovered during tuning. */
446 struct tap_window_data {
449 enum tap_win_edge_attr win_start_attr;
450 enum tap_win_edge_attr win_end_attr;
/* Estimated vs. calculated tuning values (fields not visible in this view). */
455 struct tuning_values {
/*
 * Per-frequency tuning state: the constraints, coefficient tables, raw and
 * post-processed tap windows, and the best tap values picked from them.
 */
463 struct tegra_tuning_data {
464 unsigned int freq_hz;
/* Best tap value at nominal core voltage (used for TAP_CMD_TRIM_HIGH_VOLTAGE) */
466 int nom_best_tap_value;
467 struct freq_tuning_constraints constraints;
468 struct tap_hole_coeffs *thole_coeffs;
469 struct tuning_t2t_coeffs *t2t_coeffs;
470 struct tuning_values est_values;
471 struct tuning_values calc_values;
472 struct tap_window_data *tap_data;
473 struct tap_window_data *final_tap_data;
474 u8 num_of_valid_tap_wins;
478 bool is_partial_win_valid;
481 #ifdef CONFIG_MMC_FREQ_SCALING
/* Per-card-type governor tunables (indexed by MMC_TYPE_*). */
482 struct freq_gov_params {
484 u8 polling_interval_ms;
485 u8 active_load_threshold;
/* Defaults are identical for all three card types: 3 idle cycles, 50 ms, 25%. */
488 static struct freq_gov_params gov_params[3] = {
490 .idle_mon_cycles = 3,
491 .polling_interval_ms = 50,
492 .active_load_threshold = 25,
495 .idle_mon_cycles = 3,
496 .polling_interval_ms = 50,
497 .active_load_threshold = 25,
500 .idle_mon_cycles = 3,
501 .polling_interval_ms = 50,
502 .active_load_threshold = 25,
/* Runtime state for the DFS governor of one host. */
507 struct tegra_freq_gov_data {
508 unsigned int curr_active_load;
/* Running average; updated as (avg + curr) / 2 each polling interval */
509 unsigned int avg_active_load;
510 unsigned int act_load_high_threshold;
511 unsigned int max_idle_monitor_cycles;
512 unsigned int curr_freq;
/* Candidate frequencies resolved against pll_c/pll_p at init time */
513 unsigned int freqs[DFS_FREQ_COUNT];
514 unsigned int freq_switch_count;
515 bool monitor_idle_load;
/* Accumulated CRC and timeout error counters, exposed via debugfs. */
518 struct sdhci_tegra_sd_stats {
519 unsigned int data_crc_count;
520 unsigned int cmd_crc_count;
521 unsigned int data_to_count;
522 unsigned int cmd_to_count;
525 #ifdef CONFIG_DEBUG_FS
/* Debugfs override values for tap/trim delays. */
526 struct dbg_cfg_data {
527 unsigned int tap_val;
528 unsigned int trim_val;
/* Per-host private data (enclosing struct declaration is outside this view). */
533 const struct tegra_sdhci_platform_data *plat;
534 const struct sdhci_tegra_soc_data *soc_data;
536 /* ensure atomic set clock calls */
537 struct mutex set_clock_mutex;
538 struct regulator *vdd_io_reg;
539 struct regulator *vdd_slot_reg;
540 struct regulator *vcore_reg;
541 /* Host controller instance */
542 unsigned int instance;
/* IO pad voltage window in microvolts */
544 unsigned int vddio_min_uv;
546 unsigned int vddio_max_uv;
547 /* DDR and low speed modes clock */
549 /* HS200, SDR104 modes clock */
551 /* Check if ddr_clk is being used */
553 /* max clk supported by the platform */
554 unsigned int max_clk_limit;
555 /* max ddr clk supported by the platform */
556 unsigned int ddr_clk_limit;
558 bool is_rail_enabled;
560 bool is_sdmmc_emc_clk_on;
562 bool is_sdmmc_sclk_on;
563 struct sdhci_tegra_sd_stats *sd_stat_head;
564 struct notifier_block reboot_notify;
566 bool set_1v8_calib_offsets;
567 int nominal_vcore_mv;
568 int min_vcore_override_mv;
570 /* Tuning related structures and variables */
571 /* Tuning opcode to be used */
572 unsigned int tuning_opcode;
573 /* Tuning packet size */
574 unsigned int tuning_bsize;
575 /* Num of tuning freqs selected */
576 int tuning_freq_count;
577 unsigned int tap_cmd;
/* TUNING_STATUS_DONE or TUNING_STATUS_RETUNE (see defines below) */
579 unsigned int tuning_status;
581 #define TUNING_STATUS_DONE 1
582 #define TUNING_STATUS_RETUNE 2
583 /* Freq tuning information for each sampling clock freq */
584 struct tegra_tuning_data tuning_data[DFS_FREQ_COUNT];
585 struct tegra_freq_gov_data *gov_data;
587 #ifdef CONFIG_DEBUG_FS
588 /* Override debug config data */
589 struct dbg_cfg_data dbg_cfg;
/* Cached PLL clock handles and their rates, shared by all host instances. */
593 static struct clk *pll_c;
594 static struct clk *pll_p;
595 static unsigned long pll_c_rate;
596 static unsigned long pll_p_rate;
/* Global core-voltage policy flags (set elsewhere in the file). */
597 static bool vcore_overrides_allowed;
598 static bool maintain_boot_voltage;
599 static unsigned int boot_volt_req_refcount;
/* Serializes tuning across all controller instances. */
600 DEFINE_MUTEX(tuning_mutex);
/* Forward declarations for helpers defined later in the file. */
602 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
603 struct sdhci_host *sdhci, unsigned int clock);
604 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
605 unsigned long desired_rate);
606 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
607 unsigned int tap_delay);
608 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
609 u8 option, int min_uV, int max_uV);
610 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
611 unsigned int trim_delay);
612 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
613 unsigned char signal_voltage);
614 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
615 int t2t_tuning_value);
/* debugfs show(): print the accumulated CRC/timeout error counters as a table. */
617 static int show_error_stats_dump(struct seq_file *s, void *data)
619 struct sdhci_host *host = s->private;
620 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
621 struct sdhci_tegra *tegra_host = pltfm_host->priv;
622 struct sdhci_tegra_sd_stats *head;
624 seq_printf(s, "ErrorStatistics:\n");
625 seq_printf(s, "DataCRC\tCmdCRC\tDataTimeout\tCmdTimeout\n");
/* NOTE(review): sd_stat_head is dereferenced without a NULL check here — TODO confirm it is always allocated */
626 head = tegra_host->sd_stat_head;
628 seq_printf(s, "%d\t%d\t%d\t%d\n", head->data_crc_count,
629 head->cmd_crc_count, head->data_to_count,
/* debugfs show(): print DFS governor statistics (polling period, loads, switch count). */
634 static int show_dfs_stats_dump(struct seq_file *s, void *data)
636 struct sdhci_host *host = s->private;
637 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
638 struct sdhci_tegra *tegra_host = pltfm_host->priv;
639 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
641 seq_printf(s, "DFS statistics:\n");
/* Both dev_stats and gov_data may be absent (e.g. DFS never initialized); checked before use. */
643 if (host->mmc->dev_stats != NULL)
644 seq_printf(s, "Polling_period: %d\n",
645 host->mmc->dev_stats->polling_interval);
647 if (gov_data != NULL) {
648 seq_printf(s, "cur_active_load: %d\n",
649 gov_data->curr_active_load);
650 seq_printf(s, "avg_active_load: %d\n",
651 gov_data->avg_active_load);
652 seq_printf(s, "act_load_high_threshold: %d\n",
653 gov_data->act_load_high_threshold);
654 seq_printf(s, "freq_switch_count: %d\n",
655 gov_data->freq_switch_count);
/* debugfs open() callbacks: bind the show functions via single_open(). */
660 static int sdhci_error_stats_dump(struct inode *inode, struct file *file)
662 return single_open(file, show_error_stats_dump, inode->i_private);
665 static int sdhci_dfs_stats_dump(struct inode *inode, struct file *file)
667 return single_open(file, show_dfs_stats_dump, inode->i_private);
/* debugfs file_operations for the error-statistics and DFS-statistics nodes. */
671 static const struct file_operations sdhci_host_fops = {
672 .open = sdhci_error_stats_dump,
675 .release = single_release,
678 static const struct file_operations sdhci_host_dfs_fops = {
679 .open = sdhci_dfs_stats_dump,
682 .release = single_release,
/*
 * 32-bit register read hook. Forces the WRITE_PROTECT bit on in
 * PRESENT_STATE reads so the core never sees the card as write-protected
 * (write-protect is handled via wp_gpio in tegra_sdhci_get_ro instead).
 */
685 static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
689 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
690 /* Use wp_gpio here instead? */
691 val = readl(host->ioaddr + reg);
692 return val | SDHCI_WRITE_PROTECT;
694 return readl(host->ioaddr + reg);
/*
 * 16-bit register read hook. Works around the broken hardware version
 * register (NVQUIRK_FORCE_SDHCI_SPEC_200) by reporting SDHCI spec 2.00.
 */
697 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
699 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
700 struct sdhci_tegra *tegra_host = pltfm_host->priv;
701 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
703 if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
704 (reg == SDHCI_HOST_VERSION))) {
705 return SDHCI_SPEC_200;
707 return readw(host->ioaddr + reg);
/*
 * 32-bit register write hook. Masks timeout/CRC bits out of
 * SIGNAL_ENABLE writes (spurious-error workaround) and, with
 * NVQUIRK_ENABLE_BLOCK_GAP_DET, mirrors the card-interrupt enable into
 * the block-gap control register.
 */
710 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
712 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
713 struct sdhci_tegra *tegra_host = pltfm_host->priv;
714 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
716 /* Seems like we're getting spurious timeout and crc errors, so
717 * disable signalling of them. In case of real errors software
718 * timers should take care of eventually detecting them.
720 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
721 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
723 writel(val, host->ioaddr + reg);
725 if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
726 (reg == SDHCI_INT_ENABLE))) {
727 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
728 if (val & SDHCI_INT_CARD_INT)
732 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
/*
 * 16-bit register write hook. With NVQUIRK_SHADOW_XFER_MODE_REG, the
 * TRANSFER_MODE write is buffered in xfer_mode_shadow and flushed as a
 * single 32-bit write together with the COMMAND register (which shares
 * the same aligned word), avoiding a transient inconsistent state.
 */
736 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
738 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
739 struct sdhci_tegra *tegra_host = pltfm_host->priv;
740 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
742 if (soc_data->nvquirks & NVQUIRK_SHADOW_XFER_MODE_REG) {
744 case SDHCI_TRANSFER_MODE:
746 * Postpone this write, we must do it together with a
747 * command write that is down below.
749 pltfm_host->xfer_mode_shadow = val;
/* COMMAND lives in the upper half-word of the same 32-bit register. */
752 writel((val << 16) | pltfm_host->xfer_mode_shadow,
753 host->ioaddr + SDHCI_TRANSFER_MODE);
754 pltfm_host->xfer_mode_shadow = 0;
759 writew(val, host->ioaddr + reg);
762 #ifdef CONFIG_MMC_FREQ_SCALING
/* Module parameter: set to 1 to disable dynamic frequency scaling at runtime. */
764 static bool disable_scaling __read_mostly;
765 module_param(disable_scaling, bool, 0644);
768 * Dynamic frequency calculation.
769 * The active load for the current period and the average active load
770 * are calculated at the end of each polling interval.
772 * If the current active load is greater than the threshold load, then the
773 * frequency is boosted(156MHz).
774 * If the active load is lower than the threshold, then the load is monitored
775 * for a max of three cycles before reducing the frequency(82MHz). If the
776 * average active load is lower, then the monitoring cycles is reduced.
778 * The active load threshold value for both eMMC and SDIO is set to 25 which
779 * is found to give the optimal power and performance. The polling interval is
782 * The polling interval and active load threshold values can be changed by
783 * the user through sysfs.
/*
 * Governor step for eMMC: boost to the high DFS frequency when the current
 * active load reaches the threshold; otherwise count down idle-monitor
 * cycles (scaled by the average load) before dropping to the low frequency.
 * Returns the desired frequency in Hz.
 */
785 static unsigned long calculate_mmc_target_freq(
786 struct tegra_freq_gov_data *gov_data)
788 unsigned long desired_freq = gov_data->curr_freq;
789 unsigned int type = MMC_TYPE_MMC;
791 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
792 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
793 gov_data->monitor_idle_load = false;
794 gov_data->max_idle_monitor_cycles =
795 gov_params[type].idle_mon_cycles;
797 if (gov_data->monitor_idle_load) {
798 if (!gov_data->max_idle_monitor_cycles) {
799 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
800 gov_data->max_idle_monitor_cycles =
801 gov_params[type].idle_mon_cycles;
803 gov_data->max_idle_monitor_cycles--;
/* First low-load interval: scale the monitor window by the average load. */
806 gov_data->monitor_idle_load = true;
807 gov_data->max_idle_monitor_cycles *=
808 gov_data->avg_active_load;
809 gov_data->max_idle_monitor_cycles /= 100;
/*
 * Governor step for SDIO cards; identical algorithm to
 * calculate_mmc_target_freq() but uses the MMC_TYPE_SDIO parameter row.
 */
816 static unsigned long calculate_sdio_target_freq(
817 struct tegra_freq_gov_data *gov_data)
819 unsigned long desired_freq = gov_data->curr_freq;
820 unsigned int type = MMC_TYPE_SDIO;
822 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
823 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
824 gov_data->monitor_idle_load = false;
825 gov_data->max_idle_monitor_cycles =
826 gov_params[type].idle_mon_cycles;
828 if (gov_data->monitor_idle_load) {
829 if (!gov_data->max_idle_monitor_cycles) {
830 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
831 gov_data->max_idle_monitor_cycles =
832 gov_params[type].idle_mon_cycles;
834 gov_data->max_idle_monitor_cycles--;
837 gov_data->monitor_idle_load = true;
838 gov_data->max_idle_monitor_cycles *=
839 gov_data->avg_active_load;
840 gov_data->max_idle_monitor_cycles /= 100;
/*
 * Governor step for SD cards; identical algorithm to
 * calculate_mmc_target_freq() but uses the MMC_TYPE_SD parameter row.
 */
847 static unsigned long calculate_sd_target_freq(
848 struct tegra_freq_gov_data *gov_data)
850 unsigned long desired_freq = gov_data->curr_freq;
851 unsigned int type = MMC_TYPE_SD;
853 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
854 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
855 gov_data->monitor_idle_load = false;
856 gov_data->max_idle_monitor_cycles =
857 gov_params[type].idle_mon_cycles;
859 if (gov_data->monitor_idle_load) {
860 if (!gov_data->max_idle_monitor_cycles) {
861 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
862 gov_data->max_idle_monitor_cycles =
863 gov_params[type].idle_mon_cycles;
865 gov_data->max_idle_monitor_cycles--;
868 gov_data->monitor_idle_load = true;
869 gov_data->max_idle_monitor_cycles *=
870 gov_data->avg_active_load;
871 gov_data->max_idle_monitor_cycles /= 100;
/*
 * devfreq callback: compute the next clock frequency from the busy/total
 * time reported for the last polling interval, update the running average
 * load, and dispatch to the per-card-type governor helper.
 */
878 static unsigned long sdhci_tegra_get_target_freq(struct sdhci_host *sdhci,
879 struct devfreq_dev_status *dfs_stats)
881 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
882 struct sdhci_tegra *tegra_host = pltfm_host->priv;
883 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
884 unsigned long freq = sdhci->mmc->actual_clock;
887 dev_err(mmc_dev(sdhci->mmc),
888 "No gov data. Continue using current freq %ld", freq);
896 * If clock gating is enabled and clock is currently disabled, then
899 if (!tegra_host->clk_enabled)
/* Active load = busy time as a percentage of the polling window. */
902 if (dfs_stats->total_time) {
903 gov_data->curr_active_load = (dfs_stats->busy_time * 100) /
904 dfs_stats->total_time;
906 gov_data->curr_active_load = 0;
/* Exponential moving average: avg = (avg + curr) / 2. */
909 gov_data->avg_active_load += gov_data->curr_active_load;
910 gov_data->avg_active_load >>= 1;
912 if (sdhci->mmc->card) {
913 if (sdhci->mmc->card->type == MMC_TYPE_SDIO)
914 freq = calculate_sdio_target_freq(gov_data);
915 else if (sdhci->mmc->card->type == MMC_TYPE_MMC)
916 freq = calculate_mmc_target_freq(gov_data);
917 else if (sdhci->mmc->card->type == MMC_TYPE_SD)
918 freq = calculate_sd_target_freq(gov_data);
919 if (gov_data->curr_freq != freq)
920 gov_data->freq_switch_count++;
921 gov_data->curr_freq = freq;
/*
 * Initialize the DFS governor for one host: only for SDR104/HS200 timing,
 * allocate the governor state, resolve each tuning frequency to the best
 * achievable rate from pll_c/pll_p, and load the per-card-type parameters.
 */
927 static int sdhci_tegra_freq_gov_init(struct sdhci_host *sdhci)
929 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
930 struct sdhci_tegra *tegra_host = pltfm_host->priv;
/* DFS is only meaningful in the high-speed UHS/HS200 modes. */
935 if (!((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
936 (sdhci->mmc->ios.timing == MMC_TIMING_MMC_HS200))) {
937 dev_info(mmc_dev(sdhci->mmc),
938 "DFS not required for current operating mode\n");
942 if (!tegra_host->gov_data) {
943 tegra_host->gov_data = devm_kzalloc(mmc_dev(sdhci->mmc),
944 sizeof(struct tegra_freq_gov_data), GFP_KERNEL);
945 if (!tegra_host->gov_data) {
946 dev_err(mmc_dev(sdhci->mmc),
947 "Failed to allocate memory for dfs data\n");
952 /* Find the supported frequencies */
953 dev_info(mmc_dev(sdhci->mmc), "DFS supported freqs");
954 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
955 freq = tegra_host->tuning_data[i].freq_hz;
957 * Check the nearest possible clock with pll_c and pll_p as
958 * the clock sources. Choose the higher frequency.
960 tegra_host->gov_data->freqs[i] =
961 get_nearest_clock_freq(pll_c_rate, freq);
962 freq = get_nearest_clock_freq(pll_p_rate, freq);
963 if (freq > tegra_host->gov_data->freqs[i])
964 tegra_host->gov_data->freqs[i] = freq;
/* NOTE(review): pr_err for an informational list looks unintentional — presumably should be pr_info */
965 pr_err("%d,", tegra_host->gov_data->freqs[i]);
968 tegra_host->gov_data->monitor_idle_load = false;
969 tegra_host->gov_data->curr_freq = sdhci->mmc->actual_clock;
970 if (sdhci->mmc->card) {
971 type = sdhci->mmc->card->type;
972 sdhci->mmc->dev_stats->polling_interval =
973 gov_params[type].polling_interval_ms;
974 tegra_host->gov_data->act_load_high_threshold =
975 gov_params[type].active_load_threshold;
976 tegra_host->gov_data->max_idle_monitor_cycles =
977 gov_params[type].idle_mon_cycles;
/* Card-detect hook: report the cached presence state (updated by the CD IRQ). */
985 static unsigned int tegra_sdhci_get_cd(struct sdhci_host *sdhci)
987 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
988 struct sdhci_tegra *tegra_host = pltfm_host->priv;
990 return tegra_host->card_present;
/*
 * Read-only (write-protect) hook: sample the WP GPIO when one is
 * configured; the elided branch presumably reports not-protected otherwise.
 */
993 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
995 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
996 struct sdhci_tegra *tegra_host = pltfm_host->priv;
997 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
999 if (!gpio_is_valid(plat->wp_gpio))
1002 return gpio_get_value_cansleep(plat->wp_gpio);
/*
 * Program HOST_CONTROL2 UHS mode select for the requested timing, apply
 * the DDR50 divider/trim-delay fixups, and load the best tap value from
 * tuning results (or the platform default when tuning has not completed).
 */
1005 static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1009 u32 vndr_ctrl, trim_delay, best_tap_value;
1010 struct tegra_tuning_data *tuning_data;
1011 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1012 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1013 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1015 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1017 /* Select Bus Speed Mode for host
1018 * For HS200 we need to set UHS_MODE_SEL to SDR104.
1019 * It works as SDR 104 in SD 4-bit mode and HS200 in eMMC 8-bit mode.
1020 * SDR50 mode timing seems to have issues. Programming SDR104
1021 * mode for SDR50 mode for reliable transfers over interface.
1023 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1025 case MMC_TIMING_UHS_SDR12:
1026 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1028 case MMC_TIMING_UHS_SDR25:
1029 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
/* SDR50 is deliberately programmed as SDR104 (see comment above). */
1031 case MMC_TIMING_UHS_SDR50:
1032 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1034 case MMC_TIMING_UHS_SDR104:
1035 case MMC_TIMING_MMC_HS200:
1036 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1038 case MMC_TIMING_UHS_DDR50:
1039 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1043 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
/* DDR50 requires a divide-by-2 host divider. */
1045 if (uhs == MMC_TIMING_UHS_DDR50) {
1046 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1047 clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT);
1048 clk |= 1 << SDHCI_DIVIDER_SHIFT;
1049 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1051 /* Set the ddr mode trim delay if required */
1052 if (plat->ddr_trim_delay != -1) {
1053 trim_delay = plat->ddr_trim_delay;
1054 vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1055 vndr_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1056 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1057 vndr_ctrl |= (trim_delay <<
1058 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1059 sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_CLK_CTRL);
1062 /* Set the best tap value based on timing */
1063 if (((uhs == MMC_TIMING_MMC_HS200) ||
1064 (uhs == MMC_TIMING_UHS_SDR104) ||
1065 (uhs == MMC_TIMING_UHS_SDR50)) &&
1066 (tegra_host->tuning_status == TUNING_STATUS_DONE)) {
1067 tuning_data = sdhci_tegra_get_tuning_data(host,
1068 host->mmc->ios.clock);
/* High-voltage trim command selects the nominal-vcore tap instead. */
1069 best_tap_value = (tegra_host->tap_cmd ==
1070 TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1071 tuning_data->nom_best_tap_value :
1072 tuning_data->best_tap_value;
1074 best_tap_value = tegra_host->plat->tap_delay;
1076 vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1077 vndr_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
1078 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1079 vndr_ctrl |= (best_tap_value <<
1080 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1081 sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * sdhci_status_notify_cb - external card-status change notification callback.
 * @card_present: 1 if the card was inserted, 0 if removed.
 * @dev_id: opaque cookie, actually the struct sdhci_host for this slot.
 *
 * NOTE(review): some structural lines (braces/else arms) are not visible in
 * this chunk; comments describe only the visible logic.
 */
1085 static void sdhci_status_notify_cb(int card_present, void *dev_id)
1087 struct sdhci_host *sdhci = (struct sdhci_host *)dev_id;
1088 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1089 struct tegra_sdhci_platform_data *plat;
1090 unsigned int status, oldstat;
1092 pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc),
/*
 * Platform data may supply an optional ->status() hook. Without it, act
 * directly on the caller-provided card_present argument.
 */
1095 plat = pdev->dev.platform_data;
1096 if (!plat->mmc_data.status) {
1097 if (card_present == 1) {
/* Card inserted: re-enable rescans and request an immediate detect. */
1098 sdhci->mmc->rescan_disable = 0;
1099 mmc_detect_change(sdhci->mmc, 0);
1100 } else if (card_present == 0) {
/* Card removed: clear pending detect work and block future rescans. */
1101 sdhci->mmc->detect_change = 0;
1102 sdhci->mmc->rescan_disable = 1;
/*
 * ->status() path: poll the platform hook and compare against the cached
 * presence state to detect a slot change.
 */
1107 status = plat->mmc_data.status(mmc_dev(sdhci->mmc));
1109 oldstat = plat->mmc_data.card_present;
1110 plat->mmc_data.card_present = status;
1111 if (status ^ oldstat) {
1112 pr_debug("%s: Slot status change detected (%d -> %d)\n",
1113 mmc_hostname(sdhci->mmc), oldstat, status);
/*
 * Removable-card insertions are debounced (~2.5 s); the other path
 * (presumably removal or built-in devices) detects immediately.
 */
1114 if (status && !plat->mmc_data.built_in)
1115 mmc_detect_change(sdhci->mmc, (5 * HZ) / 2);
1117 mmc_detect_change(sdhci->mmc, 0);
/*
 * carddetect_irq - threaded IRQ handler for the card-detect GPIO.
 * @irq: interrupt number (unused in the visible body).
 * @data: the struct sdhci_host for this slot.
 *
 * Samples the CD GPIO (active-low: value 0 means card present), switches the
 * slot regulators accordingly, and schedules the core card tasklet.
 */
1121 static irqreturn_t carddetect_irq(int irq, void *data)
1123 struct sdhci_host *sdhost = (struct sdhci_host *)data;
1124 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
1125 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1126 struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
1127 struct tegra_sdhci_platform_data *plat;
1130 plat = pdev->dev.platform_data;
/* CD line is read as active-low: GPIO == 0 -> card inserted. */
1132 tegra_host->card_present =
1133 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
1135 if (tegra_host->card_present) {
/* Card inserted: power up the slot rails. */
1136 err = tegra_sdhci_configure_regulators(tegra_host,
1137 CONFIG_REG_EN, 0, 0);
1139 dev_err(mmc_dev(sdhost->mmc),
1140 "Failed to enable card regulators %d\n", err);
/* Card removed: power down the slot rails. */
1142 err = tegra_sdhci_configure_regulators(tegra_host,
1143 CONFIG_REG_DIS, 0 , 0);
1145 dev_err(mmc_dev(sdhost->mmc),
1146 "Failed to disable card regulators %d\n", err);
1148 * Set retune request as tuning should be done next time
1149 * a card is inserted.
1151 tegra_host->tuning_status = TUNING_STATUS_RETUNE;
1152 tegra_host->force_retune = true;
/* Let the SDHCI core process the insertion/removal. */
1155 tasklet_schedule(&sdhost->card_tasklet);
/*
 * tegra_sdhci_reset_exit - restore Tegra vendor register state after a reset.
 * @host: SDHCI host.
 * @mask: SDHCI_RESET_* bits of the reset that just completed.
 *
 * A full controller reset (SDHCI_RESET_ALL) clears the vendor clock/misc
 * control registers, so they are reprogrammed here according to the SoC
 * nvquirks, and any platform UHS-mode masks are reapplied to the MMC caps.
 * Partial resets return early (visible at the !(mask & SDHCI_RESET_ALL)
 * check).
 */
1159 static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
1163 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1164 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1165 struct tegra_tuning_data *tuning_data;
1166 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1167 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
1168 unsigned int best_tap_value;
/* Only a full reset wipes vendor state; nothing to do otherwise. */
1170 if (!(mask & SDHCI_RESET_ALL))
/* Reset the error-statistics counters along with the controller. */
1173 if (tegra_host->sd_stat_head != NULL) {
1174 tegra_host->sd_stat_head->data_crc_count = 0;
1175 tegra_host->sd_stat_head->cmd_crc_count = 0;
1176 tegra_host->sd_stat_head->data_to_count = 0;
1177 tegra_host->sd_stat_head->cmd_to_count = 0;
1180 if (tegra_host->gov_data != NULL)
1181 tegra_host->gov_data->freq_switch_count = 0;
/* Rebuild SDHCI_VNDR_CLK_CTRL from the SoC quirk list. */
1183 vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
1184 if (soc_data->nvquirks & NVQUIRK_ENABLE_PADPIPE_CLKEN) {
1186 SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE;
1188 if (soc_data->nvquirks & NVQUIRK_DISABLE_SPI_MODE_CLKEN) {
1190 ~SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
1192 if (soc_data->nvquirks & NVQUIRK_EN_FEEDBACK_CLK) {
1194 ~SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
1196 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK;
/*
 * Restore the tap delay: use the tuned value if tuning has completed and
 * power was kept across suspend, otherwise fall back to the platform
 * default tap_delay.
 */
1199 if (soc_data->nvquirks & NVQUIRK_SET_TAP_DELAY) {
1200 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
1201 && (host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
1202 tuning_data = sdhci_tegra_get_tuning_data(host,
1203 host->mmc->ios.clock);
1204 best_tap_value = (tegra_host->tap_cmd ==
1205 TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1206 tuning_data->nom_best_tap_value :
1207 tuning_data->best_tap_value;
1209 best_tap_value = tegra_host->plat->tap_delay;
1211 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
1212 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1213 vendor_ctrl |= (best_tap_value <<
1214 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
/* Restore the platform-specified trim delay. */
1217 if (soc_data->nvquirks & NVQUIRK_SET_TRIM_DELAY) {
1218 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1219 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1220 vendor_ctrl |= (plat->trim_delay <<
1221 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1223 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50_TUNING)
1224 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_SDR50_TUNING;
1225 sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/* Rebuild SDHCI_VNDR_MISC_CTRL: speed-mode capabilities per quirks. */
1227 misc_ctrl = sdhci_readl(host, SDHCI_VNDR_MISC_CTRL);
1228 if (soc_data->nvquirks & NVQUIRK_ENABLE_SD_3_0)
1229 misc_ctrl |= SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0;
1230 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) {
1232 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT;
1234 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) {
1236 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT;
1238 /* Enable DDR mode support only for SDMMC4 */
1239 if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) {
1240 if (tegra_host->instance == 3) {
1242 SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT;
1245 if (soc_data->nvquirks & NVQUIRK_INFINITE_ERASE_TIMEOUT) {
1247 SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT;
1249 if (soc_data->nvquirks & NVQUIRK_SET_PIPE_STAGES_MASK_0)
1250 misc_ctrl &= ~SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK;
1252 /* External loopback is valid for sdmmc3 only */
1253 if ((soc_data->nvquirks & NVQUIRK_DISABLE_EXTERNAL_LOOPBACK) &&
1254 (tegra_host->instance == 2)) {
/*
 * Keep the loopback setting consistent with the tuning state across
 * a keep-power suspend (exact set/clear lines not visible here).
 */
1255 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
1256 && (host->mmc->pm_flags &
1257 MMC_PM_KEEP_POWER)) {
1259 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
1262 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
1265 sdhci_writel(host, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
1267 if (soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CMD23)
1268 host->flags &= ~SDHCI_AUTO_CMD23;
1270 /* Mask the support for any UHS modes if specified */
1271 if (plat->uhs_mask & MMC_UHS_MASK_SDR104)
1272 host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
1274 if (plat->uhs_mask & MMC_UHS_MASK_DDR50)
1275 host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
1277 if (plat->uhs_mask & MMC_UHS_MASK_SDR50)
1278 host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
1280 if (plat->uhs_mask & MMC_UHS_MASK_SDR25)
1281 host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
1283 if (plat->uhs_mask & MMC_UHS_MASK_SDR12)
1284 host->mmc->caps &= ~MMC_CAP_UHS_SDR12;
/* HS200 can be disabled either at build time or via the platform mask. */
1286 #ifdef CONFIG_MMC_SDHCI_TEGRA_HS200_DISABLE
1287 host->mmc->caps2 &= ~MMC_CAP2_HS200;
1289 if (plat->uhs_mask & MMC_MASK_HS200)
1290 host->mmc->caps2 &= ~MMC_CAP2_HS200;
/*
 * tegra_sdhci_buswidth - program the data bus width in SDHCI_HOST_CONTROL.
 * @sdhci: SDHCI host.
 * @bus_width: requested width (MMC_BUS_WIDTH_* value).
 *
 * 8-bit is selected only when the platform declares the slot 8-bit capable
 * (plat->is_8bit); otherwise the width is 4-bit or 1-bit.
 */
1294 static int tegra_sdhci_buswidth(struct sdhci_host *sdhci, int bus_width)
1296 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1297 const struct tegra_sdhci_platform_data *plat;
1300 plat = pdev->dev.platform_data;
1302 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL);
1303 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
/* 8-bit: the 4-bit selector must be cleared at the same time. */
1304 ctrl &= ~SDHCI_CTRL_4BITBUS;
1305 ctrl |= SDHCI_CTRL_8BITBUS;
1307 ctrl &= ~SDHCI_CTRL_8BITBUS;
1308 if (bus_width == MMC_BUS_WIDTH_4)
1309 ctrl |= SDHCI_CTRL_4BITBUS;
/* Fall back to 1-bit for any other requested width. */
1311 ctrl &= ~SDHCI_CTRL_4BITBUS;
1313 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL);
1318 * Calculation of nearest clock frequency for desired rate:
1319 * Get the divisor value, div = p / d_rate
1320 * 1. If it is nearer to ceil(p/d_rate) then increment the div value by 0.5 and
1321 * nearest_rate, i.e. result = p / (div + 0.5) = (p << 1)/((div << 1) + 1).
1322 * 2. If not, result = p / div
1323 * As the nearest clk freq should be <= to desired_rate,
1324 * 3. If result > desired_rate then increment the div by 0.5
1325 * and do, (p << 1)/((div << 1) + 1)
1326 * 4. Else return result
1327 * Here, if conditions 1 & 3 are both satisfied then, to keep track of the div value, we have
1328 * defined index variable.
/*
 * get_nearest_clock_freq - pick the closest achievable rate <= desired_rate.
 * @pll_rate: parent PLL rate in Hz.
 * @desired_rate: target SDMMC clock rate in Hz.
 *
 * Implements the half-step divisor algorithm described in the comment block
 * above this function. NOTE(review): the declarations of 'div' and 'index'
 * are not visible in this chunk; 'index' presumably starts at 1 so that the
 * (div << 1) + index expression models a +0.5 divisor step — confirm against
 * the full source.
 */
1330 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
1331 unsigned long desired_rate)
1333 unsigned long result;
1337 div = pll_rate / desired_rate;
/* Divisor saturates at the hardware maximum. */
1338 if (div > MAX_DIVISOR_VALUE) {
1339 div = MAX_DIVISOR_VALUE;
1340 result = pll_rate / div;
/* Remainder >= desired/2: the true divisor is nearer div + 0.5. */
1342 if ((pll_rate % desired_rate) >= (desired_rate / 2))
1343 result = (pll_rate << 1) / ((div << 1) + index++);
1345 result = pll_rate / div;
1347 if (desired_rate < result) {
1349 * Trying to get lower clock freq than desired clock,
1350 * by increasing the divisor value by 0.5
1352 result = (pll_rate << 1) / ((div << 1) + index);
/*
 * tegra_sdhci_clock_set_parent - choose pll_p or pll_c as the SDMMC parent.
 * @host: SDHCI host.
 * @desired_rate: target SDMMC clock rate in Hz.
 *
 * Computes the nearest achievable rate from each candidate PLL and switches
 * the parent to whichever gets closer, tracking the choice in
 * tegra_host->is_parent_pllc to avoid redundant reparenting. Skipped on FPGA
 * platforms (see the CONFIG_TEGRA_FPGA_PLATFORM guard).
 */
1359 static void tegra_sdhci_clock_set_parent(struct sdhci_host *host,
1360 unsigned long desired_rate)
1362 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1363 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1364 struct clk *parent_clk;
1365 unsigned long pll_c_freq;
1366 unsigned long pll_p_freq;
1369 #ifdef CONFIG_TEGRA_FPGA_PLATFORM
1373 * Currently pll_p and pll_c are used as clock sources for SDMMC. If clk
1374 * rate is missing for either of them, then no selection is needed and
1375 * the default parent is used.
1377 if (!pll_c_rate || !pll_p_rate)
1380 pll_c_freq = get_nearest_clock_freq(pll_c_rate, desired_rate);
1381 pll_p_freq = get_nearest_clock_freq(pll_p_rate, desired_rate);
1384 * For low freq requests, both the desired rates might be higher than
1385 * the requested clock frequency. In such cases, select the parent
1386 * with the lower frequency rate.
1388 if ((pll_c_freq > desired_rate) && (pll_p_freq > desired_rate)) {
1389 if (pll_p_freq <= pll_c_freq) {
1390 desired_rate = pll_p_freq;
1393 desired_rate = pll_c_freq;
1396 rc = clk_set_rate(pltfm_host->clk, desired_rate);
/* Reparent only when the preferred PLL differs from the current one. */
1399 if (pll_c_freq > pll_p_freq) {
1400 if (!tegra_host->is_parent_pllc) {
1402 tegra_host->is_parent_pllc = true;
/* Drop to a safe default rate before switching parents. */
1403 clk_set_rate(pltfm_host->clk, DEFAULT_SDHOST_FREQ);
1406 } else if (tegra_host->is_parent_pllc) {
1408 tegra_host->is_parent_pllc = false;
1412 rc = clk_set_parent(pltfm_host->clk, parent_clk);
1414 pr_err("%s: failed to set pll parent clock %d\n",
1415 mmc_hostname(host->mmc), rc);
/*
 * tegra_sdhci_set_clk_rate - program the controller clock for a card clock.
 * @sdhci: SDHCI host.
 *
 * Derives the required controller rate from the requested card clock
 * ('clock', declared on a line not visible here), clamps it to the platform
 * and clock-framework maximums, selects the best PLL parent, and applies the
 * rate. sdhci->max_clk is refreshed from the actually-achieved rate so the
 * core divider math stays correct.
 */
1418 static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci,
1421 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1422 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1423 unsigned int clk_rate;
1424 #ifdef CONFIG_MMC_FREQ_SCALING
1425 unsigned int tap_value;
1426 struct tegra_tuning_data *tuning_data;
1429 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
1431 * In ddr mode, tegra sdmmc controller clock frequency
1432 * should be double the card clock frequency.
1434 if (tegra_host->ddr_clk_limit)
1435 clk_rate = tegra_host->ddr_clk_limit * 2;
1437 clk_rate = clock * 2;
/* SDR50 runs at the first entry of the SoC tuning frequency list. */
1442 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50)
1443 clk_rate = tegra_host->soc_data->tuning_freq_list[0];
/* Clamp to the platform limit, then to the clock framework's maximum. */
1445 if (tegra_host->max_clk_limit &&
1446 (clk_rate > tegra_host->max_clk_limit))
1447 clk_rate = tegra_host->max_clk_limit;
1449 if (clk_rate > clk_get_max_rate(pltfm_host->clk))
1450 clk_rate = clk_get_max_rate(pltfm_host->clk);
1452 tegra_sdhci_clock_set_parent(sdhci, clk_rate);
1453 clk_set_rate(pltfm_host->clk, clk_rate);
/* Report the rate actually achieved back to the SDHCI core. */
1454 sdhci->max_clk = clk_get_rate(pltfm_host->clk);
1456 /* FPGA supports 26MHz of clock for SDMMC. */
1457 if (tegra_platform_is_fpga())
1458 sdhci->max_clk = 26000000;
1460 #ifdef CONFIG_MMC_FREQ_SCALING
1461 /* Set the tap delay if tuning is done and dfs is enabled */
1462 if (sdhci->mmc->df &&
1463 (tegra_host->tuning_status == TUNING_STATUS_DONE)) {
1464 tuning_data = sdhci_tegra_get_tuning_data(sdhci, clock);
1465 tap_value = (tegra_host->tap_cmd == TAP_CMD_TRIM_HIGH_VOLTAGE) ?
1466 tuning_data->nom_best_tap_value :
1467 tuning_data->best_tap_value;
1468 sdhci_tegra_set_tap_delay(sdhci, tap_value);
/*
 * tegra_sdhci_set_clock - gate/ungate all SDMMC-related clocks.
 * @sdhci: SDHCI host.
 * @clock: requested card clock in Hz; 0 requests clock-off.
 *
 * On enable: takes a runtime-PM reference, enables the controller clock,
 * ungates the vendor SDMMC clock bit, programs the rate, and enables the
 * auxiliary EMC/SCLK clocks if present. On disable (clock == 0 while
 * enabled): the reverse, dropping the runtime-PM reference last. The whole
 * sequence is serialized by set_clock_mutex.
 */
1473 static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
1475 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1476 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1477 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1481 mutex_lock(&tegra_host->set_clock_mutex);
1482 pr_debug("%s %s %u enabled=%u\n", __func__,
1483 mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled);
1485 if (!tegra_host->clk_enabled) {
/* Keep the device powered while its clock is running. */
1486 pm_runtime_get_sync(&pdev->dev);
1487 ret = clk_prepare_enable(pltfm_host->clk);
1489 dev_err(mmc_dev(sdhci->mmc),
1490 "clock enable is failed, ret: %d\n", ret);
1493 tegra_host->clk_enabled = true;
1494 sdhci->is_clk_on = tegra_host->clk_enabled;
/* Ungate the SDMMC clock in the vendor clock-control register. */
1495 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1496 ctrl |= SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1497 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1499 tegra_sdhci_set_clk_rate(sdhci, clock);
/* Optional external-memory clock needed for DMA bandwidth. */
1501 if (tegra_host->emc_clk && (!tegra_host->is_sdmmc_emc_clk_on)) {
1502 ret = clk_prepare_enable(tegra_host->emc_clk);
1504 dev_err(mmc_dev(sdhci->mmc),
1505 "clock enable is failed, ret: %d\n", ret);
1508 tegra_host->is_sdmmc_emc_clk_on = true;
/* Optional system clock the controller depends on. */
1510 if (tegra_host->sclk && (!tegra_host->is_sdmmc_sclk_on)) {
1511 ret = clk_prepare_enable(tegra_host->sclk);
1513 dev_err(mmc_dev(sdhci->mmc),
1514 "clock enable is failed, ret: %d\n", ret);
1517 tegra_host->is_sdmmc_sclk_on = true;
1519 } else if (!clock && tegra_host->clk_enabled) {
/* Disable path: tear down in the reverse order of enable. */
1520 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on) {
1521 clk_disable_unprepare(tegra_host->emc_clk);
1522 tegra_host->is_sdmmc_emc_clk_on = false;
1524 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on) {
1525 clk_disable_unprepare(tegra_host->sclk);
1526 tegra_host->is_sdmmc_sclk_on = false;
1528 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1529 ctrl &= ~SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1530 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1531 clk_disable_unprepare(pltfm_host->clk);
1532 tegra_host->clk_enabled = false;
1533 sdhci->is_clk_on = tegra_host->clk_enabled;
1534 pm_runtime_put_sync(&pdev->dev);
1536 mutex_unlock(&tegra_host->set_clock_mutex);
/*
 * tegra_sdhci_do_calibration - run pad auto-calibration.
 * @sdhci: SDHCI host.
 * @signal_voltage: current signaling voltage (MMC_SIGNAL_VOLTAGE_*), used to
 *                  choose the per-voltage calibration offsets.
 *
 * Kicks the auto-calibration engine, optionally programming platform-supplied
 * pull-up/pull-down offsets, polls for completion, and on SoCs with the
 * SET_DRIVE_STRENGTH quirk copies the resulting codes into the pinmux drive
 * registers (with auto-cal then disabled).
 */
1539 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci,
1540 unsigned char signal_voltage)
1543 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1544 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1545 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1546 unsigned int timeout = 10;
1547 unsigned int calib_offsets = 0;
1549 /* No Calibration for sdmmc4 */
1550 if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_SDMMC4_CALIB) &&
1551 (tegra_host->instance == 3))
1554 if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CALIBRATION))
/* Select the pad VREF and, if quirked, power the pads for calibration. */
1557 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
1558 val &= ~SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK;
1559 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD)
1560 val |= SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1562 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
1564 /* Enable Auto Calibration*/
1565 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
1566 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
1567 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
1568 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_CALIBRATION_OFFSETS)) {
/* Offsets are packed per-voltage: PD in bits 8-15, PU in bits 0-7. */
1569 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1570 calib_offsets = tegra_host->plat->calib_3v3_offsets;
1571 else if (signal_voltage == MMC_SIGNAL_VOLTAGE_180)
1572 calib_offsets = tegra_host->plat->calib_1v8_offsets;
1573 if (calib_offsets) {
1574 /* Program Auto cal PD offset(bits 8:14) */
1576 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
1577 val |= (((calib_offsets >> 8) & 0xFF) <<
1578 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
1579 /* Program Auto cal PU offset(bits 0:6) */
1581 val |= (calib_offsets & 0xFF);
1584 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
1586 /* Wait until the calibration is done */
/* Poll loop body (delay/timeout decrement lines not visible here). */
1588 if (!(sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) &
1589 SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE))
1597 dev_err(mmc_dev(sdhci->mmc), "Auto calibration failed\n");
/* Undo the pad power/VREF override once calibration has finished. */
1599 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD) {
1600 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
1601 val &= ~SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1602 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
1605 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_DRIVE_STRENGTH)) {
1606 unsigned int pulldown_code;
1607 unsigned int pullup_code;
1611 /* Disable Auto calibration */
1612 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
1613 val &= ~SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
1614 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
1616 pg = tegra_drive_get_pingroup(mmc_dev(sdhci->mmc));
1618 /* Get the pull down codes from auto cal status reg */
1620 sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) >>
1621 SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET);
1622 /* Set the pull down in the pinmux reg */
1623 err = tegra_drive_pinmux_set_pull_down(pg,
1626 dev_err(mmc_dev(sdhci->mmc),
1627 "Failed to set pulldown codes %d err %d\n",
1628 pulldown_code, err);
1630 /* Calculate the pull up codes */
/* PU code is derived from the PD code, clamped to the hardware max. */
1631 pullup_code = pulldown_code + PULLUP_ADJUSTMENT_OFFSET;
1632 if (pullup_code >= TEGRA_MAX_PULL)
1633 pullup_code = TEGRA_MAX_PULL - 1;
1634 /* Set the pull up code in the pinmux reg */
1635 err = tegra_drive_pinmux_set_pull_up(pg, pullup_code);
1637 dev_err(mmc_dev(sdhci->mmc),
1638 "Failed to set pullup codes %d err %d\n",
/*
 * tegra_sdhci_signal_voltage_switch - switch I/O signaling between 3.3V/1.8V.
 * @sdhci: SDHCI host.
 * @signal_voltage: MMC_SIGNAL_VOLTAGE_180 or MMC_SIGNAL_VOLTAGE_330.
 *
 * Updates the 1.8V enable bit in SDHCI_HOST_CONTROL2 and then moves the
 * vddio regulator to the matching range. If the 1.8V regulator switch fails,
 * reverts the rail to the 3.3V range.
 *
 * Return: 0 on success or a regulator error code (rc from the last
 * configure call; exact return line not visible in this chunk).
 */
1644 static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci,
1645 unsigned int signal_voltage)
1647 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1648 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1649 unsigned int min_uV = tegra_host->vddio_min_uv;
1650 unsigned int max_uV = tegra_host->vddio_max_uv;
1651 unsigned int rc = 0;
1655 ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
1656 if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
/* 1.8V signaling: set the control bit and narrow the regulator range. */
1657 ctrl |= SDHCI_CTRL_VDD_180;
1658 min_uV = SDHOST_LOW_VOLT_MIN;
1659 max_uV = SDHOST_LOW_VOLT_MAX;
1660 } else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1661 if (ctrl & SDHCI_CTRL_VDD_180)
1662 ctrl &= ~SDHCI_CTRL_VDD_180;
1665 /* Check if the slot can support the required voltage */
1666 if (min_uV > tegra_host->vddio_max_uv)
1669 /* Set/clear the 1.8V signalling */
1670 sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2);
1672 /* Switch the I/O rail voltage */
1673 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_SET_VOLT,
1675 if (rc && (signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
/* 1.8V switch failed: fall back to the 3.3V range to stay functional. */
1676 dev_err(mmc_dev(sdhci->mmc),
1677 "setting 1.8V failed %d. Revert to 3.3V\n", rc);
1678 rc = tegra_sdhci_configure_regulators(tegra_host,
1679 CONFIG_REG_SET_VOLT, SDHOST_HIGH_VOLT_MIN,
1680 SDHOST_HIGH_VOLT_MAX);
/*
 * tegra_sdhci_configure_regulators - enable/disable/re-voltage slot rails.
 * @tegra_host: driver context holding the optional slot and I/O regulators.
 * @option: CONFIG_REG_EN, CONFIG_REG_DIS, or CONFIG_REG_SET_VOLT.
 * @min_uV: minimum voltage in uV (CONFIG_REG_SET_VOLT only).
 * @max_uV: maximum voltage in uV (CONFIG_REG_SET_VOLT only).
 *
 * Both regulators are optional (NULL-checked). is_rail_enabled tracks state
 * so enable/disable are idempotent. NOTE(review): rc is overwritten by each
 * regulator call, so an earlier failure can be masked by a later success —
 * visible in the original code, left as-is here.
 */
1686 static int tegra_sdhci_configure_regulators(struct sdhci_tegra *tegra_host,
1687 u8 option, int min_uV, int max_uV)
1693 if (!tegra_host->is_rail_enabled) {
1694 if (tegra_host->vdd_slot_reg)
1695 rc = regulator_enable(tegra_host->vdd_slot_reg);
1696 if (tegra_host->vdd_io_reg)
1697 rc = regulator_enable(tegra_host->vdd_io_reg);
1698 tegra_host->is_rail_enabled = true;
1701 case CONFIG_REG_DIS:
1702 if (tegra_host->is_rail_enabled) {
/* Disable in reverse order of enable: I/O rail first, then slot. */
1703 if (tegra_host->vdd_io_reg)
1704 rc = regulator_disable(tegra_host->vdd_io_reg);
1705 if (tegra_host->vdd_slot_reg)
1706 rc = regulator_disable(
1707 tegra_host->vdd_slot_reg);
1708 tegra_host->is_rail_enabled = false;
1711 case CONFIG_REG_SET_VOLT:
1712 if (tegra_host->vdd_io_reg)
1713 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
1717 pr_err("Invalid argument passed to reg config %d\n", option);
/*
 * tegra_sdhci_reset - issue a software reset and restore vendor state.
 * @sdhci: SDHCI host.
 * @mask: SDHCI_RESET_* bits to assert.
 *
 * Writes the reset mask, polls SDHCI_SOFTWARE_RESET until the hardware
 * self-clears it (with a bounded wait; the delay/decrement lines are not
 * visible in this chunk), then calls tegra_sdhci_reset_exit() to reprogram
 * the vendor registers a full reset would have wiped.
 */
1723 static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask)
1725 unsigned long timeout;
1727 sdhci_writeb(sdhci, mask, SDHCI_SOFTWARE_RESET);
1729 /* Wait max 100 ms */
1732 /* hw clears the bit when it's done */
1733 while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & mask) {
1735 dev_err(mmc_dev(sdhci->mmc), "Reset 0x%x never"
1736 "completed.\n", (int)mask);
1743 tegra_sdhci_reset_exit(sdhci, mask);
/*
 * sdhci_tegra_set_tap_delay - program the input tap delay field.
 * @sdhci: SDHCI host.
 * @tap_delay: tap value to program; valid range is 0-MAX_TAP_VALUES (255).
 *
 * Out-of-range values are reported via dev_err (the early-return/clamp line
 * is not visible in this chunk). The tap field lives in bits
 * [TAP_VALUE_SHIFT, TAP_VALUE_SHIFT+7] of SDHCI_VNDR_CLK_CTRL.
 */
1746 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
1747 unsigned int tap_delay)
1751 /* Max tap delay value is 255 */
1752 if (tap_delay > MAX_TAP_VALUES) {
1753 dev_err(mmc_dev(sdhci->mmc),
1754 "Valid tap range (0-255). Setting tap value %d\n",
/* Read-modify-write: clear the old tap field, insert the new value. */
1760 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
1761 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TAP_VALUE_MASK <<
1762 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1763 vendor_ctrl |= (tap_delay << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1764 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * sdhci_tegra_set_trim_delay - program the output trim delay field.
 * @sdhci: SDHCI host.
 * @trim_delay: trim value to insert into SDHCI_VNDR_CLK_CTRL (bits starting
 *              at TRIM_VALUE_SHIFT).
 */
1767 static void sdhci_tegra_set_trim_delay(struct sdhci_host *sdhci,
1768 unsigned int trim_delay)
/* Read-modify-write: clear the old trim field, insert the new value. */
1772 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
1773 vendor_ctrl &= ~(SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_MASK <<
1774 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1775 vendor_ctrl |= (trim_delay << SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
1776 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * sdhci_tegra_sd_error_stats - bump per-host CRC/timeout error counters.
 * @host: SDHCI host.
 * @int_status: raw SDHCI interrupt status bits to classify.
 *
 * Increments the matching counter in tegra_host->sd_stat_head for each of
 * data CRC, command CRC, command timeout, and data timeout errors present in
 * @int_status.
 */
1779 static int sdhci_tegra_sd_error_stats(struct sdhci_host *host, u32 int_status)
1781 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1782 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1783 struct sdhci_tegra_sd_stats *head = tegra_host->sd_stat_head;
1785 if (int_status & SDHCI_INT_DATA_CRC)
1786 head->data_crc_count++;
1787 if (int_status & SDHCI_INT_CRC)
1788 head->cmd_crc_count++;
1789 if (int_status & SDHCI_INT_TIMEOUT)
1790 head->cmd_to_count++;
1791 if (int_status & SDHCI_INT_DATA_TIMEOUT)
1792 head->data_to_count++;
/*
 * sdhci_tegra_get_tuning_data - select the tuning-data set for a clock rate.
 * @sdhci: SDHCI host.
 * @clock: card clock in Hz used to pick between the low/high frequency sets.
 *
 * With a single tuning frequency the only set is returned directly.
 * Otherwise the lowest supported tuning frequency is looked up and
 * tuning_data[0] is used for clocks at or below it, tuning_data[1] above.
 * NOTE(review): the loop's break/guard lines are not visible here, so
 * low_freq presumably ends up as the first valid list entry — confirm
 * against the full source.
 */
1796 static struct tegra_tuning_data *sdhci_tegra_get_tuning_data(
1797 struct sdhci_host *sdhci, unsigned int clock)
1799 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1800 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1801 struct tegra_tuning_data *tuning_data;
1802 unsigned int low_freq;
1805 if (tegra_host->tuning_freq_count == 1) {
1806 tuning_data = &tegra_host->tuning_data[0];
1810 /* Get the lowest supported freq */
1811 for (i = 0; i < TUNING_FREQ_COUNT; ++i) {
1812 low_freq = tegra_host->soc_data->tuning_freq_list[i];
1817 if (clock <= low_freq)
1818 tuning_data = &tegra_host->tuning_data[0];
1820 tuning_data = &tegra_host->tuning_data[1];
/*
 * calculate_vmin_values - derive Vmin-side tuning parameters for a voltage.
 * @sdhci: SDHCI host (used for logging context).
 * @tuning_data: tuning set whose calc_values are filled in.
 * @vmin: core voltage (mV) to calculate for.
 * @boot_mv: boot/max core voltage (mV), used as the Vmax reference point.
 *
 * Interpolates tap-to-tap (T2T) delay, unit interval (UI), and tap-hole
 * position at @vmin from the estimated values at the characterized Vmin and
 * at boot voltage. Fixed-point math throughout: values are scaled by 1000 to
 * keep precision in integer arithmetic.
 */
1826 static void calculate_vmin_values(struct sdhci_host *sdhci,
1827 struct tegra_tuning_data *tuning_data, int vmin, int boot_mv)
1829 struct tuning_values *est_values = &tuning_data->est_values;
1830 struct tuning_values *calc_values = &tuning_data->calc_values;
1831 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
1832 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
1833 int vmin_slope, vmin_int, temp_calc_vmin;
1834 int t2t_vmax, t2t_vmin;
1835 int vmax_thole, vmin_thole;
1838 * If current vmin is equal to vmin or vmax of tuning data, use the
1839 * previously calculated estimated T2T values directly. Note that the
1840 * estimated T2T_vmax is not at Vmax specified in tuning data. It is
1841 * the T2T at the boot or max voltage for the current SKU. Hence,
1842 * boot_mv is used in place of t2t_coeffs->vmax.
1844 if (vmin == t2t_coeffs->vmin) {
1845 t2t_vmin = est_values->t2t_vmin;
1846 } else if (vmin == boot_mv) {
1847 t2t_vmin = est_values->t2t_vmax;
1850 * For any intermediate voltage between boot voltage and vmin
1851 * of tuning data, calculate the slope and intercept from the
1852 * t2t at boot_mv and vmin and calculate the actual values.
/* T2T values are inverted (1000/x) so the interpolation is linear. */
1854 t2t_vmax = 1000 / est_values->t2t_vmax;
1855 t2t_vmin = 1000 / est_values->t2t_vmin;
1856 vmin_slope = ((t2t_vmax - t2t_vmin) * 1000) /
1857 (boot_mv - t2t_coeffs->vmin);
1858 vmin_int = (t2t_vmax * 1000 - (vmin_slope * boot_mv)) / 1000;
1859 t2t_vmin = (vmin_slope * vmin) / 1000 + vmin_int;
1860 t2t_vmin = (1000 / t2t_vmin);
/* Scale the estimated T2T_vmin by the measured/estimated Vmax ratio. */
1863 calc_values->t2t_vmin = (t2t_vmin * calc_values->t2t_vmax) /
1864 est_values->t2t_vmax;
/* Unit interval at Vmin, derived from the tuning frequency. */
1866 calc_values->ui_vmin = (1000000 / (tuning_data->freq_hz / 1000000)) /
1867 calc_values->t2t_vmin;
1869 /* Calculate the vmin tap hole at vmin of tuning data */
1870 temp_calc_vmin = (est_values->t2t_vmin * calc_values->t2t_vmax) /
1871 est_values->t2t_vmax;
1872 vmin_thole = (thole_coeffs->thole_vmin_int -
1873 (thole_coeffs->thole_vmin_slope * temp_calc_vmin)) /
1875 vmax_thole = calc_values->vmax_thole;
1877 if (vmin == t2t_coeffs->vmin) {
1878 calc_values->vmin_thole = vmin_thole;
1879 } else if (vmin == boot_mv) {
1880 calc_values->vmin_thole = vmax_thole;
1883 * Interpolate the tap hole for any intermediate voltage.
1884 * Calculate the slope and intercept from the available data
1885 * and use them to calculate the actual values.
1887 vmin_slope = ((vmax_thole - vmin_thole) * 1000) /
1888 (boot_mv - t2t_coeffs->vmin);
1889 vmin_int = (vmax_thole * 1000 - (vmin_slope * boot_mv)) / 1000;
1890 calc_values->vmin_thole = (vmin_slope * vmin) / 1000 + vmin_int;
1893 /* Adjust the partial win start for Vmin boundary */
1894 if (tuning_data->is_partial_win_valid)
1895 tuning_data->final_tap_data[0].win_start =
1896 (tuning_data->final_tap_data[0].win_start *
1897 tuning_data->calc_values.t2t_vmax) /
1898 tuning_data->calc_values.t2t_vmin;
/* Dump both estimated and calculated values for tuning diagnostics. */
1900 pr_info("**********Tuning values*********\n");
1901 pr_info("**estimated values**\n");
1902 pr_info("T2T_Vmax %d, T2T_Vmin %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
1903 est_values->t2t_vmax, est_values->t2t_vmin,
1904 est_values->vmax_thole, est_values->ui);
1905 pr_info("**Calculated values**\n");
1906 pr_info("T2T_Vmax %d, 1'st_hole_Vmax %d, UI_Vmax %d\n",
1907 calc_values->t2t_vmax, calc_values->vmax_thole,
1909 pr_info("T2T_Vmin %d, 1'st_hole_Vmin %d, UI_Vmin %d\n",
1910 calc_values->t2t_vmin, calc_values->vmin_thole,
1911 calc_values->ui_vmin);
1912 pr_info("***********************************\n");
/*
 * slide_window_start - move a tap window's start inward for safety margin.
 * @sdhci: SDHCI host (passed through to margin lookup).
 * @tuning_data: tuning set supplying the calculated T2T values.
 * @tap_value: the window's current start tap.
 * @edge_attr: what kind of edge the start is (boundary vs tap hole).
 * @tap_hole: tap-hole position, or negative if none.
 *
 * Boundary starts advance by one T2T period (1000/t2t, fixed-point);
 * hole starts advance by 7% of the hole position plus a per-host margin.
 * The result is clamped to MAX_TAP_VALUES.
 *
 * Return: the adjusted start tap (return statement not visible here).
 */
1915 static int slide_window_start(struct sdhci_host *sdhci,
1916 struct tegra_tuning_data *tuning_data,
1917 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
1921 if (edge_attr == WIN_EDGE_BOUN_START) {
1923 tap_value += (1000 / tuning_data->calc_values.t2t_vmin);
1925 tap_value += (1000 / tuning_data->calc_values.t2t_vmax);
1926 } else if (edge_attr == WIN_EDGE_HOLE) {
1927 if (tap_hole >= 0) {
1928 tap_margin = get_tuning_tap_hole_margins(sdhci,
1929 tuning_data->calc_values.t2t_vmax);
1930 tap_value += ((7 * tap_hole) / 100) + tap_margin;
/* Never report a start beyond the hardware tap range. */
1934 if (tap_value > MAX_TAP_VALUES)
1935 tap_value = MAX_TAP_VALUES;
/*
 * slide_window_end - move a tap window's end inward for safety margin.
 * @sdhci: SDHCI host (passed through to margin lookup).
 * @tuning_data: tuning set supplying the calculated T2T values.
 * @tap_value: the window's current end tap.
 * @edge_attr: what kind of edge the end is (boundary vs tap hole).
 * @tap_hole: tap-hole position, or negative if none.
 *
 * Boundary ends are first rescaled by the Vmax/Vmin T2T ratio, then pulled
 * back by one T2T period; hole ends are anchored at the hole position and
 * pulled back by 7% plus a per-host margin.
 *
 * Return: the adjusted end tap (return statement not visible here).
 */
1940 static int slide_window_end(struct sdhci_host *sdhci,
1941 struct tegra_tuning_data *tuning_data,
1942 int tap_value, enum tap_win_edge_attr edge_attr, int tap_hole)
1946 if (edge_attr == WIN_EDGE_BOUN_END) {
1947 tap_value = (tap_value * tuning_data->calc_values.t2t_vmax) /
1948 tuning_data->calc_values.t2t_vmin;
1949 tap_value -= (1000 / tuning_data->calc_values.t2t_vmin);
1950 } else if (edge_attr == WIN_EDGE_HOLE) {
1951 if (tap_hole >= 0) {
1952 tap_value = tap_hole;
1953 tap_margin = get_tuning_tap_hole_margins(sdhci,
1954 tuning_data->calc_values.t2t_vmin);
1956 tap_value -= ((7 * tap_hole) / 100) + tap_margin;
/*
 * adjust_window_boundaries - shrink every tap window by its safety margins.
 * @sdhci: SDHCI host.
 * @tuning_data: tuning set with calculated T2T/UI/hole values.
 * @temp_tap_data: working copy of the tap windows, adjusted in place.
 *
 * For each valid window, recomputes the start and end via
 * slide_window_start()/slide_window_end(), updating the relevant tap-hole
 * position first when the edge is a hole (holes repeat every UI, so the
 * n-th hole is base + (n-1)*UI). Prints the final windows for diagnostics.
 */
1961 static int adjust_window_boundaries(struct sdhci_host *sdhci,
1962 struct tegra_tuning_data *tuning_data,
1963 struct tap_window_data *temp_tap_data)
1965 struct tap_window_data *tap_data;
1966 int vmin_tap_hole = 0;
1967 int vmax_tap_hole = 0;
1970 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
1971 tap_data = &temp_tap_data[i];
1972 /* Update with next hole if first hole is taken care of */
1973 if (tap_data->win_start_attr == WIN_EDGE_HOLE)
1974 vmax_tap_hole = tuning_data->calc_values.vmax_thole +
1975 (tap_data->hole_pos - 1) *
1976 tuning_data->calc_values.ui;
1977 tap_data->win_start = slide_window_start(sdhci, tuning_data,
1978 tap_data->win_start, tap_data->win_start_attr,
1981 /* Update with next hole if first hole is taken care of */
1982 if (tap_data->win_end_attr == WIN_EDGE_HOLE)
1983 vmin_tap_hole = tuning_data->calc_values.vmin_thole +
1984 (tap_data->hole_pos - 1) *
1985 tuning_data->calc_values.ui_vmin;
1986 tap_data->win_end = slide_window_end(sdhci, tuning_data,
1987 tap_data->win_end, tap_data->win_end_attr,
/* Diagnostic dump of the margin-adjusted windows. */
1991 pr_info("***********final tuning windows**********\n");
1992 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
1993 tap_data = &temp_tap_data[i];
1994 pr_info("win[%d]: %d - %d\n", i, tap_data->win_start,
1997 pr_info("********************************\n");
/*
 * find_best_tap_value - pick the widest usable tap window and its best tap.
 * @tuning_data: tuning set (partial-window flag, T2T values).
 * @temp_tap_data: margin-adjusted window array to search.
 * @vmin: voltage the windows were computed for; used only in the error log.
 *
 * Scans all valid windows for the largest positive width (the first window
 * gets special partial-window sizing). Within the chosen window, the best
 * tap is biased toward win_start by the t2t_vmin/(t2t_vmin+t2t_vmax) ratio;
 * a partial first window instead centers on win_end - width/2.
 *
 * Return: the selected tap value, or an error when no window opens (the
 * error-path return line is not visible in this chunk).
 */
2001 static int find_best_tap_value(struct tegra_tuning_data *tuning_data,
2002 struct tap_window_data *temp_tap_data, int vmin)
2004 struct tap_window_data *tap_data;
2005 u8 i = 0, sel_win = 0;
2006 int pref_win = 0, curr_win_size = 0;
2007 int best_tap_value = 0;
2009 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2010 tap_data = &temp_tap_data[i];
2011 if (!i && tuning_data->is_partial_win_valid) {
/* Partial first window: usable size is capped at 2 * win_end. */
2012 pref_win = tap_data->win_end - tap_data->win_start;
2013 if ((tap_data->win_end * 2) < pref_win)
2014 pref_win = tap_data->win_end * 2;
2017 curr_win_size = tap_data->win_end - tap_data->win_start;
2018 if ((curr_win_size > 0) && (curr_win_size > pref_win)) {
2019 pref_win = curr_win_size;
2025 if (pref_win <= 0) {
2026 pr_err("No window opening for %d vmin\n", vmin);
2030 tap_data = &temp_tap_data[sel_win];
2031 if (!sel_win && tuning_data->is_partial_win_valid) {
2033 best_tap_value = tap_data->win_end - (pref_win / 2);
2034 if (best_tap_value < 0)
/* Full window: weight toward the start by the Vmin/Vmax T2T ratio. */
2037 best_tap_value = tap_data->win_start +
2038 ((tap_data->win_end - tap_data->win_start) *
2039 tuning_data->calc_values.t2t_vmin) /
2040 (tuning_data->calc_values.t2t_vmin +
2041 tuning_data->calc_values.t2t_vmax);
2044 pr_info("best tap win - (%d-%d), best tap value %d\n",
2045 tap_data->win_start, tap_data->win_end, best_tap_value);
2046 return best_tap_value;
/*
 * sdhci_tegra_calculate_best_tap - search voltages for a usable tap value.
 * @sdhci: SDHCI host.
 * @tuning_data: tuning set to finalize (best_tap_value is written on
 *               success).
 *
 * Starting from the DVFS-predicted minimum core voltage for the tuning
 * frequency (falling back to the boot vcore), iterates raising vmin until
 * find_best_tap_value() yields a valid tap on the margin-adjusted windows.
 * On success, optionally locks the new vmin into DVFS via
 * tegra_dvfs_set_fmax_at_vmin(); -EPERM/-ENOSYS there means overrides are
 * disabled and tuning continues at the default voltage.
 */
2049 static int sdhci_tegra_calculate_best_tap(struct sdhci_host *sdhci,
2050 struct tegra_tuning_data *tuning_data)
2052 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2053 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2054 struct tap_window_data *temp_tap_data = NULL;
2055 int vmin, curr_vmin, best_tap_value = 0;
2058 curr_vmin = tegra_dvfs_predict_millivolts(pltfm_host->clk,
2059 tuning_data->freq_hz);
/* Prediction unavailable: fall back to the boot core voltage. */
2061 curr_vmin = tegra_host->boot_vcore_mv;
2065 SDHCI_TEGRA_DBG("%s: checking for win opening with vmin %d\n",
2066 mmc_hostname(sdhci->mmc), vmin);
/* Searched past the boot voltage without success: give up. */
2067 if ((best_tap_value < 0) &&
2068 (vmin > tegra_host->boot_vcore_mv)) {
2069 dev_err(mmc_dev(sdhci->mmc),
2070 "No best tap for any vcore range\n");
2071 kfree(temp_tap_data);
2072 temp_tap_data = NULL;
2076 calculate_vmin_values(sdhci, tuning_data, vmin,
2077 tegra_host->boot_vcore_mv);
/* Work on a scratch copy so final_tap_data survives each iteration. */
2079 if (temp_tap_data == NULL) {
2080 temp_tap_data = kzalloc(sizeof(struct tap_window_data) *
2081 tuning_data->num_of_valid_tap_wins, GFP_KERNEL);
2082 if (IS_ERR_OR_NULL(temp_tap_data)) {
2083 dev_err(mmc_dev(sdhci->mmc),
2084 "No memory for final tap value calculation\n");
2089 memcpy(temp_tap_data, tuning_data->final_tap_data,
2090 sizeof(struct tap_window_data) *
2091 tuning_data->num_of_valid_tap_wins);
2093 adjust_window_boundaries(sdhci, tuning_data, temp_tap_data);
2095 best_tap_value = find_best_tap_value(tuning_data,
2096 temp_tap_data, vmin);
/* No window at this voltage: retry at a higher vmin. */
2098 if (best_tap_value < 0)
2100 } while (best_tap_value < 0);
2102 tuning_data->best_tap_value = best_tap_value;
2103 tuning_data->nom_best_tap_value = best_tap_value;
2106 * Set the new vmin if there is any change. If dvfs overrides are
2107 * disabled, then print the error message but continue execution
2108 * rather than disabling tuning altogether.
2110 if ((tuning_data->best_tap_value >= 0) && (curr_vmin != vmin)) {
2111 err = tegra_dvfs_set_fmax_at_vmin(pltfm_host->clk,
2112 tuning_data->freq_hz, vmin);
2113 if ((err == -EPERM) || (err == -ENOSYS)) {
2115 * tegra_dvfs_set_fmax_at_vmin: will return EPERM or
2116 * ENOSYS, when DVFS override is not enabled, continue
2117 * tuning with default core voltage.
2120 "dvfs overrides disabled. Vmin not updated\n");
2124 kfree(temp_tap_data);
/*
 * sdhci_tegra_issue_tuning_cmd - issue one tuning command (CMD19/CMD21).
 * @sdhci: SDHCI host.
 *
 * Waits for the CMD/DAT inhibit bits to clear, arms the execute-tuning bit
 * in HOST_CONTROL2, programs block size/transfer mode/argument, and issues
 * the tuning opcode stored in tegra_host->tuning_opcode. Success is judged
 * by DATA_AVAIL without DATA_CRC in the interrupt status; on failure the
 * DATA and CMD lines are reset. Finally checks that the controller reports
 * a tuned clock (EXEC_TUNING cleared, TUNED_CLK set).
 */
2128 static int sdhci_tegra_issue_tuning_cmd(struct sdhci_host *sdhci)
2130 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2131 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2135 unsigned int timeout = 10;
/* Wait for any in-flight command/data before starting tuning. */
2139 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
2140 while (sdhci_readl(sdhci, SDHCI_PRESENT_STATE) & mask) {
2142 dev_err(mmc_dev(sdhci->mmc), "Controller never"
2143 "released inhibit bit(s).\n");
/* Clear any stale tuned-clock state, then start execute-tuning. */
2151 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2152 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2153 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2155 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2156 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2157 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
2160 * In response to CMD19, the card sends 64 bytes of tuning
2161 * block to the Host Controller. So we set the block size
2163 * In response to CMD21, the card sends 128 bytes of tuning
2164 * block for MMC_BUS_WIDTH_8 and 64 bytes for MMC_BUS_WIDTH_4
2165 * to the Host Controller. So we set the block size to 64 here.
2167 sdhci_writew(sdhci, SDHCI_MAKE_BLKSZ(7, tegra_host->tuning_bsize),
2170 sdhci_writeb(sdhci, 0xE, SDHCI_TIMEOUT_CONTROL);
2172 sdhci_writew(sdhci, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2174 sdhci_writel(sdhci, 0x0, SDHCI_ARGUMENT);
2176 /* Set the cmd flags */
2177 flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_DATA;
2178 /* Issue the command */
2179 sdhci_writew(sdhci, SDHCI_MAKE_CMD(
2180 tegra_host->tuning_opcode, flags), SDHCI_COMMAND);
/* Ack whatever interrupt status the tuning command produced. */
2186 intstatus = sdhci_readl(sdhci, SDHCI_INT_STATUS);
2188 sdhci_writel(sdhci, intstatus, SDHCI_INT_STATUS);
/* Data arrived cleanly -> this tap delivered a valid tuning block. */
2193 if ((intstatus & SDHCI_INT_DATA_AVAIL) &&
2194 !(intstatus & SDHCI_INT_DATA_CRC)) {
2196 sdhci->tuning_done = 1;
/* Failure: reset the data and command lines before the next attempt. */
2198 tegra_sdhci_reset(sdhci, SDHCI_RESET_DATA);
2199 tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD);
2203 if (sdhci->tuning_done) {
2204 sdhci->tuning_done = 0;
/* Tuning counts as complete only when the HW latched a tuned clock. */
2205 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
2206 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING) &&
2207 (ctrl & SDHCI_CTRL_TUNED_CLK))
/*
 * Walk tap values upward from @starting_tap, issuing a tuning command at
 * each tap, until the pass/fail state flips relative to @expect_failure
 * (i.e. find the next window edge). Returns the tap value where the
 * transition happened; the loop bound is MAX_TAP_VALUES.
 * NOTE(review): sampled excerpt — the retry/decrement logic between the
 * visible lines is not shown here.
 */
2217 static int sdhci_tegra_scan_tap_values(struct sdhci_host *sdhci,
2218 unsigned int starting_tap, bool expect_failure)
2220 unsigned int tap_value = starting_tap;
2222 unsigned int retry = TUNING_RETRIES;
2225 /* Set the tap delay */
2226 sdhci_tegra_set_tap_delay(sdhci, tap_value);
2228 /* Run frequency tuning */
2229 err = sdhci_tegra_issue_tuning_cmd(sdhci);
/* Result is conclusive for this tap: reset the retry budget. */
2234 retry = TUNING_RETRIES;
/* Stop scanning once the observed result no longer matches expectation. */
2235 if ((expect_failure && !err) ||
2236 (!expect_failure && err))
2240 } while (tap_value <= MAX_TAP_VALUES);
/*
 * Compute the "actual" (measured-UI-based) tuning values: the
 * tap-to-tap delay at Vmax and the first tap-hole position, using the
 * characterization coefficients and linear interpolation over voltage.
 * All coefficients are pre-scaled by 1000, hence the /1000 divides.
 * NOTE(review): sampled excerpt; divisor lines after some expressions
 * are missing from this view.
 */
2245 static int calculate_actual_tuning_values(int speedo,
2246 struct tegra_tuning_data *tuning_data, int voltage_mv)
2248 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2249 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2250 struct tuning_values *calc_values = &tuning_data->calc_values;
2252 int vmax_thole, vmin_thole;
2254 /* T2T_Vmax = (1000000/freq_MHz)/Calc_UI */
2255 calc_values->t2t_vmax = (1000000 / (tuning_data->freq_hz / 1000000)) /
2259 * Interpolate the tap hole.
2260 * Vmax_1'st_hole = (Calc_T2T_Vmax*(-thole_slope)+thole_tint.
2262 vmax_thole = (thole_coeffs->thole_vmax_int -
2263 (thole_coeffs->thole_vmax_slope * calc_values->t2t_vmax)) /
2265 vmin_thole = (thole_coeffs->thole_vmin_int -
2266 (thole_coeffs->thole_vmin_slope * calc_values->t2t_vmax)) /
/* At the characterized endpoints use the endpoint value directly... */
2268 if (voltage_mv == t2t_coeffs->vmin) {
2269 calc_values->vmax_thole = vmin_thole;
2270 } else if (voltage_mv == t2t_coeffs->vmax) {
2271 calc_values->vmax_thole = vmax_thole;
/* ...otherwise interpolate linearly between Vmin and Vmax. */
2273 slope = (vmax_thole - vmin_thole) /
2274 (t2t_coeffs->vmax - t2t_coeffs->vmin);
2275 inpt = ((vmax_thole * 1000) - (slope * 1250)) / 1000;
2276 calc_values->vmax_thole = slope * voltage_mv + inpt;
2283 * All coeffs are filled up in the table after multiplying by 1000. So, all
2284 * calculations should have a divide by 1000 at the end.
/*
 * Estimate tuning values (T2T delay, UI, tap-hole positions) purely from
 * the chip speedo value and the characterization coefficient tables,
 * before any hardware measurement is done. Results go to
 * tuning_data->est_values.
 */
2286 static int calculate_estimated_tuning_values(int speedo,
2287 struct tegra_tuning_data *tuning_data, int voltage_mv)
2289 struct tuning_t2t_coeffs *t2t_coeffs = tuning_data->t2t_coeffs;
2290 struct tap_hole_coeffs *thole_coeffs = tuning_data->thole_coeffs;
2291 struct tuning_values *est_values = &tuning_data->est_values;
2293 int vmax_t2t, vmin_t2t;
2294 int vmax_thole, vmin_thole;
2296 /* Est_T2T_Vmax = (speedo*(-t2t_slope)+t2t_int */
2297 vmax_t2t = (t2t_coeffs->t2t_vmax_int - (speedo *
2298 t2t_coeffs->t2t_vmax_slope)) / 1000;
2299 vmin_t2t = (t2t_coeffs->t2t_vmin_int - (speedo *
2300 t2t_coeffs->t2t_vmin_slope)) / 1000;
2301 est_values->t2t_vmin = vmin_t2t;
/* Pick/derive the T2T at the requested voltage. */
2303 if (voltage_mv == t2t_coeffs->vmin) {
2304 est_values->t2t_vmax = vmin_t2t;
2305 } else if (voltage_mv == t2t_coeffs->vmax) {
2306 est_values->t2t_vmax = vmax_t2t;
/* Work in the reciprocal domain: 1/T2T is linear in voltage. */
2308 vmax_t2t = 1000 / vmax_t2t;
2309 vmin_t2t = 1000 / vmin_t2t;
2311 * For any intermediate voltage between 0.95V and 1.25V,
2312 * calculate the slope and intercept from the T2T and tap hole
2313 * values of 0.95V and 1.25V and use them to calculate the
2314 * actual values. 1/T2T is a linear function of voltage.
2316 slope = ((vmax_t2t - vmin_t2t) * 1000) /
2317 (t2t_coeffs->vmax - t2t_coeffs->vmin);
2318 inpt = (vmax_t2t * 1000 - (slope * t2t_coeffs->vmax)) / 1000;
2319 est_values->t2t_vmax = (slope * voltage_mv) / 1000 + inpt;
2320 est_values->t2t_vmax = (1000 / est_values->t2t_vmax);
2323 /* Est_UI = (1000000/freq_MHz)/Est_T2T_Vmax */
2324 est_values->ui = (1000000 / (thole_coeffs->freq_khz / 1000)) /
2325 est_values->t2t_vmax;
2328 * Est_1'st_hole = (Est_T2T_Vmax*(-thole_slope)) + thole_int.
2330 vmax_thole = (thole_coeffs->thole_vmax_int -
2331 (thole_coeffs->thole_vmax_slope * est_values->t2t_vmax)) / 1000;
2332 vmin_thole = (thole_coeffs->thole_vmin_int -
2333 (thole_coeffs->thole_vmin_slope * est_values->t2t_vmax)) / 1000;
/* Same endpoint-or-interpolate pattern for the tap hole position. */
2335 if (voltage_mv == t2t_coeffs->vmin) {
2336 est_values->vmax_thole = vmin_thole;
2337 } else if (voltage_mv == t2t_coeffs->vmax) {
2338 est_values->vmax_thole = vmax_thole;
2341 * For any intermediate voltage between 0.95V and 1.25V,
2342 * calculate the slope and intercept from the t2t and tap hole
2343 * values of 0.95V and 1.25V and use them to calculate the
2344 * actual values. Tap hole is a linear function of voltage.
2346 slope = ((vmax_thole - vmin_thole) * 1000) /
2347 (t2t_coeffs->vmax - t2t_coeffs->vmin);
2348 inpt = (vmax_thole * 1000 - (slope * t2t_coeffs->vmax)) / 1000;
2349 est_values->vmax_thole = (slope * voltage_mv) / 1000 + inpt;
2351 est_values->vmin_thole = vmin_thole;
2357 * Insert the calculated holes and get the final tap windows
2358 * with the boundaries and holes set.
/*
 * Walk the measured tap windows, splitting any window that contains a
 * calculated tap hole (holes repeat every calc UI). The resulting
 * window list, with hole attributes set, goes into
 * tuning_data->final_tap_data; num_of_valid_tap_wins is updated.
 * NOTE(review): the "* 42" allocation size presumably caps the window
 * count at 42 entries — TODO confirm against the full source; no bound
 * check against it is visible in this excerpt.
 */
2360 static int adjust_holes_in_tap_windows(struct sdhci_host *sdhci,
2361 struct tegra_tuning_data *tuning_data)
2363 struct tap_window_data *tap_data;
2364 struct tap_window_data *final_tap_data;
2365 struct tuning_values *calc_values = &tuning_data->calc_values;
2366 int tap_hole, size = 0;
2367 u8 i = 0, j = 0, num_of_wins, hole_pos = 0;
2369 tuning_data->final_tap_data =
2370 devm_kzalloc(mmc_dev(sdhci->mmc),
2371 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
2372 if (IS_ERR_OR_NULL(tuning_data->final_tap_data)) {
2373 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
2377 num_of_wins = tuning_data->num_of_valid_tap_wins;
2378 tap_hole = calc_values->vmax_thole;
2381 tap_data = &tuning_data->tap_data[i];
2382 final_tap_data = &tuning_data->final_tap_data[j];
/* Hole is before this window: advance the hole by one UI and re-check. */
2383 if (tap_hole < tap_data->win_start) {
2384 tap_hole += calc_values->ui;
/* Hole is past this window: copy the window through unchanged. */
2387 } else if (tap_hole > tap_data->win_end) {
2388 memcpy(final_tap_data, tap_data,
2389 sizeof(struct tap_window_data));
/* Hole lands inside this window: split it around the hole. */
2394 } else if ((tap_hole >= tap_data->win_start) &&
2395 (tap_hole <= tap_data->win_end)) {
2396 size = tap_data->win_end - tap_data->win_start;
2399 &tuning_data->final_tap_data[j];
2400 if (tap_hole == tap_data->win_start) {
2401 final_tap_data->win_start =
2403 final_tap_data->win_start_attr =
2405 final_tap_data->hole_pos = hole_pos;
2406 tap_hole += calc_values->ui;
2409 final_tap_data->win_start =
2410 tap_data->win_start;
2411 final_tap_data->win_start_attr =
2412 WIN_EDGE_BOUN_START;
2414 if (tap_hole <= tap_data->win_end) {
2415 final_tap_data->win_end = tap_hole - 1;
2416 final_tap_data->win_end_attr =
2418 final_tap_data->hole_pos = hole_pos;
2419 tap_data->win_start = tap_hole;
2420 } else if (tap_hole > tap_data->win_end) {
2421 final_tap_data->win_end =
2423 final_tap_data->win_end_attr =
2425 tap_data->win_start =
2428 size = tap_data->win_end - tap_data->win_start;
2434 } while (num_of_wins > 0);
2436 /* Update the num of valid wins count after tap holes insertion */
2437 tuning_data->num_of_valid_tap_wins = j;
/* Dump the post-split window list for debugging. */
2439 pr_info("********tuning windows after inserting holes*****\n");
2440 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
2441 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2442 final_tap_data = &tuning_data->final_tap_data[i];
2443 pr_info("win[%d]:%d(%d) - %d(%d)\n", i,
2444 final_tap_data->win_start,
2445 final_tap_data->win_start_attr,
2446 final_tap_data->win_end, final_tap_data->win_end_attr);
2448 pr_info("***********************************************\n");
2454 * Insert the boundaries from negative margin calculations into the windows
/*
 * Split the tap windows at the UI-periodic boundary positions derived
 * from @boun_end (the negative-margin calculation). Works into a
 * devm-allocated scratch array, then copies the result back over
 * tuning_data->tap_data and updates num_of_valid_tap_wins.
 * NOTE(review): sampled excerpt — loop headers and several index
 * increments between the visible lines are not shown.
 */
2457 static int insert_boundaries_in_tap_windows(struct sdhci_host *sdhci,
2458 struct tegra_tuning_data *tuning_data, u8 boun_end)
2460 struct tap_window_data *tap_data;
2461 struct tap_window_data *new_tap_data;
2462 struct tap_window_data *temp_tap_data;
2463 struct tuning_values *calc_values = &tuning_data->calc_values;
2465 u8 i = 0, j = 0, num_of_wins;
2466 bool get_next_boun = false;
2468 temp_tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
2469 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
2470 if (IS_ERR_OR_NULL(temp_tap_data)) {
2471 dev_err(mmc_dev(sdhci->mmc), "No mem for final tap wins\n");
2475 num_of_wins = tuning_data->num_of_valid_tap_wins;
/* First boundary position: boun_end reduced modulo one UI. */
2476 curr_boun = boun_end % calc_values->ui;
2478 if (get_next_boun) {
2479 curr_boun += calc_values->ui;
2481 * If the boun_end exceeds the intial boundary end,
2482 * just copy remaining windows and return.
2484 if (curr_boun >= boun_end)
/* Push the boundary out of range so remaining windows copy through. */
2485 curr_boun += MAX_TAP_VALUES;
2488 tap_data = &tuning_data->tap_data[i];
2489 new_tap_data = &temp_tap_data[j];
/* Boundary before this window: fetch the next boundary. */
2490 if (curr_boun <= tap_data->win_start) {
2491 get_next_boun = true;
/* Boundary after this window: copy window unchanged. */
2493 } else if (curr_boun >= tap_data->win_end) {
2494 memcpy(new_tap_data, tap_data,
2495 sizeof(struct tap_window_data));
2499 get_next_boun = false;
/* Boundary inside this window: split into before / boundary / after. */
2501 } else if ((curr_boun >= tap_data->win_start) &&
2502 (curr_boun <= tap_data->win_end)) {
2503 new_tap_data->win_start = tap_data->win_start;
2504 new_tap_data->win_start_attr =
2505 tap_data->win_start_attr;
2506 new_tap_data->win_end = curr_boun - 1;
2507 new_tap_data->win_end_attr =
2508 tap_data->win_end_attr;
/* Single-tap window for the boundary itself. */
2510 new_tap_data = &temp_tap_data[j];
2511 new_tap_data->win_start = curr_boun;
2512 new_tap_data->win_end = curr_boun;
2513 new_tap_data->win_start_attr =
2514 WIN_EDGE_BOUN_START;
2515 new_tap_data->win_end_attr =
/* Remainder of the original window after the boundary. */
2518 new_tap_data = &temp_tap_data[j];
2519 new_tap_data->win_start = curr_boun + 1;
2520 new_tap_data->win_start_attr = WIN_EDGE_BOUN_START;
2521 new_tap_data->win_end = tap_data->win_end;
2522 new_tap_data->win_end_attr =
2523 tap_data->win_end_attr;
2527 get_next_boun = true;
2529 } while (num_of_wins > 0);
2531 /* Update the num of valid wins count after tap holes insertion */
2532 tuning_data->num_of_valid_tap_wins = j;
/* Commit the scratch list back into tap_data and dump it. */
2534 memcpy(tuning_data->tap_data, temp_tap_data,
2535 j * sizeof(struct tap_window_data));
2536 SDHCI_TEGRA_DBG("***tuning windows after inserting boundaries***\n");
2537 SDHCI_TEGRA_DBG("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
2538 for (i = 0; i < tuning_data->num_of_valid_tap_wins; i++) {
2539 new_tap_data = &tuning_data->tap_data[i];
2540 SDHCI_TEGRA_DBG("win[%d]:%d(%d) - %d(%d)\n", i,
2541 new_tap_data->win_start,
2542 new_tap_data->win_start_attr,
2543 new_tap_data->win_end, new_tap_data->win_end_attr);
2545 SDHCI_TEGRA_DBG("***********************************************\n");
2551 * Scan for all tap values and get all passing tap windows.
/*
 * Core measurement pass of the tuning algorithm:
 *  1. scan all 0..MAX_TAP_VALUES taps, recording passing windows;
 *  2. derive candidate UIs from boundary-to-boundary spans and discredit
 *     implausible ones against the estimated UI;
 *  3. compute actual tuning values and negative-margin boundaries;
 *  4. insert boundaries and holes into the window list.
 * Runs under sdhci->lock while touching the hardware.
 * NOTE(review): sampled excerpt — many lines (loop bodies, index
 * updates, early `continue`s) are missing between visible statements.
 */
2553 static int sdhci_tegra_get_tap_window_data(struct sdhci_host *sdhci,
2554 struct tegra_tuning_data *tuning_data)
2556 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2557 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2558 struct tap_window_data *tap_data;
2559 struct tuning_ui tuning_ui[10];
2560 int err = 0, partial_win_start = 0, temp_margin = 0;
2561 unsigned int tap_value, calc_ui = 0;
2562 u8 prev_boundary_end = 0, num_of_wins = 0;
2563 u8 num_of_uis = 0, valid_num_uis = 0;
2564 u8 ref_ui, first_valid_full_win = 0;
2565 u8 boun_end = 0, next_boun_end = 0;
2567 bool valid_ui_found = false;
2570 * Assume there are a max of 10 windows and allocate tap window
2571 * structures for the same. If there are more windows, the array
2572 * size can be adjusted later using realloc.
/*
 * NOTE(review): the comment above says 10, the allocation below is for
 * 42 entries — presumably enlarged later; confirm against full source.
 */
2574 tuning_data->tap_data = devm_kzalloc(mmc_dev(sdhci->mmc),
2575 sizeof(struct tap_window_data) * 42, GFP_KERNEL);
2576 if (IS_ERR_OR_NULL(tuning_data->tap_data)) {
2577 dev_err(mmc_dev(sdhci->mmc), "No memory for tap data\n");
2581 spin_lock(&sdhci->lock);
2584 tap_data = &tuning_data->tap_data[num_of_wins];
2585 /* Get the window start */
2586 tap_value = sdhci_tegra_scan_tap_values(sdhci, tap_value, true);
2587 tap_data->win_start = min_t(u8, tap_value, MAX_TAP_VALUES);
2589 if (tap_value >= MAX_TAP_VALUES) {
2590 /* If it's first iteration, then all taps failed */
2592 dev_err(mmc_dev(sdhci->mmc),
2593 "All tap values(0-255) failed\n");
2594 spin_unlock(&sdhci->lock);
2597 /* All windows obtained */
2602 /* Get the window end */
2603 tap_value = sdhci_tegra_scan_tap_values(sdhci,
2605 tap_data->win_end = min_t(u8, (tap_value - 1), MAX_TAP_VALUES);
2606 tap_data->win_size = tap_data->win_end - tap_data->win_start;
2610 * If the size of window is more than 4 taps wide, then it is a
2611 * valid window. If tap value 0 has passed, then a partial
2612 * window exists. Mark all the window edges as boundary edges.
2614 if (tap_data->win_size > 4) {
2615 if (tap_data->win_start == 0)
2616 tuning_data->is_partial_win_valid = true;
2617 tap_data->win_start_attr = WIN_EDGE_BOUN_START;
2618 tap_data->win_end_attr = WIN_EDGE_BOUN_END;
2620 /* Invalid window as size is less than 5 taps */
2621 SDHCI_TEGRA_DBG("Invalid tuning win (%d-%d) ignored\n",
2622 tap_data->win_start, tap_data->win_end);
2626 /* Ignore first and last partial UIs */
2627 if (tap_data->win_end_attr == WIN_EDGE_BOUN_END) {
2628 tuning_ui[num_of_uis].ui = tap_data->win_end -
2630 tuning_ui[num_of_uis].is_valid_ui = true;
2632 prev_boundary_end = tap_data->win_end;
2635 } while (tap_value < MAX_TAP_VALUES);
2636 spin_unlock(&sdhci->lock);
2638 tuning_data->num_of_valid_tap_wins = num_of_wins;
2639 valid_num_uis = num_of_uis;
2641 /* Print info of all tap windows */
2642 pr_info("**********Auto tuning windows*************\n");
2643 pr_info("WIN_ATTR legend: 0-BOUN_ST, 1-BOUN_END, 2-HOLE\n");
2644 for (j = 0; j < tuning_data->num_of_valid_tap_wins; j++) {
2645 tap_data = &tuning_data->tap_data[j];
2646 pr_info("win[%d]: %d(%d) - %d(%d)\n",
2647 j, tap_data->win_start, tap_data->win_start_attr,
2648 tap_data->win_end, tap_data->win_end_attr);
2650 pr_info("***************************************\n");
2652 /* Mark the first last partial UIs as invalid */
2653 tuning_ui[0].is_valid_ui = false;
2654 tuning_ui[num_of_uis - 1].is_valid_ui = false;
2657 /* Discredit all uis at either end with size less than 30% of est ui */
2658 ref_ui = (30 * tuning_data->est_values.ui) / 100;
2659 for (j = 0; j < num_of_uis; j++) {
2660 if (tuning_ui[j].is_valid_ui) {
2661 tuning_ui[j].is_valid_ui = false;
2664 if (tuning_ui[j].ui > ref_ui)
/* Same discredit pass, from the tail end inward. */
2668 for (j = num_of_uis; j > 0; j--) {
2669 if (tuning_ui[j - 1].ui < ref_ui) {
2670 if (tuning_ui[j - 1].is_valid_ui) {
2671 tuning_ui[j - 1].is_valid_ui = false;
2678 /* Calculate 0.75*est_UI */
2679 ref_ui = (75 * tuning_data->est_values.ui) / 100;
2682 * Check for valid UIs and discredit invalid UIs. A UI is considered
2683 * valid if it's greater than (0.75*est_UI). If an invalid UI is found,
2684 * also discredit the smaller of the two adjacent windows.
2686 for (j = 1; j < (num_of_uis - 1); j++) {
2687 if (tuning_ui[j].ui > ref_ui && tuning_ui[j].is_valid_ui) {
2688 tuning_ui[j].is_valid_ui = true;
2690 if (tuning_ui[j].is_valid_ui) {
2691 tuning_ui[j].is_valid_ui = false;
2694 if (!tuning_ui[j + 1].is_valid_ui ||
2695 !tuning_ui[j - 1].is_valid_ui) {
2696 if (tuning_ui[j - 1].is_valid_ui) {
2697 tuning_ui[j - 1].is_valid_ui = false;
2699 } else if (tuning_ui[j + 1].is_valid_ui) {
2700 tuning_ui[j + 1].is_valid_ui = false;
/* Both neighbors valid: drop whichever adjacent UI is smaller. */
2705 if (tuning_ui[j - 1].ui > tuning_ui[j + 1].ui)
2706 tuning_ui[j + 1].is_valid_ui = false;
2708 tuning_ui[j - 1].is_valid_ui = false;
2714 /* Calculate the cumulative UI if there are valid UIs left */
2715 if (valid_num_uis) {
2716 for (j = 0; j < num_of_uis; j++)
2717 if (tuning_ui[j].is_valid_ui) {
2718 calc_ui += tuning_ui[j].ui;
2719 if (!first_valid_full_win)
2720 first_valid_full_win = j;
/* Average of the surviving UIs becomes the calculated UI. */
2725 tuning_data->calc_values.ui = (calc_ui / valid_num_uis);
2726 valid_ui_found = true;
/* No usable UI measured: fall back to the estimated UI. */
2728 tuning_data->calc_values.ui = tuning_data->est_values.ui;
2729 valid_ui_found = false;
2732 SDHCI_TEGRA_DBG("****Tuning UIs***********\n");
2733 for (j = 0; j < num_of_uis; j++)
2734 SDHCI_TEGRA_DBG("Tuning UI[%d] : %d, Is valid[%d]\n",
2735 j, tuning_ui[j].ui, tuning_ui[j].is_valid_ui);
2736 SDHCI_TEGRA_DBG("*************************\n");
2738 /* Get the calculated tuning values */
2739 err = calculate_actual_tuning_values(tegra_host->speedo, tuning_data,
2740 tegra_host->boot_vcore_mv);
2743 * Calculate negative margin if partial win is valid. There are two
2745 * Case 1: If Avg_UI is found, then keep subtracting avg_ui from start
2746 * of first valid full window until a value <=0 is obtained.
2747 * Case 2: If Avg_UI is not found, subtract avg_ui from all boundary
2748 * starts until a value <=0 is found.
2750 if (tuning_data->is_partial_win_valid && (num_of_wins > 1)) {
2751 if (valid_ui_found) {
2753 tuning_data->tap_data[first_valid_full_win].win_start;
2754 boun_end = partial_win_start;
2755 partial_win_start %= tuning_data->calc_values.ui;
2756 partial_win_start -= tuning_data->calc_values.ui;
2758 for (j = 0; j < NEG_MAR_CHK_WIN_COUNT; j++) {
2760 tuning_data->tap_data[j + 1].win_start;
2762 boun_end = temp_margin;
2763 else if (!next_boun_end)
2764 next_boun_end = temp_margin;
2765 temp_margin %= tuning_data->calc_values.ui;
2766 temp_margin -= tuning_data->calc_values.ui;
2767 if (!partial_win_start ||
2768 (temp_margin > partial_win_start))
2769 partial_win_start = temp_margin;
/* A non-positive start extends window 0 into negative-margin territory. */
2772 if (partial_win_start <= 0)
2773 tuning_data->tap_data[0].win_start = partial_win_start;
2777 insert_boundaries_in_tap_windows(sdhci, tuning_data, boun_end);
2779 insert_boundaries_in_tap_windows(sdhci, tuning_data, next_boun_end);
2781 /* Insert calculated holes into the windows */
2782 err = adjust_holes_in_tap_windows(sdhci, tuning_data);
/*
 * Debug helper: log the configured tuning frequency count and the
 * frequency/band of every entry in tegra_host->tuning_data[].
 * No side effects beyond SDHCI_TEGRA_DBG output.
 */
2787 static void sdhci_tegra_dump_tuning_constraints(struct sdhci_host *sdhci)
2789 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2790 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2791 struct tegra_tuning_data *tuning_data;
2794 SDHCI_TEGRA_DBG("%s: Num of tuning frequencies%d\n",
2795 mmc_hostname(sdhci->mmc), tegra_host->tuning_freq_count);
2796 for (i = 0; i < tegra_host->tuning_freq_count; ++i) {
2797 tuning_data = &tegra_host->tuning_data[i];
2798 SDHCI_TEGRA_DBG("%s: Tuning freq[%d]: %d, freq band %d\n",
2799 mmc_hostname(sdhci->mmc), i,
2800 tuning_data->freq_hz, tuning_data->freq_band);
/*
 * Map a tuning-voltage selector (presumably extracted from *mask — the
 * extraction lines are not visible in this excerpt; TODO confirm) to
 * the corresponding vcore millivolt value. Falls back to the boot
 * vcore voltage for unrecognized selectors.
 */
2804 static unsigned int get_tuning_voltage(struct sdhci_tegra *tegra_host, u8 *mask)
2811 case NOMINAL_VCORE_TUN:
2812 return tegra_host->nominal_vcore_mv;
2813 case BOOT_VCORE_TUN:
2814 return tegra_host->boot_vcore_mv;
2815 case MIN_OVERRIDE_VCORE_TUN:
2816 return tegra_host->min_vcore_override_mv;
/* Default: tune at the boot-time core voltage. */
2819 return tegra_host->boot_vcore_mv;
/*
 * Classify the host's max clock into a tuning frequency band: return
 * the index of the first entry in the soc tuning_freq_list that is >=
 * max_clk, or TUNING_MAX_FREQ if the clock exceeds every list entry.
 */
2822 static u8 sdhci_tegra_get_freq_point(struct sdhci_host *sdhci)
2824 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2825 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2826 const unsigned int *freq_list;
2830 curr_clock = sdhci->max_clk;
2831 freq_list = tegra_host->soc_data->tuning_freq_list;
2833 for (i = 0; i < TUNING_FREQ_COUNT; ++i)
2834 if (curr_clock <= freq_list[i])
/* Clock is above every listed tuning frequency. */
2837 return TUNING_MAX_FREQ;
/*
 * Return the tap-hole margin for this controller instance. If the SoC
 * requests fixed margins (NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS) and a
 * table entry matches this device name, use the table value; otherwise
 * compute a margin from the tap-to-tap tuning value.
 * NOTE(review): the per-entry pointer advance inside the lookup loop is
 * not visible in this excerpt — confirm tap_hole is incremented.
 */
2840 static int get_tuning_tap_hole_margins(struct sdhci_host *sdhci,
2841 int t2t_tuning_value)
2843 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2844 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2845 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2846 struct tuning_tap_hole_margins *tap_hole;
2851 if (soc_data->nvquirks & NVQUIRK_SELECT_FIXED_TAP_HOLE_MARGINS) {
2852 if (soc_data->tap_hole_margins) {
2853 tap_hole = soc_data->tap_hole_margins;
2854 dev_id = dev_name(mmc_dev(sdhci->mmc));
2855 for (i = 0; i < soc_data->tap_hole_margins_count; i++) {
2856 if (!strcmp(dev_id, tap_hole->dev_id))
2857 return tap_hole->tap_hole_margin;
2861 dev_info(mmc_dev(sdhci->mmc),
2862 "Fixed tap hole margins missing\n");
2866 /* if no margin are available calculate tap margin */
2867 tap_margin = (((2 * (450 / t2t_tuning_value)) +
2874 * The frequency tuning algorithm tries to calculate the tap-to-tap delay
2875 * UI and estimate holes using equations and predetermined coefficients from
2876 * the characterization data. The algorithm will not work without this data.
/*
 * For every supported tuning frequency, locate this device's T2T and
 * tap-hole coefficient tables in the soc data (matched by device name,
 * and by frequency for the tap-hole table). Already-found entries are
 * kept unless @force_retuning is set. Missing coefficients leave the
 * pointer NULL and are reported via dev_err.
 */
2878 static int find_tuning_coeffs_data(struct sdhci_host *sdhci,
2879 bool force_retuning)
2881 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2882 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2883 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
2884 struct tegra_tuning_data *tuning_data;
2885 struct tuning_t2t_coeffs *t2t_coeffs;
2886 struct tap_hole_coeffs *thole_coeffs;
2888 unsigned int freq_khz;
2890 bool coeffs_set = false;
2892 dev_id = dev_name(mmc_dev(sdhci->mmc));
2893 /* Find the coeffs data for all supported frequencies */
2894 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
2895 tuning_data = &tegra_host->tuning_data[i];
2897 /* Skip if T2T coeffs are already found */
2898 if (tuning_data->t2t_coeffs == NULL || force_retuning) {
2899 t2t_coeffs = soc_data->t2t_coeffs;
2900 for (j = 0; j < soc_data->t2t_coeffs_count; j++) {
2901 if (!strcmp(dev_id, t2t_coeffs->dev_id)) {
2902 tuning_data->t2t_coeffs = t2t_coeffs;
2904 dev_info(mmc_dev(sdhci->mmc),
2905 "Found T2T coeffs data\n");
/* No matching T2T table for this device: tuning cannot proceed for it. */
2911 dev_err(mmc_dev(sdhci->mmc),
2912 "T2T coeffs data missing\n");
2913 tuning_data->t2t_coeffs = NULL;
2919 /* Skip if tap hole coeffs are already found */
2920 if (tuning_data->thole_coeffs == NULL || force_retuning) {
2921 thole_coeffs = soc_data->tap_hole_coeffs;
2922 freq_khz = tuning_data->freq_hz / 1000;
/* Tap-hole tables are matched on both device name and frequency. */
2923 for (j = 0; j < soc_data->tap_hole_coeffs_count; j++) {
2924 if (!strcmp(dev_id, thole_coeffs->dev_id) &&
2925 (freq_khz == thole_coeffs->freq_khz)) {
2926 tuning_data->thole_coeffs =
2929 dev_info(mmc_dev(sdhci->mmc),
2930 "%dMHz tap hole coeffs found\n",
2938 dev_err(mmc_dev(sdhci->mmc),
2939 "%dMHz Tap hole coeffs data missing\n",
2941 tuning_data->thole_coeffs = NULL;
2951 * Determines the numbers of frequencies required and then fills up the tuning
2952 * constraints for each of the frequencies. The data of lower frequency is
2953 * filled first and then the higher frequency data. Max supported frequencies
/*
 * Populate tegra_host->tuning_data[] entries (freq, band, vcore
 * constraint mask) for each tuning frequency. Returns the frequency
 * count on success (via a path outside this excerpt — TODO confirm) or
 * a negative value for an unsupported count.
 */
2956 static int setup_freq_constraints(struct sdhci_host *sdhci,
2957 const unsigned int *freq_list)
2959 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2960 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2961 struct tegra_tuning_data *tuning_data;
/* DFS-scaled hosts (and non-SDR50 timings) tune at multiple frequencies. */
2965 if ((sdhci->mmc->ios.timing != MMC_TIMING_UHS_SDR50) &&
2966 (sdhci->mmc->caps2 & MMC_CAP2_FREQ_SCALING))
2967 freq_count = DFS_FREQ_COUNT;
2971 freq_band = sdhci_tegra_get_freq_point(sdhci);
2972 /* Fill up the req frequencies */
2973 switch (freq_count) {
/* Single-frequency case: tune only at max_clk. */
2975 tuning_data = &tegra_host->tuning_data[0];
2976 tuning_data->freq_hz = sdhci->max_clk;
2977 tuning_data->freq_band = freq_band;
2978 tuning_data->constraints.vcore_mask =
2979 tuning_vcore_constraints[freq_band].vcore_mask;
2980 tuning_data->nr_voltages =
2981 hweight32(tuning_data->constraints.vcore_mask);
/* Multi-frequency case: entry 1 is max_clk... */
2984 tuning_data = &tegra_host->tuning_data[1];
2985 tuning_data->freq_hz = sdhci->max_clk;
2986 tuning_data->freq_band = freq_band;
2987 tuning_data->constraints.vcore_mask =
2988 tuning_vcore_constraints[freq_band].vcore_mask;
2989 tuning_data->nr_voltages =
2990 hweight32(tuning_data->constraints.vcore_mask);
/* ...entry 0 is the next lower frequency from the soc list. */
2992 tuning_data = &tegra_host->tuning_data[0];
2993 for (i = (freq_band - 1); i >= 0; i--) {
2996 tuning_data->freq_hz = freq_list[i];
2997 tuning_data->freq_band = i;
2998 tuning_data->nr_voltages = 1;
2999 tuning_data->constraints.vcore_mask =
3000 tuning_vcore_constraints[i].vcore_mask;
3001 tuning_data->nr_voltages =
3002 hweight32(tuning_data->constraints.vcore_mask);
3006 dev_err(mmc_dev(sdhci->mmc), "Unsupported freq count\n");
3014 * Get the supported frequencies and other tuning related constraints for each
3015 * frequency. The supported frequencies should be determined from the list of
3016 * frequencies in the soc data and also consider the platform clock limits as
3017 * well as any DFS related restrictions.
/*
 * Entry point that sets up all per-frequency tuning constraints:
 * frequency list (setup_freq_constraints) and characterization
 * coefficients (find_tuning_coeffs_data), then dumps them. A non-zero
 * tuning_freq_count means constraints were already set up and is only
 * redone when @force_retuning is set.
 */
3019 static int sdhci_tegra_get_tuning_constraints(struct sdhci_host *sdhci,
3020 bool force_retuning)
3022 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3023 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3024 const unsigned int *freq_list;
3027 /* A valid freq count means freq constraints are already set up */
3028 if (!tegra_host->tuning_freq_count || force_retuning) {
3029 freq_list = tegra_host->soc_data->tuning_freq_list;
3030 tegra_host->tuning_freq_count =
3031 setup_freq_constraints(sdhci, freq_list);
3032 if (tegra_host->tuning_freq_count < 0) {
3033 dev_err(mmc_dev(sdhci->mmc),
3034 "Invalid tuning freq count\n");
3039 err = find_tuning_coeffs_data(sdhci, force_retuning);
3043 sdhci_tegra_dump_tuning_constraints(sdhci);
3049 * During boot, only boot voltage for vcore can be set. Check if the current
3050 * voltage is allowed to be used. Nominal and min override voltages can be
3051 * set once boot is done. This will be notified through late subsys init call.
/*
 * Apply a vcore override of @voltage mV via DVFS for tuning (0 clears
 * any override). Disabled DVFS overrides (-EPERM/-ENOSYS) are treated
 * as non-fatal: tuning continues at the default core voltage. Nominal
 * vcore may additionally require an EMC clock boost, which is reverted
 * if the override ultimately fails.
 */
3053 static int sdhci_tegra_set_tuning_voltage(struct sdhci_host *sdhci,
3054 unsigned int voltage)
3056 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3057 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3059 bool nom_emc_freq_set = false;
/* Non-boot voltages are only legal once overrides are allowed (post-boot). */
3061 if (voltage && (voltage != tegra_host->boot_vcore_mv) &&
3062 !vcore_overrides_allowed) {
3063 SDHCI_TEGRA_DBG("%s: Override vcore %dmv not allowed\n",
3064 mmc_hostname(sdhci->mmc), voltage);
3068 SDHCI_TEGRA_DBG("%s: Setting vcore override %d\n",
3069 mmc_hostname(sdhci->mmc), voltage);
3071 * First clear any previous dvfs override settings. If dvfs overrides
3072 * are disabled, then print the error message but continue execution
3073 * rather than failing tuning altogether.
3075 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, 0);
3076 if ((err == -EPERM) || (err == -ENOSYS)) {
3078 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3079 * when DVFS override is not enabled. Continue tuning
3080 * with default core voltage
3082 SDHCI_TEGRA_DBG("dvfs overrides disabled. Nothing to clear\n");
3088 /* EMC clock freq boost might be required for nominal core voltage */
3089 if ((voltage == tegra_host->nominal_vcore_mv) &&
3090 tegra_host->plat->en_nominal_vcore_tuning &&
3091 tegra_host->emc_clk) {
3092 err = clk_set_rate(tegra_host->emc_clk,
3093 SDMMC_EMC_NOM_VOLT_FREQ);
3095 dev_err(mmc_dev(sdhci->mmc),
3096 "Failed to set emc nom clk freq %d\n", err);
3098 nom_emc_freq_set = true;
3102 * If dvfs overrides are disabled, then print the error message but
3103 * continue tuning execution rather than failing tuning altogether.
3105 err = tegra_dvfs_override_core_voltage(pltfm_host->clk, voltage);
3106 if ((err == -EPERM) || (err == -ENOSYS)) {
3108 * tegra_dvfs_override_core_voltage will return EPERM or ENOSYS,
3109 * when DVFS override is not enabled. Continue tuning
3110 * with default core voltage
3112 SDHCI_TEGRA_DBG("dvfs overrides disabled. No overrides set\n");
3115 dev_err(mmc_dev(sdhci->mmc),
3116 "failed to set vcore override %dmv\n", voltage);
3118 /* Revert emc clock to normal freq */
3119 if (nom_emc_freq_set) {
3120 err = clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
3122 dev_err(mmc_dev(sdhci->mmc),
3123 "Failed to revert emc nom clk freq %d\n", err);
/*
 * Run the window-measurement phase for one tuning frequency: for each
 * voltage in the constraint mask, set the vcore override and collect
 * the tap windows via sdhci_tegra_get_tap_window_data.
 */
3129 static int sdhci_tegra_run_tuning(struct sdhci_host *sdhci,
3130 struct tegra_tuning_data *tuning_data)
3132 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3133 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3136 u8 i, vcore_mask = 0;
3138 vcore_mask = tuning_data->constraints.vcore_mask;
3139 for (i = 0; i < tuning_data->nr_voltages; i++) {
/* get_tuning_voltage consumes the next selector from vcore_mask. */
3140 voltage = get_tuning_voltage(tegra_host, &vcore_mask);
3141 err = sdhci_tegra_set_tuning_voltage(sdhci, voltage);
3143 dev_err(mmc_dev(sdhci->mmc),
3144 "Unable to set override voltage.\n");
3148 /* Get the tuning window info */
3149 SDHCI_TEGRA_DBG("Getting tuning windows...\n");
3150 err = sdhci_tegra_get_tap_window_data(sdhci, tuning_data);
3152 dev_err(mmc_dev(sdhci->mmc),
3153 "Failed to get tap win %d\n", err);
3156 SDHCI_TEGRA_DBG("%s: %d tuning window data obtained\n",
3157 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
/*
 * Sanity-check the computed best tap value for the current clock,
 * program it into the controller, and confirm it by issuing one tuning
 * command. Returns the tuning-command result; logs on failure.
 */
3162 static int sdhci_tegra_verify_best_tap(struct sdhci_host *sdhci)
3164 struct tegra_tuning_data *tuning_data;
3167 tuning_data = sdhci_tegra_get_tuning_data(sdhci, sdhci->max_clk);
/* Reject out-of-range tap values before touching the hardware. */
3168 if ((tuning_data->best_tap_value < 0) ||
3169 (tuning_data->best_tap_value > MAX_TAP_VALUES)) {
3170 dev_err(mmc_dev(sdhci->mmc),
3171 "Trying to verify invalid best tap value\n");
3174 dev_info(mmc_dev(sdhci->mmc),
3175 "%s: tuning freq %dhz, best tap %d\n",
3176 __func__, tuning_data->freq_hz,
3177 tuning_data->best_tap_value);
3180 /* Set the best tap value */
3181 sdhci_tegra_set_tap_delay(sdhci, tuning_data->best_tap_value);
3183 /* Run tuning after setting the best tap value */
3184 err = sdhci_tegra_issue_tuning_cmd(sdhci);
3186 dev_err(mmc_dev(sdhci->mmc),
3187 "%dMHz best tap value verification failed %d\n",
3188 tuning_data->freq_hz, err);
/*
 * sdhci_tegra_execute_tuning - Tegra-specific frequency tuning entry point.
 *
 * Runs tuning, serialized by tuning_mutex, for each supported tuning
 * frequency, caches the best tap value per frequency, and restores the
 * interrupt enable state afterwards. Only valid in SDR104 mode, or in
 * SDR50 mode when the host flags request SDR50 tuning.
 * NOTE(review): several lines of this function are elided from this
 * view (error paths, closing braces); comments describe visible code.
 */
3192 static int sdhci_tegra_execute_tuning(struct sdhci_host *sdhci, u32 opcode)
3194 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3195 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3196 struct tegra_tuning_data *tuning_data;
3197 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
3202 u8 i, set_retuning = 0;
3203 bool force_retuning = false;
3206 /* Tuning is valid only in SDR104 and SDR50 modes */
3207 ctrl_2 = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
3208 if (!(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
3209 (((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
3210 (sdhci->flags & SDHCI_SDR50_NEEDS_TUNING))))
3213 /* Tuning should be done only for MMC_BUS_WIDTH_8 and MMC_BUS_WIDTH_4 */
3214 if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
3215 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8;
3216 else if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
3217 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4;
3221 SDHCI_TEGRA_DBG("%s: Starting freq tuning\n", mmc_hostname(sdhci->mmc));
/*
 * Controllers with NVQUIRK_DISABLE_EXTERNAL_LOOPBACK on instance 2
 * temporarily switch the loopback path for the duration of tuning.
 */
3222 enable_lb_clk = (soc_data->nvquirks &
3223 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK) &&
3224 (tegra_host->instance == 2);
3225 if (enable_lb_clk) {
3226 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
3228 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3229 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
3231 mutex_lock(&tuning_mutex);
3233 /* Set the tuning command to be used */
3234 tegra_host->tuning_opcode = opcode;
3237 * Disable all interrupts signalling. Enable interrupt status
3238 * detection for buffer read ready and data crc. We use
3239 * polling for tuning as it involves less overhead.
3241 ier = sdhci_readl(sdhci, SDHCI_INT_ENABLE);
3242 sdhci_writel(sdhci, 0, SDHCI_SIGNAL_ENABLE);
3243 sdhci_writel(sdhci, SDHCI_INT_DATA_AVAIL |
3244 SDHCI_INT_DATA_CRC, SDHCI_INT_ENABLE);
3247 * If tuning is already done and retune request is not set, then skip
3248 * best tap value calculation and use the old best tap value. If the
3249 * previous best tap value verification failed, force retuning.
3251 if (tegra_host->tuning_status == TUNING_STATUS_DONE) {
3252 err = sdhci_tegra_verify_best_tap(sdhci);
3254 dev_err(mmc_dev(sdhci->mmc),
3255 "Prev best tap failed. Re-running tuning\n");
3256 force_retuning = true;
3262 if (tegra_host->force_retune == true) {
3263 force_retuning = true;
3264 tegra_host->force_retune = false;
/* Start from a clean tuning state before fetching constraints. */
3267 tegra_host->tuning_status = 0;
3268 err = sdhci_tegra_get_tuning_constraints(sdhci, force_retuning);
3270 dev_err(mmc_dev(sdhci->mmc),
3271 "Failed to get tuning constraints\n");
/* Tune each supported frequency, skipping ones already tuned. */
3275 for (i = 0; i < tegra_host->tuning_freq_count; i++) {
3276 tuning_data = &tegra_host->tuning_data[i];
3277 if (tuning_data->tuning_done && !force_retuning)
3280 SDHCI_TEGRA_DBG("%s: Setting tuning freq%d\n",
3281 mmc_hostname(sdhci->mmc), tuning_data->freq_hz);
3282 tegra_sdhci_set_clock(sdhci, tuning_data->freq_hz);
3284 SDHCI_TEGRA_DBG("%s: Calculating estimated tuning values\n",
3285 mmc_hostname(sdhci->mmc));
3286 err = calculate_estimated_tuning_values(tegra_host->speedo,
3287 tuning_data, tegra_host->boot_vcore_mv);
3291 SDHCI_TEGRA_DBG("Running tuning...\n");
3292 err = sdhci_tegra_run_tuning(sdhci, tuning_data);
3296 SDHCI_TEGRA_DBG("calculating best tap value\n");
3297 err = sdhci_tegra_calculate_best_tap(sdhci, tuning_data);
3301 err = sdhci_tegra_verify_best_tap(sdhci);
3302 if (!err && !set_retuning) {
3303 tuning_data->tuning_done = true;
3304 tegra_host->tuning_status |= TUNING_STATUS_DONE;
3306 tegra_host->tuning_status |= TUNING_STATUS_RETUNE;
3310 /* Release any override core voltages set */
3311 sdhci_tegra_set_tuning_voltage(sdhci, 0);
3313 /* Enable interrupts. Enable full range for core voltage */
3314 sdhci_writel(sdhci, ier, SDHCI_INT_ENABLE);
3315 sdhci_writel(sdhci, ier, SDHCI_SIGNAL_ENABLE);
3316 mutex_unlock(&tuning_mutex);
3318 SDHCI_TEGRA_DBG("%s: Freq tuning done\n", mmc_hostname(sdhci->mmc));
/* Restore the external loopback setting based on the tuning outcome. */
3319 if (enable_lb_clk) {
3320 misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
3322 /* Tuning is failed and card will try to enumerate in
3323 * Legacy High Speed mode. So, Enable External Loopback
3327 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3330 SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
3332 sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
/*
 * Late initcall: once boot has completed, permit core-voltage overrides
 * during tuning and stop forcing the boot voltage.
 */
3337 static int __init sdhci_tegra_enable_vcore_override_tuning(void)
3339 vcore_overrides_allowed = true;
3340 maintain_boot_voltage = false;
3343 late_initcall(sdhci_tegra_enable_vcore_override_tuning);
/*
 * tegra_sdhci_suspend - host suspend handler.
 *
 * Gates the controller clock, disables the power rails while a card is
 * present, and arms the card-detect GPIO IRQ as a wakeup source unless
 * the platform marks it wakeup-incapable.
 */
3345 static int tegra_sdhci_suspend(struct sdhci_host *sdhci)
3347 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3348 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3350 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
3351 const struct tegra_sdhci_platform_data *plat;
3352 unsigned int cd_irq;
3354 tegra_sdhci_set_clock(sdhci, 0);
3356 /* Disable the power rails if any */
3357 if (tegra_host->card_present) {
3358 err = tegra_sdhci_configure_regulators(tegra_host,
3359 CONFIG_REG_DIS, 0, 0);
3361 dev_err(mmc_dev(sdhci->mmc),
3362 "Regulators disable in suspend failed %d\n", err);
3364 plat = pdev->dev.platform_data;
3365 if (plat && gpio_is_valid(plat->cd_gpio)) {
3366 if (!plat->cd_wakeup_incapable) {
3367 /* Enable wake irq at end of suspend */
3368 cd_irq = gpio_to_irq(plat->cd_gpio);
3369 err = enable_irq_wake(cd_irq);
3371 dev_err(mmc_dev(sdhci->mmc),
3372 "SD card wake-up event registration for irq=%d failed with error: %d\n",
/*
 * tegra_sdhci_resume - host resume handler.
 *
 * Disarms the card-detect wake IRQ, re-reads card presence (CD GPIO is
 * active-low here), restores the 400 kHz identification clock,
 * re-enables the power rails, restores the signalling voltage from the
 * platform OCR mask, and re-runs pad calibration. With
 * MMC_PM_KEEP_POWER set, the controller is reset and powered back on.
 */
3379 static int tegra_sdhci_resume(struct sdhci_host *sdhci)
3381 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3382 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3383 struct platform_device *pdev;
3384 struct tegra_sdhci_platform_data *plat;
3385 unsigned int signal_voltage = 0;
3387 unsigned int cd_irq;
3389 pdev = to_platform_device(mmc_dev(sdhci->mmc));
3390 plat = pdev->dev.platform_data;
3392 if (plat && gpio_is_valid(plat->cd_gpio)) {
3393 /* disable wake capability at start of resume */
3394 if (!plat->cd_wakeup_incapable) {
3395 cd_irq = gpio_to_irq(plat->cd_gpio);
3396 disable_irq_wake(cd_irq);
3398 tegra_host->card_present =
3399 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
3402 /* Setting the min identification clock of freq 400KHz */
3403 tegra_sdhci_set_clock(sdhci, 400000);
3405 /* Enable the power rails if any */
3406 if (tegra_host->card_present) {
3407 err = tegra_sdhci_configure_regulators(tegra_host,
3408 CONFIG_REG_EN, 0, 0);
3410 dev_err(mmc_dev(sdhci->mmc),
3411 "Regulators enable in resume failed %d\n", err);
3414 if (tegra_host->vdd_io_reg) {
3415 if (plat && (plat->mmc_data.ocr_mask &
3416 SDHOST_1V8_OCR_MASK))
3417 signal_voltage = MMC_SIGNAL_VOLTAGE_180;
3419 signal_voltage = MMC_SIGNAL_VOLTAGE_330;
3420 tegra_sdhci_signal_voltage_switch(sdhci,
3425 /* Reset the controller and power on if MMC_KEEP_POWER flag is set*/
3426 if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3427 tegra_sdhci_reset(sdhci, SDHCI_RESET_ALL);
3428 sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
3431 tegra_sdhci_do_calibration(sdhci, signal_voltage);
/*
 * tegra_sdhci_post_resume - gate the controller clock after resume when
 * no card is present (or none enumerated) and the clock is still on.
 */
3437 static void tegra_sdhci_post_resume(struct sdhci_host *sdhci)
3439 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3440 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3442 /* Turn OFF the clocks if the device is not present */
3443 if ((!tegra_host->card_present || !sdhci->mmc->card) &&
3444 tegra_host->clk_enabled)
3445 tegra_sdhci_set_clock(sdhci, 0);
3449 * For tegra specific tuning, core voltage has to be fixed at different
3450 * voltages to get the tap values. Fixing the core voltage during tuning for one
3451 * device might affect transfers of other SDMMC devices. Check if tuning mutex
3452 * is locked before starting a data transfer. The new tuning procedure might
3453 * take at max 1.5s for completion for a single run. Taking DFS into account,
3454 * the max timeout for the tuning mutex check is set to 3 secs. Since tuning is
3455 * run only during boot or the first time device is inserted, there wouldn't
3456 * be any delays in cmd/xfer execution once devices enumeration is done.
/*
 * tegra_sdhci_get_bus - block bus access while tuning holds the tuning
 * mutex, bounded by a timeout counter (rationale in the comment above
 * this function in the file).
 */
3458 static void tegra_sdhci_get_bus(struct sdhci_host *sdhci)
3460 unsigned int timeout = 300;
3462 while (mutex_is_locked(&tuning_mutex)) {
3466 dev_err(mmc_dev(sdhci->mmc),
3467 "Tuning mutex locked for long time\n");
3474 * The host/device can be powered off before the retuning request is handled in
3475 * case of SDIO power being off when Wi-Fi is turned off, SD card removal, etc. In such
3476 * cases, cancel the pending tuning timer and remove any core voltage
3477 * constraints that are set earlier.
/*
 * tegra_sdhci_power_off - pre-power-off cleanup.
 *
 * Cancels a pending retuning timer, drops one boot-voltage reference,
 * and when the last reference is gone releases any core-voltage
 * override left over from tuning.
 */
3479 static void tegra_sdhci_power_off(struct sdhci_host *sdhci, u8 power_mode)
3481 int retuning_req_set = 0;
3483 retuning_req_set = (timer_pending(&sdhci->tuning_timer) ||
3484 (sdhci->flags & SDHCI_NEEDS_RETUNING));
3486 if (retuning_req_set) {
3487 del_timer_sync(&sdhci->tuning_timer);
3489 if (boot_volt_req_refcount)
3490 --boot_volt_req_refcount;
3492 if (!boot_volt_req_refcount) {
3493 sdhci_tegra_set_tuning_voltage(sdhci, 0);
3494 SDHCI_TEGRA_DBG("%s: Release override as host is off\n",
3495 mmc_hostname(sdhci->mmc));
/* debugfs getter: report the current device-stats polling interval. */
3500 static int show_polling_period(void *data, u64 *value)
3502 struct sdhci_host *host = (struct sdhci_host *)data;
3504 if (host->mmc->dev_stats != NULL)
3505 *value = host->mmc->dev_stats->polling_interval;
/* debugfs setter: update the device-stats polling interval (capped). */
3510 static int set_polling_period(void *data, u64 value)
3512 struct sdhci_host *host = (struct sdhci_host *)data;
3514 if (host->mmc->dev_stats != NULL) {
3515 /* Limiting the maximum polling period to 1 sec */
3518 host->mmc->dev_stats->polling_interval = value;
/* debugfs getter: report the DFS governor's high-load threshold. */
3523 static int show_active_load_high_threshold(void *data, u64 *value)
3525 struct sdhci_host *host = (struct sdhci_host *)data;
3526 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3527 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3528 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
3530 if (gov_data != NULL)
3531 *value = gov_data->act_load_high_threshold;
/* debugfs setter: update the DFS governor's high-load threshold. */
3536 static int set_active_load_high_threshold(void *data, u64 value)
3538 struct sdhci_host *host = (struct sdhci_host *)data;
3539 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3540 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3541 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
3543 if (gov_data != NULL) {
3544 /* Maximum threshold load percentage is 100.*/
3547 gov_data->act_load_high_threshold = value;
/* debugfs getter: report whether clock gating is currently disabled. */
3553 static int show_disableclkgating_value(void *data, u64 *value)
3555 struct sdhci_host *host = (struct sdhci_host *)data;
3557 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3558 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3559 if (tegra_host != NULL)
3560 *value = tegra_host->dbg_cfg.clk_ungated;
/*
 * debugfs setter: enable/disable MMC clock gating at runtime by
 * toggling MMC_CAP2_CLOCK_GATING, re-applying ios, and recording the
 * state in dbg_cfg.clk_ungated.
 */
3565 static int set_disableclkgating_value(void *data, u64 value)
3567 struct sdhci_host *host = (struct sdhci_host *)data;
3569 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3570 if (pltfm_host != NULL) {
3571 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3572 /* Set the CAPS2 register to reflect
3573 * the clk gating value
3575 if (tegra_host != NULL) {
3577 host->mmc->ops->set_ios(host->mmc,
3579 tegra_host->dbg_cfg.clk_ungated = true;
3581 ~MMC_CAP2_CLOCK_GATING;
3583 tegra_host->dbg_cfg.clk_ungated = false;
3585 MMC_CAP2_CLOCK_GATING;
/*
 * debugfs setter: override the trim delay. Requires clock gating to be
 * disabled and the controller clock enabled so the register write
 * actually takes effect; otherwise prints guidance and does nothing.
 */
3593 static int set_trim_override_value(void *data, u64 value)
3595 struct sdhci_host *host = (struct sdhci_host *)data;
3597 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3598 if (pltfm_host != NULL) {
3599 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3600 if (tegra_host != NULL) {
3601 /* Make sure clock gating is disabled */
3602 if ((tegra_host->dbg_cfg.clk_ungated) &&
3603 (tegra_host->clk_enabled)) {
3604 sdhci_tegra_set_trim_delay(host, value);
3605 tegra_host->dbg_cfg.trim_val =
3608 pr_info("%s: Disable clock gating before setting value\n",
3609 mmc_hostname(host->mmc));
/* debugfs getter: report the last trim delay override applied. */
3617 static int show_trim_override_value(void *data, u64 *value)
3619 struct sdhci_host *host = (struct sdhci_host *)data;
3621 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3622 if (pltfm_host != NULL) {
3623 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3624 if (tegra_host != NULL)
3625 *value = tegra_host->dbg_cfg.trim_val;
/* debugfs getter: report the last tap delay override applied. */
3631 static int show_tap_override_value(void *data, u64 *value)
3633 struct sdhci_host *host = (struct sdhci_host *)data;
3635 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3636 if (pltfm_host != NULL) {
3637 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3638 if (tegra_host != NULL)
3639 *value = tegra_host->dbg_cfg.tap_val;
/*
 * debugfs setter: override the tap delay. Same precondition as the trim
 * override — clock gating disabled and the controller clock enabled.
 */
3645 static int set_tap_override_value(void *data, u64 value)
3647 struct sdhci_host *host = (struct sdhci_host *)data;
3649 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3650 if (pltfm_host != NULL) {
3651 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3652 if (tegra_host != NULL) {
3653 /* Make sure clock gating is disabled */
3654 if ((tegra_host->dbg_cfg.clk_ungated) &&
3655 (tegra_host->clk_enabled)) {
3656 sdhci_tegra_set_tap_delay(host, value);
3657 tegra_host->dbg_cfg.tap_val = value;
3659 pr_info("%s: Disable clock gating before setting value\n",
3660 mmc_hostname(host->mmc));
/*
 * debugfs file operations wiring the show/set pairs above to u64
 * attributes ("%llu" formatting).
 */
3667 DEFINE_SIMPLE_ATTRIBUTE(sdhci_polling_period_fops, show_polling_period,
3668 set_polling_period, "%llu\n");
3669 DEFINE_SIMPLE_ATTRIBUTE(sdhci_active_load_high_threshold_fops,
3670 show_active_load_high_threshold,
3671 set_active_load_high_threshold, "%llu\n");
3672 DEFINE_SIMPLE_ATTRIBUTE(sdhci_disable_clkgating_fops,
3673 show_disableclkgating_value,
3674 set_disableclkgating_value, "%llu\n");
3675 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_trim_data_fops,
3676 show_trim_override_value,
3677 set_trim_override_value, "%llu\n");
3678 DEFINE_SIMPLE_ATTRIBUTE(sdhci_override_tap_data_fops,
3679 show_tap_override_value,
3680 set_tap_override_value, "%llu\n");
/*
 * sdhci_tegra_error_stats_debugfs - create the per-host debugfs tree:
 * error/DFS statistics files, the tunables exposed above, and (with
 * delayed clock gating) a clk_gate_tmout_ticks knob.
 *
 * saved_line records the __LINE__ of the first failing debugfs call so
 * the single error message at the end can identify the failing site;
 * on failure the whole tree is removed recursively.
 */
3682 static void sdhci_tegra_error_stats_debugfs(struct sdhci_host *host)
3684 struct dentry *root = host->debugfs_root;
3685 struct dentry *dfs_root;
3686 unsigned saved_line;
3689 root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
3690 if (IS_ERR_OR_NULL(root)) {
3691 saved_line = __LINE__;
3694 host->debugfs_root = root;
3697 dfs_root = debugfs_create_dir("dfs_stats_dir", root);
3698 if (IS_ERR_OR_NULL(dfs_root)) {
3699 saved_line = __LINE__;
3703 if (!debugfs_create_file("error_stats", S_IRUSR, root, host,
3704 &sdhci_host_fops)) {
3705 saved_line = __LINE__;
3708 if (!debugfs_create_file("dfs_stats", S_IRUSR, dfs_root, host,
3709 &sdhci_host_dfs_fops)) {
3710 saved_line = __LINE__;
3713 if (!debugfs_create_file("polling_period", 0644, dfs_root, (void *)host,
3714 &sdhci_polling_period_fops)) {
3715 saved_line = __LINE__;
3718 if (!debugfs_create_file("active_load_high_threshold", 0644,
3719 dfs_root, (void *)host,
3720 &sdhci_active_load_high_threshold_fops)) {
3721 saved_line = __LINE__;
3725 dfs_root = debugfs_create_dir("override_data", root);
3726 if (IS_ERR_OR_NULL(dfs_root)) {
3727 saved_line = __LINE__;
3731 if (!debugfs_create_file("clk_gate_disabled", 0644,
3732 dfs_root, (void *)host,
3733 &sdhci_disable_clkgating_fops)) {
3734 saved_line = __LINE__;
3738 if (!debugfs_create_file("tap_value", 0644,
3739 dfs_root, (void *)host,
3740 &sdhci_override_tap_data_fops)) {
3741 saved_line = __LINE__;
3745 if (!debugfs_create_file("trim_value", 0644,
3746 dfs_root, (void *)host,
3747 &sdhci_override_trim_data_fops)) {
3748 saved_line = __LINE__;
3751 if (IS_QUIRKS2_DELAYED_CLK_GATE(host)) {
3752 host->clk_gate_tmout_ticks = -1;
3753 if (!debugfs_create_u32("clk_gate_tmout_ticks",
3755 root, (u32 *)&host->clk_gate_tmout_ticks)) {
3756 saved_line = __LINE__;
/* Error path: tear down everything created so far. */
3764 debugfs_remove_recursive(root);
3765 host->debugfs_root = NULL;
3767 pr_err("%s %s: Failed to initialize debugfs functionality at line=%d\n", __func__,
3768 mmc_hostname(host->mmc), saved_line);
/*
 * sdhci_handle_boost_mode_tap - sysfs "cmd_state" store handler.
 *
 * Accepts TAP_CMD_TRIM_DEFAULT_VOLTAGE (1) or TAP_CMD_TRIM_HIGH_VOLTAGE
 * (2) and programs the corresponding cached tap value. Only meaningful
 * for UHS/HS200 cards; a no-op if the value is unchanged. If the host
 * clock is gated, it is temporarily enabled for the tap programming and
 * gated again afterwards. Waits for in-flight data transfers before
 * touching the tap value, under host->lock.
 */
3772 static ssize_t sdhci_handle_boost_mode_tap(struct device *dev,
3773 struct device_attribute *attr, const char *buf, size_t count)
3776 struct mmc_card *card;
3777 char *p = (char *)buf;
3778 struct sdhci_host *host = dev_get_drvdata(dev);
3779 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3780 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3781 struct tegra_tuning_data *tuning_data;
3784 bool clk_set_for_tap_prog = false;
3786 tap_cmd = memparse(p, &p);
3788 card = host->mmc->card;
3792 /* if not uhs -- no tuning and no tap value to set */
3793 if (!mmc_sd_card_uhs(card) && !mmc_card_hs200(card))
3796 /* if no change in tap value -- just exit */
3797 if (tap_cmd == tegra_host->tap_cmd)
3800 if ((tap_cmd != TAP_CMD_TRIM_DEFAULT_VOLTAGE) &&
3801 (tap_cmd != TAP_CMD_TRIM_HIGH_VOLTAGE)) {
3802 pr_info("echo 1 > cmd_state # to set normal voltage\n");
3803 pr_info("echo 2 > cmd_state # to set high voltage\n");
3807 tegra_host->tap_cmd = tap_cmd;
3808 tuning_data = sdhci_tegra_get_tuning_data(host, host->max_clk);
3809 /* Check if host clock is enabled */
3810 if (!tegra_host->clk_enabled) {
3811 /* Nothing to do if the host is not powered ON */
3812 if (host->mmc->ios.power_mode != MMC_POWER_ON)
3815 tegra_sdhci_set_clock(host, host->mmc->ios.clock);
3816 clk_set_for_tap_prog = true;
3820 /* Wait for any on-going data transfers */
3821 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
3822 while (present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) {
3827 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
3830 spin_lock(&host->lock);
3832 case TAP_CMD_TRIM_DEFAULT_VOLTAGE:
3833 /* set tap value for voltage range 1.1 to 1.25 */
3834 sdhci_tegra_set_tap_delay(host, tuning_data->best_tap_value);
3837 case TAP_CMD_TRIM_HIGH_VOLTAGE:
3838 /* set tap value for voltage range 1.25 to 1.39 */
3839 sdhci_tegra_set_tap_delay(host,
3840 tuning_data->nom_best_tap_value);
3843 spin_unlock(&host->lock);
3844 if (clk_set_for_tap_prog) {
3845 tegra_sdhci_set_clock(host, 0);
3846 clk_set_for_tap_prog = false;
/* sysfs "cmd_state" show handler: print the current tap command. */
3851 static ssize_t sdhci_show_turbo_mode(struct device *dev,
3852 struct device_attribute *attr, char *buf)
3854 struct sdhci_host *host = dev_get_drvdata(dev);
3855 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
3856 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3858 return sprintf(buf, "%d\n", tegra_host->tap_cmd);
/* rw-r--r-- sysfs attribute pairing the show/store handlers above. */
3861 static DEVICE_ATTR(cmd_state, 0644, sdhci_show_turbo_mode,
3862 sdhci_handle_boost_mode_tap);
/*
 * tegra_sdhci_reboot_notify - reboot notifier: disable the host's
 * regulators so the rails are off across the reboot.
 */
3864 static int tegra_sdhci_reboot_notify(struct notifier_block *nb,
3865 unsigned long event, void *data)
3867 struct sdhci_tegra *tegra_host =
3868 container_of(nb, struct sdhci_tegra, reboot_notify);
3874 err = tegra_sdhci_configure_regulators(tegra_host,
3875 CONFIG_REG_DIS, 0, 0);
3877 pr_err("Disable regulator in reboot notify failed %d\n",
/*
 * tegra_sdhci_ios_config_enter - pre-ios-config hook.
 *
 * Tegra SDMMC register access needs a running clock, so a minimum clock
 * is forced when none is requested. Also switches pltfm_host->clk
 * between the ddr and sdr clock sources when entering/leaving DDR50
 * timing, restoring the frequency afterwards.
 */
3884 void tegra_sdhci_ios_config_enter(struct sdhci_host *sdhci, struct mmc_ios *ios)
3886 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3887 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3888 struct clk *new_mode_clk;
3889 bool change_clk = false;
3892 * Tegra sdmmc controllers require clock to be enabled for any register
3893 * access. Set the minimum controller clock if no clock is requested.
3895 if (!sdhci->clock && !ios->clock) {
3896 tegra_sdhci_set_clock(sdhci, sdhci->mmc->f_min);
3897 sdhci->clock = sdhci->mmc->f_min;
3898 } else if (ios->clock && (ios->clock != sdhci->clock)) {
3899 tegra_sdhci_set_clock(sdhci, ios->clock);
3903 * Check for DDR50 mode setting and set ddr_clk if not already
3904 * done. Return if only one clock option is available.
3906 if (!tegra_host->ddr_clk || !tegra_host->sdr_clk) {
3909 if ((ios->timing == MMC_TIMING_UHS_DDR50) &&
3910 !tegra_host->is_ddr_clk_set) {
3912 new_mode_clk = tegra_host->ddr_clk;
3913 } else if ((ios->timing != MMC_TIMING_UHS_DDR50) &&
3914 tegra_host->is_ddr_clk_set) {
3916 new_mode_clk = tegra_host->sdr_clk;
/* Gate, swap the clock source, then restore the frequency. */
3920 tegra_sdhci_set_clock(sdhci, 0);
3921 pltfm_host->clk = new_mode_clk;
3922 /* Restore the previous frequency */
3923 tegra_sdhci_set_clock(sdhci, sdhci->max_clk);
3924 tegra_host->is_ddr_clk_set =
3925 !tegra_host->is_ddr_clk_set;
/*
 * tegra_sdhci_ios_config_exit - post-ios-config hook: run power-off
 * retuning cleanup, then gate the controller clock once all register
 * accesses are done (unless host clock gating is being skipped).
 */
3930 void tegra_sdhci_ios_config_exit(struct sdhci_host *sdhci, struct mmc_ios *ios)
3933 * Do any required handling for retuning requests before powering off
3936 if (ios->power_mode == MMC_POWER_OFF)
3937 tegra_sdhci_power_off(sdhci, ios->power_mode);
3940 * In case of power off, turn off controller clock now as all the
3941 * required register accesses are already done.
3943 if (!ios->clock && !sdhci->mmc->skip_host_clkgate)
3944 tegra_sdhci_set_clock(sdhci, 0);
/*
 * tegra_sdhci_get_drive_strength - always return the platform-provided
 * default drive type; max_dtr, host_drv and card_drv are ignored.
 */
3947 static int tegra_sdhci_get_drive_strength(struct sdhci_host *sdhci,
3948 unsigned int max_dtr, int host_drv, int card_drv)
3950 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
3951 struct sdhci_tegra *tegra_host = pltfm_host->priv;
3952 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
3954 return plat->default_drv_type;
/* sdhci_ops vtable hooking the Tegra-specific callbacks into sdhci core. */
3957 static const struct sdhci_ops tegra_sdhci_ops = {
3958 .get_ro = tegra_sdhci_get_ro,
3959 .get_cd = tegra_sdhci_get_cd,
3960 .read_l = tegra_sdhci_readl,
3961 .read_w = tegra_sdhci_readw,
3962 .write_l = tegra_sdhci_writel,
3963 .write_w = tegra_sdhci_writew,
3964 .platform_bus_width = tegra_sdhci_buswidth,
3965 .set_clock = tegra_sdhci_set_clock,
3966 .suspend = tegra_sdhci_suspend,
3967 .resume = tegra_sdhci_resume,
3968 .platform_resume = tegra_sdhci_post_resume,
3969 .platform_reset_exit = tegra_sdhci_reset_exit,
3970 .platform_get_bus = tegra_sdhci_get_bus,
3971 .platform_ios_config_enter = tegra_sdhci_ios_config_enter,
3972 .platform_ios_config_exit = tegra_sdhci_ios_config_exit,
3973 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
3974 .switch_signal_voltage = tegra_sdhci_signal_voltage_switch,
3975 .switch_signal_voltage_exit = tegra_sdhci_do_calibration,
3976 .execute_freq_tuning = sdhci_tegra_execute_tuning,
3977 .sd_error_stats = sdhci_tegra_sd_error_stats,
3978 #ifdef CONFIG_MMC_FREQ_SCALING
3979 .dfs_gov_init = sdhci_tegra_freq_gov_init,
3980 .dfs_gov_get_target_freq = sdhci_tegra_get_target_freq,
3982 .get_drive_strength = tegra_sdhci_get_drive_strength,
/* Tegra114 (T11x) platform data and SoC quirks/tuning coefficients. */
3985 static struct sdhci_pltfm_data sdhci_tegra11_pdata = {
3986 .quirks = TEGRA_SDHCI_QUIRKS,
3987 .quirks2 = TEGRA_SDHCI_QUIRKS2,
3988 .ops = &tegra_sdhci_ops,
3991 static struct sdhci_tegra_soc_data soc_data_tegra11 = {
3992 .pdata = &sdhci_tegra11_pdata,
3993 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
3994 NVQUIRK_SET_DRIVE_STRENGTH |
3995 NVQUIRK_SET_TRIM_DELAY |
3996 NVQUIRK_ENABLE_DDR50 |
3997 NVQUIRK_ENABLE_HS200 |
3998 NVQUIRK_INFINITE_ERASE_TIMEOUT |
3999 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK |
4000 NVQUIRK_DISABLE_SDMMC4_CALIB,
4001 .parent_clk_list = {"pll_p", "pll_c"},
4002 .tuning_freq_list = {81600000, 156000000, 200000000},
4003 .t2t_coeffs = t11x_tuning_coeffs,
4004 .t2t_coeffs_count = 3,
4005 .tap_hole_coeffs = t11x_tap_hole_coeffs,
4006 .tap_hole_coeffs_count = 12,
/* Tegra124 (T12x) platform data (adds 64-bit DMA) and SoC quirks. */
4009 static struct sdhci_pltfm_data sdhci_tegra12_pdata = {
4010 .quirks = TEGRA_SDHCI_QUIRKS,
4011 .quirks2 = TEGRA_SDHCI_QUIRKS2 |
4012 SDHCI_QUIRK2_HOST_OFF_CARD_ON |
4013 SDHCI_QUIRK2_SUPPORT_64BIT_DMA |
4014 SDHCI_QUIRK2_USE_64BIT_ADDR,
4015 .ops = &tegra_sdhci_ops,
4018 static struct sdhci_tegra_soc_data soc_data_tegra12 = {
4019 .pdata = &sdhci_tegra12_pdata,
4020 .nvquirks = TEGRA_SDHCI_NVQUIRKS |
4021 NVQUIRK_SET_TRIM_DELAY |
4022 NVQUIRK_ENABLE_DDR50 |
4023 NVQUIRK_ENABLE_HS200 |
4024 NVQUIRK_INFINITE_ERASE_TIMEOUT |
4025 NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD |
4026 NVQUIRK_HIGH_FREQ_TAP_PROCEDURE |
4027 NVQUIRK_SET_CALIBRATION_OFFSETS |
4028 NVQUIRK_DISABLE_EXTERNAL_LOOPBACK,
4029 .parent_clk_list = {"pll_p", "pll_c"},
4030 .tuning_freq_list = {81600000, 136000000, 200000000},
4031 .t2t_coeffs = t12x_tuning_coeffs,
4032 .t2t_coeffs_count = 3,
4033 .tap_hole_coeffs = t12x_tap_hole_coeffs,
4034 .tap_hole_coeffs_count = 13,
/* Device tree match table; .data selects the per-SoC configuration. */
4037 static const struct of_device_id sdhci_tegra_dt_match[] = {
4038 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra12 },
4039 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra11 },
/*
 * Export the OF match table for module autoloading. This must name the
 * actual table defined above (sdhci_tegra_dt_match); the previous
 * "sdhci_dt_ids" identifier is not declared anywhere in this file and
 * breaks the build when the driver is compiled as a module.
 */
4042 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
/*
 * sdhci_tegra_dt_parse_pdata - build platform data from device tree.
 *
 * Allocates a tegra_sdhci_platform_data with devm and fills it from DT
 * properties: cd/wp/power GPIOs, bus width, tap/trim delays, clock
 * limits, UHS mask, built-in flag, and an OCR mask selected from the
 * "mmc-ocr-mask" value. Optional properties are simply left at zero.
 * Returns NULL on allocation failure (error path partly elided here).
 */
4044 static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata(
4045 struct platform_device *pdev)
4048 struct tegra_sdhci_platform_data *plat;
4049 struct device_node *np = pdev->dev.of_node;
4055 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
4057 dev_err(&pdev->dev, "Can't allocate platform data\n");
4061 plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
4062 plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
4063 plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
4065 if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
4069 of_property_read_u32(np, "tap-delay", &plat->tap_delay);
4070 of_property_read_u32(np, "trim-delay", &plat->trim_delay);
4071 of_property_read_u32(np, "ddr-clk-limit", &plat->ddr_clk_limit);
4072 of_property_read_u32(np, "max-clk-limit", &plat->max_clk_limit);
4074 of_property_read_u32(np, "uhs_mask", &plat->uhs_mask);
4076 if (of_find_property(np, "built-in", NULL))
4077 plat->mmc_data.built_in = 1;
4079 if (!of_property_read_u32(np, "mmc-ocr-mask", &val)) {
4081 plat->mmc_data.ocr_mask = MMC_OCR_1V8_MASK;
4083 plat->mmc_data.ocr_mask = MMC_OCR_2V8_MASK;
4085 plat->mmc_data.ocr_mask = MMC_OCR_3V2_MASK;
4087 plat->mmc_data.ocr_mask = MMC_OCR_3V3_MASK;
4092 static int sdhci_tegra_probe(struct platform_device *pdev)
4094 const struct of_device_id *match;
4095 const struct sdhci_tegra_soc_data *soc_data;
4096 struct sdhci_host *host;
4097 struct sdhci_pltfm_host *pltfm_host;
4098 struct tegra_sdhci_platform_data *plat;
4099 struct sdhci_tegra *tegra_host;
4100 unsigned int low_freq;
4104 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
4106 soc_data = match->data;
4108 /* Use id tables and remove the following chip defines */
4109 #if defined(CONFIG_ARCH_TEGRA_11x_SOC)
4110 soc_data = &soc_data_tegra11;
4112 soc_data = &soc_data_tegra12;
4116 host = sdhci_pltfm_init(pdev, soc_data->pdata);
4118 /* sdio delayed clock gate quirk in sdhci_host used */
4119 host->quirks2 |= SDHCI_QUIRK2_DELAYED_CLK_GATE;
4122 return PTR_ERR(host);
4124 pltfm_host = sdhci_priv(host);
4126 plat = pdev->dev.platform_data;
4129 plat = sdhci_tegra_dt_parse_pdata(pdev);
4132 dev_err(mmc_dev(host->mmc), "missing platform data\n");
4137 /* FIXME: This is for until dma-mask binding is supported in DT.
4138 * Set coherent_dma_mask for each Tegra SKUs.
4139 * If dma_mask is NULL, set it to coherent_dma_mask. */
4140 if (soc_data == &soc_data_tegra11)
4141 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
4143 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
4145 if (!pdev->dev.dma_mask)
4146 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
4148 tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
4150 dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
4155 tegra_host->plat = plat;
4156 pdev->dev.platform_data = plat;
4158 tegra_host->sd_stat_head = devm_kzalloc(&pdev->dev,
4159 sizeof(struct sdhci_tegra_sd_stats), GFP_KERNEL);
4160 if (!tegra_host->sd_stat_head) {
4161 dev_err(mmc_dev(host->mmc), "failed to allocate sd_stat_head\n");
4166 tegra_host->soc_data = soc_data;
4167 pltfm_host->priv = tegra_host;
4169 for (i = 0; i < ARRAY_SIZE(soc_data->parent_clk_list); i++) {
4170 if (!soc_data->parent_clk_list[i])
4172 if (!strcmp(soc_data->parent_clk_list[i], "pll_c")) {
4173 pll_c = clk_get_sys(NULL, "pll_c");
4174 if (IS_ERR(pll_c)) {
4175 rc = PTR_ERR(pll_c);
4176 dev_err(mmc_dev(host->mmc),
4177 "clk error in getting pll_c: %d\n", rc);
4179 pll_c_rate = clk_get_rate(pll_c);
4182 if (!strcmp(soc_data->parent_clk_list[i], "pll_p")) {
4183 pll_p = clk_get_sys(NULL, "pll_p");
4184 if (IS_ERR(pll_p)) {
4185 rc = PTR_ERR(pll_p);
4186 dev_err(mmc_dev(host->mmc),
4187 "clk error in getting pll_p: %d\n", rc);
4189 pll_p_rate = clk_get_rate(pll_p);
4193 #ifdef CONFIG_MMC_EMBEDDED_SDIO
4194 if (plat->mmc_data.embedded_sdio)
4195 mmc_set_embedded_sdio_data(host->mmc,
4196 &plat->mmc_data.embedded_sdio->cis,
4197 &plat->mmc_data.embedded_sdio->cccr,
4198 plat->mmc_data.embedded_sdio->funcs,
4199 plat->mmc_data.embedded_sdio->num_funcs);
4202 if (gpio_is_valid(plat->power_gpio)) {
4203 rc = gpio_request(plat->power_gpio, "sdhci_power");
4205 dev_err(mmc_dev(host->mmc),
4206 "failed to allocate power gpio\n");
4209 gpio_direction_output(plat->power_gpio, 1);
4212 if (gpio_is_valid(plat->cd_gpio)) {
4213 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
4215 dev_err(mmc_dev(host->mmc),
4216 "failed to allocate cd gpio\n");
4219 gpio_direction_input(plat->cd_gpio);
4221 tegra_host->card_present =
4222 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
4224 } else if (plat->mmc_data.register_status_notify) {
4225 plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host);
4228 if (plat->mmc_data.status) {
4229 plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc));
4232 if (gpio_is_valid(plat->wp_gpio)) {
4233 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
4235 dev_err(mmc_dev(host->mmc),
4236 "failed to allocate wp gpio\n");
4239 gpio_direction_input(plat->wp_gpio);
4243 * If there is no card detect gpio, assume that the
4244 * card is always present.
4246 if (!gpio_is_valid(plat->cd_gpio))
4247 tegra_host->card_present = 1;
4249 if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) {
4250 tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN;
4251 tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX;
4252 } else if (plat->mmc_data.ocr_mask & MMC_OCR_2V8_MASK) {
4253 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_2V8;
4254 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4255 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V2_MASK) {
4256 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V2;
4257 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4258 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V3_MASK) {
4259 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V3;
4260 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4263 * Set the minV and maxV to default
4264 * voltage range of 2.7V - 3.6V
4266 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN;
4267 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
4270 tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc),
4272 if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) {
4273 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
4274 "Assuming vddio_sdmmc is not required.\n",
4275 "vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg));
4276 tegra_host->vdd_io_reg = NULL;
4278 rc = tegra_sdhci_configure_regulators(tegra_host,
4279 CONFIG_REG_SET_VOLT,
4280 tegra_host->vddio_min_uv,
4281 tegra_host->vddio_max_uv);
4283 dev_err(mmc_dev(host->mmc),
4284 "Init volt(%duV-%duV) setting failed %d\n",
4285 tegra_host->vddio_min_uv,
4286 tegra_host->vddio_max_uv, rc);
4287 regulator_put(tegra_host->vdd_io_reg);
4288 tegra_host->vdd_io_reg = NULL;
4292 tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc),
4294 if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) {
4295 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
4296 " Assuming vddio_sd_slot is not required.\n",
4297 "vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg));
4298 tegra_host->vdd_slot_reg = NULL;
4301 if (tegra_host->card_present) {
4302 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_EN,
4305 dev_err(mmc_dev(host->mmc),
4306 "Enable regulators failed in probe %d\n", rc);
4311 tegra_pd_add_device(&pdev->dev);
4312 pm_runtime_enable(&pdev->dev);
4314 /* Get the ddr clock */
4315 tegra_host->ddr_clk = clk_get(mmc_dev(host->mmc), "ddr");
4316 if (IS_ERR(tegra_host->ddr_clk)) {
4317 dev_err(mmc_dev(host->mmc), "ddr clk err\n");
4318 tegra_host->ddr_clk = NULL;
4321 /* Get high speed clock */
4322 tegra_host->sdr_clk = clk_get(mmc_dev(host->mmc), NULL);
4323 if (IS_ERR(tegra_host->sdr_clk)) {
4324 dev_err(mmc_dev(host->mmc), "sdr clk err\n");
4325 tegra_host->sdr_clk = NULL;
4326 /* If both ddr and sdr clks are missing, then fail probe */
4327 if (!tegra_host->ddr_clk && !tegra_host->sdr_clk) {
4328 dev_err(mmc_dev(host->mmc),
4329 "Failed to get ddr and sdr clks\n");
4335 if (tegra_host->sdr_clk) {
4336 pltfm_host->clk = tegra_host->sdr_clk;
4337 tegra_host->is_ddr_clk_set = false;
4339 pltfm_host->clk = tegra_host->ddr_clk;
4340 tegra_host->is_ddr_clk_set = true;
4343 if (clk_get_parent(pltfm_host->clk) == pll_c)
4344 tegra_host->is_parent_pllc = true;
4346 pm_runtime_get_sync(&pdev->dev);
4347 rc = clk_prepare_enable(pltfm_host->clk);
4351 tegra_host->emc_clk = devm_clk_get(mmc_dev(host->mmc), "emc");
4352 if (IS_ERR_OR_NULL(tegra_host->emc_clk)) {
4353 dev_err(mmc_dev(host->mmc), "Can't get emc clk\n");
4354 tegra_host->emc_clk = NULL;
4356 clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
4359 tegra_host->sclk = devm_clk_get(mmc_dev(host->mmc), "sclk");
4360 if (IS_ERR_OR_NULL(tegra_host->sclk)) {
4361 dev_err(mmc_dev(host->mmc), "Can't get sclk clock\n");
4362 tegra_host->sclk = NULL;
4364 clk_set_rate(tegra_host->sclk, SDMMC_AHB_MAX_FREQ);
4366 pltfm_host->priv = tegra_host;
4367 tegra_host->clk_enabled = true;
4368 host->is_clk_on = tegra_host->clk_enabled;
4369 mutex_init(&tegra_host->set_clock_mutex);
4371 tegra_host->max_clk_limit = plat->max_clk_limit;
4372 tegra_host->ddr_clk_limit = plat->ddr_clk_limit;
4373 tegra_host->instance = pdev->id;
4374 tegra_host->tap_cmd = TAP_CMD_TRIM_DEFAULT_VOLTAGE;
4375 tegra_host->speedo = plat->cpu_speedo;
4376 dev_info(mmc_dev(host->mmc), "Speedo value %d\n", tegra_host->speedo);
4377 host->mmc->pm_caps |= plat->pm_caps;
4378 host->mmc->pm_flags |= plat->pm_flags;
4380 host->mmc->caps |= MMC_CAP_ERASE;
4381 /* enable 1/8V DDR capable */
4382 host->mmc->caps |= MMC_CAP_1_8V_DDR;
4384 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
4385 host->mmc->caps |= MMC_CAP_SDIO_IRQ;
4386 host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
4387 if (plat->mmc_data.built_in) {
4388 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
4390 host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
4392 /* disable access to boot partitions */
4393 host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
4395 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
4396 if (soc_data->nvquirks & NVQUIRK_ENABLE_HS200)
4397 host->mmc->caps2 |= MMC_CAP2_HS200;
4398 #ifdef CONFIG_TEGRA_FPGA_PLATFORM
4399 /* Enable HS200 mode */
4400 host->mmc->caps2 |= MMC_CAP2_HS200;
4402 host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
4403 host->mmc->caps |= MMC_CAP_CMD23;
4404 host->mmc->caps2 |= MMC_CAP2_PACKED_CMD;
4409 * Enable dynamic frequency scaling support only if the platform clock
4410 * limit is higher than the lowest supported frequency by tuning.
4412 for (i = 0; i < TUNING_FREQ_COUNT; i++) {
4413 low_freq = soc_data->tuning_freq_list[i];
4417 if (plat->en_freq_scaling && (plat->max_clk_limit > low_freq))
4418 host->mmc->caps2 |= MMC_CAP2_FREQ_SCALING;
4420 if (!plat->disable_clock_gate)
4421 host->mmc->caps2 |= MMC_CAP2_CLOCK_GATING;
4423 if (plat->nominal_vcore_mv)
4424 tegra_host->nominal_vcore_mv = plat->nominal_vcore_mv;
4425 if (plat->min_vcore_override_mv)
4426 tegra_host->min_vcore_override_mv = plat->min_vcore_override_mv;
4427 if (plat->boot_vcore_mv)
4428 tegra_host->boot_vcore_mv = plat->boot_vcore_mv;
4429 dev_info(mmc_dev(host->mmc),
4430 "Tuning constraints: nom_mv %d, boot_mv %d, min_or_mv %d\n",
4431 tegra_host->nominal_vcore_mv, tegra_host->boot_vcore_mv,
4432 tegra_host->min_vcore_override_mv);
4435 * If nominal voltage is equal to boot voltage, there is no need for
4436 * nominal voltage tuning.
4438 if (plat->nominal_vcore_mv <= plat->boot_vcore_mv)
4439 plat->en_nominal_vcore_tuning = false;
4441 INIT_DELAYED_WORK(&host->delayed_clk_gate_wrk, delayed_clk_gate_cb);
4442 rc = sdhci_add_host(host);
4446 if (gpio_is_valid(plat->cd_gpio)) {
4447 rc = request_threaded_irq(gpio_to_irq(plat->cd_gpio), NULL,
4449 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
4450 mmc_hostname(host->mmc), host);
4452 dev_err(mmc_dev(host->mmc), "request irq error\n");
4453 goto err_cd_irq_req;
4456 sdhci_tegra_error_stats_debugfs(host);
4457 device_create_file(&pdev->dev, &dev_attr_cmd_state);
4459 /* Enable async suspend/resume to reduce LP0 latency */
4460 device_enable_async_suspend(&pdev->dev);
4462 if (plat->power_off_rail) {
4463 tegra_host->reboot_notify.notifier_call =
4464 tegra_sdhci_reboot_notify;
4465 register_reboot_notifier(&tegra_host->reboot_notify);
4467 #ifdef CONFIG_DEBUG_FS
4468 tegra_host->dbg_cfg.tap_val =
4470 tegra_host->dbg_cfg.trim_val =
4471 plat->ddr_trim_delay;
4472 tegra_host->dbg_cfg.clk_ungated =
4473 plat->disable_clock_gate;
4478 if (gpio_is_valid(plat->cd_gpio))
4479 gpio_free(plat->cd_gpio);
4481 if (tegra_host->is_ddr_clk_set)
4482 clk_disable_unprepare(tegra_host->ddr_clk);
4484 clk_disable_unprepare(tegra_host->sdr_clk);
4485 pm_runtime_put_sync(&pdev->dev);
4487 if (tegra_host->ddr_clk)
4488 clk_put(tegra_host->ddr_clk);
4489 if (tegra_host->sdr_clk)
4490 clk_put(tegra_host->sdr_clk);
4492 if (gpio_is_valid(plat->wp_gpio))
4493 gpio_free(plat->wp_gpio);
4495 if (gpio_is_valid(plat->cd_gpio))
4496 free_irq(gpio_to_irq(plat->cd_gpio), host);
4498 if (gpio_is_valid(plat->power_gpio))
4499 gpio_free(plat->power_gpio);
4502 sdhci_pltfm_free(pdev);
4506 static int sdhci_tegra_remove(struct platform_device *pdev)
/*
 * Platform-driver .remove callback: unregister the SDHCI host and release
 * every resource acquired by sdhci_tegra_probe(), roughly in reverse order
 * of acquisition.
 *
 * NOTE(review): this listing elides some original lines (the gaps in the
 * embedded numbering, e.g. 4507, 4541, 4559 — braces, else arms and the
 * final return) — confirm against the full source before modifying.
 */
4508 struct sdhci_host *host = platform_get_drvdata(pdev);
4509 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
4510 struct sdhci_tegra *tegra_host = pltfm_host->priv;
4511 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
/* An all-ones interrupt-status read means the register interface is
 * unresponsive, so tell sdhci_remove_host() the controller is dead. */
4512 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
4515 sdhci_remove_host(host, dead);
/* Best-effort regulator disable: a failure is only logged, not fatal. */
4517 rc = tegra_sdhci_configure_regulators(tegra_host, CONFIG_REG_DIS, 0, 0);
4519 dev_err(mmc_dev(host->mmc),
4520 "Regulator disable in remove failed %d\n", rc);
/* Drop the regulator references taken during probe. */
4522 if (tegra_host->vdd_slot_reg)
4523 regulator_put(tegra_host->vdd_slot_reg);
4524 if (tegra_host->vdd_io_reg)
4525 regulator_put(tegra_host->vdd_io_reg);
/* Release write-protect, card-detect (plus its IRQ) and power GPIOs. */
4527 if (gpio_is_valid(plat->wp_gpio))
4528 gpio_free(plat->wp_gpio);
4530 if (gpio_is_valid(plat->cd_gpio)) {
4531 free_irq(gpio_to_irq(plat->cd_gpio), host);
4532 gpio_free(plat->cd_gpio);
4535 if (gpio_is_valid(plat->power_gpio))
4536 gpio_free(plat->power_gpio);
/* If the controller clock is still running, stop the active bus clock
 * (DDR when is_ddr_clk_set; the SDR arm's 'else' line is elided here)
 * and drop the runtime-PM reference taken in probe. */
4538 if (tegra_host->clk_enabled) {
4539 if (tegra_host->is_ddr_clk_set)
4540 clk_disable_unprepare(tegra_host->ddr_clk);
4542 clk_disable_unprepare(tegra_host->sdr_clk);
4543 pm_runtime_put_sync(&pdev->dev);
/* Release both clock handles regardless of which one was in use. */
4546 if (tegra_host->ddr_clk)
4547 clk_put(tegra_host->ddr_clk);
4548 if (tegra_host->sdr_clk)
4549 clk_put(tegra_host->sdr_clk);
/* EMC and AHB (sclk) scaling clocks: only disable the ones that probe
 * successfully obtained and that are currently enabled. */
4551 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on)
4552 clk_disable_unprepare(tegra_host->emc_clk);
4553 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on)
4554 clk_disable_unprepare(tegra_host->sclk);
/* Reboot notifier is only registered when the rail is powered off on
 * shutdown (see probe); mirror that condition here. */
4555 if (plat->power_off_rail)
4556 unregister_reboot_notifier(&tegra_host->reboot_notify);
4558 sdhci_pltfm_free(pdev);
/*
 * Platform-driver glue: binds to "sdhci-tegra" platform devices and to the
 * DT compatibles listed in sdhci_tegra_dt_match; PM callbacks come from the
 * shared sdhci-pltfm helpers (SDHCI_PLTFM_PMOPS).
 * NOTE(review): the .driver sub-struct delimiters are elided in this
 * listing (numbering gaps at 4564/4569/4572-4573).
 */
4563 static struct platform_driver sdhci_tegra_driver = {
4565 .name = "sdhci-tegra",
4566 .owner = THIS_MODULE,
4567 .of_match_table = sdhci_tegra_dt_match,
4568 .pm = SDHCI_PLTFM_PMOPS,
4570 .probe = sdhci_tegra_probe,
4571 .remove = sdhci_tegra_remove,
4576 MODULE_DESCRIPTION("SDHCI driver for Tegra");
4577 MODULE_AUTHOR("Google, Inc.");
4578 MODULE_LICENSE("GPL v2");